105
vendor/github.com/projectcalico/libcalico-go/lib/apiconfig/apiconfig.go
generated
vendored
105
vendor/github.com/projectcalico/libcalico-go/lib/apiconfig/apiconfig.go
generated
vendored
@@ -1,105 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package apiconfig
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
)
|
||||
|
||||
type DatastoreType string
|
||||
|
||||
const (
|
||||
EtcdV3 DatastoreType = "etcdv3"
|
||||
Kubernetes DatastoreType = "kubernetes"
|
||||
KindCalicoAPIConfig = "CalicoAPIConfig"
|
||||
)
|
||||
|
||||
// CalicoAPIConfig contains the connection information for a Calico CalicoAPIConfig resource
|
||||
type CalicoAPIConfig struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
// Specification of the BGPConfiguration.
|
||||
Spec CalicoAPIConfigSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
// CalicoAPIConfigSpec contains the specification for a Calico CalicoAPIConfig resource.
|
||||
type CalicoAPIConfigSpec struct {
|
||||
DatastoreType DatastoreType `json:"datastoreType" envconfig:"DATASTORE_TYPE" default:"etcdv3"`
|
||||
// Inline the ectd config fields
|
||||
EtcdConfig
|
||||
// Inline the k8s config fields.
|
||||
KubeConfig
|
||||
}
|
||||
|
||||
type EtcdConfig struct {
|
||||
EtcdEndpoints string `json:"etcdEndpoints" envconfig:"ETCD_ENDPOINTS"`
|
||||
EtcdDiscoverySrv string `json:"etcdDiscoverySrv" envconfig:"ETCD_DISCOVERY_SRV"`
|
||||
EtcdUsername string `json:"etcdUsername" envconfig:"ETCD_USERNAME"`
|
||||
EtcdPassword string `json:"etcdPassword" envconfig:"ETCD_PASSWORD"`
|
||||
EtcdKeyFile string `json:"etcdKeyFile" envconfig:"ETCD_KEY_FILE"`
|
||||
EtcdCertFile string `json:"etcdCertFile" envconfig:"ETCD_CERT_FILE"`
|
||||
EtcdCACertFile string `json:"etcdCACertFile" envconfig:"ETCD_CA_CERT_FILE"`
|
||||
|
||||
// These config file parameters are to support inline certificates, keys and CA / Trusted certificate.
|
||||
// There are no corresponding environment variables to avoid accidental exposure.
|
||||
EtcdKey string `json:"etcdKey" ignored:"true"`
|
||||
EtcdCert string `json:"etcdCert" ignored:"true"`
|
||||
EtcdCACert string `json:"etcdCACert" ignored:"true"`
|
||||
}
|
||||
|
||||
type KubeConfig struct {
|
||||
Kubeconfig string `json:"kubeconfig" envconfig:"KUBECONFIG" default:""`
|
||||
K8sAPIEndpoint string `json:"k8sAPIEndpoint" envconfig:"K8S_API_ENDPOINT" default:""`
|
||||
K8sKeyFile string `json:"k8sKeyFile" envconfig:"K8S_KEY_FILE" default:""`
|
||||
K8sCertFile string `json:"k8sCertFile" envconfig:"K8S_CERT_FILE" default:""`
|
||||
K8sCAFile string `json:"k8sCAFile" envconfig:"K8S_CA_FILE" default:""`
|
||||
K8sAPIToken string `json:"k8sAPIToken" ignore:"true"`
|
||||
K8sInsecureSkipTLSVerify bool `json:"k8sInsecureSkipTLSVerify" envconfig:"K8S_INSECURE_SKIP_TLS_VERIFY" default:""`
|
||||
K8sDisableNodePoll bool `json:"k8sDisableNodePoll" envconfig:"K8S_DISABLE_NODE_POLL" default:""`
|
||||
|
||||
// K8sUsePodCIDR controls whether or not IPAM blocks are generated based on Node.Spec.PodCIDR. Set this
|
||||
// to true when using host-local IPAM, and set to false when using calico-ipam.
|
||||
K8sUsePodCIDR bool `json:"usePodCIDR" envconfig:"USE_POD_CIDR" default:""`
|
||||
}
|
||||
|
||||
// NewCalicoAPIConfig creates a new (zeroed) CalicoAPIConfig struct with the
|
||||
// TypeMetadata initialised to the current version.
|
||||
func NewCalicoAPIConfig() *CalicoAPIConfig {
|
||||
return &CalicoAPIConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: KindCalicoAPIConfig,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// IsAlphaFeatureSet checks if the comma separated features have the
|
||||
// name set in it.
|
||||
func IsAlphaFeatureSet(features, name string) bool {
|
||||
|
||||
fs := strings.Split(features, ",")
|
||||
for _, f := range fs {
|
||||
if f == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
76
vendor/github.com/projectcalico/libcalico-go/lib/apiconfig/load.go
generated
vendored
76
vendor/github.com/projectcalico/libcalico-go/lib/apiconfig/load.go
generated
vendored
@@ -1,76 +0,0 @@
|
||||
package apiconfig
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/kelseyhightower/envconfig"
|
||||
yaml "github.com/projectcalico/go-yaml-wrapper"
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// LoadClientConfig loads the ClientConfig from the specified file (if specified)
|
||||
// or from environment variables (if the file is not specified).
|
||||
func LoadClientConfig(filename string) (*CalicoAPIConfig, error) {
|
||||
|
||||
// Override / merge with values loaded from the specified file.
|
||||
if filename != "" {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := LoadClientConfigFromBytes(b)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("syntax error in %s: %v", filename, err)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
return LoadClientConfigFromEnvironment()
|
||||
}
|
||||
|
||||
// LoadClientConfig loads the ClientConfig from the supplied bytes containing
|
||||
// YAML or JSON format data.
|
||||
func LoadClientConfigFromBytes(b []byte) (*CalicoAPIConfig, error) {
|
||||
var c CalicoAPIConfig
|
||||
|
||||
// Default the backend type to be etcd v3. This will be overridden if
|
||||
// explicitly specified in the file.
|
||||
log.Debug("Loading config from JSON or YAML data")
|
||||
c = CalicoAPIConfig{
|
||||
Spec: CalicoAPIConfigSpec{
|
||||
DatastoreType: EtcdV3,
|
||||
},
|
||||
}
|
||||
|
||||
if err := yaml.UnmarshalStrict(b, &c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate the version and kind.
|
||||
if c.APIVersion != apiv3.GroupVersionCurrent {
|
||||
return nil, errors.New("invalid config file: unknown APIVersion '" + c.APIVersion + "'")
|
||||
}
|
||||
if c.Kind != KindCalicoAPIConfig {
|
||||
return nil, errors.New("invalid config file: expected kind '" + KindCalicoAPIConfig + "', got '" + c.Kind + "'")
|
||||
}
|
||||
|
||||
log.Debug("Datastore type: ", c.Spec.DatastoreType)
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// LoadClientConfig loads the ClientConfig from the specified file (if specified)
|
||||
// or from environment variables (if the file is not specified).
|
||||
func LoadClientConfigFromEnvironment() (*CalicoAPIConfig, error) {
|
||||
c := NewCalicoAPIConfig()
|
||||
|
||||
// Load client config from environment variables.
|
||||
log.Debug("Loading config from environment")
|
||||
if err := envconfig.Process("calico", &c.Spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
250
vendor/github.com/projectcalico/libcalico-go/lib/backend/api/api.go
generated
vendored
250
vendor/github.com/projectcalico/libcalico-go/lib/backend/api/api.go
generated
vendored
@@ -1,250 +0,0 @@
|
||||
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"context"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
// SyncStatus represents the overall state of the datastore.
|
||||
// When the status changes, the Syncer calls OnStatusUpdated() on its callback.
|
||||
type SyncStatus uint8
|
||||
|
||||
const (
|
||||
// WaitForDatastore means the Syncer is waiting to connect to the datastore.
|
||||
// (Or, it is waiting for the data in the datastore to be ready to use.)
|
||||
WaitForDatastore SyncStatus = iota
|
||||
// ResyncInProgress means the Syncer is resyncing with the datastore.
|
||||
// During the first resync, the Syncer sends updates for all keys that
|
||||
// exist in the datastore as well as any updates that occur
|
||||
// concurrently.
|
||||
ResyncInProgress
|
||||
// InSync means the Syncer has now sent all the existing keys in the
|
||||
// datastore and the user of hte API has the full picture.
|
||||
InSync
|
||||
)
|
||||
|
||||
func (s SyncStatus) String() string {
|
||||
switch s {
|
||||
case WaitForDatastore:
|
||||
return "wait-for-ready"
|
||||
case InSync:
|
||||
return "in-sync"
|
||||
case ResyncInProgress:
|
||||
return "resync"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown<%v>", uint8(s))
|
||||
}
|
||||
}
|
||||
|
||||
// Client is the interface to the backend datastore. It makes heavy use of the
|
||||
// KVPair struct, which contains a key and (optional) value drawn from the
|
||||
// backend/model package along with opaque revision information that the
|
||||
// datastore uses to enforce consistency.
|
||||
type Client interface {
|
||||
// Create creates the object specified in the KVPair, which must not
|
||||
// already exist. On success, returns a KVPair for the object with
|
||||
// revision information filled-in.
|
||||
Create(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Update modifies the existing object specified in the KVPair.
|
||||
// On success, returns a KVPair for the object with revision
|
||||
// information filled-in. If the input KVPair has revision
|
||||
// information then the update only succeeds if the revision is still
|
||||
// current.
|
||||
Update(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Apply updates or creates the object specified in the KVPair.
|
||||
// On success, returns a KVPair for the object with revision
|
||||
// information filled-in. Revision information is ignored on an Apply.
|
||||
Apply(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Delete removes the object specified by the key. If the call
|
||||
// contains revision information, the delete only succeeds if the
|
||||
// revision is still current.
|
||||
//
|
||||
// Some keys are hierarchical, and Delete is a recursive operation.
|
||||
//
|
||||
// Any objects that were implicitly added by a Create operation should
|
||||
// also be removed when deleting the objects that implicitly created it.
|
||||
// For example, deleting the last WorkloadEndpoint in a Workload will
|
||||
// also remove the Workload.
|
||||
Delete(ctx context.Context, key model.Key, revision string) (*model.KVPair, error)
|
||||
|
||||
// DeleteKVP removes the object specified by the KVPair. If the KVPair
|
||||
// contains revision information, the delete only succeeds if the
|
||||
// revision is still current.
|
||||
//
|
||||
// Some keys are hierarchical, and Delete is a recursive operation.
|
||||
//
|
||||
// Any objects that were implicitly added by a Create operation should
|
||||
// also be removed when deleting the objects that implicitly created it.
|
||||
// For example, deleting the last WorkloadEndpoint in a Workload will
|
||||
// also remove the Workload.
|
||||
DeleteKVP(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Get returns the object identified by the given key as a KVPair with
|
||||
// revision information.
|
||||
Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error)
|
||||
|
||||
// List returns a slice of KVPairs matching the input list options.
|
||||
// list should be passed one of the model.<Type>ListOptions structs.
|
||||
// Non-zero fields in the struct are used as filters.
|
||||
List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error)
|
||||
|
||||
// Watch returns a WatchInterface used for watching a resources matching the
|
||||
// input list options.
|
||||
Watch(ctx context.Context, list model.ListInterface, revision string) (WatchInterface, error)
|
||||
|
||||
// EnsureInitialized ensures that the backend is initialized
|
||||
// any ready to be used.
|
||||
EnsureInitialized() error
|
||||
|
||||
// Clean removes Calico data from the backend datastore. Used for test purposes.
|
||||
Clean() error
|
||||
|
||||
// Close the client.
|
||||
//Close()
|
||||
}
|
||||
|
||||
type Syncer interface {
|
||||
// Starts the Syncer. May start a background goroutine.
|
||||
Start()
|
||||
// Stops the Syncer. Stops all background goroutines. Any cached updates that the syncer knows about
|
||||
// are emitted as delete events.
|
||||
Stop()
|
||||
}
|
||||
|
||||
type SyncerCallbacks interface {
|
||||
// OnStatusUpdated is called when the status of the sync status of the
|
||||
// datastore changes.
|
||||
OnStatusUpdated(status SyncStatus)
|
||||
|
||||
// OnUpdates is called when the Syncer has one or more updates to report.
|
||||
// Updates consist of typed key-value pairs. The keys are drawn from the
|
||||
// backend.model package. The values are either nil, to indicate a
|
||||
// deletion (or failure to parse a value), or a pointer to a value of
|
||||
// the associated value type.
|
||||
//
|
||||
// When a recursive delete is made, deleting many leaf keys, the Syncer
|
||||
// generates deletion updates for all the leaf keys.
|
||||
OnUpdates(updates []Update)
|
||||
}
|
||||
|
||||
// SyncerParseFailCallbacks is an optional interface that can be implemented
|
||||
// by a Syncer callback. Datastores that support it can report a failure to
|
||||
// parse a particular key or value.
|
||||
type SyncerParseFailCallbacks interface {
|
||||
ParseFailed(rawKey string, rawValue string)
|
||||
}
|
||||
|
||||
// Update from the Syncer. A KV pair plus extra metadata.
|
||||
type Update struct {
|
||||
model.KVPair
|
||||
UpdateType UpdateType
|
||||
}
|
||||
|
||||
type UpdateType uint8
|
||||
|
||||
const (
|
||||
UpdateTypeKVUnknown UpdateType = iota
|
||||
UpdateTypeKVNew
|
||||
UpdateTypeKVUpdated
|
||||
UpdateTypeKVDeleted
|
||||
)
|
||||
|
||||
// Interface can be implemented by anything that knows how to watch and report changes.
|
||||
type WatchInterface interface {
|
||||
// Stops watching. Will close the channel returned by ResultChan(). Releases
|
||||
// any resources used by the watch.
|
||||
Stop()
|
||||
|
||||
// Returns a chan which will receive all the events. This channel is closed when:
|
||||
// - Stop() is called, or
|
||||
// - A error of type errors.ErrorWatchTerminated is received.
|
||||
// In both cases the watcher will be cleaned up, and the client should stop receiving
|
||||
// from this channel.
|
||||
ResultChan() <-chan WatchEvent
|
||||
|
||||
// HasTerminated returns true if the watcher has terminated and released all
|
||||
// resources. This is used for test purposes.
|
||||
HasTerminated() bool
|
||||
}
|
||||
|
||||
// WatchEventType defines the possible types of events.
|
||||
type WatchEventType string
|
||||
|
||||
const (
|
||||
WatchAdded WatchEventType = "ADDED"
|
||||
WatchModified WatchEventType = "MODIFIED"
|
||||
WatchDeleted WatchEventType = "DELETED"
|
||||
WatchError WatchEventType = "ERROR"
|
||||
)
|
||||
|
||||
// Event represents a single event to a watched resource.
|
||||
type WatchEvent struct {
|
||||
Type WatchEventType
|
||||
|
||||
// Old is:
|
||||
// * If Type is Added or Error: nil
|
||||
// * If Type is Modified or Deleted: the previous state of the object
|
||||
// New is:
|
||||
// * If Type is Added or Modified: the new state of the object.
|
||||
// * If Type is Deleted or Error: nil
|
||||
Old *model.KVPair
|
||||
New *model.KVPair
|
||||
|
||||
// The error, if EventType is Error.
|
||||
Error error
|
||||
}
|
||||
|
||||
// FakeWatcher is inspired by apimachinery (watch) FakeWatcher
|
||||
type FakeWatcher struct {
|
||||
result chan WatchEvent
|
||||
Stopped bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// NewFake constructs a FakeWatcher
|
||||
func NewFake() *FakeWatcher {
|
||||
return &FakeWatcher{
|
||||
result: make(chan WatchEvent),
|
||||
}
|
||||
}
|
||||
|
||||
// Stop implements WatchInterface
|
||||
func (f *FakeWatcher) Stop() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if !f.Stopped {
|
||||
close(f.result)
|
||||
f.Stopped = true
|
||||
}
|
||||
}
|
||||
|
||||
// ResultChan implements WatchInterface
|
||||
func (f *FakeWatcher) ResultChan() <-chan WatchEvent {
|
||||
return f.result
|
||||
}
|
||||
|
||||
// HasTerminated implements WatchInterface
|
||||
func (f *FakeWatcher) HasTerminated() bool {
|
||||
return false
|
||||
}
|
||||
40
vendor/github.com/projectcalico/libcalico-go/lib/backend/client.go
generated
vendored
40
vendor/github.com/projectcalico/libcalico-go/lib/backend/client.go
generated
vendored
@@ -1,40 +0,0 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package backend
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/apiconfig"
|
||||
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/etcdv3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewClient creates a new backend datastore client.
|
||||
func NewClient(config apiconfig.CalicoAPIConfig) (c bapi.Client, err error) {
|
||||
log.Debugf("Using datastore type '%s'", config.Spec.DatastoreType)
|
||||
switch config.Spec.DatastoreType {
|
||||
case apiconfig.EtcdV3:
|
||||
c, err = etcdv3.NewEtcdV3Client(&config.Spec.EtcdConfig)
|
||||
case apiconfig.Kubernetes:
|
||||
c, err = k8s.NewKubeClient(&config.Spec)
|
||||
default:
|
||||
err = fmt.Errorf("unknown datastore type: %v",
|
||||
config.Spec.DatastoreType)
|
||||
}
|
||||
return
|
||||
}
|
||||
18
vendor/github.com/projectcalico/libcalico-go/lib/backend/doc.go
generated
vendored
18
vendor/github.com/projectcalico/libcalico-go/lib/backend/doc.go
generated
vendored
@@ -1,18 +0,0 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package backend implements the backend data store client and associated backend data type.
|
||||
*/
|
||||
package backend
|
||||
121
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/conversion.go
generated
vendored
121
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/conversion.go
generated
vendored
@@ -1,121 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdv3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
// convertListResponse converts etcdv3 Kv to a model.KVPair with parsed values.
|
||||
// If the etcdv3 key or value does not represent the resource specified by the ListInterface,
|
||||
// or if value cannot be parsed, this method returns nil.
|
||||
func convertListResponse(ekv *mvccpb.KeyValue, l model.ListInterface) *model.KVPair {
|
||||
log.WithField("etcdv3-etcdKey", string(ekv.Key)).Debug("Processing etcdv3 entry")
|
||||
if k := l.KeyFromDefaultPath(string(ekv.Key)); k != nil {
|
||||
log.WithField("model-etcdKey", k).Debug("Key is valid and converted to model-etcdKey")
|
||||
if v, err := model.ParseValue(k, ekv.Value); err == nil {
|
||||
log.Debug("Value is valid - return KVPair with parsed value")
|
||||
return &model.KVPair{Key: k, Value: v, Revision: strconv.FormatInt(ekv.ModRevision, 10)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertWatchEvent converts an etcdv3 watch event to an api.WatchEvent, or nil if the
|
||||
// event did not correspond to an event that we are interested in.
|
||||
func convertWatchEvent(e *clientv3.Event, l model.ListInterface) (*api.WatchEvent, error) {
|
||||
log.WithField("etcdv3-etcdKey", string(e.Kv.Key)).Debug("Processing etcdv3 event")
|
||||
|
||||
var eventType api.WatchEventType
|
||||
switch {
|
||||
case e.Type == clientv3.EventTypeDelete:
|
||||
eventType = api.WatchDeleted
|
||||
case e.IsCreate():
|
||||
eventType = api.WatchAdded
|
||||
default:
|
||||
eventType = api.WatchModified
|
||||
}
|
||||
|
||||
var oldKV, newKV *model.KVPair
|
||||
var err error
|
||||
if k := l.KeyFromDefaultPath(string(e.Kv.Key)); k != nil {
|
||||
log.WithField("model-etcdKey", k).Debug("Key is valid and converted to model-etcdKey")
|
||||
|
||||
if eventType != api.WatchDeleted {
|
||||
// Add or modify, parse the new value.
|
||||
if newKV, err = etcdToKVPair(k, e.Kv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if eventType != api.WatchAdded {
|
||||
// Delete or modify, parse the old value.
|
||||
if oldKV, err = etcdToKVPair(k, e.PrevKv); err != nil {
|
||||
if eventType == api.WatchDeleted || err != ErrMissingValue {
|
||||
// Ignore missing value for modified events, but we need them for deletion.
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.WithField("key", string(e.Kv.Key)).Debug("key filtered")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &api.WatchEvent{
|
||||
Old: oldKV,
|
||||
New: newKV,
|
||||
Type: eventType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var (
|
||||
ErrMissingValue = fmt.Errorf("missing etcd KV")
|
||||
)
|
||||
|
||||
// etcdToKVPair converts an etcd KeyValue in to model.KVPair.
|
||||
func etcdToKVPair(key model.Key, ekv *mvccpb.KeyValue) (*model.KVPair, error) {
|
||||
if ekv == nil {
|
||||
return nil, ErrMissingValue
|
||||
}
|
||||
|
||||
v, err := model.ParseValue(key, ekv.Value)
|
||||
if err != nil {
|
||||
if len(ekv.Value) == 0 {
|
||||
// We do this check after the ParseValue call because ParseValue has some special-case logic for handling
|
||||
// empty values for some resource types.
|
||||
return nil, ErrMissingValue
|
||||
}
|
||||
return nil, errors.ErrorParsingDatastoreEntry{
|
||||
RawKey: string(ekv.Key),
|
||||
RawValue: string(ekv.Value),
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
return &model.KVPair{
|
||||
Key: key,
|
||||
Value: v,
|
||||
Revision: strconv.FormatInt(ekv.ModRevision, 10),
|
||||
}, nil
|
||||
}
|
||||
542
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/etcdv3.go
generated
vendored
542
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/etcdv3.go
generated
vendored
@@ -1,542 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"crypto/tls"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/srv"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/apiconfig"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
clientTimeout = 10 * time.Second
|
||||
keepaliveTime = 30 * time.Second
|
||||
keepaliveTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type etcdV3Client struct {
|
||||
etcdClient *clientv3.Client
|
||||
}
|
||||
|
||||
func NewEtcdV3Client(config *apiconfig.EtcdConfig) (api.Client, error) {
|
||||
if config.EtcdEndpoints != "" && config.EtcdDiscoverySrv != "" {
|
||||
log.Warning("Multiple etcd endpoint discovery methods specified in etcdv3 API config")
|
||||
return nil, errors.New("multiple discovery or bootstrap options specified, use either \"etcdEndpoints\" or \"etcdDiscoverySrv\"")
|
||||
}
|
||||
|
||||
// Split the endpoints into a location slice.
|
||||
etcdLocation := []string{}
|
||||
if config.EtcdEndpoints != "" {
|
||||
etcdLocation = strings.Split(config.EtcdEndpoints, ",")
|
||||
}
|
||||
|
||||
if config.EtcdDiscoverySrv != "" {
|
||||
srvs, srvErr := srv.GetClient("etcd-client", config.EtcdDiscoverySrv)
|
||||
if srvErr != nil {
|
||||
return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %v", srvErr)
|
||||
}
|
||||
etcdLocation = srvs.Endpoints
|
||||
}
|
||||
|
||||
if len(etcdLocation) == 0 {
|
||||
log.Warning("No etcd endpoints specified in etcdv3 API config")
|
||||
return nil, errors.New("no etcd endpoints specified")
|
||||
}
|
||||
|
||||
// Create the etcd client
|
||||
// If Etcd Certificate and Key are provided inline through command line agrument,
|
||||
// then the inline values take precedence over the ones in the config file.
|
||||
// All the three parametes, Certificate, key and CA certificate are to be provided inline for processing.
|
||||
var tls *tls.Config
|
||||
var err error
|
||||
|
||||
haveInline := config.EtcdCert != "" || config.EtcdKey != "" || config.EtcdCACert != ""
|
||||
haveFiles := config.EtcdCertFile != "" || config.EtcdKeyFile != "" || config.EtcdCACertFile != ""
|
||||
|
||||
if haveInline && haveFiles {
|
||||
return nil, fmt.Errorf("Cannot mix inline certificate-key and certificate / key files")
|
||||
}
|
||||
|
||||
if haveInline {
|
||||
tlsInfo := &TlsInlineCertKey{
|
||||
CACert: config.EtcdCACert,
|
||||
Cert: config.EtcdCert,
|
||||
Key: config.EtcdKey,
|
||||
}
|
||||
tls, err = tlsInfo.ClientConfigInlineCertKey()
|
||||
} else {
|
||||
tlsInfo := &transport.TLSInfo{
|
||||
CAFile: config.EtcdCACertFile,
|
||||
CertFile: config.EtcdCertFile,
|
||||
KeyFile: config.EtcdKeyFile,
|
||||
}
|
||||
tls, err = tlsInfo.ClientConfig()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not initialize etcdv3 client: %+v", err)
|
||||
}
|
||||
|
||||
// Build the etcdv3 config.
|
||||
cfg := clientv3.Config{
|
||||
Endpoints: etcdLocation,
|
||||
TLS: tls,
|
||||
DialTimeout: clientTimeout,
|
||||
DialKeepAliveTime: keepaliveTime,
|
||||
DialKeepAliveTimeout: keepaliveTimeout,
|
||||
}
|
||||
|
||||
// Plumb through the username and password if both are configured.
|
||||
if config.EtcdUsername != "" && config.EtcdPassword != "" {
|
||||
cfg.Username = config.EtcdUsername
|
||||
cfg.Password = config.EtcdPassword
|
||||
}
|
||||
|
||||
client, err := clientv3.New(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &etcdV3Client{etcdClient: client}, nil
|
||||
}
|
||||
|
||||
// Create an entry in the datastore. If the entry already exists, this will return
|
||||
// an ErrorResourceAlreadyExists error and the current entry.
|
||||
func (c *etcdV3Client) Create(ctx context.Context, d *model.KVPair) (*model.KVPair, error) {
|
||||
logCxt := log.WithFields(log.Fields{"model-etcdKey": d.Key, "value": d.Value, "ttl": d.TTL, "rev": d.Revision})
|
||||
logCxt.Debug("Processing Create request")
|
||||
|
||||
key, value, err := getKeyValueStrings(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logCxt = logCxt.WithField("etcdv3-etcdKey", key)
|
||||
|
||||
putOpts, err := c.getTTLOption(ctx, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Checking for 0 version of the etcdKey, which means it doesn't exists yet,
|
||||
// and if it does, get the current value.
|
||||
logCxt.Debug("Performing etcdv3 transaction for Create request")
|
||||
txnResp, err := c.etcdClient.Txn(ctx).If(
|
||||
clientv3.Compare(clientv3.Version(key), "=", 0),
|
||||
).Then(
|
||||
clientv3.OpPut(key, value, putOpts...),
|
||||
).Else(
|
||||
clientv3.OpGet(key),
|
||||
).Commit()
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Warning("Create failed")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
|
||||
if !txnResp.Succeeded {
|
||||
// The resource must already exist. Extract the current newValue and
|
||||
// return that if possible.
|
||||
logCxt.Debug("Create transaction failed due to resource already existing")
|
||||
var existing *model.KVPair
|
||||
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
|
||||
if len(getResp.Kvs) != 0 {
|
||||
existing, _ = etcdToKVPair(d.Key, getResp.Kvs[0])
|
||||
}
|
||||
return existing, cerrors.ErrorResourceAlreadyExists{Identifier: d.Key}
|
||||
}
|
||||
|
||||
v, err := model.ParseValue(d.Key, []byte(value))
|
||||
if err != nil {
|
||||
return nil, cerrors.ErrorPartialFailure{Err: fmt.Errorf("Unexpected error parsing stored datastore entry '%v': %+v", value, err)}
|
||||
}
|
||||
d.Value = v
|
||||
d.Revision = strconv.FormatInt(txnResp.Header.Revision, 10)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Update an entry in the datastore. If the entry does not exist, this will return
|
||||
// an ErrorResourceDoesNotExist error. The ResourceVersion must be specified, and if
|
||||
// incorrect will return a ErrorResourceUpdateConflict error and the current entry.
|
||||
func (c *etcdV3Client) Update(ctx context.Context, d *model.KVPair) (*model.KVPair, error) {
|
||||
logCxt := log.WithFields(log.Fields{"model-etcdKey": d.Key, "value": d.Value, "ttl": d.TTL, "rev": d.Revision})
|
||||
logCxt.Debug("Processing Update request")
|
||||
key, value, err := getKeyValueStrings(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logCxt = logCxt.WithField("etcdv3-etcdKey", key)
|
||||
|
||||
opts, err := c.getTTLOption(ctx, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ResourceVersion must be set for an Update.
|
||||
rev, err := parseRevision(d.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conds := []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", rev)}
|
||||
|
||||
logCxt.Debug("Performing etcdv3 transaction for Update request")
|
||||
txnResp, err := c.etcdClient.Txn(ctx).If(
|
||||
conds...,
|
||||
).Then(
|
||||
clientv3.OpPut(key, value, opts...),
|
||||
).Else(
|
||||
clientv3.OpGet(key),
|
||||
).Commit()
|
||||
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Warning("Update failed")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
|
||||
// Etcd V3 does not return a error when compare condition fails we must verify the
|
||||
// response Succeeded field instead. If the compare did not succeed then check for
|
||||
// a successful get to return either an UpdateConflict or a ResourceDoesNotExist error.
|
||||
if !txnResp.Succeeded {
|
||||
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
|
||||
if len(getResp.Kvs) == 0 {
|
||||
logCxt.Debug("Update transaction failed due to resource not existing")
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: d.Key}
|
||||
}
|
||||
|
||||
logCxt.Debug("Update transaction failed due to resource update conflict")
|
||||
existing, _ := etcdToKVPair(d.Key, getResp.Kvs[0])
|
||||
return existing, cerrors.ErrorResourceUpdateConflict{Identifier: d.Key}
|
||||
}
|
||||
|
||||
v, err := model.ParseValue(d.Key, []byte(value))
|
||||
cerrors.PanicIfErrored(err, "Unexpected error parsing stored datastore entry: %v", value)
|
||||
d.Value = v
|
||||
d.Revision = strconv.FormatInt(txnResp.Header.Revision, 10)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
//TODO Remove once we get rid of the v1 client. Apply should no longer be supported
|
||||
// at least in it's current guise. Apply will need to be handled further up the stack
|
||||
// by performing a Get/Create or Update to ensure we don't lose certain read-only Metadata.
|
||||
// It's possible that we will just perform that processing in the clients (e.g. calicoctl),
|
||||
// but that is to be decided.
|
||||
func (c *etcdV3Client) Apply(ctx context.Context, d *model.KVPair) (*model.KVPair, error) {
|
||||
logCxt := log.WithFields(log.Fields{"etcdKey": d.Key, "value": d.Value, "ttl": d.TTL, "rev": d.Revision})
|
||||
logCxt.Debug("Processing Apply request")
|
||||
key, value, err := getKeyValueStrings(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
putOpts, err := c.getTTLOption(ctx, d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logCxt.Debug("Performing etcdv3 Put for Apply request")
|
||||
resp, err := c.etcdClient.Put(ctx, key, value, putOpts...)
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Warning("Apply failed")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
|
||||
v, err := model.ParseValue(d.Key, []byte(value))
|
||||
cerrors.PanicIfErrored(err, "Unexpected error parsing stored datastore entry: %v", value)
|
||||
d.Value = v
|
||||
d.Revision = strconv.FormatInt(resp.Header.Revision, 10)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (c *etcdV3Client) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision)
|
||||
}
|
||||
|
||||
// Delete an entry in the datastore. This errors if the entry does not exists.
|
||||
func (c *etcdV3Client) Delete(ctx context.Context, k model.Key, revision string) (*model.KVPair, error) {
|
||||
logCxt := log.WithFields(log.Fields{"model-etcdKey": k, "rev": revision})
|
||||
logCxt.Debug("Processing Delete request")
|
||||
key, err := model.KeyToDefaultDeletePath(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logCxt = logCxt.WithField("etcdv3-etcdKey", key)
|
||||
|
||||
conds := []clientv3.Cmp{}
|
||||
if len(revision) != 0 {
|
||||
rev, err := parseRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conds = append(conds, clientv3.Compare(clientv3.ModRevision(key), "=", rev))
|
||||
}
|
||||
|
||||
// Perform the delete transaction - note that this is an exact delete, not a prefix delete.
|
||||
logCxt.Debug("Performing etcdv3 transaction for Delete request")
|
||||
txnResp, err := c.etcdClient.Txn(ctx).If(
|
||||
conds...,
|
||||
).Then(
|
||||
clientv3.OpDelete(key, clientv3.WithPrevKV()),
|
||||
).Else(
|
||||
clientv3.OpGet(key),
|
||||
).Commit()
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Warning("Delete failed")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err, Identifier: k}
|
||||
}
|
||||
|
||||
// Transaction did not succeed - which means the ModifiedIndex check failed. We can respond
|
||||
// with the latest settings.
|
||||
if !txnResp.Succeeded {
|
||||
logCxt.Debug("Delete transaction failed due to resource update conflict")
|
||||
|
||||
getResp := txnResp.Responses[0].GetResponseRange()
|
||||
if len(getResp.Kvs) == 0 {
|
||||
logCxt.Debug("Delete transaction failed due to resource not existing")
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
|
||||
}
|
||||
latestValue, err := etcdToKVPair(k, getResp.Kvs[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return latestValue, cerrors.ErrorResourceUpdateConflict{Identifier: k}
|
||||
}
|
||||
|
||||
// The delete response should have succeeded since the Get response did.
|
||||
delResp := txnResp.Responses[0].GetResponseDeleteRange()
|
||||
if delResp.Deleted == 0 {
|
||||
logCxt.Debug("Delete transaction failed due to resource not existing")
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
|
||||
}
|
||||
|
||||
// Parse the deleted value. Don't propagate the error in this case since the
|
||||
// delete did succeed.
|
||||
previousValue, _ := etcdToKVPair(k, delResp.PrevKvs[0])
|
||||
return previousValue, nil
|
||||
}
|
||||
|
||||
// Get an entry from the datastore. This errors if the entry does not exist.
|
||||
func (c *etcdV3Client) Get(ctx context.Context, k model.Key, revision string) (*model.KVPair, error) {
|
||||
logCxt := log.WithFields(log.Fields{"model-etcdKey": k, "rev": revision})
|
||||
logCxt.Debug("Processing Get request")
|
||||
|
||||
key, err := model.KeyToDefaultPath(k)
|
||||
if err != nil {
|
||||
logCxt.Error("Unable to convert model.Key to an etcdv3 etcdKey")
|
||||
return nil, err
|
||||
}
|
||||
logCxt = logCxt.WithField("etcdv3-etcdKey", key)
|
||||
|
||||
ops := []clientv3.OpOption{}
|
||||
if len(revision) != 0 {
|
||||
rev, err := parseRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ops = append(ops, clientv3.WithRev(rev))
|
||||
}
|
||||
|
||||
logCxt.Debug("Calling Get on etcdv3 client")
|
||||
resp, err := c.etcdClient.Get(ctx, key, ops...)
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Debug("Error returned from etcdv3 client")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
if len(resp.Kvs) == 0 {
|
||||
logCxt.Debug("No results returned from etcdv3 client")
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
|
||||
}
|
||||
|
||||
return etcdToKVPair(k, resp.Kvs[0])
|
||||
}
|
||||
|
||||
// List entries in the datastore. This may return an empty list of there are
|
||||
// no entries matching the request in the ListInterface.
|
||||
func (c *etcdV3Client) List(ctx context.Context, l model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
logCxt := log.WithFields(log.Fields{"list-interface": l, "rev": revision})
|
||||
logCxt.Debug("Processing List request")
|
||||
|
||||
// To list entries, we enumerate from the common root based on the supplied IDs, and then filter the results.
|
||||
key, ops := calculateListKeyAndOptions(logCxt, l)
|
||||
logCxt = logCxt.WithField("etcdv3-etcdKey", key)
|
||||
|
||||
// We may also need to perform a get based on a particular revision.
|
||||
if len(revision) != 0 {
|
||||
rev, err := parseRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ops = append(ops, clientv3.WithRev(rev))
|
||||
}
|
||||
|
||||
logCxt.Debug("Calling Get on etcdv3 client")
|
||||
resp, err := c.etcdClient.Get(ctx, key, ops...)
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Debug("Error returned from etcdv3 client")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
logCxt.WithField("numResults", len(resp.Kvs)).Debug("Processing response from etcdv3")
|
||||
|
||||
// Filter/process the results.
|
||||
list := []*model.KVPair{}
|
||||
for _, p := range resp.Kvs {
|
||||
if kv := convertListResponse(p, l); kv != nil {
|
||||
list = append(list, kv)
|
||||
}
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: list,
|
||||
Revision: strconv.FormatInt(resp.Header.Revision, 10),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func calculateListKeyAndOptions(logCxt *log.Entry, l model.ListInterface) (string, []clientv3.OpOption) {
|
||||
// - If the final name segment of the name is itself a prefix, then just perform a prefix Get
|
||||
// using the constructed key.
|
||||
// - If the etcdKey is actually fully qualified, then perform an exact Get using the constructed
|
||||
// key.
|
||||
// - If the etcdKey is not fully qualified then it is a path prefix but the last segment is complete.
|
||||
// Append a terminating "/" and perform a prefix Get. The terminating / for a prefix Get ensures
|
||||
// for a prefix of "/a" we only return "child entries" of "/a" such as "/a/x" and not siblings
|
||||
// such as "/ab".
|
||||
key := model.ListOptionsToDefaultPathRoot(l)
|
||||
var ops []clientv3.OpOption
|
||||
if model.IsListOptionsLastSegmentPrefix(l) {
|
||||
// The last segment is a prefix, perform a prefix Get without adding a segment
|
||||
// delimiter.
|
||||
logCxt.Debug("List options is a name prefix, don't add a / to the path")
|
||||
ops = append(ops, clientv3.WithPrefix())
|
||||
} else if !model.ListOptionsIsFullyQualified(l) {
|
||||
// The etcdKey not a fully qualified etcdKey - it must be a prefix.
|
||||
logCxt.Debug("List options is a parent prefix, ensure path ends in /")
|
||||
if !strings.HasSuffix(key, "/") {
|
||||
logCxt.Debug("Adding / to path")
|
||||
key += "/"
|
||||
}
|
||||
ops = append(ops, clientv3.WithPrefix())
|
||||
}
|
||||
|
||||
return key, ops
|
||||
}
|
||||
|
||||
// EnsureInitialized makes sure that the etcd data is initialized for use by
|
||||
// Calico.
|
||||
func (c *etcdV3Client) EnsureInitialized() error {
|
||||
//TODO - still need to worry about ready flag.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clean removes all of the Calico data from the datastore.
|
||||
func (c *etcdV3Client) Clean() error {
|
||||
log.Warning("Cleaning etcdv3 datastore of all Calico data")
|
||||
_, err := c.etcdClient.Txn(context.Background()).If().Then(
|
||||
clientv3.OpDelete("/calico/", clientv3.WithPrefix()),
|
||||
).Commit()
|
||||
|
||||
if err != nil {
|
||||
return cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsClean() returns true if there are no /calico/ prefixed entries in the
|
||||
// datastore. This is not part of the exposed API, but is public to allow
|
||||
// direct consumers of the backend API to access this.
|
||||
func (c *etcdV3Client) IsClean() (bool, error) {
|
||||
log.Debug("Calling Get on etcdv3 client")
|
||||
resp, err := c.etcdClient.Get(context.Background(), "/calico/", clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Error returned from etcdv3 client")
|
||||
return false, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
|
||||
// The datastore is clean if no results were enumerated.
|
||||
return len(resp.Kvs) == 0, nil
|
||||
}
|
||||
|
||||
// getTTLOption returns a OpOption slice containing a Lease granted for the TTL.
|
||||
func (c *etcdV3Client) getTTLOption(ctx context.Context, d *model.KVPair) ([]clientv3.OpOption, error) {
|
||||
putOpts := []clientv3.OpOption{}
|
||||
|
||||
if d.TTL != 0 {
|
||||
resp, err := c.etcdClient.Lease.Grant(ctx, int64(d.TTL.Seconds()))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to grant a lease")
|
||||
return nil, cerrors.ErrorDatastoreError{Err: err}
|
||||
}
|
||||
|
||||
putOpts = append(putOpts, clientv3.WithLease(resp.ID))
|
||||
}
|
||||
|
||||
return putOpts, nil
|
||||
}
|
||||
|
||||
// getKeyValueStrings returns the etcdv3 etcdKey and serialized value calculated from the
|
||||
// KVPair.
|
||||
func getKeyValueStrings(d *model.KVPair) (string, string, error) {
|
||||
logCxt := log.WithFields(log.Fields{"model-etcdKey": d.Key, "value": d.Value})
|
||||
key, err := model.KeyToDefaultPath(d.Key)
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Error("Failed to convert model-etcdKey to etcdv3 etcdKey")
|
||||
return "", "", cerrors.ErrorDatastoreError{
|
||||
Err: err,
|
||||
Identifier: d.Key,
|
||||
}
|
||||
}
|
||||
bytes, err := model.SerializeValue(d)
|
||||
if err != nil {
|
||||
logCxt.WithError(err).Error("Failed to serialize value")
|
||||
return "", "", cerrors.ErrorDatastoreError{
|
||||
Err: err,
|
||||
Identifier: d.Key,
|
||||
}
|
||||
}
|
||||
|
||||
return key, string(bytes), nil
|
||||
}
|
||||
|
||||
// parseRevision parses the model.KVPair revision string and converts to the
|
||||
// equivalent etcdv3 int64 value.
|
||||
func parseRevision(revs string) (int64, error) {
|
||||
rev, err := strconv.ParseInt(revs, 10, 64)
|
||||
if err != nil {
|
||||
log.WithField("Revision", revs).Debug("Unable to parse Revision")
|
||||
return 0, cerrors.ErrorValidation{
|
||||
ErroredFields: []cerrors.ErroredField{
|
||||
{
|
||||
Name: "ResourceVersion",
|
||||
Value: revs,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
return rev, nil
|
||||
}
|
||||
115
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/inline_cert_key.go
generated
vendored
115
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/inline_cert_key.go
generated
vendored
@@ -1,115 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// This code has been based on code from etcd repository
|
||||
// to provide support for inline certificates and keys for calicoctl.
|
||||
// Below are the github links for the files from which the code has been borrowed.
|
||||
|
||||
// Copyright 2015 The etcd Authors
|
||||
// https://github.com/etcd-io/etcd/blob/release-3.3/pkg/transport/listener.go
|
||||
|
||||
// Copyright 2016 The etcd Authors
|
||||
// https://github.com/etcd-io/etcd/blob/release-3.3/pkg/tlsutil/tlsutil.go
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdv3
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// This struct is used to store the inline cert, key and CA cert data
|
||||
type TlsInlineCertKey struct {
|
||||
Cert string
|
||||
Key string
|
||||
CACert string
|
||||
}
|
||||
|
||||
// ClientConfigInlineCertKey() returns a pointer to tls.Config struct object with certificate data
|
||||
// for client creation using only the inline certificate, key and CA certificate data.
|
||||
func (info TlsInlineCertKey) ClientConfigInlineCertKey() (*tls.Config, error) {
|
||||
var cfg *tls.Config
|
||||
var err error
|
||||
|
||||
if info.Cert == "" || info.Key == "" {
|
||||
return nil, fmt.Errorf("Certificate and Key must both be present inline.")
|
||||
}
|
||||
|
||||
if info.Cert != "" && info.Key != "" {
|
||||
cfg, err = info.baseCertConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if info.CACert != "" {
|
||||
cfg.RootCAs, err = newCertPool(info.CACert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// baseCertConfig() populates tls struct with certificate data
|
||||
func (info TlsInlineCertKey) baseCertConfig() (*tls.Config, error) {
|
||||
_, err := newCert([]byte(info.Cert), []byte(info.Key))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
return newCert([]byte(info.Cert), []byte(info.Key))
|
||||
}
|
||||
cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
return newCert([]byte(info.Cert), []byte(info.Key))
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// newCertPool() creates the certificate pool from the CA certificates provided
|
||||
func newCertPool(caCert string) (*x509.CertPool, error) {
|
||||
certPool := x509.NewCertPool()
|
||||
if caCert == "" {
|
||||
return nil, nil
|
||||
}
|
||||
var block *pem.Block
|
||||
certByte := []byte(caCert)
|
||||
block, certByte = pem.Decode(certByte)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("Cannot decode PEM block containing certificate")
|
||||
}
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certPool.AddCert(cert)
|
||||
return certPool, nil
|
||||
}
|
||||
|
||||
// newCert() generates TLS cert by using the given cert and key values.
|
||||
func newCert(cert, key []byte) (*tls.Certificate, error) {
|
||||
tlsCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tlsCert, nil
|
||||
}
|
||||
212
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/watcher.go
generated
vendored
212
vendor/github.com/projectcalico/libcalico-go/lib/backend/etcdv3/watcher.go
generated
vendored
@@ -1,212 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
goerrors "errors"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
resultsBufSize = 100
|
||||
)
|
||||
|
||||
// Watch entries in the datastore matching the resources specified by the ListInterface.
|
||||
func (c *etcdV3Client) Watch(cxt context.Context, l model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
var rev int64
|
||||
if len(revision) != 0 {
|
||||
var err error
|
||||
rev, err = strconv.ParseInt(revision, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
wc := &watcher{
|
||||
client: c,
|
||||
list: l,
|
||||
initialRev: rev,
|
||||
resultChan: make(chan api.WatchEvent, resultsBufSize),
|
||||
}
|
||||
wc.ctx, wc.cancel = context.WithCancel(cxt)
|
||||
go wc.watchLoop()
|
||||
return wc, nil
|
||||
}
|
||||
|
||||
// watcher implements watch.Interface.
|
||||
type watcher struct {
|
||||
client *etcdV3Client
|
||||
initialRev int64
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
resultChan chan api.WatchEvent
|
||||
list model.ListInterface
|
||||
terminated uint32
|
||||
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
|
||||
// This calls through to the context cancel function.
|
||||
func (wc *watcher) Stop() {
|
||||
wc.cancel()
|
||||
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
|
||||
func (wc *watcher) ResultChan() <-chan api.WatchEvent {
|
||||
return wc.resultChan
|
||||
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
|
||||
func (wc *watcher) HasTerminated() bool {
|
||||
return atomic.LoadUint32(&wc.terminated) != 0
|
||||
}
|
||||
|
||||
// watchLoop starts a watch on the required path prefix and sends a stream of
|
||||
// event updates for internal processing.
|
||||
func (wc *watcher) watchLoop() {
|
||||
// When this loop exits, make sure we terminate the watcher resources.
|
||||
defer wc.terminateWatcher()
|
||||
|
||||
log.Debug("Starting watcher.watchLoop")
|
||||
if wc.initialRev == 0 {
|
||||
// No initial revision supplied, so perform a list of current configuration
|
||||
// which will also get the current revision we will start our watch from.
|
||||
if err := wc.listCurrent(); err != nil {
|
||||
log.Errorf("failed to list current with latest state: %v", err)
|
||||
wc.sendError(err, true)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If we are not watching a specific resource then this is a prefix watch.
|
||||
logCxt := log.WithField("list", wc.list)
|
||||
key, opts := calculateListKeyAndOptions(logCxt, wc.list)
|
||||
opts = append(opts, clientv3.WithRev(wc.initialRev+1), clientv3.WithPrevKV())
|
||||
logCxt = logCxt.WithFields(log.Fields{
|
||||
"etcdv3-etcdKey": key,
|
||||
"rev": wc.initialRev,
|
||||
})
|
||||
logCxt.Debug("Starting etcdv3 watch")
|
||||
wch := wc.client.etcdClient.Watch(wc.ctx, key, opts...)
|
||||
for wres := range wch {
|
||||
if wres.Err() != nil {
|
||||
// A watch channel error is a terminating event, so exit the loop.
|
||||
err := wres.Err()
|
||||
log.WithError(err).Error("Watch channel error")
|
||||
wc.sendError(err, true)
|
||||
return
|
||||
}
|
||||
for _, e := range wres.Events {
|
||||
// Convert the etcdv3 event to the equivalent Watcher event. An error
|
||||
// parsing the event is returned as an error, but don't exit the watcher as
|
||||
// restarting the watcher is unlikely to fix the conversion error.
|
||||
if ae, err := convertWatchEvent(e, wc.list); ae != nil {
|
||||
wc.sendEvent(ae)
|
||||
} else if err != nil {
|
||||
wc.sendError(err, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we exit the loop, it means the watcher has closed for some reason.
|
||||
// Bubble this up as a watch termination error.
|
||||
log.Warn("etcdv3 watch channel closed")
|
||||
wc.sendError(goerrors.New("etcdv3 watch channel closed"), true)
|
||||
}
|
||||
|
||||
// listCurrent retrieves the existing entries and sends an event for each listed
|
||||
func (wc *watcher) listCurrent() error {
|
||||
log.Info("Performing initial list with no revision")
|
||||
list, err := wc.client.List(wc.ctx, wc.list, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wc.initialRev, err = strconv.ParseInt(list.Revision, 10, 64)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("List returned revision that could not be parsed")
|
||||
return err
|
||||
}
|
||||
|
||||
// We are sending an initial sync of entries to the watcher to provide current
|
||||
// state. To the perspective of the watcher, these are added entries, so set the
|
||||
// event type to WatchAdded.
|
||||
log.WithField("NumEntries", len(list.KVPairs)).Debug("Sending create events for each existing entry")
|
||||
for _, kv := range list.KVPairs {
|
||||
wc.sendEvent(&api.WatchEvent{
|
||||
Type: api.WatchAdded,
|
||||
New: kv,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// terminateWatcher terminates the resources associated with the watcher.
|
||||
func (wc *watcher) terminateWatcher() {
|
||||
log.Debug("Terminating etcdv3 watcher")
|
||||
// Cancel the context - which will cancel the etcd Watch, this may have already been
|
||||
// cancelled through an explicit Stop, but it is fine to cancel multiple times.
|
||||
wc.cancel()
|
||||
|
||||
// Close the results channel.
|
||||
close(wc.resultChan)
|
||||
|
||||
// Increment the terminated counter using a goroutine safe operation.
|
||||
atomic.AddUint32(&wc.terminated, 1)
|
||||
}
|
||||
|
||||
// sendError packages up the error as an event and sends it in the results channel.
|
||||
func (wc *watcher) sendError(err error, terminating bool) {
|
||||
// The response from etcd commands may include a context.Canceled error if the context
|
||||
// was cancelled before completion. Since with our Watcher we don't include that as
|
||||
// an error type skip over the Canceled error, the error processing in the main
|
||||
// watch thread will terminate the watcher.
|
||||
if err == context.Canceled {
|
||||
return
|
||||
}
|
||||
|
||||
// If this is a terminating error, wrap the error up in an errors.ErrorWatchTerminated
|
||||
// error type.
|
||||
if terminating {
|
||||
err = errors.ErrorWatchTerminated{Err: err}
|
||||
}
|
||||
|
||||
// Wrap the error up in a WatchEvent and use sendEvent to send it.
|
||||
errEvent := &api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: err,
|
||||
}
|
||||
wc.sendEvent(errEvent)
|
||||
}
|
||||
|
||||
// sendEvent sends an event in the results channel.
|
||||
func (wc *watcher) sendEvent(e *api.WatchEvent) {
|
||||
if len(wc.resultChan) == resultsBufSize {
|
||||
log.Warningf("Watch events backing up: %d events", resultsBufSize)
|
||||
}
|
||||
select {
|
||||
case wc.resultChan <- *e:
|
||||
case <-wc.ctx.Done():
|
||||
}
|
||||
}
|
||||
644
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/k8s.go
generated
vendored
644
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/k8s.go
generated
vendored
@@ -1,644 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth" // Import all auth providers.
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/apiconfig"
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/resources"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
var (
|
||||
resourceKeyType = reflect.TypeOf(model.ResourceKey{})
|
||||
resourceListType = reflect.TypeOf(model.ResourceListOptions{})
|
||||
)
|
||||
|
||||
type KubeClient struct {
|
||||
// Main Kubernetes clients.
|
||||
ClientSet *kubernetes.Clientset
|
||||
|
||||
// Client for interacting with CustomResourceDefinition.
|
||||
crdClientV1 *rest.RESTClient
|
||||
|
||||
disableNodePoll bool
|
||||
|
||||
// Contains methods for converting Kubernetes resources to
|
||||
// Calico resources.
|
||||
converter conversion.Converter
|
||||
|
||||
// Resource clients keyed off Kind.
|
||||
clientsByResourceKind map[string]resources.K8sResourceClient
|
||||
|
||||
// Non v3 resource clients keyed off Key Type.
|
||||
clientsByKeyType map[reflect.Type]resources.K8sResourceClient
|
||||
|
||||
// Non v3 resource clients keyed off List Type.
|
||||
clientsByListType map[reflect.Type]resources.K8sResourceClient
|
||||
}
|
||||
|
||||
func NewKubeClient(ca *apiconfig.CalicoAPIConfigSpec) (api.Client, error) {
|
||||
// Use the kubernetes client code to load the kubeconfig file and combine it with the overrides.
|
||||
configOverrides := &clientcmd.ConfigOverrides{}
|
||||
var overridesMap = []struct {
|
||||
variable *string
|
||||
value string
|
||||
}{
|
||||
{&configOverrides.ClusterInfo.Server, ca.K8sAPIEndpoint},
|
||||
{&configOverrides.AuthInfo.ClientCertificate, ca.K8sCertFile},
|
||||
{&configOverrides.AuthInfo.ClientKey, ca.K8sKeyFile},
|
||||
{&configOverrides.ClusterInfo.CertificateAuthority, ca.K8sCAFile},
|
||||
{&configOverrides.AuthInfo.Token, ca.K8sAPIToken},
|
||||
}
|
||||
|
||||
// Set an explicit path to the kubeconfig if one
|
||||
// was provided.
|
||||
loadingRules := clientcmd.ClientConfigLoadingRules{}
|
||||
if ca.Kubeconfig != "" {
|
||||
loadingRules.ExplicitPath = ca.Kubeconfig
|
||||
}
|
||||
|
||||
// Using the override map above, populate any non-empty values.
|
||||
for _, override := range overridesMap {
|
||||
if override.value != "" {
|
||||
*override.variable = override.value
|
||||
}
|
||||
}
|
||||
if ca.K8sInsecureSkipTLSVerify {
|
||||
configOverrides.ClusterInfo.InsecureSkipTLSVerify = true
|
||||
}
|
||||
|
||||
// A kubeconfig file was provided. Use it to load a config, passing through
|
||||
// any overrides.
|
||||
config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
&loadingRules, configOverrides).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, resources.K8sErrorToCalico(err, nil)
|
||||
}
|
||||
|
||||
// Create the clientset. We increase the burst so that the IPAM code performs
|
||||
// efficiently. The IPAM code can create bursts of requests to the API, so
|
||||
// in order to keep pod creation times sensible we allow a higher request rate.
|
||||
config.Burst = 100
|
||||
cs, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, resources.K8sErrorToCalico(err, nil)
|
||||
}
|
||||
log.Debugf("Created k8s ClientSet: %+v", cs)
|
||||
|
||||
crdClientV1, err := buildCRDClientV1(*config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to build V1 CRD client: %v", err)
|
||||
}
|
||||
|
||||
kubeClient := &KubeClient{
|
||||
ClientSet: cs,
|
||||
crdClientV1: crdClientV1,
|
||||
disableNodePoll: ca.K8sDisableNodePoll,
|
||||
clientsByResourceKind: make(map[string]resources.K8sResourceClient),
|
||||
clientsByKeyType: make(map[reflect.Type]resources.K8sResourceClient),
|
||||
clientsByListType: make(map[reflect.Type]resources.K8sResourceClient),
|
||||
}
|
||||
|
||||
// Create the Calico sub-clients and register them.
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindIPPool,
|
||||
resources.NewIPPoolClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindGlobalNetworkPolicy,
|
||||
resources.NewGlobalNetworkPolicyClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindGlobalNetworkSet,
|
||||
resources.NewGlobalNetworkSetClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindNetworkPolicy,
|
||||
resources.NewNetworkPolicyClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindNetworkSet,
|
||||
resources.NewNetworkSetClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindBGPPeer,
|
||||
resources.NewBGPPeerClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindBGPConfiguration,
|
||||
resources.NewBGPConfigClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindFelixConfiguration,
|
||||
resources.NewFelixConfigClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindClusterInformation,
|
||||
resources.NewClusterInfoClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindNode,
|
||||
resources.NewNodeClient(cs, ca.K8sUsePodCIDR),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindProfile,
|
||||
resources.NewProfileClient(cs),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindHostEndpoint,
|
||||
resources.NewHostEndpointClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.ResourceKey{}),
|
||||
reflect.TypeOf(model.ResourceListOptions{}),
|
||||
apiv3.KindWorkloadEndpoint,
|
||||
resources.NewWorkloadEndpointClient(cs),
|
||||
)
|
||||
|
||||
if ca.K8sUsePodCIDR {
|
||||
// Using host-local IPAM. Use Kubernetes pod CIDRs to back IPAM.
|
||||
log.Info("Using host-local IPAM")
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.BlockAffinityKey{}),
|
||||
reflect.TypeOf(model.BlockAffinityListOptions{}),
|
||||
apiv3.KindBlockAffinity,
|
||||
resources.NewPodCIDRBlockAffinityClient(cs),
|
||||
)
|
||||
} else {
|
||||
// Using Calico IPAM - use CRDs to back IPAM resources.
|
||||
log.Info("Using Calico IPAM")
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.BlockAffinityKey{}),
|
||||
reflect.TypeOf(model.BlockAffinityListOptions{}),
|
||||
apiv3.KindBlockAffinity,
|
||||
resources.NewBlockAffinityClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.BlockKey{}),
|
||||
reflect.TypeOf(model.BlockListOptions{}),
|
||||
apiv3.KindIPAMBlock,
|
||||
resources.NewIPAMBlockClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.IPAMHandleKey{}),
|
||||
reflect.TypeOf(model.IPAMHandleListOptions{}),
|
||||
apiv3.KindIPAMHandle,
|
||||
resources.NewIPAMHandleClient(cs, crdClientV1),
|
||||
)
|
||||
kubeClient.registerResourceClient(
|
||||
reflect.TypeOf(model.IPAMConfigKey{}),
|
||||
nil,
|
||||
apiv3.KindIPAMConfig,
|
||||
resources.NewIPAMConfigClient(cs, crdClientV1),
|
||||
)
|
||||
}
|
||||
|
||||
return kubeClient, nil
|
||||
}
|
||||
|
||||
// registerResourceClient registers a specific resource client with the associated
|
||||
// key and list types (and for v3 resources with the resource kind - since these share
|
||||
// a common key and list type).
|
||||
func (c *KubeClient) registerResourceClient(keyType, listType reflect.Type, resourceKind string, client resources.K8sResourceClient) {
|
||||
if keyType == resourceKeyType {
|
||||
c.clientsByResourceKind[resourceKind] = client
|
||||
} else {
|
||||
c.clientsByKeyType[keyType] = client
|
||||
c.clientsByListType[listType] = client
|
||||
}
|
||||
}
|
||||
|
||||
// getResourceClientFromKey returns the appropriate resource client for the v3 resource kind.
|
||||
func (c *KubeClient) GetResourceClientFromResourceKind(kind string) resources.K8sResourceClient {
|
||||
return c.clientsByResourceKind[kind]
|
||||
}
|
||||
|
||||
// getResourceClientFromKey returns the appropriate resource client for the key.
|
||||
func (c *KubeClient) getResourceClientFromKey(key model.Key) resources.K8sResourceClient {
|
||||
kt := reflect.TypeOf(key)
|
||||
if kt == resourceKeyType {
|
||||
return c.clientsByResourceKind[key.(model.ResourceKey).Kind]
|
||||
} else {
|
||||
return c.clientsByKeyType[kt]
|
||||
}
|
||||
}
|
||||
|
||||
// getResourceClientFromList returns the appropriate resource client for the list.
|
||||
func (c *KubeClient) getResourceClientFromList(list model.ListInterface) resources.K8sResourceClient {
|
||||
lt := reflect.TypeOf(list)
|
||||
if lt == resourceListType {
|
||||
return c.clientsByResourceKind[list.(model.ResourceListOptions).Kind]
|
||||
} else {
|
||||
return c.clientsByListType[lt]
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureInitialized checks that the datastore is initialized and ready to be
// used. For the Kubernetes (KDD) backend this is currently a no-op that
// always reports success: the custom resource definitions are expected to be
// installed out-of-band.
//
// NOTE(review): an earlier comment here described checking for the
// GlobalFelixConfig CRD by writing the ClusterType; the body below performs
// no such check, so that description has been removed.
func (c *KubeClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Remove Calico-creatable data from the datastore. This is purely used for the
|
||||
// test framework.
|
||||
func (c *KubeClient) Clean() error {
|
||||
log.Warning("Cleaning KDD of all Calico-creatable data")
|
||||
kinds := []string{
|
||||
apiv3.KindBGPConfiguration,
|
||||
apiv3.KindBGPPeer,
|
||||
apiv3.KindClusterInformation,
|
||||
apiv3.KindFelixConfiguration,
|
||||
apiv3.KindGlobalNetworkPolicy,
|
||||
apiv3.KindGlobalNetworkSet,
|
||||
apiv3.KindNetworkPolicy,
|
||||
apiv3.KindNetworkSet,
|
||||
apiv3.KindIPPool,
|
||||
apiv3.KindHostEndpoint,
|
||||
}
|
||||
ctx := context.Background()
|
||||
for _, k := range kinds {
|
||||
lo := model.ResourceListOptions{Kind: k}
|
||||
if rs, err := c.List(ctx, lo, ""); err != nil {
|
||||
log.WithError(err).WithField("Kind", k).Warning("Failed to list resources")
|
||||
} else {
|
||||
for _, r := range rs.KVPairs {
|
||||
if _, err = c.Delete(ctx, r.Key, r.Revision); err != nil {
|
||||
log.WithField("Key", r.Key).Warning("Failed to delete entry from KDD")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup IPAM resources that have slightly different backend semantics.
|
||||
for _, li := range []model.ListInterface{
|
||||
model.BlockListOptions{},
|
||||
model.BlockAffinityListOptions{},
|
||||
model.BlockAffinityListOptions{},
|
||||
model.IPAMHandleListOptions{},
|
||||
} {
|
||||
if rs, err := c.List(ctx, li, ""); err != nil {
|
||||
log.WithError(err).WithField("Kind", li).Warning("Failed to list resources")
|
||||
} else {
|
||||
for _, r := range rs.KVPairs {
|
||||
if _, err = c.DeleteKVP(ctx, r); err != nil {
|
||||
log.WithError(err).WithField("Key", r.Key).Warning("Failed to delete entry from KDD")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get a list of Nodes and remove all BGP configuration from the nodes.
|
||||
if nodes, err := c.List(ctx, model.ResourceListOptions{Kind: apiv3.KindNode}, ""); err != nil {
|
||||
log.Warning("Failed to list Nodes")
|
||||
} else {
|
||||
for _, nodeKvp := range nodes.KVPairs {
|
||||
node := nodeKvp.Value.(*apiv3.Node)
|
||||
node.Spec.BGP = nil
|
||||
if _, err := c.Update(ctx, nodeKvp); err != nil {
|
||||
log.WithField("Node", node.Name).Warning("Failed to remove Calico config from node")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete global IPAM config
|
||||
if _, err := c.Delete(ctx, model.IPAMConfigKey{}, ""); err != nil {
|
||||
log.WithError(err).WithField("key", model.IPAMConfigGlobalName).Warning("Failed to delete global IPAM Config from KDD")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildCRDClientV1 builds a RESTClient configured to interact with Calico CustomResourceDefinitions
|
||||
func buildCRDClientV1(cfg rest.Config) (*rest.RESTClient, error) {
|
||||
// Generate config using the base config.
|
||||
cfg.GroupVersion = &schema.GroupVersion{
|
||||
Group: "crd.projectcalico.org",
|
||||
Version: "v1",
|
||||
}
|
||||
cfg.APIPath = "/apis"
|
||||
cfg.ContentType = runtime.ContentTypeJSON
|
||||
cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: scheme.Codecs}
|
||||
|
||||
cli, err := rest.RESTClientFor(&cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We also need to register resources.
|
||||
schemeBuilder := runtime.NewSchemeBuilder(
|
||||
func(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(
|
||||
*cfg.GroupVersion,
|
||||
&apiv3.FelixConfiguration{},
|
||||
&apiv3.FelixConfigurationList{},
|
||||
&apiv3.IPPool{},
|
||||
&apiv3.IPPoolList{},
|
||||
&apiv3.BGPPeer{},
|
||||
&apiv3.BGPPeerList{},
|
||||
&apiv3.BGPConfiguration{},
|
||||
&apiv3.BGPConfigurationList{},
|
||||
&apiv3.ClusterInformation{},
|
||||
&apiv3.ClusterInformationList{},
|
||||
&apiv3.GlobalNetworkSet{},
|
||||
&apiv3.GlobalNetworkSetList{},
|
||||
&apiv3.GlobalNetworkPolicy{},
|
||||
&apiv3.GlobalNetworkPolicyList{},
|
||||
&apiv3.NetworkPolicy{},
|
||||
&apiv3.NetworkPolicyList{},
|
||||
&apiv3.NetworkSet{},
|
||||
&apiv3.NetworkSetList{},
|
||||
&apiv3.HostEndpoint{},
|
||||
&apiv3.HostEndpointList{},
|
||||
&apiv3.BlockAffinity{},
|
||||
&apiv3.BlockAffinityList{},
|
||||
&apiv3.IPAMBlock{},
|
||||
&apiv3.IPAMBlockList{},
|
||||
&apiv3.IPAMHandle{},
|
||||
&apiv3.IPAMHandleList{},
|
||||
&apiv3.IPAMConfig{},
|
||||
&apiv3.IPAMConfigList{},
|
||||
)
|
||||
return nil
|
||||
})
|
||||
|
||||
schemeBuilder.AddToScheme(scheme.Scheme)
|
||||
|
||||
return cli, nil
|
||||
}
|
||||
|
||||
// Create an entry in the datastore. This errors if the entry already exists.
|
||||
func (c *KubeClient) Create(ctx context.Context, d *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debugf("Performing 'Create' for %+v", d)
|
||||
client := c.getResourceClientFromKey(d.Key)
|
||||
if client == nil {
|
||||
log.Debug("Attempt to 'Create' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: d.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
return client.Create(ctx, d)
|
||||
}
|
||||
|
||||
// Update an existing entry in the datastore. This errors if the entry does
|
||||
// not exist.
|
||||
func (c *KubeClient) Update(ctx context.Context, d *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debugf("Performing 'Update' for %+v", d)
|
||||
client := c.getResourceClientFromKey(d.Key)
|
||||
if client == nil {
|
||||
log.Debug("Attempt to 'Update' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: d.Key,
|
||||
Operation: "Update",
|
||||
}
|
||||
}
|
||||
return client.Update(ctx, d)
|
||||
}
|
||||
|
||||
// Set an existing entry in the datastore. This ignores whether an entry already
|
||||
// exists. This is not exposed in the main client - but we keep here for the backend
|
||||
// API.
|
||||
func (c *KubeClient) Apply(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": kvp.Key,
|
||||
"Value": kvp.Value,
|
||||
})
|
||||
logContext.Debug("Apply Kubernetes resource")
|
||||
|
||||
// Attempt to Create and do an Update if the resource already exists.
|
||||
// We only log debug here since the Create and Update will also log.
|
||||
// Can't set Revision while creating a resource.
|
||||
updated, err := c.Create(ctx, &model.KVPair{
|
||||
Key: kvp.Key,
|
||||
Value: kvp.Value,
|
||||
})
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
|
||||
logContext.Debug("Error applying resource (using Create)")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try to Update if the resource already exists.
|
||||
updated, err = c.Update(ctx, kvp)
|
||||
if err != nil {
|
||||
logContext.Debug("Error applying resource (using Update)")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// Delete an entry in the datastore.
|
||||
func (c *KubeClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debugf("Performing 'DeleteKVP' for %+v", kvp.Key)
|
||||
client := c.getResourceClientFromKey(kvp.Key)
|
||||
if client == nil {
|
||||
log.Debug("Attempt to 'DeleteKVP' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
return client.DeleteKVP(ctx, kvp)
|
||||
}
|
||||
|
||||
// Delete an entry in the datastore by key.
|
||||
func (c *KubeClient) Delete(ctx context.Context, k model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debugf("Performing 'Delete' for %+v", k)
|
||||
client := c.getResourceClientFromKey(k)
|
||||
if client == nil {
|
||||
log.Debug("Attempt to 'Delete' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: k,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
return client.Delete(ctx, k, revision, nil)
|
||||
}
|
||||
|
||||
// Get an entry from the datastore. This errors if the entry does not exist.
|
||||
func (c *KubeClient) Get(ctx context.Context, k model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debugf("Performing 'Get' for %+v %v", k, revision)
|
||||
client := c.getResourceClientFromKey(k)
|
||||
if client == nil {
|
||||
log.Debug("Attempt to 'Get' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: k,
|
||||
Operation: "Get",
|
||||
}
|
||||
}
|
||||
return client.Get(ctx, k, revision)
|
||||
}
|
||||
|
||||
// List entries in the datastore. This may return an empty list if there are
|
||||
// no entries matching the request in the ListInterface.
|
||||
func (c *KubeClient) List(ctx context.Context, l model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debugf("Performing 'List' for %+v %v", l, reflect.TypeOf(l))
|
||||
client := c.getResourceClientFromList(l)
|
||||
if client == nil {
|
||||
log.Info("Attempt to 'List' using kubernetes backend is not supported.")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: l,
|
||||
Operation: "List",
|
||||
}
|
||||
}
|
||||
return client.List(ctx, l, revision)
|
||||
}
|
||||
|
||||
// Watch returns a WatchInterface that streams events for entries in the
// datastore matching the supplied ListInterface, starting from the given
// revision. (The previous comment here was copy-pasted from List.)
func (c *KubeClient) Watch(ctx context.Context, l model.ListInterface, revision string) (api.WatchInterface, error) {
	log.Debugf("Performing 'Watch' for %+v %v", l, reflect.TypeOf(l))
	client := c.getResourceClientFromList(l)
	if client == nil {
		// No registered client for this list type: unsupported on KDD.
		log.Debug("Attempt to 'Watch' using kubernetes backend is not supported.")
		return nil, cerrors.ErrorOperationNotSupported{
			Identifier: l,
			Operation:  "Watch",
		}
	}
	return client.Watch(ctx, l, revision)
}
|
||||
|
||||
func (c *KubeClient) getReadyStatus(ctx context.Context, k model.ReadyFlagKey, revision string) (*model.KVPair, error) {
|
||||
return &model.KVPair{Key: k, Value: true}, nil
|
||||
}
|
||||
|
||||
// listHostConfig lists host configuration derived from Kubernetes Nodes.
// The only HostConfig value this backend can synthesize is the
// IpInIpTunnelAddr (computed from a node's pod CIDR via getTunIp), so any
// request naming a different config key returns an empty list. If a hostname
// is given, only that node is queried; otherwise all nodes are listed.
func (c *KubeClient) listHostConfig(ctx context.Context, l model.HostConfigListOptions, revision string) (*model.KVPairList, error) {
	var kvps = []*model.KVPair{}

	// Short circuit if they aren't asking for information we can provide.
	if l.Name != "" && l.Name != "IpInIpTunnelAddr" {
		return &model.KVPairList{
			KVPairs:  kvps,
			Revision: revision,
		}, nil
	}

	// First see if we were handed a specific host, if not list all Nodes
	if l.Hostname == "" {
		nodes, err := c.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			return nil, resources.K8sErrorToCalico(err, l)
		}

		for _, node := range nodes.Items {
			kvp, err := getTunIp(&node)
			// Best-effort: nodes with no/invalid pod CIDR are skipped
			// rather than failing the whole list.
			if err != nil || kvp == nil {
				continue
			}

			kvps = append(kvps, kvp)
		}
	} else {
		node, err := c.ClientSet.CoreV1().Nodes().Get(l.Hostname, metav1.GetOptions{})
		if err != nil {
			return nil, resources.K8sErrorToCalico(err, l)
		}

		// A single node with no derivable tunnel address yields an empty
		// (but successful) result.
		kvp, err := getTunIp(node)
		if err != nil || kvp == nil {
			return &model.KVPairList{
				KVPairs:  []*model.KVPair{},
				Revision: revision,
			}, nil
		}

		kvps = append(kvps, kvp)
	}

	return &model.KVPairList{
		KVPairs:  kvps,
		Revision: revision,
	}, nil
}
|
||||
|
||||
func getTunIp(n *v1.Node) (*model.KVPair, error) {
|
||||
if n.Spec.PodCIDR == "" {
|
||||
log.Warnf("Node %s does not have podCIDR for HostConfig", n.Name)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ip, _, err := net.ParseCIDR(n.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
log.Warnf("Invalid podCIDR for HostConfig: %s, %s", n.Name, n.Spec.PodCIDR)
|
||||
return nil, err
|
||||
}
|
||||
// We need to get the IP for the podCIDR and increment it to the
|
||||
// first IP in the CIDR.
|
||||
tunIp := ip.To4()
|
||||
tunIp[3]++
|
||||
|
||||
kvp := &model.KVPair{
|
||||
Key: model.HostConfigKey{
|
||||
Hostname: n.Name,
|
||||
Name: "IpInIpTunnelAddr",
|
||||
},
|
||||
Value: tunIp.String(),
|
||||
}
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgpconfig.go
generated
vendored
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgpconfig.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// Identifiers for the BGPConfiguration custom resource: the plural resource
// name used in REST paths, and the fully-qualified CRD name.
const (
	BGPConfigResourceName = "BGPConfigurations"
	BGPConfigCRDName      = "bgpconfigurations.crd.projectcalico.org"
)
|
||||
|
||||
// NewBGPConfigClient returns a K8sResourceClient for Calico BGPConfiguration
// resources, backed by the corresponding custom resource definition.
func NewBGPConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            BGPConfigCRDName,
		resource:        BGPConfigResourceName,
		description:     "Calico BGP Configuration",
		k8sResourceType: reflect.TypeOf(apiv3.BGPConfiguration{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindBGPConfiguration,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.BGPConfigurationList{}),
		resourceKind: apiv3.KindBGPConfiguration,
	}
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgppeer.go
generated
vendored
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgppeer.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// Identifiers for the BGPPeer custom resource: the plural resource name used
// in REST paths, and the fully-qualified CRD name.
const (
	BGPPeerResourceName = "BGPPeers"
	BGPPeerCRDName      = "bgppeers.crd.projectcalico.org"
)
|
||||
|
||||
// NewBGPPeerClient returns a K8sResourceClient for Calico BGPPeer resources,
// backed by the corresponding custom resource definition.
func NewBGPPeerClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            BGPPeerCRDName,
		resource:        BGPPeerResourceName,
		description:     "Calico BGP Peers",
		k8sResourceType: reflect.TypeOf(apiv3.BGPPeer{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindBGPPeer,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.BGPPeerList{}),
		resourceKind: apiv3.KindBGPPeer,
	}
}
|
||||
80
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/client.go
generated
vendored
80
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/client.go
generated
vendored
@@ -1,80 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
// K8sResourceClient is the interface to the k8s datastore for CRUD operations
// on an individual resource (one for each of the *model* types supported by
// the K8s backend).
//
// Defining a separate client interface from api.Client allows the k8s-specific
// client to diverge.
type K8sResourceClient interface {
	// Create creates the object specified in the KVPair, which must not
	// already exist. On success, returns a KVPair for the object with
	// revision information filled-in.
	Create(ctx context.Context, object *model.KVPair) (*model.KVPair, error)

	// Update modifies the existing object specified in the KVPair.
	// On success, returns a KVPair for the object with revision
	// information filled-in. If the input KVPair has revision
	// information then the update only succeeds if the revision is still
	// current.
	Update(ctx context.Context, object *model.KVPair) (*model.KVPair, error)

	// Delete removes the object specified by the Key. If the call
	// contains revision information, the delete only succeeds if the
	// revision is still current.
	Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error)

	// DeleteKVP removes the object specified by the KVPair. If the KVPair
	// contains revision information, the delete only succeeds if the
	// revision is still current.
	DeleteKVP(ctx context.Context, object *model.KVPair) (*model.KVPair, error)

	// Get returns the object identified by the given key as a KVPair with
	// revision information.
	Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error)

	// List returns a slice of KVPairs matching the input list options.
	// list should be passed one of the model.<Type>ListOptions structs.
	// Non-zero fields in the struct are used as filters.
	List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error)

	// Watch returns a WatchInterface used for watching resources matching the
	// input list options.
	Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error)

	// EnsureInitialized ensures that the backend is initialized
	// and ready to be used.
	EnsureInitialized() error
}
|
||||
|
||||
// K8sNodeResourceClient extends the K8sResourceClient to add a helper method
// to extract resources from the supplied K8s Node. This convenience interface
// is expected to be removed in a future libcalico-go release.
type K8sNodeResourceClient interface {
	K8sResourceClient
	// ExtractResourcesFromNode returns the model KVPairs that can be derived
	// from the given Kubernetes Node.
	ExtractResourcesFromNode(node *apiv1.Node) ([]*model.KVPair, error)
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/clusterinfo.go
generated
vendored
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/clusterinfo.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// Identifiers for the ClusterInformation custom resource: the plural resource
// name used in REST paths, and the fully-qualified CRD name.
const (
	ClusterInfoResourceName = "ClusterInformations"
	ClusterInfoCRDName      = "clusterinformations.crd.projectcalico.org"
)
|
||||
|
||||
// NewClusterInfoClient returns a K8sResourceClient for Calico
// ClusterInformation resources, backed by the corresponding custom resource
// definition.
func NewClusterInfoClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            ClusterInfoCRDName,
		resource:        ClusterInfoResourceName,
		description:     "Calico Cluster Information",
		k8sResourceType: reflect.TypeOf(apiv3.ClusterInformation{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindClusterInformation,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.ClusterInformationList{}),
		resourceKind: apiv3.KindClusterInformation,
	}
}
|
||||
416
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/customresource.go
generated
vendored
416
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/customresource.go
generated
vendored
@@ -1,416 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
// customK8sResourceClient implements the K8sResourceClient interface and provides a generic
|
||||
// mechanism for a 1:1 mapping between a Calico Resource and an equivalent Kubernetes
|
||||
// custom resource type.
|
||||
type customK8sResourceClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
restClient *rest.RESTClient
|
||||
name string
|
||||
resource string
|
||||
description string
|
||||
k8sResourceType reflect.Type
|
||||
k8sResourceTypeMeta metav1.TypeMeta
|
||||
k8sListType reflect.Type
|
||||
namespaced bool
|
||||
resourceKind string
|
||||
versionconverter VersionConverter
|
||||
}
|
||||
|
||||
// VersionConverter converts v1 or v3 k8s resources into v3 resources.
|
||||
// For a v3 resource, the conversion should be a no-op.
|
||||
type VersionConverter interface {
|
||||
ConvertFromK8s(Resource) (Resource, error)
|
||||
}
|
||||
|
||||
// Create creates a new Custom K8s Resource instance in the k8s API from the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": kvp.Key,
|
||||
"Value": kvp.Value,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Create custom Kubernetes resource")
|
||||
|
||||
// Convert the KVPair to the K8s resource.
|
||||
resIn, err := c.convertKVPairToResource(kvp)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error creating resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send the update request using the REST interface.
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
namespace := kvp.Key.(model.ResourceKey).Namespace
|
||||
err = c.restClient.Post().
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Context(ctx).
|
||||
Resource(c.resource).
|
||||
Body(resIn).
|
||||
Do().Into(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error creating resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
|
||||
// Update the return data with the metadata populated by the (Kubernetes) datastore.
|
||||
kvp, err = c.convertResourceToKVPair(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error converting created K8s resource to Calico resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
// Update the revision information from the response.
|
||||
kvp.Revision = resOut.GetObjectMeta().GetResourceVersion()
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
// Update updates an existing Custom K8s Resource instance in the k8s API from the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": kvp.Key,
|
||||
"Value": kvp.Value,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Update custom Kubernetes resource")
|
||||
|
||||
// Create storage for the updated resource.
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
|
||||
var updateError error
|
||||
// Convert the KVPair to a K8s resource.
|
||||
resIn, err := c.convertKVPairToResource(kvp)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error updating resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send the update request using the name.
|
||||
name := resIn.GetObjectMeta().GetName()
|
||||
namespace := resIn.GetObjectMeta().GetNamespace()
|
||||
logContext = logContext.WithField("Name", name)
|
||||
logContext.Debug("Update resource by name")
|
||||
updateError = c.restClient.Put().
|
||||
Context(ctx).
|
||||
Resource(c.resource).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Body(resIn).
|
||||
Name(name).
|
||||
Do().Into(resOut)
|
||||
if updateError != nil {
|
||||
// Failed to update the resource.
|
||||
logContext.WithError(updateError).Error("Error updating resource")
|
||||
return nil, K8sErrorToCalico(updateError, kvp.Key)
|
||||
}
|
||||
|
||||
// Update the return data with the metadata populated by the (Kubernetes) datastore.
|
||||
kvp, err = c.convertResourceToKVPair(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error converting created K8s resource to Calico resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
// Success. Update the revision information from the response.
|
||||
kvp.Revision = resOut.GetObjectMeta().GetResourceVersion()
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
// Delete deletes an existing Custom K8s Resource instance in the k8s API using the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Delete(ctx context.Context, k model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": k,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Delete custom Kubernetes resource")
|
||||
|
||||
// Convert the Key to a resource name.
|
||||
name, err := c.keyToName(k)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error deleting resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
existing, err := c.Get(ctx, k, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namespace := k.(model.ResourceKey).Namespace
|
||||
|
||||
opts := &metav1.DeleteOptions{}
|
||||
if uid != nil {
|
||||
opts.Preconditions = &metav1.Preconditions{UID: uid}
|
||||
}
|
||||
|
||||
// Delete the resource using the name.
|
||||
logContext = logContext.WithField("Name", name)
|
||||
logContext.Debug("Send delete request by name")
|
||||
err = c.restClient.Delete().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Name(name).
|
||||
Body(opts).
|
||||
Do().
|
||||
Error()
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error deleting resource")
|
||||
return nil, K8sErrorToCalico(err, k)
|
||||
}
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
// Get gets an existing Custom K8s Resource instance in the k8s API using the supplied Key.
|
||||
func (c *customK8sResourceClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": key,
|
||||
"Resource": c.resource,
|
||||
"Revision": revision,
|
||||
})
|
||||
logContext.Debug("Get custom Kubernetes resource")
|
||||
name, err := c.keyToName(key)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error getting resource")
|
||||
return nil, err
|
||||
}
|
||||
namespace := key.(model.ResourceKey).Namespace
|
||||
|
||||
// Add the name and namespace to the log context now that we know it, and query Kubernetes.
|
||||
logContext = logContext.WithFields(log.Fields{"Name": name, "Namespace": namespace})
|
||||
|
||||
logContext.Debug("Get custom Kubernetes resource by name")
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
err = c.restClient.Get().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Name(name).
|
||||
Do().Into(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error getting resource")
|
||||
return nil, K8sErrorToCalico(err, key)
|
||||
}
|
||||
|
||||
return c.convertResourceToKVPair(resOut)
|
||||
}
|
||||
|
||||
// List lists configured Custom K8s Resource instances in the k8s API matching the
|
||||
// supplied ListInterface.
|
||||
func (c *customK8sResourceClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"ListInterface": list,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("List Custom K8s Resource")
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
if revision != "" {
|
||||
return nil, errors.New("Cannot List this resource type specifying a ResourceVersion")
|
||||
}
|
||||
|
||||
// Attempt to convert the ListInterface to a Key. If possible, the parameters
|
||||
// indicate a fully qualified resource, and we'll need to use Get instead of
|
||||
// List.
|
||||
if key := c.listInterfaceToKey(list); key != nil {
|
||||
logContext.Debug("Performing List using Get")
|
||||
if kvp, err := c.Get(ctx, key, revision); err != nil {
|
||||
// The error will already be a Calico error type. Ignore
|
||||
// error that it doesn't exist - we'll return an empty
|
||||
// list.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
log.WithField("Resource", c.resource).WithError(err).Debug("Error listing resource")
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
} else {
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Since we are not performing an exact Get, Kubernetes will return a
|
||||
// list of resources.
|
||||
reslOut := reflect.New(c.k8sListType).Interface().(ResourceList)
|
||||
|
||||
// If it is a namespaced resource, then we'll need the namespace.
|
||||
namespace := list.(model.ResourceListOptions).Namespace
|
||||
|
||||
// Perform the request.
|
||||
err := c.restClient.Get().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Do().Into(reslOut)
|
||||
if err != nil {
|
||||
// Don't return errors for "not found". This just
|
||||
// means there are no matching Custom K8s Resources, and we should return
|
||||
// an empty list.
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.WithError(err).Debug("Error listing resources")
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// We expect the list type to have an "Items" field that we can
|
||||
// iterate over.
|
||||
elem := reflect.ValueOf(reslOut).Elem()
|
||||
items := reflect.ValueOf(elem.FieldByName("Items").Interface())
|
||||
for idx := 0; idx < items.Len(); idx++ {
|
||||
res := items.Index(idx).Addr().Interface().(Resource)
|
||||
if kvp, err := c.convertResourceToKVPair(res); err == nil {
|
||||
kvps = append(kvps, kvp)
|
||||
} else {
|
||||
logContext.WithError(err).WithField("Item", res).Warning("unable to process resource, skipping")
|
||||
}
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: reslOut.GetListMeta().GetResourceVersion(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
// Build watch options to pass to k8s.
|
||||
opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
|
||||
rlo, ok := list.(model.ResourceListOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
|
||||
}
|
||||
fieldSelector := fields.Everything()
|
||||
if len(rlo.Name) != 0 {
|
||||
// We've been asked to watch a specific customresource.
|
||||
log.WithField("name", rlo.Name).Debug("Watching a single customresource")
|
||||
fieldSelector = fields.OneTermEqualSelector("metadata.name", rlo.Name)
|
||||
}
|
||||
|
||||
k8sWatchClient := cache.NewListWatchFromClient(c.restClient, c.resource, rlo.Namespace, fieldSelector)
|
||||
k8sWatch, err := k8sWatchClient.WatchFunc(opts)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
toKVPair := func(r Resource) (*model.KVPair, error) {
|
||||
return c.convertResourceToKVPair(r)
|
||||
}
|
||||
|
||||
return newK8sWatcherConverter(ctx, rlo.Kind+" (custom)", toKVPair, k8sWatch), nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op since the CRD should be
|
||||
// initialized in advance.
|
||||
func (c *customK8sResourceClient) EnsureInitialized() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) listInterfaceToKey(l model.ListInterface) model.Key {
|
||||
pl := l.(model.ResourceListOptions)
|
||||
key := model.ResourceKey{Name: pl.Name, Kind: pl.Kind}
|
||||
|
||||
if c.namespaced && pl.Namespace != "" {
|
||||
key.Namespace = pl.Namespace
|
||||
}
|
||||
|
||||
if pl.Name != "" {
|
||||
return key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) keyToName(k model.Key) (string, error) {
|
||||
return k.(model.ResourceKey).Name, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) nameToKey(name string) (model.Key, error) {
|
||||
return model.ResourceKey{
|
||||
Name: name,
|
||||
Kind: c.resourceKind,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) convertResourceToKVPair(r Resource) (*model.KVPair, error) {
|
||||
var err error
|
||||
|
||||
// If the resource has a VersionConverter defined then pass the resource through
|
||||
// the VersionConverter to convert the resource version from v1 to v3.
|
||||
// No-op for a v3 resource.
|
||||
if c.versionconverter != nil {
|
||||
if r, err = c.versionconverter.ConvertFromK8s(r); err != nil {
|
||||
return nil, fmt.Errorf("error converting resource from v1 to v3: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
r.GetObjectKind().SetGroupVersionKind(c.k8sResourceTypeMeta.GetObjectKind().GroupVersionKind())
|
||||
kvp := &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: r.GetObjectMeta().GetName(),
|
||||
Namespace: r.GetObjectMeta().GetNamespace(),
|
||||
Kind: c.resourceKind,
|
||||
},
|
||||
Revision: r.GetObjectMeta().GetResourceVersion(),
|
||||
}
|
||||
|
||||
if err := ConvertK8sResourceToCalicoResource(r); err != nil {
|
||||
return kvp, err
|
||||
}
|
||||
|
||||
kvp.Value = r
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) convertKVPairToResource(kvp *model.KVPair) (Resource, error) {
|
||||
resource := kvp.Value.(Resource)
|
||||
resource.GetObjectMeta().SetResourceVersion(kvp.Revision)
|
||||
resOut, err := ConvertCalicoResourceToK8sResource(resource)
|
||||
if err != nil {
|
||||
return resOut, err
|
||||
}
|
||||
|
||||
return resOut, nil
|
||||
}
|
||||
66
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/errors.go
generated
vendored
66
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/errors.go
generated
vendored
@@ -1,66 +0,0 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// K8sErrorToCalico returns the equivalent libcalico error for the given
|
||||
// kubernetes error.
|
||||
func K8sErrorToCalico(ke error, id interface{}) error {
|
||||
if ke == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if kerrors.IsAlreadyExists(ke) {
|
||||
return errors.ErrorResourceAlreadyExists{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
if kerrors.IsNotFound(ke) {
|
||||
return errors.ErrorResourceDoesNotExist{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
if kerrors.IsForbidden(ke) || kerrors.IsUnauthorized(ke) {
|
||||
return errors.ErrorConnectionUnauthorized{
|
||||
Err: ke,
|
||||
}
|
||||
}
|
||||
if kerrors.IsConflict(ke) {
|
||||
// Treat precondition errors as not found.
|
||||
if strings.Contains(ke.Error(), "UID in precondition") {
|
||||
return errors.ErrorResourceDoesNotExist{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
return errors.ErrorResourceUpdateConflict{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
return errors.ErrorDatastoreError{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/felixconfig.go
generated
vendored
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/felixconfig.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
FelixConfigResourceName = "FelixConfigurations"
|
||||
FelixConfigCRDName = "felixconfigurations.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewFelixConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: FelixConfigCRDName,
|
||||
resource: FelixConfigResourceName,
|
||||
description: "Calico Felix Configuration",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.FelixConfiguration{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindFelixConfiguration,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.FelixConfigurationList{}),
|
||||
resourceKind: apiv3.KindFelixConfiguration,
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
GlobalNetworkPolicyResourceName = "GlobalNetworkPolicies"
|
||||
GlobalNetworkPolicyCRDName = "globalnetworkpolicies.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewGlobalNetworkPolicyClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: GlobalNetworkPolicyCRDName,
|
||||
resource: GlobalNetworkPolicyResourceName,
|
||||
description: "Calico Global Network Policies",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.GlobalNetworkPolicy{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindGlobalNetworkPolicy,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.GlobalNetworkPolicyList{}),
|
||||
resourceKind: apiv3.KindGlobalNetworkPolicy,
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
GlobalNetworkSetResourceName = "GlobalNetworkSets"
|
||||
GlobalNetworkSetCRDName = "globalnetworksets.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewGlobalNetworkSetClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: GlobalNetworkSetCRDName,
|
||||
resource: GlobalNetworkSetResourceName,
|
||||
description: "Calico Global Network Sets",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.GlobalNetworkSet{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindGlobalNetworkSet,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.GlobalNetworkSetList{}),
|
||||
resourceKind: apiv3.KindGlobalNetworkSet,
|
||||
}
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/hostendpoint.go
generated
vendored
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/hostendpoint.go
generated
vendored
@@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
HostEndpointResourceName = "HostEndpoints"
|
||||
HostEndpointCRDName = "hostendpoints.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewHostEndpointClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: HostEndpointCRDName,
|
||||
resource: HostEndpointResourceName,
|
||||
description: "Calico HostEndpoints",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.HostEndpoint{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindHostEndpoint,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.HostEndpointList{}),
|
||||
resourceKind: apiv3.KindHostEndpoint,
|
||||
}
|
||||
}
|
||||
280
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_affinity.go
generated
vendored
280
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_affinity.go
generated
vendored
@@ -1,280 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/names"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
const (
|
||||
BlockAffinityResourceName = "BlockAffinities"
|
||||
BlockAffinityCRDName = "blockaffinities.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewBlockAffinityClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
// Create a resource client which manages k8s CRDs.
|
||||
rc := customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: BlockAffinityCRDName,
|
||||
resource: BlockAffinityResourceName,
|
||||
description: "Calico IPAM block affinities",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.BlockAffinity{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindBlockAffinity,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.BlockAffinityList{}),
|
||||
resourceKind: apiv3.KindBlockAffinity,
|
||||
}
|
||||
|
||||
return &blockAffinityClient{rc: rc}
|
||||
}
|
||||
|
||||
// blockAffinityClient implements the api.Client interface for BlockAffinity objects. It
|
||||
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
|
||||
// and the CRDs which are used to actually store the data in the Kubernetes API.
|
||||
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
|
||||
// kubernetes CRDs.
|
||||
type blockAffinityClient struct {
|
||||
rc customK8sResourceClient
|
||||
}
|
||||
|
||||
// toV1 converts the given v3 CRD KVPair into a v1 model representation
|
||||
// which can be passed to the IPAM code.
|
||||
func (c blockAffinityClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
|
||||
// Parse the CIDR into a struct.
|
||||
_, cidr, err := net.ParseCIDR(kvpv3.Value.(*apiv3.BlockAffinity).Spec.CIDR)
|
||||
if err != nil {
|
||||
log.WithField("cidr", cidr).WithError(err).Error("failed to parse cidr")
|
||||
return nil, err
|
||||
}
|
||||
state := model.BlockAffinityState(kvpv3.Value.(*apiv3.BlockAffinity).Spec.State)
|
||||
return &model.KVPair{
|
||||
Key: model.BlockAffinityKey{
|
||||
CIDR: *cidr,
|
||||
Host: kvpv3.Value.(*apiv3.BlockAffinity).Spec.Node,
|
||||
},
|
||||
Value: &model.BlockAffinity{
|
||||
State: state,
|
||||
},
|
||||
Revision: kvpv3.Revision,
|
||||
UID: &kvpv3.Value.(*apiv3.BlockAffinity).UID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseKey parses the given model.Key, returning a suitable name, CIDR
|
||||
// and host for use in the Kubernetes API.
|
||||
func (c blockAffinityClient) parseKey(k model.Key) (name, cidr, host string) {
|
||||
host = k.(model.BlockAffinityKey).Host
|
||||
cidr = fmt.Sprintf("%s", k.(model.BlockAffinityKey).CIDR)
|
||||
cidrname := names.CIDRToName(k.(model.BlockAffinityKey).CIDR)
|
||||
|
||||
// Include the hostname as well.
|
||||
host = k.(model.BlockAffinityKey).Host
|
||||
name = fmt.Sprintf("%s-%s", host, cidrname)
|
||||
|
||||
if len(name) >= 253 {
|
||||
// If the name is too long, we need to shorten it.
|
||||
// Remove enough characters to get it below the 253 character limit,
|
||||
// as well as 11 characters to add a hash which helps with uniqueness,
|
||||
// and two characters for the `-` separators between clauses.
|
||||
name = fmt.Sprintf("%s-%s", host[:252-len(cidrname)-13], cidrname)
|
||||
|
||||
// Add a hash to help with uniqueness.
|
||||
h := sha256.New()
|
||||
h.Write([]byte(fmt.Sprintf("%s+%s", host, cidrname)))
|
||||
name = fmt.Sprintf("%s-%s", name, hex.EncodeToString(h.Sum(nil))[:11])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// toV3 takes the given v1 KVPair and converts it into a v3 representation, suitable
|
||||
// for writing as a CRD to the Kubernetes API.
|
||||
func (c blockAffinityClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
|
||||
name, cidr, host := c.parseKey(kvpv1.Key)
|
||||
state := kvpv1.Value.(*model.BlockAffinity).State
|
||||
return &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: name,
|
||||
Kind: apiv3.KindBlockAffinity,
|
||||
},
|
||||
Value: &apiv3.BlockAffinity{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindBlockAffinity,
|
||||
APIVersion: "crd.projectcalico.org/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
ResourceVersion: kvpv1.Revision,
|
||||
},
|
||||
Spec: apiv3.BlockAffinitySpec{
|
||||
State: string(state),
|
||||
Node: host,
|
||||
CIDR: cidr,
|
||||
},
|
||||
},
|
||||
Revision: kvpv1.Revision,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *blockAffinityClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Create(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1kvp, err := c.toV1(kvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1kvp, nil
|
||||
}
|
||||
|
||||
func (c *blockAffinityClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Update(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1kvp, err := c.toV1(kvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1kvp, nil
|
||||
}
|
||||
|
||||
// DeleteKVP removes the given block affinity using a two-phase
// mark-then-delete protocol; the returned KVPair is the deleted object in v1
// form.
func (c *blockAffinityClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	// We need to mark as deleted first, since the Kubernetes API doesn't support
	// compare-and-delete. This update operation allows us to eliminate races with other clients.
	name, _, _ := c.parseKey(kvp.Key)
	kvp.Value.(*model.BlockAffinity).Deleted = true
	v1kvp, err := c.Update(ctx, kvp)
	if err != nil {
		return nil, err
	}

	// Now actually delete the object, using the revision returned by the
	// marking update so a concurrent writer causes a conflict rather than a
	// lost update.
	k := model.ResourceKey{Name: name, Kind: apiv3.KindBlockAffinity}
	kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
	if err != nil {
		return nil, err
	}
	return c.toV1(kvp)
}
|
||||
|
||||
// Delete always fails with ErrorOperationNotSupported; callers must use
// DeleteKVP instead so the object UID is available for a correct delete.
func (c *blockAffinityClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	// Delete should not be used for affinities, since we need the object UID for correctness.
	log.Warn("Operation Delete is not supported on BlockAffinity type - use DeleteKVP")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Delete",
	}
}
|
||||
|
||||
// Get retrieves the block affinity for the given key in v1 form. An object
// left in the marked-deleted state by an interrupted DeleteKVP is cleaned up
// here and reported as not found.
func (c *blockAffinityClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	// Get the object.
	name, _, _ := c.parseKey(key)
	k := model.ResourceKey{Name: name, Kind: apiv3.KindBlockAffinity}
	kvp, err := c.rc.Get(ctx, k, revision)
	if err != nil {
		return nil, err
	}

	// Convert it to v1.
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}

	// If this object has been marked as deleted, then we need to clean it up and
	// return not found.
	if v1kvp.Value.(*model.BlockAffinity).Deleted {
		if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
			return nil, err
		}
		return nil, cerrors.ErrorResourceDoesNotExist{fmt.Errorf("Resource was deleted"), key}
	}

	return v1kvp, nil
}
|
||||
|
||||
// List returns all block affinities matching the host and IP-version filters
// in the given list options (empty host / zero IP version act as wildcards),
// converted to v1 form. Filtering is done client-side after listing all CRDs.
func (c *blockAffinityClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	l := model.ResourceListOptions{Kind: apiv3.KindBlockAffinity}
	v3list, err := c.rc.List(ctx, l, revision)
	if err != nil {
		return nil, err
	}

	host := list.(model.BlockAffinityListOptions).Host
	requestedIPVersion := list.(model.BlockAffinityListOptions).IPVersion

	kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
	for _, i := range v3list.KVPairs {
		v1kvp, err := c.toV1(i)
		if err != nil {
			return nil, err
		}
		if host == "" || v1kvp.Key.(model.BlockAffinityKey).Host == host {
			cidr := v1kvp.Key.(model.BlockAffinityKey).CIDR
			// Take the address so Version() can be called — presumably a
			// pointer-receiver method on the CIDR type; TODO confirm.
			cidr2 := &cidr
			if requestedIPVersion == 0 || requestedIPVersion == cidr2.Version() {
				// Matches the given host and IP version.
				kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
			}
		}
	}
	return kvpl, nil
}
|
||||
|
||||
// Watch opens a Kubernetes watch on the BlockAffinity CRD starting at the
// given revision, converting each event's resource to its v1 representation
// before delivery.
func (c *blockAffinityClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	resl := model.ResourceListOptions{Kind: apiv3.KindBlockAffinity}
	k8sWatchClient := cache.NewListWatchFromClient(c.rc.restClient, c.rc.resource, "", fields.Everything())
	k8sWatch, err := k8sWatchClient.WatchFunc(metav1.ListOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	// Per-event conversion: raw CRD -> v3 KVPair -> v1 KVPair.
	toKVPair := func(r Resource) (*model.KVPair, error) {
		conv, err := c.rc.convertResourceToKVPair(r)
		if err != nil {
			return nil, err
		}
		return c.toV1(conv)
	}

	return newK8sWatcherConverter(ctx, resl.Kind+" (custom)", toKVPair, k8sWatch), nil
}
|
||||
|
||||
// EnsureInitialized is a no-op; the backing CRD is expected to be created in
// advance of using this client.
func (c *blockAffinityClient) EnsureInitialized() error {
	return nil
}
|
||||
275
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block.go
generated
vendored
275
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block.go
generated
vendored
@@ -1,275 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/names"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
const (
	// IPAMBlockResourceName is the resource plural used to address IPAMBlock CRDs.
	IPAMBlockResourceName = "IPAMBlocks"
	// IPAMBlockCRDName is the fully qualified name of the IPAMBlock CRD.
	IPAMBlockCRDName = "ipamblocks.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPAMBlockClient returns a K8sResourceClient for IPAMBlock objects backed
// by CRDs in the Kubernetes API.
func NewIPAMBlockClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	// Create a resource client which manages k8s CRDs.
	rc := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            IPAMBlockCRDName,
		resource:        IPAMBlockResourceName,
		description:     "Calico IPAM blocks",
		k8sResourceType: reflect.TypeOf(apiv3.IPAMBlock{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindIPAMBlock,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.IPAMBlockList{}),
		resourceKind: apiv3.KindIPAMBlock,
	}

	return &ipamBlockClient{rc: rc}
}
|
||||
|
||||
// ipamBlockClient implements the api.Client interface for IPAMBlocks. It handles the translation between
// v1 objects understood by the IPAM codebase in lib/ipam, and the CRDs which are used
// to actually store the data in the Kubernetes API. It uses a customK8sResourceClient under
// the covers to perform CRUD operations on kubernetes CRDs.
type ipamBlockClient struct {
	// rc performs the raw CRD CRUD operations against the Kubernetes API.
	rc customK8sResourceClient
}
|
||||
|
||||
// toV1 converts the given v3 IPAMBlock CRD KVPair into the v1 model
// representation understood by the IPAM code in lib/ipam. Returns an error if
// the CRD's CIDR string does not parse.
func (c ipamBlockClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
	cidrStr := kvpv3.Value.(*apiv3.IPAMBlock).Spec.CIDR
	_, cidr, err := net.ParseCIDR(cidrStr)
	if err != nil {
		return nil, err
	}

	ab := kvpv3.Value.(*apiv3.IPAMBlock)

	// Convert attributes.
	attrs := []model.AllocationAttribute{}
	for _, a := range ab.Spec.Attributes {
		attrs = append(attrs, model.AllocationAttribute{
			AttrPrimary:   a.AttrPrimary,
			AttrSecondary: a.AttrSecondary,
		})
	}

	return &model.KVPair{
		Key: model.BlockKey{
			CIDR: *cidr,
		},
		Value: &model.AllocationBlock{
			CIDR:           *cidr,
			Affinity:       ab.Spec.Affinity,
			StrictAffinity: ab.Spec.StrictAffinity,
			Allocations:    ab.Spec.Allocations,
			Unallocated:    ab.Spec.Unallocated,
			Attributes:     attrs,
			Deleted:        ab.Spec.Deleted,
		},
		Revision: kvpv3.Revision,
		// The CRD UID is carried through so deletes can be made UID-safe.
		UID: &ab.UID,
	}, nil
}
|
||||
|
||||
// parseKey derives both the CRD object name and the CIDR string from a v1
// BlockKey.
// NOTE(review): fmt.Sprintf("%s", cidr) could likely be cidr.String() — confirm
// the CIDR type implements fmt.Stringer before simplifying.
func (c ipamBlockClient) parseKey(k model.Key) (name, cidr string) {
	cidr = fmt.Sprintf("%s", k.(model.BlockKey).CIDR)
	name = names.CIDRToName(k.(model.BlockKey).CIDR)
	return
}
|
||||
|
||||
// toV3 converts the given v1 allocation block KVPair into its v3 CRD
// representation, suitable for writing to the Kubernetes API.
func (c ipamBlockClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	name, cidr := c.parseKey(kvpv1.Key)

	ab := kvpv1.Value.(*model.AllocationBlock)

	// Convert attributes.
	attrs := []apiv3.AllocationAttribute{}
	for _, a := range ab.Attributes {
		attrs = append(attrs, apiv3.AllocationAttribute{
			AttrPrimary:   a.AttrPrimary,
			AttrSecondary: a.AttrSecondary,
		})
	}

	return &model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindIPAMBlock,
		},
		Value: &apiv3.IPAMBlock{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMBlock,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:            name,
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMBlockSpec{
				CIDR:           cidr,
				Allocations:    ab.Allocations,
				Unallocated:    ab.Unallocated,
				Affinity:       ab.Affinity,
				StrictAffinity: ab.StrictAffinity,
				Attributes:     attrs,
				Deleted:        ab.Deleted,
			},
		},
		Revision: kvpv1.Revision,
	}
}
|
||||
|
||||
// Create writes the given v1 allocation block as a new IPAMBlock CRD and
// returns the stored result converted back to v1 form.
func (c *ipamBlockClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	b, err := c.rc.Create(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(b)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}
|
||||
|
||||
// Update writes the given v1 allocation block over the existing IPAMBlock CRD
// (the embedded Revision provides the compare-and-swap) and returns the stored
// result converted back to v1 form.
func (c *ipamBlockClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	b, err := c.rc.Update(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(b)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}
|
||||
|
||||
// DeleteKVP removes the given IPAM block using a two-phase mark-then-delete
// protocol; the returned KVPair is the deleted object in v1 form.
func (c *ipamBlockClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	// We need to mark as deleted first, since the Kubernetes API doesn't support
	// compare-and-delete. This update operation allows us to eliminate races with other clients.
	name, _ := c.parseKey(kvp.Key)
	kvp.Value.(*model.AllocationBlock).Deleted = true
	v1kvp, err := c.Update(ctx, kvp)
	if err != nil {
		return nil, err
	}

	// Now actually delete the object, using the revision from the marking
	// update so concurrent writers conflict rather than being overwritten.
	k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMBlock}
	kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
	if err != nil {
		return nil, err
	}
	return c.toV1(kvp)
}
|
||||
|
||||
// Delete always fails with ErrorOperationNotSupported; callers must use
// DeleteKVP instead so the object UID is available for a correct delete.
func (c *ipamBlockClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	// Delete should not be used for blocks, since we need the object UID for correctness.
	log.Warn("Operation Delete is not supported on IPAMBlock type - use DeleteKVP")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Delete",
	}
}
|
||||
|
||||
// Get retrieves the IPAM block for the given key in v1 form. An object left
// in the marked-deleted state by an interrupted DeleteKVP is cleaned up here
// and reported as not found.
func (c *ipamBlockClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	// Get the object.
	name, _ := c.parseKey(key)
	k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMBlock}
	kvp, err := c.rc.Get(ctx, k, revision)
	if err != nil {
		return nil, err
	}

	// Convert it back to V1 format.
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}

	// If this object has been marked as deleted, then we need to clean it up and
	// return not found.
	if v1kvp.Value.(*model.AllocationBlock).Deleted {
		if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
			return nil, err
		}
		return nil, cerrors.ErrorResourceDoesNotExist{fmt.Errorf("Resource was deleted"), key}
	}

	return v1kvp, nil
}
|
||||
|
||||
// List returns all IPAMBlock CRDs converted to v1 form. No filtering is
// applied; the list options beyond the revision are ignored here.
func (c *ipamBlockClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	l := model.ResourceListOptions{Kind: apiv3.KindIPAMBlock}
	v3list, err := c.rc.List(ctx, l, revision)
	if err != nil {
		return nil, err
	}

	kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
	for _, i := range v3list.KVPairs {
		v1kvp, err := c.toV1(i)
		if err != nil {
			return nil, err
		}
		kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
	}
	return kvpl, nil
}
|
||||
|
||||
// Watch opens a Kubernetes watch on the IPAMBlock CRD starting at the given
// revision, converting each event's resource to its v1 representation before
// delivery.
func (c *ipamBlockClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	resl := model.ResourceListOptions{Kind: apiv3.KindIPAMBlock}
	k8sWatchClient := cache.NewListWatchFromClient(c.rc.restClient, c.rc.resource, "", fields.Everything())
	k8sWatch, err := k8sWatchClient.WatchFunc(metav1.ListOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	// Per-event conversion: raw CRD -> v3 KVPair -> v1 KVPair.
	toKVPair := func(r Resource) (*model.KVPair, error) {
		conv, err := c.rc.convertResourceToKVPair(r)
		if err != nil {
			return nil, err
		}
		return c.toV1(conv)
	}

	return newK8sWatcherConverter(ctx, resl.Kind+" (custom)", toKVPair, k8sWatch), nil
}
|
||||
|
||||
// EnsureInitialized is a no-op since the CRD should be
// initialized in advance.
func (c *ipamBlockClient) EnsureInitialized() error {
	return nil
}
|
||||
@@ -1,183 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// NewPodCIDRBlockAffinityClient returns a K8sResourceClient that serves block
// affinities derived from Kubernetes node pod CIDR allocations, for use with
// host-local IPAM.
func NewPodCIDRBlockAffinityClient(c *kubernetes.Clientset) K8sResourceClient {
	return &podCIDRBlockClient{
		clientSet: c,
	}
}
|
||||
|
||||
// podCIDRBlockClient implements the api.Client interface for block affinities using Kubernetes pod CIDR
// allocations as the backing store. For use with host-local IPAM. For the Calico IPAM
// implementation, see ipam_block.go.
type podCIDRBlockClient struct {
	// clientSet is used to read node objects (whose Spec.PodCIDR backs the affinities).
	clientSet *kubernetes.Clientset
}
|
||||
|
||||
// Create is not supported: pod-CIDR-backed affinities are read-only views of
// node allocations.
func (c *podCIDRBlockClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Warn("Operation Create is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: kvp.Key,
		Operation:  "Create",
	}
}
|
||||
|
||||
func (c *podCIDRBlockClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Update is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteKVP is not supported: pod-CIDR-backed affinities are read-only views
// of node allocations.
func (c *podCIDRBlockClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Warn("Operation DeleteKVP is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: kvp.Key,
		Operation:  "DeleteKVP",
	}
}
|
||||
|
||||
// Delete is not supported: pod-CIDR-backed affinities are read-only views of
// node allocations.
func (c *podCIDRBlockClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	log.Warn("Operation Delete is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Delete",
	}
}
|
||||
|
||||
// Get is not supported; affinities backed by pod CIDRs are only accessible
// via List.
func (c *podCIDRBlockClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Warn("Operation Get is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Get",
	}
}
|
||||
|
||||
// Watch is not supported for pod-CIDR-backed affinities.
func (c *podCIDRBlockClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	log.Debug("Operation Watch is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: list,
		Operation:  "Watch",
	}
}
|
||||
|
||||
// List serves block affinity queries from node pod CIDR allocations. It
// supports two query shapes: a single named host (returning that node's pod
// CIDR as one confirmed affinity), or all hosts (one affinity per node with
// an assigned pod CIDR). Queries that filter by IP version are rejected.
func (c *podCIDRBlockClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	log.Debug("Received List request on block affinities (using host-local IPAM)")
	bl := list.(model.BlockAffinityListOptions)
	kvpl := &model.KVPairList{
		KVPairs:  []*model.KVPair{},
		Revision: revision,
	}

	// If a host is specified, then do an exact lookup (ip version should not be expected in the query)
	if bl.Host != "" && bl.IPVersion == 0 {
		// Get the node settings, we use the nodes PodCIDR as the only node affinity block.
		node, err := c.clientSet.CoreV1().Nodes().Get(bl.Host, metav1.GetOptions{ResourceVersion: revision})
		if err != nil {
			// A missing node means no affinities, not an error; anything
			// else propagates.
			err = K8sErrorToCalico(err, list)
			if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
				return nil, err
			}
			return kvpl, nil
		}

		// Return no results if the pod CIDR is not assigned.
		podcidr := node.Spec.PodCIDR
		if len(podcidr) == 0 {
			return kvpl, nil
		}

		_, cidr, err := cnet.ParseCIDR(podcidr)
		if err != nil {
			return nil, err
		}
		kvpl.Revision = node.ResourceVersion
		kvpl.KVPairs = append(kvpl.KVPairs, &model.KVPair{
			Key: model.BlockAffinityKey{
				CIDR: *cidr,
				Host: bl.Host,
			},
			Value:    &model.BlockAffinity{State: model.StateConfirmed},
			Revision: node.ResourceVersion,
		})

		return kvpl, nil
	}

	// When host is not specified...
	if bl.IPVersion == 0 {
		// Get the node settings, we use the nodes PodCIDR as the only node affinity block.
		nodeList, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: revision})
		if err != nil {
			err = K8sErrorToCalico(err, list)
			if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
				return nil, err
			}
			return kvpl, nil
		}

		kvpl.Revision = nodeList.ResourceVersion
		for _, node := range nodeList.Items {
			// Skip nodes whose pod CIDR is not assigned.
			podcidr := node.Spec.PodCIDR
			if len(podcidr) == 0 {
				continue
			}

			_, cidr, err := cnet.ParseCIDR(podcidr)
			if err != nil {
				return nil, err
			}
			kvpl.KVPairs = append(kvpl.KVPairs, &model.KVPair{
				Key: model.BlockAffinityKey{
					CIDR: *cidr,
					Host: node.Name,
				},
				Value:    &model.BlockAffinity{State: model.StateConfirmed},
				Revision: node.ResourceVersion,
			})
		}
		return kvpl, nil
	}

	// Currently querying the affinity block is only used by the BGP syncer *and* we always
	// query for a specific Node, so for now fail List requests for all nodes.
	log.Warn("Operation List (all nodes or all IP versions) is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: list,
		Operation:  "List",
	}
}
|
||||
|
||||
// EnsureInitialized is a no-op; there is no backing state to create for
// pod-CIDR-derived affinities.
func (c *podCIDRBlockClient) EnsureInitialized() error {
	return nil
}
|
||||
189
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_config.go
generated
vendored
189
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_config.go
generated
vendored
@@ -1,189 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
	// IPAMConfigResourceName is the resource plural used to address IPAMConfig CRDs.
	IPAMConfigResourceName = "IPAMConfigs"
	// IPAMConfigCRDName is the fully qualified name of the IPAMConfig CRD.
	IPAMConfigCRDName = "ipamconfigs.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPAMConfigClient returns a K8sResourceClient for the (single, global)
// IPAMConfig object backed by a CRD in the Kubernetes API.
func NewIPAMConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &ipamConfigClient{
		rc: customK8sResourceClient{
			clientSet:       c,
			restClient:      r,
			name:            IPAMConfigCRDName,
			resource:        IPAMConfigResourceName,
			description:     "Calico IPAM configuration",
			k8sResourceType: reflect.TypeOf(apiv3.IPAMConfig{}),
			k8sResourceTypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMConfig,
				APIVersion: apiv3.GroupVersionCurrent,
			},
			k8sListType:  reflect.TypeOf(apiv3.IPAMConfigList{}),
			resourceKind: apiv3.KindIPAMConfig}}
}
|
||||
|
||||
// ipamConfigClient implements the api.Client interface for IPAMConfig objects. It
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
// and the CRDs which are used to actually store the data in the Kubernetes API.
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
// kubernetes CRDs.
type ipamConfigClient struct {
	// rc performs the raw CRD CRUD operations against the Kubernetes API.
	rc customK8sResourceClient
}
|
||||
|
||||
// toV1 converts the given v3 CRD KVPair into a v1 model representation
// which can be passed to the IPAM code.
func (c ipamConfigClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
	v3obj := kvpv3.Value.(*apiv3.IPAMConfig)
	return &model.KVPair{
		// IPAMConfig is a singleton, so the v1 key carries no fields.
		Key: model.IPAMConfigKey{},
		Value: &model.IPAMConfig{
			StrictAffinity:     v3obj.Spec.StrictAffinity,
			AutoAllocateBlocks: v3obj.Spec.AutoAllocateBlocks,
		},
		Revision: kvpv3.Revision,
		UID:      &kvpv3.Value.(*apiv3.IPAMConfig).UID,
	}, nil
}
|
||||
|
||||
// toV3 takes the given v1 KVPair and converts it into a v3 representation, suitable
// for writing as a CRD to the Kubernetes API.
func (c ipamConfigClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	v1obj := kvpv1.Value.(*model.IPAMConfig)
	return &model.KVPair{
		Key: model.ResourceKey{
			// Singleton object: always stored under the well-known global name.
			Name: model.IPAMConfigGlobalName,
			Kind: apiv3.KindIPAMConfig,
		},
		Value: &apiv3.IPAMConfig{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMConfig,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:            model.IPAMConfigGlobalName,
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMConfigSpec{
				StrictAffinity:     v1obj.StrictAffinity,
				AutoAllocateBlocks: v1obj.AutoAllocateBlocks,
			},
		},
		Revision: kvpv1.Revision,
	}
}
|
||||
|
||||
// Create writes the given v1 IPAMConfig as a new CRD and returns the stored
// result converted back to v1 form.
func (c *ipamConfigClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Create request on IPAMConfig type")
	nkvp, err := c.rc.Create(ctx, c.toV3(kvp))
	if err != nil {
		return nil, err
	}
	kvp, err = c.toV1(nkvp)
	if err != nil {
		return nil, err
	}
	return kvp, nil
}
|
||||
|
||||
// Update writes the given v1 IPAMConfig over the existing CRD (the embedded
// Revision provides the compare-and-swap) and returns the stored result
// converted back to v1 form.
func (c *ipamConfigClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Update request on IPAMConfig type")
	nkvp, err := c.rc.Update(ctx, c.toV3(kvp))
	if err != nil {
		return nil, err
	}
	kvp, err = c.toV1(nkvp)
	if err != nil {
		return nil, err
	}
	return kvp, nil
}
|
||||
|
||||
// DeleteKVP deletes the IPAMConfig; unlike blocks/handles, no mark-then-delete
// dance is needed here, so it simply forwards to Delete.
func (c *ipamConfigClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
// Delete removes the global IPAMConfig CRD; the given key is ignored beyond
// selecting the well-known global name. Returns the deleted object in v1 form.
func (c *ipamConfigClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	k := model.ResourceKey{
		Name: model.IPAMConfigGlobalName,
		Kind: apiv3.KindIPAMConfig,
	}
	kvp, err := c.rc.Delete(ctx, k, revision, uid)
	if err != nil {
		return nil, err
	}
	v1nkvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}
	return v1nkvp, nil
}
|
||||
|
||||
// Get retrieves the global IPAMConfig CRD and returns it in v1 form; the
// given key is ignored beyond selecting the well-known global name.
func (c *ipamConfigClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on IPAMConfig type")
	k := model.ResourceKey{
		Name: model.IPAMConfigGlobalName,
		Kind: apiv3.KindIPAMConfig,
	}
	kvp, err := c.rc.Get(ctx, k, revision)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}
|
||||
|
||||
// List is not supported for the singleton IPAMConfig; use Get instead.
func (c *ipamConfigClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	log.Warn("Operation List is not supported on IPAMConfig type")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: list,
		Operation:  "List",
	}
}
|
||||
|
||||
// Watch is not supported for the singleton IPAMConfig.
func (c *ipamConfigClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	log.Warn("Operation Watch is not supported on IPAMConfig type")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: list,
		Operation:  "Watch",
	}
}
|
||||
|
||||
// EnsureInitialized is a no-op since the CRD should be
// initialized in advance.
func (c *ipamConfigClient) EnsureInitialized() error {
	return nil
}
|
||||
208
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_handle.go
generated
vendored
208
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_handle.go
generated
vendored
@@ -1,208 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
	// IPAMHandleResourceName is the resource plural used to address IPAMHandle CRDs.
	IPAMHandleResourceName = "IPAMHandles"
	// IPAMHandleCRDName is the fully qualified name of the IPAMHandle CRD.
	IPAMHandleCRDName = "ipamhandles.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPAMHandleClient returns a K8sResourceClient for IPAMHandle objects
// backed by CRDs in the Kubernetes API.
func NewIPAMHandleClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	// Create a resource client which manages k8s CRDs.
	rc := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            IPAMHandleCRDName,
		resource:        IPAMHandleResourceName,
		description:     "Calico IPAM handles",
		k8sResourceType: reflect.TypeOf(apiv3.IPAMHandle{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindIPAMHandle,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.IPAMHandleList{}),
		resourceKind: apiv3.KindIPAMHandle,
	}

	return &ipamHandleClient{rc: rc}
}
|
||||
|
||||
// ipamHandleClient implements the api.Client interface for IPAMHandle objects. It
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
// and the CRDs which are used to actually store the data in the Kubernetes API.
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
// kubernetes CRDs.
type ipamHandleClient struct {
	// rc performs the raw CRD CRUD operations against the Kubernetes API.
	rc customK8sResourceClient
}
|
||||
|
||||
// toV1 converts the given v3 IPAMHandle CRD KVPair into the v1 model
// representation understood by the IPAM code.
func (c ipamHandleClient) toV1(kvpv3 *model.KVPair) *model.KVPair {
	handle := kvpv3.Value.(*apiv3.IPAMHandle).Spec.HandleID
	block := kvpv3.Value.(*apiv3.IPAMHandle).Spec.Block
	return &model.KVPair{
		Key: model.IPAMHandleKey{
			HandleID: handle,
		},
		Value: &model.IPAMHandle{
			HandleID: handle,
			Block:    block,
		},
		Revision: kvpv3.Revision,
	}
}
|
||||
|
||||
// parseKey derives the CRD object name from a v1 IPAMHandleKey by lowercasing
// the handle ID — presumably to satisfy Kubernetes object-name rules; confirm
// there is no collision risk for handle IDs differing only in case.
func (c ipamHandleClient) parseKey(k model.Key) string {
	return strings.ToLower(k.(model.IPAMHandleKey).HandleID)
}
|
||||
|
||||
// toV3 converts the given v1 IPAM handle KVPair into its v3 CRD
// representation, suitable for writing to the Kubernetes API. Note the CRD
// name is the lowercased handle ID while the Spec preserves the original
// casing.
func (c ipamHandleClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	name := c.parseKey(kvpv1.Key)
	handle := kvpv1.Key.(model.IPAMHandleKey).HandleID
	block := kvpv1.Value.(*model.IPAMHandle).Block
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindIPAMHandle,
		},
		Value: &apiv3.IPAMHandle{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMHandle,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:            name,
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMHandleSpec{
				HandleID: handle,
				Block:    block,
			},
		},
		Revision: kvpv1.Revision,
	}
}
|
||||
|
||||
func (c *ipamHandleClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Create(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.toV1(kvp), nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Update(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.toV1(kvp), nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
// We need to mark as deleted first, since the Kubernetes API doesn't support
|
||||
// compare-and-delete. This update operation allows us to eliminate races with other clients.
|
||||
name := c.parseKey(kvp.Key)
|
||||
kvp.Value.(*model.IPAMHandle).Deleted = true
|
||||
v1kvp, err := c.Update(ctx, kvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Now actually delete the object.
|
||||
k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMHandle}
|
||||
kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.toV1(kvp), nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
// Delete should not be used for handles, since we need the object UID for correctness.
|
||||
log.Warn("Operation Delete is not supported on IPAMHandle type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
name := c.parseKey(key)
|
||||
k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMHandle}
|
||||
kvp, err := c.rc.Get(ctx, k, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert it to v1.
|
||||
v1kvp := c.toV1(kvp)
|
||||
|
||||
// If this object has been marked as deleted, then we need to clean it up and
|
||||
// return not found.
|
||||
if v1kvp.Value.(*model.IPAMHandle).Deleted {
|
||||
if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{fmt.Errorf("Resource was deleted"), key}
|
||||
}
|
||||
|
||||
return v1kvp, nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
l := model.ResourceListOptions{Kind: apiv3.KindIPAMHandle}
|
||||
v3list, err := c.rc.List(ctx, l, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
|
||||
for _, i := range v3list.KVPairs {
|
||||
v1kvp := c.toV1(i)
|
||||
kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
|
||||
}
|
||||
return kvpl, nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
log.Warn("Operation Watch is not supported on IPAMHandle type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: list,
|
||||
Operation: "Watch",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) EnsureInitialized() error {
|
||||
return nil
|
||||
}
|
||||
84
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ippool.go
generated
vendored
84
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ippool.go
generated
vendored
@@ -1,84 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/encap"
|
||||
)
|
||||
|
||||
const (
|
||||
IPPoolResourceName = "IPPools"
|
||||
IPPoolCRDName = "ippools.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewIPPoolClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: IPPoolCRDName,
|
||||
resource: IPPoolResourceName,
|
||||
description: "Calico IP Pools",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.IPPool{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindIPPool,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.IPPoolList{}),
|
||||
resourceKind: apiv3.KindIPPool,
|
||||
versionconverter: IPPoolv1v3Converter{},
|
||||
}
|
||||
}
|
||||
|
||||
// IPPoolv1v3Converter implements VersionConverter interface.
|
||||
type IPPoolv1v3Converter struct{}
|
||||
|
||||
// ConvertFromK8s converts v1 IPPool Resource to v3 IPPool resource
|
||||
func (c IPPoolv1v3Converter) ConvertFromK8s(inRes Resource) (Resource, error) {
|
||||
ipp, ok := inRes.(*apiv3.IPPool)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid type conversion")
|
||||
}
|
||||
|
||||
// If IPIP field is not nil, then it means the resource has v1 IPIP data
|
||||
// and we must convert it to v3 equivalent data.
|
||||
if ipp.Spec.IPIP != nil {
|
||||
if !ipp.Spec.IPIP.Enabled {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeNever
|
||||
} else if ipp.Spec.IPIP.Mode == encap.CrossSubnet {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeCrossSubnet
|
||||
} else {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeAlways
|
||||
}
|
||||
|
||||
// Set IPIP to nil since we've already converted v1 IPIP fields to v3.
|
||||
ipp.Spec.IPIP = nil
|
||||
}
|
||||
|
||||
// Take a logical OR of the v1 NATOutgoing field with the v3 NATOutgoing.
|
||||
ipp.Spec.NATOutgoing = ipp.Spec.NATOutgoingV1 || ipp.Spec.NATOutgoing
|
||||
|
||||
// Set v1 NatOutgoing to false since we've already converted it to v3 NatOutgoing.
|
||||
ipp.Spec.NATOutgoingV1 = false
|
||||
|
||||
return ipp, nil
|
||||
}
|
||||
520
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkpolicy.go
generated
vendored
520
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkpolicy.go
generated
vendored
@@ -1,520 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
NetworkPolicyResourceName = "NetworkPolicies"
|
||||
NetworkPolicyCRDName = "networkpolicies.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewNetworkPolicyClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
crdClient := &customK8sResourceClient{
|
||||
restClient: r,
|
||||
name: NetworkPolicyCRDName,
|
||||
resource: NetworkPolicyResourceName,
|
||||
description: "Calico Network Policies",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.NetworkPolicy{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindNetworkPolicy,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.NetworkPolicyList{}),
|
||||
resourceKind: apiv3.KindNetworkPolicy,
|
||||
namespaced: true,
|
||||
}
|
||||
return &networkPolicyClient{
|
||||
clientSet: c,
|
||||
crdClient: crdClient,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for NetworkPolicys.
|
||||
type networkPolicyClient struct {
|
||||
conversion.Converter
|
||||
resourceName string
|
||||
clientSet *kubernetes.Clientset
|
||||
crdClient *customK8sResourceClient
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debug("Received Create request on NetworkPolicy type")
|
||||
key := kvp.Key.(model.ResourceKey)
|
||||
if strings.HasPrefix(key.Name, conversion.K8sNetworkPolicyNamePrefix) {
|
||||
// We don't support Create of a Kubernetes NetworkPolicy.
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
kvp, err := c.crdClient.Create(ctx, kvp)
|
||||
if kvp != nil {
|
||||
// Convert the revision to the combined CRD/k8s revision - the k8s rev will be empty, but this
|
||||
// format will allow the revision to be passed into List and Watch calls.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
return kvp, err
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debug("Received Update request on NetworkPolicy type")
|
||||
|
||||
key := kvp.Key.(model.ResourceKey)
|
||||
if strings.HasPrefix(key.Name, conversion.K8sNetworkPolicyNamePrefix) {
|
||||
// We don't support Update of a Kubernetes NetworkPolicy.
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Update",
|
||||
}
|
||||
}
|
||||
|
||||
// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
|
||||
// the CRD rev and use that for the update.
|
||||
crdRev, _, err := c.SplitNetworkPolicyRevision(kvp.Revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvp.Revision = crdRev
|
||||
kvp, err = c.crdClient.Update(ctx, kvp)
|
||||
|
||||
if kvp != nil {
|
||||
// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty, but this
|
||||
// format will allow the revision to be passed into List and Watch calls.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
return kvp, err
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Apply(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Apply",
|
||||
}
|
||||
}
|
||||
func (c *networkPolicyClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Debug("Received Delete request on NetworkPolicy type")
|
||||
k := key.(model.ResourceKey)
|
||||
if strings.HasPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix) {
|
||||
// We don't support Delete of a Kubernetes NetworkPolicy.
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
|
||||
// the CRD rev and use that for the delete.
|
||||
crdRev, _, err := c.SplitNetworkPolicyRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvp, err := c.crdClient.Delete(ctx, key, crdRev, uid)
|
||||
|
||||
if kvp != nil {
|
||||
// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
return kvp, err
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on NetworkPolicy type")
|
||||
k := key.(model.ResourceKey)
|
||||
if k.Name == "" {
|
||||
return nil, errors.New("Missing policy name")
|
||||
}
|
||||
if k.Namespace == "" {
|
||||
return nil, errors.New("Missing policy namespace")
|
||||
}
|
||||
|
||||
// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
|
||||
// the k8s rev and use the correct version depending on whether we are querying the CRD or the
|
||||
// k8s NetworkPolicy.
|
||||
crdRev, k8sRev, err := c.SplitNetworkPolicyRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check to see if this is backed by a NetworkPolicy.
|
||||
if strings.HasPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix) {
|
||||
// Backed by a NetworkPolicy - extract the name.
|
||||
policyName := strings.TrimPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix)
|
||||
|
||||
// Get the NetworkPolicy from the API and convert it.
|
||||
networkPolicy := networkingv1.NetworkPolicy{}
|
||||
err = c.clientSet.NetworkingV1().RESTClient().
|
||||
Get().
|
||||
Resource("networkpolicies").
|
||||
Namespace(k.Namespace).
|
||||
Name(policyName).
|
||||
VersionedParams(&metav1.GetOptions{ResourceVersion: k8sRev}, scheme.ParameterCodec).
|
||||
Do().Into(&networkPolicy)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, k)
|
||||
}
|
||||
kvp, err := c.K8sNetworkPolicyToCalico(&networkPolicy)
|
||||
|
||||
if kvp != nil {
|
||||
// Convert the revision back to the combined CRD/k8s revision - the CRD rev will be empty.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions("", kvp.Revision)
|
||||
}
|
||||
return kvp, err
|
||||
} else {
|
||||
kvp, err := c.crdClient.Get(ctx, k, crdRev)
|
||||
|
||||
if kvp != nil {
|
||||
// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
return kvp, err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on NetworkPolicy type")
|
||||
l := list.(model.ResourceListOptions)
|
||||
if l.Name != "" {
|
||||
// Exact lookup on a NetworkPolicy.
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: l.Name, Namespace: l.Namespace, Kind: l.Kind}, revision)
|
||||
if err != nil {
|
||||
// Return empty slice of KVPair if the object doesn't exist, return the error otherwise.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// List all Namespaced Calico Network Policies.
|
||||
npKvps, err := c.crdClient.List(ctx, l, revision)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Unable to list Calico CRD-backed Network Policy resources")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the revision to the combined CRD/k8s revision - the k8s rev will be empty.
|
||||
for _, kvp := range npKvps.KVPairs {
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
|
||||
// List all of the k8s NetworkPolicy objects in all Namespaces.
|
||||
networkPolicies := networkingv1.NetworkPolicyList{}
|
||||
req := c.clientSet.NetworkingV1().RESTClient().
|
||||
Get().
|
||||
Resource("networkpolicies")
|
||||
if l.Namespace != "" {
|
||||
// Add the namespace if requested.
|
||||
req = req.Namespace(l.Namespace)
|
||||
}
|
||||
err = req.Do().Into(&networkPolicies)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Unable to list K8s Network Policy resources")
|
||||
return nil, K8sErrorToCalico(err, l)
|
||||
}
|
||||
|
||||
// For each policy, turn it into a Policy and generate the list.
|
||||
for _, p := range networkPolicies.Items {
|
||||
kvp, err := c.K8sNetworkPolicyToCalico(&p)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Failed to convert K8s Network Policy")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the revision to the combined CRD/k8s revision - the CRD rev will be empty.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions("", kvp.Revision)
|
||||
npKvps.KVPairs = append(npKvps.KVPairs, kvp)
|
||||
}
|
||||
|
||||
// Combine the two resource versions to a single resource version for the List
|
||||
// that can be decoded by the Watch.
|
||||
npKvps.Revision = c.JoinNetworkPolicyRevisions(npKvps.Revision, networkPolicies.ResourceVersion)
|
||||
|
||||
log.WithField("KVPs", npKvps).Info("Returning NP KVPs")
|
||||
return npKvps, nil
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) EnsureInitialized() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *networkPolicyClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
// Build watch options to pass to k8s.
|
||||
opts := metav1.ListOptions{Watch: true}
|
||||
rlo, ok := list.(model.ResourceListOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
|
||||
}
|
||||
|
||||
// Setting to Watch all networkPolicies in all namespaces; overriden below
|
||||
watchK8s, watchCrd := true, true
|
||||
|
||||
// Watch a specific networkPolicy
|
||||
if len(rlo.Name) != 0 {
|
||||
if len(rlo.Namespace) == 0 {
|
||||
return nil, errors.New("cannot watch a specific NetworkPolicy without a namespace")
|
||||
}
|
||||
// We've been asked to watch a specific networkpolicy.
|
||||
log.WithField("name", rlo.Name).Debug("Watching a single networkpolicy")
|
||||
// Backed by a NetworkPolicy - extract the name.
|
||||
policyName := rlo.Name
|
||||
if strings.HasPrefix(rlo.Name, conversion.K8sNetworkPolicyNamePrefix) {
|
||||
watchCrd = false
|
||||
policyName = strings.TrimPrefix(rlo.Name, conversion.K8sNetworkPolicyNamePrefix)
|
||||
} else {
|
||||
watchK8s = false
|
||||
}
|
||||
// write back in rlo for custom resource watch below
|
||||
rlo.Name = policyName
|
||||
opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", policyName).String()
|
||||
}
|
||||
|
||||
// If a revision is specified, see if it contains a "/" and if so split into separate
|
||||
// revisions for the CRD and for the K8s resource.
|
||||
crdNPRev, k8sNPRev, err := c.SplitNetworkPolicyRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts.ResourceVersion = k8sNPRev
|
||||
var k8sRawWatch kwatch.Interface = kwatch.NewFake()
|
||||
if watchK8s {
|
||||
log.Debugf("Watching networkPolicy (k8s) at revision %q", k8sNPRev)
|
||||
k8sRawWatch, err = c.clientSet.NetworkingV1().NetworkPolicies(rlo.Namespace).Watch(opts)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
}
|
||||
converter := func(r Resource) (*model.KVPair, error) {
|
||||
np, ok := r.(*networkingv1.NetworkPolicy)
|
||||
if !ok {
|
||||
return nil, errors.New("NetworkPolicy conversion with incorrect k8s resource type")
|
||||
}
|
||||
return c.K8sNetworkPolicyToCalico(np)
|
||||
}
|
||||
k8sWatch := newK8sWatcherConverter(ctx, "NetworkPolicy (namespaced)", converter, k8sRawWatch)
|
||||
|
||||
var calicoWatch api.WatchInterface = api.NewFake()
|
||||
if watchCrd {
|
||||
log.Debugf("Watching networkPolicy (crd) at revision %q", crdNPRev)
|
||||
calicoWatch, err = c.crdClient.Watch(ctx, rlo, crdNPRev)
|
||||
if err != nil {
|
||||
k8sWatch.Stop()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return newNetworkPolicyWatcher(ctx, k8sNPRev, crdNPRev, k8sWatch, calicoWatch), nil
|
||||
}
|
||||
|
||||
func newNetworkPolicyWatcher(ctx context.Context, k8sRev, crdRev string, k8sWatch, calicoWatch api.WatchInterface) api.WatchInterface {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
wc := &networkPolicyWatcher{
|
||||
k8sNPRev: k8sRev,
|
||||
crdNPRev: crdRev,
|
||||
k8sNPWatch: k8sWatch,
|
||||
crdNPWatch: calicoWatch,
|
||||
context: ctx,
|
||||
cancel: cancel,
|
||||
resultChan: make(chan api.WatchEvent, resultsBufSize),
|
||||
}
|
||||
go wc.processNPEvents()
|
||||
return wc
|
||||
}
|
||||
|
||||
type networkPolicyWatcher struct {
|
||||
conversion.Converter
|
||||
converter ConvertK8sResourceToKVPair
|
||||
k8sNPRev string
|
||||
crdNPRev string
|
||||
k8sNPWatch api.WatchInterface
|
||||
crdNPWatch api.WatchInterface
|
||||
context context.Context
|
||||
cancel context.CancelFunc
|
||||
resultChan chan api.WatchEvent
|
||||
terminated uint32
|
||||
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
|
||||
// This calls through to the context cancel function.
|
||||
func (npw *networkPolicyWatcher) Stop() {
|
||||
npw.cancel()
|
||||
npw.k8sNPWatch.Stop()
|
||||
npw.crdNPWatch.Stop()
|
||||
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
|
||||
func (npw *networkPolicyWatcher) ResultChan() <-chan api.WatchEvent {
|
||||
return npw.resultChan
|
||||
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
|
||||
func (npw *networkPolicyWatcher) HasTerminated() bool {
|
||||
terminated := atomic.LoadUint32(&npw.terminated) != 0
|
||||
|
||||
if npw.k8sNPWatch != nil {
|
||||
terminated = terminated && npw.k8sNPWatch.HasTerminated()
|
||||
}
|
||||
if npw.crdNPWatch != nil {
|
||||
terminated = terminated && npw.crdNPWatch.HasTerminated()
|
||||
}
|
||||
|
||||
return terminated
|
||||
}
|
||||
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
|
||||
// backend KVPs.
|
||||
func (npw *networkPolicyWatcher) processNPEvents() {
|
||||
log.Debug("Watcher process started")
|
||||
defer func() {
|
||||
log.Debug("Watcher process terminated")
|
||||
npw.Stop()
|
||||
close(npw.resultChan)
|
||||
atomic.AddUint32(&npw.terminated, 1)
|
||||
}()
|
||||
|
||||
for {
|
||||
var ok bool
|
||||
var e api.WatchEvent
|
||||
var isCRDEvent bool
|
||||
select {
|
||||
case e, ok = <-npw.crdNPWatch.ResultChan():
|
||||
if !ok {
|
||||
// We shouldn't get a closed channel without first getting a terminating error,
|
||||
// so write a warning log and convert to a termination error.
|
||||
log.Warn("Calico NP channel closed")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
ClosedByRemote: true,
|
||||
Err: errors.New("Calico NP watch channel closed"),
|
||||
},
|
||||
}
|
||||
}
|
||||
log.Debug("Processing Calico NP event")
|
||||
isCRDEvent = true
|
||||
|
||||
case e, ok = <-npw.k8sNPWatch.ResultChan():
|
||||
if !ok {
|
||||
// We shouldn't get a closed channel without first getting a terminating error,
|
||||
// so write a warning log and convert to a termination error.
|
||||
log.Warn("Kubernetes NP channel closed")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
ClosedByRemote: true,
|
||||
Err: errors.New("Kubernetes NP watch channel closed"),
|
||||
},
|
||||
}
|
||||
}
|
||||
log.Debug("Processing Kubernetes NP event")
|
||||
isCRDEvent = false
|
||||
|
||||
case <-npw.context.Done(): // user cancel
|
||||
log.Debug("Process watcher done event in KDD client")
|
||||
return
|
||||
}
|
||||
|
||||
// Update the resource version of the Object in the watcher. The version returned on a watch
|
||||
// event needs to able to be passed back into a Watch client so that we can resume watching
|
||||
// when a watch fails. The watch client is expecting a slash separated list of resource
|
||||
// versions in the format <CRD NP Revision>/<k8s NP Revision>.
|
||||
var value interface{}
|
||||
switch e.Type {
|
||||
case api.WatchModified, api.WatchAdded:
|
||||
value = e.New.Value
|
||||
case api.WatchDeleted:
|
||||
value = e.Old.Value
|
||||
}
|
||||
|
||||
if value != nil {
|
||||
oma, ok := value.(metav1.ObjectMetaAccessor)
|
||||
if !ok {
|
||||
log.WithField("event", e).Error(
|
||||
"Resource returned from watch does not implement the ObjectMetaAccessor interface")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
Err: errors.New("Resource returned from watch does not implement the ObjectMetaAccessor interface"),
|
||||
},
|
||||
}
|
||||
}
|
||||
if isCRDEvent {
|
||||
npw.crdNPRev = oma.GetObjectMeta().GetResourceVersion()
|
||||
} else {
|
||||
npw.k8sNPRev = oma.GetObjectMeta().GetResourceVersion()
|
||||
}
|
||||
oma.GetObjectMeta().SetResourceVersion(npw.JoinNetworkPolicyRevisions(npw.crdNPRev, npw.k8sNPRev))
|
||||
} else if e.Error == nil {
|
||||
log.WithField("event", e).Warning("Event had nil error and value")
|
||||
}
|
||||
|
||||
// Send the processed event.
|
||||
select {
|
||||
case npw.resultChan <- e:
|
||||
// If this is an error event, check to see if it's a terminating one.
|
||||
// If so, terminate this watcher.
|
||||
if e.Type == api.WatchError {
|
||||
log.WithError(e.Error).Debug("Kubernetes event converted to backend watcher error event")
|
||||
if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
|
||||
log.Debug("Watch terminated event")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
case <-npw.context.Done():
|
||||
log.Debug("Process watcher done event during watch event in kdd client")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
48
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkset.go
generated
vendored
48
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkset.go
generated
vendored
@@ -1,48 +0,0 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
NetworkSetResourceName = "NetworkSets"
|
||||
NetworkSetCRDName = "networksets.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewNetworkSetClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: NetworkSetCRDName,
|
||||
resource: NetworkSetResourceName,
|
||||
description: "Calico Network Sets",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.NetworkSet{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindNetworkSet,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.NetworkSetList{}),
|
||||
resourceKind: apiv3.KindNetworkSet,
|
||||
namespaced: true,
|
||||
}
|
||||
}
|
||||
437
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/node.go
generated
vendored
437
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/node.go
generated
vendored
@@ -1,437 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeBgpIpv4AddrAnnotation = "projectcalico.org/IPv4Address"
|
||||
nodeBgpIpv4IPIPTunnelAddrAnnotation = "projectcalico.org/IPv4IPIPTunnelAddr"
|
||||
nodeBgpIpv4VXLANTunnelAddrAnnotation = "projectcalico.org/IPv4VXLANTunnelAddr"
|
||||
nodeBgpVXLANTunnelMACAddrAnnotation = "projectcalico.org/VXLANTunnelMACAddr"
|
||||
nodeBgpIpv6AddrAnnotation = "projectcalico.org/IPv6Address"
|
||||
nodeBgpAsnAnnotation = "projectcalico.org/ASNumber"
|
||||
nodeBgpCIDAnnotation = "projectcalico.org/RouteReflectorClusterID"
|
||||
nodeK8sLabelAnnotation = "projectcalico.org/kube-labels"
|
||||
)
|
||||
|
||||
func NewNodeClient(c *kubernetes.Clientset, usePodCIDR bool) K8sResourceClient {
|
||||
return &nodeClient{
|
||||
clientSet: c,
|
||||
usePodCIDR: usePodCIDR,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for Nodes.
|
||||
type nodeClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
usePodCIDR bool
|
||||
}
|
||||
|
||||
func (c *nodeClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Create is not supported on Node type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
// Update writes back a Calico Node by merging its settings into the current
// Kubernetes Node and updating the node via the status subresource.  The
// stored result is converted back to a Calico Node and returned.
func (c *nodeClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Update request on Node type")
	// Get a current copy of the node to fill in fields we don't track.
	oldNode, err := c.clientSet.CoreV1().Nodes().Get(kvp.Key.(model.ResourceKey).Name, metav1.GetOptions{})
	if err != nil {
		return nil, K8sErrorToCalico(err, kvp.Key)
	}

	// Merge the Calico-owned fields (annotations, labels) into the fresh copy.
	node, err := mergeCalicoNodeIntoK8sNode(kvp.Value.(*apiv3.Node), oldNode)
	if err != nil {
		return nil, err
	}

	// NOTE(review): UpdateStatus (not Update) is used here — presumably to
	// limit the write to the status subresource's permissions; confirm
	// against the intended RBAC setup.
	newNode, err := c.clientSet.CoreV1().Nodes().UpdateStatus(node)
	if err != nil {
		log.WithError(err).Info("Error updating Node resource")
		return nil, K8sErrorToCalico(err, kvp.Key)
	}

	// Convert the stored node back so the caller sees the datastore's view,
	// including the new resource version.
	newCalicoNode, err := K8sNodeToCalico(newNode, c.usePodCIDR)
	if err != nil {
		log.Errorf("Failed to parse returned Node after call to update %+v", newNode)
		return nil, err
	}

	return newCalicoNode, nil
}
|
||||
|
||||
// DeleteKVP delegates to Delete using the key, revision and UID carried by
// the KVPair.
func (c *nodeClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
func (c *nodeClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on Node type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
// Get fetches a single Kubernetes Node by name (at the given resource
// version) and converts it to a Calico Node KVPair.
func (c *nodeClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on Node type")
	node, err := c.clientSet.CoreV1().Nodes().Get(key.(model.ResourceKey).Name, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, key)
	}

	kvp, err := K8sNodeToCalico(node, c.usePodCIDR)
	if err != nil {
		log.WithError(err).Error("Couldn't convert k8s node.")
		return nil, err
	}

	return kvp, nil
}
|
||||
|
||||
func (c *nodeClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on Node type")
|
||||
nl := list.(model.ResourceListOptions)
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
if nl.Name != "" {
|
||||
// The node is already fully qualified, so perform a Get instead.
|
||||
// If the entry does not exist then we just return an empty list.
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: nl.Name, Kind: apiv3.KindNode}, revision)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Listing all nodes.
|
||||
nodes, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: revision})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
|
||||
for _, node := range nodes.Items {
|
||||
kvp, err := K8sNodeToCalico(&node, c.usePodCIDR)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to convert k8s node to Calico node: node=%s: %v", node.Name, err)
|
||||
continue
|
||||
}
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op for nodes: the data lives in the Kubernetes
// API, so there is no datastore state to initialize.
func (c *nodeClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Watch starts a Kubernetes watch on Nodes (optionally restricted to a single
// named node) and returns the event stream converted to Calico Node KVPairs.
func (c *nodeClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	// Build watch options to pass to k8s.
	opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
	rlo, ok := list.(model.ResourceListOptions)
	if !ok {
		return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
	}
	if len(rlo.Name) != 0 {
		// We've been asked to watch a specific node resource.
		log.WithField("name", rlo.Name).Debug("Watching a single node")
		opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", rlo.Name).String()
	}

	k8sWatch, err := c.clientSet.CoreV1().Nodes().Watch(opts)
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	// converter turns each raw k8s Node event payload into a Calico KVPair.
	converter := func(r Resource) (*model.KVPair, error) {
		k8sNode, ok := r.(*kapiv1.Node)
		if !ok {
			return nil, errors.New("node conversion with incorrect k8s resource type")
		}
		return K8sNodeToCalico(k8sNode, c.usePodCIDR)
	}
	return newK8sWatcherConverter(ctx, "Node", converter, k8sWatch), nil
}
|
||||
|
||||
// K8sNodeToCalico converts a Kubernetes format node, with Calico annotations, to a Calico Node.
func K8sNodeToCalico(k8sNode *kapiv1.Node, usePodCIDR bool) (*model.KVPair, error) {
	// Create a new CalicoNode resource and copy the settings across from the k8s Node.
	calicoNode := apiv3.NewNode()
	calicoNode.ObjectMeta.Name = k8sNode.Name
	SetCalicoMetadataFromK8sAnnotations(calicoNode, k8sNode)

	// Calico Nodes inherit labels from Kubernetes nodes, do that merge.
	err := mergeCalicoAndK8sLabels(calicoNode, k8sNode)
	if err != nil {
		log.WithError(err).Error("Failed to merge Calico and Kubernetes labels.")
		return nil, err
	}

	// Extract the BGP configuration stored in the annotations.
	bgpSpec := &apiv3.NodeBGPSpec{}
	annotations := k8sNode.ObjectMeta.Annotations
	bgpSpec.IPv4Address = annotations[nodeBgpIpv4AddrAnnotation]
	bgpSpec.IPv6Address = annotations[nodeBgpIpv6AddrAnnotation]
	bgpSpec.RouteReflectorClusterID = annotations[nodeBgpCIDAnnotation]
	asnString, ok := annotations[nodeBgpAsnAnnotation]
	if ok {
		asn, err := numorstring.ASNumberFromString(asnString)
		if err != nil {
			// A malformed AS number annotation is logged and ignored rather
			// than failing the whole conversion.
			log.WithError(err).Infof("failed to read node AS number from annotation: %s", nodeBgpAsnAnnotation)
		} else {
			bgpSpec.ASNumber = &asn
		}
	}
	bgpSpec.IPv4IPIPTunnelAddr = annotations[nodeBgpIpv4IPIPTunnelAddrAnnotation]

	// If using host-local IPAM, assign an IPIP tunnel address statically.
	// This overrides any value read from the annotation above.
	if usePodCIDR && k8sNode.Spec.PodCIDR != "" {
		// For back compatibility with v2.6.x, always generate a tunnel address if we have the pod CIDR.
		bgpSpec.IPv4IPIPTunnelAddr, err = getIPIPTunnelAddress(k8sNode)
		if err != nil {
			return nil, err
		}
	}

	// Only set the BGP spec if it is not empty.
	if !reflect.DeepEqual(*bgpSpec, apiv3.NodeBGPSpec{}) {
		calicoNode.Spec.BGP = bgpSpec
	}

	// Set the VXLAN tunnel address based on annotation.
	calicoNode.Spec.IPv4VXLANTunnelAddr = annotations[nodeBgpIpv4VXLANTunnelAddrAnnotation]
	calicoNode.Spec.VXLANTunnelMACAddr = annotations[nodeBgpVXLANTunnelMACAddrAnnotation]

	// Create the resource key from the node name.
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: k8sNode.Name,
			Kind: apiv3.KindNode,
		},
		Value:    calicoNode,
		Revision: k8sNode.ObjectMeta.ResourceVersion,
	}, nil
}
|
||||
|
||||
// mergeCalicoNodeIntoK8sNode takes a k8s node and a Calico node and puts the values from the Calico
|
||||
// node into the k8s node.
|
||||
func mergeCalicoNodeIntoK8sNode(calicoNode *apiv3.Node, k8sNode *kapiv1.Node) (*kapiv1.Node, error) {
|
||||
// Nodes inherit labels from Kubernetes, but we also have our own set of labels that are stored in an annotation.
|
||||
// For nodes that are being updated, we want to avoid writing k8s labels that we inherited into our annotation
|
||||
// and we don't want to touch the k8s labels directly. Take a copy of the node resource and update its labels
|
||||
// to match what we want to store in our annotation only.
|
||||
calicoNode, err := restoreCalicoLabels(calicoNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the k8s annotations from the Calico node metadata.
|
||||
SetK8sAnnotationsFromCalicoMetadata(k8sNode, calicoNode)
|
||||
|
||||
// Handle VXLAN address.
|
||||
if calicoNode.Spec.IPv4VXLANTunnelAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4VXLANTunnelAddrAnnotation] = calicoNode.Spec.IPv4VXLANTunnelAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4VXLANTunnelAddrAnnotation)
|
||||
}
|
||||
|
||||
// Handle VXLAN MAC address.
|
||||
if calicoNode.Spec.VXLANTunnelMACAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpVXLANTunnelMACAddrAnnotation] = calicoNode.Spec.VXLANTunnelMACAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpVXLANTunnelMACAddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP == nil {
|
||||
// If it is a empty NodeBGPSpec, remove all annotations.
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4AddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4IPIPTunnelAddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpIpv6AddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpAsnAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpCIDAnnotation)
|
||||
return k8sNode, nil
|
||||
}
|
||||
|
||||
// If the BGP spec is not nil, then handle each field within the BGP spec individually.
|
||||
if calicoNode.Spec.BGP.IPv4Address != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4AddrAnnotation] = calicoNode.Spec.BGP.IPv4Address
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4AddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.IPv4IPIPTunnelAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4IPIPTunnelAddrAnnotation] = calicoNode.Spec.BGP.IPv4IPIPTunnelAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4IPIPTunnelAddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.IPv6Address != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv6AddrAnnotation] = calicoNode.Spec.BGP.IPv6Address
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv6AddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.ASNumber != nil {
|
||||
k8sNode.Annotations[nodeBgpAsnAnnotation] = calicoNode.Spec.BGP.ASNumber.String()
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpAsnAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.RouteReflectorClusterID != "" {
|
||||
k8sNode.Annotations[nodeBgpCIDAnnotation] = calicoNode.Spec.BGP.RouteReflectorClusterID
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpCIDAnnotation)
|
||||
}
|
||||
|
||||
return k8sNode, nil
|
||||
}
|
||||
|
||||
// mergeCalicoAndK8sLabels merges the Kubernetes labels (from k8sNode.Labels) with those that are already present in
|
||||
// calicoNode (which were loaded from our annotation). Kubernetes labels take precedence. To make the operation
|
||||
// reversible (so that we can support write back of a Calico node that was read from Kubernetes), we also store the
|
||||
// complete set of Kubernetes labels in an annotation.
|
||||
//
|
||||
// Note: if a Kubernetes label shadows a Calico label, the Calico label will be lost when the resource is written
|
||||
// back to the datastore. This is consistent with kube-controllers' behavior.
|
||||
func mergeCalicoAndK8sLabels(calicoNode *apiv3.Node, k8sNode *kapiv1.Node) error {
|
||||
// Now, copy the Kubernetes Node labels over. Note: this may overwrite Calico labels of the same name, but that's
|
||||
// consistent with the kube-controllers behavior.
|
||||
for k, v := range k8sNode.Labels {
|
||||
if calicoNode.Labels == nil {
|
||||
calicoNode.Labels = map[string]string{}
|
||||
}
|
||||
calicoNode.Labels[k] = v
|
||||
}
|
||||
|
||||
// For consistency with kube-controllers, and so we can correctly round-trip labels, we stash the kubernetes labels
|
||||
// in an annotation.
|
||||
if calicoNode.Annotations == nil {
|
||||
calicoNode.Annotations = map[string]string{}
|
||||
}
|
||||
bytes, err := json.Marshal(k8sNode.Labels)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Error marshalling node labels")
|
||||
return err
|
||||
}
|
||||
calicoNode.Annotations[nodeK8sLabelAnnotation] = string(bytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
// restoreCalicoLabels tries to undo the transformation done by mergeCalicoLabels. If no changes are needed, it
// returns the input value; otherwise, it returns a copy.
func restoreCalicoLabels(calicoNode *apiv3.Node) (*apiv3.Node, error) {
	rawLabels := calicoNode.Annotations[nodeK8sLabelAnnotation]
	if rawLabels == "" {
		// No stashed k8s labels: nothing was merged on read, so the node can
		// be returned unchanged.
		return calicoNode, nil
	}

	// We're about to update the labels and annotations on the node, take a copy.
	calicoNode = calicoNode.DeepCopy()

	// We stashed the k8s labels in an annotation, extract them so we can compare with the combined labels.
	k8sLabels := map[string]string{}
	if err := json.Unmarshal([]byte(rawLabels), &k8sLabels); err != nil {
		log.WithError(err).Error("Failed to unmarshal k8s node labels from " +
			nodeK8sLabelAnnotation + " annotation")
		return nil, err
	}

	// Now remove any labels that match the k8s ones.
	if log.GetLevel() >= log.DebugLevel {
		log.WithField("k8s", k8sLabels).Debug("Loaded label annotations")
	}
	for k, k8sVal := range k8sLabels {
		if calVal, ok := calicoNode.Labels[k]; ok && calVal != k8sVal {
			// The caller changed a label that is shadowed by a k8s label of
			// the same name; the k8s value wins, so warn that the edit is lost.
			log.WithFields(log.Fields{
				"label": k,
				"newValue": calVal,
				"k8sValue": k8sVal,
			}).Warn("Update to label that is shadowed by a Kubernetes label will be ignored.")
		}

		// The k8s value was inherited and there was no old Calico value, drop the label so that we don't copy
		// it to the Calico annotation.
		if log.GetLevel() >= log.DebugLevel {
			log.WithField("key", k).Debug("Removing inherited k8s label")
		}
		delete(calicoNode.Labels, k)
	}

	// Filter out our bookkeeping annotation, which is only used for round-tripping labels correctly.
	delete(calicoNode.Annotations, nodeK8sLabelAnnotation)
	if len(calicoNode.Annotations) == 0 {
		// Normalize an empty map to nil so the result compares equal to a
		// node that never had annotations.
		calicoNode.Annotations = nil
	}

	return calicoNode, nil
}
|
||||
|
||||
// getIPIPTunnelAddress calculates the IPv4 address to use for the IPIP tunnel based on the node's pod CIDR, for use
|
||||
// in conjunction with host-local IPAM backed by node.Spec.PodCIDR allocations.
|
||||
func getIPIPTunnelAddress(n *kapiv1.Node) (string, error) {
|
||||
ip, _, err := net.ParseCIDR(n.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
log.Warnf("Invalid pod CIDR for node: %s, %s", n.Name, n.Spec.PodCIDR)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// We need to get the IP for the podCIDR and increment it to the
|
||||
// first IP in the CIDR.
|
||||
tunIp := ip.To4()
|
||||
if tunIp == nil {
|
||||
log.WithField("podCIDR", n.Spec.PodCIDR).Infof("Cannot pick an IPv4 tunnel address from the given CIDR")
|
||||
return "", nil
|
||||
}
|
||||
tunIp[3]++
|
||||
|
||||
return tunIp.String(), nil
|
||||
}
|
||||
471
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/profile.go
generated
vendored
471
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/profile.go
generated
vendored
@@ -1,471 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
func NewProfileClient(c *kubernetes.Clientset) K8sResourceClient {
|
||||
return &profileClient{
|
||||
clientSet: c,
|
||||
Converter: conversion.Converter{},
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for Profiles.
type profileClient struct {
	// clientSet is the Kubernetes API client used to read Namespaces and
	// ServiceAccounts (the resources Profiles are derived from).
	clientSet *kubernetes.Clientset
	// Embedded Converter provides the Namespace/ServiceAccount <-> Profile
	// conversion helpers used throughout this client.
	conversion.Converter
}
|
||||
|
||||
func (c *profileClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Create is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *profileClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Update is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Update",
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteKVP delegates to Delete using the key, revision and UID carried by
// the KVPair.
func (c *profileClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
func (c *profileClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *profileClient) getSaKv(sa *kapiv1.ServiceAccount) (*model.KVPair, error) {
|
||||
kvPair, err := c.ServiceAccountToProfile(sa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kvPair, nil
|
||||
}
|
||||
|
||||
// getServiceAccount resolves a service-account-backed profile name to its
// namespace/name, fetches the ServiceAccount at the given revision, and
// converts it to a Profile KVPair.
func (c *profileClient) getServiceAccount(ctx context.Context, rk model.ResourceKey, revision string) (*model.KVPair, error) {

	namespace, serviceAccountName, err := c.ProfileNameToServiceAccount(rk.Name)
	if err != nil {
		return nil, err
	}

	serviceAccount, err := c.clientSet.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, rk)
	}

	return c.getSaKv(serviceAccount)
}
|
||||
|
||||
func (c *profileClient) getNsKv(ns *kapiv1.Namespace) (*model.KVPair, error) {
|
||||
kvPair, err := c.NamespaceToProfile(ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kvPair, nil
|
||||
}
|
||||
|
||||
// getNamespace resolves a namespace-backed profile name to its namespace
// name, fetches the Namespace at the given revision, and converts it to a
// Profile KVPair.
func (c *profileClient) getNamespace(ctx context.Context, rk model.ResourceKey, revision string) (*model.KVPair, error) {
	namespaceName, err := c.ProfileNameToNamespace(rk.Name)
	if err != nil {
		return nil, err
	}

	namespace, err := c.clientSet.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, rk)
	}

	return c.getNsKv(namespace)
}
|
||||
|
||||
func (c *profileClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on Profile type")
|
||||
rk := key.(model.ResourceKey)
|
||||
if rk.Name == "" {
|
||||
return nil, fmt.Errorf("Profile key missing name: %+v", rk)
|
||||
}
|
||||
|
||||
nsRev, saRev, err := c.SplitProfileRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
splits := strings.SplitAfterN(rk.Name, ".", 2)
|
||||
if len(splits) == 1 {
|
||||
return nil, fmt.Errorf("Invalid name %s", rk.Name)
|
||||
}
|
||||
|
||||
switch splits[0] {
|
||||
case conversion.NamespaceProfileNamePrefix:
|
||||
return c.getNamespace(ctx, rk, nsRev)
|
||||
case conversion.ServiceAccountProfileNamePrefix:
|
||||
return c.getServiceAccount(ctx, rk, saRev)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Revision %s invalid", revision)
|
||||
}
|
||||
|
||||
func (c *profileClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on Profile type")
|
||||
nl := list.(model.ResourceListOptions)
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
// If a name is specified, then do an exact lookup.
|
||||
if nl.Name != "" {
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: nl.Name, Kind: nl.Kind}, revision)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
nsRev, saRev, err := c.SplitProfileRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Otherwise, enumerate all.
|
||||
namespaces, err := c.clientSet.CoreV1().Namespaces().List(metav1.ListOptions{ResourceVersion: nsRev})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, nl)
|
||||
}
|
||||
|
||||
// For each Namespace, return a profile.
|
||||
for _, ns := range namespaces.Items {
|
||||
kvp, err := c.getNsKv(&ns)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to convert k8s Namespace to Calico Profile: Namespace=%s: %v", ns.Name, err)
|
||||
continue
|
||||
}
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
|
||||
// Enumerate all SA
|
||||
var serviceaccounts *kapiv1.ServiceAccountList
|
||||
// TBD: narrow down to only to the required namespace
|
||||
serviceaccounts, err = c.clientSet.CoreV1().ServiceAccounts(kapiv1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: saRev})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, nl)
|
||||
}
|
||||
|
||||
for _, sa := range serviceaccounts.Items {
|
||||
kvp, err := c.getSaKv(&sa)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Unable to convert k8s service account to Calico Profile: %s", sa.Name)
|
||||
continue
|
||||
}
|
||||
log.Debug("Converted k8s sa to Calico profile ", sa.Name)
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: c.JoinProfileRevisions(namespaces.ResourceVersion, serviceaccounts.ResourceVersion),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op for profiles: the data lives in the Kubernetes
// API, so there is no datastore state to initialize.
func (c *profileClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Watch watches Profiles by watching the underlying Namespaces and/or
// ServiceAccounts and merging the two converted event streams through a
// profileWatcher.  A named watch restricts to the single backing resource.
func (c *profileClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	// Build watch options to pass to k8s.
	opts := metav1.ListOptions{Watch: true}
	rlo, ok := list.(model.ResourceListOptions)
	if !ok {
		return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
	}

	// Setting to Watch all profiles in all namespaces; overridden below
	watchNS, watchSA := true, true
	ns := kapiv1.NamespaceAll
	sa := ""

	// Watch a specific profile.
	if len(rlo.Name) != 0 {
		log.WithField("name", rlo.Name).Debug("Watching a single profile")
		var err error
		if strings.HasPrefix(rlo.Name, conversion.NamespaceProfileNamePrefix) {
			// Namespace-backed profile: only the namespace watch is needed.
			watchSA = false
			ns, err = c.ProfileNameToNamespace(rlo.Name)
			if err != nil {
				return nil, err
			}
			opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", ns).String()
		} else if strings.HasPrefix(rlo.Name, conversion.ServiceAccountProfileNamePrefix) {
			// ServiceAccount-backed profile: only the SA watch is needed.
			watchNS = false
			ns, sa, err = c.ProfileNameToServiceAccount(rlo.Name)
			if err != nil {
				return nil, err
			}
			opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", sa).String()
		} else {
			return nil, fmt.Errorf("Unsupported prefix for resource name: %s", rlo.Name)
		}
	}

	// The profile revision combines both underlying revisions; split it so
	// each watch resumes from the right point.
	nsRev, saRev, err := c.SplitProfileRevision(revision)
	if err != nil {
		return nil, err
	}

	opts.ResourceVersion = nsRev
	// A fake (never-firing) watch stands in for whichever side is not watched.
	var nsWatch kwatch.Interface = kwatch.NewFake()
	if watchNS {
		log.Debugf("Watching namespace at revision %q", nsRev)
		nsWatch, err = c.clientSet.CoreV1().Namespaces().Watch(opts)
		if err != nil {
			return nil, K8sErrorToCalico(err, list)
		}
	}
	converter := func(r Resource) (*model.KVPair, error) {
		k8sNamespace, ok := r.(*kapiv1.Namespace)
		if !ok {
			return nil, errors.New("Profile conversion with incorrect k8s resource type")
		}
		return c.NamespaceToProfile(k8sNamespace)
	}
	nsWatcher := newK8sWatcherConverter(ctx, "Profile-NS", converter, nsWatch)

	// Watch all service accounts in relevant namespace(s)
	opts.ResourceVersion = saRev
	var saWatch kwatch.Interface = kwatch.NewFake()
	if watchSA {
		log.Debugf("Watching serviceAccount at revision %q", saRev)
		saWatch, err = c.clientSet.CoreV1().ServiceAccounts(ns).Watch(opts)
		if err != nil {
			// Stop the namespace watch so it is not leaked on this error path.
			nsWatch.Stop()
			return nil, K8sErrorToCalico(err, list)
		}
	}
	converterSA := func(r Resource) (*model.KVPair, error) {
		k8sServiceAccount, ok := r.(*kapiv1.ServiceAccount)
		if !ok {
			nsWatch.Stop()
			return nil, errors.New("Profile conversion with incorrect k8s resource type")
		}
		return c.ServiceAccountToProfile(k8sServiceAccount)
	}
	saWatcher := newK8sWatcherConverter(ctx, "Profile-SA", converterSA, saWatch)

	return newProfileWatcher(ctx, nsWatcher, saWatcher), nil
}
|
||||
|
||||
// newProfileWatcher combines the namespace and serviceaccount watchers into a
// single api.WatchInterface and starts the goroutine that merges their event
// streams into one result channel.
func newProfileWatcher(ctx context.Context, k8sWatchNS, k8sWatchSA api.WatchInterface) api.WatchInterface {
	// Derive a cancelable context so Stop() can terminate the merge loop.
	ctx, cancel := context.WithCancel(ctx)
	wc := &profileWatcher{
		k8sNSWatch: k8sWatchNS,
		k8sSAWatch: k8sWatchSA,
		context:    ctx,
		cancel:     cancel,
		resultChan: make(chan api.WatchEvent, resultsBufSize),
		Converter:  conversion.Converter{},
	}
	go wc.processProfileEvents()
	return wc
}
|
||||
|
||||
// profileWatcher merges the Namespace and ServiceAccount watch streams into a
// single Profile event stream, tracking both underlying resource versions so
// a combined revision can be stamped on each event.
type profileWatcher struct {
	conversion.Converter
	// converter is not assigned by newProfileWatcher in this file — presumably
	// unused here since events arrive pre-converted; verify before relying on it.
	converter ConvertK8sResourceToKVPair
	// Underlying converted watchers for Namespaces and ServiceAccounts.
	k8sNSWatch api.WatchInterface
	k8sSAWatch api.WatchInterface
	// Last seen resource versions, joined via JoinProfileRevisions.
	k8sNSRev string
	k8sSARev string
	// context/cancel control the lifetime of processProfileEvents.
	context context.Context
	cancel  context.CancelFunc
	// resultChan delivers merged events to the consumer.
	resultChan chan api.WatchEvent
	// terminated is set (atomically) when processProfileEvents exits.
	terminated uint32
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
// This calls through the context cancel function.
func (pw *profileWatcher) Stop() {
	// Cancel first so processProfileEvents observes ctx.Done(), then stop
	// both underlying watches.
	pw.cancel()
	pw.k8sNSWatch.Stop()
	pw.k8sSAWatch.Stop()
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
// The channel is closed when processProfileEvents terminates.
func (pw *profileWatcher) ResultChan() <-chan api.WatchEvent {
	return pw.resultChan
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
|
||||
func (pw *profileWatcher) HasTerminated() bool {
|
||||
terminated := atomic.LoadUint32(&pw.terminated) != 0
|
||||
|
||||
if pw.k8sNSWatch != nil {
|
||||
terminated = terminated && pw.k8sNSWatch.HasTerminated()
|
||||
}
|
||||
if pw.k8sSAWatch != nil {
|
||||
terminated = terminated && pw.k8sSAWatch.HasTerminated()
|
||||
}
|
||||
|
||||
return terminated
|
||||
}
|
||||
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
// backend KVPs.  Runs until either watch channel terminates or the context is
// canceled; on exit it stops both watches, closes the result channel, and
// marks the watcher terminated.
func (pw *profileWatcher) processProfileEvents() {
	log.Debug("Watcher process started for profile.")
	defer func() {
		log.Debug("Profile watcher process terminated")
		pw.Stop()
		close(pw.resultChan)
		atomic.AddUint32(&pw.terminated, 1)
	}()

	for {
		var ok bool
		var e api.WatchEvent
		// Tracks which stream produced the event, so the matching resource
		// version field is updated below.
		var isNsEvent bool
		select {
		case e, ok = <-pw.k8sNSWatch.ResultChan():
			if !ok {
				// We shouldn't get a closed channel without first getting a terminating error,
				// so write a warning log and convert to a termination error.
				log.Warn("Profile, namespace watch channel closed.")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile namespace watch channel closed."),
					},
				}
			}
			log.Debug("Processing Namespace event")
			isNsEvent = true

		case e, ok = <-pw.k8sSAWatch.ResultChan():
			if !ok {
				// We shouldn't get a closed channel without first getting a terminating error,
				// so write a warning log and convert to a termination error.
				log.Warn("Profile, serviceaccount watch channel closed.")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile serviceaccount watch channel closed."),
					},
				}
			}
			log.Debug("Processing ServiceAccount event")
			isNsEvent = false

		case <-pw.context.Done(): //user cancel
			log.Debug("Process watcher done event in kdd client")
			return
		}

		// Update the resource version of the Object in the watcher. The version returned on a watch
		// event needs to be such that the Watch client can resume watching when a watch fails.
		// The watch client expects a slash separated list of resource versions in the format
		// <NS Revision/SA Revision>.
		var value interface{}
		switch e.Type {
		case api.WatchModified, api.WatchAdded:
			value = e.New.Value
		case api.WatchDeleted:
			value = e.Old.Value
		}

		if value != nil {
			oma, ok := value.(metav1.ObjectMetaAccessor)

			if !ok {
				log.WithField("event", e).Error(
					"Resource returned from watch does not implement ObjectMetaAccessor interface")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile value does not implement ObjectMetaAccessor interface."),
					},
				}
			} else {
				// Record the originating stream's revision, then stamp the
				// event with the joined <NS>/<SA> revision.
				if isNsEvent {
					pw.k8sNSRev = oma.GetObjectMeta().GetResourceVersion()
				} else {
					pw.k8sSARev = oma.GetObjectMeta().GetResourceVersion()
				}
				oma.GetObjectMeta().SetResourceVersion(pw.JoinProfileRevisions(pw.k8sNSRev, pw.k8sSARev))
			}
		} else if e.Error == nil {
			log.WithField("event", e).Warning("Event without error or value")
		}

		// Send the processed event.
		select {
		case pw.resultChan <- e:
			// If this is an error event. check to see if it's a terminating one.
			// If so, terminate this watcher.
			if e.Type == api.WatchError {
				log.WithError(e.Error).Debug("Kubernetes event converted to backend watcher error event")
				if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
					log.Debug("Watch terminated event")
					return
				}
			}

		case <-pw.context.Done():
			log.Debug("Process watcher done event during watch event in kdd client")
			return
		}
	}
}
|
||||
196
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/resources.go
generated
vendored
196
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/resources.go
generated
vendored
@@ -1,196 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
const (
	// Annotation keys used to round-trip Calico metadata through Kubernetes
	// resources that cannot store it natively.
	labelsAnnotation      = "projectcalico.org/labels"
	annotationsAnnotation = "projectcalico.org/annotations"
	metadataAnnotation    = "projectcalico.org/metadata"
)
|
||||
|
||||
// Resource is the interface that all Kubernetes and Calico resources implement.
type Resource interface {
	runtime.Object
	metav1.ObjectMetaAccessor
}

// ResourceList is the interface that all Kubernetes and Calico resource lists implement.
type ResourceList interface {
	runtime.Object
	metav1.ListMetaAccessor
}

// ConvertK8sResourceToKVPair is the function signature for conversion functions
// that convert a K8s resource to a KVPair equivalent.
type ConvertK8sResourceToKVPair func(Resource) (*model.KVPair, error)
|
||||
|
||||
// Store Calico Metadata in the k8s resource annotations for non-CRD backed resources.
|
||||
// Currently this just stores Annotations and Labels and drops all other metadata
|
||||
// attributes.
|
||||
func SetK8sAnnotationsFromCalicoMetadata(k8sRes Resource, calicoRes Resource) {
|
||||
a := k8sRes.GetObjectMeta().GetAnnotations()
|
||||
if a == nil {
|
||||
a = make(map[string]string)
|
||||
}
|
||||
if labels := calicoRes.GetObjectMeta().GetLabels(); len(labels) > 0 {
|
||||
if lann, err := json.Marshal(labels); err != nil {
|
||||
log.WithError(err).Warning("unable to store labels as an annotation")
|
||||
} else {
|
||||
a[labelsAnnotation] = string(lann)
|
||||
}
|
||||
} else {
|
||||
// There are no Calico labels - nil out the k8s res.
|
||||
delete(a, labelsAnnotation)
|
||||
}
|
||||
if annotations := calicoRes.GetObjectMeta().GetAnnotations(); len(annotations) > 0 {
|
||||
if aann, err := json.Marshal(annotations); err != nil {
|
||||
log.WithError(err).Warning("unable to store annotations as an annotation")
|
||||
} else {
|
||||
a[annotationsAnnotation] = string(aann)
|
||||
}
|
||||
} else {
|
||||
// There are no Calico annotations - nil out the k8s res.
|
||||
delete(a, annotationsAnnotation)
|
||||
}
|
||||
k8sRes.GetObjectMeta().SetAnnotations(a)
|
||||
}
|
||||
|
||||
// Extract the Calico resource Metadata from the k8s resource annotations for non-CRD
|
||||
// backed resources. This extracts the Annotations and Labels stored as a annotation,
|
||||
// and fills in the CreationTimestamp and UID from the k8s resource.
|
||||
func SetCalicoMetadataFromK8sAnnotations(calicoRes Resource, k8sRes Resource) {
|
||||
com := calicoRes.GetObjectMeta()
|
||||
kom := k8sRes.GetObjectMeta()
|
||||
com.SetResourceVersion(kom.GetResourceVersion())
|
||||
com.SetCreationTimestamp(kom.GetCreationTimestamp())
|
||||
com.SetUID(kom.GetUID())
|
||||
a := kom.GetAnnotations()
|
||||
if a == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if lann, ok := a[labelsAnnotation]; ok {
|
||||
var labels map[string]string
|
||||
if err := json.Unmarshal([]byte(lann), &labels); err != nil {
|
||||
log.WithError(err).Warning("unable to parse labels annotation")
|
||||
} else {
|
||||
com.SetLabels(labels)
|
||||
}
|
||||
}
|
||||
if aann, ok := a[annotationsAnnotation]; ok {
|
||||
var annotations map[string]string
|
||||
if err := json.Unmarshal([]byte(aann), &annotations); err != nil {
|
||||
log.WithError(err).Warning("unable to parse annotations annotation")
|
||||
} else {
|
||||
com.SetAnnotations(annotations)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertCalicoResourceToK8sResource stores Calico metadata in the k8s resource
// annotations for CRD backed resources. All ObjectMeta attributes except Name,
// Namespace, ResourceVersion, Labels and Annotations are serialized into a
// single "projectcalico.org/metadata" annotation. The input resource is not
// modified; a deep copy is returned.
func ConvertCalicoResourceToK8sResource(resIn Resource) (Resource, error) {
	rom := resIn.GetObjectMeta()

	// Make sure to remove data that is passed to Kubernetes so it is not duplicated in
	// the annotations.
	romCopy := &metav1.ObjectMeta{}
	rom.(*metav1.ObjectMeta).DeepCopyInto(romCopy)
	romCopy.Name = ""
	romCopy.Namespace = ""
	romCopy.ResourceVersion = ""
	romCopy.Labels = nil
	romCopy.Annotations = nil

	// Marshal the data and store the json representation in the annotations.
	metadataBytes, err := json.Marshal(romCopy)
	if err != nil {
		return nil, err
	}
	annotations := rom.GetAnnotations()
	if len(annotations) == 0 {
		annotations = make(map[string]string)
	}
	annotations[metadataAnnotation] = string(metadataBytes)

	// Make sure to clear out all of the Calico Metadata out of the ObjectMeta except for
	// Name, Namespace, ResourceVersion, Labels, and Annotations. Annotations is already
	// copied so it can be set separately.
	meta := &metav1.ObjectMeta{}
	meta.Name = rom.GetName()
	meta.Namespace = rom.GetNamespace()
	meta.ResourceVersion = rom.GetResourceVersion()
	meta.Labels = rom.GetLabels()
	meta.UID = rom.GetUID()

	// Work on a deep copy so the caller's resource is left untouched.
	resOut := resIn.DeepCopyObject().(Resource)
	romOut := resOut.GetObjectMeta()
	meta.DeepCopyInto(romOut.(*metav1.ObjectMeta))
	romOut.SetAnnotations(annotations)

	return resOut, nil
}
|
||||
|
||||
// ConvertK8sResourceToCalicoResource retrieves all of the Calico metadata from
// the k8s resource annotations for CRD backed resources. It removes the
// "projectcalico.org/metadata" annotation once it has been read, and overwrites
// the resource's ObjectMeta in place with the recovered Calico metadata
// (preserving Name, Namespace, ResourceVersion, Labels, Annotations and UID
// from the k8s resource).
func ConvertK8sResourceToCalicoResource(res Resource) error {
	rom := res.GetObjectMeta()
	annotations := rom.GetAnnotations()
	if len(annotations) == 0 {
		// Make no changes if there are no annotations to read Calico Metadata out of.
		return nil
	}
	if _, ok := annotations[metadataAnnotation]; !ok {
		// No changes if there are no annotations stored on the Resource.
		return nil
	}

	meta := &metav1.ObjectMeta{}
	err := json.Unmarshal([]byte(annotations[metadataAnnotation]), meta)
	if err != nil {
		return err
	}

	// Clear out the annotations
	delete(annotations, metadataAnnotation)
	if len(annotations) == 0 {
		annotations = nil
	}

	// Manually write in the data not stored in the annotations: Name, Namespace, ResourceVersion,
	// Labels, and Annotations so that they do not get overwritten.
	meta.Name = rom.GetName()
	meta.Namespace = rom.GetNamespace()
	meta.ResourceVersion = rom.GetResourceVersion()
	meta.Labels = rom.GetLabels()
	meta.Annotations = annotations
	meta.UID = rom.GetUID()

	// Overwrite the K8s metadata with the Calico metadata.
	meta.DeepCopyInto(rom.(*metav1.ObjectMeta))

	return nil
}
|
||||
190
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/watcher.go
generated
vendored
190
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/watcher.go
generated
vendored
@@ -1,190 +0,0 @@
|
||||
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
resultsBufSize = 100
|
||||
)
|
||||
|
||||
func newK8sWatcherConverter(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
converter ConvertK8sResourceToKVPair,
|
||||
k8sWatch kwatch.Interface,
|
||||
) api.WatchInterface {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
wc := &k8sWatcherConverter{
|
||||
logCxt: logrus.WithField("resource", name),
|
||||
converter: converter,
|
||||
k8sWatch: k8sWatch,
|
||||
context: ctx,
|
||||
cancel: cancel,
|
||||
resultChan: make(chan api.WatchEvent, resultsBufSize),
|
||||
}
|
||||
go wc.processK8sEvents()
|
||||
return wc
|
||||
}
|
||||
|
||||
// k8sWatcherConverter bridges a raw Kubernetes watch (k8sWatch) to the backend
// watch API: events read from k8sWatch are converted by converter and delivered
// on resultChan.
type k8sWatcherConverter struct {
	logCxt     *logrus.Entry
	converter  ConvertK8sResourceToKVPair
	k8sWatch   kwatch.Interface
	context    context.Context
	cancel     context.CancelFunc
	resultChan chan api.WatchEvent
	// terminated is incremented (atomically) once the processing goroutine exits.
	terminated uint32
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
// This calls through to the context cancel function (stopping the processing
// goroutine) and then stops the underlying Kubernetes watch.
func (crw *k8sWatcherConverter) Stop() {
	crw.cancel()
	crw.k8sWatch.Stop()
}
|
||||
|
||||
// ResultChan returns the channel used to receive converted WatchEvents. The
// channel is closed when the processing goroutine exits.
func (crw *k8sWatcherConverter) ResultChan() <-chan api.WatchEvent {
	return crw.resultChan
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination
// processing (i.e. the processing goroutine has exited and the result channel
// has been closed).
func (crw *k8sWatcherConverter) HasTerminated() bool {
	return atomic.LoadUint32(&crw.terminated) != 0
}
|
||||
|
||||
// processK8sEvents is the loop that processes the event stream from the
// underlying k8s Watcher and converts the events to backend KVPs. It exits on
// context cancellation or on a terminating error event, and on exit stops the
// watch, closes the result channel and marks the watcher terminated.
func (crw *k8sWatcherConverter) processK8sEvents() {
	crw.logCxt.Debug("Kubernetes watcher/converter started")
	defer func() {
		crw.logCxt.Debug("Kubernetes watcher/converter stopped, closing result channel")
		crw.Stop()
		close(crw.resultChan)
		atomic.AddUint32(&crw.terminated, 1)
	}()

	for {
		select {
		case event, ok := <-crw.k8sWatch.ResultChan():
			var e *api.WatchEvent
			if !ok {
				// The channel is closed so send a terminating watcher event indicating the watch was
				// closed by the remote.
				crw.logCxt.Debug("Watcher terminated by remote")
				e = &api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						Err:            fmt.Errorf("terminating error event from Kubernetes watcher: closed by remote"),
						ClosedByRemote: true,
					},
				}
			} else {
				// We have a valid event, so convert it.
				e = crw.convertEvent(event)
				if e == nil {
					// A nil conversion result means the event should be swallowed.
					crw.logCxt.Debug("Event converted to a no-op")
					continue
				}
			}

			// Forward the converted event, unless the caller cancels first.
			select {
			case crw.resultChan <- *e:
				crw.logCxt.Debug("Kubernetes event converted and sent to backend watcher")

				// If this is an error event, check to see if it's a terminating one (the
				// convertEvent method will decide that). If so, terminate this watcher.
				if e.Type == api.WatchError {
					crw.logCxt.WithError(e.Error).Debug("Watch event was an error event type")
					if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
						crw.logCxt.Debug("Watch event indicates a terminated watcher")
						return
					}
				}
			case <-crw.context.Done():
				crw.logCxt.Debug("Process watcher done event during watch event in kdd client")
				return
			}
		case <-crw.context.Done(): // user cancel
			crw.logCxt.Debug("Process watcher done event in kdd client")
			return
		}
	}
}
|
||||
|
||||
// convertEvent converts a Kubernetes Watch event into the equivalent Calico backend
|
||||
// client watch event.
|
||||
func (crw *k8sWatcherConverter) convertEvent(kevent kwatch.Event) *api.WatchEvent {
|
||||
var kvp *model.KVPair
|
||||
var err error
|
||||
if kevent.Type != kwatch.Error {
|
||||
k8sRes := kevent.Object.(Resource)
|
||||
kvp, err = crw.converter(k8sRes)
|
||||
if err != nil {
|
||||
crw.logCxt.WithError(err).Warning("Error converting Kubernetes resource to Calico resource")
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: err,
|
||||
}
|
||||
}
|
||||
if kvp == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
switch kevent.Type {
|
||||
case kwatch.Error:
|
||||
// An error directly from the k8s watcher is a terminating event.
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
Err: fmt.Errorf("terminating error event from Kubernetes watcher: %v", kevent.Object),
|
||||
},
|
||||
}
|
||||
case kwatch.Deleted:
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchDeleted,
|
||||
Old: kvp,
|
||||
}
|
||||
case kwatch.Added:
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchAdded,
|
||||
New: kvp,
|
||||
}
|
||||
case kwatch.Modified:
|
||||
// In KDD we don't have access to the previous settings, so just set the current settings.
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchModified,
|
||||
New: kvp,
|
||||
}
|
||||
default:
|
||||
return &api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: fmt.Errorf("unhandled Kubernetes watcher event type: %v", kevent.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
260
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/workloadendpoint.go
generated
vendored
260
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/workloadendpoint.go
generated
vendored
@@ -1,260 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func NewWorkloadEndpointClient(c *kubernetes.Clientset) K8sResourceClient {
|
||||
return &WorkloadEndpointClient{
|
||||
clientSet: c,
|
||||
converter: conversion.Converter{},
|
||||
}
|
||||
}
|
||||
|
||||
// WorkloadEndpointClient implements the api.Client interface for
// WorkloadEndpoints, which are backed by Kubernetes Pods rather than CRDs.
type WorkloadEndpointClient struct {
	clientSet *kubernetes.Clientset
	// converter maps between Pods and WorkloadEndpoints.
	converter conversion.Converter
}
|
||||
|
||||
// Create handles a Create request for a WorkloadEndpoint by patching the
// backing Pod with the calculated IP (the Pod itself is owned by Kubernetes).
func (c *WorkloadEndpointClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Create request on WorkloadEndpoint type")
	// As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated.
	// This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time:
	// https://github.com/kubernetes/kubernetes/issues/39113.
	//
	// Note: it's a bit odd to do this in the Create, but the CNI plugin uses CreateOrUpdate(). Doing it
	// here makes sure that, if the update fails: we retry here, and, we don't report success without
	// making the patch.
	return c.patchPodIP(ctx, kvp)
}
|
||||
|
||||
// Update handles an Update request for a WorkloadEndpoint by patching the
// backing Pod with the calculated IP, mirroring Create.
func (c *WorkloadEndpointClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Update request on WorkloadEndpoint type")
	// As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated.
	// This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time:
	// https://github.com/kubernetes/kubernetes/issues/39113.
	return c.patchPodIP(ctx, kvp)
}
|
||||
|
||||
// DeleteKVP delegates to Delete using the identifiers carried in the KVPair.
// Since Delete is not supported for WorkloadEndpoints, this always returns an
// ErrorOperationNotSupported.
func (c *WorkloadEndpointClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
func (c *WorkloadEndpointClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on WorkloadEndpoint type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
// patchPodIP PATCHes the Kubernetes Pod associated with the given KVPair with the IP address it contains.
// This is a no-op if there is no IP address.
//
// We store the IP address in an annotation because patching the PodIP directly races with changes that
// kubelet makes so kubelet can undo our changes.
func (c *WorkloadEndpointClient) patchPodIP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	ips := kvp.Value.(*apiv3.WorkloadEndpoint).Spec.IPNetworks
	if len(ips) == 0 {
		// Nothing to patch - return the input KVPair unchanged.
		return kvp, nil
	}

	log.Debugf("PATCHing pod with IP: %v", ips[0])
	wepID, err := c.converter.ParseWorkloadEndpointName(kvp.Key.(model.ResourceKey).Name)
	if err != nil {
		return nil, err
	}
	if wepID.Pod == "" {
		// The WEP name did not encode a Pod name, so we cannot locate the Pod to patch.
		return nil, cerrors.ErrorInsufficientIdentifiers{Name: kvp.Key.(model.ResourceKey).Name}
	}
	// Write the IP address into an annotation. This generates an event more quickly than
	// waiting for kubelet to update the Status.PodIP field.
	ns := kvp.Key.(model.ResourceKey).Namespace
	patch, err := calculateAnnotationPatch(conversion.AnnotationPodIP, ips[0])
	if err != nil {
		log.WithError(err).Error("Failed to calculate Pod patch.")
		return nil, err
	}
	// "status" selects the Pod status subresource for the strategic-merge patch.
	pod, err := c.clientSet.CoreV1().Pods(ns).Patch(wepID.Pod, types.StrategicMergePatchType, patch, "status")
	if err != nil {
		return nil, K8sErrorToCalico(err, kvp.Key)
	}
	log.Debugf("Successfully PATCHed pod to add podIP annotation: %+v", pod)
	// Convert the updated Pod back into a WorkloadEndpoint KVPair for the caller.
	return c.converter.PodToWorkloadEndpoint(pod)
}
|
||||
|
||||
const annotationPatchTemplate = `{"metadata": {"annotations": {%s: %s}}}`

// calculateAnnotationPatch renders a strategic-merge-patch body that sets a
// single annotation. The key and value are JSON-marshaled so that all quoting
// and escaping is handled correctly.
func calculateAnnotationPatch(name, value string) ([]byte, error) {
	escapedName, err := json.Marshal(name)
	if err != nil {
		return nil, err
	}
	escapedValue, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf(annotationPatchTemplate, escapedName, escapedValue)), nil
}
|
||||
|
||||
// Get retrieves the WorkloadEndpoint identified by key by fetching the backing
// Pod and converting it. Returns ErrorResourceDoesNotExist if the name does not
// encode a Pod or the Pod is not a valid Calico workload endpoint.
func (c *WorkloadEndpointClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on WorkloadEndpoint type")
	k := key.(model.ResourceKey)

	// Parse resource name so we can get the podName.
	wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name)
	if err != nil {
		return nil, err
	}
	if wepID.Pod == "" {
		return nil, cerrors.ErrorResourceDoesNotExist{
			Identifier: key,
			Err:        errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"),
		}
	}

	pod, err := c.clientSet.CoreV1().Pods(k.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, k)
	}

	// Decide if this pod should be displayed.
	if !c.converter.IsValidCalicoWorkloadEndpoint(pod) {
		return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
	}
	return c.converter.PodToWorkloadEndpoint(pod)
}
|
||||
|
||||
// List returns the WorkloadEndpoints matching the supplied list options. When a
// name is supplied this reduces to an exact Get (with a not-found result mapped
// to an empty list); otherwise all Pods in the namespace are enumerated and
// each valid Calico workload endpoint is converted.
func (c *WorkloadEndpointClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	log.Debug("Received List request on WorkloadEndpoint type")
	l := list.(model.ResourceListOptions)

	// If a workload is provided, we can do an exact lookup of this
	// workload endpoint.
	if l.Name != "" {
		kvp, err := c.Get(ctx, model.ResourceKey{
			Name:      l.Name,
			Namespace: l.Namespace,
			Kind:      l.Kind,
		}, revision)
		if err != nil {
			switch err.(type) {
			// Return empty slice of KVPair if the object doesn't exist, return the error otherwise.
			case cerrors.ErrorResourceDoesNotExist:
				return &model.KVPairList{
					KVPairs:  []*model.KVPair{},
					Revision: revision,
				}, nil
			default:
				return nil, err
			}
		}

		return &model.KVPairList{
			KVPairs:  []*model.KVPair{kvp},
			Revision: revision,
		}, nil
	}

	// Otherwise, enumerate all pods in a namespace.
	pods, err := c.clientSet.CoreV1().Pods(l.Namespace).List(metav1.ListOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, l)
	}

	// For each Pod, return a workload endpoint.
	ret := []*model.KVPair{}
	for _, pod := range pods.Items {
		// Decide if this pod should be included.
		if !c.converter.IsValidCalicoWorkloadEndpoint(&pod) {
			continue
		}

		kvp, err := c.converter.PodToWorkloadEndpoint(&pod)
		if err != nil {
			return nil, err
		}
		ret = append(ret, kvp)
	}
	return &model.KVPairList{
		KVPairs:  ret,
		Revision: revision,
	}, nil
}
|
||||
|
||||
// EnsureInitialized is a no-op for WorkloadEndpoints: they are backed by Pods,
// so there is no datastore structure to initialize.
func (c *WorkloadEndpointClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Watch starts a watch over the Pods backing WorkloadEndpoints, wrapped in a
// converter that emits WorkloadEndpoint KVPairs. Watching a single
// WorkloadEndpoint requires a namespace and is implemented via a field
// selector on the Pod name.
func (c *WorkloadEndpointClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	// Build watch options to pass to k8s.
	opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
	rlo, ok := list.(model.ResourceListOptions)
	if !ok {
		return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
	}
	if len(rlo.Name) != 0 {
		if len(rlo.Namespace) == 0 {
			return nil, errors.New("cannot watch a specific WorkloadEndpoint without a namespace")
		}
		// We've been asked to watch a specific workloadendpoint
		wepids, err := c.converter.ParseWorkloadEndpointName(rlo.Name)
		if err != nil {
			return nil, err
		}
		log.WithField("name", wepids.Pod).Debug("Watching a single workloadendpoint")
		opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", wepids.Pod).String()
	}

	ns := list.(model.ResourceListOptions).Namespace
	k8sWatch, err := c.clientSet.CoreV1().Pods(ns).Watch(opts)
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	converter := func(r Resource) (*model.KVPair, error) {
		k8sPod, ok := r.(*kapiv1.Pod)
		if !ok {
			return nil, errors.New("Pod conversion with incorrect k8s resource type")
		}
		if !c.converter.IsValidCalicoWorkloadEndpoint(k8sPod) {
			// If this is not a valid Calico workload endpoint then don't return in the watch.
			// Returning a nil KVP and a nil error swallows the event.
			return nil, nil
		}
		return c.converter.PodToWorkloadEndpoint(k8sPod)
	}
	return newK8sWatcherConverter(ctx, "Pod", converter, k8sWatch), nil
}
|
||||
155
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/bgpconfig.go
generated
vendored
155
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/bgpconfig.go
generated
vendored
@@ -1,155 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// BGPConfigurationInterface has methods to work with BGPConfiguration
// resources: standard CRUD operations plus List and Watch.
type BGPConfigurationInterface interface {
	Create(ctx context.Context, res *apiv3.BGPConfiguration, opts options.SetOptions) (*apiv3.BGPConfiguration, error)
	Update(ctx context.Context, res *apiv3.BGPConfiguration, opts options.SetOptions) (*apiv3.BGPConfiguration, error)
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.BGPConfiguration, error)
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.BGPConfiguration, error)
	List(ctx context.Context, opts options.ListOptions) (*apiv3.BGPConfigurationList, error)
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// bgpConfigurations implements BGPConfigurationInterface by delegating to the
// generic resource client held by the main client.
type bgpConfigurations struct {
	client client
}
|
||||
|
||||
// Create takes the representation of a BGPConfiguration and creates it.
|
||||
// Returns the stored representation of the BGPConfiguration, and an error
|
||||
// if there is any.
|
||||
func (r bgpConfigurations) Create(ctx context.Context, res *apiv3.BGPConfiguration, opts options.SetOptions) (*apiv3.BGPConfiguration, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := r.ValidateDefaultOnlyFields(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindBGPConfiguration, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a BGPConfiguration and updates it.
|
||||
// Returns the stored representation of the BGPConfiguration, and an error
|
||||
// if there is any.
|
||||
func (r bgpConfigurations) Update(ctx context.Context, res *apiv3.BGPConfiguration, opts options.SetOptions) (*apiv3.BGPConfiguration, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that NodeToNodeMeshEnabled and ASNumber are set. Can only be set on "default".
|
||||
if err := r.ValidateDefaultOnlyFields(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindBGPConfiguration, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the BGPConfiguration and deletes it. Returns an
|
||||
// error if one occurs.
|
||||
func (r bgpConfigurations) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.BGPConfiguration, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindBGPConfiguration, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the BGPConfiguration, and returns the corresponding
|
||||
// BGPConfiguration object, and an error if there is any.
|
||||
func (r bgpConfigurations) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.BGPConfiguration, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindBGPConfiguration, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of BGPConfiguration objects that match the supplied options.
|
||||
func (r bgpConfigurations) List(ctx context.Context, opts options.ListOptions) (*apiv3.BGPConfigurationList, error) {
|
||||
res := &apiv3.BGPConfigurationList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindBGPConfiguration, apiv3.KindBGPConfigurationList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the BGPConfigurations that
// match the supplied options.
func (r bgpConfigurations) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
	return r.client.resources.Watch(ctx, opts, apiv3.KindBGPConfiguration, nil)
}
|
||||
|
||||
func (r bgpConfigurations) ValidateDefaultOnlyFields(res *apiv3.BGPConfiguration) error {
|
||||
errFields := []cerrors.ErroredField{}
|
||||
if res.ObjectMeta.GetName() != "default" {
|
||||
if res.Spec.NodeToNodeMeshEnabled != nil {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "BGPConfiguration.Spec.NodeToNodeMeshEnabled",
|
||||
Reason: "Cannot set nodeToNodeMeshEnabled on a non default BGP Configuration.",
|
||||
})
|
||||
}
|
||||
|
||||
if res.Spec.ASNumber != nil {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "BGPConfiguration.Spec.ASNumber",
|
||||
Reason: "Cannot set ASNumber on a non default BGP Configuration.",
|
||||
})
|
||||
}
|
||||
|
||||
if res.Spec.ServiceExternalIPs != nil && len(res.Spec.ServiceExternalIPs) > 0 {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "BGPConfiguration.Spec.ServiceExternalIPs",
|
||||
Reason: "Cannot set ServiceExternalIPs on a non default BGP Configuration.",
|
||||
})
|
||||
}
|
||||
|
||||
if res.Spec.ServiceClusterIPs != nil && len(res.Spec.ServiceClusterIPs) > 0 {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "BGPConfiguration.Spec.ServiceClusterIPs",
|
||||
Reason: "Cannot set ServiceClusterIPs on a non default BGP Configuration.",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(errFields) > 0 {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: errFields,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/bgppeer.go
generated
vendored
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/bgppeer.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// BGPPeerInterface has methods to work with BGPPeer resources.
|
||||
type BGPPeerInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.BGPPeer, opts options.SetOptions) (*apiv3.BGPPeer, error)
|
||||
Update(ctx context.Context, res *apiv3.BGPPeer, opts options.SetOptions) (*apiv3.BGPPeer, error)
|
||||
Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.BGPPeer, error)
|
||||
Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.BGPPeer, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.BGPPeerList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// bgpPeers implements BGPPeerInterface
|
||||
type bgpPeers struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a BGPPeer and creates it. Returns the stored
|
||||
// representation of the BGPPeer, and an error, if there is any.
|
||||
func (r bgpPeers) Create(ctx context.Context, res *apiv3.BGPPeer, opts options.SetOptions) (*apiv3.BGPPeer, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindBGPPeer, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPPeer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a BGPPeer and updates it. Returns the stored
|
||||
// representation of the BGPPeer, and an error, if there is any.
|
||||
func (r bgpPeers) Update(ctx context.Context, res *apiv3.BGPPeer, opts options.SetOptions) (*apiv3.BGPPeer, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindBGPPeer, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPPeer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the BGPPeer and deletes it. Returns an error if one occurs.
|
||||
func (r bgpPeers) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.BGPPeer, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindBGPPeer, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPPeer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the BGPPeer, and returns the corresponding BGPPeer object,
|
||||
// and an error if there is any.
|
||||
func (r bgpPeers) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.BGPPeer, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindBGPPeer, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.BGPPeer), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of BGPPeer objects that match the supplied options.
|
||||
func (r bgpPeers) List(ctx context.Context, opts options.ListOptions) (*apiv3.BGPPeerList, error) {
|
||||
res := &apiv3.BGPPeerList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindBGPPeer, apiv3.KindBGPPeerList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the BGPPeers that match the
|
||||
// supplied options.
|
||||
func (r bgpPeers) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindBGPPeer, nil)
|
||||
}
|
||||
339
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/client.go
generated
vendored
339
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/client.go
generated
vendored
@@ -1,339 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
uuid "github.com/satori/go.uuid"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/apiconfig"
|
||||
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend"
|
||||
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/ipam"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
"github.com/projectcalico/libcalico-go/lib/set"
|
||||
)
|
||||
|
||||
// client implements the client.Interface.
|
||||
type client struct {
|
||||
// The config we were created with.
|
||||
config apiconfig.CalicoAPIConfig
|
||||
|
||||
// The backend client.
|
||||
backend bapi.Client
|
||||
|
||||
// The resources client used internally.
|
||||
resources resourceInterface
|
||||
}
|
||||
|
||||
// New returns a connected client. The ClientConfig can either be created explicitly,
|
||||
// or can be loaded from a config file or environment variables using the LoadClientConfig() function.
|
||||
func New(config apiconfig.CalicoAPIConfig) (Interface, error) {
|
||||
be, err := backend.NewClient(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client{
|
||||
config: config,
|
||||
backend: be,
|
||||
resources: &resources{backend: be},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewFromEnv loads the config from ENV variables and returns a connected client.
|
||||
func NewFromEnv() (Interface, error) {
|
||||
|
||||
config, err := apiconfig.LoadClientConfigFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return New(*config)
|
||||
}
|
||||
|
||||
// Nodes returns an interface for managing node resources.
|
||||
func (c client) Nodes() NodeInterface {
|
||||
return nodes{client: c}
|
||||
}
|
||||
|
||||
// NetworkPolicies returns an interface for managing policy resources.
|
||||
func (c client) NetworkPolicies() NetworkPolicyInterface {
|
||||
return networkPolicies{client: c}
|
||||
}
|
||||
|
||||
// GlobalNetworkPolicies returns an interface for managing policy resources.
|
||||
func (c client) GlobalNetworkPolicies() GlobalNetworkPolicyInterface {
|
||||
return globalNetworkPolicies{client: c}
|
||||
}
|
||||
|
||||
// IPPools returns an interface for managing IP pool resources.
|
||||
func (c client) IPPools() IPPoolInterface {
|
||||
return ipPools{client: c}
|
||||
}
|
||||
|
||||
// Profiles returns an interface for managing profile resources.
|
||||
func (c client) Profiles() ProfileInterface {
|
||||
return profiles{client: c}
|
||||
}
|
||||
|
||||
// GlobalNetworkSets returns an interface for managing host endpoint resources.
|
||||
func (c client) GlobalNetworkSets() GlobalNetworkSetInterface {
|
||||
return globalNetworkSets{client: c}
|
||||
}
|
||||
|
||||
// NetworkSets returns an interface for managing host endpoint resources.
|
||||
func (c client) NetworkSets() NetworkSetInterface {
|
||||
return networkSets{client: c}
|
||||
}
|
||||
|
||||
// HostEndpoints returns an interface for managing host endpoint resources.
|
||||
func (c client) HostEndpoints() HostEndpointInterface {
|
||||
return hostEndpoints{client: c}
|
||||
}
|
||||
|
||||
// WorkloadEndpoints returns an interface for managing workload endpoint resources.
|
||||
func (c client) WorkloadEndpoints() WorkloadEndpointInterface {
|
||||
return workloadEndpoints{client: c}
|
||||
}
|
||||
|
||||
// BGPPeers returns an interface for managing BGP peer resources.
|
||||
func (c client) BGPPeers() BGPPeerInterface {
|
||||
return bgpPeers{client: c}
|
||||
}
|
||||
|
||||
// IPAM returns an interface for managing IP address assignment and releasing.
|
||||
func (c client) IPAM() ipam.Interface {
|
||||
return ipam.NewIPAMClient(c.backend, poolAccessor{client: &c})
|
||||
}
|
||||
|
||||
// BGPConfigurations returns an interface for managing the BGP configuration resources.
|
||||
func (c client) BGPConfigurations() BGPConfigurationInterface {
|
||||
return bgpConfigurations{client: c}
|
||||
}
|
||||
|
||||
// FelixConfigurations returns an interface for managing the Felix configuration resources.
|
||||
func (c client) FelixConfigurations() FelixConfigurationInterface {
|
||||
return felixConfigurations{client: c}
|
||||
}
|
||||
|
||||
// ClusterInformation returns an interface for managing the cluster information resource.
|
||||
func (c client) ClusterInformation() ClusterInformationInterface {
|
||||
return clusterInformation{client: c}
|
||||
}
|
||||
|
||||
type poolAccessor struct {
|
||||
client *client
|
||||
}
|
||||
|
||||
func (p poolAccessor) GetEnabledPools(ipVersion int) ([]v3.IPPool, error) {
|
||||
return p.getPools(func(pool *v3.IPPool) bool {
|
||||
if pool.Spec.Disabled {
|
||||
log.Debugf("Skipping disabled IP pool (%s)", pool.Name)
|
||||
return false
|
||||
}
|
||||
if _, cidr, err := net.ParseCIDR(pool.Spec.CIDR); err == nil && cidr.Version() == ipVersion {
|
||||
log.Debugf("Adding pool (%s) to the IPPool list", cidr.String())
|
||||
return true
|
||||
} else if err != nil {
|
||||
log.Warnf("Failed to parse the IPPool: %s. Ignoring that IPPool", pool.Spec.CIDR)
|
||||
} else {
|
||||
log.Debugf("Ignoring IPPool: %s. IP version is different.", pool.Spec.CIDR)
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
func (p poolAccessor) getPools(filter func(pool *v3.IPPool) bool) ([]v3.IPPool, error) {
|
||||
pools, err := p.client.IPPools().List(context.Background(), options.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debugf("Got list of all IPPools: %v", pools)
|
||||
var filtered []v3.IPPool
|
||||
for _, pool := range pools.Items {
|
||||
if filter(&pool) {
|
||||
filtered = append(filtered, pool)
|
||||
}
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
func (p poolAccessor) GetAllPools() ([]v3.IPPool, error) {
|
||||
return p.getPools(func(pool *v3.IPPool) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// EnsureInitialized is used to ensure the backend datastore is correctly
|
||||
// initialized for use by Calico. This method may be called multiple times, and
|
||||
// will have no effect if the datastore is already correctly initialized.
|
||||
//
|
||||
// Most Calico deployment scenarios will automatically implicitly invoke this
|
||||
// method and so a general consumer of this API can assume that the datastore
|
||||
// is already initialized.
|
||||
func (c client) EnsureInitialized(ctx context.Context, calicoVersion, clusterType string) error {
|
||||
// Perform datastore specific initialization first.
|
||||
if err := c.backend.EnsureInitialized(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ensureClusterInformation(ctx, calicoVersion, clusterType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const globalClusterInfoName = "default"
|
||||
|
||||
// ensureClusterInformation ensures that the ClusterInformation fields i.e. ClusterType,
|
||||
// CalicoVersion and ClusterGUID are set. It creates/updates the ClusterInformation as needed.
|
||||
func (c client) ensureClusterInformation(ctx context.Context, calicoVersion, clusterType string) error {
|
||||
// Append "kdd" last if the datastoreType is 'kubernetes'.
|
||||
if c.config.Spec.DatastoreType == apiconfig.Kubernetes {
|
||||
// If clusterType is already set then append ",kdd" at the end.
|
||||
if clusterType != "" {
|
||||
// Trim the trailing ",", if any.
|
||||
clusterType = strings.TrimSuffix(clusterType, ",")
|
||||
// Append "kdd" very last thing in the list.
|
||||
clusterType = fmt.Sprintf("%s,%s", clusterType, "kdd")
|
||||
} else {
|
||||
clusterType = "kdd"
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
clusterInfo, err := c.ClusterInformation().Get(ctx, globalClusterInfoName, options.GetOptions{})
|
||||
if err != nil {
|
||||
// Create the default config if it doesn't already exist.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
|
||||
newClusterInfo := v3.NewClusterInformation()
|
||||
newClusterInfo.Name = globalClusterInfoName
|
||||
newClusterInfo.Spec.CalicoVersion = calicoVersion
|
||||
newClusterInfo.Spec.ClusterType = clusterType
|
||||
newClusterInfo.Spec.ClusterGUID = fmt.Sprintf("%s", hex.EncodeToString(uuid.NewV4().Bytes()))
|
||||
datastoreReady := true
|
||||
newClusterInfo.Spec.DatastoreReady = &datastoreReady
|
||||
_, err = c.ClusterInformation().Create(ctx, newClusterInfo, options.SetOptions{})
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
|
||||
log.Info("Failed to create global ClusterInformation; another node got there first.")
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
log.WithError(err).WithField("ClusterInformation", newClusterInfo).Errorf("Error creating cluster information config")
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.WithError(err).WithField("ClusterInformation", globalClusterInfoName).Errorf("Error getting cluster information config")
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
updateNeeded := false
|
||||
if calicoVersion != "" {
|
||||
// Only update the version if it's different from what we have.
|
||||
if clusterInfo.Spec.CalicoVersion != calicoVersion {
|
||||
clusterInfo.Spec.CalicoVersion = calicoVersion
|
||||
updateNeeded = true
|
||||
} else {
|
||||
log.WithField("CalicoVersion", clusterInfo.Spec.CalicoVersion).Debug("Calico version value already assigned")
|
||||
}
|
||||
}
|
||||
|
||||
if clusterInfo.Spec.ClusterGUID == "" {
|
||||
clusterInfo.Spec.ClusterGUID = fmt.Sprintf("%s", hex.EncodeToString(uuid.NewV4().Bytes()))
|
||||
updateNeeded = true
|
||||
} else {
|
||||
log.WithField("ClusterGUID", clusterInfo.Spec.ClusterGUID).Debug("Cluster GUID value already set")
|
||||
}
|
||||
|
||||
if clusterInfo.Spec.DatastoreReady == nil {
|
||||
// If the ready flag is nil, default it to true (but if it's explicitly false, leave
|
||||
// it as-is).
|
||||
datastoreReady := true
|
||||
clusterInfo.Spec.DatastoreReady = &datastoreReady
|
||||
updateNeeded = true
|
||||
} else {
|
||||
log.WithField("DatastoreReady", clusterInfo.Spec.DatastoreReady).Debug("DatastoreReady value already set")
|
||||
}
|
||||
|
||||
if clusterType != "" {
|
||||
if clusterInfo.Spec.ClusterType == "" {
|
||||
clusterInfo.Spec.ClusterType = clusterType
|
||||
updateNeeded = true
|
||||
|
||||
} else {
|
||||
allClusterTypes := strings.Split(clusterInfo.Spec.ClusterType, ",")
|
||||
existingClusterTypes := set.FromArray(allClusterTypes)
|
||||
localClusterTypes := strings.Split(clusterType, ",")
|
||||
|
||||
clusterTypeUpdateNeeded := false
|
||||
for _, lct := range localClusterTypes {
|
||||
if existingClusterTypes.Contains(lct) {
|
||||
continue
|
||||
}
|
||||
clusterTypeUpdateNeeded = true
|
||||
allClusterTypes = append(allClusterTypes, lct)
|
||||
}
|
||||
|
||||
if clusterTypeUpdateNeeded {
|
||||
clusterInfo.Spec.ClusterType = strings.Join(allClusterTypes, ",")
|
||||
updateNeeded = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if updateNeeded {
|
||||
_, err = c.ClusterInformation().Update(ctx, clusterInfo, options.SetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
|
||||
log.WithError(err).WithField("ClusterInformation", clusterInfo).Warning(
|
||||
"Conflict while updating cluster information, may retry")
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.WithError(err).WithField("ClusterInformation", clusterInfo).Errorf(
|
||||
"Error updating cluster information")
|
||||
return err
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Backend returns the backend client used by the v3 client. Not exposed on the main
|
||||
// client API, but available publicly for consumers that require access to the backend
|
||||
// client (e.g. for syncer support).
|
||||
func (c client) Backend() bapi.Client {
|
||||
return c.backend
|
||||
}
|
||||
108
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/clusterinfo.go
generated
vendored
108
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/clusterinfo.go
generated
vendored
@@ -1,108 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// ClusterInformationInterface has methods to work with ClusterInformation resources.
|
||||
type ClusterInformationInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.ClusterInformation, opts options.SetOptions) (*apiv3.ClusterInformation, error)
|
||||
Update(ctx context.Context, res *apiv3.ClusterInformation, opts options.SetOptions) (*apiv3.ClusterInformation, error)
|
||||
Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.ClusterInformation, error)
|
||||
Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.ClusterInformation, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.ClusterInformationList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// clusterInformation implements ClusterInformationInterface
|
||||
type clusterInformation struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a ClusterInformation and creates it.
|
||||
// Returns the stored representation of the ClusterInformation, and an error
|
||||
// if there is any.
|
||||
func (r clusterInformation) Create(ctx context.Context, res *apiv3.ClusterInformation, opts options.SetOptions) (*apiv3.ClusterInformation, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if res.ObjectMeta.GetName() != "default" {
|
||||
return nil, errors.New("Cannot create a Cluster Information resource with a name other than \"default\"")
|
||||
}
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindClusterInformation, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.ClusterInformation), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a ClusterInformation and updates it.
|
||||
// Returns the stored representation of the ClusterInformation, and an error
|
||||
// if there is any.
|
||||
func (r clusterInformation) Update(ctx context.Context, res *apiv3.ClusterInformation, opts options.SetOptions) (*apiv3.ClusterInformation, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindClusterInformation, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.ClusterInformation), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the ClusterInformation and deletes it. Returns an
|
||||
// error if one occurs.
|
||||
func (r clusterInformation) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.ClusterInformation, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindClusterInformation, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.ClusterInformation), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the ClusterInformation, and returns the corresponding
|
||||
// ClusterInformation object, and an error if there is any.
|
||||
func (r clusterInformation) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.ClusterInformation, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindClusterInformation, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.ClusterInformation), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of ClusterInformation objects that match the supplied options.
|
||||
func (r clusterInformation) List(ctx context.Context, opts options.ListOptions) (*apiv3.ClusterInformationList, error) {
|
||||
res := &apiv3.ClusterInformationList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindClusterInformation, apiv3.KindClusterInformationList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the ClusterInformation that
|
||||
// match the supplied options.
|
||||
func (r clusterInformation) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindClusterInformation, nil)
|
||||
}
|
||||
38
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/doc.go
generated
vendored
38
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/doc.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
Package clientv3 implements the northbound client used to manage Calico configuration.

This client is the main entry point for applications that are managing or querying
Calico configuration.

This client provides a typed interface for managing different resource types. The
definitions for each resource type are defined in the following package:

	github.com/projectcalico/libcalico-go/lib/api

The client has a number of methods that return interfaces for managing:
  - BGP Peer resources
  - Policy resources
  - IP Pool resources
  - Global network set resources
  - Host endpoint resources
  - Workload endpoint resources
  - Profile resources
  - IP Address Management (IPAM)

See the resource definitions at
http://docs.projectcalico.org/latest/reference/calicoctl/resources/ for details
about the set of management commands for each resource type.
*/
|
||||
package clientv3
|
||||
104
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/felixconfig.go
generated
vendored
104
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/felixconfig.go
generated
vendored
@@ -1,104 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// FelixConfigurationInterface has methods to work with FelixConfiguration resources.
|
||||
type FelixConfigurationInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.FelixConfiguration, opts options.SetOptions) (*apiv3.FelixConfiguration, error)
|
||||
Update(ctx context.Context, res *apiv3.FelixConfiguration, opts options.SetOptions) (*apiv3.FelixConfiguration, error)
|
||||
Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.FelixConfiguration, error)
|
||||
Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.FelixConfiguration, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.FelixConfigurationList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// felixConfigurations implements FelixConfigurationInterface
|
||||
type felixConfigurations struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a FelixConfiguration and creates it.
|
||||
// Returns the stored representation of the FelixConfiguration, and an error
|
||||
// if there is any.
|
||||
func (r felixConfigurations) Create(ctx context.Context, res *apiv3.FelixConfiguration, opts options.SetOptions) (*apiv3.FelixConfiguration, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindFelixConfiguration, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.FelixConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a FelixConfiguration and updates it.
|
||||
// Returns the stored representation of the FelixConfiguration, and an error
|
||||
// if there is any.
|
||||
func (r felixConfigurations) Update(ctx context.Context, res *apiv3.FelixConfiguration, opts options.SetOptions) (*apiv3.FelixConfiguration, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindFelixConfiguration, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.FelixConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the FelixConfiguration and deletes it. Returns an
|
||||
// error if one occurs.
|
||||
func (r felixConfigurations) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.FelixConfiguration, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindFelixConfiguration, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.FelixConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the FelixConfiguration, and returns the corresponding
|
||||
// FelixConfiguration object, and an error if there is any.
|
||||
func (r felixConfigurations) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.FelixConfiguration, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindFelixConfiguration, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.FelixConfiguration), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of FelixConfiguration objects that match the supplied options.
|
||||
func (r felixConfigurations) List(ctx context.Context, opts options.ListOptions) (*apiv3.FelixConfigurationList, error) {
|
||||
res := &apiv3.FelixConfigurationList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindFelixConfiguration, apiv3.KindFelixConfigurationList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the FelixConfiguration that
|
||||
// match the supplied options.
|
||||
func (r felixConfigurations) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindFelixConfiguration, nil)
|
||||
}
|
||||
206
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/globalnetworkpolicy.go
generated
vendored
206
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/globalnetworkpolicy.go
generated
vendored
@@ -1,206 +0,0 @@
|
||||
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// GlobalNetworkPolicyInterface has methods to work with GlobalNetworkPolicy resources.
type GlobalNetworkPolicyInterface interface {
	// Create stores a new GlobalNetworkPolicy and returns the stored representation.
	Create(ctx context.Context, res *apiv3.GlobalNetworkPolicy, opts options.SetOptions) (*apiv3.GlobalNetworkPolicy, error)
	// Update replaces an existing GlobalNetworkPolicy and returns the stored representation.
	Update(ctx context.Context, res *apiv3.GlobalNetworkPolicy, opts options.SetOptions) (*apiv3.GlobalNetworkPolicy, error)
	// Delete removes the named GlobalNetworkPolicy and returns the deleted resource.
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.GlobalNetworkPolicy, error)
	// Get returns the named GlobalNetworkPolicy.
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.GlobalNetworkPolicy, error)
	// List returns all GlobalNetworkPolicies matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.GlobalNetworkPolicyList, error)
	// Watch returns a watch over GlobalNetworkPolicies matching the supplied options.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// globalNetworkPolicies implements GlobalNetworkPolicyInterface
type globalNetworkPolicies struct {
	// client provides access to the backing resource and datastore operations.
	client client
}
|
||||
|
||||
// Create takes the representation of a GlobalNetworkPolicy and creates it. Returns the stored
|
||||
// representation of the GlobalNetworkPolicy, and an error, if there is any.
|
||||
func (r globalNetworkPolicies) Create(ctx context.Context, res *apiv3.GlobalNetworkPolicy, opts options.SetOptions) (*apiv3.GlobalNetworkPolicy, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
defaultPolicyTypesField(res.Spec.Ingress, res.Spec.Egress, &res.Spec.Types)
|
||||
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Properly prefix the name
|
||||
res.GetObjectMeta().SetName(convertPolicyNameForStorage(res.GetObjectMeta().GetName()))
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindGlobalNetworkPolicy, res)
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.GlobalNetworkPolicy), err
|
||||
}
|
||||
|
||||
// Remove the prefix out of the returned policy name.
|
||||
res.GetObjectMeta().SetName(convertPolicyNameFromStorage(res.GetObjectMeta().GetName()))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a GlobalNetworkPolicy and updates it. Returns the stored
|
||||
// representation of the GlobalNetworkPolicy, and an error, if there is any.
|
||||
func (r globalNetworkPolicies) Update(ctx context.Context, res *apiv3.GlobalNetworkPolicy, opts options.SetOptions) (*apiv3.GlobalNetworkPolicy, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
defaultPolicyTypesField(res.Spec.Ingress, res.Spec.Egress, &res.Spec.Types)
|
||||
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Properly prefix the name
|
||||
res.GetObjectMeta().SetName(convertPolicyNameForStorage(res.GetObjectMeta().GetName()))
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindGlobalNetworkPolicy, res)
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.GlobalNetworkPolicy), err
|
||||
}
|
||||
|
||||
// Remove the prefix out of the returned policy name.
|
||||
res.GetObjectMeta().SetName(convertPolicyNameFromStorage(res.GetObjectMeta().GetName()))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the GlobalNetworkPolicy and deletes it. Returns an error if one occurs.
|
||||
func (r globalNetworkPolicies) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.GlobalNetworkPolicy, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindGlobalNetworkPolicy, noNamespace, convertPolicyNameForStorage(name))
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.GlobalNetworkPolicy), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the GlobalNetworkPolicy, and returns the corresponding GlobalNetworkPolicy object,
|
||||
// and an error if there is any.
|
||||
func (r globalNetworkPolicies) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.GlobalNetworkPolicy, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindGlobalNetworkPolicy, noNamespace, convertPolicyNameForStorage(name))
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.GlobalNetworkPolicy), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of GlobalNetworkPolicy objects that match the supplied options.
|
||||
func (r globalNetworkPolicies) List(ctx context.Context, opts options.ListOptions) (*apiv3.GlobalNetworkPolicyList, error) {
|
||||
res := &apiv3.GlobalNetworkPolicyList{}
|
||||
// Add the name prefix if name is provided
|
||||
if opts.Name != "" {
|
||||
opts.Name = convertPolicyNameForStorage(opts.Name)
|
||||
}
|
||||
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindGlobalNetworkPolicy, apiv3.KindGlobalNetworkPolicyList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove the prefix off of each policy name
|
||||
for i, _ := range res.Items {
|
||||
name := res.Items[i].GetObjectMeta().GetName()
|
||||
res.Items[i].GetObjectMeta().SetName(convertPolicyNameFromStorage(name))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the globalNetworkPolicies that match the
|
||||
// supplied options.
|
||||
func (r globalNetworkPolicies) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
// Add the name prefix if name is provided
|
||||
if opts.Name != "" {
|
||||
opts.Name = convertPolicyNameForStorage(opts.Name)
|
||||
}
|
||||
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindGlobalNetworkPolicy, &policyConverter{})
|
||||
}
|
||||
|
||||
func defaultPolicyTypesField(ingressRules, egressRules []apiv3.Rule, types *[]apiv3.PolicyType) {
|
||||
if len(*types) == 0 {
|
||||
// Default the Types field according to what inbound and outbound rules are present
|
||||
// in the policy.
|
||||
if len(egressRules) == 0 {
|
||||
// Policy has no egress rules, so apply this policy to ingress only. (Note:
|
||||
// intentionally including the case where the policy also has no ingress
|
||||
// rules.)
|
||||
*types = []apiv3.PolicyType{apiv3.PolicyTypeIngress}
|
||||
} else if len(ingressRules) == 0 {
|
||||
// Policy has egress rules but no ingress rules, so apply this policy to
|
||||
// egress only.
|
||||
*types = []apiv3.PolicyType{apiv3.PolicyTypeEgress}
|
||||
} else {
|
||||
// Policy has both ingress and egress rules, so apply this policy to both
|
||||
// ingress and egress.
|
||||
*types = []apiv3.PolicyType{apiv3.PolicyTypeIngress, apiv3.PolicyTypeEgress}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// convertPolicyNameForStorage returns the name under which a policy is stored:
// names already carrying a "knp." or "ossg." prefix are kept as-is, anything
// else is placed in the "default." tier.
func convertPolicyNameForStorage(name string) string {
	for _, keep := range []string{"knp.", "ossg."} {
		if strings.HasPrefix(name, keep) {
			return name
		}
	}
	return "default." + name
}
|
||||
|
||||
// convertPolicyNameFromStorage reverses convertPolicyNameForStorage: "knp." and
// "ossg." prefixed names pass through unchanged, otherwise the leading tier
// segment (up to and including the first ".") is stripped.
func convertPolicyNameFromStorage(name string) string {
	for _, keep := range []string{"knp.", "ossg."} {
		if strings.HasPrefix(name, keep) {
			return name
		}
	}
	if i := strings.Index(name, "."); i >= 0 {
		return name[i+1:]
	}
	// No tier prefix present; return the name untouched.
	return name
}
|
||||
|
||||
type policyConverter struct{}
|
||||
|
||||
func (pc *policyConverter) Convert(r resource) resource {
|
||||
r.GetObjectMeta().SetName(convertPolicyNameFromStorage(r.GetObjectMeta().GetName()))
|
||||
return r
|
||||
}
|
||||
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/globalnetworkset.go
generated
vendored
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/globalnetworkset.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// GlobalNetworkSetInterface has methods to work with GlobalNetworkSet resources.
type GlobalNetworkSetInterface interface {
	// Create stores a new GlobalNetworkSet and returns the stored representation.
	Create(ctx context.Context, res *apiv3.GlobalNetworkSet, opts options.SetOptions) (*apiv3.GlobalNetworkSet, error)
	// Update replaces an existing GlobalNetworkSet and returns the stored representation.
	Update(ctx context.Context, res *apiv3.GlobalNetworkSet, opts options.SetOptions) (*apiv3.GlobalNetworkSet, error)
	// Delete removes the named GlobalNetworkSet and returns the deleted resource.
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.GlobalNetworkSet, error)
	// Get returns the named GlobalNetworkSet.
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.GlobalNetworkSet, error)
	// List returns all GlobalNetworkSets matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.GlobalNetworkSetList, error)
	// Watch returns a watch over GlobalNetworkSets matching the supplied options.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// globalNetworkSets implements GlobalNetworkSetInterface
type globalNetworkSets struct {
	// client provides access to the backing resource and datastore operations.
	client client
}
|
||||
|
||||
// Create takes the representation of a GlobalNetworkSet and creates it. Returns the stored
|
||||
// representation of the GlobalNetworkSet, and an error, if there is any.
|
||||
func (r globalNetworkSets) Create(ctx context.Context, res *apiv3.GlobalNetworkSet, opts options.SetOptions) (*apiv3.GlobalNetworkSet, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindGlobalNetworkSet, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.GlobalNetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a GlobalNetworkSet and updates it. Returns the stored
|
||||
// representation of the GlobalNetworkSet, and an error, if there is any.
|
||||
func (r globalNetworkSets) Update(ctx context.Context, res *apiv3.GlobalNetworkSet, opts options.SetOptions) (*apiv3.GlobalNetworkSet, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindGlobalNetworkSet, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.GlobalNetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the GlobalNetworkSet and deletes it. Returns an error if one occurs.
|
||||
func (r globalNetworkSets) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.GlobalNetworkSet, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindGlobalNetworkSet, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.GlobalNetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the GlobalNetworkSet, and returns the corresponding GlobalNetworkSet object,
|
||||
// and an error if there is any.
|
||||
func (r globalNetworkSets) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.GlobalNetworkSet, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindGlobalNetworkSet, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.GlobalNetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of GlobalNetworkSet objects that match the supplied options.
|
||||
func (r globalNetworkSets) List(ctx context.Context, opts options.ListOptions) (*apiv3.GlobalNetworkSetList, error) {
|
||||
res := &apiv3.GlobalNetworkSetList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindGlobalNetworkSet, apiv3.KindGlobalNetworkSetList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the GlobalNetworkSets that match the
|
||||
// supplied options.
|
||||
func (r globalNetworkSets) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindGlobalNetworkSet, nil)
|
||||
}
|
||||
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/hostendpoint.go
generated
vendored
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/hostendpoint.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// HostEndpointInterface has methods to work with HostEndpoint resources.
type HostEndpointInterface interface {
	// Create stores a new HostEndpoint and returns the stored representation.
	Create(ctx context.Context, res *apiv3.HostEndpoint, opts options.SetOptions) (*apiv3.HostEndpoint, error)
	// Update replaces an existing HostEndpoint and returns the stored representation.
	Update(ctx context.Context, res *apiv3.HostEndpoint, opts options.SetOptions) (*apiv3.HostEndpoint, error)
	// Delete removes the named HostEndpoint and returns the deleted resource.
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.HostEndpoint, error)
	// Get returns the named HostEndpoint.
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.HostEndpoint, error)
	// List returns all HostEndpoints matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.HostEndpointList, error)
	// Watch returns a watch over HostEndpoints matching the supplied options.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// hostEndpoints implements HostEndpointInterface
type hostEndpoints struct {
	// client provides access to the backing resource and datastore operations.
	client client
}
|
||||
|
||||
// Create takes the representation of a HostEndpoint and creates it. Returns the stored
|
||||
// representation of the HostEndpoint, and an error, if there is any.
|
||||
func (r hostEndpoints) Create(ctx context.Context, res *apiv3.HostEndpoint, opts options.SetOptions) (*apiv3.HostEndpoint, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindHostEndpoint, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.HostEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a HostEndpoint and updates it. Returns the stored
|
||||
// representation of the HostEndpoint, and an error, if there is any.
|
||||
func (r hostEndpoints) Update(ctx context.Context, res *apiv3.HostEndpoint, opts options.SetOptions) (*apiv3.HostEndpoint, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindHostEndpoint, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.HostEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the HostEndpoint and deletes it. Returns an error if one occurs.
|
||||
func (r hostEndpoints) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.HostEndpoint, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindHostEndpoint, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.HostEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the HostEndpoint, and returns the corresponding HostEndpoint object,
|
||||
// and an error if there is any.
|
||||
func (r hostEndpoints) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.HostEndpoint, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindHostEndpoint, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.HostEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of HostEndpoint objects that match the supplied options.
|
||||
func (r hostEndpoints) List(ctx context.Context, opts options.ListOptions) (*apiv3.HostEndpointList, error) {
|
||||
res := &apiv3.HostEndpointList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindHostEndpoint, apiv3.KindHostEndpointList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the HostEndpoints that match the
|
||||
// supplied options.
|
||||
func (r hostEndpoints) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindHostEndpoint, nil)
|
||||
}
|
||||
62
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/interface.go
generated
vendored
62
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/interface.go
generated
vendored
@@ -1,62 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/ipam"
|
||||
)
|
||||
|
||||
// Interface is the top-level client API, providing typed access to every
// Calico v3 resource type plus IPAM and datastore initialization.
type Interface interface {
	// Nodes returns an interface for managing node resources.
	Nodes() NodeInterface
	// GlobalNetworkPolicies returns an interface for managing global network policy resources.
	GlobalNetworkPolicies() GlobalNetworkPolicyInterface
	// NetworkPolicies returns an interface for managing namespaced network policy resources.
	NetworkPolicies() NetworkPolicyInterface
	// IPPools returns an interface for managing IP pool resources.
	IPPools() IPPoolInterface
	// Profiles returns an interface for managing profile resources.
	Profiles() ProfileInterface
	// GlobalNetworkSets returns an interface for managing global network sets resources.
	GlobalNetworkSets() GlobalNetworkSetInterface
	// NetworkSets returns an interface for managing network sets resources.
	NetworkSets() NetworkSetInterface
	// HostEndpoints returns an interface for managing host endpoint resources.
	HostEndpoints() HostEndpointInterface
	// WorkloadEndpoints returns an interface for managing workload endpoint resources.
	WorkloadEndpoints() WorkloadEndpointInterface
	// BGPPeers returns an interface for managing BGP peer resources.
	BGPPeers() BGPPeerInterface
	// IPAM returns an interface for managing IP address assignment and releasing.
	IPAM() ipam.Interface
	// BGPConfigurations returns an interface for managing the BGP configuration resources.
	BGPConfigurations() BGPConfigurationInterface
	// FelixConfigurations returns an interface for managing the Felix configuration resources.
	FelixConfigurations() FelixConfigurationInterface
	// ClusterInformation returns an interface for managing the cluster information resource.
	ClusterInformation() ClusterInformationInterface
	// EnsureInitialized is used to ensure the backend datastore is correctly
	// initialized for use by Calico. This method may be called multiple times, and
	// will have no effect if the datastore is already correctly initialized.
	// Most Calico deployment scenarios will automatically implicitly invoke this
	// method and so a general consumer of this API can assume that the datastore
	// is already initialized.
	EnsureInitialized(ctx context.Context, calicoVersion, clusterType string) error
}
|
||||
|
||||
// Compile-time assertion that our client implements its interface.
var _ Interface = (*client)(nil)
|
||||
620
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/ippool.go
generated
vendored
620
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/ippool.go
generated
vendored
@@ -1,620 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// IPPoolInterface has methods to work with IPPool resources.
type IPPoolInterface interface {
	// Create stores a new IPPool and returns the stored representation.
	Create(ctx context.Context, res *apiv3.IPPool, opts options.SetOptions) (*apiv3.IPPool, error)
	// Update replaces an existing IPPool and returns the stored representation.
	Update(ctx context.Context, res *apiv3.IPPool, opts options.SetOptions) (*apiv3.IPPool, error)
	// Delete removes the named IPPool and returns the deleted resource.
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.IPPool, error)
	// Get returns the named IPPool.
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.IPPool, error)
	// List returns all IPPools matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.IPPoolList, error)
	// Watch returns a watch over IPPools matching the supplied options.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// ipPools implements IPPoolInterface
type ipPools struct {
	// client provides access to the backing resource and datastore operations.
	client client
}
|
||||
|
||||
// Create takes the representation of a IPPool and creates it. Returns the stored
// representation of the IPPool, and an error, if there is any.
func (r ipPools) Create(ctx context.Context, res *apiv3.IPPool, opts options.SetOptions) (*apiv3.IPPool, error) {
	if res != nil {
		// Since we're about to default some fields, take a (shallow) copy of the input data
		// before we do so.
		resCopy := *res
		res = &resCopy
	}
	// Validate the IPPool before creating the resource.
	// NOTE(review): res may still be nil here; presumably validateAndSetDefaults
	// handles that case — confirm.
	if err := r.validateAndSetDefaults(ctx, res, nil); err != nil {
		return nil, err
	}

	if err := validator.Validate(res); err != nil {
		return nil, err
	}

	// Check that there are no existing blocks in the pool range that have a different block size.
	poolBlockSize := res.Spec.BlockSize
	poolIP, poolCIDR, err := net.ParseCIDR(res.Spec.CIDR)
	if err != nil {
		return nil, cerrors.ErrorParsingDatastoreEntry{
			RawKey:   "CIDR",
			RawValue: string(res.Spec.CIDR),
			Err:      err,
		}
	}

	// Determine the IP version from the parsed pool address (v6 when no 4-byte form).
	ipVersion := 4
	if poolIP.To4() == nil {
		ipVersion = 6
	}

	blocks, err := r.client.backend.List(ctx, model.BlockListOptions{IPVersion: ipVersion}, "")
	if _, ok := err.(cerrors.ErrorOperationNotSupported); !ok && err != nil {
		// There was an error and it wasn't OperationNotSupported - return it.
		return nil, err
	} else if err == nil {
		// Skip the block check if the error is OperationUnsupported - listing blocks is not
		// supported with host-local IPAM on KDD.
		for _, b := range blocks.KVPairs {
			k := b.Key.(model.BlockKey)
			ones, _ := k.CIDR.Mask.Size()
			// Check if this block has a different size to the pool, and that it overlaps with the pool.
			if ones != poolBlockSize && k.CIDR.IsNetOverlap(*poolCIDR) {
				return nil, cerrors.ErrorValidation{
					ErroredFields: []cerrors.ErroredField{{
						Name:   "IPPool.Spec.BlockSize",
						Reason: "IPPool blocksSize conflicts with existing allocations that use a different blockSize",
						Value:  res.Spec.BlockSize,
					}},
				}
			}
		}
	}

	// Enable IPIP or VXLAN globally if required. Do this before the Create so if it fails the user
	// can retry the same command.
	err = r.maybeEnableIPIP(ctx, res)
	if err != nil {
		return nil, err
	}
	err = r.maybeEnableVXLAN(ctx, res)
	if err != nil {
		return nil, err
	}

	// Create the pool; the backend may return a stored object alongside an error.
	out, err := r.client.resources.Create(ctx, opts, apiv3.KindIPPool, res)
	if out != nil {
		return out.(*apiv3.IPPool), err
	}
	return nil, err

}
|
||||
|
||||
// Update takes the representation of a IPPool and updates it. Returns the stored
// representation of the IPPool, and an error, if there is any.
func (r ipPools) Update(ctx context.Context, res *apiv3.IPPool, opts options.SetOptions) (*apiv3.IPPool, error) {
	if res != nil {
		// Since we're about to default some fields, take a (shallow) copy of the input data
		// before we do so.
		resCopy := *res
		res = &resCopy
	}

	// Get the existing settings, so that we can validate the CIDR and block size have not changed.
	old, err := r.Get(ctx, res.Name, options.GetOptions{})
	if err != nil {
		return nil, err
	}

	// Validate the IPPool updating the resource.
	if err := r.validateAndSetDefaults(ctx, res, old); err != nil {
		return nil, err
	}

	if err := validator.Validate(res); err != nil {
		return nil, err
	}

	// Enable IPIP globally if required. Do this before the Update so if it fails the user
	// can retry the same command.
	err = r.maybeEnableIPIP(ctx, res)
	if err != nil {
		return nil, err
	}
	err = r.maybeEnableVXLAN(ctx, res)
	if err != nil {
		return nil, err
	}

	// Perform the update; the backend may return a stored object alongside an error.
	out, err := r.client.resources.Update(ctx, opts, apiv3.KindIPPool, res)
	if out != nil {
		return out.(*apiv3.IPPool), err
	}
	return nil, err
}
|
||||
|
||||
// Delete takes name of the IPPool and deletes it. Returns an error if one occurs.
func (r ipPools) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.IPPool, error) {
	// Deleting a pool requires a little care because of existing endpoints
	// using IP addresses allocated in the pool. We do the deletion in
	// the following steps:
	// - disable the pool so no more IPs are assigned from it
	// - remove all affinities associated with the pool
	// - delete the pool

	// Get the pool so that we can find the CIDR associated with it.
	pool, err := r.Get(ctx, name, options.GetOptions{})
	if err != nil {
		return nil, err
	}

	logCxt := log.WithFields(log.Fields{
		"CIDR": pool.Spec.CIDR,
		"Name": name,
	})

	// If the pool is active, set the disabled flag to ensure we stop allocating from this pool.
	if !pool.Spec.Disabled {
		logCxt.Info("Disabling pool to release affinities")
		pool.Spec.Disabled = true

		// If the Delete has been called with a ResourceVersion then use that to perform the
		// update - that way we'll catch update conflicts (we could actually check here, but
		// the most likely scenario is there isn't one - so just pass it through and let the
		// Update handle any conflicts).
		if opts.ResourceVersion != "" {
			pool.ResourceVersion = opts.ResourceVersion
		}
		if _, err := r.Update(ctx, pool, options.SetOptions{}); err != nil {
			return nil, err
		}

		// Reset the resource version before the actual delete since the version of that resource
		// will now have been updated.
		opts.ResourceVersion = ""
	}

	// Release affinities associated with this pool. We do this even if the pool was disabled
	// (since it may have been enabled at one time, and if there are no affine blocks created
	// then this will be a no-op). We've already validated the CIDR so we know it will parse.
	if _, cidrNet, err := cnet.ParseCIDR(pool.Spec.CIDR); err == nil {
		logCxt.Info("Releasing pool affinities")

		// Pause for a short period before releasing the affinities - this gives any in-progress
		// allocations an opportunity to finish.
		// NOTE(review): this sleep is best-effort, not a guarantee — an allocation
		// still in flight after 500ms could race with the release.
		time.Sleep(500 * time.Millisecond)
		err = r.client.IPAM().ReleasePoolAffinities(ctx, *cidrNet)

		// Depending on the datastore, IPAM may not be supported. If we get a not supported
		// error, then continue. Any other error, fail.
		if _, ok := err.(cerrors.ErrorOperationNotSupported); !ok && err != nil {
			return nil, err
		}
	}

	// And finally, delete the pool.
	logCxt.Info("Deleting pool")
	out, err := r.client.resources.Delete(ctx, opts, apiv3.KindIPPool, noNamespace, name)
	if out != nil {
		return out.(*apiv3.IPPool), err
	}
	return nil, err
}
|
||||
|
||||
// Get takes name of the IPPool, and returns the corresponding IPPool object,
|
||||
// and an error if there is any.
|
||||
func (r ipPools) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.IPPool, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindIPPool, noNamespace, name)
|
||||
if out != nil {
|
||||
convertIpPoolFromStorage(out.(*apiv3.IPPool))
|
||||
return out.(*apiv3.IPPool), err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of IPPool objects that match the supplied options.
|
||||
func (r ipPools) List(ctx context.Context, opts options.ListOptions) (*apiv3.IPPoolList, error) {
|
||||
res := &apiv3.IPPoolList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindIPPool, apiv3.KindIPPoolList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Default values when reading from backend.
|
||||
for i := range res.Items {
|
||||
convertIpPoolFromStorage(&res.Items[i])
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Default pool values when reading from storage
|
||||
func convertIpPoolFromStorage(pool *apiv3.IPPool) error {
|
||||
// Default the blockSize if it wasn't previously set
|
||||
if pool.Spec.BlockSize == 0 {
|
||||
// Get the IP address of the CIDR to find the IP version
|
||||
ipAddr, _, err := cnet.ParseCIDR(pool.Spec.CIDR)
|
||||
if err != nil {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: []cerrors.ErroredField{{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR must be a valid subnet",
|
||||
Value: pool.Spec.CIDR,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
if ipAddr.Version() == 4 {
|
||||
pool.Spec.BlockSize = 26
|
||||
} else {
|
||||
pool.Spec.BlockSize = 122
|
||||
}
|
||||
}
|
||||
|
||||
// Default the nodeSelector if it wasn't previously set.
|
||||
if pool.Spec.NodeSelector == "" {
|
||||
pool.Spec.NodeSelector = "all()"
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the IPPools that match the
|
||||
// supplied options.
|
||||
func (r ipPools) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindIPPool, nil)
|
||||
}
|
||||
|
||||
// validateAndSetDefaults validates IPPool fields and sets default values that are
|
||||
// not assigned.
|
||||
// The old pool will be unassigned for a Create.
|
||||
func (r ipPools) validateAndSetDefaults(ctx context.Context, new, old *apiv3.IPPool) error {
|
||||
errFields := []cerrors.ErroredField{}
|
||||
|
||||
// Spec.CIDR field must not be empty.
|
||||
if new.Spec.CIDR == "" {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: []cerrors.ErroredField{{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR must be specified",
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the CIDR is parsable.
|
||||
ipAddr, cidr, err := cnet.ParseCIDR(new.Spec.CIDR)
|
||||
if err != nil {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: []cerrors.ErroredField{{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR must be a valid subnet",
|
||||
Value: new.Spec.CIDR,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
// Normalize the CIDR before persisting.
|
||||
new.Spec.CIDR = cidr.String()
|
||||
|
||||
// If a nodeSelector is not specified, then this IP pool selects all nodes.
|
||||
if new.Spec.NodeSelector == "" {
|
||||
new.Spec.NodeSelector = "all()"
|
||||
}
|
||||
|
||||
// If there was a previous pool then this must be an Update, validate that the
|
||||
// CIDR has not changed. Since we are using normalized CIDRs we can just do a
|
||||
// simple string comparison.
|
||||
if old != nil && old.Spec.CIDR != new.Spec.CIDR {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR cannot be modified",
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
|
||||
// Default the blockSize
|
||||
if new.Spec.BlockSize == 0 {
|
||||
if ipAddr.Version() == 4 {
|
||||
new.Spec.BlockSize = 26
|
||||
} else {
|
||||
new.Spec.BlockSize = 122
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the blockSize hasn't changed since updates are not supported.
|
||||
if old != nil && old.Spec.BlockSize != new.Spec.BlockSize {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.BlockSize",
|
||||
Reason: "IPPool BlockSize cannot be modified",
|
||||
Value: new.Spec.BlockSize,
|
||||
})
|
||||
}
|
||||
|
||||
if ipAddr.Version() == 4 {
|
||||
if new.Spec.BlockSize > 32 || new.Spec.BlockSize < 20 {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.BlockSize",
|
||||
Reason: "IPv4 block size must be between 20 and 32",
|
||||
Value: new.Spec.BlockSize,
|
||||
})
|
||||
|
||||
}
|
||||
} else {
|
||||
if new.Spec.BlockSize > 128 || new.Spec.BlockSize < 116 {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.BlockSize",
|
||||
Reason: "IPv6 block size must be between 116 and 128",
|
||||
Value: new.Spec.BlockSize,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// The Calico IPAM places restrictions on the minimum IP pool size. If
|
||||
// the ippool is enabled, check that the pool is at least the minimum size.
|
||||
if !new.Spec.Disabled {
|
||||
ones, _ := cidr.Mask.Size()
|
||||
log.Debugf("Pool CIDR: %s, mask: %d, blockSize: %d", cidr.String(), ones, new.Spec.BlockSize)
|
||||
if ones > new.Spec.BlockSize {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IP pool size is too small for use with Calico IPAM. It must be equal to or greater than the block size.",
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// If there was no previous pool then this must be a Create. Check that the CIDR
|
||||
// does not overlap with any other pool CIDRs.
|
||||
if old == nil {
|
||||
allPools, err := r.List(ctx, options.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, otherPool := range allPools.Items {
|
||||
// It's possible that Create is called for a pre-existing pool, so skip our own
|
||||
// pool and let the generic processing handle the pre-existing resource error case.
|
||||
if otherPool.Name == new.Name {
|
||||
continue
|
||||
}
|
||||
_, otherCIDR, err := cnet.ParseCIDR(otherPool.Spec.CIDR)
|
||||
if err != nil {
|
||||
log.WithField("Name", otherPool.Name).WithError(err).Error("IPPool is configured with an invalid CIDR")
|
||||
continue
|
||||
}
|
||||
if otherCIDR.IsNetOverlap(cidr.IPNet) {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: fmt.Sprintf("IPPool(%s) CIDR overlaps with IPPool(%s) CIDR %s", new.Name, otherPool.Name, otherPool.Spec.CIDR),
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure IPIPMode is defaulted to "Never".
|
||||
if len(new.Spec.IPIPMode) == 0 {
|
||||
new.Spec.IPIPMode = apiv3.IPIPModeNever
|
||||
}
|
||||
|
||||
// Make sure VXLANMode is defaulted to "Never".
|
||||
if len(new.Spec.VXLANMode) == 0 {
|
||||
new.Spec.VXLANMode = apiv3.VXLANModeNever
|
||||
}
|
||||
|
||||
// Make sure only one of VXLAN and IPIP is enabled.
|
||||
if new.Spec.VXLANMode != apiv3.VXLANModeNever && new.Spec.IPIPMode != apiv3.IPIPModeNever {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.VXLANMode",
|
||||
Reason: "Cannot enable both VXLAN and IPIP on the same IPPool",
|
||||
Value: new.Spec.VXLANMode,
|
||||
})
|
||||
}
|
||||
|
||||
// IPIP cannot be enabled for IPv6.
|
||||
if cidr.Version() == 6 && new.Spec.IPIPMode != apiv3.IPIPModeNever {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.IPIPMode",
|
||||
Reason: "IPIP is not supported on an IPv6 IP pool",
|
||||
Value: new.Spec.IPIPMode,
|
||||
})
|
||||
}
|
||||
|
||||
// The Calico CIDR should be strictly masked
|
||||
log.Debugf("IPPool CIDR: %s, Masked IP: %d", new.Spec.CIDR, cidr.IP)
|
||||
if cidr.IP.String() != ipAddr.String() {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR is not strictly masked",
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
|
||||
// IPv4 link local subnet.
|
||||
ipv4LinkLocalNet := net.IPNet{
|
||||
IP: net.ParseIP("169.254.0.0"),
|
||||
Mask: net.CIDRMask(16, 32),
|
||||
}
|
||||
// IPv6 link local subnet.
|
||||
ipv6LinkLocalNet := net.IPNet{
|
||||
IP: net.ParseIP("fe80::"),
|
||||
Mask: net.CIDRMask(10, 128),
|
||||
}
|
||||
|
||||
// IP Pool CIDR cannot overlap with IPv4 or IPv6 link local address range.
|
||||
if cidr.Version() == 4 && cidr.IsNetOverlap(ipv4LinkLocalNet) {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR overlaps with IPv4 Link Local range 169.254.0.0/16",
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
|
||||
if cidr.Version() == 6 && cidr.IsNetOverlap(ipv6LinkLocalNet) {
|
||||
errFields = append(errFields, cerrors.ErroredField{
|
||||
Name: "IPPool.Spec.CIDR",
|
||||
Reason: "IPPool CIDR overlaps with IPv6 Link Local range fe80::/10",
|
||||
Value: new.Spec.CIDR,
|
||||
})
|
||||
}
|
||||
|
||||
// Return the errors if we have one or more validation errors.
|
||||
if len(errFields) > 0 {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: errFields,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// maybeEnableIPIP enables global IPIP if a default setting is not already configured
|
||||
// and the pool has IPIP enabled.
|
||||
func (c ipPools) maybeEnableIPIP(ctx context.Context, pool *apiv3.IPPool) error {
|
||||
if pool.Spec.IPIPMode == apiv3.IPIPModeNever {
|
||||
log.Debug("IPIP is not enabled for this pool - no need to check global setting")
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
ipEnabled := true
|
||||
for i := 0; i < maxApplyRetries; i++ {
|
||||
log.WithField("Retry", i).Debug("Checking global IPIP setting")
|
||||
res, err := c.client.FelixConfigurations().Get(ctx, "default", options.GetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok && err != nil {
|
||||
log.WithError(err).Debug("Error getting current FelixConfiguration resource")
|
||||
return err
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
log.Debug("Global FelixConfiguration does not exist - creating")
|
||||
res = apiv3.NewFelixConfiguration()
|
||||
res.Name = "default"
|
||||
} else if res.Spec.IPIPEnabled != nil {
|
||||
// A value for the default config is set so leave unchanged. It may be set to false,
|
||||
// so log the actual value - but we shouldn't update it if someone has explicitly
|
||||
// disabled it globally.
|
||||
log.WithField("IPIPEnabled", res.Spec.IPIPEnabled).Debug("Global IPIPEnabled setting is already configured")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Enable IpInIp and do the Create or Update.
|
||||
res.Spec.IPIPEnabled = &ipEnabled
|
||||
if res.ResourceVersion == "" {
|
||||
res, err = c.client.FelixConfigurations().Create(ctx, res, options.SetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
|
||||
log.Debug("FelixConfiguration already exists - retry update")
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
res, err = c.client.FelixConfigurations().Update(ctx, res, options.SetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
|
||||
log.Debug("FelixConfiguration update conflict - retry update")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
log.Debug("FelixConfiguration updated successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithError(err).Debug("Error updating FelixConfiguration to enable IPIP")
|
||||
return err
|
||||
}
|
||||
|
||||
// Return the error from the final Update.
|
||||
log.WithError(err).Info("Too many conflict failures attempting to update FelixConfiguration to enable IPIP")
|
||||
return err
|
||||
}
|
||||
|
||||
// maybeEnableVXLAN enables global VXLAN if a default setting is not already configured
|
||||
// and the pool has VXLAN enabled.
|
||||
func (c ipPools) maybeEnableVXLAN(ctx context.Context, pool *apiv3.IPPool) error {
|
||||
if pool.Spec.VXLANMode == apiv3.VXLANModeNever {
|
||||
log.Debug("VXLAN is not enabled for this pool - no need to check global setting")
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
ipEnabled := true
|
||||
for i := 0; i < maxApplyRetries; i++ {
|
||||
log.WithField("Retry", i).Debug("Checking global VXLAN setting")
|
||||
res, err := c.client.FelixConfigurations().Get(ctx, "default", options.GetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok && err != nil {
|
||||
log.WithError(err).Debug("Error getting current FelixConfiguration resource")
|
||||
return err
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
log.Debug("Global FelixConfiguration does not exist - creating")
|
||||
res = apiv3.NewFelixConfiguration()
|
||||
res.Name = "default"
|
||||
} else if res.Spec.VXLANEnabled != nil {
|
||||
// A value for the default config is set so leave unchanged. It may be set to false,
|
||||
// so log the actual value - but we shouldn't update it if someone has explicitly
|
||||
// disabled it globally.
|
||||
log.WithField("VXLANEnabled", res.Spec.VXLANEnabled).Debug("Global VXLANEnabled setting is already configured")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Enable IpInIp and do the Create or Update.
|
||||
res.Spec.VXLANEnabled = &ipEnabled
|
||||
if res.ResourceVersion == "" {
|
||||
res, err = c.client.FelixConfigurations().Create(ctx, res, options.SetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
|
||||
log.Debug("FelixConfiguration already exists - retry update")
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
res, err = c.client.FelixConfigurations().Update(ctx, res, options.SetOptions{})
|
||||
if _, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
|
||||
log.Debug("FelixConfiguration update conflict - retry update")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
log.Debug("FelixConfiguration updated successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithError(err).Debug("Error updating FelixConfiguration to enable VXLAN")
|
||||
return err
|
||||
}
|
||||
|
||||
// Return the error from the final Update.
|
||||
log.WithError(err).Info("Too many conflict failures attempting to update FelixConfiguration to enable VXLAN")
|
||||
return err
|
||||
}
|
||||
152
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/networkpolicy.go
generated
vendored
152
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/networkpolicy.go
generated
vendored
@@ -1,152 +0,0 @@
|
||||
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
|
||||
type NetworkPolicyInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.NetworkPolicy, opts options.SetOptions) (*apiv3.NetworkPolicy, error)
|
||||
Update(ctx context.Context, res *apiv3.NetworkPolicy, opts options.SetOptions) (*apiv3.NetworkPolicy, error)
|
||||
Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.NetworkPolicy, error)
|
||||
Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.NetworkPolicy, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.NetworkPolicyList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// networkPolicies implements NetworkPolicyInterface
|
||||
type networkPolicies struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a NetworkPolicy and creates it. Returns the stored
|
||||
// representation of the NetworkPolicy, and an error, if there is any.
|
||||
func (r networkPolicies) Create(ctx context.Context, res *apiv3.NetworkPolicy, opts options.SetOptions) (*apiv3.NetworkPolicy, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
defaultPolicyTypesField(res.Spec.Ingress, res.Spec.Egress, &res.Spec.Types)
|
||||
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Properly prefix the name
|
||||
res.GetObjectMeta().SetName(convertPolicyNameForStorage(res.GetObjectMeta().GetName()))
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindNetworkPolicy, res)
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.NetworkPolicy), err
|
||||
}
|
||||
|
||||
// Remove the prefix out of the returned policy name.
|
||||
res.GetObjectMeta().SetName(convertPolicyNameFromStorage(res.GetObjectMeta().GetName()))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a NetworkPolicy and updates it. Returns the stored
|
||||
// representation of the NetworkPolicy, and an error, if there is any.
|
||||
func (r networkPolicies) Update(ctx context.Context, res *apiv3.NetworkPolicy, opts options.SetOptions) (*apiv3.NetworkPolicy, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
defaultPolicyTypesField(res.Spec.Ingress, res.Spec.Egress, &res.Spec.Types)
|
||||
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Properly prefix the name
|
||||
res.GetObjectMeta().SetName(convertPolicyNameForStorage(res.GetObjectMeta().GetName()))
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindNetworkPolicy, res)
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.NetworkPolicy), err
|
||||
}
|
||||
|
||||
// Remove the prefix out of the returned policy name.
|
||||
res.GetObjectMeta().SetName(convertPolicyNameFromStorage(res.GetObjectMeta().GetName()))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the NetworkPolicy and deletes it. Returns an error if one occurs.
|
||||
func (r networkPolicies) Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.NetworkPolicy, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindNetworkPolicy, namespace, convertPolicyNameForStorage(name))
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.NetworkPolicy), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the NetworkPolicy, and returns the corresponding NetworkPolicy object,
|
||||
// and an error if there is any.
|
||||
func (r networkPolicies) Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.NetworkPolicy, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindNetworkPolicy, namespace, convertPolicyNameForStorage(name))
|
||||
if out != nil {
|
||||
// Remove the prefix out of the returned policy name.
|
||||
out.GetObjectMeta().SetName(convertPolicyNameFromStorage(out.GetObjectMeta().GetName()))
|
||||
return out.(*apiv3.NetworkPolicy), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of NetworkPolicy objects that match the supplied options.
|
||||
func (r networkPolicies) List(ctx context.Context, opts options.ListOptions) (*apiv3.NetworkPolicyList, error) {
|
||||
res := &apiv3.NetworkPolicyList{}
|
||||
// Add the name prefix if name is provided
|
||||
if opts.Name != "" {
|
||||
opts.Name = convertPolicyNameForStorage(opts.Name)
|
||||
}
|
||||
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindNetworkPolicy, apiv3.KindNetworkPolicyList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove the prefix off of each policy name
|
||||
for i, _ := range res.Items {
|
||||
name := res.Items[i].GetObjectMeta().GetName()
|
||||
res.Items[i].GetObjectMeta().SetName(convertPolicyNameFromStorage(name))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the NetworkPolicies that match the
|
||||
// supplied options.
|
||||
func (r networkPolicies) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
// Add the name prefix if name is provided
|
||||
if opts.Name != "" {
|
||||
opts.Name = convertPolicyNameForStorage(opts.Name)
|
||||
}
|
||||
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindNetworkPolicy, &policyConverter{})
|
||||
}
|
||||
99
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/networkset.go
generated
vendored
99
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/networkset.go
generated
vendored
@@ -1,99 +0,0 @@
|
||||
// Copyright (c) 2018-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// NetworkSetInterface has methods to work with NetworkSet resources.
|
||||
type NetworkSetInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.NetworkSet, opts options.SetOptions) (*apiv3.NetworkSet, error)
|
||||
Update(ctx context.Context, res *apiv3.NetworkSet, opts options.SetOptions) (*apiv3.NetworkSet, error)
|
||||
Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.NetworkSet, error)
|
||||
Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.NetworkSet, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.NetworkSetList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// networkSets implements NetworkSetInterface
|
||||
type networkSets struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a NetworkSet and creates it. Returns the stored
|
||||
// representation of the NetworkSet, and an error, if there is any.
|
||||
func (r networkSets) Create(ctx context.Context, res *apiv3.NetworkSet, opts options.SetOptions) (*apiv3.NetworkSet, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindNetworkSet, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.NetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a NetworkSet and updates it. Returns the stored
|
||||
// representation of the NetworkSet, and an error, if there is any.
|
||||
func (r networkSets) Update(ctx context.Context, res *apiv3.NetworkSet, opts options.SetOptions) (*apiv3.NetworkSet, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindNetworkSet, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.NetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the NetworkSet and deletes it. Returns an error if one occurs.
|
||||
func (r networkSets) Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.NetworkSet, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindNetworkSet, namespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.NetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the NetworkSet, and returns the corresponding NetworkSet object,
|
||||
// and an error if there is any.
|
||||
func (r networkSets) Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.NetworkSet, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindNetworkSet, namespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.NetworkSet), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of NetworkSet objects that match the supplied options.
|
||||
func (r networkSets) List(ctx context.Context, opts options.ListOptions) (*apiv3.NetworkSetList, error) {
|
||||
res := &apiv3.NetworkSetList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindNetworkSet, apiv3.KindNetworkSetList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the NetworkSets that match the
|
||||
// supplied options.
|
||||
func (r networkSets) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindNetworkSet, nil)
|
||||
}
|
||||
238
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/node.go
generated
vendored
238
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/node.go
generated
vendored
@@ -1,238 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"fmt"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/names"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NodeInterface has methods to work with Node resources.
|
||||
type NodeInterface interface {
|
||||
Create(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)
|
||||
Update(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error)
|
||||
Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error)
|
||||
Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error)
|
||||
List(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error)
|
||||
Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
|
||||
}
|
||||
|
||||
// nodes implements NodeInterface
|
||||
type nodes struct {
|
||||
client client
|
||||
}
|
||||
|
||||
// Create takes the representation of a Node and creates it. Returns the stored
|
||||
// representation of the Node, and an error, if there is any.
|
||||
func (r nodes) Create(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For host-protection only clusters, we instruct the user to create a Node as the first
|
||||
// operation. Piggy-back the datastore initialisation on that to ensure the Ready flag gets
|
||||
// set. Since we're likely being called from calicoctl, we don't know the Calico version.
|
||||
err := r.client.EnsureInitialized(ctx, "", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindNode, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Node), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a Node and updates it. Returns the stored
|
||||
// representation of the Node, and an error, if there is any.
|
||||
func (r nodes) Update(ctx context.Context, res *apiv3.Node, opts options.SetOptions) (*apiv3.Node, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindNode, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Node), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the Node and deletes it. Returns an error if one occurs.
//
// Deleting a Node is a multi-step cleanup: workload-endpoint IPs and tunnel
// addresses are released from IPAM, the node's workload endpoints, IPAM host
// data, node-scoped BGPPeers and per-node Felix/BGP configuration are removed,
// and finally the Node resource itself is deleted.  Cleanup steps tolerate
// "does not exist" and "not supported" errors so a partially-deleted node can
// be deleted again.
func (r nodes) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Node, error) {
	// Compute the workload endpoint name prefix for this node.
	pname, err := names.WorkloadEndpointIdentifiers{Node: name}.CalculateWorkloadEndpointName(true)
	if err != nil {
		return nil, err
	}

	// Get all weps belonging to the node
	weps, err := r.client.WorkloadEndpoints().List(ctx, options.ListOptions{
		Prefix: true,
		Name:   pname,
	})
	if err != nil {
		return nil, err
	}

	// Collate all IPs across all endpoints, and then release those IPs.
	ips := []net.IP{}
	for _, wep := range weps.Items {
		// The prefix match is unfortunately not a perfect match on the Node (since it is theoretically possible for
		// another node to match the prefix (e.g. a node name of the format <thisnode>-foobar would also match a prefix
		// search of the node <thisnode>). Therefore, we will also need to check that the Spec.Node field matches the Node.
		if wep.Spec.Node != name {
			continue
		}
		for _, ip := range wep.Spec.IPNetworks {
			ipAddr, _, err := cnet.ParseCIDROrIP(ip)
			if err == nil {
				ips = append(ips, *ipAddr)
			} else {
				// Validation for wep insists upon CIDR, so we should always succeed
				log.WithError(err).Warnf("Failed to parse CIDR: %s", ip)
			}
		}
	}

	// Add in tunnel addresses if they exist for the node.
	if n, err := r.client.Nodes().Get(ctx, name, options.GetOptions{}); err != nil {
		if _, ok := err.(errors.ErrorResourceDoesNotExist); !ok {
			return nil, err
		}
		// Resource does not exist, carry on and clean up as much as we can.
	} else {
		if n.Spec.BGP != nil && n.Spec.BGP.IPv4IPIPTunnelAddr != "" {
			ipAddr, _, err := cnet.ParseCIDROrIP(n.Spec.BGP.IPv4IPIPTunnelAddr)
			if err == nil {
				ips = append(ips, *ipAddr)
			} else {
				log.WithError(err).Warnf("Failed to parse IPIP tunnel address CIDR: %s", n.Spec.BGP.IPv4IPIPTunnelAddr)
			}
		}
		if n.Spec.IPv4VXLANTunnelAddr != "" {
			ipAddr, _, err := cnet.ParseCIDROrIP(n.Spec.IPv4VXLANTunnelAddr)
			if err == nil {
				ips = append(ips, *ipAddr)
			} else {
				log.WithError(err).Warnf("Failed to parse VXLAN tunnel address CIDR: %s", n.Spec.IPv4VXLANTunnelAddr)
			}
		}
	}

	// Release the collated IPs.  "Does not exist" / "not supported" are
	// tolerated so cleanup proceeds on backends without IPAM.
	// NOTE(review): this uses context.Background() rather than the caller's
	// ctx, so IP release ignores caller cancellation — confirm intentional.
	_, err = r.client.IPAM().ReleaseIPs(context.Background(), ips)
	switch err.(type) {
	case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
	default:
		return nil, err
	}

	// Delete the weps.
	for _, wep := range weps.Items {
		if wep.Spec.Node != name {
			continue
		}

		_, err = r.client.WorkloadEndpoints().Delete(ctx, wep.Namespace, wep.Name, options.DeleteOptions{})
		switch err.(type) {
		case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
		default:
			return nil, err
		}
	}

	// Remove the node from the IPAM data if it exists.
	err = r.client.IPAM().RemoveIPAMHost(ctx, name)
	switch err.(type) {
	case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
	default:
		return nil, err
	}

	// Remove BGPPeers that are scoped to this node.
	bgpPeers, err := r.client.BGPPeers().List(ctx, options.ListOptions{})
	if err != nil {
		return nil, err
	}
	for _, peer := range bgpPeers.Items {
		if peer.Spec.Node != name {
			continue
		}
		_, err = r.client.BGPPeers().Delete(ctx, peer.Name, options.DeleteOptions{})
		switch err.(type) {
		case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
		default:
			return nil, err
		}
	}

	// Delete felix configuration
	nodeConfName := fmt.Sprintf("node.%s", name)
	_, err = r.client.FelixConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})
	switch err.(type) {
	case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
	default:
		return nil, err
	}

	// Delete bgp configuration
	_, err = r.client.BGPConfigurations().Delete(ctx, nodeConfName, options.DeleteOptions{})
	switch err.(type) {
	case nil, errors.ErrorResourceDoesNotExist, errors.ErrorOperationNotSupported:
	default:
		return nil, err
	}

	// Delete the node.
	out, err := r.client.resources.Delete(ctx, opts, apiv3.KindNode, noNamespace, name)
	if out != nil {
		return out.(*apiv3.Node), err
	}
	return nil, err
}
|
||||
|
||||
// Get takes name of the Node, and returns the corresponding Node object,
|
||||
// and an error if there is any.
|
||||
func (r nodes) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Node, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindNode, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Node), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of Node objects that match the supplied options.
|
||||
func (r nodes) List(ctx context.Context, opts options.ListOptions) (*apiv3.NodeList, error) {
|
||||
res := &apiv3.NodeList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindNode, apiv3.KindNodeList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the Nodes that match the
|
||||
// supplied options.
|
||||
func (r nodes) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindNode, nil)
|
||||
}
|
||||
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/profile.go
generated
vendored
101
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/profile.go
generated
vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// ProfileInterface has methods to work with Profile resources.
type ProfileInterface interface {
	// Create stores a new Profile and returns the stored representation.
	Create(ctx context.Context, res *apiv3.Profile, opts options.SetOptions) (*apiv3.Profile, error)
	// Update replaces an existing Profile and returns the stored representation.
	Update(ctx context.Context, res *apiv3.Profile, opts options.SetOptions) (*apiv3.Profile, error)
	// Delete removes the named Profile and returns the deleted resource.
	Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Profile, error)
	// Get returns the named Profile.
	Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Profile, error)
	// List returns all Profiles matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.ProfileList, error)
	// Watch returns a watch.Interface streaming events for matching Profiles.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// profiles implements ProfileInterface.
type profiles struct {
	client client // parent client; provides the generic resources helper
}
|
||||
|
||||
// Create takes the representation of a Profile and creates it. Returns the stored
|
||||
// representation of the Profile, and an error, if there is any.
|
||||
func (r profiles) Create(ctx context.Context, res *apiv3.Profile, opts options.SetOptions) (*apiv3.Profile, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindProfile, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Profile), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a Profile and updates it. Returns the stored
|
||||
// representation of the Profile, and an error, if there is any.
|
||||
func (r profiles) Update(ctx context.Context, res *apiv3.Profile, opts options.SetOptions) (*apiv3.Profile, error) {
|
||||
if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindProfile, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Profile), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the Profile and deletes it. Returns an error if one occurs.
|
||||
func (r profiles) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv3.Profile, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindProfile, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Profile), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the Profile, and returns the corresponding Profile object,
|
||||
// and an error if there is any.
|
||||
func (r profiles) Get(ctx context.Context, name string, opts options.GetOptions) (*apiv3.Profile, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindProfile, noNamespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.Profile), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of Profile objects that match the supplied options.
|
||||
func (r profiles) List(ctx context.Context, opts options.ListOptions) (*apiv3.ProfileList, error) {
|
||||
res := &apiv3.ProfileList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindProfile, apiv3.KindProfileList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the Profiles that match the
|
||||
// supplied options.
|
||||
func (r profiles) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
|
||||
return r.client.resources.Watch(ctx, opts, apiv3.KindProfile, nil)
|
||||
}
|
||||
434
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/resources.go
generated
vendored
434
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/resources.go
generated
vendored
@@ -1,434 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/namespace"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
const (
	// noNamespace is the namespace argument used for cluster-scoped
	// (non-namespaced) resource kinds.
	noNamespace = ""
	// defaultNamespace is the conventional default namespace name.
	// NOTE(review): not referenced in this chunk — presumably used elsewhere
	// in the package.
	defaultNamespace = "default"
	// maxApplyRetries bounds retry attempts; presumably consumed by an
	// Apply-style helper elsewhere in the package — not referenced here.
	maxApplyRetries = 10
)
|
||||
|
||||
// All Calico resources implement the resource interface: a runtime.Object
// that also exposes its ObjectMeta (name, namespace, resource version, ...).
type resource interface {
	runtime.Object
	v1.ObjectMetaAccessor
}
|
||||
|
||||
// All Calico resource lists implement the resourceList interface: a
// runtime.Object that also exposes its ListMeta.
type resourceList interface {
	runtime.Object
	v1.ListMetaAccessor
}
|
||||
|
||||
// resourceInterface has methods to work with generic resource types.  It is
// the kind-agnostic CRUD layer that the typed per-resource clients (nodes,
// profiles, workloadEndpoints, ...) delegate to.
type resourceInterface interface {
	// Create stores a new resource of the given kind.
	Create(ctx context.Context, opts options.SetOptions, kind string, in resource) (resource, error)
	// Update replaces an existing resource of the given kind.
	Update(ctx context.Context, opts options.SetOptions, kind string, in resource) (resource, error)
	// Delete removes the named resource, returning the deleted object.
	Delete(ctx context.Context, opts options.DeleteOptions, kind, ns, name string) (resource, error)
	// Get returns the named resource.
	Get(ctx context.Context, opts options.GetOptions, kind, ns, name string) (resource, error)
	// List populates inout with all matching resources of the given kind.
	List(ctx context.Context, opts options.ListOptions, kind, listkind string, inout resourceList) error
	// Watch streams events for matching resources; converter (may be nil)
	// reformats each event's resources before delivery.
	Watch(ctx context.Context, opts options.ListOptions, kind string, converter watcherConverter) (watch.Interface, error)
}
|
||||
|
||||
// resources implements resourceInterface.
type resources struct {
	backend bapi.Client // backend datastore client all operations delegate to
}
|
||||
|
||||
// Create creates a resource in the backend datastore.
//
// The supplied object must carry a Name (GenerateName is not supported) and
// must not carry a ResourceVersion.  A CreationTimestamp and UID are filled
// in here when absent so the stored object is fully formed.  On success the
// returned resource is the datastore's copy of the object.
func (c *resources) Create(ctx context.Context, opts options.SetOptions, kind string, in resource) (resource, error) {
	// Resource must have a Name. Currently we do not support GenerateName.
	if len(in.GetObjectMeta().GetName()) == 0 {
		var generateNameMessage string
		if len(in.GetObjectMeta().GetGenerateName()) != 0 {
			generateNameMessage = " (GenerateName is not supported)"
		}
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{{
				Name:   "Metadata.Name",
				Reason: "field must be set for a Create request" + generateNameMessage,
				Value:  in.GetObjectMeta().GetName(),
			}},
		}
	}

	// A ResourceVersion should never be specified on a Create.
	if len(in.GetObjectMeta().GetResourceVersion()) != 0 {
		logWithResource(in).Info("Rejecting Create request with non-empty resource version")
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{{
				Name:   "Metadata.ResourceVersion",
				Reason: "field must not be set for a Create request",
				Value:  in.GetObjectMeta().GetResourceVersion(),
			}},
		}
	}
	// Namespaced kinds must carry a namespace.
	if err := c.checkNamespace(in.GetObjectMeta().GetNamespace(), kind); err != nil {
		return nil, err
	}

	// Add in the UID and creation timestamp for the resource if needed.
	creationTimestamp := in.GetObjectMeta().GetCreationTimestamp()
	if creationTimestamp.IsZero() {
		in.GetObjectMeta().SetCreationTimestamp(v1.Now())
	}
	if in.GetObjectMeta().GetUID() == "" {
		in.GetObjectMeta().SetUID(uuid.NewUUID())
	}

	// Convert the resource to a KVPair and pass that to the backend datastore, converting
	// the response (if we get one) back to a resource.
	kvp, err := c.backend.Create(ctx, c.resourceToKVPair(opts, kind, in))
	if kvp != nil {
		return c.kvPairToResource(kvp), err
	}
	return nil, err
}
|
||||
|
||||
// Update updates a resource in the backend datastore.
//
// Unlike Create, the object must already carry a ResourceVersion, a
// CreationTimestamp and a UID — their absence indicates the caller did not
// read the resource before modifying it.  On success the returned resource
// is the datastore's copy of the object.
func (c *resources) Update(ctx context.Context, opts options.SetOptions, kind string, in resource) (resource, error) {
	// A ResourceVersion should always be specified on an Update.
	if len(in.GetObjectMeta().GetResourceVersion()) == 0 {
		logWithResource(in).Info("Rejecting Update request with empty resource version")
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{{
				Name:   "Metadata.ResourceVersion",
				Reason: "field must be set for an Update request",
				Value:  in.GetObjectMeta().GetResourceVersion(),
			}},
		}
	}
	// Namespaced kinds must carry a namespace.
	if err := c.checkNamespace(in.GetObjectMeta().GetNamespace(), kind); err != nil {
		return nil, err
	}
	// The creation timestamp must have been set (by a previous Create).
	creationTimestamp := in.GetObjectMeta().GetCreationTimestamp()
	if creationTimestamp.IsZero() {
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{{
				Name:   "Metadata.CreationTimestamp",
				Reason: "field must be set for an Update request",
				Value:  in.GetObjectMeta().GetCreationTimestamp(),
			}},
		}
	}
	// Likewise the UID must have been set.
	if in.GetObjectMeta().GetUID() == "" {
		return nil, cerrors.ErrorValidation{
			ErroredFields: []cerrors.ErroredField{{
				Name:   "Metadata.UID",
				Reason: "field must be set for an Update request",
				Value:  in.GetObjectMeta().GetUID(),
			}},
		}
	}

	// Convert the resource to a KVPair and pass that to the backend datastore, converting
	// the response (if we get one) back to a resource.
	kvp, err := c.backend.Update(ctx, c.resourceToKVPair(opts, kind, in))
	if kvp != nil {
		return c.kvPairToResource(kvp), err
	}
	return nil, err
}
|
||||
|
||||
// Delete deletes a resource from the backend datastore.
|
||||
func (c *resources) Delete(ctx context.Context, opts options.DeleteOptions, kind, ns, name string) (resource, error) {
|
||||
if err := c.checkNamespace(ns, kind); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Create a ResourceKey and pass that to the backend datastore.
|
||||
key := model.ResourceKey{
|
||||
Kind: kind,
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
}
|
||||
kvp, err := c.backend.Delete(ctx, key, opts.ResourceVersion)
|
||||
if kvp != nil {
|
||||
return c.kvPairToResource(kvp), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get gets a resource from the backend datastore.
|
||||
func (c *resources) Get(ctx context.Context, opts options.GetOptions, kind, ns, name string) (resource, error) {
|
||||
if err := c.checkNamespace(ns, kind); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := model.ResourceKey{
|
||||
Kind: kind,
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
}
|
||||
kvp, err := c.backend.Get(ctx, key, opts.ResourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := c.kvPairToResource(kvp)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// List lists a resource from the backend datastore.
//
// Matching results are decoded into listObj, whose list resource version and
// GroupVersionKind are set before returning.
func (c *resources) List(ctx context.Context, opts options.ListOptions, kind, listKind string, listObj resourceList) error {
	list := model.ResourceListOptions{
		Kind:      kind,
		Name:      opts.Name,
		Namespace: opts.Namespace,
		Prefix:    opts.Prefix,
	}

	// Query the backend.
	kvps, err := c.backend.List(ctx, list, opts.ResourceVersion)
	if err != nil {
		return err
	}

	// Convert the slice of KVPairs to a slice of Objects.
	resources := []runtime.Object{}
	for _, kvp := range kvps.KVPairs {
		resources = append(resources, c.kvPairToResource(kvp))
	}
	// Store the items into the caller's list object.
	err = meta.SetList(listObj, resources)
	if err != nil {
		return err
	}

	// Finally, set the resource version and api group version of the list object.
	listObj.GetListMeta().SetResourceVersion(kvps.Revision)
	listObj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{
		Group:   apiv3.Group,
		Version: apiv3.VersionCurrent,
		Kind:    listKind,
	})

	return nil
}
|
||||
|
||||
// Watch watches a specific resource or resource type.
|
||||
func (c *resources) Watch(ctx context.Context, opts options.ListOptions, kind string, converter watcherConverter) (watch.Interface, error) {
|
||||
list := model.ResourceListOptions{
|
||||
Kind: kind,
|
||||
Name: opts.Name,
|
||||
Namespace: opts.Namespace,
|
||||
}
|
||||
|
||||
// Create the backend watcher. We need to process the results to add revision data etc.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
backend, err := c.backend.Watch(ctx, list, opts.ResourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w := &watcher{
|
||||
results: make(chan watch.Event, 100),
|
||||
client: c,
|
||||
cancel: cancel,
|
||||
context: ctx,
|
||||
backend: backend,
|
||||
converter: converter,
|
||||
}
|
||||
go w.run()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// resourceToKVPair converts the resource to a KVPair that can be consumed by the
// backend datastore client.
//
// NOTE: the supplied resource is mutated in place — its ResourceVersion and
// SelfLink are cleared (the version travels on the KVPair's Revision instead)
// and its GroupVersionKind is (re)stamped before storage.
func (c *resources) resourceToKVPair(opts options.SetOptions, kind string, in resource) *model.KVPair {
	// Prepare the resource to remove non-persisted fields.
	rv := in.GetObjectMeta().GetResourceVersion()
	in.GetObjectMeta().SetResourceVersion("")
	in.GetObjectMeta().SetSelfLink("")

	// Make sure the kind and version are set before storing.
	in.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{
		Group:   apiv3.Group,
		Version: apiv3.VersionCurrent,
		Kind:    kind,
	})

	// Create a KVPair using the "generic" resource Key, and the actual object as
	// the value.
	return &model.KVPair{
		TTL:   opts.TTL,
		Value: in,
		Key: model.ResourceKey{
			Kind:      kind,
			Name:      in.GetObjectMeta().GetName(),
			Namespace: in.GetObjectMeta().GetNamespace(),
		},
		Revision: rv,
	}
}
|
||||
|
||||
// kvPairToResource converts a KVPair returned by the backend datastore client to a
|
||||
// resource.
|
||||
func (c *resources) kvPairToResource(kvp *model.KVPair) resource {
|
||||
// Extract the resource from the returned value - the backend will already have
|
||||
// decoded it.
|
||||
out := kvp.Value.(resource)
|
||||
|
||||
// Remove the SelfLink which Calico does not use, and set the ResourceVersion from the
|
||||
// value returned from the backend datastore.
|
||||
out.GetObjectMeta().SetSelfLink("")
|
||||
out.GetObjectMeta().SetResourceVersion(kvp.Revision)
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// checkNamespace checks that the namespace is supplied on a namespaced resource type.
|
||||
func (c *resources) checkNamespace(ns, kind string) error {
|
||||
|
||||
if namespace.IsNamespaced(kind) && len(ns) == 0 {
|
||||
return cerrors.ErrorValidation{
|
||||
ErroredFields: []cerrors.ErroredField{{
|
||||
Name: "Metadata.Namespace",
|
||||
Reason: "namespace is not specified on namespaced resource",
|
||||
}},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// watcher implements the watch.Interface.
type watcher struct {
	backend    bapi.WatchInterface // underlying backend watch being wrapped
	context    context.Context     // governs the lifetime of the run() loop
	cancel     context.CancelFunc  // cancels context; invoked by Stop() and terminate()
	results    chan watch.Event    // converted events delivered to the consumer
	client     *resources          // used to convert backend KVPairs to resources
	terminated uint32              // incremented atomically once terminate() has run
	converter  watcherConverter    // optional event-resource formatter; may be nil
}
|
||||
|
||||
// Stop cancels the watcher's context; this unblocks the run() loop, which in
// turn terminates the watcher and closes the results channel.
func (w *watcher) Stop() {
	w.cancel()
}
|
||||
|
||||
// ResultChan returns the channel on which converted watch events are
// delivered; it is closed when the watcher terminates.
func (w *watcher) ResultChan() <-chan watch.Event {
	return w.results
}
|
||||
|
||||
// run is the main watch loop, pulling events from the backend watcher and sending
// down the results channel.  It exits when the backend channel closes or the
// watcher's context is cancelled, in either case terminating the watcher.
func (w *watcher) run() {
	log.Info("Main client watcher loop")

	// Make sure we terminate resources if we exit.
	defer w.terminate()

	for {
		select {
		case event, ok := <-w.backend.ResultChan():
			if !ok {
				log.Debug("Watcher results channel closed by remote")
				return
			}
			e := w.convertEvent(event)
			// Forward the converted event, but abandon the send if the
			// context is cancelled so we never block forever on a consumer
			// that has gone away.
			select {
			case w.results <- e:
			case <-w.context.Done():
				log.Info("Process backend watcher done event during watch event in main client")
				return
			}
		case <-w.context.Done(): // user cancel
			log.Info("Process backend watcher done event in main client")
			return
		}
	}
}
|
||||
|
||||
// terminate all resources associated with this watcher.
//
// Called (deferred) only from run(), so by the time it executes no further
// sends on w.results can occur and the channel may safely be closed.
func (w *watcher) terminate() {
	log.Info("Terminating main client watcher loop")
	w.cancel()
	close(w.results)
	// Record termination for hasTerminated(); read atomically there.
	atomic.AddUint32(&w.terminated, 1)
}
|
||||
|
||||
// convertEvent converts a backend watch event into a client watch event.
|
||||
func (w *watcher) convertEvent(backendEvent bapi.WatchEvent) watch.Event {
|
||||
apiEvent := watch.Event{
|
||||
Error: backendEvent.Error,
|
||||
}
|
||||
switch backendEvent.Type {
|
||||
case bapi.WatchError:
|
||||
apiEvent.Type = watch.Error
|
||||
case bapi.WatchAdded:
|
||||
apiEvent.Type = watch.Added
|
||||
case bapi.WatchDeleted:
|
||||
apiEvent.Type = watch.Deleted
|
||||
case bapi.WatchModified:
|
||||
apiEvent.Type = watch.Modified
|
||||
}
|
||||
|
||||
if backendEvent.Old != nil {
|
||||
res := w.client.kvPairToResource(backendEvent.Old)
|
||||
if w.converter != nil {
|
||||
res = w.converter.Convert(res)
|
||||
}
|
||||
apiEvent.Previous = res
|
||||
}
|
||||
if backendEvent.New != nil {
|
||||
res := w.client.kvPairToResource(backendEvent.New)
|
||||
if w.converter != nil {
|
||||
apiEvent.Object = w.converter.Convert(res)
|
||||
}
|
||||
apiEvent.Object = res
|
||||
}
|
||||
|
||||
return apiEvent
|
||||
}
|
||||
|
||||
// hasTerminated returns true if the watcher has terminated, release all resources.
|
||||
// Used for test purposes.
|
||||
func (w *watcher) hasTerminated() bool {
|
||||
t := atomic.LoadUint32(&w.terminated) != 0
|
||||
bt := w.backend.HasTerminated()
|
||||
log.Infof("hasTerminated() terminated=%v; backend-terminated=%v", t, bt)
|
||||
return t && bt
|
||||
}
|
||||
|
||||
// logWithResource returns a logrus entry with key resource attributes included.
|
||||
func logWithResource(res resource) *log.Entry {
|
||||
return log.WithFields(log.Fields{
|
||||
"Kind": res.GetObjectKind().GroupVersionKind(),
|
||||
"Name": res.GetObjectMeta().GetName(),
|
||||
"Namespace": res.GetObjectMeta().GetNamespace(),
|
||||
"ResourceVersion": res.GetObjectMeta().GetResourceVersion(),
|
||||
})
|
||||
}
|
||||
|
||||
// watcherConverter represents a formatter for calico resources returned by Watch.
// A nil watcherConverter on a watcher means events pass through unconverted.
type watcherConverter interface {
	// Convert the internal representation of a resource to a readable format.
	Convert(resource) resource
}
|
||||
166
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/workloadendpoint.go
generated
vendored
166
vendor/github.com/projectcalico/libcalico-go/lib/clientv3/workloadendpoint.go
generated
vendored
@@ -1,166 +0,0 @@
|
||||
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/names"
|
||||
"github.com/projectcalico/libcalico-go/lib/options"
|
||||
validator "github.com/projectcalico/libcalico-go/lib/validator/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/watch"
|
||||
)
|
||||
|
||||
// WorkloadEndpointInterface has methods to work with WorkloadEndpoint resources.
// WorkloadEndpoints are namespaced, so Delete and Get take a namespace argument.
type WorkloadEndpointInterface interface {
	// Create stores a new WorkloadEndpoint and returns the stored representation.
	Create(ctx context.Context, res *apiv3.WorkloadEndpoint, opts options.SetOptions) (*apiv3.WorkloadEndpoint, error)
	// Update replaces an existing WorkloadEndpoint and returns the stored representation.
	Update(ctx context.Context, res *apiv3.WorkloadEndpoint, opts options.SetOptions) (*apiv3.WorkloadEndpoint, error)
	// Delete removes the named WorkloadEndpoint and returns the deleted resource.
	Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.WorkloadEndpoint, error)
	// Get returns the named WorkloadEndpoint.
	Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.WorkloadEndpoint, error)
	// List returns all WorkloadEndpoints matching the supplied options.
	List(ctx context.Context, opts options.ListOptions) (*apiv3.WorkloadEndpointList, error)
	// Watch returns a watch.Interface streaming events for matching WorkloadEndpoints.
	Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error)
}
|
||||
|
||||
// workloadEndpoints implements WorkloadEndpointInterface.
type workloadEndpoints struct {
	client client // parent client; provides the generic resources helper
}
|
||||
|
||||
// Create takes the representation of a WorkloadEndpoint and creates it. Returns the stored
|
||||
// representation of the WorkloadEndpoint, and an error, if there is any.
|
||||
func (r workloadEndpoints) Create(ctx context.Context, res *apiv3.WorkloadEndpoint, opts options.SetOptions) (*apiv3.WorkloadEndpoint, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
if err := r.assignOrValidateName(res); err != nil {
|
||||
return nil, err
|
||||
} else if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.updateLabelsForStorage(res)
|
||||
out, err := r.client.resources.Create(ctx, opts, apiv3.KindWorkloadEndpoint, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.WorkloadEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Update takes the representation of a WorkloadEndpoint and updates it. Returns the stored
|
||||
// representation of the WorkloadEndpoint, and an error, if there is any.
|
||||
func (r workloadEndpoints) Update(ctx context.Context, res *apiv3.WorkloadEndpoint, opts options.SetOptions) (*apiv3.WorkloadEndpoint, error) {
|
||||
if res != nil {
|
||||
// Since we're about to default some fields, take a (shallow) copy of the input data
|
||||
// before we do so.
|
||||
resCopy := *res
|
||||
res = &resCopy
|
||||
}
|
||||
if err := r.assignOrValidateName(res); err != nil {
|
||||
return nil, err
|
||||
} else if err := validator.Validate(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.updateLabelsForStorage(res)
|
||||
out, err := r.client.resources.Update(ctx, opts, apiv3.KindWorkloadEndpoint, res)
|
||||
if out != nil {
|
||||
return out.(*apiv3.WorkloadEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete takes name of the WorkloadEndpoint and deletes it. Returns an error if one occurs.
|
||||
func (r workloadEndpoints) Delete(ctx context.Context, namespace, name string, opts options.DeleteOptions) (*apiv3.WorkloadEndpoint, error) {
|
||||
out, err := r.client.resources.Delete(ctx, opts, apiv3.KindWorkloadEndpoint, namespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.WorkloadEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get takes name of the WorkloadEndpoint, and returns the corresponding WorkloadEndpoint object,
|
||||
// and an error if there is any.
|
||||
func (r workloadEndpoints) Get(ctx context.Context, namespace, name string, opts options.GetOptions) (*apiv3.WorkloadEndpoint, error) {
|
||||
out, err := r.client.resources.Get(ctx, opts, apiv3.KindWorkloadEndpoint, namespace, name)
|
||||
if out != nil {
|
||||
return out.(*apiv3.WorkloadEndpoint), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List returns the list of WorkloadEndpoint objects that match the supplied options.
|
||||
func (r workloadEndpoints) List(ctx context.Context, opts options.ListOptions) (*apiv3.WorkloadEndpointList, error) {
|
||||
res := &apiv3.WorkloadEndpointList{}
|
||||
if err := r.client.resources.List(ctx, opts, apiv3.KindWorkloadEndpoint, apiv3.KindWorkloadEndpointList, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the WorkloadEndpoints that match the
// supplied options.
func (r workloadEndpoints) Watch(ctx context.Context, opts options.ListOptions) (watch.Interface, error) {
	return r.client.resources.Watch(ctx, opts, apiv3.KindWorkloadEndpoint, nil)
}
|
||||
|
||||
// assignOrValidateName either assigns the name calculated from the Spec fields, or validates
|
||||
// the name against the spec fields.
|
||||
func (r workloadEndpoints) assignOrValidateName(res *apiv3.WorkloadEndpoint) error {
|
||||
// Validate the workload endpoint indices and the name match.
|
||||
wepids := names.WorkloadEndpointIdentifiers{
|
||||
Node: res.Spec.Node,
|
||||
Orchestrator: res.Spec.Orchestrator,
|
||||
Endpoint: res.Spec.Endpoint,
|
||||
Workload: res.Spec.Workload,
|
||||
Pod: res.Spec.Pod,
|
||||
ContainerID: res.Spec.ContainerID,
|
||||
}
|
||||
expectedName, err := wepids.CalculateWorkloadEndpointName(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(res.Name) == 0 {
|
||||
// If a name was not specified then we will calculate it on behalf of the caller.
|
||||
res.Name = expectedName
|
||||
return nil
|
||||
}
|
||||
if res.Name != expectedName {
|
||||
return errors.ErrorValidation{
|
||||
ErroredFields: []errors.ErroredField{{
|
||||
Name: "Name",
|
||||
Value: res.Name,
|
||||
Reason: fmt.Sprintf("the WorkloadEndpoint name does not match the primary identifiers assigned in the Spec: expected name %s", expectedName),
|
||||
}},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateLabelsForStorage updates the set of labels that we persist. It adds/overrides
|
||||
// the Namespace and Orchestrator labels which must be set to the correct values and are
|
||||
// not user configurable.
|
||||
func (r workloadEndpoints) updateLabelsForStorage(res *apiv3.WorkloadEndpoint) {
|
||||
labelsCopy := make(map[string]string, len(res.GetLabels())+2)
|
||||
for k, v := range res.GetLabels() {
|
||||
labelsCopy[k] = v
|
||||
}
|
||||
labelsCopy[apiv3.LabelNamespace] = res.Namespace
|
||||
labelsCopy[apiv3.LabelOrchestrator] = res.Spec.Orchestrator
|
||||
res.SetLabels(labelsCopy)
|
||||
}
|
||||
96
vendor/github.com/projectcalico/libcalico-go/lib/ipam/interface.go
generated
vendored
96
vendor/github.com/projectcalico/libcalico-go/lib/ipam/interface.go
generated
vendored
@@ -1,96 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// Interface has methods to perform IP address management.
type Interface interface {
	// AssignIP assigns the provided IP address to the provided host. The IP address
	// must fall within a configured pool. AssignIP will claim block affinity as needed
	// in order to satisfy the assignment. An error will be returned if the IP address
	// is already assigned, or if StrictAffinity is enabled and the address is within
	// a block that does not have affinity for the given host.
	AssignIP(ctx context.Context, args AssignIPArgs) error

	// AutoAssign automatically assigns one or more IP addresses as specified by the
	// provided AutoAssignArgs. AutoAssign returns the list of the assigned IPv4 addresses,
	// and the list of the assigned IPv6 addresses in IPNet format.
	//
	// In case of error, returns the IPs allocated so far along with the error.
	AutoAssign(ctx context.Context, args AutoAssignArgs) ([]cnet.IPNet, []cnet.IPNet, error)

	// ReleaseIPs releases any of the given IP addresses that are currently assigned,
	// so that they are available to be used in another assignment.
	ReleaseIPs(ctx context.Context, ips []cnet.IP) ([]cnet.IP, error)

	// GetAssignmentAttributes returns the attributes stored with the given IP address
	// upon assignment.
	GetAssignmentAttributes(ctx context.Context, addr cnet.IP) (map[string]string, error)

	// IPsByHandle returns a list of all IP addresses that have been
	// assigned using the provided handle.
	IPsByHandle(ctx context.Context, handleID string) ([]cnet.IP, error)

	// ReleaseByHandle releases all IP addresses that have been assigned
	// using the provided handle. Returns an error if no addresses
	// are assigned with the given handle.
	ReleaseByHandle(ctx context.Context, handleID string) error

	// ClaimAffinity claims affinity to the given host for all blocks
	// within the given CIDR. The given CIDR must fall within a configured
	// pool. If an empty string is passed as the host, then the value returned by os.Hostname is used.
	ClaimAffinity(ctx context.Context, cidr cnet.IPNet, host string) ([]cnet.IPNet, []cnet.IPNet, error)

	// ReleaseAffinity releases affinity for all blocks within the given CIDR
	// on the given host. If an empty string is passed as the host, then the
	// value returned by os.Hostname will be used. If mustBeEmpty is true, then an error
	// will be returned if any blocks within the CIDR are not empty - in this case, this
	// function may release some but not all blocks within the given CIDR.
	ReleaseAffinity(ctx context.Context, cidr cnet.IPNet, host string, mustBeEmpty bool) error

	// ReleaseHostAffinities releases affinity for all blocks that are affine
	// to the given host. If an empty string is passed as the host, the value returned by
	// os.Hostname will be used. If mustBeEmpty is true, then an error
	// will be returned if any blocks within the CIDR are not empty - in this case, this
	// function may release some but not all blocks attached to this host.
	ReleaseHostAffinities(ctx context.Context, host string, mustBeEmpty bool) error

	// ReleasePoolAffinities releases affinity for all blocks within
	// the specified pool across all hosts.
	ReleasePoolAffinities(ctx context.Context, pool cnet.IPNet) error

	// GetIPAMConfig returns the global IPAM configuration. If no IPAM configuration
	// has been set, returns a default configuration with StrictAffinity disabled
	// and AutoAllocateBlocks enabled.
	GetIPAMConfig(ctx context.Context) (*IPAMConfig, error)

	// SetIPAMConfig sets global IPAM configuration. This can only
	// be done when there are no allocated blocks and IP addresses.
	SetIPAMConfig(ctx context.Context, cfg IPAMConfig) error

	// RemoveIPAMHost releases affinity for all blocks on the given host,
	// and removes all host-specific IPAM data from the datastore.
	// RemoveIPAMHost does not release any IP addresses claimed on the given host.
	// If an empty string is passed as the host then the value returned by os.Hostname is used.
	RemoveIPAMHost(ctx context.Context, host string) error

	// GetUtilization returns IP utilization info for the specified pools, or for all pools.
	GetUtilization(ctx context.Context, args GetUtilizationArgs) ([]*PoolUtilization, error)
}
|
||||
1589
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam.go
generated
vendored
1589
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam.go
generated
vendored
File diff suppressed because it is too large
Load Diff
431
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_block.go
generated
vendored
431
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_block.go
generated
vendored
@@ -1,431 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// windowsReservedHandle is the handle used to reserve addresses required for Windows
// networking so that workloads do not get assigned these addresses.
const windowsReservedHandle = "windows-reserved-IPAM-handle"
|
||||
|
||||
// allocationBlock wraps the backend AllocationBlock struct so that we can
// attach methods to it. The embedded pointer means all mutations apply
// directly to the backend model object.
type allocationBlock struct {
	*model.AllocationBlock
}
|
||||
|
||||
func newBlock(cidr cnet.IPNet) allocationBlock {
|
||||
ones, size := cidr.Mask.Size()
|
||||
numAddresses := 1 << uint(size-ones)
|
||||
b := model.AllocationBlock{}
|
||||
b.Allocations = make([]*int, numAddresses)
|
||||
b.Unallocated = make([]int, numAddresses)
|
||||
b.StrictAffinity = false
|
||||
b.CIDR = cidr
|
||||
|
||||
// Initialize unallocated ordinals.
|
||||
for i := 0; i < numAddresses; i++ {
|
||||
b.Unallocated[i] = i
|
||||
}
|
||||
|
||||
return allocationBlock{&b}
|
||||
}
|
||||
|
||||
// autoAssign assigns up to num free addresses from this block, recording the
// given handle and attributes against each assignment, and returns the
// assigned addresses in CIDR form. Fewer than num addresses are returned if
// the block runs out of free ordinals. If affinityCheck is true (or the block
// has StrictAffinity set), assignment fails unless the block is affine to host.
// The block is mutated in place.
func (b *allocationBlock) autoAssign(
	num int, handleID *string, host string, attrs map[string]string, affinityCheck bool) ([]cnet.IPNet, error) {

	// Determine if we need to check for affinity.
	checkAffinity := b.StrictAffinity || affinityCheck
	if checkAffinity && b.Affinity != nil && !hostAffinityMatches(host, b.AllocationBlock) {
		// Affinity check is enabled but the host does not match - error.
		s := fmt.Sprintf("Block affinity (%s) does not match provided (%s)", *b.Affinity, host)
		return nil, errors.New(s)
	} else if b.Affinity == nil {
		log.Warnf("Attempting to assign IPs from block with no affinity: %v", b)
		if checkAffinity {
			// If we're checking strict affinity, we can't assign from a block with no affinity.
			return nil, fmt.Errorf("Attempt to assign from block %v with no affinity", b.CIDR)
		}
	}

	// Walk the allocations until we find enough addresses.
	// Free ordinals are consumed from the front of the Unallocated list.
	ordinals := []int{}
	for len(b.Unallocated) > 0 && len(ordinals) < num {
		ordinals = append(ordinals, b.Unallocated[0])
		b.Unallocated = b.Unallocated[1:]
	}

	// Create slice of IPs and perform the allocations.
	ips := []cnet.IPNet{}
	_, mask, _ := cnet.ParseCIDR(b.CIDR.String())
	for _, o := range ordinals {
		// Record the handle/attributes and mark the ordinal as allocated.
		attrIndex := b.findOrAddAttribute(handleID, attrs)
		b.Allocations[o] = &attrIndex
		// Convert the ordinal back to a concrete IP within the block's CIDR.
		ipNets := cnet.IPNet(*mask)
		ipNets.IP = cnet.IncrementIP(cnet.IP{b.CIDR.IP}, big.NewInt(int64(o))).IP
		ips = append(ips, ipNets)
	}

	log.Debugf("Block %s returned ips: %v", b.CIDR.String(), ips)
	return ips, nil
}
|
||||
|
||||
// assign marks the given address as allocated in this block, recording the
// handle and attributes against it. Returns an error if the address is
// already assigned, or if strict-affinity requirements are not met. The
// block is mutated in place.
func (b *allocationBlock) assign(address cnet.IP, handleID *string, attrs map[string]string, host string) error {
	if b.StrictAffinity && b.Affinity != nil && !hostAffinityMatches(host, b.AllocationBlock) {
		// Affinity check is enabled but the host does not match - error.
		return errors.New("Block host affinity does not match")
	} else if b.Affinity == nil {
		log.Warnf("Attempting to assign IP from block with no affinity: %v", b)
		if b.StrictAffinity {
			// If we're checking strict affinity, we can't assign from a block with no affinity.
			return fmt.Errorf("Attempt to assign from block %v with no affinity", b.CIDR)
		}
	}

	// Convert to an ordinal.
	ordinal, err := b.IPToOrdinal(address)
	if err != nil {
		return err
	}

	// Check if already allocated.
	if b.Allocations[ordinal] != nil {
		return errors.New("Address already assigned in block")
	}

	// Set up attributes.
	attrIndex := b.findOrAddAttribute(handleID, attrs)
	b.Allocations[ordinal] = &attrIndex

	// Remove from unallocated. The ordinal appears at most once, so we can
	// stop at the first match.
	for i, unallocated := range b.Unallocated {
		if unallocated == ordinal {
			b.Unallocated = append(b.Unallocated[:i], b.Unallocated[i+1:]...)
			break
		}
	}
	return nil
}
|
||||
|
||||
// hostAffinityMatches checks if the provided host matches the provided affinity.
|
||||
func hostAffinityMatches(host string, block *model.AllocationBlock) bool {
|
||||
return *block.Affinity == "host:"+host
|
||||
}
|
||||
|
||||
func getHostAffinity(block *model.AllocationBlock) string {
|
||||
if block.Affinity != nil && strings.HasPrefix(*block.Affinity, "host:") {
|
||||
return strings.TrimPrefix(*block.Affinity, "host:")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// numFreeAddresses returns the number of addresses in this block that are
// currently available for assignment.
func (b allocationBlock) numFreeAddresses() int {
	return len(b.Unallocated)
}
|
||||
|
||||
// empty returns true if the block has released all of its assignable addresses,
// and returns false if any assignable addresses are in use. Addresses held by
// the Windows reserved handle do not count as in use.
func (b allocationBlock) empty() bool {
	return b.containsOnlyReservedIPs()
}
|
||||
|
||||
// containsOnlyReservedIPs returns true if the block is empty excepted for
|
||||
// expected "reserved" IP addresses.
|
||||
func (b *allocationBlock) containsOnlyReservedIPs() bool {
|
||||
for _, attrIdx := range b.Allocations {
|
||||
if attrIdx == nil {
|
||||
continue
|
||||
}
|
||||
attrs := b.Attributes[*attrIdx]
|
||||
if attrs.AttrPrimary == nil || *attrs.AttrPrimary != windowsReservedHandle {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *allocationBlock) release(addresses []cnet.IP) ([]cnet.IP, map[string]int, error) {
|
||||
// Store return values.
|
||||
unallocated := []cnet.IP{}
|
||||
countByHandle := map[string]int{}
|
||||
|
||||
// Used internally.
|
||||
var ordinals []int
|
||||
delRefCounts := map[int]int{}
|
||||
attrsToDelete := []int{}
|
||||
|
||||
// De-duplicate addresses to ensure reference counting is correcet
|
||||
uniqueAddresses := make(map[string]struct{})
|
||||
for _, ip := range addresses {
|
||||
uniqueAddresses[ip.IP.String()] = struct{}{}
|
||||
}
|
||||
|
||||
// Determine the ordinals that need to be released and the
|
||||
// attributes that need to be cleaned up.
|
||||
log.Debugf("Releasing addresses from block: %v", uniqueAddresses)
|
||||
for ipStr := range uniqueAddresses {
|
||||
ip := cnet.MustParseIP(ipStr)
|
||||
// Convert to an ordinal.
|
||||
ordinal, err := b.IPToOrdinal(ip)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
log.Debugf("Address %s is ordinal %d", ip, ordinal)
|
||||
|
||||
// Check if allocated.
|
||||
log.Debugf("Checking if allocated: %v", b.Allocations)
|
||||
attrIdx := b.Allocations[ordinal]
|
||||
if attrIdx == nil {
|
||||
log.Debugf("Asked to release address that was not allocated")
|
||||
unallocated = append(unallocated, ip)
|
||||
continue
|
||||
}
|
||||
ordinals = append(ordinals, ordinal)
|
||||
log.Debugf("%s is allocated, ordinals to release are now %v", ip, ordinals)
|
||||
|
||||
// Increment reference counting for attributes.
|
||||
cnt := 1
|
||||
if cur, exists := delRefCounts[*attrIdx]; exists {
|
||||
cnt = cur + 1
|
||||
}
|
||||
delRefCounts[*attrIdx] = cnt
|
||||
log.Debugf("delRefCounts: %v", delRefCounts)
|
||||
|
||||
// Increment count of addresses by handle if a handle
|
||||
// exists.
|
||||
log.Debugf("Looking up attribute with index %d", *attrIdx)
|
||||
handleID := b.Attributes[*attrIdx].AttrPrimary
|
||||
if handleID != nil {
|
||||
log.Debugf("HandleID is %s", *handleID)
|
||||
handleCount := 0
|
||||
if count, ok := countByHandle[*handleID]; !ok {
|
||||
handleCount = count
|
||||
}
|
||||
log.Debugf("Handle ref count is %d, incrementing", handleCount)
|
||||
handleCount += 1
|
||||
countByHandle[*handleID] = handleCount
|
||||
log.Debugf("countByHandle %v", countByHandle)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle cleaning up of attributes. We do this by
|
||||
// reference counting. If we're deleting the last reference to
|
||||
// a given attribute, then it needs to be cleaned up.
|
||||
refCounts := b.attributeRefCounts()
|
||||
log.Debugf("Cleaning up attributes, refCounts: %v", refCounts)
|
||||
for idx, refs := range delRefCounts {
|
||||
log.Debugf("Checking ref count index %d", idx)
|
||||
if refCounts[idx] == refs {
|
||||
attrsToDelete = append(attrsToDelete, idx)
|
||||
}
|
||||
}
|
||||
if len(attrsToDelete) != 0 {
|
||||
log.Debugf("Deleting attributes: %v", attrsToDelete)
|
||||
b.deleteAttributes(attrsToDelete, ordinals)
|
||||
}
|
||||
|
||||
// Release requested addresses.
|
||||
log.Debugf("Allocations: %v", b.Allocations)
|
||||
log.Debugf("Releasing ordinals: %v", ordinals)
|
||||
for _, ordinal := range ordinals {
|
||||
log.Debugf("Releasing ordinal %d", ordinal)
|
||||
b.Allocations[ordinal] = nil
|
||||
b.Unallocated = append(b.Unallocated, ordinal)
|
||||
}
|
||||
return unallocated, countByHandle, nil
|
||||
}
|
||||
|
||||
// deleteAttributes compacts the block's attribute list by removing the
// attributes at delIndexes, then rewrites every allocation's attribute index
// to point at the attribute's new (shifted) position. Note: the ordinals
// parameter is not read by this implementation — the remap below walks all
// allocations regardless; presumably it is kept for the callers' existing
// call shape (TODO confirm).
func (b *allocationBlock) deleteAttributes(delIndexes, ordinals []int) {
	// newIndexes maps old attribute index -> new attribute index;
	// entries for deleted attributes stay nil.
	newIndexes := make([]*int, len(b.Attributes))
	newAttrs := []model.AllocationAttribute{}
	y := 0 // Next free slot in the new attributes list.
	for x := range b.Attributes {
		if !intInSlice(x, delIndexes) {
			// Attribute at x is not being deleted. Build a mapping
			// of old attribute index (x) to new attribute index (y).
			log.Debugf("%d in %v", x, delIndexes)
			newIndex := y
			newIndexes[x] = &newIndex
			y += 1
			newAttrs = append(newAttrs, b.Attributes[x])
		}
	}
	b.Attributes = newAttrs

	// Update attribute indexes for all allocations in this block.
	for i := 0; i < b.NumAddresses(); i++ {
		if b.Allocations[i] != nil {
			// Get the new index that corresponds to the old index
			// and update the allocation.
			newIndex := newIndexes[*b.Allocations[i]]
			b.Allocations[i] = newIndex
		}
	}
}
|
||||
|
||||
func (b allocationBlock) attributeRefCounts() map[int]int {
|
||||
refCounts := map[int]int{}
|
||||
for _, a := range b.Allocations {
|
||||
if a == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if count, ok := refCounts[*a]; !ok {
|
||||
// No entry for given attribute index.
|
||||
refCounts[*a] = 1
|
||||
} else {
|
||||
refCounts[*a] = count + 1
|
||||
}
|
||||
}
|
||||
return refCounts
|
||||
}
|
||||
|
||||
func (b allocationBlock) attributeIndexesByHandle(handleID string) []int {
|
||||
indexes := []int{}
|
||||
for i, attr := range b.Attributes {
|
||||
if attr.AttrPrimary != nil && *attr.AttrPrimary == handleID {
|
||||
indexes = append(indexes, i)
|
||||
}
|
||||
}
|
||||
return indexes
|
||||
}
|
||||
|
||||
// releaseByHandle releases every address in this block that was assigned
// using the given handle ID, and returns the number of addresses released
// (0 if the handle has no assignments in this block). The block is mutated
// in place.
func (b *allocationBlock) releaseByHandle(handleID string) int {
	attrIndexes := b.attributeIndexesByHandle(handleID)
	log.Debugf("Attribute indexes to release: %v", attrIndexes)
	if len(attrIndexes) == 0 {
		// Nothing to release.
		log.Debugf("No addresses assigned to handle '%s'", handleID)
		return 0
	}

	// There are addresses to release.
	ordinals := []int{}
	var o int
	for o = 0; o < b.NumAddresses(); o++ {
		// Only check allocated ordinals.
		if b.Allocations[o] != nil && intInSlice(*b.Allocations[o], attrIndexes) {
			// Release this ordinal.
			ordinals = append(ordinals, o)
		}
	}

	// Clean and reorder attributes. This must happen before the ordinals
	// are cleared below, while the allocations still reference the old
	// attribute indexes.
	b.deleteAttributes(attrIndexes, ordinals)

	// Release the addresses.
	for _, o := range ordinals {
		b.Allocations[o] = nil
		b.Unallocated = append(b.Unallocated, o)
	}
	return len(ordinals)
}
|
||||
|
||||
func (b allocationBlock) ipsByHandle(handleID string) []cnet.IP {
|
||||
ips := []cnet.IP{}
|
||||
attrIndexes := b.attributeIndexesByHandle(handleID)
|
||||
var o int
|
||||
for o = 0; o < b.NumAddresses(); o++ {
|
||||
if b.Allocations[o] != nil && intInSlice(*b.Allocations[o], attrIndexes) {
|
||||
ip := b.OrdinalToIP(o)
|
||||
ips = append(ips, ip)
|
||||
}
|
||||
}
|
||||
return ips
|
||||
}
|
||||
|
||||
func (b allocationBlock) attributesForIP(ip cnet.IP) (map[string]string, error) {
|
||||
// Convert to an ordinal.
|
||||
ordinal, err := b.IPToOrdinal(ip)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if allocated.
|
||||
attrIndex := b.Allocations[ordinal]
|
||||
if attrIndex == nil {
|
||||
log.Debugf("IP %s is not currently assigned in block", ip)
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: ip.String(), Err: errors.New("IP is unassigned")}
|
||||
}
|
||||
return b.Attributes[*attrIndex].AttrSecondary, nil
|
||||
}
|
||||
|
||||
func (b *allocationBlock) findOrAddAttribute(handleID *string, attrs map[string]string) int {
|
||||
logCtx := log.WithField("attrs", attrs)
|
||||
if handleID != nil {
|
||||
logCtx = log.WithField("handle", *handleID)
|
||||
}
|
||||
attr := model.AllocationAttribute{handleID, attrs}
|
||||
for idx, existing := range b.Attributes {
|
||||
if reflect.DeepEqual(attr, existing) {
|
||||
log.Debugf("Attribute '%+v' already exists", attr)
|
||||
return idx
|
||||
}
|
||||
}
|
||||
|
||||
// Does not exist - add it.
|
||||
logCtx.Debugf("New allocation attribute: %#v", attr)
|
||||
attrIndex := len(b.Attributes)
|
||||
b.Attributes = append(b.Attributes, attr)
|
||||
return attrIndex
|
||||
}
|
||||
|
||||
func getBlockCIDRForAddress(addr cnet.IP, pool *v3.IPPool) cnet.IPNet {
|
||||
var mask net.IPMask
|
||||
if addr.Version() == 6 {
|
||||
// This is an IPv6 address.
|
||||
mask = net.CIDRMask(pool.Spec.BlockSize, 128)
|
||||
} else {
|
||||
// This is an IPv4 address.
|
||||
mask = net.CIDRMask(pool.Spec.BlockSize, 32)
|
||||
}
|
||||
masked := addr.Mask(mask)
|
||||
return cnet.IPNet{IPNet: net.IPNet{IP: masked, Mask: mask}}
|
||||
}
|
||||
|
||||
func getIPVersion(ip cnet.IP) int {
|
||||
if ip.To4() == nil {
|
||||
return 6
|
||||
}
|
||||
return 4
|
||||
}
|
||||
|
||||
// largerThanOrEqualToBlock returns true if the given CIDR is at least as
// large as a block of the pool's configured size, i.e. its prefix length is
// no longer than the pool's BlockSize.
func largerThanOrEqualToBlock(blockCIDR cnet.IPNet, pool *v3.IPPool) bool {
	ones, _ := blockCIDR.Mask.Size()
	return ones <= pool.Spec.BlockSize
}
|
||||
|
||||
// intInSlice reports whether searchInt appears in slice.
func intInSlice(searchInt int, slice []int) bool {
	for i := range slice {
		if slice[i] == searchInt {
			return true
		}
	}
	return false
}
|
||||
563
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_block_reader_writer.go
generated
vendored
563
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_block_reader_writer.go
generated
vendored
@@ -1,563 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// blockReaderWriter bundles a backend datastore client with an IP pool
// accessor, providing the low-level read/write helpers for IPAM allocation
// blocks and block affinities.
type blockReaderWriter struct {
	// client is the backend datastore client used for block/affinity CRUD.
	client bapi.Client
	// pools provides access to the configured IP pools.
	pools PoolAccessorInterface
}
|
||||
|
||||
func (rw blockReaderWriter) getAffineBlocks(ctx context.Context, host string, ver int, pools []v3.IPPool) (blocksInPool, blocksNotInPool []cnet.IPNet, err error) {
|
||||
blocksInPool = []cnet.IPNet{}
|
||||
blocksNotInPool = []cnet.IPNet{}
|
||||
|
||||
// Lookup blocks affine to the specified host.
|
||||
opts := model.BlockAffinityListOptions{Host: host, IPVersion: ver}
|
||||
datastoreObjs, err := rw.client.List(ctx, opts, "")
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
|
||||
// The block path does not exist yet. This is OK - it means
|
||||
// there are no affine blocks.
|
||||
return
|
||||
} else {
|
||||
log.Errorf("Error getting affine blocks: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate through and extract the block CIDRs.
|
||||
for _, o := range datastoreObjs.KVPairs {
|
||||
k := o.Key.(model.BlockAffinityKey)
|
||||
|
||||
// Add the block if no IP pools were specified, or if IP pools were specified
|
||||
// and the block falls within the given IP pools.
|
||||
if len(pools) == 0 {
|
||||
blocksInPool = append(blocksInPool, k.CIDR)
|
||||
} else {
|
||||
found := false
|
||||
for _, pool := range pools {
|
||||
var poolNet *cnet.IPNet
|
||||
_, poolNet, err = cnet.ParseCIDR(pool.Spec.CIDR)
|
||||
if err != nil {
|
||||
log.Errorf("Error parsing CIDR: %s from pool: %s %v", pool.Spec.CIDR, pool.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
if poolNet.Contains(k.CIDR.IPNet.IP) {
|
||||
blocksInPool = append(blocksInPool, k.CIDR)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
blocksNotInPool = append(blocksNotInPool, k.CIDR)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// findUnclaimedBlock finds a block cidr which does not yet exist within the given list of pools. The provided pools
// should already be sanitized and only include existing, enabled pools. Note that the block may become claimed
// between receiving the cidr from this function and attempting to claim the corresponding block as this function
// does not reserve the returned IPNet.
func (rw blockReaderWriter) findUnclaimedBlock(ctx context.Context, host string, version int, pools []v3.IPPool, config IPAMConfig) (*cnet.IPNet, error) {
	// If there are no pools, we cannot assign addresses.
	if len(pools) == 0 {
		return nil, fmt.Errorf("no configured Calico pools for node %s", host)
	}

	// Iterate through pools to find a new block.
	for _, pool := range pools {
		// Use a block generator to iterate through all of the blocks
		// that fall within the pool.
		log.Debugf("Looking for blocks in pool %+v", pool)
		blocks := randomBlockGenerator(pool, host)
		for subnet := blocks(); subnet != nil; subnet = blocks() {
			// Check if a block already exists for this subnet.
			log.Debugf("Getting block: %s", subnet.String())
			_, err := rw.queryBlock(ctx, *subnet, "")
			if err != nil {
				if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
					// No block object in the datastore - this subnet is free
					// to claim (though not reserved; see function comment).
					log.Infof("Found free block: %+v", *subnet)
					return subnet, nil
				}
				log.Errorf("Error getting block: %v", err)
				return nil, err
			}
			log.Debugf("Block %s already exists", subnet.String())
		}
	}
	return nil, noFreeBlocksError("No Free Blocks")
}
|
||||
|
||||
// getPendingAffinity claims a pending affinity for the given host and subnet. The affinity can then
// be used to claim a block. If an affinity already exists, it will return that affinity
// (marking it pending first unless it is already confirmed).
func (rw blockReaderWriter) getPendingAffinity(ctx context.Context, host string, subnet cnet.IPNet) (*model.KVPair, error) {
	logCtx := log.WithFields(log.Fields{"host": host, "subnet": subnet})
	logCtx.Info("Trying to create affinity in pending state")
	obj := model.KVPair{
		Key:   model.BlockAffinityKey{Host: host, CIDR: subnet},
		Value: &model.BlockAffinity{State: model.StatePending},
	}
	aff, err := rw.client.Create(ctx, &obj)
	if err != nil {
		// Anything other than "already exists" is a real failure.
		if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
			logCtx.WithError(err).Error("Failed to claim affinity")
			return nil, err
		}
		logCtx.Info("Block affinity already exists, getting existing affinity")

		// Get the existing affinity.
		aff, err = rw.queryAffinity(ctx, host, subnet, "")
		if err != nil {
			logCtx.WithError(err).Error("Failed to get existing affinity")
			return nil, err
		}
		logCtx.Info("Got existing affinity")

		// If the affinity has not been confirmed already, mark it as pending.
		if aff.Value.(*model.BlockAffinity).State != model.StateConfirmed {
			logCtx.Infof("Marking existing affinity with current state %s as pending", aff.Value.(*model.BlockAffinity).State)
			aff.Value.(*model.BlockAffinity).State = model.StatePending
			return rw.updateAffinity(ctx, aff)
		}
		logCtx.Info("Existing affinity is already confirmed")
		return aff, nil
	}
	logCtx.Infof("Successfully created pending affinity for block")
	return aff, nil
}
|
||||
|
||||
// claimAffineBlock claims the provided block using the given pending affinity. If successful, it will confirm the affinity. If another host
|
||||
// steals the block, claimAffineBlock will attempt to delete the provided pending affinity.
|
||||
func (rw blockReaderWriter) claimAffineBlock(ctx context.Context, aff *model.KVPair, config IPAMConfig) (*model.KVPair, error) {
|
||||
// Pull out relevant fields.
|
||||
subnet := aff.Key.(model.BlockAffinityKey).CIDR
|
||||
host := aff.Key.(model.BlockAffinityKey).Host
|
||||
logCtx := log.WithFields(log.Fields{"host": host, "subnet": subnet})
|
||||
|
||||
// Create the new block.
|
||||
affinityKeyStr := "host:" + host
|
||||
block := newBlock(subnet)
|
||||
block.Affinity = &affinityKeyStr
|
||||
block.StrictAffinity = config.StrictAffinity
|
||||
|
||||
// Create the new block in the datastore.
|
||||
o := model.KVPair{
|
||||
Key: model.BlockKey{CIDR: block.CIDR},
|
||||
Value: block.AllocationBlock,
|
||||
}
|
||||
logCtx.Info("Attempting to create a new block")
|
||||
kvp, err := rw.client.Create(ctx, &o)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
|
||||
// Block already exists, check affinity.
|
||||
logCtx.Info("The block already exists, getting it from data store")
|
||||
obj, err := rw.queryBlock(ctx, subnet, "")
|
||||
if err != nil {
|
||||
// We failed to create the block, but the affinity still exists. We don't know
|
||||
// if someone else beat us to the block since we can't get it.
|
||||
logCtx.WithError(err).Errorf("Error reading block")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Pull out the allocationBlock object.
|
||||
b := allocationBlock{obj.Value.(*model.AllocationBlock)}
|
||||
|
||||
if b.Affinity != nil && *b.Affinity == affinityKeyStr {
|
||||
// Block has affinity to this host, meaning another
|
||||
// process on this host claimed it. Confirm the affinity
|
||||
// and return the existing block.
|
||||
logCtx.Info("Block is already claimed by this host, confirm the affinity")
|
||||
if _, err := rw.confirmAffinity(ctx, aff); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Some other host beat us to this block. Cleanup and return an error.
|
||||
log.Info("Block is owned by another host, delete our pending affinity")
|
||||
if err = rw.deleteAffinity(ctx, aff); err != nil {
|
||||
// Failed to clean up our claim to this block.
|
||||
logCtx.WithError(err).Errorf("Error deleting block affinity")
|
||||
}
|
||||
return nil, errBlockClaimConflict{Block: b}
|
||||
}
|
||||
logCtx.WithError(err).Warningf("Problem creating block while claiming block")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We've successfully claimed the block - confirm the affinity.
|
||||
log.Info("Successfully created block")
|
||||
if _, err = rw.confirmAffinity(ctx, aff); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
// confirmAffinity transitions the given affinity to the confirmed state in the
// datastore, indicating this host definitively owns the associated block. If the
// update fails but another process has already confirmed the same affinity, that
// confirmed affinity is returned instead of an error.
func (rw blockReaderWriter) confirmAffinity(ctx context.Context, aff *model.KVPair) (*model.KVPair, error) {
	host := aff.Key.(model.BlockAffinityKey).Host
	cidr := aff.Key.(model.BlockAffinityKey).CIDR
	logCtx := log.WithFields(log.Fields{"host": host, "subnet": cidr})
	logCtx.Info("Confirming affinity")
	// Mutate the in-memory value before the CAS-style update below.
	aff.Value.(*model.BlockAffinity).State = model.StateConfirmed
	confirmed, err := rw.updateAffinity(ctx, aff)
	if err != nil {
		// We couldn't confirm the block - check to see if it was confirmed by
		// another process.
		kvp, err2 := rw.queryAffinity(ctx, host, cidr, "")
		if err2 == nil && kvp.Value.(*model.BlockAffinity).State == model.StateConfirmed {
			// Confirmed by someone else - we can use this.
			logCtx.Info("Affinity is already confirmed")
			return kvp, nil
		}
		// Neither our update nor anyone else's confirmation succeeded; report the
		// original update error (not err2).
		logCtx.WithError(err).Error("Failed to confirm block affinity")
		return nil, err
	}
	logCtx.Info("Successfully confirmed affinity")
	return confirmed, nil
}
|
||||
|
||||
// releaseBlockAffinity releases the host's affinity to the given block, and returns an affinityClaimedError if
|
||||
// the host does not claim an affinity for the block.
|
||||
func (rw blockReaderWriter) releaseBlockAffinity(ctx context.Context, host string, blockCIDR cnet.IPNet, requireEmpty bool) error {
|
||||
// Make sure hostname is not empty.
|
||||
if host == "" {
|
||||
log.Errorf("Hostname can't be empty")
|
||||
return errors.New("Hostname must be sepcified to release block affinity")
|
||||
}
|
||||
|
||||
// Read the model.KVPair containing the block affinity.
|
||||
logCtx := log.WithFields(log.Fields{"host": host, "subnet": blockCIDR.String()})
|
||||
logCtx.Debugf("Attempt to release affinity for block")
|
||||
aff, err := rw.queryAffinity(ctx, host, blockCIDR, "")
|
||||
if err != nil {
|
||||
logCtx.WithError(err).Errorf("Error getting block affinity %s", blockCIDR.String())
|
||||
return err
|
||||
}
|
||||
|
||||
// Read the model.KVPair containing the block
|
||||
// and pull out the allocationBlock object. We need to hold on to this
|
||||
// so that we can pass it back to the datastore on Update.
|
||||
obj, err := rw.queryBlock(ctx, blockCIDR, "")
|
||||
if err != nil {
|
||||
logCtx.WithError(err).Warnf("Error getting block")
|
||||
return err
|
||||
}
|
||||
b := allocationBlock{obj.Value.(*model.AllocationBlock)}
|
||||
|
||||
// Check that the block affinity matches the given affinity.
|
||||
if b.Affinity != nil && !hostAffinityMatches(host, b.AllocationBlock) {
|
||||
// This means the affinity is stale - we can delete it.
|
||||
logCtx.Errorf("Mismatched affinity: %s != %s - try to delete stale affinity", *b.Affinity, "host:"+host)
|
||||
if err := rw.deleteAffinity(ctx, aff); err != nil {
|
||||
logCtx.Warn("Failed to delete stale affinity")
|
||||
}
|
||||
return errBlockClaimConflict{Block: b}
|
||||
}
|
||||
|
||||
// Don't release block affinity if we require it to be empty and it's not empty.
|
||||
if requireEmpty && !b.empty() {
|
||||
logCtx.Info("Block must be empty but is not empty, refusing to remove affinity.")
|
||||
return errBlockNotEmpty{Block: b}
|
||||
}
|
||||
|
||||
// Mark the affinity as pending deletion.
|
||||
aff.Value.(*model.BlockAffinity).State = model.StatePendingDeletion
|
||||
aff, err = rw.updateAffinity(ctx, aff)
|
||||
if err != nil {
|
||||
logCtx.WithError(err).Warnf("Failed to mark block affinity as pending deletion")
|
||||
return err
|
||||
}
|
||||
|
||||
if b.empty() {
|
||||
// If the block is empty, we can delete it.
|
||||
logCtx.Debug("Block is empty - delete it")
|
||||
err := rw.deleteBlock(ctx, obj)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
logCtx.WithError(err).Error("Error deleting block")
|
||||
return err
|
||||
}
|
||||
logCtx.Debug("Block has already been deleted, carry on")
|
||||
}
|
||||
} else {
|
||||
// Otherwise, we need to remove affinity from it.
|
||||
// This prevents the host from automatically assigning
|
||||
// from this block unless we're allowed to overflow into
|
||||
// non-affine blocks.
|
||||
logCtx.Debug("Block is not empty - remove the affinity")
|
||||
b.Affinity = nil
|
||||
|
||||
// Pass back the original KVPair with the new
|
||||
// block information so we can do a CAS.
|
||||
obj.Value = b.AllocationBlock
|
||||
_, err = rw.updateBlock(ctx, obj)
|
||||
if err != nil {
|
||||
logCtx.WithError(err).Error("Failed to remove affinity from block")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We've removed / updated the block, so perform a compare-and-delete on the BlockAffinity.
|
||||
if err := rw.deleteAffinity(ctx, aff); err != nil {
|
||||
// Return the error unless the affinity didn't exist.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
logCtx.Errorf("Error deleting block affinity: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryAffinity gets an affinity for the given host + CIDR key.
|
||||
func (rw blockReaderWriter) queryAffinity(ctx context.Context, host string, cidr cnet.IPNet, revision string) (*model.KVPair, error) {
|
||||
return rw.client.Get(ctx, model.BlockAffinityKey{Host: host, CIDR: cidr}, revision)
|
||||
}
|
||||
|
||||
// updateAffinity updates the given affinity.
|
||||
func (rw blockReaderWriter) updateAffinity(ctx context.Context, aff *model.KVPair) (*model.KVPair, error) {
|
||||
return rw.client.Update(ctx, aff)
|
||||
}
|
||||
|
||||
// deleteAffinity deletes the given affinity.
|
||||
func (rw blockReaderWriter) deleteAffinity(ctx context.Context, aff *model.KVPair) error {
|
||||
_, err := rw.client.DeleteKVP(ctx, aff)
|
||||
return err
|
||||
}
|
||||
|
||||
// queryBlock gets a block for the given block CIDR key.
|
||||
func (rw blockReaderWriter) queryBlock(ctx context.Context, blockCIDR cnet.IPNet, revision string) (*model.KVPair, error) {
|
||||
return rw.client.Get(ctx, model.BlockKey{CIDR: blockCIDR}, revision)
|
||||
}
|
||||
|
||||
// updateBlock updates the given block.
|
||||
func (rw blockReaderWriter) updateBlock(ctx context.Context, b *model.KVPair) (*model.KVPair, error) {
|
||||
return rw.client.Update(ctx, b)
|
||||
}
|
||||
|
||||
// deleteBlock deletes the given block.
|
||||
func (rw blockReaderWriter) deleteBlock(ctx context.Context, b *model.KVPair) error {
|
||||
_, err := rw.client.DeleteKVP(ctx, b)
|
||||
return err
|
||||
}
|
||||
|
||||
// queryHandle gets a handle for the given handleID key.
|
||||
func (rw blockReaderWriter) queryHandle(ctx context.Context, handleID, revision string) (*model.KVPair, error) {
|
||||
return rw.client.Get(ctx, model.IPAMHandleKey{HandleID: handleID}, revision)
|
||||
}
|
||||
|
||||
// updateHandle updates the given handle.
|
||||
func (rw blockReaderWriter) updateHandle(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return rw.client.Update(ctx, kvp)
|
||||
}
|
||||
|
||||
// deleteHandle deletes the given handle.
|
||||
func (rw blockReaderWriter) deleteHandle(ctx context.Context, kvp *model.KVPair) error {
|
||||
_, err := rw.client.DeleteKVP(ctx, kvp)
|
||||
return err
|
||||
}
|
||||
|
||||
// getPoolForIP returns the pool if the given IP is within a configured
|
||||
// Calico pool, and nil otherwise.
|
||||
func (rw blockReaderWriter) getPoolForIP(ip cnet.IP, enabledPools []v3.IPPool) (*v3.IPPool, error) {
|
||||
if enabledPools == nil {
|
||||
var err error
|
||||
enabledPools, err = rw.pools.GetEnabledPools(ip.Version())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, p := range enabledPools {
|
||||
// Compare any enabled pools.
|
||||
_, pool, err := cnet.ParseCIDR(p.Spec.CIDR)
|
||||
if err != nil {
|
||||
fields := log.Fields{"pool": p.Name, "cidr": p.Spec.CIDR}
|
||||
log.WithError(err).WithFields(fields).Warn("Pool has invalid CIDR")
|
||||
} else if pool.Contains(ip.IP) {
|
||||
return &p, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Generator to get list of block CIDRs which
|
||||
// fall within the given cidr. The passed in pool
|
||||
// must contain the passed in block cidr.
|
||||
// Returns nil when no more blocks can be generated.
|
||||
func blockGenerator(pool *v3.IPPool, cidr cnet.IPNet) func() *cnet.IPNet {
|
||||
ip := cnet.IP{IP: cidr.IP}
|
||||
|
||||
var blockMask net.IPMask
|
||||
if ip.Version() == 4 {
|
||||
blockMask = net.CIDRMask(pool.Spec.BlockSize, 32)
|
||||
} else {
|
||||
blockMask = net.CIDRMask(pool.Spec.BlockSize, 128)
|
||||
}
|
||||
|
||||
ones, size := blockMask.Size()
|
||||
blockSize := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(size-ones)), nil)
|
||||
|
||||
return func() *cnet.IPNet {
|
||||
returnIP := ip
|
||||
|
||||
if cidr.Contains(ip.IP) {
|
||||
ipnet := net.IPNet{IP: returnIP.IP, Mask: blockMask}
|
||||
cidr := cnet.IPNet{IPNet: ipnet}
|
||||
ip = cnet.IncrementIP(ip, blockSize)
|
||||
return &cidr
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func determineSeed(mask net.IPMask, hostname string) int64 {
|
||||
if ones, bits := mask.Size(); ones == bits {
|
||||
// For small blocks, we don't care about the same host picking the same
|
||||
// block, so just use a seed based on timestamp. This optimization reduces
|
||||
// the number of reads required to find an unclaimed block on a host.
|
||||
return time.Now().UTC().UnixNano()
|
||||
}
|
||||
|
||||
// Create a random number generator seed based on the hostname.
|
||||
// This is to avoid assigning multiple blocks when multiple
|
||||
// workloads request IPs around the same time.
|
||||
hostHash := fnv.New32()
|
||||
hostHash.Write([]byte(hostname))
|
||||
return int64(hostHash.Sum32())
|
||||
}
|
||||
|
||||
// Returns a generator that, when called, returns a random
|
||||
// block from the given pool. When there are no blocks left,
|
||||
// the it returns nil.
|
||||
func randomBlockGenerator(ipPool v3.IPPool, hostName string) func() *cnet.IPNet {
|
||||
_, pool, err := cnet.ParseCIDR(ipPool.Spec.CIDR)
|
||||
if err != nil {
|
||||
log.Errorf("Error parsing CIDR: %s %v", ipPool.Spec.CIDR, err)
|
||||
return func() *cnet.IPNet { return nil }
|
||||
}
|
||||
|
||||
// Determine the IP type to use.
|
||||
baseIP := cnet.IP{IP: pool.IP}
|
||||
version := getIPVersion(baseIP)
|
||||
var blockMask net.IPMask
|
||||
if version == 4 {
|
||||
blockMask = net.CIDRMask(ipPool.Spec.BlockSize, 32)
|
||||
} else {
|
||||
blockMask = net.CIDRMask(ipPool.Spec.BlockSize, 128)
|
||||
}
|
||||
|
||||
// Determine the number of blocks within this pool.
|
||||
ones, size := pool.Mask.Size()
|
||||
numIP := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(size-ones)), nil)
|
||||
|
||||
ones, size = blockMask.Size()
|
||||
blockSize := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(size-ones)), nil)
|
||||
|
||||
numBlocks := new(big.Int)
|
||||
numBlocks.Div(numIP, blockSize)
|
||||
|
||||
// Build a random number generator.
|
||||
seed := determineSeed(blockMask, hostName)
|
||||
randm := rand.New(rand.NewSource(seed))
|
||||
|
||||
// initialIndex keeps track of the random starting point
|
||||
initialIndex := new(big.Int)
|
||||
initialIndex.Rand(randm, numBlocks)
|
||||
|
||||
// i keeps track of current index while walking the blocks in a pool
|
||||
i := initialIndex
|
||||
|
||||
// numReturned keeps track of number of blocks returned
|
||||
numReturned := big.NewInt(0)
|
||||
|
||||
// numDiff = numBlocks - i
|
||||
numDiff := new(big.Int)
|
||||
|
||||
return func() *cnet.IPNet {
|
||||
// The `big.NewInt(0)` part creates a temp variable and assigns the result of multiplication of `i` and `big.NewInt(blockSize)`
|
||||
// Note: we are not using `i.Mul()` because that will assign the result of the multiplication to `i`, which will cause unexpected issues
|
||||
ip := cnet.IncrementIP(baseIP, big.NewInt(0).Mul(i, blockSize))
|
||||
|
||||
ipnet := net.IPNet{ip.IP, blockMask}
|
||||
|
||||
numDiff.Sub(numBlocks, i)
|
||||
|
||||
if numDiff.Cmp(big.NewInt(1)) <= 0 {
|
||||
// Index has reached end of the blocks;
|
||||
// Loop back to beginning of pool rather than
|
||||
// increment, because incrementing would put us outside of the pool.
|
||||
i = big.NewInt(0)
|
||||
} else {
|
||||
// Increment to the next block
|
||||
i.Add(i, big.NewInt(1))
|
||||
}
|
||||
|
||||
if numReturned.Cmp(numBlocks) >= 0 {
|
||||
// Index finished one full circle across the blocks
|
||||
// Used all of the blocks in this pool.
|
||||
return nil
|
||||
}
|
||||
numReturned.Add(numReturned, big.NewInt(1))
|
||||
|
||||
// Return the block from this pool that corresponds with the index.
|
||||
return &cnet.IPNet{ipnet}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the block for a given IP (without needing a pool)
|
||||
func (rw blockReaderWriter) getBlockForIP(ctx context.Context, ip cnet.IP) (*cnet.IPNet, error) {
|
||||
// Lookup all blocks by providing an empty BlockListOptions to the List operation.
|
||||
opts := model.BlockListOptions{IPVersion: ip.Version()}
|
||||
datastoreObjs, err := rw.client.List(ctx, opts, "")
|
||||
if err != nil {
|
||||
log.Errorf("Error getting affine blocks: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Iterate through and extract the block CIDRs.
|
||||
for _, o := range datastoreObjs.KVPairs {
|
||||
k := o.Key.(model.BlockKey)
|
||||
if k.CIDR.IPNet.Contains(ip.IP) {
|
||||
log.Debugf("Found IP %s in block %s", ip.String(), k.String())
|
||||
return &k.CIDR, nil
|
||||
}
|
||||
}
|
||||
|
||||
// No blocks found.
|
||||
log.Debugf("IP %s could not be found in any blocks", ip.String())
|
||||
return nil, nil
|
||||
}
|
||||
73
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_errors.go
generated
vendored
73
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_errors.go
generated
vendored
@@ -1,73 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// invalidSizeError indicates that the requested IP network size is not valid.
type invalidSizeError string

// Error returns the message carried by the error.
func (e invalidSizeError) Error() string {
	return string(e)
}
|
||||
|
||||
// ipamConfigConflictError indicates an attempt to change IPAM configuration
// that conflicts with existing allocations.
type ipamConfigConflictError string

// Error returns the message carried by the error.
func (e ipamConfigConflictError) Error() string {
	return string(e)
}
|
||||
|
||||
// noFreeBlocksError indicates an attempt to claim a block
// when there are none available.
type noFreeBlocksError string

// Error returns the message carried by the error.
func (e noFreeBlocksError) Error() string {
	return string(e)
}
|
||||
|
||||
// errBlockClaimConflict indicates that a given block has already
|
||||
// been claimed by another host.
|
||||
type errBlockClaimConflict struct {
|
||||
Block allocationBlock
|
||||
}
|
||||
|
||||
func (e errBlockClaimConflict) Error() string {
|
||||
if e.Block.Affinity != nil {
|
||||
return fmt.Sprintf("%v already claimed by %v", e.Block.CIDR, *e.Block.Affinity)
|
||||
}
|
||||
return fmt.Sprintf("%v already claimed", e.Block.CIDR)
|
||||
}
|
||||
|
||||
// errBlockNotEmpty indicates that a block still contains allocated addresses
// and therefore cannot have its affinity released while emptiness is required.
// (The original comment here was a copy-paste of errBlockClaimConflict's.)
type errBlockNotEmpty struct {
	Block allocationBlock
}

// Error identifies the non-empty block by its CIDR.
func (e errBlockNotEmpty) Error() string {
	return fmt.Sprintf("block '%v' is not empty", e.Block.CIDR)
}
|
||||
|
||||
// errStaleAffinity indicates to the calling code that the given affinity
// is not confirmed, and that the corresponding block belongs to another host.
type errStaleAffinity string

// Error returns the message carried by the error.
func (e errStaleAffinity) Error() string {
	return string(e)
}
|
||||
65
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_handle.go
generated
vendored
65
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_handle.go
generated
vendored
@@ -1,65 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// allocationHandle wraps a model.IPAMHandle, adding helpers to maintain the
// per-block allocation counts (h.Block maps block CIDR string -> count).
type allocationHandle struct {
	*model.IPAMHandle
}
|
||||
|
||||
func (h allocationHandle) incrementBlock(blockCidr cnet.IPNet, num int) int {
|
||||
blockId := blockCidr.String()
|
||||
newNum := num
|
||||
if val, ok := h.Block[blockId]; ok {
|
||||
// An entry exists for this block, increment the number
|
||||
// of allocations.
|
||||
newNum = val + num
|
||||
}
|
||||
h.Block[blockId] = newNum
|
||||
return newNum
|
||||
}
|
||||
|
||||
func (h allocationHandle) decrementBlock(blockCidr cnet.IPNet, num int) (*int, error) {
|
||||
blockId := blockCidr.String()
|
||||
if current, ok := h.Block[blockId]; !ok {
|
||||
// This entry doesn't exist.
|
||||
errStr := fmt.Sprintf("Tried to decrement block %s by %v but it isn't linked to handle %s", blockId, num, h.HandleID)
|
||||
return nil, errors.New(errStr)
|
||||
} else {
|
||||
newNum := current - num
|
||||
if newNum < 0 {
|
||||
errStr := fmt.Sprintf("Tried to decrement block %s by %v but it only has %v addresses on handle %s", blockId, num, current, h.HandleID)
|
||||
return nil, errors.New(errStr)
|
||||
}
|
||||
|
||||
if newNum == 0 {
|
||||
delete(h.Block, blockId)
|
||||
} else {
|
||||
h.Block[blockId] = newNum
|
||||
}
|
||||
return &newNum, nil
|
||||
}
|
||||
}
|
||||
|
||||
// empty reports whether this handle tracks no block allocations at all,
// in which case the handle itself can be deleted.
func (h allocationHandle) empty() bool {
	return len(h.Block) == 0
}
|
||||
119
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_types.go
generated
vendored
119
vendor/github.com/projectcalico/libcalico-go/lib/ipam/ipam_types.go
generated
vendored
@@ -1,119 +0,0 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
// AssignIPArgs defines the set of arguments for assigning a specific IP address.
type AssignIPArgs struct {
	// The IP address to assign.
	IP cnet.IP

	// If specified, a handle which can be used to retrieve / release
	// the allocated IP addresses in the future.
	HandleID *string

	// A key/value mapping of metadata to store with the allocations.
	Attrs map[string]string

	// If specified, the hostname of the host on which IP addresses
	// will be allocated. If not specified, this will default
	// to the value provided by os.Hostname.
	Hostname string
}
|
||||
|
||||
// AutoAssignArgs defines the set of arguments for automatically assigning one
// or more IP addresses.
type AutoAssignArgs struct {
	// The number of IPv4 addresses to automatically assign.
	Num4 int

	// The number of IPv6 addresses to automatically assign.
	Num6 int

	// If specified, a handle which can be used to retrieve / release
	// the allocated IP addresses in the future.
	HandleID *string

	// A key/value mapping of metadata to store with the allocations.
	Attrs map[string]string

	// If specified, the hostname of the host on which IP addresses
	// will be allocated. If not specified, this will default
	// to the value provided by os.Hostname.
	Hostname string

	// If specified, the previously configured IPv4 pools from which
	// to assign IPv4 addresses. If not specified, this defaults to all IPv4 pools.
	IPv4Pools []cnet.IPNet

	// If specified, the previously configured IPv6 pools from which
	// to assign IPv6 addresses. If not specified, this defaults to all IPv6 pools.
	IPv6Pools []cnet.IPNet

	// If non-zero, limit on the number of affine blocks this host is allowed to claim
	// (per IP version).
	MaxBlocksPerHost int
}
|
||||
|
||||
// IPAMConfig contains global configuration options for Calico IPAM.
// This IPAM configuration is stored in the datastore and configures the behavior
// of Calico IPAM across an entire Calico cluster.
type IPAMConfig struct {
	// When StrictAffinity is true, addresses from a given block can only be
	// assigned by hosts with the block's affinity. If false, then AutoAllocateBlocks
	// must be true. The default value is false.
	StrictAffinity bool

	// When AutoAllocateBlocks is true, Calico will automatically
	// allocate blocks of IP address to hosts as needed to assign addresses.
	// If false, then StrictAffinity must be true. The default value is true.
	AutoAllocateBlocks bool
}
|
||||
|
||||
// GetUtilizationArgs defines the set of arguments for requesting IP utilization.
type GetUtilizationArgs struct {
	// If specified, the pools whose utilization should be reported. Each string here
	// can be a pool name or CIDR. If not specified, this defaults to all pools.
	Pools []string
}
|
||||
|
||||
// BlockUtilization reports IP utilization for a single allocation block.
type BlockUtilization struct {
	// This block's CIDR.
	CIDR net.IPNet

	// Number of possible IPs in this block.
	Capacity int

	// Number of available (unallocated) IPs in this block.
	Available int
}
|
||||
|
||||
// PoolUtilization reports IP utilization for a single IP pool,
// broken down per allocation block.
type PoolUtilization struct {
	// This pool's name.
	Name string

	// This pool's CIDR.
	CIDR net.IPNet

	// Utilization for each of this pool's blocks.
	Blocks []BlockUtilization
}
|
||||
24
vendor/github.com/projectcalico/libcalico-go/lib/ipam/pools.go
generated
vendored
24
vendor/github.com/projectcalico/libcalico-go/lib/ipam/pools.go
generated
vendored
@@ -1,24 +0,0 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package ipam
|
||||
|
||||
import v3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
// PoolAccessorInterface is the interface used to access the configured IPPools.
type PoolAccessorInterface interface {
	// GetEnabledPools returns a list of enabled pools for the given IP version,
	// sorted in alphanumeric name order.
	GetEnabledPools(ipVersion int) ([]v3.IPPool, error)
	// GetAllPools returns a list of all pools sorted in alphanumeric name order.
	GetAllPools() ([]v3.IPPool, error)
}
|
||||
24
vendor/github.com/projectcalico/libcalico-go/lib/options/delete.go
generated
vendored
24
vendor/github.com/projectcalico/libcalico-go/lib/options/delete.go
generated
vendored
@@ -1,24 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package options
|
||||
|
||||
// DeleteOptions is the standard options for deleting a resource through the Calico API.
type DeleteOptions struct {
	// ResourceVersion constrains the delete to a specific revision.
	// When specified:
	// - if unset, then the result is returned from remote storage based on quorum-read flag;
	// - if set to non zero, then the result is at least as fresh as given rv.
	// +optional
	ResourceVersion string
}
|
||||
24
vendor/github.com/projectcalico/libcalico-go/lib/options/get.go
generated
vendored
24
vendor/github.com/projectcalico/libcalico-go/lib/options/get.go
generated
vendored
@@ -1,24 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package options
|
||||
|
||||
// GetOptions is the standard query options for getting a resource through the Calico API.
type GetOptions struct {
	// ResourceVersion constrains the read to a specific revision.
	// When specified:
	// - if unset, then the result is returned from remote storage based on quorum-read flag;
	// - if set to non zero, then the result is at least as fresh as given rv.
	// +optional
	ResourceVersion string
}
|
||||
39
vendor/github.com/projectcalico/libcalico-go/lib/options/listwatch.go
generated
vendored
39
vendor/github.com/projectcalico/libcalico-go/lib/options/listwatch.go
generated
vendored
@@ -1,39 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package options
|
||||
|
||||
// ListOptions is the query options for a List or Watch operation in the Calico API.
type ListOptions struct {
	// The namespace of the resource to List or Watch. If blank, the list or watch wildcards
	// the namespace. Only used for namespaced resource types.
	Namespace string

	// The name of the resource to List or Watch. If blank, the list or watch wildcards
	// the name.
	Name string

	// The resource version to List or Watch from.
	// When specified for list:
	// - if unset, then the result is returned from remote storage based on quorum-read flag;
	// - if set to non zero, then the result is at least as fresh as given rv.
	// +optional
	ResourceVersion string

	// Whether the Name specified is a prefix rather than the full name. This is fully supported
	// for etcdv3, and is supported in a very limited fashion in KDD for WorkloadEndpoints only
	// as a mechanism for enumerating endpoints within a Pod (since the name construction for a
	// Workload endpoint is hierarchically constructed).
	Prefix bool
}
|
||||
25
vendor/github.com/projectcalico/libcalico-go/lib/options/set.go
generated
vendored
25
vendor/github.com/projectcalico/libcalico-go/lib/options/set.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package options
|
||||
|
||||
import "time"
|
||||
|
||||
// SetOptions is the standard options for Create/Update actions on the Calico
// API.
type SetOptions struct {
	// TTL is the time-to-live for the datastore entry; after it expires the
	// entry is removed by the datastore. Zero means no expiry.
	// +optional
	TTL time.Duration
}
|
||||
146
vendor/github.com/projectcalico/libcalico-go/lib/set/set.go
generated
vendored
146
vendor/github.com/projectcalico/libcalico-go/lib/set/set.go
generated
vendored
@@ -1,146 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package set
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Set is an unordered collection of unique items.
type Set interface {
	// Len returns the number of items in the set.
	Len() int
	// Add inserts the given item into the set; a no-op if already present.
	Add(interface{})
	// AddAll inserts every element of the given slice or array into the set.
	AddAll(itemArray interface{})
	// Discard removes the given item from the set; a no-op if not present.
	Discard(interface{})
	// Clear removes all items from the set.
	Clear()
	// Contains reports whether the given item is in the set.
	Contains(interface{}) bool
	// Iter calls the visitor for each item; the visitor's return value can
	// stop the iteration or remove the current item (see StopIteration and
	// RemoveItem).
	Iter(func(item interface{}) error)
	// Copy returns a new Set with the same members.
	Copy() Set
	// Equals reports whether both sets contain exactly the same members.
	Equals(Set) bool
	// ContainsAll reports whether every member of the other set is in this one.
	ContainsAll(Set) bool
}

// empty is a zero-size placeholder used as the map value type in mapSet.
type empty struct{}

// emptyValue is the single value stored against every key in a mapSet.
var emptyValue = empty{}

var (
	// StopIteration, when returned by an Iter visitor, halts the iteration early.
	StopIteration = errors.New("Stop iteration")
	// RemoveItem, when returned by an Iter visitor, deletes the current item
	// from the set and continues iterating.
	RemoveItem = errors.New("Remove item")
)
|
||||
|
||||
func New() Set {
|
||||
return make(mapSet)
|
||||
}
|
||||
|
||||
func From(members ...interface{}) Set {
|
||||
s := New()
|
||||
s.AddAll(members)
|
||||
return s
|
||||
}
|
||||
|
||||
func FromArray(membersArray interface{}) Set {
|
||||
s := New()
|
||||
s.AddAll(membersArray)
|
||||
return s
|
||||
}
|
||||
|
||||
// Empty returns a shared, read-only empty Set backed by a nil map.
// Reads (Len, Contains, Iter, ...) are safe, and Discard/Clear are no-ops,
// but calling Add or AddAll on the returned set will panic (writing to a nil
// map); use New() when a mutable set is needed.
func Empty() Set {
	return mapSet(nil)
}
|
||||
|
||||
// mapSet is the map-backed implementation of Set; keys are the members and
// the zero-size empty struct is the value.
type mapSet map[interface{}]empty

// Len returns the number of items in the set.
func (set mapSet) Len() int {
	return len(set)
}

// Add inserts item into the set; a no-op if it is already present.
func (set mapSet) Add(item interface{}) {
	set[item] = emptyValue
}
|
||||
|
||||
func (set mapSet) AddAll(itemArray interface{}) {
|
||||
|
||||
arrVal := reflect.ValueOf(itemArray)
|
||||
for i := 0; i < arrVal.Len(); i++ {
|
||||
set.Add(arrVal.Index(i).Interface())
|
||||
}
|
||||
}
|
||||
|
||||
// Discard removes item from the set; a no-op if it is not present.
func (set mapSet) Discard(item interface{}) {
	delete(set, item)
}
|
||||
|
||||
// Clear removes every item from the set in place (the map itself is retained,
// so other holders of the same mapSet see the cleared state).
func (set mapSet) Clear() {
	// Deleting entries while ranging over the same map is well-defined in Go.
	for item := range set {
		delete(set, item)
	}
}
|
||||
|
||||
// Contains reports whether item is a member of the set.
func (set mapSet) Contains(item interface{}) bool {
	_, present := set[item]
	return present
}
|
||||
|
||||
// Iter calls visitor once per item in the set (in unspecified map order).
// The visitor's return value controls the iteration: StopIteration halts it,
// RemoveItem deletes the current item and continues, nil continues, and any
// other error causes a panic (via the logger).
func (set mapSet) Iter(visitor func(item interface{}) error) {
loop:
	for item := range set {
		err := visitor(item)
		switch err {
		case StopIteration:
			// Labeled break exits the range loop, not just the switch.
			break loop
		case RemoveItem:
			// Deleting while ranging over the same map is safe in Go.
			delete(set, item)
		case nil:
			break
		default:
			// Any other error is a programming bug in the visitor.
			log.WithError(err).Panic("Unexpected iteration error")
		}
	}
}
|
||||
|
||||
func (set mapSet) Copy() Set {
|
||||
cpy := New()
|
||||
for item := range set {
|
||||
cpy.Add(item)
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
func (set mapSet) Equals(other Set) bool {
|
||||
if set.Len() != other.Len() {
|
||||
return false
|
||||
}
|
||||
for item := range set {
|
||||
if !other.Contains(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (set mapSet) ContainsAll(other Set) bool {
|
||||
result := true
|
||||
other.Iter(func(item interface{}) error {
|
||||
if !set.Contains(item) {
|
||||
result = false
|
||||
return StopIteration
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return result
|
||||
}
|
||||
21
vendor/github.com/projectcalico/libcalico-go/lib/validator/v3/doc.go
generated
vendored
21
vendor/github.com/projectcalico/libcalico-go/lib/validator/v3/doc.go
generated
vendored
@@ -1,21 +0,0 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package v3 implements common field and structure validation that is
|
||||
used to validate the API structures supplied on the client interface, and
|
||||
is also used internally to validate the information stored in the backend
|
||||
datastore.
|
||||
*/
|
||||
package v3
|
||||
1206
vendor/github.com/projectcalico/libcalico-go/lib/validator/v3/validator.go
generated
vendored
1206
vendor/github.com/projectcalico/libcalico-go/lib/validator/v3/validator.go
generated
vendored
File diff suppressed because it is too large
Load Diff
72
vendor/github.com/projectcalico/libcalico-go/lib/watch/interface.go
generated
vendored
72
vendor/github.com/projectcalico/libcalico-go/lib/watch/interface.go
generated
vendored
@@ -1,72 +0,0 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// Interface can be implemented by anything that knows how to watch and report changes.
type Interface interface {
	// Stop stops watching. It closes the channel returned by ResultChan() and
	// releases any resources used by the watch.
	Stop()

	// ResultChan returns a channel which will receive all the events. If an
	// error occurs or Stop() is called, this channel will be closed, in which
	// case the watch should be completely cleaned up.
	ResultChan() <-chan Event
}
|
||||
|
||||
// EventType defines the possible types of events.
type EventType string

const (
	// Event type:
	// Added
	// * a new Object has been added. If the Watcher does not have a specific
	//   ResourceVersion to watch from, existing entries will first be listed
	//   and propagated as "Added" events.
	// Modified
	// * an Object has been modified.
	// Deleted
	// * an Object has been deleted
	// Error
	// * an error has occurred. If the error is terminating, the results channel
	//   will be closed.
	Added    EventType = "ADDED"
	Modified EventType = "MODIFIED"
	Deleted  EventType = "DELETED"
	Error    EventType = "ERROR"

	// DefaultChanSize is the default buffer size for the event results channel.
	DefaultChanSize int32 = 100
)
|
||||
|
||||
// Event represents a single event to a watched resource.
type Event struct {
	// Type is the kind of change this event describes (Added, Modified, ...).
	Type EventType

	// Previous is:
	// * If Type is Added, Error or Synced: nil
	// * If Type is Modified or Deleted: the previous state of the object
	// Object is:
	// * If Type is Added or Modified: the new state of the object.
	// * If Type is Deleted, Error or Synced: nil
	Previous runtime.Object
	Object   runtime.Object

	// Error holds the error, if EventType is Error; nil otherwise.
	Error error
}
|
||||
Reference in New Issue
Block a user