temp commit
This commit is contained in:
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgpconfig.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgpconfig.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
BGPConfigResourceName = "BGPConfigurations"
|
||||
BGPConfigCRDName = "bgpconfigurations.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewBGPConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: BGPConfigCRDName,
|
||||
resource: BGPConfigResourceName,
|
||||
description: "Calico BGP Configuration",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.BGPConfiguration{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindBGPConfiguration,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.BGPConfigurationList{}),
|
||||
resourceKind: apiv3.KindBGPConfiguration,
|
||||
}
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgppeer.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/bgppeer.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
BGPPeerResourceName = "BGPPeers"
|
||||
BGPPeerCRDName = "bgppeers.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewBGPPeerClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: BGPPeerCRDName,
|
||||
resource: BGPPeerResourceName,
|
||||
description: "Calico BGP Peers",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.BGPPeer{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindBGPPeer,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.BGPPeerList{}),
|
||||
resourceKind: apiv3.KindBGPPeer,
|
||||
}
|
||||
}
|
||||
80
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/client.go
generated
vendored
Normal file
80
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/client.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
// K8sResourceClient is the interface to the k8s datastore for CRUD operations
|
||||
// on an individual resource (one for each of the *model* types supported by
|
||||
// the K8s backend).
|
||||
//
|
||||
// Defining a separate client interface from api.Client allows the k8s-specific
|
||||
// client to diverge.
|
||||
type K8sResourceClient interface {
|
||||
// Create creates the object specified in the KVPair, which must not
|
||||
// already exist. On success, returns a KVPair for the object with
|
||||
// revision information filled-in.
|
||||
Create(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Update modifies the existing object specified in the KVPair.
|
||||
// On success, returns a KVPair for the object with revision
|
||||
// information filled-in. If the input KVPair has revision
|
||||
// information then the update only succeeds if the revision is still
|
||||
// current.
|
||||
Update(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Delete removes the object specified by the Key. If the call
|
||||
// contains revision information, the delete only succeeds if the
|
||||
// revision is still current.
|
||||
Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error)
|
||||
|
||||
// DeleteKVP removes the object specified by the KVPair. If the KVPair
|
||||
// contains revision information, the delete only succeeds if the
|
||||
// revision is still current.
|
||||
DeleteKVP(ctx context.Context, object *model.KVPair) (*model.KVPair, error)
|
||||
|
||||
// Get returns the object identified by the given key as a KVPair with
|
||||
// revision information.
|
||||
Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error)
|
||||
|
||||
// List returns a slice of KVPairs matching the input list options.
|
||||
// list should be passed one of the model.<Type>ListOptions structs.
|
||||
// Non-zero fields in the struct are used as filters.
|
||||
List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error)
|
||||
|
||||
// Watch returns a WatchInterface used for watching resources matching the
|
||||
// input list options.
|
||||
Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error)
|
||||
|
||||
// EnsureInitialized ensures that the backend is initialized
|
||||
// any ready to be used.
|
||||
EnsureInitialized() error
|
||||
}
|
||||
|
||||
// K8sNodeResourceClient extends the K8sResourceClient to add a helper method to
|
||||
// extract resources from the supplied K8s Node. This convenience interface is
|
||||
// expected to be removed in a future libcalico-go release.
|
||||
type K8sNodeResourceClient interface {
|
||||
K8sResourceClient
|
||||
ExtractResourcesFromNode(node *apiv1.Node) ([]*model.KVPair, error)
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/clusterinfo.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/clusterinfo.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
ClusterInfoResourceName = "ClusterInformations"
|
||||
ClusterInfoCRDName = "clusterinformations.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewClusterInfoClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: ClusterInfoCRDName,
|
||||
resource: ClusterInfoResourceName,
|
||||
description: "Calico Cluster Information",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.ClusterInformation{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindClusterInformation,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.ClusterInformationList{}),
|
||||
resourceKind: apiv3.KindClusterInformation,
|
||||
}
|
||||
}
|
||||
416
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/customresource.go
generated
vendored
Normal file
416
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/customresource.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
// customK8sResourceClient implements the K8sResourceClient interface and provides a generic
|
||||
// mechanism for a 1:1 mapping between a Calico Resource and an equivalent Kubernetes
|
||||
// custom resource type.
|
||||
type customK8sResourceClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
restClient *rest.RESTClient
|
||||
name string
|
||||
resource string
|
||||
description string
|
||||
k8sResourceType reflect.Type
|
||||
k8sResourceTypeMeta metav1.TypeMeta
|
||||
k8sListType reflect.Type
|
||||
namespaced bool
|
||||
resourceKind string
|
||||
versionconverter VersionConverter
|
||||
}
|
||||
|
||||
// VersionConverter converts v1 or v3 k8s resources into v3 resources.
|
||||
// For a v3 resource, the conversion should be a no-op.
|
||||
type VersionConverter interface {
|
||||
ConvertFromK8s(Resource) (Resource, error)
|
||||
}
|
||||
|
||||
// Create creates a new Custom K8s Resource instance in the k8s API from the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": kvp.Key,
|
||||
"Value": kvp.Value,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Create custom Kubernetes resource")
|
||||
|
||||
// Convert the KVPair to the K8s resource.
|
||||
resIn, err := c.convertKVPairToResource(kvp)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error creating resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send the update request using the REST interface.
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
namespace := kvp.Key.(model.ResourceKey).Namespace
|
||||
err = c.restClient.Post().
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Context(ctx).
|
||||
Resource(c.resource).
|
||||
Body(resIn).
|
||||
Do().Into(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error creating resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
|
||||
// Update the return data with the metadata populated by the (Kubernetes) datastore.
|
||||
kvp, err = c.convertResourceToKVPair(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error converting created K8s resource to Calico resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
// Update the revision information from the response.
|
||||
kvp.Revision = resOut.GetObjectMeta().GetResourceVersion()
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
// Update updates an existing Custom K8s Resource instance in the k8s API from the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": kvp.Key,
|
||||
"Value": kvp.Value,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Update custom Kubernetes resource")
|
||||
|
||||
// Create storage for the updated resource.
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
|
||||
var updateError error
|
||||
// Convert the KVPair to a K8s resource.
|
||||
resIn, err := c.convertKVPairToResource(kvp)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error updating resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send the update request using the name.
|
||||
name := resIn.GetObjectMeta().GetName()
|
||||
namespace := resIn.GetObjectMeta().GetNamespace()
|
||||
logContext = logContext.WithField("Name", name)
|
||||
logContext.Debug("Update resource by name")
|
||||
updateError = c.restClient.Put().
|
||||
Context(ctx).
|
||||
Resource(c.resource).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Body(resIn).
|
||||
Name(name).
|
||||
Do().Into(resOut)
|
||||
if updateError != nil {
|
||||
// Failed to update the resource.
|
||||
logContext.WithError(updateError).Error("Error updating resource")
|
||||
return nil, K8sErrorToCalico(updateError, kvp.Key)
|
||||
}
|
||||
|
||||
// Update the return data with the metadata populated by the (Kubernetes) datastore.
|
||||
kvp, err = c.convertResourceToKVPair(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error converting created K8s resource to Calico resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
// Success. Update the revision information from the response.
|
||||
kvp.Revision = resOut.GetObjectMeta().GetResourceVersion()
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
// Delete deletes an existing Custom K8s Resource instance in the k8s API using the supplied KVPair.
|
||||
func (c *customK8sResourceClient) Delete(ctx context.Context, k model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": k,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("Delete custom Kubernetes resource")
|
||||
|
||||
// Convert the Key to a resource name.
|
||||
name, err := c.keyToName(k)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error deleting resource")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
existing, err := c.Get(ctx, k, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namespace := k.(model.ResourceKey).Namespace
|
||||
|
||||
opts := &metav1.DeleteOptions{}
|
||||
if uid != nil {
|
||||
opts.Preconditions = &metav1.Preconditions{UID: uid}
|
||||
}
|
||||
|
||||
// Delete the resource using the name.
|
||||
logContext = logContext.WithField("Name", name)
|
||||
logContext.Debug("Send delete request by name")
|
||||
err = c.restClient.Delete().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Name(name).
|
||||
Body(opts).
|
||||
Do().
|
||||
Error()
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error deleting resource")
|
||||
return nil, K8sErrorToCalico(err, k)
|
||||
}
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
// Get gets an existing Custom K8s Resource instance in the k8s API using the supplied Key.
|
||||
func (c *customK8sResourceClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"Key": key,
|
||||
"Resource": c.resource,
|
||||
"Revision": revision,
|
||||
})
|
||||
logContext.Debug("Get custom Kubernetes resource")
|
||||
name, err := c.keyToName(key)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error getting resource")
|
||||
return nil, err
|
||||
}
|
||||
namespace := key.(model.ResourceKey).Namespace
|
||||
|
||||
// Add the name and namespace to the log context now that we know it, and query Kubernetes.
|
||||
logContext = logContext.WithFields(log.Fields{"Name": name, "Namespace": namespace})
|
||||
|
||||
logContext.Debug("Get custom Kubernetes resource by name")
|
||||
resOut := reflect.New(c.k8sResourceType).Interface().(Resource)
|
||||
err = c.restClient.Get().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Name(name).
|
||||
Do().Into(resOut)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Debug("Error getting resource")
|
||||
return nil, K8sErrorToCalico(err, key)
|
||||
}
|
||||
|
||||
return c.convertResourceToKVPair(resOut)
|
||||
}
|
||||
|
||||
// List lists configured Custom K8s Resource instances in the k8s API matching the
|
||||
// supplied ListInterface.
|
||||
func (c *customK8sResourceClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
logContext := log.WithFields(log.Fields{
|
||||
"ListInterface": list,
|
||||
"Resource": c.resource,
|
||||
})
|
||||
logContext.Debug("List Custom K8s Resource")
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
if revision != "" {
|
||||
return nil, errors.New("Cannot List this resource type specifying a ResourceVersion")
|
||||
}
|
||||
|
||||
// Attempt to convert the ListInterface to a Key. If possible, the parameters
|
||||
// indicate a fully qualified resource, and we'll need to use Get instead of
|
||||
// List.
|
||||
if key := c.listInterfaceToKey(list); key != nil {
|
||||
logContext.Debug("Performing List using Get")
|
||||
if kvp, err := c.Get(ctx, key, revision); err != nil {
|
||||
// The error will already be a Calico error type. Ignore
|
||||
// error that it doesn't exist - we'll return an empty
|
||||
// list.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
log.WithField("Resource", c.resource).WithError(err).Debug("Error listing resource")
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
} else {
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Since we are not performing an exact Get, Kubernetes will return a
|
||||
// list of resources.
|
||||
reslOut := reflect.New(c.k8sListType).Interface().(ResourceList)
|
||||
|
||||
// If it is a namespaced resource, then we'll need the namespace.
|
||||
namespace := list.(model.ResourceListOptions).Namespace
|
||||
|
||||
// Perform the request.
|
||||
err := c.restClient.Get().
|
||||
Context(ctx).
|
||||
NamespaceIfScoped(namespace, c.namespaced).
|
||||
Resource(c.resource).
|
||||
Do().Into(reslOut)
|
||||
if err != nil {
|
||||
// Don't return errors for "not found". This just
|
||||
// means there are no matching Custom K8s Resources, and we should return
|
||||
// an empty list.
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.WithError(err).Debug("Error listing resources")
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// We expect the list type to have an "Items" field that we can
|
||||
// iterate over.
|
||||
elem := reflect.ValueOf(reslOut).Elem()
|
||||
items := reflect.ValueOf(elem.FieldByName("Items").Interface())
|
||||
for idx := 0; idx < items.Len(); idx++ {
|
||||
res := items.Index(idx).Addr().Interface().(Resource)
|
||||
if kvp, err := c.convertResourceToKVPair(res); err == nil {
|
||||
kvps = append(kvps, kvp)
|
||||
} else {
|
||||
logContext.WithError(err).WithField("Item", res).Warning("unable to process resource, skipping")
|
||||
}
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: reslOut.GetListMeta().GetResourceVersion(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
// Build watch options to pass to k8s.
|
||||
opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
|
||||
rlo, ok := list.(model.ResourceListOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
|
||||
}
|
||||
fieldSelector := fields.Everything()
|
||||
if len(rlo.Name) != 0 {
|
||||
// We've been asked to watch a specific customresource.
|
||||
log.WithField("name", rlo.Name).Debug("Watching a single customresource")
|
||||
fieldSelector = fields.OneTermEqualSelector("metadata.name", rlo.Name)
|
||||
}
|
||||
|
||||
k8sWatchClient := cache.NewListWatchFromClient(c.restClient, c.resource, rlo.Namespace, fieldSelector)
|
||||
k8sWatch, err := k8sWatchClient.WatchFunc(opts)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
toKVPair := func(r Resource) (*model.KVPair, error) {
|
||||
return c.convertResourceToKVPair(r)
|
||||
}
|
||||
|
||||
return newK8sWatcherConverter(ctx, rlo.Kind+" (custom)", toKVPair, k8sWatch), nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op since the CRD should be
|
||||
// initialized in advance.
|
||||
func (c *customK8sResourceClient) EnsureInitialized() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) listInterfaceToKey(l model.ListInterface) model.Key {
|
||||
pl := l.(model.ResourceListOptions)
|
||||
key := model.ResourceKey{Name: pl.Name, Kind: pl.Kind}
|
||||
|
||||
if c.namespaced && pl.Namespace != "" {
|
||||
key.Namespace = pl.Namespace
|
||||
}
|
||||
|
||||
if pl.Name != "" {
|
||||
return key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) keyToName(k model.Key) (string, error) {
|
||||
return k.(model.ResourceKey).Name, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) nameToKey(name string) (model.Key, error) {
|
||||
return model.ResourceKey{
|
||||
Name: name,
|
||||
Kind: c.resourceKind,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) convertResourceToKVPair(r Resource) (*model.KVPair, error) {
|
||||
var err error
|
||||
|
||||
// If the resource has a VersionConverter defined then pass the resource through
|
||||
// the VersionConverter to convert the resource version from v1 to v3.
|
||||
// No-op for a v3 resource.
|
||||
if c.versionconverter != nil {
|
||||
if r, err = c.versionconverter.ConvertFromK8s(r); err != nil {
|
||||
return nil, fmt.Errorf("error converting resource from v1 to v3: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
r.GetObjectKind().SetGroupVersionKind(c.k8sResourceTypeMeta.GetObjectKind().GroupVersionKind())
|
||||
kvp := &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: r.GetObjectMeta().GetName(),
|
||||
Namespace: r.GetObjectMeta().GetNamespace(),
|
||||
Kind: c.resourceKind,
|
||||
},
|
||||
Revision: r.GetObjectMeta().GetResourceVersion(),
|
||||
}
|
||||
|
||||
if err := ConvertK8sResourceToCalicoResource(r); err != nil {
|
||||
return kvp, err
|
||||
}
|
||||
|
||||
kvp.Value = r
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *customK8sResourceClient) convertKVPairToResource(kvp *model.KVPair) (Resource, error) {
|
||||
resource := kvp.Value.(Resource)
|
||||
resource.GetObjectMeta().SetResourceVersion(kvp.Revision)
|
||||
resOut, err := ConvertCalicoResourceToK8sResource(resource)
|
||||
if err != nil {
|
||||
return resOut, err
|
||||
}
|
||||
|
||||
return resOut, nil
|
||||
}
|
||||
66
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/errors.go
generated
vendored
Normal file
66
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/errors.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/errors"
|
||||
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// K8sErrorToCalico returns the equivalent libcalico error for the given
|
||||
// kubernetes error.
|
||||
func K8sErrorToCalico(ke error, id interface{}) error {
|
||||
if ke == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if kerrors.IsAlreadyExists(ke) {
|
||||
return errors.ErrorResourceAlreadyExists{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
if kerrors.IsNotFound(ke) {
|
||||
return errors.ErrorResourceDoesNotExist{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
if kerrors.IsForbidden(ke) || kerrors.IsUnauthorized(ke) {
|
||||
return errors.ErrorConnectionUnauthorized{
|
||||
Err: ke,
|
||||
}
|
||||
}
|
||||
if kerrors.IsConflict(ke) {
|
||||
// Treat precondition errors as not found.
|
||||
if strings.Contains(ke.Error(), "UID in precondition") {
|
||||
return errors.ErrorResourceDoesNotExist{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
return errors.ErrorResourceUpdateConflict{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
return errors.ErrorDatastoreError{
|
||||
Err: ke,
|
||||
Identifier: id,
|
||||
}
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/felixconfig.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/felixconfig.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
FelixConfigResourceName = "FelixConfigurations"
|
||||
FelixConfigCRDName = "felixconfigurations.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewFelixConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: FelixConfigCRDName,
|
||||
resource: FelixConfigResourceName,
|
||||
description: "Calico Felix Configuration",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.FelixConfiguration{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindFelixConfiguration,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.FelixConfigurationList{}),
|
||||
resourceKind: apiv3.KindFelixConfiguration,
|
||||
}
|
||||
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/globalnetworkpolicies.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/globalnetworkpolicies.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2017 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"reflect"

	apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

const (
	GlobalNetworkPolicyResourceName = "GlobalNetworkPolicies"
	GlobalNetworkPolicyCRDName      = "globalnetworkpolicies.crd.projectcalico.org"
)

// NewGlobalNetworkPolicyClient returns a K8sResourceClient that manages
// GlobalNetworkPolicy resources backed by Kubernetes CRDs.
func NewGlobalNetworkPolicyClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	client := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            GlobalNetworkPolicyCRDName,
		resource:        GlobalNetworkPolicyResourceName,
		description:     "Calico Global Network Policies",
		k8sResourceType: reflect.TypeOf(apiv3.GlobalNetworkPolicy{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindGlobalNetworkPolicy,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.GlobalNetworkPolicyList{}),
		resourceKind: apiv3.KindGlobalNetworkPolicy,
	}
	return &client
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/globalnetworkset.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/globalnetworkset.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2018 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
)

const (
	GlobalNetworkSetResourceName = "GlobalNetworkSets"
	GlobalNetworkSetCRDName      = "globalnetworksets.crd.projectcalico.org"
)

// NewGlobalNetworkSetClient returns a K8sResourceClient that manages
// GlobalNetworkSet resources backed by Kubernetes CRDs.
func NewGlobalNetworkSetClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	client := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            GlobalNetworkSetCRDName,
		resource:        GlobalNetworkSetResourceName,
		description:     "Calico Global Network Sets",
		k8sResourceType: reflect.TypeOf(apiv3.GlobalNetworkSet{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindGlobalNetworkSet,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.GlobalNetworkSetList{}),
		resourceKind: apiv3.KindGlobalNetworkSet,
	}
	return &client
}
|
||||
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/hostendpoint.go
generated
vendored
Normal file
47
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/hostendpoint.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"reflect"

	apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

const (
	HostEndpointResourceName = "HostEndpoints"
	HostEndpointCRDName      = "hostendpoints.crd.projectcalico.org"
)

// NewHostEndpointClient returns a K8sResourceClient that manages
// HostEndpoint resources backed by Kubernetes CRDs.
func NewHostEndpointClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	client := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            HostEndpointCRDName,
		resource:        HostEndpointResourceName,
		description:     "Calico HostEndpoints",
		k8sResourceType: reflect.TypeOf(apiv3.HostEndpoint{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindHostEndpoint,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.HostEndpointList{}),
		resourceKind: apiv3.KindHostEndpoint,
	}
	return &client
}
|
||||
280
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_affinity.go
generated
vendored
Normal file
280
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_affinity.go
generated
vendored
Normal file
@@ -0,0 +1,280 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"reflect"

	apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
	"github.com/projectcalico/libcalico-go/lib/backend/api"
	"github.com/projectcalico/libcalico-go/lib/backend/model"
	cerrors "github.com/projectcalico/libcalico-go/lib/errors"
	"github.com/projectcalico/libcalico-go/lib/names"
	"github.com/projectcalico/libcalico-go/lib/net"
	log "github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

const (
	BlockAffinityResourceName = "BlockAffinities"
	BlockAffinityCRDName      = "blockaffinities.crd.projectcalico.org"
)

// NewBlockAffinityClient returns a K8sResourceClient for BlockAffinity objects,
// translating between the v1 model used by the IPAM code and the v3 CRD
// representation stored in the Kubernetes API.
func NewBlockAffinityClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	// Create a resource client which manages k8s CRDs.
	rc := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            BlockAffinityCRDName,
		resource:        BlockAffinityResourceName,
		description:     "Calico IPAM block affinities",
		k8sResourceType: reflect.TypeOf(apiv3.BlockAffinity{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindBlockAffinity,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.BlockAffinityList{}),
		resourceKind: apiv3.KindBlockAffinity,
	}

	return &blockAffinityClient{rc: rc}
}

// blockAffinityClient implements the api.Client interface for BlockAffinity objects. It
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
// and the CRDs which are used to actually store the data in the Kubernetes API.
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
// kubernetes CRDs.
type blockAffinityClient struct {
	rc customK8sResourceClient
}

// toV1 converts the given v3 CRD KVPair into a v1 model representation
// which can be passed to the IPAM code.
func (c blockAffinityClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
	ba := kvpv3.Value.(*apiv3.BlockAffinity)

	// Parse the CIDR into a struct.
	_, cidr, err := net.ParseCIDR(ba.Spec.CIDR)
	if err != nil {
		log.WithField("cidr", ba.Spec.CIDR).WithError(err).Error("failed to parse cidr")
		return nil, err
	}
	state := model.BlockAffinityState(ba.Spec.State)
	return &model.KVPair{
		Key: model.BlockAffinityKey{
			CIDR: *cidr,
			Host: ba.Spec.Node,
		},
		Value: &model.BlockAffinity{
			State: state,
		},
		Revision: kvpv3.Revision,
		UID:      &ba.UID,
	}, nil
}

// parseKey parses the given model.Key, returning a suitable name, CIDR
// and host for use in the Kubernetes API. Names longer than the 253
// character Kubernetes limit are truncated and suffixed with a hash to
// keep them unique.
func (c blockAffinityClient) parseKey(k model.Key) (name, cidr, host string) {
	key := k.(model.BlockAffinityKey)
	host = key.Host
	cidr = key.CIDR.String()
	cidrname := names.CIDRToName(key.CIDR)

	// Include the hostname as well.
	name = fmt.Sprintf("%s-%s", host, cidrname)

	if len(name) >= 253 {
		// If the name is too long, we need to shorten it.
		// Remove enough characters to get it below the 253 character limit,
		// as well as 11 characters to add a hash which helps with uniqueness,
		// and two characters for the `-` separators between clauses.
		name = fmt.Sprintf("%s-%s", host[:252-len(cidrname)-13], cidrname)

		// Add a hash to help with uniqueness.
		h := sha256.New()
		h.Write([]byte(fmt.Sprintf("%s+%s", host, cidrname)))
		name = fmt.Sprintf("%s-%s", name, hex.EncodeToString(h.Sum(nil))[:11])
	}
	return
}

// toV3 takes the given v1 KVPair and converts it into a v3 representation, suitable
// for writing as a CRD to the Kubernetes API.
func (c blockAffinityClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	name, cidr, host := c.parseKey(kvpv1.Key)
	state := kvpv1.Value.(*model.BlockAffinity).State
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindBlockAffinity,
		},
		Value: &apiv3.BlockAffinity{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindBlockAffinity,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:            name,
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.BlockAffinitySpec{
				State: string(state),
				Node:  host,
				CIDR:  cidr,
			},
		},
		Revision: kvpv1.Revision,
	}
}

// Create writes the given v1 KVPair as a new CRD and returns the stored
// result converted back to v1 form.
func (c *blockAffinityClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	kvp, err := c.rc.Create(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}

// Update writes the given v1 KVPair over the existing CRD and returns the
// stored result converted back to v1 form.
func (c *blockAffinityClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	kvp, err := c.rc.Update(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}

// DeleteKVP deletes the affinity, first marking it as deleted via an update
// so other clients racing with us observe the deletion.
func (c *blockAffinityClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	// We need to mark as deleted first, since the Kubernetes API doesn't support
	// compare-and-delete. This update operation allows us to eliminate races with other clients.
	name, _, _ := c.parseKey(kvp.Key)
	kvp.Value.(*model.BlockAffinity).Deleted = true
	v1kvp, err := c.Update(ctx, kvp)
	if err != nil {
		return nil, err
	}

	// Now actually delete the object.
	k := model.ResourceKey{Name: name, Kind: apiv3.KindBlockAffinity}
	kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
	if err != nil {
		return nil, err
	}
	return c.toV1(kvp)
}

// Delete is not supported; callers must use DeleteKVP so the object UID can
// be checked for correctness.
func (c *blockAffinityClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	// Delete should not be used for affinities, since we need the object UID for correctness.
	log.Warn("Operation Delete is not supported on BlockAffinity type - use DeleteKVP")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Delete",
	}
}

// Get returns the affinity for the given key in v1 form. Affinities that
// were marked deleted (but whose removal did not complete) are cleaned up
// here and reported as not found.
func (c *blockAffinityClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	// Get the object.
	name, _, _ := c.parseKey(key)
	k := model.ResourceKey{Name: name, Kind: apiv3.KindBlockAffinity}
	kvp, err := c.rc.Get(ctx, k, revision)
	if err != nil {
		return nil, err
	}

	// Convert it to v1.
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}

	// If this object has been marked as deleted, then we need to clean it up and
	// return not found.
	if v1kvp.Value.(*model.BlockAffinity).Deleted {
		if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
			return nil, err
		}
		return nil, cerrors.ErrorResourceDoesNotExist{Err: fmt.Errorf("Resource was deleted"), Identifier: key}
	}

	return v1kvp, nil
}

// List returns all affinities matching the given list options (host and/or
// IP version filters) in v1 form.
func (c *blockAffinityClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	l := model.ResourceListOptions{Kind: apiv3.KindBlockAffinity}
	v3list, err := c.rc.List(ctx, l, revision)
	if err != nil {
		return nil, err
	}

	host := list.(model.BlockAffinityListOptions).Host
	requestedIPVersion := list.(model.BlockAffinityListOptions).IPVersion

	kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
	for _, i := range v3list.KVPairs {
		v1kvp, err := c.toV1(i)
		if err != nil {
			return nil, err
		}
		if host == "" || v1kvp.Key.(model.BlockAffinityKey).Host == host {
			cidr := v1kvp.Key.(model.BlockAffinityKey).CIDR
			cidr2 := &cidr
			if requestedIPVersion == 0 || requestedIPVersion == cidr2.Version() {
				// Matches the given host and IP version.
				kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
			}
		}
	}
	return kvpl, nil
}

// Watch returns a watcher over BlockAffinity CRDs whose events are converted
// to v1 KVPairs before delivery.
func (c *blockAffinityClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	resl := model.ResourceListOptions{Kind: apiv3.KindBlockAffinity}
	k8sWatchClient := cache.NewListWatchFromClient(c.rc.restClient, c.rc.resource, "", fields.Everything())
	k8sWatch, err := k8sWatchClient.WatchFunc(metav1.ListOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	toKVPair := func(r Resource) (*model.KVPair, error) {
		conv, err := c.rc.convertResourceToKVPair(r)
		if err != nil {
			return nil, err
		}
		return c.toV1(conv)
	}

	return newK8sWatcherConverter(ctx, resl.Kind+" (custom)", toKVPair, k8sWatch), nil
}

// EnsureInitialized is a no-op since the CRD should be initialized in advance.
func (c *blockAffinityClient) EnsureInitialized() error {
	return nil
}
|
||||
275
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block.go
generated
vendored
Normal file
275
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block.go
generated
vendored
Normal file
@@ -0,0 +1,275 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package resources

import (
	"context"
	"fmt"
	"reflect"

	apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
	"github.com/projectcalico/libcalico-go/lib/backend/api"
	"github.com/projectcalico/libcalico-go/lib/backend/model"
	cerrors "github.com/projectcalico/libcalico-go/lib/errors"
	"github.com/projectcalico/libcalico-go/lib/names"
	"github.com/projectcalico/libcalico-go/lib/net"
	log "github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

const (
	IPAMBlockResourceName = "IPAMBlocks"
	IPAMBlockCRDName      = "ipamblocks.crd.projectcalico.org"
)

// NewIPAMBlockClient returns a K8sResourceClient for IPAMBlock objects,
// translating between the v1 model used by the IPAM code and the v3 CRD
// representation stored in the Kubernetes API.
func NewIPAMBlockClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	// Create a resource client which manages k8s CRDs.
	rc := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            IPAMBlockCRDName,
		resource:        IPAMBlockResourceName,
		description:     "Calico IPAM blocks",
		k8sResourceType: reflect.TypeOf(apiv3.IPAMBlock{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindIPAMBlock,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.IPAMBlockList{}),
		resourceKind: apiv3.KindIPAMBlock,
	}

	return &ipamBlockClient{rc: rc}
}

// ipamBlockClient implements the api.Client interface for IPAMBlocks. It handles the translation between
// v1 objects understood by the IPAM codebase in lib/ipam, and the CRDs which are used
// to actually store the data in the Kubernetes API. It uses a customK8sResourceClient under
// the covers to perform CRUD operations on kubernetes CRDs.
type ipamBlockClient struct {
	rc customK8sResourceClient
}

// toV1 converts the given v3 CRD KVPair into a v1 model representation
// which can be passed to the IPAM code.
func (c ipamBlockClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
	ab := kvpv3.Value.(*apiv3.IPAMBlock)

	_, cidr, err := net.ParseCIDR(ab.Spec.CIDR)
	if err != nil {
		return nil, err
	}

	// Convert attributes.
	attrs := []model.AllocationAttribute{}
	for _, a := range ab.Spec.Attributes {
		attrs = append(attrs, model.AllocationAttribute{
			AttrPrimary:   a.AttrPrimary,
			AttrSecondary: a.AttrSecondary,
		})
	}

	return &model.KVPair{
		Key: model.BlockKey{
			CIDR: *cidr,
		},
		Value: &model.AllocationBlock{
			CIDR:           *cidr,
			Affinity:       ab.Spec.Affinity,
			StrictAffinity: ab.Spec.StrictAffinity,
			Allocations:    ab.Spec.Allocations,
			Unallocated:    ab.Spec.Unallocated,
			Attributes:     attrs,
			Deleted:        ab.Spec.Deleted,
		},
		Revision: kvpv3.Revision,
		UID:      &ab.UID,
	}, nil
}

// parseKey parses the given model.Key, returning a CRD object name and the
// block's CIDR string.
func (c ipamBlockClient) parseKey(k model.Key) (name, cidr string) {
	blockCIDR := k.(model.BlockKey).CIDR
	cidr = blockCIDR.String()
	name = names.CIDRToName(blockCIDR)
	return
}

// toV3 takes the given v1 KVPair and converts it into a v3 representation, suitable
// for writing as a CRD to the Kubernetes API.
func (c ipamBlockClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	name, cidr := c.parseKey(kvpv1.Key)

	ab := kvpv1.Value.(*model.AllocationBlock)

	// Convert attributes.
	attrs := []apiv3.AllocationAttribute{}
	for _, a := range ab.Attributes {
		attrs = append(attrs, apiv3.AllocationAttribute{
			AttrPrimary:   a.AttrPrimary,
			AttrSecondary: a.AttrSecondary,
		})
	}

	return &model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindIPAMBlock,
		},
		Value: &apiv3.IPAMBlock{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMBlock,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:            name,
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMBlockSpec{
				CIDR:           cidr,
				Allocations:    ab.Allocations,
				Unallocated:    ab.Unallocated,
				Affinity:       ab.Affinity,
				StrictAffinity: ab.StrictAffinity,
				Attributes:     attrs,
				Deleted:        ab.Deleted,
			},
		},
		Revision: kvpv1.Revision,
	}
}

// Create writes the given v1 KVPair as a new CRD and returns the stored
// result converted back to v1 form.
func (c *ipamBlockClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	b, err := c.rc.Create(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(b)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}

// Update writes the given v1 KVPair over the existing CRD and returns the
// stored result converted back to v1 form.
func (c *ipamBlockClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	nkvp := c.toV3(kvp)
	b, err := c.rc.Update(ctx, nkvp)
	if err != nil {
		return nil, err
	}
	v1kvp, err := c.toV1(b)
	if err != nil {
		return nil, err
	}
	return v1kvp, nil
}

// DeleteKVP deletes the block, first marking it as deleted via an update
// so other clients racing with us observe the deletion.
func (c *ipamBlockClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	// We need to mark as deleted first, since the Kubernetes API doesn't support
	// compare-and-delete. This update operation allows us to eliminate races with other clients.
	name, _ := c.parseKey(kvp.Key)
	kvp.Value.(*model.AllocationBlock).Deleted = true
	v1kvp, err := c.Update(ctx, kvp)
	if err != nil {
		return nil, err
	}

	// Now actually delete the object.
	k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMBlock}
	kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
	if err != nil {
		return nil, err
	}
	return c.toV1(kvp)
}

// Delete is not supported; callers must use DeleteKVP so the object UID can
// be checked for correctness.
func (c *ipamBlockClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	// Delete should not be used for blocks, since we need the object UID for correctness.
	log.Warn("Operation Delete is not supported on IPAMBlock type - use DeleteKVP")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: key,
		Operation:  "Delete",
	}
}

// Get returns the block for the given key in v1 form. Blocks that were
// marked deleted (but whose removal did not complete) are cleaned up here
// and reported as not found.
func (c *ipamBlockClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	// Get the object.
	name, _ := c.parseKey(key)
	k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMBlock}
	kvp, err := c.rc.Get(ctx, k, revision)
	if err != nil {
		return nil, err
	}

	// Convert it back to V1 format.
	v1kvp, err := c.toV1(kvp)
	if err != nil {
		return nil, err
	}

	// If this object has been marked as deleted, then we need to clean it up and
	// return not found.
	if v1kvp.Value.(*model.AllocationBlock).Deleted {
		if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
			return nil, err
		}
		return nil, cerrors.ErrorResourceDoesNotExist{Err: fmt.Errorf("Resource was deleted"), Identifier: key}
	}

	return v1kvp, nil
}

// List returns all IPAM blocks in v1 form.
func (c *ipamBlockClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	l := model.ResourceListOptions{Kind: apiv3.KindIPAMBlock}
	v3list, err := c.rc.List(ctx, l, revision)
	if err != nil {
		return nil, err
	}

	kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
	for _, i := range v3list.KVPairs {
		v1kvp, err := c.toV1(i)
		if err != nil {
			return nil, err
		}
		kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
	}
	return kvpl, nil
}

// Watch returns a watcher over IPAMBlock CRDs whose events are converted
// to v1 KVPairs before delivery.
func (c *ipamBlockClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	resl := model.ResourceListOptions{Kind: apiv3.KindIPAMBlock}
	k8sWatchClient := cache.NewListWatchFromClient(c.rc.restClient, c.rc.resource, "", fields.Everything())
	k8sWatch, err := k8sWatchClient.WatchFunc(metav1.ListOptions{ResourceVersion: revision})
	if err != nil {
		return nil, K8sErrorToCalico(err, list)
	}
	toKVPair := func(r Resource) (*model.KVPair, error) {
		conv, err := c.rc.convertResourceToKVPair(r)
		if err != nil {
			return nil, err
		}
		return c.toV1(conv)
	}

	return newK8sWatcherConverter(ctx, resl.Kind+" (custom)", toKVPair, k8sWatch), nil
}

// EnsureInitialized is a no-op since the CRD should be
// initialized in advance.
func (c *ipamBlockClient) EnsureInitialized() error {
	return nil
}
|
||||
183
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block_pod_cidr.go
generated
vendored
Normal file
183
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_block_pod_cidr.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
cnet "github.com/projectcalico/libcalico-go/lib/net"
|
||||
)
|
||||
|
||||
func NewPodCIDRBlockAffinityClient(c *kubernetes.Clientset) K8sResourceClient {
|
||||
return &podCIDRBlockClient{
|
||||
clientSet: c,
|
||||
}
|
||||
}
|
||||
|
||||
// podCIDRBlockClient implements the api.Client interface for block affinities using Kubernetes pod CIDR
|
||||
// allocations as the backing store. For use with host-local IPAM. For the Calico IPAM
|
||||
// implementation, see ipam_block.go.
|
||||
type podCIDRBlockClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Create is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Update is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation DeleteKVP is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "DeleteKVP",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Warn("Operation Get is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Get",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *podCIDRBlockClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
log.Debug("Operation Watch is not supported on block affinities when using host-local IPAM")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: list,
|
||||
Operation: "Watch",
|
||||
}
|
||||
}
|
||||
|
||||
// List returns block affinities when using host-local IPAM. In this mode a
// node's PodCIDR (assigned by Kubernetes) is treated as that node's single,
// confirmed affinity block; there are no Calico-managed blocks to query.
//
// Supported queries: exact lookup for one host, or all hosts — in both cases
// only when no IP version is specified. Anything else is rejected.
func (c *podCIDRBlockClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
	log.Debug("Received List request on block affinities (using host-local IPAM)")
	bl := list.(model.BlockAffinityListOptions)
	kvpl := &model.KVPairList{
		KVPairs:  []*model.KVPair{},
		Revision: revision,
	}

	// If a host is specified, then do an exact lookup (ip version should not be expected in the query)
	if bl.Host != "" && bl.IPVersion == 0 {
		// Get the node settings, we use the nodes PodCIDR as the only node affinity block.
		node, err := c.clientSet.CoreV1().Nodes().Get(bl.Host, metav1.GetOptions{ResourceVersion: revision})
		if err != nil {
			err = K8sErrorToCalico(err, list)
			// A missing node simply means no affinities — return the empty
			// list rather than an error.
			if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
				return nil, err
			}
			return kvpl, nil
		}

		// Return no results if the pod CIDR is not assigned.
		podcidr := node.Spec.PodCIDR
		if len(podcidr) == 0 {
			return kvpl, nil
		}

		_, cidr, err := cnet.ParseCIDR(podcidr)
		if err != nil {
			return nil, err
		}
		kvpl.Revision = node.ResourceVersion
		kvpl.KVPairs = append(kvpl.KVPairs, &model.KVPair{
			Key: model.BlockAffinityKey{
				CIDR: *cidr,
				Host: bl.Host,
			},
			Value:    &model.BlockAffinity{State: model.StateConfirmed},
			Revision: node.ResourceVersion,
		})

		return kvpl, nil
	}

	// When host is not specified...
	if bl.IPVersion == 0 {
		// Get the node settings, we use the nodes PodCIDR as the only node affinity block.
		nodeList, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: revision})
		if err != nil {
			err = K8sErrorToCalico(err, list)
			if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
				return nil, err
			}
			return kvpl, nil
		}

		kvpl.Revision = nodeList.ResourceVersion
		for _, node := range nodeList.Items {
			// Skip nodes whose pod CIDR is not (yet) assigned.
			podcidr := node.Spec.PodCIDR
			if len(podcidr) == 0 {
				continue
			}

			_, cidr, err := cnet.ParseCIDR(podcidr)
			if err != nil {
				return nil, err
			}
			kvpl.KVPairs = append(kvpl.KVPairs, &model.KVPair{
				Key: model.BlockAffinityKey{
					CIDR: *cidr,
					Host: node.Name,
				},
				Value:    &model.BlockAffinity{State: model.StateConfirmed},
				Revision: node.ResourceVersion,
			})
		}
		return kvpl, nil
	}

	// Currently querying the affinity block is only used by the BGP syncer *and* we always
	// query for a specific Node, so for now fail List requests for all nodes.
	log.Warn("Operation List (all nodes or all IP versions) is not supported on block affinities when using host-local IPAM")
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: list,
		Operation:  "List",
	}
}
|
||||
|
||||
// EnsureInitialized is a no-op: host-local IPAM affinity data is derived from
// node PodCIDRs, so there is no backing store to initialize.
func (c *podCIDRBlockClient) EnsureInitialized() error {
	return nil
}
|
||||
189
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_config.go
generated
vendored
Normal file
189
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_config.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
	// IPAMConfigResourceName is the plural resource name used for IPAMConfig CRs.
	IPAMConfigResourceName = "IPAMConfigs"
	// IPAMConfigCRDName is the fully-qualified CRD name for IPAMConfig.
	IPAMConfigCRDName = "ipamconfigs.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPAMConfigClient returns a K8sResourceClient for IPAMConfig objects,
// wrapping a customK8sResourceClient that performs the CRD CRUD operations.
func NewIPAMConfigClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &ipamConfigClient{
		rc: customK8sResourceClient{
			clientSet:       c,
			restClient:      r,
			name:            IPAMConfigCRDName,
			resource:        IPAMConfigResourceName,
			description:     "Calico IPAM configuration",
			k8sResourceType: reflect.TypeOf(apiv3.IPAMConfig{}),
			k8sResourceTypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMConfig,
				APIVersion: apiv3.GroupVersionCurrent,
			},
			k8sListType:  reflect.TypeOf(apiv3.IPAMConfigList{}),
			resourceKind: apiv3.KindIPAMConfig}}
}
|
||||
|
||||
// ipamConfigClient implements the api.Client interface for IPAMConfig objects. It
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
// and the CRDs which are used to actually store the data in the Kubernetes API.
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
// kubernetes CRDs.
type ipamConfigClient struct {
	// rc performs the raw CRD operations against the Kubernetes API.
	rc customK8sResourceClient
}
|
||||
|
||||
// toV1 converts the given v3 CRD KVPair into a v1 model representation
|
||||
// which can be passed to the IPAM code.
|
||||
func (c ipamConfigClient) toV1(kvpv3 *model.KVPair) (*model.KVPair, error) {
|
||||
v3obj := kvpv3.Value.(*apiv3.IPAMConfig)
|
||||
return &model.KVPair{
|
||||
Key: model.IPAMConfigKey{},
|
||||
Value: &model.IPAMConfig{
|
||||
StrictAffinity: v3obj.Spec.StrictAffinity,
|
||||
AutoAllocateBlocks: v3obj.Spec.AutoAllocateBlocks,
|
||||
},
|
||||
Revision: kvpv3.Revision,
|
||||
UID: &kvpv3.Value.(*apiv3.IPAMConfig).UID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// toV3 takes the given v1 KVPair and converts it into a v3 representation, suitable
// for writing as a CRD to the Kubernetes API. There is only ever one global
// IPAMConfig, so the CRD name is always model.IPAMConfigGlobalName.
func (c ipamConfigClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	v1obj := kvpv1.Value.(*model.IPAMConfig)
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: model.IPAMConfigGlobalName,
			Kind: apiv3.KindIPAMConfig,
		},
		Value: &apiv3.IPAMConfig{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMConfig,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: model.IPAMConfigGlobalName,
				// Carry the revision through so updates are compare-and-swap.
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMConfigSpec{
				StrictAffinity:     v1obj.StrictAffinity,
				AutoAllocateBlocks: v1obj.AutoAllocateBlocks,
			},
		},
		Revision: kvpv1.Revision,
	}
}
|
||||
|
||||
func (c *ipamConfigClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debug("Received Create request on IPAMConfig type")
|
||||
nkvp, err := c.rc.Create(ctx, c.toV3(kvp))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvp, err = c.toV1(nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *ipamConfigClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debug("Received Update request on IPAMConfig type")
|
||||
nkvp, err := c.rc.Update(ctx, c.toV3(kvp))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvp, err = c.toV1(nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
// DeleteKVP deletes the IPAMConfig identified by the given KVPair, delegating
// to Delete with the pair's revision and UID for compare-and-delete semantics.
func (c *ipamConfigClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
func (c *ipamConfigClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
k := model.ResourceKey{
|
||||
Name: model.IPAMConfigGlobalName,
|
||||
Kind: apiv3.KindIPAMConfig,
|
||||
}
|
||||
kvp, err := c.rc.Delete(ctx, k, revision, uid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1nkvp, err := c.toV1(kvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1nkvp, nil
|
||||
}
|
||||
|
||||
func (c *ipamConfigClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on IPAMConfig type")
|
||||
k := model.ResourceKey{
|
||||
Name: model.IPAMConfigGlobalName,
|
||||
Kind: apiv3.KindIPAMConfig,
|
||||
}
|
||||
kvp, err := c.rc.Get(ctx, k, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1kvp, err := c.toV1(kvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1kvp, nil
|
||||
|
||||
}
|
||||
|
||||
func (c *ipamConfigClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Warn("Operation List is not supported on IPAMConfig type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: list,
|
||||
Operation: "List",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ipamConfigClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
log.Warn("Operation Watch is not supported on IPAMConfig type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: list,
|
||||
Operation: "Watch",
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op since the CRD should be
// initialized in advance.
func (c *ipamConfigClient) EnsureInitialized() error {
	return nil
}
|
||||
208
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_handle.go
generated
vendored
Normal file
208
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ipam_handle.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
	// IPAMHandleResourceName is the plural resource name used for IPAMHandle CRs.
	IPAMHandleResourceName = "IPAMHandles"
	// IPAMHandleCRDName is the fully-qualified CRD name for IPAMHandle.
	IPAMHandleCRDName = "ipamhandles.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPAMHandleClient returns a K8sResourceClient for IPAMHandle objects,
// wrapping a customK8sResourceClient that performs the CRD CRUD operations.
func NewIPAMHandleClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	// Create a resource client which manages k8s CRDs.
	rc := customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            IPAMHandleCRDName,
		resource:        IPAMHandleResourceName,
		description:     "Calico IPAM handles",
		k8sResourceType: reflect.TypeOf(apiv3.IPAMHandle{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindIPAMHandle,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.IPAMHandleList{}),
		resourceKind: apiv3.KindIPAMHandle,
	}

	return &ipamHandleClient{rc: rc}
}
|
||||
|
||||
// ipamHandleClient implements the api.Client interface for IPAMHandle objects. It
// handles the translation between v1 objects understood by the IPAM codebase in lib/ipam,
// and the CRDs which are used to actually store the data in the Kubernetes API.
// It uses a customK8sResourceClient under the covers to perform CRUD operations on
// kubernetes CRDs.
type ipamHandleClient struct {
	// rc performs the raw CRD operations against the Kubernetes API.
	rc customK8sResourceClient
}
|
||||
|
||||
func (c ipamHandleClient) toV1(kvpv3 *model.KVPair) *model.KVPair {
|
||||
handle := kvpv3.Value.(*apiv3.IPAMHandle).Spec.HandleID
|
||||
block := kvpv3.Value.(*apiv3.IPAMHandle).Spec.Block
|
||||
return &model.KVPair{
|
||||
Key: model.IPAMHandleKey{
|
||||
HandleID: handle,
|
||||
},
|
||||
Value: &model.IPAMHandle{
|
||||
HandleID: handle,
|
||||
Block: block,
|
||||
},
|
||||
Revision: kvpv3.Revision,
|
||||
}
|
||||
}
|
||||
|
||||
func (c ipamHandleClient) parseKey(k model.Key) string {
|
||||
return strings.ToLower(k.(model.IPAMHandleKey).HandleID)
|
||||
}
|
||||
|
||||
// toV3 converts a v1 IPAMHandle KVPair into a v3 CRD representation suitable
// for writing to the Kubernetes API. Note the CRD name is the lowercased
// handle ID (via parseKey) while the Spec keeps the original-case HandleID.
func (c ipamHandleClient) toV3(kvpv1 *model.KVPair) *model.KVPair {
	name := c.parseKey(kvpv1.Key)
	handle := kvpv1.Key.(model.IPAMHandleKey).HandleID
	block := kvpv1.Value.(*model.IPAMHandle).Block
	return &model.KVPair{
		Key: model.ResourceKey{
			Name: name,
			Kind: apiv3.KindIPAMHandle,
		},
		Value: &apiv3.IPAMHandle{
			TypeMeta: metav1.TypeMeta{
				Kind:       apiv3.KindIPAMHandle,
				APIVersion: "crd.projectcalico.org/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
				// Carry the revision through so updates are compare-and-swap.
				ResourceVersion: kvpv1.Revision,
			},
			Spec: apiv3.IPAMHandleSpec{
				HandleID: handle,
				Block:    block,
			},
		},
		Revision: kvpv1.Revision,
	}
}
|
||||
|
||||
func (c *ipamHandleClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Create(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.toV1(kvp), nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
nkvp := c.toV3(kvp)
|
||||
kvp, err := c.rc.Update(ctx, nkvp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.toV1(kvp), nil
|
||||
}
|
||||
|
||||
// DeleteKVP removes an IPAMHandle using a two-phase mark-then-delete: the
// object is first updated with Deleted=true, then actually deleted. If the
// final delete fails the object stays marked; Get cleans such objects up and
// reports them as not found. NOTE(review): the mutation of kvp's Value
// (Deleted=true) is visible to the caller — confirm callers do not reuse kvp.
func (c *ipamHandleClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	// We need to mark as deleted first, since the Kubernetes API doesn't support
	// compare-and-delete. This update operation allows us to eliminate races with other clients.
	name := c.parseKey(kvp.Key)
	kvp.Value.(*model.IPAMHandle).Deleted = true
	v1kvp, err := c.Update(ctx, kvp)
	if err != nil {
		return nil, err
	}

	// Now actually delete the object, using the revision from the marking
	// update so a concurrent writer causes a conflict rather than a lost update.
	k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMHandle}
	kvp, err = c.rc.Delete(ctx, k, v1kvp.Revision, kvp.UID)
	if err != nil {
		return nil, err
	}
	return c.toV1(kvp), nil
}
|
||||
|
||||
func (c *ipamHandleClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
// Delete should not be used for handles, since we need the object UID for correctness.
|
||||
log.Warn("Operation Delete is not supported on IPAMHandle type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
name := c.parseKey(key)
|
||||
k := model.ResourceKey{Name: name, Kind: apiv3.KindIPAMHandle}
|
||||
kvp, err := c.rc.Get(ctx, k, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert it to v1.
|
||||
v1kvp := c.toV1(kvp)
|
||||
|
||||
// If this object has been marked as deleted, then we need to clean it up and
|
||||
// return not found.
|
||||
if v1kvp.Value.(*model.IPAMHandle).Deleted {
|
||||
if _, err := c.DeleteKVP(ctx, v1kvp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{fmt.Errorf("Resource was deleted"), key}
|
||||
}
|
||||
|
||||
return v1kvp, nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
l := model.ResourceListOptions{Kind: apiv3.KindIPAMHandle}
|
||||
v3list, err := c.rc.List(ctx, l, revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kvpl := &model.KVPairList{KVPairs: []*model.KVPair{}}
|
||||
for _, i := range v3list.KVPairs {
|
||||
v1kvp := c.toV1(i)
|
||||
kvpl.KVPairs = append(kvpl.KVPairs, v1kvp)
|
||||
}
|
||||
return kvpl, nil
|
||||
}
|
||||
|
||||
func (c *ipamHandleClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
log.Warn("Operation Watch is not supported on IPAMHandle type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: list,
|
||||
Operation: "Watch",
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op: the IPAMHandle CRD is expected to be
// installed in advance.
func (c *ipamHandleClient) EnsureInitialized() error {
	return nil
}
|
||||
84
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ippool.go
generated
vendored
Normal file
84
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/ippool.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/encap"
|
||||
)
|
||||
|
||||
const (
	// IPPoolResourceName is the plural resource name used for IPPool CRs.
	IPPoolResourceName = "IPPools"
	// IPPoolCRDName is the fully-qualified CRD name for IPPool.
	IPPoolCRDName = "ippools.crd.projectcalico.org"
)
|
||||
|
||||
// NewIPPoolClient returns a K8sResourceClient for IPPool CRDs. A version
// converter is attached so pools stored with legacy v1 fields (IPIP,
// NATOutgoingV1) are upgraded on read.
func NewIPPoolClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	return &customK8sResourceClient{
		clientSet:       c,
		restClient:      r,
		name:            IPPoolCRDName,
		resource:        IPPoolResourceName,
		description:     "Calico IP Pools",
		k8sResourceType: reflect.TypeOf(apiv3.IPPool{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindIPPool,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.IPPoolList{}),
		resourceKind: apiv3.KindIPPool,
		// Upgrades legacy v1-format pool data on the way in.
		versionconverter: IPPoolv1v3Converter{},
	}
}
|
||||
|
||||
// IPPoolv1v3Converter implements the VersionConverter interface; it upgrades
// IPPool resources that still carry v1-format fields to their v3 equivalents.
type IPPoolv1v3Converter struct{}
|
||||
|
||||
// ConvertFromK8s converts v1 IPPool Resource to v3 IPPool resource
|
||||
func (c IPPoolv1v3Converter) ConvertFromK8s(inRes Resource) (Resource, error) {
|
||||
ipp, ok := inRes.(*apiv3.IPPool)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid type conversion")
|
||||
}
|
||||
|
||||
// If IPIP field is not nil, then it means the resource has v1 IPIP data
|
||||
// and we must convert it to v3 equivalent data.
|
||||
if ipp.Spec.IPIP != nil {
|
||||
if !ipp.Spec.IPIP.Enabled {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeNever
|
||||
} else if ipp.Spec.IPIP.Mode == encap.CrossSubnet {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeCrossSubnet
|
||||
} else {
|
||||
ipp.Spec.IPIPMode = apiv3.IPIPModeAlways
|
||||
}
|
||||
|
||||
// Set IPIP to nil since we've already converted v1 IPIP fields to v3.
|
||||
ipp.Spec.IPIP = nil
|
||||
}
|
||||
|
||||
// Take a logical OR of the v1 NATOutgoing field with the v3 NATOutgoing.
|
||||
ipp.Spec.NATOutgoing = ipp.Spec.NATOutgoingV1 || ipp.Spec.NATOutgoing
|
||||
|
||||
// Set v1 NatOutgoing to false since we've already converted it to v3 NatOutgoing.
|
||||
ipp.Spec.NATOutgoingV1 = false
|
||||
|
||||
return ipp, nil
|
||||
}
|
||||
520
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkpolicy.go
generated
vendored
Normal file
520
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkpolicy.go
generated
vendored
Normal file
@@ -0,0 +1,520 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
	// NetworkPolicyResourceName is the plural resource name used for NetworkPolicy CRs.
	NetworkPolicyResourceName = "NetworkPolicies"
	// NetworkPolicyCRDName is the fully-qualified CRD name for NetworkPolicy.
	NetworkPolicyCRDName = "networkpolicies.crd.projectcalico.org"
)
|
||||
|
||||
// NewNetworkPolicyClient returns a client that merges two policy sources:
// Calico NetworkPolicy CRDs (via the customK8sResourceClient) and native
// Kubernetes NetworkPolicies (read directly through the clientset).
// NOTE(review): clientSet is deliberately not set on the CRD client here,
// unlike the other resource clients in this package — confirm
// customK8sResourceClient does not need it for namespaced CRUD.
func NewNetworkPolicyClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
	crdClient := &customK8sResourceClient{
		restClient:      r,
		name:            NetworkPolicyCRDName,
		resource:        NetworkPolicyResourceName,
		description:     "Calico Network Policies",
		k8sResourceType: reflect.TypeOf(apiv3.NetworkPolicy{}),
		k8sResourceTypeMeta: metav1.TypeMeta{
			Kind:       apiv3.KindNetworkPolicy,
			APIVersion: apiv3.GroupVersionCurrent,
		},
		k8sListType:  reflect.TypeOf(apiv3.NetworkPolicyList{}),
		resourceKind: apiv3.KindNetworkPolicy,
		namespaced:   true,
	}
	return &networkPolicyClient{
		clientSet: c,
		crdClient: crdClient,
	}
}
||||
|
||||
// networkPolicyClient implements the api.Client interface for NetworkPolicies.
// CRD-backed Calico policies are handled via crdClient, while policies derived
// from Kubernetes NetworkPolicies (names prefixed with
// conversion.K8sNetworkPolicyNamePrefix) are read directly via clientSet and
// are read-only through this client.
type networkPolicyClient struct {
	// Converter supplies K8s<->Calico policy conversion and revision join/split helpers.
	conversion.Converter
	// resourceName — NOTE(review): appears unused within this file; confirm before removing.
	resourceName string
	clientSet    *kubernetes.Clientset
	crdClient    *customK8sResourceClient
}
|
||||
|
||||
// Create creates a Calico (CRD-backed) NetworkPolicy. Creating a policy whose
// name indicates it is derived from a Kubernetes NetworkPolicy is rejected —
// those are owned by Kubernetes.
func (c *networkPolicyClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Create request on NetworkPolicy type")
	key := kvp.Key.(model.ResourceKey)
	if strings.HasPrefix(key.Name, conversion.K8sNetworkPolicyNamePrefix) {
		// We don't support Create of a Kubernetes NetworkPolicy.
		return nil, cerrors.ErrorOperationNotSupported{
			Identifier: kvp.Key,
			Operation:  "Create",
		}
	}

	kvp, err := c.crdClient.Create(ctx, kvp)
	if kvp != nil {
		// Convert the revision to the combined CRD/k8s revision - the k8s rev will be empty, but this
		// format will allow the revision to be passed into List and Watch calls.
		kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
	}
	return kvp, err
}
|
||||
|
||||
// Update updates a Calico (CRD-backed) NetworkPolicy. Updating a policy
// derived from a Kubernetes NetworkPolicy is rejected. Incoming revisions
// are in the combined "crd/k8s" form; only the CRD part applies here.
func (c *networkPolicyClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Update request on NetworkPolicy type")

	key := kvp.Key.(model.ResourceKey)
	if strings.HasPrefix(key.Name, conversion.K8sNetworkPolicyNamePrefix) {
		// We don't support Update of a Kubernetes NetworkPolicy.
		return nil, cerrors.ErrorOperationNotSupported{
			Identifier: kvp.Key,
			Operation:  "Update",
		}
	}

	// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
	// the CRD rev and use that for the update.
	crdRev, _, err := c.SplitNetworkPolicyRevision(kvp.Revision)
	if err != nil {
		return nil, err
	}
	kvp.Revision = crdRev
	kvp, err = c.crdClient.Update(ctx, kvp)

	if kvp != nil {
		// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty, but this
		// format will allow the revision to be passed into List and Watch calls.
		kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
	}
	return kvp, err
}
|
||||
|
||||
// Apply is not supported for NetworkPolicy; callers must use Create or Update.
func (c *networkPolicyClient) Apply(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return nil, cerrors.ErrorOperationNotSupported{
		Identifier: kvp.Key,
		Operation:  "Apply",
	}
}
|
||||
// DeleteKVP deletes the policy identified by the given KVPair, delegating to
// Delete with the pair's revision and UID.
func (c *networkPolicyClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
}
|
||||
|
||||
// Delete removes a Calico (CRD-backed) NetworkPolicy. Deleting a policy
// derived from a Kubernetes NetworkPolicy is rejected. Incoming revisions
// are in the combined "crd/k8s" form; only the CRD part applies here.
func (c *networkPolicyClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
	log.Debug("Received Delete request on NetworkPolicy type")
	k := key.(model.ResourceKey)
	if strings.HasPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix) {
		// We don't support Delete of a Kubernetes NetworkPolicy.
		return nil, cerrors.ErrorOperationNotSupported{
			Identifier: key,
			Operation:  "Delete",
		}
	}

	// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
	// the CRD rev and use that for the delete.
	crdRev, _, err := c.SplitNetworkPolicyRevision(revision)
	if err != nil {
		return nil, err
	}
	kvp, err := c.crdClient.Delete(ctx, key, crdRev, uid)

	if kvp != nil {
		// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty.
		kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
	}
	return kvp, err
}
|
||||
|
||||
// Get fetches a single policy by namespaced name. Names carrying the
// K8sNetworkPolicyNamePrefix are read from the Kubernetes NetworkPolicy API
// and converted; all other names are read from the Calico CRD. The returned
// Revision is always in combined "crd/k8s" form with the unused half empty.
func (c *networkPolicyClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
	log.Debug("Received Get request on NetworkPolicy type")
	k := key.(model.ResourceKey)
	if k.Name == "" {
		return nil, errors.New("Missing policy name")
	}
	if k.Namespace == "" {
		return nil, errors.New("Missing policy namespace")
	}

	// The revision, if supplied, will be a combination of CRD and k8s-backed revisions. Extract
	// the k8s rev and use the correct version depending on whether we are querying the CRD or the
	// k8s NetworkPolicy.
	crdRev, k8sRev, err := c.SplitNetworkPolicyRevision(revision)
	if err != nil {
		return nil, err
	}

	// Check to see if this is backed by a NetworkPolicy.
	if strings.HasPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix) {
		// Backed by a NetworkPolicy - extract the name.
		policyName := strings.TrimPrefix(k.Name, conversion.K8sNetworkPolicyNamePrefix)

		// Get the NetworkPolicy from the API and convert it.
		networkPolicy := networkingv1.NetworkPolicy{}
		err = c.clientSet.NetworkingV1().RESTClient().
			Get().
			Resource("networkpolicies").
			Namespace(k.Namespace).
			Name(policyName).
			VersionedParams(&metav1.GetOptions{ResourceVersion: k8sRev}, scheme.ParameterCodec).
			Do().Into(&networkPolicy)
		if err != nil {
			return nil, K8sErrorToCalico(err, k)
		}
		kvp, err := c.K8sNetworkPolicyToCalico(&networkPolicy)

		if kvp != nil {
			// Convert the revision back to the combined CRD/k8s revision - the CRD rev will be empty.
			kvp.Revision = c.JoinNetworkPolicyRevisions("", kvp.Revision)
		}
		return kvp, err
	} else {
		kvp, err := c.crdClient.Get(ctx, k, crdRev)

		if kvp != nil {
			// Convert the revision back to the combined CRD/k8s revision - the k8s rev will be empty.
			kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
		}
		return kvp, err
	}
}
|
||||
|
||||
func (c *networkPolicyClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on NetworkPolicy type")
|
||||
l := list.(model.ResourceListOptions)
|
||||
if l.Name != "" {
|
||||
// Exact lookup on a NetworkPolicy.
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: l.Name, Namespace: l.Namespace, Kind: l.Kind}, revision)
|
||||
if err != nil {
|
||||
// Return empty slice of KVPair if the object doesn't exist, return the error otherwise.
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// List all Namespaced Calico Network Policies.
|
||||
npKvps, err := c.crdClient.List(ctx, l, revision)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Unable to list Calico CRD-backed Network Policy resources")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the revision to the combined CRD/k8s revision - the k8s rev will be empty.
|
||||
for _, kvp := range npKvps.KVPairs {
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions(kvp.Revision, "")
|
||||
}
|
||||
|
||||
// List all of the k8s NetworkPolicy objects in all Namespaces.
|
||||
networkPolicies := networkingv1.NetworkPolicyList{}
|
||||
req := c.clientSet.NetworkingV1().RESTClient().
|
||||
Get().
|
||||
Resource("networkpolicies")
|
||||
if l.Namespace != "" {
|
||||
// Add the namespace if requested.
|
||||
req = req.Namespace(l.Namespace)
|
||||
}
|
||||
err = req.Do().Into(&networkPolicies)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Unable to list K8s Network Policy resources")
|
||||
return nil, K8sErrorToCalico(err, l)
|
||||
}
|
||||
|
||||
// For each policy, turn it into a Policy and generate the list.
|
||||
for _, p := range networkPolicies.Items {
|
||||
kvp, err := c.K8sNetworkPolicyToCalico(&p)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Failed to convert K8s Network Policy")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the revision to the combined CRD/k8s revision - the CRD rev will be empty.
|
||||
kvp.Revision = c.JoinNetworkPolicyRevisions("", kvp.Revision)
|
||||
npKvps.KVPairs = append(npKvps.KVPairs, kvp)
|
||||
}
|
||||
|
||||
// Combine the two resource versions to a single resource version for the List
|
||||
// that can be decoded by the Watch.
|
||||
npKvps.Revision = c.JoinNetworkPolicyRevisions(npKvps.Revision, networkPolicies.ResourceVersion)
|
||||
|
||||
log.WithField("KVPs", npKvps).Info("Returning NP KVPs")
|
||||
return npKvps, nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op: the NetworkPolicy CRD is expected to be
// installed in advance.
func (c *networkPolicyClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Watch watches NetworkPolicies from both backing stores, fanning the two
// underlying watches (Kubernetes NetworkPolicies and Calico CRDs) into one
// combined watcher. When a single named policy is requested, only the store
// that owns that name is watched; the other is replaced with a fake.
func (c *networkPolicyClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	// Build watch options to pass to k8s.
	opts := metav1.ListOptions{Watch: true}
	rlo, ok := list.(model.ResourceListOptions)
	if !ok {
		return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
	}

	// Setting to Watch all networkPolicies in all namespaces; overridden below
	watchK8s, watchCrd := true, true

	// Watch a specific networkPolicy
	if len(rlo.Name) != 0 {
		if len(rlo.Namespace) == 0 {
			return nil, errors.New("cannot watch a specific NetworkPolicy without a namespace")
		}
		// We've been asked to watch a specific networkpolicy.
		log.WithField("name", rlo.Name).Debug("Watching a single networkpolicy")
		// The name prefix decides which store owns the policy; only that
		// store is watched.
		policyName := rlo.Name
		if strings.HasPrefix(rlo.Name, conversion.K8sNetworkPolicyNamePrefix) {
			watchCrd = false
			policyName = strings.TrimPrefix(rlo.Name, conversion.K8sNetworkPolicyNamePrefix)
		} else {
			watchK8s = false
		}
		// write back in rlo for custom resource watch below
		rlo.Name = policyName
		opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", policyName).String()
	}

	// If a revision is specified, see if it contains a "/" and if so split into separate
	// revisions for the CRD and for the K8s resource.
	crdNPRev, k8sNPRev, err := c.SplitNetworkPolicyRevision(revision)
	if err != nil {
		return nil, err
	}

	opts.ResourceVersion = k8sNPRev
	// A fake (never-firing) watch stands in when the k8s side is not watched.
	var k8sRawWatch kwatch.Interface = kwatch.NewFake()
	if watchK8s {
		log.Debugf("Watching networkPolicy (k8s) at revision %q", k8sNPRev)
		k8sRawWatch, err = c.clientSet.NetworkingV1().NetworkPolicies(rlo.Namespace).Watch(opts)
		if err != nil {
			return nil, K8sErrorToCalico(err, list)
		}
	}
	// Converts raw k8s NetworkPolicy events into Calico KVPairs.
	converter := func(r Resource) (*model.KVPair, error) {
		np, ok := r.(*networkingv1.NetworkPolicy)
		if !ok {
			return nil, errors.New("NetworkPolicy conversion with incorrect k8s resource type")
		}
		return c.K8sNetworkPolicyToCalico(np)
	}
	k8sWatch := newK8sWatcherConverter(ctx, "NetworkPolicy (namespaced)", converter, k8sRawWatch)

	var calicoWatch api.WatchInterface = api.NewFake()
	if watchCrd {
		log.Debugf("Watching networkPolicy (crd) at revision %q", crdNPRev)
		calicoWatch, err = c.crdClient.Watch(ctx, rlo, crdNPRev)
		if err != nil {
			// Stop the already-started k8s watch so it isn't leaked.
			k8sWatch.Stop()
			return nil, err
		}
	}

	return newNetworkPolicyWatcher(ctx, k8sNPRev, crdNPRev, k8sWatch, calicoWatch), nil
}
|
||||
|
||||
func newNetworkPolicyWatcher(ctx context.Context, k8sRev, crdRev string, k8sWatch, calicoWatch api.WatchInterface) api.WatchInterface {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
wc := &networkPolicyWatcher{
|
||||
k8sNPRev: k8sRev,
|
||||
crdNPRev: crdRev,
|
||||
k8sNPWatch: k8sWatch,
|
||||
crdNPWatch: calicoWatch,
|
||||
context: ctx,
|
||||
cancel: cancel,
|
||||
resultChan: make(chan api.WatchEvent, resultsBufSize),
|
||||
}
|
||||
go wc.processNPEvents()
|
||||
return wc
|
||||
}
|
||||
|
||||
type networkPolicyWatcher struct {
|
||||
conversion.Converter
|
||||
converter ConvertK8sResourceToKVPair
|
||||
k8sNPRev string
|
||||
crdNPRev string
|
||||
k8sNPWatch api.WatchInterface
|
||||
crdNPWatch api.WatchInterface
|
||||
context context.Context
|
||||
cancel context.CancelFunc
|
||||
resultChan chan api.WatchEvent
|
||||
terminated uint32
|
||||
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
|
||||
// This calls through to the context cancel function.
|
||||
func (npw *networkPolicyWatcher) Stop() {
|
||||
npw.cancel()
|
||||
npw.k8sNPWatch.Stop()
|
||||
npw.crdNPWatch.Stop()
|
||||
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
|
||||
func (npw *networkPolicyWatcher) ResultChan() <-chan api.WatchEvent {
|
||||
return npw.resultChan
|
||||
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
|
||||
func (npw *networkPolicyWatcher) HasTerminated() bool {
|
||||
terminated := atomic.LoadUint32(&npw.terminated) != 0
|
||||
|
||||
if npw.k8sNPWatch != nil {
|
||||
terminated = terminated && npw.k8sNPWatch.HasTerminated()
|
||||
}
|
||||
if npw.crdNPWatch != nil {
|
||||
terminated = terminated && npw.crdNPWatch.HasTerminated()
|
||||
}
|
||||
|
||||
return terminated
|
||||
}
|
||||
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
|
||||
// backend KVPs.
|
||||
func (npw *networkPolicyWatcher) processNPEvents() {
|
||||
log.Debug("Watcher process started")
|
||||
defer func() {
|
||||
log.Debug("Watcher process terminated")
|
||||
npw.Stop()
|
||||
close(npw.resultChan)
|
||||
atomic.AddUint32(&npw.terminated, 1)
|
||||
}()
|
||||
|
||||
for {
|
||||
var ok bool
|
||||
var e api.WatchEvent
|
||||
var isCRDEvent bool
|
||||
select {
|
||||
case e, ok = <-npw.crdNPWatch.ResultChan():
|
||||
if !ok {
|
||||
// We shouldn't get a closed channel without first getting a terminating error,
|
||||
// so write a warning log and convert to a termination error.
|
||||
log.Warn("Calico NP channel closed")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
ClosedByRemote: true,
|
||||
Err: errors.New("Calico NP watch channel closed"),
|
||||
},
|
||||
}
|
||||
}
|
||||
log.Debug("Processing Calico NP event")
|
||||
isCRDEvent = true
|
||||
|
||||
case e, ok = <-npw.k8sNPWatch.ResultChan():
|
||||
if !ok {
|
||||
// We shouldn't get a closed channel without first getting a terminating error,
|
||||
// so write a warning log and convert to a termination error.
|
||||
log.Warn("Kubernetes NP channel closed")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
ClosedByRemote: true,
|
||||
Err: errors.New("Kubernetes NP watch channel closed"),
|
||||
},
|
||||
}
|
||||
}
|
||||
log.Debug("Processing Kubernetes NP event")
|
||||
isCRDEvent = false
|
||||
|
||||
case <-npw.context.Done(): // user cancel
|
||||
log.Debug("Process watcher done event in KDD client")
|
||||
return
|
||||
}
|
||||
|
||||
// Update the resource version of the Object in the watcher. The version returned on a watch
|
||||
// event needs to able to be passed back into a Watch client so that we can resume watching
|
||||
// when a watch fails. The watch client is expecting a slash separated list of resource
|
||||
// versions in the format <CRD NP Revision>/<k8s NP Revision>.
|
||||
var value interface{}
|
||||
switch e.Type {
|
||||
case api.WatchModified, api.WatchAdded:
|
||||
value = e.New.Value
|
||||
case api.WatchDeleted:
|
||||
value = e.Old.Value
|
||||
}
|
||||
|
||||
if value != nil {
|
||||
oma, ok := value.(metav1.ObjectMetaAccessor)
|
||||
if !ok {
|
||||
log.WithField("event", e).Error(
|
||||
"Resource returned from watch does not implement the ObjectMetaAccessor interface")
|
||||
e = api.WatchEvent{
|
||||
Type: api.WatchError,
|
||||
Error: cerrors.ErrorWatchTerminated{
|
||||
Err: errors.New("Resource returned from watch does not implement the ObjectMetaAccessor interface"),
|
||||
},
|
||||
}
|
||||
}
|
||||
if isCRDEvent {
|
||||
npw.crdNPRev = oma.GetObjectMeta().GetResourceVersion()
|
||||
} else {
|
||||
npw.k8sNPRev = oma.GetObjectMeta().GetResourceVersion()
|
||||
}
|
||||
oma.GetObjectMeta().SetResourceVersion(npw.JoinNetworkPolicyRevisions(npw.crdNPRev, npw.k8sNPRev))
|
||||
} else if e.Error == nil {
|
||||
log.WithField("event", e).Warning("Event had nil error and value")
|
||||
}
|
||||
|
||||
// Send the processed event.
|
||||
select {
|
||||
case npw.resultChan <- e:
|
||||
// If this is an error event, check to see if it's a terminating one.
|
||||
// If so, terminate this watcher.
|
||||
if e.Type == api.WatchError {
|
||||
log.WithError(e.Error).Debug("Kubernetes event converted to backend watcher error event")
|
||||
if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
|
||||
log.Debug("Watch terminated event")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
case <-npw.context.Done():
|
||||
log.Debug("Process watcher done event during watch event in kdd client")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
48
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkset.go
generated
vendored
Normal file
48
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/networkset.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
NetworkSetResourceName = "NetworkSets"
|
||||
NetworkSetCRDName = "networksets.crd.projectcalico.org"
|
||||
)
|
||||
|
||||
func NewNetworkSetClient(c *kubernetes.Clientset, r *rest.RESTClient) K8sResourceClient {
|
||||
return &customK8sResourceClient{
|
||||
clientSet: c,
|
||||
restClient: r,
|
||||
name: NetworkSetCRDName,
|
||||
resource: NetworkSetResourceName,
|
||||
description: "Calico Network Sets",
|
||||
k8sResourceType: reflect.TypeOf(apiv3.NetworkSet{}),
|
||||
k8sResourceTypeMeta: metav1.TypeMeta{
|
||||
Kind: apiv3.KindNetworkSet,
|
||||
APIVersion: apiv3.GroupVersionCurrent,
|
||||
},
|
||||
k8sListType: reflect.TypeOf(apiv3.NetworkSetList{}),
|
||||
resourceKind: apiv3.KindNetworkSet,
|
||||
namespaced: true,
|
||||
}
|
||||
}
|
||||
437
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/node.go
generated
vendored
Normal file
437
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/node.go
generated
vendored
Normal file
@@ -0,0 +1,437 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"github.com/projectcalico/libcalico-go/lib/net"
|
||||
"github.com/projectcalico/libcalico-go/lib/numorstring"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeBgpIpv4AddrAnnotation = "projectcalico.org/IPv4Address"
|
||||
nodeBgpIpv4IPIPTunnelAddrAnnotation = "projectcalico.org/IPv4IPIPTunnelAddr"
|
||||
nodeBgpIpv4VXLANTunnelAddrAnnotation = "projectcalico.org/IPv4VXLANTunnelAddr"
|
||||
nodeBgpVXLANTunnelMACAddrAnnotation = "projectcalico.org/VXLANTunnelMACAddr"
|
||||
nodeBgpIpv6AddrAnnotation = "projectcalico.org/IPv6Address"
|
||||
nodeBgpAsnAnnotation = "projectcalico.org/ASNumber"
|
||||
nodeBgpCIDAnnotation = "projectcalico.org/RouteReflectorClusterID"
|
||||
nodeK8sLabelAnnotation = "projectcalico.org/kube-labels"
|
||||
)
|
||||
|
||||
func NewNodeClient(c *kubernetes.Clientset, usePodCIDR bool) K8sResourceClient {
|
||||
return &nodeClient{
|
||||
clientSet: c,
|
||||
usePodCIDR: usePodCIDR,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for Nodes.
|
||||
type nodeClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
usePodCIDR bool
|
||||
}
|
||||
|
||||
func (c *nodeClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Create is not supported on Node type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *nodeClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Debug("Received Update request on Node type")
|
||||
// Get a current copy of the node to fill in fields we don't track.
|
||||
oldNode, err := c.clientSet.CoreV1().Nodes().Get(kvp.Key.(model.ResourceKey).Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
|
||||
node, err := mergeCalicoNodeIntoK8sNode(kvp.Value.(*apiv3.Node), oldNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newNode, err := c.clientSet.CoreV1().Nodes().UpdateStatus(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("Error updating Node resource")
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
|
||||
newCalicoNode, err := K8sNodeToCalico(newNode, c.usePodCIDR)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to parse returned Node after call to update %+v", newNode)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newCalicoNode, nil
|
||||
}
|
||||
|
||||
func (c *nodeClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
func (c *nodeClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on Node type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *nodeClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on Node type")
|
||||
node, err := c.clientSet.CoreV1().Nodes().Get(key.(model.ResourceKey).Name, metav1.GetOptions{ResourceVersion: revision})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, key)
|
||||
}
|
||||
|
||||
kvp, err := K8sNodeToCalico(node, c.usePodCIDR)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Couldn't convert k8s node.")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
func (c *nodeClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on Node type")
|
||||
nl := list.(model.ResourceListOptions)
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
if nl.Name != "" {
|
||||
// The node is already fully qualified, so perform a Get instead.
|
||||
// If the entry does not exist then we just return an empty list.
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: nl.Name, Kind: apiv3.KindNode}, revision)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Listing all nodes.
|
||||
nodes, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: revision})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
|
||||
for _, node := range nodes.Items {
|
||||
kvp, err := K8sNodeToCalico(&node, c.usePodCIDR)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to convert k8s node to Calico node: node=%s: %v", node.Name, err)
|
||||
continue
|
||||
}
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *nodeClient) EnsureInitialized() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *nodeClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
// Build watch options to pass to k8s.
|
||||
opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
|
||||
rlo, ok := list.(model.ResourceListOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
|
||||
}
|
||||
if len(rlo.Name) != 0 {
|
||||
// We've been asked to watch a specific node resource.
|
||||
log.WithField("name", rlo.Name).Debug("Watching a single node")
|
||||
opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", rlo.Name).String()
|
||||
}
|
||||
|
||||
k8sWatch, err := c.clientSet.CoreV1().Nodes().Watch(opts)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
converter := func(r Resource) (*model.KVPair, error) {
|
||||
k8sNode, ok := r.(*kapiv1.Node)
|
||||
if !ok {
|
||||
return nil, errors.New("node conversion with incorrect k8s resource type")
|
||||
}
|
||||
return K8sNodeToCalico(k8sNode, c.usePodCIDR)
|
||||
}
|
||||
return newK8sWatcherConverter(ctx, "Node", converter, k8sWatch), nil
|
||||
}
|
||||
|
||||
// K8sNodeToCalico converts a Kubernetes format node, with Calico annotations, to a Calico Node.
|
||||
func K8sNodeToCalico(k8sNode *kapiv1.Node, usePodCIDR bool) (*model.KVPair, error) {
|
||||
// Create a new CalicoNode resource and copy the settings across from the k8s Node.
|
||||
calicoNode := apiv3.NewNode()
|
||||
calicoNode.ObjectMeta.Name = k8sNode.Name
|
||||
SetCalicoMetadataFromK8sAnnotations(calicoNode, k8sNode)
|
||||
|
||||
// Calico Nodes inherit labels from Kubernetes nodes, do that merge.
|
||||
err := mergeCalicoAndK8sLabels(calicoNode, k8sNode)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to merge Calico and Kubernetes labels.")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract the BGP configuration stored in the annotations.
|
||||
bgpSpec := &apiv3.NodeBGPSpec{}
|
||||
annotations := k8sNode.ObjectMeta.Annotations
|
||||
bgpSpec.IPv4Address = annotations[nodeBgpIpv4AddrAnnotation]
|
||||
bgpSpec.IPv6Address = annotations[nodeBgpIpv6AddrAnnotation]
|
||||
bgpSpec.RouteReflectorClusterID = annotations[nodeBgpCIDAnnotation]
|
||||
asnString, ok := annotations[nodeBgpAsnAnnotation]
|
||||
if ok {
|
||||
asn, err := numorstring.ASNumberFromString(asnString)
|
||||
if err != nil {
|
||||
log.WithError(err).Infof("failed to read node AS number from annotation: %s", nodeBgpAsnAnnotation)
|
||||
} else {
|
||||
bgpSpec.ASNumber = &asn
|
||||
}
|
||||
}
|
||||
bgpSpec.IPv4IPIPTunnelAddr = annotations[nodeBgpIpv4IPIPTunnelAddrAnnotation]
|
||||
|
||||
// If using host-local IPAM, assign an IPIP tunnel address statically.
|
||||
if usePodCIDR && k8sNode.Spec.PodCIDR != "" {
|
||||
// For back compatibility with v2.6.x, always generate a tunnel address if we have the pod CIDR.
|
||||
bgpSpec.IPv4IPIPTunnelAddr, err = getIPIPTunnelAddress(k8sNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Only set the BGP spec if it is not empty.
|
||||
if !reflect.DeepEqual(*bgpSpec, apiv3.NodeBGPSpec{}) {
|
||||
calicoNode.Spec.BGP = bgpSpec
|
||||
}
|
||||
|
||||
// Set the VXLAN tunnel address based on annotation.
|
||||
calicoNode.Spec.IPv4VXLANTunnelAddr = annotations[nodeBgpIpv4VXLANTunnelAddrAnnotation]
|
||||
calicoNode.Spec.VXLANTunnelMACAddr = annotations[nodeBgpVXLANTunnelMACAddrAnnotation]
|
||||
|
||||
// Create the resource key from the node name.
|
||||
return &model.KVPair{
|
||||
Key: model.ResourceKey{
|
||||
Name: k8sNode.Name,
|
||||
Kind: apiv3.KindNode,
|
||||
},
|
||||
Value: calicoNode,
|
||||
Revision: k8sNode.ObjectMeta.ResourceVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mergeCalicoNodeIntoK8sNode takes a k8s node and a Calico node and puts the values from the Calico
|
||||
// node into the k8s node.
|
||||
func mergeCalicoNodeIntoK8sNode(calicoNode *apiv3.Node, k8sNode *kapiv1.Node) (*kapiv1.Node, error) {
|
||||
// Nodes inherit labels from Kubernetes, but we also have our own set of labels that are stored in an annotation.
|
||||
// For nodes that are being updated, we want to avoid writing k8s labels that we inherited into our annotation
|
||||
// and we don't want to touch the k8s labels directly. Take a copy of the node resource and update its labels
|
||||
// to match what we want to store in our annotation only.
|
||||
calicoNode, err := restoreCalicoLabels(calicoNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the k8s annotations from the Calico node metadata.
|
||||
SetK8sAnnotationsFromCalicoMetadata(k8sNode, calicoNode)
|
||||
|
||||
// Handle VXLAN address.
|
||||
if calicoNode.Spec.IPv4VXLANTunnelAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4VXLANTunnelAddrAnnotation] = calicoNode.Spec.IPv4VXLANTunnelAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4VXLANTunnelAddrAnnotation)
|
||||
}
|
||||
|
||||
// Handle VXLAN MAC address.
|
||||
if calicoNode.Spec.VXLANTunnelMACAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpVXLANTunnelMACAddrAnnotation] = calicoNode.Spec.VXLANTunnelMACAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpVXLANTunnelMACAddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP == nil {
|
||||
// If it is a empty NodeBGPSpec, remove all annotations.
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4AddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4IPIPTunnelAddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpIpv6AddrAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpAsnAnnotation)
|
||||
delete(k8sNode.Annotations, nodeBgpCIDAnnotation)
|
||||
return k8sNode, nil
|
||||
}
|
||||
|
||||
// If the BGP spec is not nil, then handle each field within the BGP spec individually.
|
||||
if calicoNode.Spec.BGP.IPv4Address != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4AddrAnnotation] = calicoNode.Spec.BGP.IPv4Address
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4AddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.IPv4IPIPTunnelAddr != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv4IPIPTunnelAddrAnnotation] = calicoNode.Spec.BGP.IPv4IPIPTunnelAddr
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv4IPIPTunnelAddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.IPv6Address != "" {
|
||||
k8sNode.Annotations[nodeBgpIpv6AddrAnnotation] = calicoNode.Spec.BGP.IPv6Address
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpIpv6AddrAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.ASNumber != nil {
|
||||
k8sNode.Annotations[nodeBgpAsnAnnotation] = calicoNode.Spec.BGP.ASNumber.String()
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpAsnAnnotation)
|
||||
}
|
||||
|
||||
if calicoNode.Spec.BGP.RouteReflectorClusterID != "" {
|
||||
k8sNode.Annotations[nodeBgpCIDAnnotation] = calicoNode.Spec.BGP.RouteReflectorClusterID
|
||||
} else {
|
||||
delete(k8sNode.Annotations, nodeBgpCIDAnnotation)
|
||||
}
|
||||
|
||||
return k8sNode, nil
|
||||
}
|
||||
|
||||
// mergeCalicoAndK8sLabels merges the Kubernetes labels (from k8sNode.Labels) with those that are already present in
|
||||
// calicoNode (which were loaded from our annotation). Kubernetes labels take precedence. To make the operation
|
||||
// reversible (so that we can support write back of a Calico node that was read from Kubernetes), we also store the
|
||||
// complete set of Kubernetes labels in an annotation.
|
||||
//
|
||||
// Note: if a Kubernetes label shadows a Calico label, the Calico label will be lost when the resource is written
|
||||
// back to the datastore. This is consistent with kube-controllers' behavior.
|
||||
func mergeCalicoAndK8sLabels(calicoNode *apiv3.Node, k8sNode *kapiv1.Node) error {
|
||||
// Now, copy the Kubernetes Node labels over. Note: this may overwrite Calico labels of the same name, but that's
|
||||
// consistent with the kube-controllers behavior.
|
||||
for k, v := range k8sNode.Labels {
|
||||
if calicoNode.Labels == nil {
|
||||
calicoNode.Labels = map[string]string{}
|
||||
}
|
||||
calicoNode.Labels[k] = v
|
||||
}
|
||||
|
||||
// For consistency with kube-controllers, and so we can correctly round-trip labels, we stash the kubernetes labels
|
||||
// in an annotation.
|
||||
if calicoNode.Annotations == nil {
|
||||
calicoNode.Annotations = map[string]string{}
|
||||
}
|
||||
bytes, err := json.Marshal(k8sNode.Labels)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Error marshalling node labels")
|
||||
return err
|
||||
}
|
||||
calicoNode.Annotations[nodeK8sLabelAnnotation] = string(bytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
// restoreCalicoLabels tries to undo the transformation done by mergeCalicoLabels. If no changes are needed, it
|
||||
// returns the input value; otherwise, it returns a copy.
|
||||
func restoreCalicoLabels(calicoNode *apiv3.Node) (*apiv3.Node, error) {
|
||||
rawLabels := calicoNode.Annotations[nodeK8sLabelAnnotation]
|
||||
if rawLabels == "" {
|
||||
return calicoNode, nil
|
||||
}
|
||||
|
||||
// We're about to update the labels and annotations on the node, take a copy.
|
||||
calicoNode = calicoNode.DeepCopy()
|
||||
|
||||
// We stashed the k8s labels in an annotation, extract them so we can compare with the combined labels.
|
||||
k8sLabels := map[string]string{}
|
||||
if err := json.Unmarshal([]byte(rawLabels), &k8sLabels); err != nil {
|
||||
log.WithError(err).Error("Failed to unmarshal k8s node labels from " +
|
||||
nodeK8sLabelAnnotation + " annotation")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Now remove any labels that match the k8s ones.
|
||||
if log.GetLevel() >= log.DebugLevel {
|
||||
log.WithField("k8s", k8sLabels).Debug("Loaded label annotations")
|
||||
}
|
||||
for k, k8sVal := range k8sLabels {
|
||||
if calVal, ok := calicoNode.Labels[k]; ok && calVal != k8sVal {
|
||||
log.WithFields(log.Fields{
|
||||
"label": k,
|
||||
"newValue": calVal,
|
||||
"k8sValue": k8sVal,
|
||||
}).Warn("Update to label that is shadowed by a Kubernetes label will be ignored.")
|
||||
}
|
||||
|
||||
// The k8s value was inherited and there was no old Calico value, drop the label so that we don't copy
|
||||
// it to the Calico annotation.
|
||||
if log.GetLevel() >= log.DebugLevel {
|
||||
log.WithField("key", k).Debug("Removing inherited k8s label")
|
||||
}
|
||||
delete(calicoNode.Labels, k)
|
||||
}
|
||||
|
||||
// Filter out our bookkeeping annotation, which is only used for round-tripping labels correctly.
|
||||
delete(calicoNode.Annotations, nodeK8sLabelAnnotation)
|
||||
if len(calicoNode.Annotations) == 0 {
|
||||
calicoNode.Annotations = nil
|
||||
}
|
||||
|
||||
return calicoNode, nil
|
||||
}
|
||||
|
||||
// getIPIPTunnelAddress calculates the IPv4 address to use for the IPIP tunnel based on the node's pod CIDR, for use
|
||||
// in conjunction with host-local IPAM backed by node.Spec.PodCIDR allocations.
|
||||
func getIPIPTunnelAddress(n *kapiv1.Node) (string, error) {
|
||||
ip, _, err := net.ParseCIDR(n.Spec.PodCIDR)
|
||||
if err != nil {
|
||||
log.Warnf("Invalid pod CIDR for node: %s, %s", n.Name, n.Spec.PodCIDR)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// We need to get the IP for the podCIDR and increment it to the
|
||||
// first IP in the CIDR.
|
||||
tunIp := ip.To4()
|
||||
if tunIp == nil {
|
||||
log.WithField("podCIDR", n.Spec.PodCIDR).Infof("Cannot pick an IPv4 tunnel address from the given CIDR")
|
||||
return "", nil
|
||||
}
|
||||
tunIp[3]++
|
||||
|
||||
return tunIp.String(), nil
|
||||
}
|
||||
471
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/profile.go
generated
vendored
Normal file
471
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/profile.go
generated
vendored
Normal file
@@ -0,0 +1,471 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
func NewProfileClient(c *kubernetes.Clientset) K8sResourceClient {
|
||||
return &profileClient{
|
||||
clientSet: c,
|
||||
Converter: conversion.Converter{},
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for Profiles.
|
||||
type profileClient struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
conversion.Converter
|
||||
}
|
||||
|
||||
func (c *profileClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Create is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Create",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *profileClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
log.Warn("Operation Update is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: kvp.Key,
|
||||
Operation: "Update",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *profileClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
func (c *profileClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on Profile type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *profileClient) getSaKv(sa *kapiv1.ServiceAccount) (*model.KVPair, error) {
|
||||
kvPair, err := c.ServiceAccountToProfile(sa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kvPair, nil
|
||||
}
|
||||
|
||||
// getServiceAccount fetches the ServiceAccount backing the profile named in
// rk (at the given resource revision) and converts it to a Profile KVPair.
func (c *profileClient) getServiceAccount(ctx context.Context, rk model.ResourceKey, revision string) (*model.KVPair, error) {

	// Derive the namespace and service-account name from the profile name.
	namespace, serviceAccountName, err := c.ProfileNameToServiceAccount(rk.Name)
	if err != nil {
		return nil, err
	}

	serviceAccount, err := c.clientSet.CoreV1().ServiceAccounts(namespace).Get(serviceAccountName, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		// Map the k8s error to the equivalent Calico error type.
		return nil, K8sErrorToCalico(err, rk)
	}

	return c.getSaKv(serviceAccount)
}
|
||||
|
||||
func (c *profileClient) getNsKv(ns *kapiv1.Namespace) (*model.KVPair, error) {
|
||||
kvPair, err := c.NamespaceToProfile(ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kvPair, nil
|
||||
}
|
||||
|
||||
// getNamespace fetches the Namespace backing the profile named in rk (at the
// given resource revision) and converts it to a Profile KVPair.
func (c *profileClient) getNamespace(ctx context.Context, rk model.ResourceKey, revision string) (*model.KVPair, error) {
	namespaceName, err := c.ProfileNameToNamespace(rk.Name)
	if err != nil {
		return nil, err
	}

	namespace, err := c.clientSet.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{ResourceVersion: revision})
	if err != nil {
		// Map the k8s error to the equivalent Calico error type.
		return nil, K8sErrorToCalico(err, rk)
	}

	return c.getNsKv(namespace)
}
|
||||
|
||||
func (c *profileClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on Profile type")
|
||||
rk := key.(model.ResourceKey)
|
||||
if rk.Name == "" {
|
||||
return nil, fmt.Errorf("Profile key missing name: %+v", rk)
|
||||
}
|
||||
|
||||
nsRev, saRev, err := c.SplitProfileRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
splits := strings.SplitAfterN(rk.Name, ".", 2)
|
||||
if len(splits) == 1 {
|
||||
return nil, fmt.Errorf("Invalid name %s", rk.Name)
|
||||
}
|
||||
|
||||
switch splits[0] {
|
||||
case conversion.NamespaceProfileNamePrefix:
|
||||
return c.getNamespace(ctx, rk, nsRev)
|
||||
case conversion.ServiceAccountProfileNamePrefix:
|
||||
return c.getServiceAccount(ctx, rk, saRev)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Revision %s invalid", revision)
|
||||
}
|
||||
|
||||
func (c *profileClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on Profile type")
|
||||
nl := list.(model.ResourceListOptions)
|
||||
kvps := []*model.KVPair{}
|
||||
|
||||
// If a name is specified, then do an exact lookup.
|
||||
if nl.Name != "" {
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{Name: nl.Name, Kind: nl.Kind}, revision)
|
||||
if err != nil {
|
||||
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
|
||||
return nil, err
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
kvps = append(kvps, kvp)
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
nsRev, saRev, err := c.SplitProfileRevision(revision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Otherwise, enumerate all.
|
||||
namespaces, err := c.clientSet.CoreV1().Namespaces().List(metav1.ListOptions{ResourceVersion: nsRev})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, nl)
|
||||
}
|
||||
|
||||
// For each Namespace, return a profile.
|
||||
for _, ns := range namespaces.Items {
|
||||
kvp, err := c.getNsKv(&ns)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to convert k8s Namespace to Calico Profile: Namespace=%s: %v", ns.Name, err)
|
||||
continue
|
||||
}
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
|
||||
// Enumerate all SA
|
||||
var serviceaccounts *kapiv1.ServiceAccountList
|
||||
// TBD: narrow down to only to the required namespace
|
||||
serviceaccounts, err = c.clientSet.CoreV1().ServiceAccounts(kapiv1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: saRev})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, nl)
|
||||
}
|
||||
|
||||
for _, sa := range serviceaccounts.Items {
|
||||
kvp, err := c.getSaKv(&sa)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Unable to convert k8s service account to Calico Profile: %s", sa.Name)
|
||||
continue
|
||||
}
|
||||
log.Debug("Converted k8s sa to Calico profile ", sa.Name)
|
||||
kvps = append(kvps, kvp)
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: kvps,
|
||||
Revision: c.JoinProfileRevisions(namespaces.ResourceVersion, serviceaccounts.ResourceVersion),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op: Profiles are backed by native Kubernetes
// resources, so there is nothing to initialize.
func (c *profileClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
// Watch starts a combined watch over the Namespaces and ServiceAccounts that
// back Profiles, returning a single merged WatchInterface. If a specific
// profile name is supplied, only the corresponding backing resource is
// watched (the other stream is a no-op fake watcher).
func (c *profileClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
	// Build watch options to pass to k8s.
	opts := metav1.ListOptions{Watch: true}
	rlo, ok := list.(model.ResourceListOptions)
	if !ok {
		return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
	}

	// Setting to Watch all profiles in all namespaces; overriden below
	watchNS, watchSA := true, true
	ns := kapiv1.NamespaceAll
	sa := ""

	// Watch a specific profile.
	if len(rlo.Name) != 0 {
		log.WithField("name", rlo.Name).Debug("Watching a single profile")
		var err error
		if strings.HasPrefix(rlo.Name, conversion.NamespaceProfileNamePrefix) {
			// Namespace-backed profile: watch only the one namespace.
			watchSA = false
			ns, err = c.ProfileNameToNamespace(rlo.Name)
			if err != nil {
				return nil, err
			}
			opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", ns).String()
		} else if strings.HasPrefix(rlo.Name, conversion.ServiceAccountProfileNamePrefix) {
			// ServiceAccount-backed profile: watch only the one SA in its namespace.
			watchNS = false
			ns, sa, err = c.ProfileNameToServiceAccount(rlo.Name)
			if err != nil {
				return nil, err
			}
			opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", sa).String()
		} else {
			return nil, fmt.Errorf("Unsupported prefix for resource name: %s", rlo.Name)
		}
	}

	// Split the joined revision into the per-stream revisions.
	nsRev, saRev, err := c.SplitProfileRevision(revision)
	if err != nil {
		return nil, err
	}

	opts.ResourceVersion = nsRev
	// Default to a fake (inert) watch so the merged watcher always has two streams.
	var nsWatch kwatch.Interface = kwatch.NewFake()
	if watchNS {
		log.Debugf("Watching namespace at revision %q", nsRev)
		nsWatch, err = c.clientSet.CoreV1().Namespaces().Watch(opts)
		if err != nil {
			return nil, K8sErrorToCalico(err, list)
		}
	}
	converter := func(r Resource) (*model.KVPair, error) {
		k8sNamespace, ok := r.(*kapiv1.Namespace)
		if !ok {
			return nil, errors.New("Profile conversion with incorrect k8s resource type")
		}
		return c.NamespaceToProfile(k8sNamespace)
	}
	nsWatcher := newK8sWatcherConverter(ctx, "Profile-NS", converter, nsWatch)

	// Watch all service accounts in relevant namespace(s)
	opts.ResourceVersion = saRev
	var saWatch kwatch.Interface = kwatch.NewFake()
	if watchSA {
		log.Debugf("Watching serviceAccount at revision %q", saRev)
		saWatch, err = c.clientSet.CoreV1().ServiceAccounts(ns).Watch(opts)
		if err != nil {
			// Don't leak the namespace watch if the SA watch fails to start.
			nsWatch.Stop()
			return nil, K8sErrorToCalico(err, list)
		}
	}
	converterSA := func(r Resource) (*model.KVPair, error) {
		k8sServiceAccount, ok := r.(*kapiv1.ServiceAccount)
		if !ok {
			// NOTE(review): stopping nsWatch from inside the SA converter looks
			// asymmetric with the NS converter above — confirm this is intended.
			nsWatch.Stop()
			return nil, errors.New("Profile conversion with incorrect k8s resource type")
		}
		return c.ServiceAccountToProfile(k8sServiceAccount)
	}
	saWatcher := newK8sWatcherConverter(ctx, "Profile-SA", converterSA, saWatch)

	return newProfileWatcher(ctx, nsWatcher, saWatcher), nil
}
|
||||
|
||||
// newProfileWatcher merges a Namespace watcher and a ServiceAccount watcher
// into a single Profile WatchInterface and starts its event-processing
// goroutine. The goroutine exits when the derived context is cancelled or a
// terminating watch error is seen.
func newProfileWatcher(ctx context.Context, k8sWatchNS, k8sWatchSA api.WatchInterface) api.WatchInterface {
	ctx, cancel := context.WithCancel(ctx)
	wc := &profileWatcher{
		k8sNSWatch: k8sWatchNS,
		k8sSAWatch: k8sWatchSA,
		context:    ctx,
		cancel:     cancel,
		resultChan: make(chan api.WatchEvent, resultsBufSize),
		Converter:  conversion.Converter{},
	}
	go wc.processProfileEvents()
	return wc
}
|
||||
|
||||
// profileWatcher merges the events of two underlying watchers (Namespace and
// ServiceAccount) into one Profile event stream, tracking the latest revision
// seen on each stream so emitted objects carry a joined resume-able revision.
type profileWatcher struct {
	conversion.Converter
	converter  ConvertK8sResourceToKVPair
	k8sNSWatch api.WatchInterface
	k8sSAWatch api.WatchInterface
	// Latest resource versions seen on the NS and SA streams respectively.
	k8sNSRev string
	k8sSARev string
	context  context.Context
	cancel   context.CancelFunc
	resultChan chan api.WatchEvent
	// Set (atomically) to non-zero once the processing goroutine has exited.
	terminated uint32
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
|
||||
// This calls through the context cancel function.
|
||||
// Stop stops the watcher and releases associated resources.
// This calls through the context cancel function and stops both underlying
// Kubernetes watches.
func (pw *profileWatcher) Stop() {
	pw.cancel()
	pw.k8sNSWatch.Stop()
	pw.k8sSAWatch.Stop()
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
|
||||
// ResultChan returns a channel used to receive WatchEvents.
// The channel is closed when the processing goroutine terminates.
func (pw *profileWatcher) ResultChan() <-chan api.WatchEvent {
	return pw.resultChan
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
|
||||
// HasTerminated returns true when the watcher has completed termination processing.
// Termination requires the local processing goroutine AND both underlying
// watchers (when present) to have terminated.
func (pw *profileWatcher) HasTerminated() bool {
	terminated := atomic.LoadUint32(&pw.terminated) != 0

	if pw.k8sNSWatch != nil {
		terminated = terminated && pw.k8sNSWatch.HasTerminated()
	}
	if pw.k8sSAWatch != nil {
		terminated = terminated && pw.k8sSAWatch.HasTerminated()
	}

	return terminated
}
|
||||
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
|
||||
// backend KVPs.
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
// backend KVPs. Runs as a goroutine until the context is cancelled or a
// terminating watch error is observed; on exit it stops both watches and
// closes the result channel.
func (pw *profileWatcher) processProfileEvents() {
	log.Debug("Watcher process started for profile.")
	defer func() {
		log.Debug("Profile watcher process terminated")
		pw.Stop()
		close(pw.resultChan)
		atomic.AddUint32(&pw.terminated, 1)
	}()

	for {
		var ok bool
		var e api.WatchEvent
		var isNsEvent bool
		// Wait for the next event from either underlying stream, or cancellation.
		select {
		case e, ok = <-pw.k8sNSWatch.ResultChan():
			if !ok {
				// We shouldn't get a closed channel without first getting a terminating error,
				// so write a warning log and convert to a termination error.
				log.Warn("Profile, namespace watch channel closed.")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile namespace watch channel closed."),
					},
				}
			}
			log.Debug("Processing Namespace event")
			isNsEvent = true

		case e, ok = <-pw.k8sSAWatch.ResultChan():
			if !ok {
				// We shouldn't get a closed channel without first getting a terminating error,
				// so write a warning log and convert to a termination error.
				log.Warn("Profile, serviceaccount watch channel closed.")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile serviceaccount watch channel closed."),
					},
				}
			}
			log.Debug("Processing ServiceAccount event")
			isNsEvent = false

		case <-pw.context.Done(): //user cancel
			log.Debug("Process watcher done event in kdd client")
			return
		}

		// Update the resource version of the Object in the watcher. The version returned on a watch
		// event needs to be such that the Watch client can resume watching when a watch fails.
		// The watch client expects a slash separated list of resource versions in the format
		// <NS Revision/SA Revision>.
		var value interface{}
		switch e.Type {
		case api.WatchModified, api.WatchAdded:
			value = e.New.Value
		case api.WatchDeleted:
			value = e.Old.Value
		}

		if value != nil {
			oma, ok := value.(metav1.ObjectMetaAccessor)

			if !ok {
				log.WithField("event", e).Error(
					"Resource returned from watch does not implement ObjectMetaAccessor interface")
				e = api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						ClosedByRemote: true,
						Err:            errors.New("Profile value does not implement ObjectMetaAccessor interface."),
					},
				}
			} else {
				// Remember the latest revision seen on whichever stream produced
				// this event, then stamp the object with the joined revision.
				if isNsEvent {
					pw.k8sNSRev = oma.GetObjectMeta().GetResourceVersion()
				} else {
					pw.k8sSARev = oma.GetObjectMeta().GetResourceVersion()
				}
				oma.GetObjectMeta().SetResourceVersion(pw.JoinProfileRevisions(pw.k8sNSRev, pw.k8sSARev))
			}
		} else if e.Error == nil {
			log.WithField("event", e).Warning("Event without error or value")
		}

		// Send the processed event.
		select {
		case pw.resultChan <- e:
			// If this is an error event. check to see if it's a terminating one.
			// If so, terminate this watcher.
			if e.Type == api.WatchError {
				log.WithError(e.Error).Debug("Kubernetes event converted to backend watcher error event")
				if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
					log.Debug("Watch terminated event")
					return
				}
			}

		case <-pw.context.Done():
			log.Debug("Process watcher done event during watch event in kdd client")
			return
		}
	}
}
|
||||
196
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/resources.go
generated
vendored
Normal file
196
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/resources.go
generated
vendored
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
)
|
||||
|
||||
const (
|
||||
labelsAnnotation = "projectcalico.org/labels"
|
||||
annotationsAnnotation = "projectcalico.org/annotations"
|
||||
metadataAnnotation = "projectcalico.org/metadata"
|
||||
)
|
||||
|
||||
// Resource is the interface that all Kubernetes and Calico resources
// implement: a runtime.Object that also exposes its ObjectMeta.
type Resource interface {
	runtime.Object
	metav1.ObjectMetaAccessor
}
|
||||
|
||||
// ResourceList is the interface that all Kubernetes and Calico resource lists
// implement: a runtime.Object that also exposes its ListMeta.
type ResourceList interface {
	runtime.Object
	metav1.ListMetaAccessor
}
|
||||
|
||||
// ConvertK8sResourceToKVPair is the function signature for conversion
// functions that turn a K8s resource into its KVPair equivalent.
type ConvertK8sResourceToKVPair func(Resource) (*model.KVPair, error)
|
||||
|
||||
// Store Calico Metadata in the k8s resource annotations for non-CRD backed resources.
// Currently this just stores Annotations and Labels (JSON-encoded under the
// projectcalico.org annotation keys) and drops all other metadata attributes.
// When the Calico resource has no labels/annotations, the corresponding
// stored annotation is removed so stale values do not persist.
func SetK8sAnnotationsFromCalicoMetadata(k8sRes Resource, calicoRes Resource) {
	a := k8sRes.GetObjectMeta().GetAnnotations()
	if a == nil {
		a = make(map[string]string)
	}
	if labels := calicoRes.GetObjectMeta().GetLabels(); len(labels) > 0 {
		if lann, err := json.Marshal(labels); err != nil {
			// Best-effort: a marshal failure loses the labels but does not fail the write.
			log.WithError(err).Warning("unable to store labels as an annotation")
		} else {
			a[labelsAnnotation] = string(lann)
		}
	} else {
		// There are no Calico labels - nil out the k8s res.
		delete(a, labelsAnnotation)
	}
	if annotations := calicoRes.GetObjectMeta().GetAnnotations(); len(annotations) > 0 {
		if aann, err := json.Marshal(annotations); err != nil {
			log.WithError(err).Warning("unable to store annotations as an annotation")
		} else {
			a[annotationsAnnotation] = string(aann)
		}
	} else {
		// There are no Calico annotations - nil out the k8s res.
		delete(a, annotationsAnnotation)
	}
	k8sRes.GetObjectMeta().SetAnnotations(a)
}
|
||||
|
||||
// Extract the Calico resource Metadata from the k8s resource annotations for non-CRD
// backed resources. This extracts the Annotations and Labels stored as a annotation,
// and fills in the ResourceVersion, CreationTimestamp and UID from the k8s resource.
// Unparseable stored annotations are logged and skipped rather than failing.
func SetCalicoMetadataFromK8sAnnotations(calicoRes Resource, k8sRes Resource) {
	com := calicoRes.GetObjectMeta()
	kom := k8sRes.GetObjectMeta()
	com.SetResourceVersion(kom.GetResourceVersion())
	com.SetCreationTimestamp(kom.GetCreationTimestamp())
	com.SetUID(kom.GetUID())
	a := kom.GetAnnotations()
	if a == nil {
		// No annotations at all, so no stored labels/annotations to extract.
		return
	}

	if lann, ok := a[labelsAnnotation]; ok {
		var labels map[string]string
		if err := json.Unmarshal([]byte(lann), &labels); err != nil {
			log.WithError(err).Warning("unable to parse labels annotation")
		} else {
			com.SetLabels(labels)
		}
	}
	if aann, ok := a[annotationsAnnotation]; ok {
		var annotations map[string]string
		if err := json.Unmarshal([]byte(aann), &annotations); err != nil {
			log.WithError(err).Warning("unable to parse annotations annotation")
		} else {
			com.SetAnnotations(annotations)
		}
	}
}
|
||||
|
||||
// Store Calico Metadata in the k8s resource annotations for CRD backed resources.
// This stores all Metadata except for Name, Namespace, ResourceVersion, Labels and
// Annotations (which Kubernetes holds natively) as a JSON blob under the
// metadataAnnotation key. Returns a deep copy; the input resource is not modified.
func ConvertCalicoResourceToK8sResource(resIn Resource) (Resource, error) {
	rom := resIn.GetObjectMeta()

	// Make sure to remove data that is passed to Kubernetes so it is not duplicated in
	// the annotations.
	romCopy := &metav1.ObjectMeta{}
	rom.(*metav1.ObjectMeta).DeepCopyInto(romCopy)
	romCopy.Name = ""
	romCopy.Namespace = ""
	romCopy.ResourceVersion = ""
	romCopy.Labels = nil
	romCopy.Annotations = nil

	// Marshal the data and store the json representation in the annotations.
	metadataBytes, err := json.Marshal(romCopy)
	if err != nil {
		return nil, err
	}
	annotations := rom.GetAnnotations()
	if len(annotations) == 0 {
		annotations = make(map[string]string)
	}
	annotations[metadataAnnotation] = string(metadataBytes)

	// Make sure to clear out all of the Calico Metadata out of the ObjectMeta except for
	// Name, Namespace, ResourceVersion, Labels, and Annotations. Annotations is already
	// copied so it can be set separately.
	meta := &metav1.ObjectMeta{}
	meta.Name = rom.GetName()
	meta.Namespace = rom.GetNamespace()
	meta.ResourceVersion = rom.GetResourceVersion()
	meta.Labels = rom.GetLabels()
	meta.UID = rom.GetUID()

	// Work on a deep copy so the caller's resource is left untouched.
	resOut := resIn.DeepCopyObject().(Resource)
	romOut := resOut.GetObjectMeta()
	meta.DeepCopyInto(romOut.(*metav1.ObjectMeta))
	romOut.SetAnnotations(annotations)

	return resOut, nil
}
|
||||
|
||||
// Retrieve all of the Calico Metadata from the k8s resource annotations for CRD backed
// resources. This removes the relevant Calico Metadata annotation when it has finished,
// and overwrites the resource's ObjectMeta in place with the reconstructed metadata.
func ConvertK8sResourceToCalicoResource(res Resource) error {
	rom := res.GetObjectMeta()
	annotations := rom.GetAnnotations()
	if len(annotations) == 0 {
		// Make no changes if there are no annotations to read Calico Metadata out of.
		return nil
	}
	if _, ok := annotations[metadataAnnotation]; !ok {
		// No changes if there are no annotations stored on the Resource.
		return nil
	}

	// Decode the stored Calico metadata blob.
	meta := &metav1.ObjectMeta{}
	err := json.Unmarshal([]byte(annotations[metadataAnnotation]), meta)
	if err != nil {
		return err
	}

	// Clear out the annotations
	delete(annotations, metadataAnnotation)
	if len(annotations) == 0 {
		annotations = nil
	}

	// Manually write in the data not stored in the annotations: Name, Namespace, ResourceVersion,
	// Labels, and Annotations so that they do not get overwritten.
	meta.Name = rom.GetName()
	meta.Namespace = rom.GetNamespace()
	meta.ResourceVersion = rom.GetResourceVersion()
	meta.Labels = rom.GetLabels()
	meta.Annotations = annotations
	meta.UID = rom.GetUID()

	// Overwrite the K8s metadata with the Calico metadata.
	meta.DeepCopyInto(rom.(*metav1.ObjectMeta))

	return nil
}
|
||||
190
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/watcher.go
generated
vendored
Normal file
190
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/watcher.go
generated
vendored
Normal file
@@ -0,0 +1,190 @@
|
||||
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
kwatch "k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
resultsBufSize = 100
|
||||
)
|
||||
|
||||
// newK8sWatcherConverter wraps a raw Kubernetes watch in an api.WatchInterface
// that converts each event's object to a backend KVPair via the supplied
// converter. The event-processing goroutine is started immediately and runs
// until the derived context is cancelled or a terminating error occurs.
func newK8sWatcherConverter(
	ctx context.Context,
	name string,
	converter ConvertK8sResourceToKVPair,
	k8sWatch kwatch.Interface,
) api.WatchInterface {
	ctx, cancel := context.WithCancel(ctx)
	wc := &k8sWatcherConverter{
		logCxt:     logrus.WithField("resource", name),
		converter:  converter,
		k8sWatch:   k8sWatch,
		context:    ctx,
		cancel:     cancel,
		resultChan: make(chan api.WatchEvent, resultsBufSize),
	}
	go wc.processK8sEvents()
	return wc
}
|
||||
|
||||
// k8sWatcherConverter adapts a Kubernetes watch.Interface to the Calico
// backend api.WatchInterface, converting each raw k8s event into a KVPair
// event via the configured converter function.
type k8sWatcherConverter struct {
	logCxt     *logrus.Entry
	converter  ConvertK8sResourceToKVPair
	k8sWatch   kwatch.Interface
	context    context.Context
	cancel     context.CancelFunc
	resultChan chan api.WatchEvent
	// Set (atomically) to non-zero once the processing goroutine has exited.
	terminated uint32
}
|
||||
|
||||
// Stop stops the watcher and releases associated resources.
// This calls through to the context cancel function and stops the underlying
// Kubernetes watch.
func (crw *k8sWatcherConverter) Stop() {
	crw.cancel()
	crw.k8sWatch.Stop()
}
|
||||
|
||||
// ResultChan returns a channel used to receive WatchEvents.
// The channel is closed when the processing goroutine terminates.
func (crw *k8sWatcherConverter) ResultChan() <-chan api.WatchEvent {
	return crw.resultChan
}
|
||||
|
||||
// HasTerminated returns true when the watcher has completed termination processing,
// i.e. once the event-processing goroutine has exited and bumped the flag.
func (crw *k8sWatcherConverter) HasTerminated() bool {
	return atomic.LoadUint32(&crw.terminated) != 0
}
|
||||
|
||||
// Loop to process the events stream from the underlying k8s Watcher and convert them to
// backend KVPs. Runs as a goroutine until the context is cancelled or a
// terminating error event is sent; on exit it stops the watch and closes the
// result channel.
func (crw *k8sWatcherConverter) processK8sEvents() {
	crw.logCxt.Debug("Kubernetes watcher/converter started")
	defer func() {
		crw.logCxt.Debug("Kubernetes watcher/converter stopped, closing result channel")
		crw.Stop()
		close(crw.resultChan)
		atomic.AddUint32(&crw.terminated, 1)
	}()

	for {
		select {
		case event, ok := <-crw.k8sWatch.ResultChan():
			var e *api.WatchEvent
			if !ok {
				// The channel is closed so send a terminating watcher event indicating the watch was
				// closed by the remote.
				crw.logCxt.Debug("Watcher terminated by remote")
				e = &api.WatchEvent{
					Type: api.WatchError,
					Error: cerrors.ErrorWatchTerminated{
						Err:            fmt.Errorf("terminating error event from Kubernetes watcher: closed by remote"),
						ClosedByRemote: true,
					},
				}
			} else {
				// We have a valid event, so convert it.
				e = crw.convertEvent(event)
				if e == nil {
					crw.logCxt.Debug("Event converted to a no-op")
					continue
				}
			}

			select {
			case crw.resultChan <- *e:
				crw.logCxt.Debug("Kubernetes event converted and sent to backend watcher")

				// If this is an error event, check to see if it's a terminating one (the
				// convertEvent method will decide that). If so, terminate this watcher.
				if e.Type == api.WatchError {
					crw.logCxt.WithError(e.Error).Debug("Watch event was an error event type")
					if _, ok := e.Error.(cerrors.ErrorWatchTerminated); ok {
						crw.logCxt.Debug("Watch event indicates a terminated watcher")
						return
					}
				}
			case <-crw.context.Done():
				crw.logCxt.Debug("Process watcher done event during watch event in kdd client")
				return
			}
		case <-crw.context.Done(): // user cancel
			crw.logCxt.Debug("Process watcher done event in kdd client")
			return
		}
	}
}
|
||||
|
||||
// convertEvent converts a Kubernetes Watch event into the equivalent Calico backend
// client watch event. A nil return means the event is a no-op and should be
// dropped (the converter produced no KVPair for the resource).
func (crw *k8sWatcherConverter) convertEvent(kevent kwatch.Event) *api.WatchEvent {
	var kvp *model.KVPair
	var err error
	if kevent.Type != kwatch.Error {
		// Convert the embedded object up front; a conversion failure becomes a
		// (non-terminating) error event.
		k8sRes := kevent.Object.(Resource)
		kvp, err = crw.converter(k8sRes)
		if err != nil {
			crw.logCxt.WithError(err).Warning("Error converting Kubernetes resource to Calico resource")
			return &api.WatchEvent{
				Type:  api.WatchError,
				Error: err,
			}
		}
		if kvp == nil {
			return nil
		}
	}

	switch kevent.Type {
	case kwatch.Error:
		// An error directly from the k8s watcher is a terminating event.
		return &api.WatchEvent{
			Type: api.WatchError,
			Error: cerrors.ErrorWatchTerminated{
				Err: fmt.Errorf("terminating error event from Kubernetes watcher: %v", kevent.Object),
			},
		}
	case kwatch.Deleted:
		return &api.WatchEvent{
			Type: api.WatchDeleted,
			Old:  kvp,
		}
	case kwatch.Added:
		return &api.WatchEvent{
			Type: api.WatchAdded,
			New:  kvp,
		}
	case kwatch.Modified:
		// In KDD we don't have access to the previous settings, so just set the current settings.
		return &api.WatchEvent{
			Type: api.WatchModified,
			New:  kvp,
		}
	default:
		return &api.WatchEvent{
			Type:  api.WatchError,
			Error: fmt.Errorf("unhandled Kubernetes watcher event type: %v", kevent.Type),
		}
	}
}
|
||||
260
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/workloadendpoint.go
generated
vendored
Normal file
260
vendor/github.com/projectcalico/libcalico-go/lib/backend/k8s/resources/workloadendpoint.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
// Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
kapiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/api"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/k8s/conversion"
|
||||
"github.com/projectcalico/libcalico-go/lib/backend/model"
|
||||
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func NewWorkloadEndpointClient(c *kubernetes.Clientset) K8sResourceClient {
|
||||
return &WorkloadEndpointClient{
|
||||
clientSet: c,
|
||||
converter: conversion.Converter{},
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the api.Client interface for WorkloadEndpoints.
// WorkloadEndpoints are backed by Kubernetes Pods; the converter maps between
// the two representations.
type WorkloadEndpointClient struct {
	clientSet *kubernetes.Clientset
	converter conversion.Converter
}
|
||||
|
||||
// Create patches the backing Pod with the WorkloadEndpoint's IP rather than
// creating any new object.
func (c *WorkloadEndpointClient) Create(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Create request on WorkloadEndpoint type")
	// As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated.
	// This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time:
	// https://github.com/kubernetes/kubernetes/issues/39113.
	//
	// Note: it's a bit odd to do this in the Create, but the CNI plugin uses CreateOrUpdate(). Doing it
	// here makes sure that, if the update fails: we retry here, and, we don't report success without
	// making the patch.
	return c.patchPodIP(ctx, kvp)
}
|
||||
|
||||
// Update patches the backing Pod with the WorkloadEndpoint's IP rather than
// updating any stored object.
func (c *WorkloadEndpointClient) Update(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
	log.Debug("Received Update request on WorkloadEndpoint type")
	// As a special case for the CNI plugin, try to patch the Pod with the IP that we've calculated.
	// This works around a bug in kubelet that causes it to delay writing the Pod IP for a long time:
	// https://github.com/kubernetes/kubernetes/issues/39113.
	return c.patchPodIP(ctx, kvp)
}
|
||||
|
||||
func (c *WorkloadEndpointClient) DeleteKVP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
return c.Delete(ctx, kvp.Key, kvp.Revision, kvp.UID)
|
||||
}
|
||||
|
||||
func (c *WorkloadEndpointClient) Delete(ctx context.Context, key model.Key, revision string, uid *types.UID) (*model.KVPair, error) {
|
||||
log.Warn("Operation Delete is not supported on WorkloadEndpoint type")
|
||||
return nil, cerrors.ErrorOperationNotSupported{
|
||||
Identifier: key,
|
||||
Operation: "Delete",
|
||||
}
|
||||
}
|
||||
|
||||
// patchPodIP PATCHes the Kubernetes Pod associated with the given KVPair with the IP address it contains.
|
||||
// This is a no-op if there is no IP address.
|
||||
//
|
||||
// We store the IP address in an annotation because patching the PodIP directly races with changes that
|
||||
// kubelet makes so kubelet can undo our changes.
|
||||
func (c *WorkloadEndpointClient) patchPodIP(ctx context.Context, kvp *model.KVPair) (*model.KVPair, error) {
|
||||
ips := kvp.Value.(*apiv3.WorkloadEndpoint).Spec.IPNetworks
|
||||
if len(ips) == 0 {
|
||||
return kvp, nil
|
||||
}
|
||||
|
||||
log.Debugf("PATCHing pod with IP: %v", ips[0])
|
||||
wepID, err := c.converter.ParseWorkloadEndpointName(kvp.Key.(model.ResourceKey).Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if wepID.Pod == "" {
|
||||
return nil, cerrors.ErrorInsufficientIdentifiers{Name: kvp.Key.(model.ResourceKey).Name}
|
||||
}
|
||||
// Write the IP address into an annotation. This generates an event more quickly than
|
||||
// waiting for kubelet to update the Status.PodIP field.
|
||||
ns := kvp.Key.(model.ResourceKey).Namespace
|
||||
patch, err := calculateAnnotationPatch(conversion.AnnotationPodIP, ips[0])
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to calculate Pod patch.")
|
||||
return nil, err
|
||||
}
|
||||
pod, err := c.clientSet.CoreV1().Pods(ns).Patch(wepID.Pod, types.StrategicMergePatchType, patch, "status")
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, kvp.Key)
|
||||
}
|
||||
log.Debugf("Successfully PATCHed pod to add podIP annotation: %+v", pod)
|
||||
return c.converter.PodToWorkloadEndpoint(pod)
|
||||
}
|
||||
|
||||
// annotationPatchTemplate is the skeleton of a strategic-merge patch that sets
// a single annotation; the two %s verbs receive a JSON-encoded key and value.
const annotationPatchTemplate = `{"metadata": {"annotations": {%s: %s}}}`

// calculateAnnotationPatch renders a patch body that sets annotation `name` to
// `value`.  Both strings are run through json.Marshal first so that quoting and
// escaping are always correct, whatever characters they contain.
func calculateAnnotationPatch(name, value string) ([]byte, error) {
	encodedName, err := json.Marshal(name)
	if err != nil {
		return nil, err
	}
	encodedValue, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf(annotationPatchTemplate, encodedName, encodedValue)), nil
}
|
||||
|
||||
func (c *WorkloadEndpointClient) Get(ctx context.Context, key model.Key, revision string) (*model.KVPair, error) {
|
||||
log.Debug("Received Get request on WorkloadEndpoint type")
|
||||
k := key.(model.ResourceKey)
|
||||
|
||||
// Parse resource name so we can get get the podName
|
||||
wepID, err := c.converter.ParseWorkloadEndpointName(key.(model.ResourceKey).Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if wepID.Pod == "" {
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{
|
||||
Identifier: key,
|
||||
Err: errors.New("malformed WorkloadEndpoint name - unable to determine Pod name"),
|
||||
}
|
||||
}
|
||||
|
||||
pod, err := c.clientSet.CoreV1().Pods(k.Namespace).Get(wepID.Pod, metav1.GetOptions{ResourceVersion: revision})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, k)
|
||||
}
|
||||
|
||||
// Decide if this pod should be displayed.
|
||||
if !c.converter.IsValidCalicoWorkloadEndpoint(pod) {
|
||||
return nil, cerrors.ErrorResourceDoesNotExist{Identifier: k}
|
||||
}
|
||||
return c.converter.PodToWorkloadEndpoint(pod)
|
||||
}
|
||||
|
||||
func (c *WorkloadEndpointClient) List(ctx context.Context, list model.ListInterface, revision string) (*model.KVPairList, error) {
|
||||
log.Debug("Received List request on WorkloadEndpoint type")
|
||||
l := list.(model.ResourceListOptions)
|
||||
|
||||
// If a workload is provided, we can do an exact lookup of this
|
||||
// workload endpoint.
|
||||
if l.Name != "" {
|
||||
kvp, err := c.Get(ctx, model.ResourceKey{
|
||||
Name: l.Name,
|
||||
Namespace: l.Namespace,
|
||||
Kind: l.Kind,
|
||||
}, revision)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
// Return empty slice of KVPair if the object doesn't exist, return the error otherwise.
|
||||
case cerrors.ErrorResourceDoesNotExist:
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &model.KVPairList{
|
||||
KVPairs: []*model.KVPair{kvp},
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Otherwise, enumerate all pods in a namespace.
|
||||
pods, err := c.clientSet.CoreV1().Pods(l.Namespace).List(metav1.ListOptions{ResourceVersion: revision})
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, l)
|
||||
}
|
||||
|
||||
// For each Pod, return a workload endpoint.
|
||||
ret := []*model.KVPair{}
|
||||
for _, pod := range pods.Items {
|
||||
// Decide if this pod should be included.
|
||||
if !c.converter.IsValidCalicoWorkloadEndpoint(&pod) {
|
||||
continue
|
||||
}
|
||||
|
||||
kvp, err := c.converter.PodToWorkloadEndpoint(&pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret = append(ret, kvp)
|
||||
}
|
||||
return &model.KVPairList{
|
||||
KVPairs: ret,
|
||||
Revision: revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EnsureInitialized is a no-op: WorkloadEndpoints are backed by Pods, so this
// client has no resources of its own to set up.
func (c *WorkloadEndpointClient) EnsureInitialized() error {
	return nil
}
|
||||
|
||||
func (c *WorkloadEndpointClient) Watch(ctx context.Context, list model.ListInterface, revision string) (api.WatchInterface, error) {
|
||||
// Build watch options to pass to k8s.
|
||||
opts := metav1.ListOptions{ResourceVersion: revision, Watch: true}
|
||||
rlo, ok := list.(model.ResourceListOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("ListInterface is not a ResourceListOptions: %s", list)
|
||||
}
|
||||
if len(rlo.Name) != 0 {
|
||||
if len(rlo.Namespace) == 0 {
|
||||
return nil, errors.New("cannot watch a specific WorkloadEndpoint without a namespace")
|
||||
}
|
||||
// We've been asked to watch a specific workloadendpoint
|
||||
wepids, err := c.converter.ParseWorkloadEndpointName(rlo.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("name", wepids.Pod).Debug("Watching a single workloadendpoint")
|
||||
opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", wepids.Pod).String()
|
||||
}
|
||||
|
||||
ns := list.(model.ResourceListOptions).Namespace
|
||||
k8sWatch, err := c.clientSet.CoreV1().Pods(ns).Watch(opts)
|
||||
if err != nil {
|
||||
return nil, K8sErrorToCalico(err, list)
|
||||
}
|
||||
converter := func(r Resource) (*model.KVPair, error) {
|
||||
k8sPod, ok := r.(*kapiv1.Pod)
|
||||
if !ok {
|
||||
return nil, errors.New("Pod conversion with incorrect k8s resource type")
|
||||
}
|
||||
if !c.converter.IsValidCalicoWorkloadEndpoint(k8sPod) {
|
||||
// If this is not a valid Calico workload endpoint then don't return in the watch.
|
||||
// Returning a nil KVP and a nil error swallows the event.
|
||||
return nil, nil
|
||||
}
|
||||
return c.converter.PodToWorkloadEndpoint(k8sPod)
|
||||
}
|
||||
return newK8sWatcherConverter(ctx, "Pod", converter, k8sWatch), nil
|
||||
}
|
||||
Reference in New Issue
Block a user