Merge pull request #4660 from f10atin9/pvc_autoresizer
add pvc-autoresizer controller to ks-controller-manager
This commit is contained in:
@@ -18,7 +18,9 @@ package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/kubesphere/pvc-autoresizer/runners"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
@@ -98,6 +100,8 @@ var allControllers = []string{
|
||||
"job",
|
||||
"storagecapability",
|
||||
"volumesnapshot",
|
||||
"pvcautoresizer",
|
||||
"workloadrestart",
|
||||
"loginrecord",
|
||||
"cluster",
|
||||
"nsnp",
|
||||
@@ -348,6 +352,38 @@ func addAllControllers(mgr manager.Manager, client k8s.Client, informerFactory i
|
||||
addController(mgr, "volumesnapshot", volumeSnapshotController)
|
||||
}
|
||||
|
||||
// "pvc-autoresizer"
|
||||
monitoringOptionsEnable := cmOptions.MonitoringOptions != nil && len(cmOptions.MonitoringOptions.Endpoint) != 0
|
||||
if monitoringOptionsEnable {
|
||||
if cmOptions.IsControllerEnabled("pvc-autoresizer") {
|
||||
if err := runners.SetupIndexer(mgr, false); err != nil {
|
||||
return err
|
||||
}
|
||||
promClient, err := runners.NewPrometheusClient(cmOptions.MonitoringOptions.Endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pvcAutoResizerController := runners.NewPVCAutoresizer(
|
||||
promClient,
|
||||
mgr.GetClient(),
|
||||
ctrl.Log.WithName("pvc-autoresizer"),
|
||||
1*time.Minute,
|
||||
mgr.GetEventRecorderFor("pvc-autoresizer"),
|
||||
)
|
||||
addController(mgr, "pvcautoresizer", pvcAutoResizerController)
|
||||
}
|
||||
}
|
||||
|
||||
if cmOptions.IsControllerEnabled("pvc-workload-restarter") {
|
||||
restarter := runners.NewRestarter(
|
||||
mgr.GetClient(),
|
||||
ctrl.Log.WithName("pvc-workload-restarter"),
|
||||
1*time.Minute,
|
||||
mgr.GetEventRecorderFor("pvc-workload-restarter"),
|
||||
)
|
||||
addController(mgr, "pvcworkloadrestarter", restarter)
|
||||
}
|
||||
|
||||
// "loginrecord" controller
|
||||
if cmOptions.IsControllerEnabled("loginrecord") {
|
||||
loginRecordController := loginrecord.NewLoginRecordController(
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus"
|
||||
|
||||
controllerconfig "kubesphere.io/kubesphere/pkg/apiserver/config"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@@ -57,6 +59,7 @@ type KubeSphereControllerManagerOptions struct {
|
||||
MultiClusterOptions *multicluster.Options
|
||||
ServiceMeshOptions *servicemesh.Options
|
||||
GatewayOptions *gateway.Options
|
||||
MonitoringOptions *prometheus.Options
|
||||
LeaderElect bool
|
||||
LeaderElection *leaderelection.LeaderElectionConfig
|
||||
WebhookCertDir string
|
||||
|
||||
@@ -62,6 +62,7 @@ func NewControllerManagerCommand() *cobra.Command {
|
||||
MultiClusterOptions: conf.MultiClusterOptions,
|
||||
ServiceMeshOptions: conf.ServiceMeshOptions,
|
||||
GatewayOptions: conf.GatewayOptions,
|
||||
MonitoringOptions: conf.MonitoringOptions,
|
||||
LeaderElection: s.LeaderElection,
|
||||
LeaderElect: s.LeaderElect,
|
||||
WebhookCertDir: s.WebhookCertDir,
|
||||
|
||||
1
go.mod
1
go.mod
@@ -58,6 +58,7 @@ require (
|
||||
github.com/jszwec/csvutil v1.5.0
|
||||
github.com/kelseyhightower/envconfig v1.4.0 // indirect
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
|
||||
github.com/kubesphere/pvc-autoresizer v0.1.1
|
||||
github.com/kubesphere/sonargo v0.0.2
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
|
||||
2
go.sum
2
go.sum
@@ -521,6 +521,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
|
||||
github.com/kubesphere/pvc-autoresizer v0.1.1 h1:Q0VrvLfTiE1f38EvmFpJdBevwN21X7BrgQgKrssqKQw=
|
||||
github.com/kubesphere/pvc-autoresizer v0.1.1/go.mod h1:88qz9L1Ov2bvw7L/i5mUT8g5DvBwRCZ60JA2d1WLgB0=
|
||||
github.com/kubesphere/sonargo v0.0.2 h1:hsSRE3sv3mkPcUAeSABdp7rtfcNW2zzeHXzFa01CTkU=
|
||||
github.com/kubesphere/sonargo v0.0.2/go.mod h1:ww8n9ANlDXhX5PBZ18iaRnCgEkXN0GMml3/KZXOZ11w=
|
||||
github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c=
|
||||
|
||||
201
vendor/github.com/kubesphere/pvc-autoresizer/LICENSE
generated
vendored
Normal file
201
vendor/github.com/kubesphere/pvc-autoresizer/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
39
vendor/github.com/kubesphere/pvc-autoresizer/metrics/kubernetes_client.go
generated
vendored
Normal file
39
vendor/github.com/kubesphere/pvc-autoresizer/metrics/kubernetes_client.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
runtimemetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
)
|
||||
|
||||
// Metrics subsystem and all of the keys used by the metrics client.
|
||||
const (
|
||||
KubernetesClientSubsystem = "kubernetes_client"
|
||||
KubernetesClientFailTotalKey = "fail_total"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerKubernetesClientMetrics()
|
||||
}
|
||||
|
||||
type KubernetesClientFailTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *KubernetesClientFailTotalAdapter) Increment() {
|
||||
a.metric.Inc()
|
||||
}
|
||||
|
||||
var (
|
||||
kubernetesClientFailTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Subsystem: KubernetesClientSubsystem,
|
||||
Name: KubernetesClientFailTotalKey,
|
||||
Help: "counter that indicates how many API requests to kube-api server are failed.",
|
||||
})
|
||||
|
||||
KubernetesClientFailTotal *KubernetesClientFailTotalAdapter = &KubernetesClientFailTotalAdapter{metric: kubernetesClientFailTotal}
|
||||
)
|
||||
|
||||
func registerKubernetesClientMetrics() {
|
||||
runtimemetrics.Registry.MustRegister(kubernetesClientFailTotal)
|
||||
}
|
||||
3
vendor/github.com/kubesphere/pvc-autoresizer/metrics/mertics.go
generated
vendored
Normal file
3
vendor/github.com/kubesphere/pvc-autoresizer/metrics/mertics.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
package metrics
|
||||
|
||||
const MetricsNamespace string = "pvcautoresizer"
|
||||
39
vendor/github.com/kubesphere/pvc-autoresizer/metrics/metrics_client.go
generated
vendored
Normal file
39
vendor/github.com/kubesphere/pvc-autoresizer/metrics/metrics_client.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
runtimemetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
)
|
||||
|
||||
// Metrics subsystem and all of the keys used by the metrics client.
|
||||
const (
|
||||
MetricsClientSubsystem = "metrics_client"
|
||||
MetricsClientFailTotalKey = "fail_total"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerMetricsClientMetrics()
|
||||
}
|
||||
|
||||
type metricsClientFailTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *metricsClientFailTotalAdapter) Increment() {
|
||||
a.metric.Inc()
|
||||
}
|
||||
|
||||
var (
|
||||
metricsClientFailTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Subsystem: MetricsClientSubsystem,
|
||||
Name: MetricsClientFailTotalKey,
|
||||
Help: "counter that indicates how many API requests to metrics server(e.g. prometheus) are failed.",
|
||||
})
|
||||
|
||||
MetricsClientFailTotal *metricsClientFailTotalAdapter = &metricsClientFailTotalAdapter{metric: metricsClientFailTotal}
|
||||
)
|
||||
|
||||
func registerMetricsClientMetrics() {
|
||||
runtimemetrics.Registry.MustRegister(metricsClientFailTotal)
|
||||
}
|
||||
88
vendor/github.com/kubesphere/pvc-autoresizer/metrics/resizer.go
generated
vendored
Normal file
88
vendor/github.com/kubesphere/pvc-autoresizer/metrics/resizer.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
runtimemetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
)
|
||||
|
||||
// Metrics subsystem and all of the keys used by the resizer.
|
||||
const (
|
||||
ResizerSuccessResizeTotalKey = "success_resize_total"
|
||||
ResizerFailedResizeTotalKey = "failed_resize_total"
|
||||
ResizerLoopSecondsTotalKey = "loop_seconds_total"
|
||||
ResizerLimitReachedTotalKey = "limit_reached_total"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerResizerMetrics()
|
||||
}
|
||||
|
||||
type resizerSuccessResizeTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *resizerSuccessResizeTotalAdapter) Increment() {
|
||||
a.metric.Inc()
|
||||
}
|
||||
|
||||
type resizerFailedResizeTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *resizerFailedResizeTotalAdapter) Increment() {
|
||||
a.metric.Inc()
|
||||
}
|
||||
|
||||
type resizerLoopSecondsTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *resizerLoopSecondsTotalAdapter) Add(value float64) {
|
||||
a.metric.Add(value)
|
||||
}
|
||||
|
||||
type resizerLimitReachedTotalAdapter struct {
|
||||
metric prometheus.Counter
|
||||
}
|
||||
|
||||
func (a *resizerLimitReachedTotalAdapter) Increment() {
|
||||
a.metric.Inc()
|
||||
}
|
||||
|
||||
var (
|
||||
resizerSuccessResizeTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Name: ResizerSuccessResizeTotalKey,
|
||||
Help: "counter that indicates how many volume expansion processing resizes succeed.",
|
||||
})
|
||||
|
||||
resizerFailedResizeTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Name: ResizerFailedResizeTotalKey,
|
||||
Help: "counter that indicates how many volume expansion processing resizes fail.",
|
||||
})
|
||||
|
||||
resizerLoopSecondsTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Name: ResizerLoopSecondsTotalKey,
|
||||
Help: "counter that indicates the sum of seconds spent on volume expansion processing loops.",
|
||||
})
|
||||
|
||||
resizerLimitReachedTotal = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: MetricsNamespace,
|
||||
Name: ResizerLimitReachedTotalKey,
|
||||
Help: "counter that indicates how many storage limit was reached.",
|
||||
})
|
||||
|
||||
ResizerSuccessResizeTotal *resizerSuccessResizeTotalAdapter = &resizerSuccessResizeTotalAdapter{metric: resizerSuccessResizeTotal}
|
||||
ResizerFailedResizeTotal *resizerFailedResizeTotalAdapter = &resizerFailedResizeTotalAdapter{metric: resizerFailedResizeTotal}
|
||||
ResizerLoopSecondsTotal *resizerLoopSecondsTotalAdapter = &resizerLoopSecondsTotalAdapter{metric: resizerLoopSecondsTotal}
|
||||
ResizerLimitReachedTotal *resizerLimitReachedTotalAdapter = &resizerLimitReachedTotalAdapter{metric: resizerLimitReachedTotal}
|
||||
)
|
||||
|
||||
func registerResizerMetrics() {
|
||||
runtimemetrics.Registry.MustRegister(resizerSuccessResizeTotal)
|
||||
runtimemetrics.Registry.MustRegister(resizerFailedResizeTotal)
|
||||
runtimemetrics.Registry.MustRegister(resizerLoopSecondsTotal)
|
||||
runtimemetrics.Registry.MustRegister(resizerLimitReachedTotal)
|
||||
}
|
||||
49
vendor/github.com/kubesphere/pvc-autoresizer/runners/constants.go
generated
vendored
Normal file
49
vendor/github.com/kubesphere/pvc-autoresizer/runners/constants.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package runners
|
||||
|
||||
// AutoResizeEnabledKey is the key of flag that enables pvc-autoresizer.
|
||||
const AutoResizeEnabledKey = "resize.kubesphere.io/enabled"
|
||||
|
||||
// ResizeThresholdAnnotation is the key of resize threshold.
|
||||
const ResizeThresholdAnnotation = "resize.kubesphere.io/threshold"
|
||||
|
||||
// ResizeInodesThresholdAnnotation is the key of resize threshold for inodes.
|
||||
const ResizeInodesThresholdAnnotation = "resize.kubesphere.io/inodes-threshold"
|
||||
|
||||
// ResizeIncreaseAnnotation is the key of amount increased.
|
||||
const ResizeIncreaseAnnotation = "resize.kubesphere.io/increase"
|
||||
|
||||
// StorageLimitAnnotation is the key of storage limit value
|
||||
const StorageLimitAnnotation = "resize.kubesphere.io/storage-limit"
|
||||
|
||||
// PreviousCapacityBytesAnnotation is the key of previous volume capacity.
|
||||
const PreviousCapacityBytesAnnotation = "resize.kubesphere.io/pre-capacity-bytes"
|
||||
|
||||
// AutoRestartEnabledKey is the key of flag that enables pods-autoRestart.
|
||||
const AutoRestartEnabledKey = "restart.kubesphere.io/enabled"
|
||||
|
||||
// SupportOnlineResize is the key of flag that the storage class support online expansion
|
||||
const SupportOnlineResize = "restart.kubesphere.io/online-expansion-support"
|
||||
|
||||
// RestartSkip is the key of flag that the workload don't need autoRestart
|
||||
const RestartSkip = "restart.kubesphere.io/skip"
|
||||
|
||||
// ResizingMaxTime is the key of flag that the maximum number of seconds that autoRestart can wait for pvc resize
|
||||
const ResizingMaxTime = "restart.kubesphere.io/max-time"
|
||||
|
||||
// RestartStage is used to record whether autoRestart has finished shutting down the pod
|
||||
const RestartStage = "restart.kubesphere.io/stage"
|
||||
|
||||
// RestartStopTime is used to record the time when the pod is closed
|
||||
const RestartStopTime = "restart.kubesphere.io/stop-time"
|
||||
|
||||
// ExpectReplicaNums is used to record the value of replicas before restart
|
||||
const ExpectReplicaNums = "restart.kubesphere.io/replica-nums"
|
||||
|
||||
// DefaultThreshold is the default value of ResizeThresholdAnnotation.
|
||||
const DefaultThreshold = "10%"
|
||||
|
||||
// DefaultInodesThreshold is the default value of ResizeInodesThresholdAnnotation.
|
||||
const DefaultInodesThreshold = "10%"
|
||||
|
||||
// DefaultIncrease is the default value of ResizeIncreaseAnnotation.
|
||||
const DefaultIncrease = "10%"
|
||||
22
vendor/github.com/kubesphere/pvc-autoresizer/runners/fake_client_wrapper.go
generated
vendored
Normal file
22
vendor/github.com/kubesphere/pvc-autoresizer/runners/fake_client_wrapper.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package runners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type fakeClientWrapper struct {
|
||||
client.Client
|
||||
}
|
||||
|
||||
func NewFakeClientWrapper(c client.Client) client.Client {
|
||||
return &fakeClientWrapper{
|
||||
Client: c,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *fakeClientWrapper) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
|
||||
return errors.New("occured fake error")
|
||||
}
|
||||
116
vendor/github.com/kubesphere/pvc-autoresizer/runners/metrics_client.go
generated
vendored
Normal file
116
vendor/github.com/kubesphere/pvc-autoresizer/runners/metrics_client.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
package runners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/kubesphere/pvc-autoresizer/metrics"
|
||||
"github.com/prometheus/client_golang/api"
|
||||
prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
const (
|
||||
volumeAvailableQuery = "kubelet_volume_stats_available_bytes"
|
||||
volumeCapacityQuery = "kubelet_volume_stats_capacity_bytes"
|
||||
inodesAvailableQuery = "kubelet_volume_stats_inodes_free"
|
||||
inodesCapacityQuery = "kubelet_volume_stats_inodes"
|
||||
)
|
||||
|
||||
// NewPrometheusClient returns a new prometheusClient
|
||||
func NewPrometheusClient(url string) (MetricsClient, error) {
|
||||
client, err := api.NewClient(api.Config{
|
||||
Address: url,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1api := prometheusv1.NewAPI(client)
|
||||
|
||||
return &prometheusClient{
|
||||
prometheusAPI: v1api,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MetricsClient is an interface for getting metrics
|
||||
type MetricsClient interface {
|
||||
GetMetrics(ctx context.Context) (map[types.NamespacedName]*VolumeStats, error)
|
||||
}
|
||||
|
||||
// VolumeStats is a struct containing metrics used by pvc-autoresizer
|
||||
type VolumeStats struct {
|
||||
AvailableBytes int64
|
||||
CapacityBytes int64
|
||||
AvailableInodeSize int64
|
||||
CapacityInodeSize int64
|
||||
}
|
||||
|
||||
type prometheusClient struct {
|
||||
prometheusAPI prometheusv1.API
|
||||
}
|
||||
|
||||
func (c *prometheusClient) GetMetrics(ctx context.Context) (map[types.NamespacedName]*VolumeStats, error) {
|
||||
volumeStatsMap := make(map[types.NamespacedName]*VolumeStats)
|
||||
|
||||
availableBytes, err := c.getMetricValues(ctx, volumeAvailableQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
capacityBytes, err := c.getMetricValues(ctx, volumeCapacityQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
availableInodeSize, err := c.getMetricValues(ctx, inodesAvailableQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
capacityInodeSize, err := c.getMetricValues(ctx, inodesCapacityQuery)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for key, val := range availableBytes {
|
||||
vs := &VolumeStats{AvailableBytes: val}
|
||||
if cb, ok := capacityBytes[key]; !ok {
|
||||
continue
|
||||
} else {
|
||||
vs.CapacityBytes = cb
|
||||
}
|
||||
if ais, ok := availableInodeSize[key]; ok {
|
||||
vs.AvailableInodeSize = ais
|
||||
}
|
||||
if cis, ok := capacityInodeSize[key]; ok {
|
||||
vs.CapacityInodeSize = cis
|
||||
}
|
||||
volumeStatsMap[key] = vs
|
||||
}
|
||||
|
||||
return volumeStatsMap, nil
|
||||
}
|
||||
|
||||
func (c *prometheusClient) getMetricValues(ctx context.Context, query string) (map[types.NamespacedName]int64, error) {
|
||||
res, _, err := c.prometheusAPI.Query(ctx, query, time.Now())
|
||||
if err != nil {
|
||||
metrics.MetricsClientFailTotal.Increment()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if res.Type() != model.ValVector {
|
||||
return nil, fmt.Errorf("unknown response type: %s", res.Type().String())
|
||||
}
|
||||
resultMap := make(map[types.NamespacedName]int64)
|
||||
vec := res.(model.Vector)
|
||||
for _, val := range vec {
|
||||
nn := types.NamespacedName{
|
||||
Namespace: string(val.Metric["namespace"]),
|
||||
Name: string(val.Metric["persistentvolumeclaim"]),
|
||||
}
|
||||
resultMap[nn] = int64(val.Value)
|
||||
}
|
||||
return resultMap, nil
|
||||
}
|
||||
329
vendor/github.com/kubesphere/pvc-autoresizer/runners/pvc_autoresizer.go
generated
vendored
Normal file
329
vendor/github.com/kubesphere/pvc-autoresizer/runners/pvc_autoresizer.go
generated
vendored
Normal file
@@ -0,0 +1,329 @@
|
||||
package runners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/kubesphere/pvc-autoresizer/metrics"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
)
|
||||
|
||||
// +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;update;patch
|
||||
// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create
|
||||
|
||||
// resizeEnableIndexKey is the field-indexer key for StorageClasses indexed
// by their resize-enabled annotation (see SetupIndexer / indexByResizeEnableAnnotation).
const resizeEnableIndexKey = ".metadata.annotations[resize.kubesphere.io/enabled]"

// storageClassNameIndexKey is the field-indexer key used to list PVCs by
// their .spec.storageClassName (see indexByStorageClassName).
const storageClassNameIndexKey = ".spec.storageClassName"

// logLevelWarn is the logr verbosity level used for warning-style messages.
const logLevelWarn = 3
|
||||
|
||||
// NewPVCAutoresizer returns a new pvcAutoresizer struct
|
||||
func NewPVCAutoresizer(mc MetricsClient, c client.Client, log logr.Logger, interval time.Duration, recorder record.EventRecorder) manager.Runnable {
|
||||
|
||||
return &pvcAutoresizer{
|
||||
metricsClient: mc,
|
||||
client: c,
|
||||
log: log,
|
||||
interval: interval,
|
||||
recorder: recorder,
|
||||
}
|
||||
}
|
||||
|
||||
// pvcAutoresizer periodically inspects per-PVC usage metrics and grows PVCs
// whose free space or free inodes fall below their configured thresholds.
type pvcAutoresizer struct {
	client        client.Client        // lists StorageClasses/PVCs and updates PVC requests
	metricsClient MetricsClient        // source of per-PVC capacity/availability stats
	interval      time.Duration        // delay between reconcile passes
	log           logr.Logger
	recorder      record.EventRecorder // emits "Resized" events on PVCs
}
|
||||
|
||||
// Start implements manager.Runnable
// It runs one reconcile pass per tick of w.interval until ctx is cancelled.
// Note that a reconcile error is logged and then returned, which terminates
// this runnable.
func (w *pvcAutoresizer) Start(ctx context.Context) error {
	ticker := time.NewTicker(w.interval)

	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			startTime := time.Now()
			err := w.reconcile(ctx)
			// Accumulate wall-clock time spent inside reconcile passes.
			metrics.ResizerLoopSecondsTotal.Add(time.Since(startTime).Seconds())
			if err != nil {
				w.log.Error(err, "failed to reconcile")
				return err
			}
		}
	}
}
|
||||
|
||||
func isTargetPVC(pvc *corev1.PersistentVolumeClaim, sc *storagev1.StorageClass) (bool, error) {
|
||||
quantity, err := pvcStorageLimit(pvc, sc)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("invalid storage limit: %w", err)
|
||||
}
|
||||
if quantity.IsZero() {
|
||||
return false, nil
|
||||
}
|
||||
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode != corev1.PersistentVolumeFilesystem {
|
||||
return false, nil
|
||||
}
|
||||
if pvc.Status.Phase != corev1.ClaimBound {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *pvcAutoresizer) getStorageClassList(ctx context.Context) (*storagev1.StorageClassList, error) {
|
||||
var scs storagev1.StorageClassList
|
||||
err := w.client.List(ctx, &scs, client.MatchingFields(map[string]string{resizeEnableIndexKey: "true"}))
|
||||
if err != nil {
|
||||
metrics.KubernetesClientFailTotal.Increment()
|
||||
return nil, err
|
||||
}
|
||||
return &scs, nil
|
||||
}
|
||||
|
||||
// reconcile performs one resize pass: for each auto-resize StorageClass it
// lists the PVCs bound to that class (via the storageClassName field index)
// and resizes those whose metrics cross their thresholds. List/metrics
// failures are logged and swallowed (returning nil so the ticker retries on
// the next pass); per-PVC failures are counted and skipped so one bad PVC
// cannot block the rest.
func (w *pvcAutoresizer) reconcile(ctx context.Context) error {
	scs, err := w.getStorageClassList(ctx)
	if err != nil {
		w.log.Error(err, "getStorageClassList failed")
		return nil
	}

	vsMap, err := w.metricsClient.GetMetrics(ctx)
	if err != nil {
		w.log.Error(err, "metricsClient.GetMetrics failed")
		return nil
	}

	for _, sc := range scs.Items {
		var pvcs corev1.PersistentVolumeClaimList
		// Uses the field index registered by SetupIndexer.
		err = w.client.List(ctx, &pvcs, client.MatchingFields(map[string]string{storageClassNameIndexKey: sc.Name}))
		if err != nil {
			metrics.KubernetesClientFailTotal.Increment()
			w.log.Error(err, "list pvc failed")
			return nil
		}
		for _, pvc := range pvcs.Items {
			isTarget, err := isTargetPVC(&pvc, &sc)
			if err != nil {
				metrics.ResizerFailedResizeTotal.Increment()
				w.log.WithValues("namespace", pvc.Namespace, "name", pvc.Name).Error(err, "failed to check target PVC")
				continue
			} else if !isTarget {
				continue
			}
			namespacedName := types.NamespacedName{
				Namespace: pvc.Namespace,
				Name:      pvc.Name,
			}
			// No metrics sample for this PVC yet; skip until one appears.
			if _, ok := vsMap[namespacedName]; !ok {
				continue
			}
			err = w.resize(ctx, &pvc, vsMap[namespacedName], &sc)
			if err != nil {
				metrics.ResizerFailedResizeTotal.Increment()
				w.log.WithValues("namespace", pvc.Namespace, "name", pvc.Name).Error(err, "failed to resize PVC")
			}
		}
	}

	return nil
}
|
||||
|
||||
// resize grows pvc when its free bytes or free inodes drop below the
// configured thresholds. Threshold and increase values come from PVC
// annotations, falling back to StorageClass annotations and then package
// defaults. Invalid annotations are logged at warn level and the PVC is
// skipped (nil return) rather than treated as an error. The new request is
// rounded up to a whole GiB and capped at the storage limit; the observed
// capacity is stored in PreviousCapacityBytesAnnotation so the next pass
// can detect that a triggered resize has not completed yet.
func (w *pvcAutoresizer) resize(ctx context.Context, pvc *corev1.PersistentVolumeClaim, vs *VolumeStats, sc *storagev1.StorageClass) error {
	log := w.log.WithName("resize").WithValues("namespace", pvc.Namespace, "name", pvc.Name)

	// Free-space threshold: PVC annotation takes precedence over the StorageClass.
	var resizeThreshold string
	if annotation, ok := pvc.Annotations[ResizeThresholdAnnotation]; ok && annotation != "" {
		resizeThreshold = annotation
	} else {
		resizeThreshold = sc.Annotations[ResizeThresholdAnnotation]
	}
	threshold, err := convertSizeInBytes(resizeThreshold, vs.CapacityBytes, DefaultThreshold)
	if err != nil {
		log.V(logLevelWarn).Info("failed to convert threshold annotation", "error", err.Error())
		// lint:ignore nilerr ignores this because invalid annotations should be allowed.
		return nil
	}

	// Inode threshold is read from the PVC only (percent notation required).
	inodesThreshold, err := convertSize(pvc.Annotations[ResizeInodesThresholdAnnotation], vs.CapacityInodeSize, DefaultInodesThreshold)
	if err != nil {
		log.V(logLevelWarn).Info("failed to convert threshold annotation", "error", err.Error())
		// lint:ignore nilerr ignores this because invalid annotations should be allowed.
		return nil
	}

	curReq := pvc.Spec.Resources.Requests[corev1.ResourceStorage]
	// Increase step: PVC annotation first, then StorageClass annotation.
	var resizeIncrease string
	if annotation, ok := pvc.Annotations[ResizeIncreaseAnnotation]; ok && annotation != "" {
		resizeIncrease = annotation
	} else {
		resizeIncrease = sc.Annotations[ResizeIncreaseAnnotation]
	}
	increase, err := convertSizeInBytes(resizeIncrease, curReq.Value(), DefaultIncrease)
	if err != nil {
		log.V(logLevelWarn).Info("failed to convert increase annotation", "error", err.Error())
		// lint:ignore nilerr ignores this because invalid annotations should be allowed.
		return nil
	}

	// If the reported capacity equals the capacity recorded at the last
	// resize we triggered, that resize has not finished yet: wait.
	preCap, exist := pvc.Annotations[PreviousCapacityBytesAnnotation]
	if exist {
		preCapInt64, err := strconv.ParseInt(preCap, 10, 64)
		if err != nil {
			log.V(logLevelWarn).Info("failed to parse pre_cap_bytes annotation", "error", err.Error())
			// lint:ignore nilerr ignores this because invalid annotations should be allowed.
			return nil
		}
		if preCapInt64 == vs.CapacityBytes {
			log.Info("waiting for resizing...", "capacity", vs.CapacityBytes)
			return nil
		}
	}
	limitRes, err := pvcStorageLimit(pvc, sc)
	if err != nil {
		log.Error(err, "fetching storage limit failed")
		return err
	}
	if curReq.Cmp(limitRes) == 0 {
		log.Info("volume storage limit reached")
		metrics.ResizerLimitReachedTotal.Increment()
		return nil
	}

	if threshold > vs.AvailableBytes || inodesThreshold > vs.AvailableInodeSize {
		if pvc.Annotations == nil {
			pvc.Annotations = make(map[string]string)
		}
		// Round the new request up to a whole GiB (1<<30), then cap at the limit.
		newReqBytes := int64(math.Ceil(float64(curReq.Value()+increase)/(1<<30))) << 30
		newReq := resource.NewQuantity(newReqBytes, resource.BinarySI)
		if newReq.Cmp(limitRes) > 0 {
			newReq = &limitRes
		}

		pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *newReq
		pvc.Annotations[PreviousCapacityBytesAnnotation] = strconv.FormatInt(vs.CapacityBytes, 10)
		err = w.client.Update(ctx, pvc)
		if err != nil {
			metrics.KubernetesClientFailTotal.Increment()
			return err
		}
		log.Info("resize started",
			"from", curReq.Value(),
			"to", newReq.Value(),
			"threshold", threshold,
			"available", vs.AvailableBytes,
			"inodesThreshold", inodesThreshold,
			"inodesAvailable", vs.AvailableInodeSize,
		)
		w.recorder.Eventf(pvc, corev1.EventTypeNormal, "Resized", "PVC volume is resized to %s", newReq.String())
		metrics.ResizerSuccessResizeTotal.Increment()
	}

	return nil
}
|
||||
|
||||
func indexByResizeEnableAnnotation(obj client.Object) []string {
|
||||
sc := obj.(*storagev1.StorageClass)
|
||||
if val, ok := sc.Annotations[AutoResizeEnabledKey]; ok {
|
||||
return []string{val}
|
||||
}
|
||||
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func indexByStorageClassName(obj client.Object) []string {
|
||||
pvc := obj.(*corev1.PersistentVolumeClaim)
|
||||
scName := pvc.Spec.StorageClassName
|
||||
if scName == nil {
|
||||
return []string{}
|
||||
}
|
||||
return []string{*scName}
|
||||
}
|
||||
|
||||
// SetupIndexer setup indices for PVC auto resizer
|
||||
func SetupIndexer(mgr ctrl.Manager, skipAnnotationCheck bool) error {
|
||||
idxFunc := indexByResizeEnableAnnotation
|
||||
if skipAnnotationCheck {
|
||||
idxFunc = func(_ client.Object) []string { return []string{"true"} }
|
||||
}
|
||||
err := mgr.GetFieldIndexer().IndexField(context.Background(), &storagev1.StorageClass{}, resizeEnableIndexKey, idxFunc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.PersistentVolumeClaim{}, storageClassNameIndexKey, indexByStorageClassName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertSizeInBytes(valStr string, capacity int64, defaultVal string) (int64, error) {
|
||||
if len(valStr) == 0 {
|
||||
valStr = defaultVal
|
||||
}
|
||||
if strings.HasSuffix(valStr, "%") {
|
||||
return calcSize(valStr, capacity)
|
||||
}
|
||||
|
||||
quantity, err := resource.ParseQuantity(valStr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
val := quantity.Value()
|
||||
if val <= 0 {
|
||||
return 0, fmt.Errorf("annotation value should be positive: %s", valStr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func convertSize(valStr string, capacity int64, defaultVal string) (int64, error) {
|
||||
if len(valStr) == 0 {
|
||||
valStr = defaultVal
|
||||
}
|
||||
if strings.HasSuffix(valStr, "%") {
|
||||
return calcSize(valStr, capacity)
|
||||
}
|
||||
return 0, fmt.Errorf("annotation value should be in percent notation: %s", valStr)
|
||||
}
|
||||
|
||||
// calcSize converts a percentage string such as "10%" into that fraction of
// capacity, truncated toward zero. The rate must lie in [0, 100].
func calcSize(valStr string, capacity int64) (int64, error) {
	pct := strings.TrimRight(valStr, "%")
	rate, err := strconv.ParseFloat(pct, 64)
	if err != nil {
		return 0, err
	}
	if rate < 0 || rate > 100 {
		return 0, fmt.Errorf("annotation value should between 0 and 100: %s", valStr)
	}
	return int64(float64(capacity) * rate / 100.0), nil
}
|
||||
|
||||
func pvcStorageLimit(pvc *corev1.PersistentVolumeClaim, sc *storagev1.StorageClass) (resource.Quantity, error) {
|
||||
// storage limit on the annotation has precedence
|
||||
if annotation, ok := sc.Annotations[StorageLimitAnnotation]; ok && annotation != "" {
|
||||
return resource.ParseQuantity(annotation)
|
||||
} else if annotation, ok := pvc.Annotations[StorageLimitAnnotation]; ok && annotation != "" {
|
||||
return resource.ParseQuantity(annotation)
|
||||
}
|
||||
|
||||
// Storage() returns 0 valued Quantity if Limits does not set
|
||||
return *pvc.Spec.Resources.Limits.Storage(), nil
|
||||
}
|
||||
422
vendor/github.com/kubesphere/pvc-autoresizer/runners/workload_autorestarter.go
generated
vendored
Normal file
422
vendor/github.com/kubesphere/pvc-autoresizer/runners/workload_autorestarter.go
generated
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
package runners
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/prometheus/common/log"
|
||||
appsV1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// restarter stops and later restarts the Deployments/StatefulSets whose
// PVCs are being resized on storage that only supports offline expansion.
type restarter struct {
	client        client.Client
	metricsClient MetricsClient // NOTE(review): never populated by NewRestarter; appears unused in this file
	interval      time.Duration // delay between reconcile passes
	log           logr.Logger
	recorder      record.EventRecorder
}
|
||||
|
||||
func NewRestarter(c client.Client, log logr.Logger, interval time.Duration, recorder record.EventRecorder) manager.Runnable {
|
||||
|
||||
return &restarter{
|
||||
client: c,
|
||||
log: log,
|
||||
interval: interval,
|
||||
recorder: recorder,
|
||||
}
|
||||
}
|
||||
|
||||
// Start implements manager.Runnable
// It runs one reconcile pass per tick of c.interval until ctx is cancelled.
// A reconcile error is logged and then returned, which terminates this
// runnable.
func (c *restarter) Start(ctx context.Context) error {
	ticker := time.NewTicker(c.interval)

	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			err := c.reconcile(ctx)
			if err != nil {
				c.log.Error(err, "failed to reconcile")
				return err
			}
		}
	}
}
|
||||
|
||||
func (c *restarter) reconcile(ctx context.Context) error {
|
||||
stopPvcs, stopPvcErr := c.getPVCListByConditionsType(ctx, v1.PersistentVolumeClaimResizing)
|
||||
if stopPvcErr != nil {
|
||||
return stopPvcErr
|
||||
}
|
||||
|
||||
stopDeploy, stopSts, timeoutDeploy, timeoutSts, stopAppErr := c.getAppList(ctx, stopPvcs)
|
||||
if stopAppErr != nil {
|
||||
return stopAppErr
|
||||
}
|
||||
//stop deploy/sts
|
||||
for _, deploy := range stopDeploy {
|
||||
log.Info("Stopping deploy: ", deploy.Name)
|
||||
err := c.stopDeploy(ctx, deploy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, sts := range stopSts {
|
||||
log.Info("Stopping StatefulSet: ", sts.Name)
|
||||
err := c.stopSts(ctx, sts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
//get pvc need restart
|
||||
startPvc, err := c.getPVCListByConditionsType(ctx, v1.PersistentVolumeClaimFileSystemResizePending)
|
||||
|
||||
//get list
|
||||
startDeploy := make([]*appsV1.Deployment, 0)
|
||||
startSts := make([]*appsV1.StatefulSet, 0)
|
||||
for _, pvc := range startPvc {
|
||||
dep, err := c.getDeploy(ctx, &pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dep != nil {
|
||||
startDeploy = append(startDeploy, dep)
|
||||
continue
|
||||
}
|
||||
sts, err := c.getSts(ctx, &pvc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sts != nil {
|
||||
startSts = append(startSts, sts)
|
||||
}
|
||||
}
|
||||
|
||||
//restart
|
||||
for _, deploy := range startDeploy {
|
||||
err := c.StartDeploy(ctx, deploy, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, deploy := range timeoutDeploy {
|
||||
err := c.StartDeploy(ctx, deploy, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, sts := range startSts {
|
||||
err := c.StartSts(ctx, sts, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, sts := range timeoutSts {
|
||||
err := c.StartSts(ctx, sts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *restarter) getPVCListByConditionsType(ctx context.Context, pvcType v1.PersistentVolumeClaimConditionType) ([]v1.PersistentVolumeClaim, error) {
|
||||
pvcs := make([]v1.PersistentVolumeClaim, 0)
|
||||
pvcList := v1.PersistentVolumeClaimList{}
|
||||
var opts []client.ListOption
|
||||
err := c.client.List(ctx, &pvcList, opts...)
|
||||
if err != nil {
|
||||
return pvcs, err
|
||||
}
|
||||
scNeedRestart := make(map[string]string, 0)
|
||||
scNeedRestart, err = c.getSc(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pvc := range pvcList.Items {
|
||||
if len(pvc.Status.Conditions) > 0 && pvc.Status.Conditions[0].Type == pvcType {
|
||||
if _, ok := scNeedRestart[*pvc.Spec.StorageClassName]; ok {
|
||||
pvcs = append(pvcs, pvc)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pvcs, nil
|
||||
}
|
||||
|
||||
func (c *restarter) getSc(ctx context.Context) (map[string]string, error) {
|
||||
scList := &storagev1.StorageClassList{}
|
||||
var opts []client.ListOption
|
||||
err := c.client.List(ctx, scList, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scMap := make(map[string]string, 0)
|
||||
for _, sc := range scList.Items {
|
||||
if val, ok := sc.Annotations[SupportOnlineResize]; ok {
|
||||
SupportOnline, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !SupportOnline {
|
||||
if val, ok := sc.Annotations[AutoRestartEnabledKey]; ok {
|
||||
NeedRestart, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if NeedRestart {
|
||||
scMap[sc.Name] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return scMap, nil
|
||||
}
|
||||
|
||||
func (c *restarter) stopDeploy(ctx context.Context, deploy *appsV1.Deployment) error {
|
||||
var zero int32
|
||||
zero = 0
|
||||
if val, ok := deploy.Annotations[RestartSkip]; ok {
|
||||
skip, _ := strconv.ParseBool(val)
|
||||
if skip {
|
||||
log.Info("Skip restart deploy ", deploy.Name)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if stage, ok := deploy.Annotations[RestartStage]; ok {
|
||||
if stage == "resizing" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
replicas := *deploy.Spec.Replicas
|
||||
updateDeploy := deploy.DeepCopy()
|
||||
|
||||
// add annotations
|
||||
updateDeploy.Annotations[RestartStopTime] = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
updateDeploy.Annotations[ExpectReplicaNums] = strconv.Itoa(int(replicas))
|
||||
updateDeploy.Annotations[RestartStage] = "resizing"
|
||||
updateDeploy.Spec.Replicas = &zero
|
||||
var opts []client.UpdateOption
|
||||
log.Info("stop deployment:" + deploy.Name)
|
||||
updateErr := c.client.Update(ctx, updateDeploy, opts...)
|
||||
return updateErr
|
||||
}
|
||||
|
||||
func (c *restarter) stopSts(ctx context.Context, sts *appsV1.StatefulSet) error {
|
||||
var zero int32
|
||||
zero = 0
|
||||
if val, ok := sts.Annotations[RestartSkip]; ok {
|
||||
skip, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skip {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if stage, ok := sts.Annotations[RestartStage]; ok {
|
||||
if stage == "resizing" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
replicas := *sts.Spec.Replicas
|
||||
updateSts := sts.DeepCopy()
|
||||
|
||||
// add annotations
|
||||
updateSts.Annotations[RestartStopTime] = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
updateSts.Annotations[ExpectReplicaNums] = strconv.Itoa(int(replicas))
|
||||
updateSts.Annotations[RestartStage] = "resizing"
|
||||
updateSts.Spec.Replicas = &zero
|
||||
var opts []client.UpdateOption
|
||||
log.Info("stop deployment:" + sts.Name)
|
||||
updateErr := c.client.Update(ctx, updateSts, opts...)
|
||||
return updateErr
|
||||
}
|
||||
|
||||
func (c *restarter) StartDeploy(ctx context.Context, deploy *appsV1.Deployment, timeout bool) error {
|
||||
if _, ok := deploy.Annotations[RestartStage]; !ok {
|
||||
return nil
|
||||
}
|
||||
if deploy.Annotations[RestartStage] != "resizing" {
|
||||
return fmt.Errorf("Unknown stage, skip ")
|
||||
}
|
||||
updateDeploy := deploy.DeepCopy()
|
||||
if _, ok := deploy.Annotations[ExpectReplicaNums]; !ok {
|
||||
return fmt.Errorf("Cannot find replica numbers before stop ")
|
||||
}
|
||||
expectReplicaNums, err := strconv.Atoi(deploy.Annotations[ExpectReplicaNums])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
replicas := int32(expectReplicaNums)
|
||||
if timeout {
|
||||
updateDeploy.Annotations[RestartSkip] = "true"
|
||||
}
|
||||
delete(updateDeploy.Annotations, RestartStopTime)
|
||||
delete(updateDeploy.Annotations, ExpectReplicaNums)
|
||||
delete(updateDeploy.Annotations, RestartStage)
|
||||
updateDeploy.Spec.Replicas = &replicas
|
||||
var opts []client.UpdateOption
|
||||
log.Info("start deployment: " + deploy.Name)
|
||||
err = c.client.Update(ctx, updateDeploy, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *restarter) StartSts(ctx context.Context, sts *appsV1.StatefulSet, timeout bool) error {
|
||||
if _, ok := sts.Annotations[RestartStage]; !ok {
|
||||
return nil
|
||||
}
|
||||
if sts.Annotations[RestartStage] != "resizing" {
|
||||
return fmt.Errorf("Unknown stage, skip ")
|
||||
}
|
||||
updateSts := sts.DeepCopy()
|
||||
if _, ok := sts.Annotations[ExpectReplicaNums]; !ok {
|
||||
return fmt.Errorf("Cannot find replica numbers before stop ")
|
||||
}
|
||||
expectReplicaNums, err := strconv.Atoi(sts.Annotations[ExpectReplicaNums])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
replicas := int32(expectReplicaNums)
|
||||
if timeout {
|
||||
updateSts.Annotations[RestartSkip] = "true"
|
||||
}
|
||||
delete(updateSts.Annotations, RestartStopTime)
|
||||
delete(updateSts.Annotations, ExpectReplicaNums)
|
||||
delete(updateSts.Annotations, RestartStage)
|
||||
updateSts.Spec.Replicas = &replicas
|
||||
var opts []client.UpdateOption
|
||||
log.Info("start deployment: " + sts.Name)
|
||||
err = c.client.Update(ctx, updateSts, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *restarter) getDeploy(ctx context.Context, pvc *v1.PersistentVolumeClaim) (*appsV1.Deployment, error) {
|
||||
// get deploy list
|
||||
deployList := &appsV1.DeploymentList{}
|
||||
var opts []client.ListOption
|
||||
err := c.client.List(ctx, deployList, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, deploy := range deployList.Items {
|
||||
if len(deploy.Spec.Template.Spec.Volumes) > 0 {
|
||||
for _, vol := range deploy.Spec.Template.Spec.Volumes {
|
||||
if vol.PersistentVolumeClaim != nil && vol.PersistentVolumeClaim.ClaimName == pvc.Name {
|
||||
return &deploy, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// getSts returns the StatefulSet that references targetPvc, either through
// a pod-template volume or through a volume claim template.
// NOTE(review): unlike getDeploy, a miss here is an error rather than
// (nil, nil) — callers rely on this to surface PVCs with no known workload.
// NOTE(review): the VolumeClaimTemplates branch compares the template name
// against the PVC name directly, not the generated "<template>-<sts>-<n>"
// claim name — confirm this is the intended matching rule.
func (c *restarter) getSts(ctx context.Context, targetPvc *v1.PersistentVolumeClaim) (*appsV1.StatefulSet, error) {
	//get all sts
	stsList := &appsV1.StatefulSetList{}
	var opts []client.ListOption
	err := c.client.List(ctx, stsList, opts...)
	if err != nil {
		return nil, err
	}

	for _, sts := range stsList.Items {
		if len(sts.Spec.Template.Spec.Volumes) > 0 {
			for _, vol := range sts.Spec.Template.Spec.Volumes {
				if vol.PersistentVolumeClaim != nil && vol.PersistentVolumeClaim.ClaimName == targetPvc.Name {
					return &sts, nil
				}
			}
		}
		for _, pvc := range sts.Spec.VolumeClaimTemplates {
			if pvc.Name == targetPvc.Name {
				return &sts, nil
			}
		}
	}

	return nil, fmt.Errorf("Cannot get deployment or statefulSet which pod mounted the pvc %s ", targetPvc.Name)
}
|
||||
|
||||
func (c *restarter) IfDeployTimeout(ctx context.Context, scName string, deploy *appsV1.Deployment) bool {
|
||||
sc := &storagev1.StorageClass{}
|
||||
err := c.client.Get(ctx, types.NamespacedName{Namespace: "", Name: scName}, sc)
|
||||
maxTime := 300
|
||||
if val, ok := sc.Annotations[ResizingMaxTime]; ok {
|
||||
userSetTime, err := strconv.Atoi(val)
|
||||
if err == nil {
|
||||
maxTime = userSetTime
|
||||
}
|
||||
}
|
||||
if _, ok := deploy.Annotations[RestartStopTime]; !ok {
|
||||
return false
|
||||
}
|
||||
startResizeTime, err := strconv.Atoi(deploy.Annotations[RestartStopTime])
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
timeout := int(time.Now().Unix())-startResizeTime > maxTime
|
||||
return timeout
|
||||
}
|
||||
|
||||
func (c *restarter) IfStsTimeout(ctx context.Context, scName string, sts *appsV1.StatefulSet) bool {
|
||||
sc := &storagev1.StorageClass{}
|
||||
err := c.client.Get(ctx, types.NamespacedName{Namespace: "", Name: scName}, sc)
|
||||
maxTime := 300
|
||||
if val, ok := sc.Annotations[ResizingMaxTime]; ok {
|
||||
userSetTime, err := strconv.Atoi(val)
|
||||
if err == nil {
|
||||
maxTime = userSetTime
|
||||
}
|
||||
}
|
||||
if _, ok := sts.Annotations[RestartStopTime]; !ok {
|
||||
return false
|
||||
}
|
||||
startResizeTime, err := strconv.Atoi(sts.Annotations[RestartStopTime])
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
timeout := int(time.Now().Unix())-startResizeTime > maxTime
|
||||
return timeout
|
||||
}
|
||||
|
||||
func (c *restarter) getAppList(ctx context.Context, pvcs []v1.PersistentVolumeClaim) (deployToStop []*appsV1.Deployment, stsToStop []*appsV1.StatefulSet, deployTimeout []*appsV1.Deployment, stsTimeout []*appsV1.StatefulSet, err error) {
|
||||
for _, pvc := range pvcs {
|
||||
dep, err := c.getDeploy(ctx, &pvc)
|
||||
if err != nil {
|
||||
return deployToStop, stsToStop, deployTimeout, stsTimeout, err
|
||||
}
|
||||
if dep != nil {
|
||||
if timeout := c.IfDeployTimeout(ctx, *pvc.Spec.StorageClassName, dep); timeout {
|
||||
deployTimeout = append(deployTimeout, dep)
|
||||
} else {
|
||||
deployToStop = append(deployToStop, dep)
|
||||
}
|
||||
continue
|
||||
}
|
||||
sts, stsErr := c.getSts(ctx, &pvc)
|
||||
if stsErr != nil {
|
||||
return deployToStop, stsToStop, deployTimeout, stsTimeout, err
|
||||
}
|
||||
if sts != nil {
|
||||
if timeout := c.IfStsTimeout(ctx, *pvc.Spec.StorageClassName, sts); timeout {
|
||||
stsTimeout = append(stsTimeout, sts)
|
||||
} else {
|
||||
stsToStop = append(stsToStop, sts)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
4
vendor/modules.txt
vendored
4
vendor/modules.txt
vendored
@@ -503,6 +503,10 @@ github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversi
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions/volumesnapshot/v1beta1
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1
|
||||
github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1beta1
|
||||
# github.com/kubesphere/pvc-autoresizer v0.1.1
|
||||
## explicit
|
||||
github.com/kubesphere/pvc-autoresizer/metrics
|
||||
github.com/kubesphere/pvc-autoresizer/runners
|
||||
# github.com/kubesphere/sonargo v0.0.2 => github.com/kubesphere/sonargo v0.0.2
|
||||
## explicit
|
||||
github.com/kubesphere/sonargo/sonar
|
||||
|
||||
Reference in New Issue
Block a user