Skip to content
Draft
44 changes: 44 additions & 0 deletions examples/kubernetes_multicluster/canary/app.pipecd.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
apiVersion: pipecd.dev/v1beta1
kind: KubernetesApp
spec:
  name: canary-multicluster
  labels:
    env: example
    team: product
  description: |
    This app demonstrates how to deploy a Kubernetes application across multiple clusters
    using a Canary strategy with the kubernetes_multicluster plugin.
    The canary variant is first rolled out to cluster-us only, then after approval
    the primary rollout is applied to all clusters, and finally the canary resources
    are cleaned up with K8S_CANARY_CLEAN.
  plugins:
    kubernetes_multicluster:
      input:
        # Each multiTarget entry maps a deploy target (cluster) to the
        # manifests that should be applied on it.
        multiTargets:
          - target:
              name: cluster-us
            manifests:
              - cluster-us/deployment.yaml
              - cluster-us/service.yaml
          - target:
              name: cluster-eu
            manifests:
              - cluster-eu/deployment.yaml
              - cluster-eu/service.yaml
  pipeline:
    stages:
      # Deploy the canary variant to cluster-us only (10% of replicas).
      - name: K8S_CANARY_ROLLOUT
        with:
          replicas: 10%
          # Restrict this stage to a subset of the deploy targets above.
          multiTarget:
            - target:
                name: cluster-us
      # Wait for approval before rolling out to all clusters.
      - name: WAIT_APPROVAL
      # Roll out the new version as primary to all clusters.
      - name: K8S_PRIMARY_ROLLOUT
        with:
          prune: true
      # Remove the canary variant resources from all clusters.
      - name: K8S_CANARY_CLEAN
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Primary-variant Deployment applied to the cluster-eu deploy target.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: canary-multicluster
  labels:
    app: canary-multicluster
spec:
  replicas: 2
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: canary-multicluster
      # Variant label used by PipeCD to distinguish primary/canary resources.
      pipecd.dev/variant: primary
  template:
    metadata:
      labels:
        app: canary-multicluster
        pipecd.dev/variant: primary
    spec:
      containers:
        - name: helloworld
          image: ghcr.io/pipe-cd/helloworld:v0.32.0
          args:
            - server
          ports:
            - containerPort: 9085
11 changes: 11 additions & 0 deletions examples/kubernetes_multicluster/canary/cluster-eu/service.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Service exposing the helloworld pods on the cluster-eu deploy target.
apiVersion: v1
kind: Service
metadata:
  name: canary-multicluster
spec:
  selector:
    app: canary-multicluster
  ports:
    - protocol: TCP
      port: 9085
      targetPort: 9085
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Primary-variant Deployment applied to the cluster-us deploy target.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: canary-multicluster
  labels:
    app: canary-multicluster
spec:
  replicas: 2
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: canary-multicluster
      # Variant label used by PipeCD to distinguish primary/canary resources.
      pipecd.dev/variant: primary
  template:
    metadata:
      labels:
        app: canary-multicluster
        pipecd.dev/variant: primary
    spec:
      containers:
        - name: helloworld
          image: ghcr.io/pipe-cd/helloworld:v0.32.0
          args:
            - server
          ports:
            - containerPort: 9085
11 changes: 11 additions & 0 deletions examples/kubernetes_multicluster/canary/cluster-us/service.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Service exposing the helloworld pods on the cluster-us deploy target.
apiVersion: v1
kind: Service
metadata:
  name: canary-multicluster
spec:
  selector:
    app: canary-multicluster
  ports:
    - protocol: TCP
      port: 9085
      targetPort: 9085
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,99 @@
// The label will be configured to variant manifests used to distinguish them.
VariantLabel KubernetesVariantLabel `json:"variantLabel"`

// TODO: Define fields for KubernetesApplicationSpec.
// The service manifest used for variant service generation (canary/primary).
Service K8sResourceReference `json:"service"`

Check failure on line 60 in pkg/app/pipedv1/plugin/kubernetes_multicluster/config/application.go

View workflow job for this annotation

GitHub Actions / go (./pkg/app/pipedv1/plugin/kubernetes_multicluster)

[golangci] reported by reviewdog 🐶 Service redeclared Raw Output: config/application.go:60:2: Service redeclared
}

// K8sCanaryRolloutStageOptions contains all configurable values for a K8S_CANARY_ROLLOUT stage.
type K8sCanaryRolloutStageOptions struct {
	// How many pods for CANARY workloads.
	// An integer value can be specified to indicate an absolute value of pod number.
	// Or a string suffixed by "%" to indicate a percentage value compared to the pod number of PRIMARY.
	// Default is 1 pod.
	Replicas unit.Replicas `json:"replicas"`
	// Suffix that should be used when naming the CANARY variant's resources.
	// Default is "canary".
	Suffix string `json:"suffix" default:"canary"`
	// Whether the CANARY service should be created.
	CreateService bool `json:"createService"`
	// List of patches used to customize manifests for CANARY variant.
	Patches []K8sResourcePatch `json:"patches,omitempty"`
}

// UnmarshalJSON decodes the stage options from JSON and then fills any
// unset fields with their declared `default:` tag values.
func (o *K8sCanaryRolloutStageOptions) UnmarshalJSON(data []byte) error {
	// A local alias type drops this method from the type's method set,
	// preventing infinite recursion during json.Unmarshal.
	type plain K8sCanaryRolloutStageOptions
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*o = K8sCanaryRolloutStageOptions(decoded)
	return defaults.Set(o)
}

// K8sPrimaryRolloutStageOptions contains all configurable values for a K8S_PRIMARY_ROLLOUT stage.
type K8sPrimaryRolloutStageOptions struct {
	// Suffix that should be used when naming the PRIMARY variant's resources.
	// Default is "primary".
	Suffix string `json:"suffix" default:"primary"`
	// Whether the PRIMARY service should be created.
	CreateService bool `json:"createService"`
	// Whether the PRIMARY variant label should be added to manifests if they were missing.
	AddVariantLabelToSelector bool `json:"addVariantLabelToSelector"`
	// Whether the resources that are no longer defined in Git should be removed or not.
	Prune bool `json:"prune"`
}

// UnmarshalJSON decodes the stage options from JSON and then fills any
// unset fields with their declared `default:` tag values.
func (o *K8sPrimaryRolloutStageOptions) UnmarshalJSON(data []byte) error {
	// A local alias type drops this method from the type's method set,
	// preventing infinite recursion during json.Unmarshal.
	type plain K8sPrimaryRolloutStageOptions
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*o = K8sPrimaryRolloutStageOptions(decoded)
	return defaults.Set(o)
}

// K8sResourcePatch represents a patch operation for a Kubernetes resource.
type K8sResourcePatch struct {
	// The target resource to be patched.
	Target K8sResourcePatchTarget `json:"target"`
	// List of patch operations to apply, in order.
	Ops []K8sResourcePatchOp `json:"ops"`
}

// K8sResourcePatchTarget represents the target of a patch operation for a Kubernetes resource.
// The target manifest is identified by its Kind and Name.
type K8sResourcePatchTarget struct {
	// The kind of the target resource.
	Kind string `json:"kind"`
	// The name of the target resource.
	Name string `json:"name"`
	// The root document in the manifest to be patched (e.g. for helm, it might be "data.deployment.yaml").
	DocumentRoot string `json:"documentRoot,omitempty"`
}

// K8sResourcePatchOpName represents the name of a patch operation for a Kubernetes resource.
// See the K8sResourcePatchOp* constants for the supported values.
type K8sResourcePatchOpName string

const (
	// K8sResourcePatchOpYAMLReplace is the name of the patch operation that replaces the target with a new YAML document.
	K8sResourcePatchOpYAMLReplace K8sResourcePatchOpName = "yaml-replace"
)

// K8sResourcePatchOp represents a patch operation for a Kubernetes resource.
type K8sResourcePatchOp struct {
	// The operation to apply. Defaults to "yaml-replace".
	Op K8sResourcePatchOpName `json:"op" default:"yaml-replace"`
	// The path to the field to be patched.
	Path string `json:"path"`
	// The value to replace with.
	Value string `json:"value"`
}

func (s *KubernetesApplicationSpec) UnmarshalJSON(data []byte) error {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -356,3 +356,87 @@ func patchManifest(m provider.Manifest, patch kubeconfig.K8sResourcePatch) (*pro

return buildManifest(proc.Bytes())
}

// executeK8sMultiCanaryCleanStage removes the CANARY variant resources from
// every matched deploy target. When the application config declares no
// multiTargets, all deploy targets are cleaned; otherwise only the deploy
// targets named by a multiTarget entry are cleaned, and unmatched entries are
// logged and skipped. Cleaning runs in parallel, one goroutine per target,
// and the stage fails if any target fails.
func (p *Plugin) executeK8sMultiCanaryCleanStage(ctx context.Context, input *sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec], dts []*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig]) sdk.StageStatus {
	lp := input.Client.LogPersister()

	cfg, err := input.Request.TargetDeploymentSource.AppConfig()
	if err != nil {
		lp.Errorf("Failed while decoding application config (%v)", err)
		return sdk.StageStatusFailure
	}

	// Index deploy targets by name so multiTarget entries can be matched.
	deployTargetMap := make(map[string]*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig], len(dts))
	for _, dt := range dts {
		deployTargetMap[dt.Name] = dt
	}

	// Resolve the deploy targets to clean.
	targets := make([]*sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig], 0, len(dts))
	if len(cfg.Spec.Input.MultiTargets) == 0 {
		targets = append(targets, dts...)
	} else {
		for _, mt := range cfg.Spec.Input.MultiTargets {
			dt, ok := deployTargetMap[mt.Target.Name]
			if !ok {
				lp.Infof("Ignore multi target '%s': not matched any deployTarget", mt.Target.Name)
				continue
			}
			targets = append(targets, dt)
		}
	}

	// Clean every target concurrently; the first error cancels the group.
	eg, ctx := errgroup.WithContext(ctx)
	for _, dt := range targets {
		eg.Go(func() error {
			lp.Infof("Start cleaning CANARY variant on target %s", dt.Name)
			if err := p.canaryClean(ctx, input, dt, cfg); err != nil {
				return fmt.Errorf("failed to clean CANARY variant on target %s: %w", dt.Name, err)
			}
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		lp.Errorf("Failed while cleaning CANARY variant (%v)", err)
		return sdk.StageStatusFailure
	}

	return sdk.StageStatusSuccess
}

// canaryClean deletes the CANARY variant resources belonging to this
// application from the given deploy target.
func (p *Plugin) canaryClean(
	ctx context.Context,
	input *sdk.ExecuteStageInput[kubeconfig.KubernetesApplicationSpec],
	dt *sdk.DeployTarget[kubeconfig.KubernetesDeployTargetConfig],
	cfg *sdk.ApplicationConfig[kubeconfig.KubernetesApplicationSpec],
) error {
	lp := input.Client.LogPersister()
	appCfg := cfg.Spec

	// Resolve the kubectl binary, preferring the app-level version and
	// falling back to the deploy target's configured version.
	reg := toolregistry.NewRegistry(input.Client.ToolRegistry())
	kubectlPath, err := reg.Kubectl(ctx, cmp.Or(appCfg.Input.KubectlVersion, dt.Config.KubectlVersion))
	if err != nil {
		return fmt.Errorf("failed while getting kubectl tool: %w", err)
	}

	kubectl := provider.NewKubectl(kubectlPath)
	applier := provider.NewApplier(kubectl, appCfg.Input, dt.Config, input.Logger)

	// Delete everything labeled with this application's canary variant value.
	err = deleteVariantResources(
		ctx, lp, kubectl, dt.Config.KubeConfigPath, applier,
		input.Request.Deployment.ApplicationID,
		appCfg.VariantLabel.Key, appCfg.VariantLabel.CanaryValue,
	)
	if err != nil {
		return fmt.Errorf("unable to remove canary resources: %w", err)
	}
	return nil
}
Loading
Loading