Skip to content

Commit

Permalink
Merge pull request #368 from gkurz/merge-to-main-for-1.5.1
Browse files Browse the repository at this point in the history
Merge to main for 1.5.1
  • Loading branch information
gkurz committed Dec 8, 2023
2 parents 9891e9b + 7f0995b commit 817ff2a
Show file tree
Hide file tree
Showing 10 changed files with 50 additions and 54 deletions.
2 changes: 1 addition & 1 deletion Makefile
Expand Up @@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g. make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g. export VERSION=0.0.2)
VERSION ?= 1.5.0
VERSION ?= 1.5.1

# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (e.g. CHANNELS = "candidate,fast,stable")
Expand Down
4 changes: 2 additions & 2 deletions api/v1/kataconfig_types.go
Expand Up @@ -48,9 +48,9 @@ type KataConfigSpec struct {

// KataConfigStatus defines the observed state of KataConfig
type KataConfigStatus struct {
// RuntimeClass is the names of the RuntimeClasses created by this controller
// RuntimeClasses are the names of the RuntimeClasses created by this controller
// +optional
RuntimeClass []string `json:"runtimeClass"`
RuntimeClasses []string `json:"runtimeClasses"`

// +optional
KataNodes KataNodesStatus `json:"kataNodes,omitempty"`
Expand Down
2 changes: 1 addition & 1 deletion config/manager/kustomization.yaml
Expand Up @@ -13,4 +13,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/openshift_sandboxed_containers/openshift-sandboxed-containers-operator
newTag: 1.5.0
newTag: 1.5.1
Expand Up @@ -13,7 +13,7 @@ metadata:
}
]
capabilities: Seamless Upgrades
olm.skipRange: '>=1.1.0 <1.5.0'
olm.skipRange: '>=1.1.0 <1.5.1'
operatorframework.io/suggested-namespace: openshift-sandboxed-containers-operator
operators.openshift.io/infrastructure-features: '["disconnected", "fips"]'
operators.openshift.io/valid-subscription: '["OpenShift Container Platform", "OpenShift
Expand All @@ -25,7 +25,7 @@ metadata:
labels:
operatorframework.io/arch.amd64: supported
operatorframework.io/os.linux: supported
name: sandboxed-containers-operator.v1.5.0
name: sandboxed-containers-operator.v1.5.1
spec:
apiservicedefinitions: {}
customresourcedefinitions:
Expand Down Expand Up @@ -365,7 +365,7 @@ spec:
maturity: beta
provider:
name: Red Hat
version: 1.5.0
version: 1.5.1
webhookdefinitions:
- admissionReviewVersions:
- v1
Expand Down
2 changes: 2 additions & 0 deletions config/peerpods/podvm/aws-VM-image-create-job.yaml
Expand Up @@ -40,6 +40,8 @@ spec:
- name: image-id
mountPath: /output
env:
- name: CLOUD_PROVIDER
value: aws
- name: PODVM_DISTRO
value: rhel
- name: IMAGE_NAME
Expand Down
2 changes: 2 additions & 0 deletions config/peerpods/podvm/azure-VM-image-create-job.yaml
Expand Up @@ -40,6 +40,8 @@ spec:
env:
# - name: VM_SIZE
# value: "Standard_A2_v2"
- name: CLOUD_PROVIDER
value: azure
- name: PODVM_DISTRO
value: rhel
- name: PUBLISHER
Expand Down
5 changes: 3 additions & 2 deletions config/samples/deploy.yaml
Expand Up @@ -6,7 +6,7 @@ metadata:
spec:
DisplayName: My Operator Catalog
sourceType: grpc
image: quay.io/openshift_sandboxed_containers/openshift-sandboxed-containers-operator-catalog:v1.5.0
image: quay.io/openshift_sandboxed_containers/openshift-sandboxed-containers-operator-catalog:v1.5.1
updateStrategy:
registryPoll:
interval: 5m
Expand Down Expand Up @@ -36,4 +36,5 @@ spec:
name: sandboxed-containers-operator
source: my-operator-catalog
sourceNamespace: openshift-marketplace
startingCSV: sandboxed-containers-operator.v1.5.0
startingCSV: sandboxed-containers-operator.v1.5.1

77 changes: 34 additions & 43 deletions controllers/openshift_controller.go
Expand Up @@ -165,42 +165,6 @@ func (r *KataConfigOpenShiftReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, updateErr
}

ds := r.processDaemonsetForMonitor()
// Set KataConfig instance as the owner and controller
if err := controllerutil.SetControllerReference(r.kataConfig, ds, r.Scheme); err != nil {
r.Log.Error(err, "failed to set controller reference on the monitor daemonset")
return ctrl.Result{}, err
}
r.Log.Info("controller reference set for the monitor daemonset")

foundDs := &appsv1.DaemonSet{}
err = r.Client.Get(context.TODO(), types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}, foundDs)
if err != nil {
//The DaemonSet (DS) should be ideally created after the required SeLinux policy is installed on the
//node. One of the ways to ensure this is to check for the existence of "kata" runtimeclass before
//creating the DS
//Alternatively we can create the DS post execution of createRuntimeClass()
if k8serrors.IsNotFound(err) {
if contains(r.kataConfig.Status.RuntimeClass, "kata") {
r.Log.Info("Creating a new installation monitor daemonset", "ds.Namespace", ds.Namespace, "ds.Name", ds.Name)
err = r.Client.Create(context.TODO(), ds)
if err != nil {
r.Log.Error(err, "error when creating monitor daemonset")
res = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}
}
}
} else {
r.Log.Error(err, "could not get monitor daemonset, try again")
res = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}
}
} else {
r.Log.Info("Updating monitor daemonset", "ds.Namespace", ds.Namespace, "ds.Name", ds.Name)
err = r.Client.Update(context.TODO(), ds)
if err != nil {
r.Log.Error(err, "error when updating monitor daemonset")
res = ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}
}
}
cMap := r.processDashboardConfigMap()
if cMap == nil {
r.Log.Info("failed to generate config map for metrics dashboard")
Expand Down Expand Up @@ -597,7 +561,7 @@ func (r *KataConfigOpenShiftReconciler) listKataPods() error {
}
for _, pod := range podList.Items {
if pod.Spec.RuntimeClassName != nil {
if contains(r.kataConfig.Status.RuntimeClass, *pod.Spec.RuntimeClassName) {
if contains(r.kataConfig.Status.RuntimeClasses, *pod.Spec.RuntimeClassName) {
return fmt.Errorf("Existing pods using \"%v\" RuntimeClass found. Please delete the pods manually for KataConfig deletion to proceed", *pod.Spec.RuntimeClassName)
}
}
Expand Down Expand Up @@ -756,8 +720,8 @@ func (r *KataConfigOpenShiftReconciler) createRuntimeClass(runtimeClassName stri
}
}

if !contains(r.kataConfig.Status.RuntimeClass, runtimeClassName) {
r.kataConfig.Status.RuntimeClass = append(r.kataConfig.Status.RuntimeClass, runtimeClassName)
if !contains(r.kataConfig.Status.RuntimeClasses, runtimeClassName) {
r.kataConfig.Status.RuntimeClasses = append(r.kataConfig.Status.RuntimeClasses, runtimeClassName)
}

return nil
Expand Down Expand Up @@ -1161,6 +1125,37 @@ func (r *KataConfigOpenShiftReconciler) processKataConfigInstallRequest() (ctrl.
return reconcile.Result{Requeue: true, RequeueAfter: 15 * time.Second}, err
}

ds := r.processDaemonsetForMonitor()
// Set KataConfig instance as the owner and controller
if err = controllerutil.SetControllerReference(r.kataConfig, ds, r.Scheme); err != nil {
r.Log.Error(err, "failed to set controller reference on the monitor daemonset")
return ctrl.Result{}, err
}
r.Log.Info("controller reference set for the monitor daemonset")

foundDs := &appsv1.DaemonSet{}
err = r.Client.Get(context.TODO(), types.NamespacedName{Name: ds.Name, Namespace: ds.Namespace}, foundDs)
if err != nil {
if k8serrors.IsNotFound(err) {
r.Log.Info("Creating a new installation monitor daemonset", "ds.Namespace", ds.Namespace, "ds.Name", ds.Name)
err = r.Client.Create(context.TODO(), ds)
if err != nil {
r.Log.Error(err, "error when creating monitor daemonset")
return ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}, err
}
} else {
r.Log.Error(err, "could not get monitor daemonset, try again")
return ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}, err
}
} else {
r.Log.Info("Updating monitor daemonset", "ds.Namespace", ds.Namespace, "ds.Name", ds.Name)
err = r.Client.Update(context.TODO(), ds)
if err != nil {
r.Log.Error(err, "error when updating monitor daemonset")
return ctrl.Result{Requeue: true, RequeueAfter: 15 * time.Second}, err
}
}

// create PeerPodConfig CRD and runtimeclass for peerpods
if r.kataConfig.Spec.EnablePeerPods {
err = r.enablePeerPodsMiscConfigs()
Expand Down Expand Up @@ -1672,10 +1667,6 @@ const (
// will be returned.
func (r *KataConfigOpenShiftReconciler) updateStatus() error {

if r.getInProgressConditionValue() != corev1.ConditionTrue {
return nil
}

err, nodeList := r.getNodes()
if err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion hack/aws-image-job.yaml
Expand Up @@ -21,7 +21,7 @@ spec:

initContainers:
- name: payload
image: registry.redhat.io/openshift-sandboxed-containers/osc-podvm-payload-rhel9:1.5.0
image: registry.redhat.io/openshift-sandboxed-containers/osc-podvm-payload-rhel9:1.5.1
imagePullPolicy: Always
volumeMounts:
- name: shared-data
Expand Down
2 changes: 1 addition & 1 deletion hack/azure-image-job.yaml
Expand Up @@ -20,7 +20,7 @@ spec:

initContainers:
- name: payload
image: registry.redhat.io/openshift-sandboxed-containers/osc-podvm-payload-rhel9:1.5.0
image: registry.redhat.io/openshift-sandboxed-containers/osc-podvm-payload-rhel9:1.5.1
imagePullPolicy: Always
volumeMounts:
- name: shared-data
Expand Down

0 comments on commit 817ff2a

Please sign in to comment.