Test orc #5340

Draft
wants to merge 4 commits into base: main
6 changes: 3 additions & 3 deletions cmd/cluster/openstack/create.go
@@ -217,15 +217,15 @@ func (o *CreateOptions) GenerateResources() ([]client.Object, error) {
Namespace: o.namespace,
Name: "capi-provider-role",
},
// The following rule is required for CAPO to watch for the Images resources created by ORC,
// The following rule is required for CAPO to reconcile the Images resources created by ORC,
// which is a dependency since CAPO v0.11.0.
// This rule is also defined in the Hypershift HostedCluster controller and the Hypershift Operator when creating
// the cluster.
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"openstack.k-orc.cloud"},
Resources: []string{"images"},
Verbs: []string{"list", "watch"},
Resources: []string{"images", "images/status"},
Verbs: []string{rbacv1.VerbAll},
},
},
})
@@ -40,9 +40,9 @@ rules:
- openstack.k-orc.cloud
resources:
- images
- images/status
verbs:
- list
- watch
- '*'
---
apiVersion: v1
data:
@@ -40,9 +40,9 @@ rules:
- openstack.k-orc.cloud
resources:
- images
- images/status
verbs:
- list
- watch
- '*'
---
apiVersion: v1
data:
6 changes: 3 additions & 3 deletions cmd/install/assets/hypershift_operator.go
@@ -1169,14 +1169,14 @@ func (o HyperShiftOperatorClusterRole) Build() *rbacv1.ClusterRole {
Resources: []string{"ipaddresses", "ipaddresses/status"},
Verbs: []string{"create", "delete", "get", "list", "update", "watch"},
},
// The following rule is required for CAPO to watch for the Images resources created by ORC,
// The following rule is required for CAPO to reconcile the Images resources created by ORC,
// which is a dependency since CAPO v0.11.0.
// This rule is also defined in the Hypershift HostedCluster controller and the Hypershift CLI when creating
// the cluster.
{
APIGroups: []string{"openstack.k-orc.cloud"},
Resources: []string{"images"},
Verbs: []string{"list", "watch"},
Resources: []string{"images", "images/status"},
Verbs: []string{rbacv1.VerbAll},
},
{ // This allows the kubevirt csi driver to hotplug volumes to KubeVirt VMs.
APIGroups: []string{"subresources.kubevirt.io"},
11 changes: 1 addition & 10 deletions cmd/nodepool/openstack/create.go
@@ -69,15 +69,6 @@ func (o *RawOpenStackPlatformCreateOptions) Validate() (*ValidatedOpenStackPlatf
return nil, fmt.Errorf("flavor is required")
}

// TODO(emilien): Remove that validation once we support using the image from the release payload.
// This will be possible when CAPO supports managing images in the OpenStack cluster:
// https://github.com/kubernetes-sigs/cluster-api-provider-openstack/pull/2130
// For 4.17 we might leave this as is and let the user provide the image name as
// we plan to deliver the OpenStack provider as a dev preview.
if o.ImageName == "" {
return nil, fmt.Errorf("image name is required")
}

additionalports, err := convertAdditionalPorts(o.AdditionalPorts)
if err != nil {
return nil, err
@@ -99,7 +90,7 @@ func BindOptions(opts *RawOpenStackPlatformCreateOptions, flags *pflag.FlagSet)

func bindCoreOptions(opts *RawOpenStackPlatformCreateOptions, flags *pflag.FlagSet) {
flags.StringVar(&opts.Flavor, "openstack-node-flavor", opts.Flavor, "The flavor to use for the nodepool (required)")
flags.StringVar(&opts.ImageName, "openstack-node-image-name", opts.ImageName, "The image name to use for the nodepool (required)")
flags.StringVar(&opts.ImageName, "openstack-node-image-name", opts.ImageName, "The image name to use for the nodepool (optional)")
flags.StringVar(&opts.AvailabityZone, "openstack-node-availability-zone", opts.AvailabityZone, "The availability zone to use for the nodepool (optional)")
flags.StringArrayVar(&opts.AdditionalPorts, "openstack-node-additional-port", opts.AdditionalPorts, fmt.Sprintf(`Specify additional port that should be attached to the nodes, the "network-id" field should point to an existing neutron network ID and the "vnic-type" is the type of the port to create, it can be specified multiple times to attach to multiple ports. Supported parameters: %s, example: "network-id:40a355cb-596d-495c-8766-419d98cadd57,vnic-type:direct"`, cmdutil.Supported(PortSpec{})))
}
13 changes: 1 addition & 12 deletions cmd/nodepool/openstack/create_test.go
@@ -15,21 +15,10 @@ func TestRawOpenStackPlatformCreateOptions_Validate(t *testing.T) {
{
name: "should fail if flavor is missing",
input: RawOpenStackPlatformCreateOptions{
OpenStackPlatformOptions: &OpenStackPlatformOptions{
ImageName: "rhcos",
},
OpenStackPlatformOptions: &OpenStackPlatformOptions{},
},
expectedError: "flavor is required",
},
{
name: "should fail if image name is missing",
input: RawOpenStackPlatformCreateOptions{
OpenStackPlatformOptions: &OpenStackPlatformOptions{
Flavor: "flavor",
},
},
expectedError: "image name is required",
},
{
name: "should pass when AZ is provided",
input: RawOpenStackPlatformCreateOptions{
21 changes: 1 addition & 20 deletions docs/content/how-to/openstack/create-openstack-cluster.md
@@ -28,7 +28,6 @@ Upon scaling up a NodePool, a Machine will be created, and the CAPI provider wil
* OpenStack Octavia service must be running in the cloud hosting the guest cluster when ingress is configured with an Octavia load balancer.
In the future, we'll explore other Ingress options like MetalLB.
* The default external network (on which the kube-apiserver LoadBalancer type service is created) of the Management OCP cluster must be reachable from the guest cluster.
* The RHCOS image must be uploaded to OpenStack.

### Install the HyperShift and HCP CLI

@@ -84,23 +83,6 @@ operator-755d587f44-lrtrq 1/1 Running 0 114s
operator-755d587f44-qj6pz 1/1 Running 0 114s
```

### Upload RHCOS image in OpenStack

For now, we need to manually push an RHCOS image that will be used when deploying the node pools
on OpenStack. In the [future](https://issues.redhat.com/browse/OSASINFRA-3492), the CAPI provider (CAPO) will handle the RHCOS image
lifecycle by using the image available in the chosen release payload.

Here is an example of how to upload an RHCOS image to OpenStack:

```shell
openstack image create --disk-format qcow2 --file rhcos-openstack.x86_64.qcow2 rhcos
```

!!! note

The `rhcos-openstack.x86_64.qcow2` file is the RHCOS image that was downloaded from the OpenShift mirror.
You can download the latest RHCOS image from the [Red Hat OpenShift Container Platform mirror](https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/).

## Create a floating IP for the Ingress (optional)

To get Ingress healthy in a HostedCluster without manual intervention, you need to create a floating IP that will be used by the Ingress service.
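For example, a floating IP can be allocated with the OpenStack CLI (a minimal sketch, assuming `$EXTERNAL_NETWORK_ID` holds the ID of the external network used by the cluster):

```shell
# Allocate a floating IP on the external network; the address it returns is
# what gets passed to the cluster via --openstack-ingress-floating-ip.
openstack floating ip create $EXTERNAL_NETWORK_ID
```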
@@ -168,7 +150,6 @@ hcp create cluster openstack \
--openstack-credentials-file $CLOUDS_YAML \
--openstack-ca-cert-file $CA_CERT_PATH \
--openstack-external-network-id $EXTERNAL_NETWORK_ID \
--openstack-node-image-name $IMAGE_NAME \
--openstack-node-flavor $FLAVOR \
--openstack-ingress-floating-ip $INGRESS_FLOATING_IP
```
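Providing a pre-existing Glance image is still supported: `--openstack-node-image-name` is now optional, and when it is set the NodePool uses that image instead of the ORC-managed release image. A sketch of the same command with the optional flag appended (it assumes `$IMAGE_NAME` points to an image already uploaded to Glance; the remaining required flags are unchanged):

```shell
hcp create cluster openstack \
  --openstack-credentials-file $CLOUDS_YAML \
  --openstack-ca-cert-file $CA_CERT_PATH \
  --openstack-external-network-id $EXTERNAL_NETWORK_ID \
  --openstack-node-flavor $FLAVOR \
  --openstack-ingress-floating-ip $INGRESS_FLOATING_IP \
  --openstack-node-image-name $IMAGE_NAME  # optional: pin to an existing Glance image
```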
@@ -313,7 +294,7 @@ port for SR-IOV, with no port security and address pairs:
```shell
export NODEPOOL_NAME=$CLUSTER_NAME-extra-az
export WORKER_COUNT="2"
export IMAGE_NAME="rhcos"
export IMAGE_NAME="rhcos" # Pre-existing Glance image used for this NodePool
export FLAVOR="m1.xlarge"
export AZ="az1"
export SRIOV_NEUTRON_NETWORK_ID="f050901b-11bc-4a75-a553-878509255760"
@@ -197,7 +197,9 @@ func (a OpenStack) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, _
"--namespace=$(MY_NAMESPACE)",
"--leader-elect",
"--metrics-bind-addr=127.0.0.1:8080",
"--v=2",
// We need to set the log level to 3 to get the logs from ORC.
// Once ORC follows logging guidelines, we should use V(2) again.
"--v=3",
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
@@ -337,14 +339,14 @@ func (a OpenStack) CAPIProviderPolicyRules() []rbacv1.PolicyRule {
Resources: []string{"ipaddresses", "ipaddresses/status"},
Verbs: []string{"create", "delete", "get", "list", "update", "watch"},
},
// The following rule is required for CAPO to watch for the Images resources created by ORC,
// The following rule is required for CAPO to reconcile the Images resources created by ORC,
// which is a dependency since CAPO v0.11.0.
// This rule is also defined in the Hypershift Operator and the Hypershift CLI when creating
// the cluster.
{
APIGroups: []string{"openstack.k-orc.cloud"},
Resources: []string{"images"},
Verbs: []string{"list", "watch"},
Resources: []string{"images", "images/status"},
Verbs: []string{rbacv1.VerbAll},
},
}
}
13 changes: 12 additions & 1 deletion hypershift-operator/controllers/nodepool/capi.go
@@ -67,6 +67,13 @@ func (c *CAPI) Reconcile(ctx context.Context) error {
return err
}

// Reconcile ORC resources
if nodePool.Spec.Platform.Type == hyperv1.OpenStackPlatform {
if err := c.reconcileORCResources(ctx); err != nil {
return err
}
}

// Reconcile (Platform)MachineTemplate.
template, mutateTemplate, _, err := c.machineTemplateBuilders()
if err != nil {
@@ -842,7 +849,7 @@ func (c *CAPI) machineTemplateBuilders() (client.Object, func(object client.Obje
case hyperv1.OpenStackPlatform:
template = &capiopenstackv1beta1.OpenStackMachineTemplate{}
var err error
machineTemplateSpec, err = openstack.MachineTemplateSpec(hcluster, nodePool)
machineTemplateSpec, err = openstack.MachineTemplateSpec(hcluster, nodePool, c.releaseImage)
if err != nil {
SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{
Type: hyperv1.NodePoolValidMachineTemplateConditionType,
@@ -1316,3 +1323,7 @@ func (r *NodePoolReconciler) getMachinesForNodePool(ctx context.Context, nodePoo

return sortedByCreationTimestamp(machinesForNodePool), nil
}

func (c *CAPI) reconcileORCResources(ctx context.Context) error {
return openstack.ReconcileOpenStackImageCR(ctx, c.Client, c.CreateOrUpdate, c.hostedCluster, c.releaseImage, c.nodePool)
}
23 changes: 1 addition & 22 deletions hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go
@@ -54,27 +54,6 @@ func defaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error
return containerImage, split[1], nil
}

func unsupportedOpenstackDefaultImage(releaseImage *releaseinfo.ReleaseImage) (string, string, error) {
arch, foundArch := releaseImage.StreamMetadata.Architectures["x86_64"]
if !foundArch {
return "", "", fmt.Errorf("couldn't find OS metadata for architecture %q", "x64_64")
}
openStack, exists := arch.Artifacts["openstack"]
if !exists {
return "", "", fmt.Errorf("couldn't find OS metadata for openstack")
}
artifact, exists := openStack.Formats["qcow2.gz"]
if !exists {
return "", "", fmt.Errorf("couldn't find OS metadata for openstack qcow2.gz")
}
disk, exists := artifact["disk"]
if !exists {
return "", "", fmt.Errorf("couldn't find OS metadata for the openstack qcow2.gz disk")
}

return disk.Location, disk.SHA256, nil
}

func allowUnsupportedRHCOSVariants(nodePool *hyperv1.NodePool) bool {
val, exists := nodePool.Annotations[hyperv1.AllowUnsupportedKubeVirtRHCOSVariantsAnnotation]
if exists && strings.ToLower(val) == "true" {
@@ -101,7 +80,7 @@ func GetImage(nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage

imageName, imageHash, err := defaultImage(releaseImage)
if err != nil && allowUnsupportedRHCOSVariants(nodePool) {
imageName, imageHash, err = unsupportedOpenstackDefaultImage(releaseImage)
imageName, imageHash, err = releaseinfo.UnsupportedOpenstackDefaultImage(releaseImage)
if err != nil {
return nil, err
}
106 changes: 99 additions & 7 deletions hypershift-operator/controllers/nodepool/openstack/openstack.go
@@ -1,16 +1,25 @@
package openstack

import (
"context"
"fmt"
"strings"

hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"sigs.k8s.io/controller-runtime/pkg/client"

orc "github.com/k-orc/openstack-resource-controller/api/v1alpha1"
"github.com/openshift/hypershift/support/openstackutil"
"github.com/openshift/hypershift/support/releaseinfo"
"github.com/openshift/hypershift/support/upsert"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
capiopenstackv1beta1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
)

func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.NodePool) (*capiopenstackv1beta1.OpenStackMachineTemplateSpec, error) {
func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.NodePool, releaseImage *releaseinfo.ReleaseImage) (*capiopenstackv1beta1.OpenStackMachineTemplateSpec, error) {
openStackMachineTemplate := &capiopenstackv1beta1.OpenStackMachineTemplateSpec{Template: capiopenstackv1beta1.OpenStackMachineTemplateResource{Spec: capiopenstackv1beta1.OpenStackMachineSpec{
Flavor: ptr.To(nodePool.Spec.Platform.OpenStack.Flavor),
}}}
@@ -20,12 +29,13 @@ func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.Node
Name: ptr.To(nodePool.Spec.Platform.OpenStack.ImageName),
}
} else {
// TODO(emilien): Add support for using the image from the release payload.
// This will be possible when CAPO supports managing images in the OpenStack cluster:
// https://github.com/kubernetes-sigs/cluster-api-provider-openstack/pull/2130
// For 4.17 we might leave this as is and let the user provide the image name as
// we plan to deliver the OpenStack provider as a dev preview.
return nil, fmt.Errorf("image name is required")
releaseVersion, err := releaseinfo.OpenStackReleaseImage(releaseImage)
if err != nil {
return nil, err
}
openStackMachineTemplate.Template.Spec.Image.ImageRef = &capiopenstackv1beta1.ResourceReference{
Name: "rhcos-" + releaseVersion + "-" + hcluster.Name,
}
}

// TODO: add support for BYO network/subnet
@@ -72,3 +82,85 @@ func MachineTemplateSpec(hcluster *hyperv1.HostedCluster, nodePool *hyperv1.Node
}
return openStackMachineTemplate, nil
}

func getOpenStackClusterForHostedCluster(ctx context.Context, c client.Client, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string) (capiopenstackv1beta1.OpenStackCluster, error) {
cluster := capiopenstackv1beta1.OpenStackCluster{}

if err := c.Get(ctx, types.NamespacedName{Namespace: controlPlaneNamespace, Name: hcluster.Name}, &cluster); err != nil {
return cluster, fmt.Errorf("failed to get OpenStackCluster: %w", err)
}

return cluster, nil
}

func ReconcileOpenStackImageCR(ctx context.Context, client client.Client, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, release *releaseinfo.ReleaseImage, nodePool *hyperv1.NodePool) error {
releaseVersion, err := releaseinfo.OpenStackReleaseImage(release)
if err != nil {
return err
}
controlPlaneNamespace := fmt.Sprintf("%s-%s", nodePool.Namespace, strings.ReplaceAll(nodePool.Spec.ClusterName, ".", "-"))
openstackCluster, err := getOpenStackClusterForHostedCluster(ctx, client, hcluster, controlPlaneNamespace)
if err != nil {
return err
}
openStackImage := orc.Image{
ObjectMeta: metav1.ObjectMeta{
Name: "rhcos-" + releaseVersion + "-" + hcluster.Name,
Namespace: controlPlaneNamespace,
// TODO: add proper cleanup as part of the CAPI resources cleanup
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: openstackCluster.APIVersion,
Kind: openstackCluster.Kind,
Name: openstackCluster.Name,
UID: openstackCluster.UID,
},
},
},
Spec: orc.ImageSpec{},
}

if _, err := createOrUpdate(ctx, client, &openStackImage, func() error {
return reconcileOpenStackImageSpec(hcluster, &openStackImage.Spec, release)
}); err != nil {
return err
}
return nil
}

func reconcileOpenStackImageSpec(hcluster *hyperv1.HostedCluster, openStackImageSpec *orc.ImageSpec, release *releaseinfo.ReleaseImage) error {
imageURL, imageHash, err := releaseinfo.UnsupportedOpenstackDefaultImage(release)
if err != nil {
return fmt.Errorf("failed to lookup RHCOS image: %w", err)
}

openStackImageSpec.CloudCredentialsRef = orc.CloudCredentialsReference{
SecretName: hcluster.Spec.Platform.OpenStack.IdentityRef.Name,
CloudName: hcluster.Spec.Platform.OpenStack.IdentityRef.CloudName,
}
releaseVersion, err := releaseinfo.OpenStackReleaseImage(release)
if err != nil {
return err
}

openStackImageSpec.Resource = &orc.ImageResourceSpec{
Name: "rhcos-" + releaseVersion + "-" + hcluster.Name,
Content: &orc.ImageContent{
DiskFormat: "qcow2",
Download: &orc.ImageContentSourceDownload{
URL: imageURL,
Decompress: ptr.To(orc.ImageCompressionGZ),
Hash: &orc.ImageHash{
Algorithm: "sha256",
Value: imageHash,
},
},
},
}

return nil
}