From 2da3adb8beb227afa1e4a400301b9f6930ec51fc Mon Sep 17 00:00:00 2001 From: Jordi Massaguer Pla Date: Thu, 9 Nov 2023 17:54:48 +0100 Subject: [PATCH 1/2] CI: add build and push models workflow This workflow will build and push into the ghcr.io the example MAP in examples/apps/simple_imaging_app. This workflow will use terraform to launch a VM with a GPU and then run the monai-deploy-sdk package subcommand inside it. NOTE: you need to set up the secrets: * AZURE_CLIENT_ID * AZURE_SUBSCRIPTION_ID * AZURE_TENANT_ID * AZURE_CLIENT_SECRET The workflow will patch holoscan, so that it works when there is no cache. The workflow will install a "patched" libseccomp package so that we can install libnvidia-container. The workflow will use nvidia docker runtime to build, load, export, ... the image. The workflow uses the smallest/cheapest Azure Image with GPU available in Western Europe, which is Standard_NC4as_T4_v3. For this to work, you need to request a quota increase to Azure Help Desk. However, the default 30GB disk is not enough for the build, so you need to set up a 64GB one. :WARNING: This will incur costs in Azure Cloud, use it with caution. 
Signed-off-by: Jordi Massaguer Pla --- ...e17d7563c3499b548dae913cabd7b8242f78.patch | 62 ++++++++ .github/workflows/build_and_push_models.yml | 113 ++++++++++++++ main.tf | 139 ++++++++++++++++++ 3 files changed, 314 insertions(+) create mode 100644 .github/workflows/142be17d7563c3499b548dae913cabd7b8242f78.patch create mode 100644 .github/workflows/build_and_push_models.yml create mode 100644 main.tf diff --git a/.github/workflows/142be17d7563c3499b548dae913cabd7b8242f78.patch b/.github/workflows/142be17d7563c3499b548dae913cabd7b8242f78.patch new file mode 100644 index 00000000..d7e1dd04 --- /dev/null +++ b/.github/workflows/142be17d7563c3499b548dae913cabd7b8242f78.patch @@ -0,0 +1,62 @@ +From 142be17d7563c3499b548dae913cabd7b8242f78 Mon Sep 17 00:00:00 2001 +From: Jordi Massaguer Pla +Date: Tue, 14 Nov 2023 10:30:15 +0100 +Subject: [PATCH] Fix using no-cache option for the container build + +If we specify no-cache, we should not add the local cache with the +from-cache and to-cache parameters. Otherwise, we get the error + +``` +WARNING: local cache import at /home/adminuser/.holoscan_build_cache +not found due to err: could not read +/home/adminuser/.holoscan_build_cache/index.json: open +/home/adminuser/.holoscan_build_cache/index.json: no such file or directory +``` +being adminuser the user that runs the build. + +This is important for CI, where we do not have any cache to start with. 
+ +Signed-off-by: Jordi Massaguer Pla +--- + python/holoscan/cli/packager/container_builder.py | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +diff --git a/python/holoscan/cli/packager/container_builder.py b/python/holoscan/cli/packager/container_builder.py +index 92edd91..ca6e141 100644 +--- a/python/holoscan/cli/packager/container_builder.py ++++ b/python/holoscan/cli/packager/container_builder.py +@@ -89,9 +89,11 @@ def _build_internal( + builder = create_and_get_builder(Constants.LOCAL_BUILDX_BUILDER_NAME) + + build_result = PlatformBuildResults(platform_parameters) +- +- cache_to = {"type": "local", "dest": self._build_parameters.build_cache} +- cache_from = [{"type": "local", "src": self._build_parameters.build_cache}] ++ cache_to = {} ++ cache_from = [] ++ if not self._build_parameters.no_cache: ++ cache_to = {"type": "local", "dest": self._build_parameters.build_cache} ++ cache_from = [{"type": "local", "src": self._build_parameters.build_cache}] + if platform_parameters.base_image is not None: + cache_from.append({"type": "registry", "ref": platform_parameters.base_image}) + if platform_parameters.build_image is not None: +@@ -99,8 +101,6 @@ def _build_internal( + builds = { + "builder": builder, + "cache": not self._build_parameters.no_cache, +- "cache_from": cache_from, +- "cache_to": cache_to, + "context_path": self._temp_dir, + "file": dockerfile, + "platforms": [platform_parameters.docker_arch], +@@ -108,6 +108,10 @@ def _build_internal( + "pull": True, + "tags": [platform_parameters.tag], + } ++ if cache_to != {}: ++ builds["cache_to"] = cache_to ++ if cache_from != []: ++ builds["cache_from"] = cache_from + + export_to_tar_ball = False + if self._build_parameters.tarball_output is not None: diff --git a/.github/workflows/build_and_push_models.yml b/.github/workflows/build_and_push_models.yml new file mode 100644 index 00000000..7f916a2d --- /dev/null +++ b/.github/workflows/build_and_push_models.yml @@ -0,0 +1,113 @@ +# This 
workflow will install Python dependencies, build the latest models as containers, and push to the registry the resulting containers +# TODO: Use cache for caching the docker images, to speed up the build +# TODO: Can we have the dependencies stored somehow (predownloaded, a custom image, a container registry, our artifact server...) so this will always be reproduceable? +# TODO: Can we have some kind of Bill Of Materials of the resulting image? What packages are in there? What python "modules" ? Code version of monai? + +name: build_and_push_models + +# This is triggered manually. It could be changed to be triggered by new pushed tags. +on: workflow_dispatch + +# Version could be inferred from the new tag if this was triggered by a new tag push +# FIXME: Python version could be inferred with "python --version" run inside the containers, and CP is the python version without '.' +# FIXME: wheel name could be dynamically generated, by for example use "ls" on the download folder +# ARM environment variables are used by the terraform azure provider for authentication using a client secret. 
+# See https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret +# https://github.com/Azure-Samples/terraform-github-actions/blob/main/.github/workflows/tf-plan-apply.yml +env: + ARM_CLIENT_ID: "${{ secrets.AZURE_CLIENT_ID }}" + ARM_SUBSCRIPTION_ID: "${{ secrets.AZURE_SUBSCRIPTION_ID }}" + ARM_TENANT_ID: "${{ secrets.AZURE_TENANT_ID }}" + ARM_CLIENT_SECRET: "${{ secrets.AZURE_CLIENT_SECRET }}" + VERSION: "0.6.0" + PYTHON_VERSION: "3.8" + CP_VERSION: "38" + DOCKER_IMAGE_TAG : "latest" + APP_IMAGE_NAME : "simple_app" + PLATFORM : "x64-workstation" + DOCKER_IMAGE_NAME : "simple_app-x64-workstation-dgpu-linux-amd64-latest" + DOCKER_IMAGE_NAME_SHORT: "simple_app-x64-workstation-dgpu-linux-amd64" + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + APP: "examples/apps/simple_imaging_app" + +jobs: + do: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v2 + - name: Disclaimers + run: | + echo "!!! WARNING !!! This is a hackweek project, not meant for production or clinical usage, does not have any kind of guarantee, use at your own risk.https://hackweek.opensuse.org/23/projects/package-monai-machine-learning-models-for-medical-applications. !!! WARNING !!!" 
+ # Install the latest version of the Terraform CLI + - name: Show disc space + run: df -h + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - name: Initialize a new Terraform working directory + run: terraform init + - name: Check Terraform configuration files format + run: terraform fmt -check + - name: Generate unique SSH Key + run: ssh-keygen -t rsa -f /tmp/ssh_id_gh -N "" + - name: Terraform Apply + run: terraform apply -auto-approve + - name: Get IP address + run: echo "AZURE_IPADDRESS=$(terraform output | grep instance_public_ip | cut -d\" -f2)" >> $GITHUB_ENV + - name: Output ip address + run: echo "AZURE_IPADDRESS=$AZURE_IPADDRESS" + - name: Test connection + # We use StrictHostKeyChecking=no to accept the SSH fingerprint on the first connection + run: ssh -i /tmp/ssh_id_gh -o StrictHostKeyChecking=no adminuser@$AZURE_IPADDRESS "sudo uname -a" + - name: Add fixed libseccomp package + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo zypper ar -G https://download.opensuse.org/repositories/home:/jordimassaguerpla:/branches:/openSUSE:/Leap:/15.5:/Update/pool-leap-15.5/home:jordimassaguerpla:branches:openSUSE:Leap:15.5:Update.repo && sudo zypper ref && sudo zypper -n install --from home_jordimassaguerpla_branches_openSUSE_Leap_15.5_Update --allow-vendor-change libseccomp" + - name: Install Deps + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo zypper ar -G https://developer.download.nvidia.com/compute/cuda/repos/opensuse15/x86_64/ nvidia && sudo zypper ref && sudo zypper --non-interactive install patch python39 docker-buildx nvidia-container-toolkit nvidia-computeG05 cuda-cudart-devel-11-0 libyaml-cpp0_6 && wget -c https://bootstrap.pypa.io/get-pip.py && python3.9 get-pip.py && python3.9 --version" + - name: Setup Nvidia container + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo usermod -G docker,video adminuser && sudo nvidia-ctk runtime configure --runtime=docker && 
sudo nvidia-ctk runtime configure --runtime=containerd && sudo systemctl start docker && sudo systemctl start containerd && sudo sed -e \"s/user = \\\"\\\"/user = \\\"adminuser:video\\\"/g \" -i /etc/nvidia-container-runtime/config.toml && sudo modprobe nvidia" + - name: Check nvidia + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo systemctl start docker && nvidia-smi && docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi" + - name: Install Monai Deploy Sdk and Holoscan + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "python3.9 -m pip install monai-deploy-app-sdk holoscan" + - name: Fix Holoscan + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} 'cd /home/adminuser/.local/lib/python3.9/site-packages/holoscan/lib ; to_link=$(ls libholoscan_*.so*);for i in $to_link; do name=$(echo $i | cut -d. -f1); ln -sv $name.so.$VERSION $name.so.0;done' + - name: Copy holoscan patch + run: scp -i /tmp/ssh_id_gh .github/workflows/142be17d7563c3499b548dae913cabd7b8242f78.patch adminuser@${AZURE_IPADDRESS}:/home/adminuser/.local/lib/python3.9/site-packages/holoscan + - name: Patch Holoscan + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "cd /home/adminuser/.local/lib/python3.9/site-packages/holoscan; patch -p3 < 142be17d7563c3499b548dae913cabd7b8242f78.patch" + - name: Make work dir + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "mkdir /home/adminuser/work" + - name: Download wheels + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "cd /home/adminuser/work && python3.9 -m pip download --no-deps --python-version=$PYTHON_VERSION holoscan==$VERSION && python3.9 -m pip download --no-deps monai-deploy-app-sdk==$VERSION" + - name: Copy example code + run: scp -i /tmp/ssh_id_gh -r * adminuser@${AZURE_IPADDRESS}:/home/adminuser/work + - name: Monai Deploy package + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "mkdir /home/adminuser/work/output && cd /home/adminuser/work && monai-deploy package --no-cache 
/home/adminuser/work/$APP -c /home/adminuser/work/$APP/app.yaml -t $APP_IMAGE_NAME:$DOCKER_IMAGE_TAG --platform $PLATFORM -l DEBUG --holoscan-sdk-file=/home/adminuser/work/holoscan-$VERSION-cp$CP_VERSION-cp$CP_VERSION-manylinux2014_x86_64.whl --monai-deploy-sdk-file=/home/adminuser/work/monai_deploy_app_sdk-$VERSION-py3-none-any.whl --platform-config dgpu --gid 1000 --output /home/adminuser/work/output" + - name: Size of docker image + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "du -hs /home/adminuser/work/output/*" + - name: Compress docker image + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "cd /home/adminuser/work/output && gzip $DOCKER_IMAGE_NAME.tar" + - name: Size of docker image + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "du -hs /home/adminuser/work/output/*" + - name: Show disc space + run: df -h + - name: Load docker image + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "cat /home/adminuser/work/output/$DOCKER_IMAGE_NAME.tar.gz" | docker load + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Tag Docker image + run: docker tag $DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG + - name: Push Docker image + run: docker push $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG + - name: Terraform Destroy + if: ${{ always() }} + run: terraform destroy -auto-approve diff --git a/main.tf b/main.tf new file mode 100644 index 00000000..15998a80 --- /dev/null +++ b/main.tf @@ -0,0 +1,139 @@ +/* +export these variables before running this file +ARM_CLIENT_ID +ARM_SUBSCRIPTION_ID +ARM_TENANT_ID +ARM_CLIENT_SECRET +*/ + +# We strongly recommend using the required_providers block to set the +# Azure Provider source and version being used +terraform { + required_providers { + azurerm = { + source = 
"hashicorp/azurerm" + version = "=3.0.0" + } + } +} + +# Configure the Microsoft Azure Provider +provider "azurerm" { + features {} +} + +# Create a resource group +resource "azurerm_resource_group" "gh-actions-build-monai-models-resource-group" { + name = "gh-actions-build-monai-models-resource-group" + location = "West Europe" +} + +# Create a virtual network within the resource group +resource "azurerm_virtual_network" "gh-actions-build-monai-models-virtual-network" { + name = "gh-actions-build-monai-models-virtual-network" + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + location = azurerm_resource_group.gh-actions-build-monai-models-resource-group.location + address_space = ["10.0.0.0/16"] +} + +resource "azurerm_subnet" "gh-actions-build-monai-models-internal-subnet" { + name = "gh-actions-build-monai-models-internal-subnet" + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + virtual_network_name = azurerm_virtual_network.gh-actions-build-monai-models-virtual-network.name + address_prefixes = ["10.0.2.0/24"] +} + +# Create public IPs +resource "azurerm_public_ip" "gh-actions-build-monai-models-public-ip" { + name = "gh-actions-build-monai-models-public-ip" + location = azurerm_resource_group.gh-actions-build-monai-models-resource-group.location + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + allocation_method = "Dynamic" +} + +resource "azurerm_network_interface" "gh-actions-build-monai-models-network-interface" { + name = "gh-actions-build-monai-models-network-interface" + location = azurerm_resource_group.gh-actions-build-monai-models-resource-group.location + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + + ip_configuration { + name = "gh-actions-build-monai-models-network-interface-ip-configuration" + subnet_id = 
azurerm_subnet.gh-actions-build-monai-models-internal-subnet.id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.gh-actions-build-monai-models-public-ip.id + } +} + +# Create Network Security Group and rule +resource "azurerm_network_security_group" "gh-actions-build-monai-models-nsg" { + name = "gh-actions-build-monai-models-nsg" + location = azurerm_resource_group.gh-actions-build-monai-models-resource-group.location + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + + security_rule { + name = "SSH" + priority = 1001 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } +} + +# Connect the security group to the network interface +resource "azurerm_network_interface_security_group_association" "gh-actions-build-monai-models-ga" { + network_interface_id = azurerm_network_interface.gh-actions-build-monai-models-network-interface.id + network_security_group_id = azurerm_network_security_group.gh-actions-build-monai-models-nsg.id +} + +resource "azurerm_linux_virtual_machine" "gh-actions-build-monai-models-vm" { + name = "gh-actions-build-monai-models-vm" + resource_group_name = azurerm_resource_group.gh-actions-build-monai-models-resource-group.name + location = azurerm_resource_group.gh-actions-build-monai-models-resource-group.location + // Standard_NC4as_T4_v3 has GPU. This has a cost associated!!! + size = "Standard_NC4as_T4_v3" + admin_username = "adminuser" + network_interface_ids = [ + azurerm_network_interface.gh-actions-build-monai-models-network-interface.id, + ] + + admin_ssh_key { + username = "adminuser" + public_key = file("/tmp/ssh_id_gh.pub") //This file is in the vm where you run terraform!! 
+ } + + os_disk { + caching = "ReadWrite" + storage_account_type = "StandardSSD_LRS" + # With the default 30GB, docker will fail to load and export the image + disk_size_gb = "64" + } + + source_image_reference { + publisher = "SUSE" + offer = "opensuse-leap-15-5" + sku = "gen2" + version = "latest" + } +} + +resource "null_resource" "example" { + provisioner "remote-exec" { + connection { + host = azurerm_linux_virtual_machine.gh-actions-build-monai-models-vm.public_ip_address + user = "adminuser" + private_key = file("/tmp/ssh_id_gh") + } + + inline = ["echo 'connected!'"] + } +} + +output "instance_public_ip" { + description = "Public IP address" + value = azurerm_linux_virtual_machine.gh-actions-build-monai-models-vm.public_ip_address +} + From 6013c5e3876ebbcbe8f1816b7ae848b39a58698f Mon Sep 17 00:00:00 2001 From: Jordi Massaguer Pla Date: Wed, 29 Nov 2023 12:53:51 +0100 Subject: [PATCH 2/2] Add SBOM and sign image Sign the image with cosign using the OIDC token. Add Software Bill of Materials with trivy as signed cosign attestations. This information is needed for securing the supply chain. You can verify the image with cosign. You can get the SBOM from the attestations and then use trivy to check for vulnerabilities. Signed-off-by: Jordi Massaguer Pla --- .github/workflows/build_and_push_models.yml | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_push_models.yml b/.github/workflows/build_and_push_models.yml index 7f916a2d..b2ebbe83 100644 --- a/.github/workflows/build_and_push_models.yml +++ b/.github/workflows/build_and_push_models.yml @@ -1,7 +1,6 @@ # This workflow will install Python dependencies, build the latest models as containers, and push to the registry the resulting containers # TODO: Use cache for caching the docker images, to speed up the build # TODO: Can we have the dependencies stored somehow (predownloaded, a custom image, a container registry, our artifact server...) 
so this will always be reproduceable? -# TODO: Can we have some kind of Bill Of Materials of the resulting image? What packages are in there? What python "modules" ? Code version of monai? name: build_and_push_models @@ -37,6 +36,7 @@ jobs: permissions: contents: read packages: write + id-token: write steps: - uses: actions/checkout@v2 - name: Disclaimers @@ -67,7 +67,7 @@ jobs: - name: Add fixed libseccomp package run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo zypper ar -G https://download.opensuse.org/repositories/home:/jordimassaguerpla:/branches:/openSUSE:/Leap:/15.5:/Update/pool-leap-15.5/home:jordimassaguerpla:branches:openSUSE:Leap:15.5:Update.repo && sudo zypper ref && sudo zypper -n install --from home_jordimassaguerpla_branches_openSUSE_Leap_15.5_Update --allow-vendor-change libseccomp" - name: Install Deps - run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo zypper ar -G https://developer.download.nvidia.com/compute/cuda/repos/opensuse15/x86_64/ nvidia && sudo zypper ref && sudo zypper --non-interactive install patch python39 docker-buildx nvidia-container-toolkit nvidia-computeG05 cuda-cudart-devel-11-0 libyaml-cpp0_6 && wget -c https://bootstrap.pypa.io/get-pip.py && python3.9 get-pip.py && python3.9 --version" + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo zypper ar -G https://developer.download.nvidia.com/compute/cuda/repos/opensuse15/x86_64/ nvidia && sudo zypper ref && sudo zypper --non-interactive install patch python39 docker-buildx nvidia-container-toolkit nvidia-computeG05 cuda-cudart-devel-11-0 libyaml-cpp0_6 trivy && wget -c https://bootstrap.pypa.io/get-pip.py && python3.9 get-pip.py && python3.9 --version" - name: Setup Nvidia container run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "sudo usermod -G docker,video adminuser && sudo nvidia-ctk runtime configure --runtime=docker && sudo nvidia-ctk runtime configure --runtime=containerd && sudo systemctl start docker && sudo systemctl start 
containerd && sudo sed -e \"s/user = \\\"\\\"/user = \\\"adminuser:video\\\"/g \" -i /etc/nvidia-container-runtime/config.toml && sudo modprobe nvidia" - name: Check nvidia @@ -88,6 +88,8 @@ jobs: run: scp -i /tmp/ssh_id_gh -r * adminuser@${AZURE_IPADDRESS}:/home/adminuser/work - name: Monai Deploy package run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "mkdir /home/adminuser/work/output && cd /home/adminuser/work && monai-deploy package --no-cache /home/adminuser/work/$APP -c /home/adminuser/work/$APP/app.yaml -t $APP_IMAGE_NAME:$DOCKER_IMAGE_TAG --platform $PLATFORM -l DEBUG --holoscan-sdk-file=/home/adminuser/work/holoscan-$VERSION-cp$CP_VERSION-cp$CP_VERSION-manylinux2014_x86_64.whl --monai-deploy-sdk-file=/home/adminuser/work/monai_deploy_app_sdk-$VERSION-py3-none-any.whl --platform-config dgpu --gid 1000 --output /home/adminuser/work/output" + - name: Build SBOM + run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "trivy image --format spdx-json --input /home/adminuser/work/output/$DOCKER_IMAGE_NAME.tar > /home/adminuser/work/output/sbom.spdx.json" - name: Size of docker image run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "du -hs /home/adminuser/work/output/*" - name: Compress docker image @@ -98,6 +100,10 @@ jobs: run: df -h - name: Load docker image run: ssh -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS} "cat /home/adminuser/work/output/$DOCKER_IMAGE_NAME.tar.gz" | docker load + - name: Get digest + run: echo "IMAGE_DIGEST=$(docker images --no-trunc -q $DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG)" >> $GITHUB_ENV + - name: Copy SBOM + run: scp -i /tmp/ssh_id_gh adminuser@${AZURE_IPADDRESS}:/home/adminuser/work/output/sbom.spdx.json . 
- name: Log in to the Container registry uses: docker/login-action@v3 with: @@ -108,6 +114,16 @@ run: docker tag $DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG - name: Push Docker image run: docker push $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG + - name: Install sigstore cosign + uses: sigstore/cosign-installer@main + - name: Sign image + env: + COSIGN_EXPERIMENTAL: "true" + run: cosign sign --yes $(docker inspect --format='{{index .RepoDigests 0}}' $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG) + - name: Sign attestations + env: + COSIGN_EXPERIMENTAL: "true" + run: cosign attest --yes --type spdx --predicate sbom.spdx.json $(docker inspect --format='{{index .RepoDigests 0}}' $REGISTRY/$IMAGE_NAME/$DOCKER_IMAGE_NAME_SHORT:$DOCKER_IMAGE_TAG) - name: Terraform Destroy if: ${{ always() }} run: terraform destroy -auto-approve