Nightly integration tests #469
name: Nightly integration tests

concurrency:
  # Only one integration test workflow may run at a time, since it involves
  # pushing/cleaning up a test image (same tag).
  group: ${{ github.workflow }}${{ github.event.workflow_run.name }}
  cancel-in-progress: false

# Run on request and every day at 3 AM UTC
on:
  workflow_dispatch:
    inputs:
      target:
        description: 'Target (choose nightly to run the same tests as the nightly schedule)'
        required: true
        default: 'nightly'
        type: choice
        options:
          - nightly
          - anyon
          - ionq
          - iqm
          - oqc
          - quantinuum
          - nvqc
          - orca
      single_test_name:
        type: string
        required: false
        description: 'Single test (e.g., targettests/quantinuum/load_value.cpp). Runs the default tests if left blank'
      target_machine:
        type: string
        required: false
        description: 'Target machine (e.g., H1-1E).'
      cudaq_test_image:
        type: string
        required: false
        default: 'nvcr.io/nvidia/nightly/cuda-quantum:latest' # If changed, update env defaults, too
        description: 'CUDA Quantum image to run the tests in. Defaults to the latest CUDA Quantum nightly image'
      commit_sha:
        type: string
        required: false
        description: 'Commit SHA to pull the code (examples/tests) for testing. Defaults to the commit associated with the CUDA Quantum docker image if left blank'
      cudaq_nvqc_deploy_image:
        type: string
        required: false
        default: 'nvcr.io/nvidia/nightly/cuda-quantum:latest' # If changed, update env defaults, too
        description: 'CUDA Quantum image to use for NVQC deployment to NVCF. Defaults to the latest CUDA Quantum nightly image'
      workflow_id:
        type: string
        required: false
        description: 'Workflow Id to retrieve the Python wheel for testing. Defaults to the wheels produced by the Publishing workflow associated with the latest nightly CUDA Quantum Docker image if left blank'
      python_version:
        type: choice
        required: true
        default: '3.10' # If changed, update env defaults, too
        description: 'Python version to run the wheel tests with'
        options:
          - '3.8'
          - '3.9'
          - '3.10'
          - '3.11'
  schedule:
    - cron: '0 3 * * *'

env:
  # NGC nv-quantum organization: pnyjrcojiblh
  NGC_QUANTUM_ORG: pnyjrcojiblh
  NGC_QUANTUM_TEAM: cuda-quantum
  NVQC_FUNCTION_ID: 3bfa0342-7d2a-4f1b-8e81-b6608d28ca7d
  # <Backend>:<GPU Type>:<Instance Type>:<Min Instances>:<Max Instances>
  NGC_NVQC_DEPLOYMENT_SPEC: GFN:L40:gl40_1.br20_2xlarge:1:1
  # If vars below are changed, it is recommended to also update the
  # workflow_dispatch defaults above so they stay in sync.
  cudaq_test_image: nvcr.io/nvidia/nightly/cuda-quantum:latest
  cudaq_nvqc_deploy_image: nvcr.io/nvidia/nightly/cuda-quantum:latest
  python_version: '3.10'

jobs:
  # We need this job purely to choose the container image values because the
  # `env` context is unavailable outside of "steps" contexts.
  setup:
    name: Set variables
    runs-on: ubuntu-latest
    outputs:
      cudaq_test_image: ${{ steps.vars.outputs.cudaq_test_image }}
    steps:
      - name: Set variables
        id: vars
        run: |
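          # On scheduled runs 'inputs' is empty, so fall back to the env default via 'inputs.x || env.x'.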
echo "cudaq_test_image=${{ inputs.cudaq_test_image || env.cudaq_test_image }}" >> $GITHUB_OUTPUT | |
  metadata:
    name: Retrieve commit info
    runs-on: ubuntu-latest
    needs: setup
    container:
      image: ${{ needs.setup.outputs.cudaq_test_image }}
      options: --user root
    outputs:
      cudaq_commit: ${{ steps.commit-sha.outputs.sha }}
    steps:
      - name: Get commit SHA
        id: commit-sha
        run: |
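          # The test image records its source commit in $CUDA_QUANTUM_PATH/build_info.txt
          # on a line of the form 'source-sha: <sha>', which the grep below assumes.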
if [ -n "${{ inputs.commit_sha }}" ]; then | |
echo "sha=${{ inputs.commit_sha }}" >> $GITHUB_OUTPUT | |
else | |
echo "sha=$(cat $CUDA_QUANTUM_PATH/build_info.txt | grep -o 'source-sha: \S*' | cut -d ' ' -f 2)" >> $GITHUB_OUTPUT | |
fi | |
  build_nvqc_image:
    name: Build NVQC deployment image
    runs-on: ubuntu-latest
    needs: metadata
    environment: ghcr-deployment
    if: (inputs.target == 'nvqc' || github.event_name == 'schedule' || inputs.target == 'nightly')
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.metadata.outputs.cudaq_commit }}
          fetch-depth: 1
      - name: Set up context for buildx
        run: |
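          # setup-buildx-action consumes this named context as its builder endpoint (see 'endpoint' below).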
          docker context create builder_context
      - name: Set up buildx runner
        uses: docker/setup-buildx-action@v3
        with:
          endpoint: builder_context
      - name: Login to NGC container registry
        uses: docker/login-action@v3
        with:
          registry: nvcr.io
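          # NGC expects the literal username '$oauthtoken'; the API key goes in the password field.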
          username: '$oauthtoken'
          password: ${{ secrets.NGC_CREDENTIALS }}
      # Log in to GHCR (in case the base image comes from our own GHCR registry)
      - name: Log in to the GitHub container registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ github.token }}
      - name: Build NVQC image
        id: docker_build
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./docker/release/cudaq.nvqc.Dockerfile
          build-args: |
            base_image=${{ inputs.cudaq_nvqc_deploy_image || env.cudaq_nvqc_deploy_image }}
          tags: nvcr.io/${{ env.NGC_QUANTUM_ORG }}/${{ env.NGC_QUANTUM_TEAM }}/cuda-quantum:nightly
          platforms: linux/amd64
          provenance: false
          push: true
  deploy_nvqc_test_function:
    name: Deploy NVQC function
    runs-on: ubuntu-latest
    needs: [metadata, build_nvqc_image]
    if: (inputs.target == 'nvqc' || github.event_name == 'schedule' || inputs.target == 'nightly')
    environment: ghcr-deployment
    outputs:
      nvqc_function_version_id: ${{ steps.deploy.outputs.nvqc_function_version_id }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.metadata.outputs.cudaq_commit }}
          fetch-depth: 1
      - name: Install NGC CLI
        uses: ./.github/actions/install-ngc-cli
        with:
          version: 3.38.0
          checksum: 427c67684d792b673b63882a6d0cbb8777815095c0f2f31559c1570a91187388
      - name: Deploy NVQC Function
        id: deploy
        env:
          NGC_CLI_API_KEY: ${{ secrets.NGC_CREDENTIALS }}
          NGC_CLI_ORG: ${{ env.NGC_QUANTUM_ORG }}
          NGC_CLI_TEAM: cuda-quantum
        # When a new REST version is introduced, NVQC_REST_PAYLOAD_VERSION needs to be updated in lockstep with the new nightly CUDA Quantum image.
        # Otherwise, deployment of the test function will fail.
        run: |
          # We run with CUDAQ_SER_CODE_EXEC set. The final NVQC deployment may
          # or may not have this set, but since we run the client with
          # CUDAQ_CLIENT_REMOTE_CAPABILITY_OVERRIDE=1 (below), we need to run
          # the CI with CUDAQ_SER_CODE_EXEC=1. If we ever remove
          # CUDAQ_CLIENT_REMOTE_CAPABILITY_OVERRIDE=1 below, we can consider
          # removing CUDAQ_SER_CODE_EXEC=1.
          create_function_result=$(ngc-cli/ngc cloud-function function create \
            --container-image nvcr.io/${{ env.NGC_QUANTUM_ORG }}/${{ env.NGC_QUANTUM_TEAM }}/cuda-quantum:nightly \
            --container-environment-variable NUM_GPUS:1 \
            --container-environment-variable NVQC_REST_PAYLOAD_VERSION:1.1 \
            --container-environment-variable RUN_AS_NOBODY:1 \
            --container-environment-variable CUDAQ_SER_CODE_EXEC:1 \
            --api-body-format CUSTOM \
            --inference-port 3030 \
            --health-uri / \
            --inference-url /job \
            --name cudaq-nightly-integration-test \
            $NVQC_FUNCTION_ID)
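          # The CLI prints human-readable output; the first line matching 'Version: <id>'
          # (assumed output format) carries the new function version Id parsed below.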
          version_id=$(echo "$create_function_result" | grep 'Version: \S*' | head -1 | cut -d ':' -f 2 | tr -d ' ')
          echo "Created version Id: $version_id"
          echo "nvqc_function_version_id=$version_id" >> $GITHUB_OUTPUT
          # Deploy it
          ngc-cli/ngc cloud-function function deploy create --deployment-specification $NGC_NVQC_DEPLOYMENT_SPEC $NVQC_FUNCTION_ID:$version_id
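          # Poll every two minutes until the function leaves the DEPLOYING state.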
          function_status=DEPLOYING
          while [ "$function_status" = "DEPLOYING" ]; do
            echo "Waiting for NVQC function version $version_id to finish deploying ..."
            sleep 120
            function_info=$(ngc-cli/ngc cloud-function function info $NVQC_FUNCTION_ID:$version_id)
            function_status=$(echo "$function_info" | grep 'Status: \S*' | head -1 | cut -d ':' -f 2 | tr -d ' ')
          done
          if [ "$function_status" != "ACTIVE" ]; then
            echo "::error:: Failed to deploy NVQC Test Function"
            exit 1
          fi
  hardware_providers_integration_test:
    name: Integration test with hardware providers
    runs-on: ubuntu-latest
    needs: [setup, metadata]
    environment: backend-validation
    container:
      image: ${{ needs.setup.outputs.cudaq_test_image }}
      options: --user root
    steps:
      - name: Get code
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.metadata.outputs.cudaq_commit }}
          fetch-depth: 1
      - name: Get tests
        id: gettests
        run: |
          if [ -n "${{ inputs.single_test_name }}" ]; then
            if [ -e ${{ inputs.single_test_name }} ]; then
              echo "testlist=${{ inputs.single_test_name }}" >> $GITHUB_OUTPUT
            else
              # The user's requested test does not exist
              echo "::error::File ${{ inputs.single_test_name }} not found"
              exit 1
            fi
          else
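            # Default: glob all C++ target tests; each provider step below filters for its own subdirectory.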
filelist="targettests/*/*.cpp" | |
echo "testlist=$filelist" >> $GITHUB_OUTPUT | |
fi | |
      - name: Setup anyon account
        # This step is currently bypassed during nightly runs due to
        # maintenance. Restore the if check to the original value when
        # maintenance is complete.
        #if: github.event_name == 'schedule' || inputs.target == 'nightly' || inputs.target == 'anyon'
        if: inputs.target == 'anyon'
        run: |
          curl -X POST -H "Content-Type: application/json" -d '{ "username":"${{ secrets.BACKEND_LOGIN_USERNAME }}","password":"${{ secrets.BACKEND_LOGIN_PASSWORD }}" }' https://api.anyon.cloud/login > credentials.json
          id_token=$(jq -r '.id_token' credentials.json)
          refresh_token=$(jq -r '.refresh_token' credentials.json)
          echo "key: $id_token" > ~/.anyon_config
          echo "refresh: $refresh_token" >> ~/.anyon_config
      - name: QIR syntax check (Anyon)
        # This step is currently bypassed during nightly runs due to
        # maintenance. Restore the if check to the original value when
        # maintenance is complete.
        #if: github.event_name == 'schedule' || inputs.target == 'nightly' || inputs.target == 'anyon'
        if: inputs.target == 'anyon'
        run: |
          echo "### QIR syntax check (Anyon)" >> $GITHUB_STEP_SUMMARY
          export CUDAQ_LOG_LEVEL="info"
          set +e # Allow script to keep going through errors
          test_err_sum=0
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            case $filename in
              targettests/anyon/*)
                [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
                nvq++ -v $filename -DSYNTAX_CHECK --target anyon --anyon-machine telegraph-8q
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  ./a.out
                  test_status=$?
                  if [ $test_status -eq 0 ]; then
                    echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                  else
                    echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                    test_err_sum=$((test_err_sum+1))
                  fi
                else
                  echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
                ;;
            esac
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Setup quantinuum account
        if: github.event_name == 'schedule' || inputs.target == 'nightly' || inputs.target == 'quantinuum'
        run: |
          curl -X POST -H "Content-Type: application/json" -d '{ "email":"${{ secrets.BACKEND_LOGIN_EMAIL }}","password":"${{ secrets.QUANTINUUM_PASSWORD }}" }' https://qapi.quantinuum.com/v1/login > credentials.json
          id_token=$(jq -r '."id-token"' credentials.json)
          refresh_token=$(jq -r '."refresh-token"' credentials.json)
          echo "key: $id_token" > ~/.quantinuum_config
          echo "refresh: $refresh_token" >> ~/.quantinuum_config
      - name: QIR syntax check (Quantinuum)
        if: github.event_name == 'schedule' || inputs.target == 'nightly'
        run: |
          echo "### QIR syntax check (Quantinuum)" >> $GITHUB_STEP_SUMMARY
          export CUDAQ_LOG_LEVEL="info"
          set +e # Allow script to keep going through errors
          test_err_sum=0
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            case $filename in
              targettests/quantinuum/*)
                [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
                nvq++ -v $filename -DSYNTAX_CHECK --target quantinuum --quantinuum-machine H1-1SC
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  ./a.out
                  test_status=$?
                  if [ $test_status -eq 0 ]; then
                    echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                  else
                    echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                    test_err_sum=$((test_err_sum+1))
                  fi
                else
                  echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
                ;;
            esac
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Submit to IonQ Simulator
        if: (success() || failure()) && (inputs.target == 'ionq' || github.event_name == 'schedule' || inputs.target == 'nightly')
        run: |
          echo "### Submit to IonQ Simulator" >> $GITHUB_STEP_SUMMARY
          export IONQ_API_KEY='${{ secrets.IONQ_API_KEY }}'
          # TODO: remove this flag once https://github.com/NVIDIA/cuda-quantum/issues/512 is addressed.
          export CUDAQ_LOG_LEVEL="info"
          set +e # Allow script to keep going through errors
          test_err_sum=0
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            case $filename in
              targettests/ionq/*)
                [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
                nvq++ -v $filename --target ionq
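                # No machine is specified, so jobs go to IonQ's default simulator backend (assumed default).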
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  ./a.out
                  test_status=$?
                  if [ $test_status -eq 0 ]; then
                    echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                  else
                    echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                    test_err_sum=$((test_err_sum+1))
                  fi
                else
                  echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
                ;;
            esac
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Submit to IQM Demo server
        if: (success() || failure()) && (inputs.target == 'iqm' || github.event_name == 'schedule' || inputs.target == 'nightly')
        run: |
          # Must install iqm-cortex-cli to authenticate
          pip install iqm-cortex-cli
          echo "### Submit to IQM Demo server" >> $GITHUB_STEP_SUMMARY
          # IQM demo server info is from: https://demo.qc.iqm.fi/cocos/info/
          cortex init --config-file ${HOME}/.config/iqm-cortex-cli/config.json --tokens-file ${HOME}/.cache/iqm-cortex-cli/tokens.json --auth-server-url https://demo.qc.iqm.fi/auth --client-id iqm_client --realm cortex --username '${{ secrets.IQM_USER }}'
          cortex auth login --username '${{ secrets.IQM_USER }}' --password '${{ secrets.IQM_PASSWORD }}'
          echo ":white_check_mark: Successfully installed iqm-cortex-cli and logged in" >> $GITHUB_STEP_SUMMARY
          # Use the demo machine, which uses the Adonis architecture
          export IQM_SERVER_URL="https://demo.qc.iqm.fi/cocos"
          export CUDAQ_LOG_LEVEL="info"
          set +e # Allow script to keep going through errors
          test_err_sum=0
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
            # Only the following tests are currently supported on IQM
            case $filename in
              targettests/iqm/*)
                nvq++ -DSYNTAX_CHECK --target iqm --iqm-machine Adonis $filename
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  ./a.out
                  test_status=$?
                  if [ $test_status -eq 0 ]; then
                    echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                  else
                    echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                    test_err_sum=$((test_err_sum+1))
                  fi
                else
                  echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
                ;;
            esac
          done
          set -e # Re-enable exit code error checking
          cortex auth logout -f
          echo ":white_check_mark: Successfully logged out of IQM" >> $GITHUB_STEP_SUMMARY
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Submit to OQC Sandbox server
        # This step is currently bypassed during nightly runs due to
        # maintenance. Restore the if check to the original value when
        # maintenance is complete.
        # if: (success() || failure()) && (inputs.target == 'oqc' || github.event_name == 'schedule' || inputs.target == 'nightly')
        if: (success() || failure()) && (inputs.target == 'oqc')
        run: |
          echo "### Submit to OQC sandbox server" >> $GITHUB_STEP_SUMMARY
          export CUDAQ_LOG_LEVEL="info"
          export OQC_EMAIL='${{ secrets.BACKEND_LOGIN_EMAIL }}'
          export OQC_PASSWORD='${{ secrets.OQC_PASSWORD }}'
          set +e # Allow script to keep going through errors
          test_err_sum=0
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
            # Only the following tests are currently supported on OQC
            case $filename in
              targettests/oqc/*)
                nvq++ -DSYNTAX_CHECK --target oqc $filename
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  ./a.out
                  test_status=$?
                  if [ $test_status -eq 0 ]; then
                    echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                  else
                    echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                    test_err_sum=$((test_err_sum+1))
                  fi
                else
                  echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
                ;;
            esac
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Submit to ORCA test server
        if: (success() || failure()) && (inputs.target == 'orca' || github.event_name == 'schedule' || inputs.target == 'nightly')
        run: |
          echo "### Submit to ORCA server" >> $GITHUB_STEP_SUMMARY
          export ORCA_ACCESS_URL='${{ secrets.ORCA_ACCESS_URL }}'
          set +e # Allow script to keep going through errors
          test_err_sum=0
          cpp_tests="docs/sphinx/examples/cpp/providers/orca.cpp"
          for filename in $cpp_tests; do
            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
            nvq++ --target orca --orca-url $ORCA_ACCESS_URL $filename
            test_status=$?
            if [ $test_status -eq 0 ]; then
              ./a.out
              test_status=$?
              if [ $test_status -eq 0 ]; then
                echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
              else
                echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                test_err_sum=$((test_err_sum+1))
              fi
            else
              echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
              test_err_sum=$((test_err_sum+1))
            fi
          done
          python_tests="docs/sphinx/examples/python/providers/orca.py"
          for filename in $python_tests; do
            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
            python3 $filename 1> /dev/null
            test_status=$?
            if [ $test_status -eq 0 ]; then
              echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
            else
              echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
              test_err_sum=$((test_err_sum+1))
            fi
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
      - name: Submit to ${{ inputs.target }}
        # The full set of tests used by this step is currently only supported on
        # Quantinuum. The other supported tests are covered by the steps above.
        # The main point of this special step is to run with a user-specified
        # target_machine. It also doesn't use -DSYNTAX_CHECK, so you probably
        # don't want to run this against a syntax-check-only machine.
        if: inputs.target == 'quantinuum' && github.event_name == 'workflow_dispatch'
        run: |
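          # GitHub expressions like the one below expand to the literal strings 'true'/'false',
          # which bash then runs as commands ('true' succeeds, 'false' fails).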
          if ${{ inputs.target == 'ionq' }}; then
            export IONQ_API_KEY='${{ secrets.IONQ_API_KEY }}'
          fi
          for filename in ${{ steps.gettests.outputs.testlist }}; do
            [ -e "$filename" ] || echo "::error::Couldn't find file ($filename)"
            case $filename in
              targettests/quantinuum/*)
                nvq++ -v $filename --target ${{ inputs.target }} --${{ inputs.target }}-machine ${{ inputs.target_machine }}
                CUDAQ_LOG_LEVEL=info ./a.out
                ;;
            esac
          done
        shell: bash
  nvqc_integration_docker_test:
    name: NVQC integration test using Docker image
    runs-on: ubuntu-latest
    if: (inputs.target == 'nvqc' || github.event_name == 'schedule' || inputs.target == 'nightly')
    needs: [setup, metadata, build_nvqc_image, deploy_nvqc_test_function]
    environment: ghcr-deployment
    container:
      image: ${{ needs.setup.outputs.cudaq_test_image }}
      options: --user root
    steps:
      - name: Get code
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.metadata.outputs.cudaq_commit }}
          fetch-depth: 1
      - name: Submit to NVQC
        run: |
echo "### Submit to NVQC" >> $GITHUB_STEP_SUMMARY | |
export NVQC_API_KEY="${{ secrets.NVQC_SERVICE_KEY }}" | |
export NVQC_FUNCTION_ID="$NVQC_FUNCTION_ID" | |
export NVQC_FUNCTION_VERSION_ID="${{ needs.deploy_nvqc_test_function.outputs.nvqc_function_version_id }}" | |
# When overriding the NVQC_FUNCTION_ID to a function that doesn't | |
# follow the production naming convenvtions, we need to set the | |
# following environment variable to tell the client that the server | |
# has all the remote capabilities. | |
export CUDAQ_CLIENT_REMOTE_CAPABILITY_OVERRIDE=1 | |
set +e # Allow script to keep going through errors | |
test_err_sum=0 | |
# Test all NVQPP execution tests | |
for filename in `find targettests/execution/ -name '*.cpp'`; do | |
echo "$filename" | |
# Only run tests that require execution (not a syntax-only check) | |
if grep -q "ifndef SYNTAX_CHECK" "$filename"; then | |
nvq++ -v $filename --target nvqc | |
              test_status=$?
              if [ $test_status -eq 0 ]; then
                ./a.out
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                else
                  echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
              else
                echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                test_err_sum=$((test_err_sum+1))
              fi
            fi
          done
          # Test all remote-sim tests
          for filename in $(find targettests/Remote-Sim -name '*.cpp'); do
            # unsupport_args and compile_errors are compile error tests
            # nested_vectors: Compiler fails to synthesize nested vector parameters (https://github.com/NVIDIA/cuda-quantum/issues/2001)
            # qvector_init_from_state: New argument synthesis is not executed for nvqc (https://github.com/NVIDIA/cuda-quantum/issues/2146)
            if [[ "$filename" != *"unsupport_args"* ]] && [[ "$filename" != *"compile_errors"* ]] && [[ "$filename" != *"nested_vectors"* ]] && [[ "$filename" != *"qvector_init_from_state"* ]]; then
echo "$filename" | |
nvqc_config="" | |
# Look for a --remote-mqpu-auto-launch to determine the number of QPUs | |
num_qpus=`cat $filename | grep -oP -m 1 '^//\s*RUN:\s*nvq++.+--remote-mqpu-auto-launch\s+\K\S+'` | |
if [ -n "$num_qpus" ]; then | |
echo "Intended to run on '$num_qpus' QPUs." | |
nvqc_config="$nvqc_config --nvqc-nqpus $num_qpus" | |
fi | |
nvq++ -v $filename --target nvqc $nvqc_config | |
test_status=$? | |
if [ $test_status -eq 0 ]; then | |
./a.out | |
test_status=$? | |
if [ $test_status -eq 0 ]; then | |
echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY | |
else | |
echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY | |
test_err_sum=$((test_err_sum+1)) | |
fi | |
else | |
echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY | |
test_err_sum=$((test_err_sum+1)) | |
fi | |
fi | |
done | |
          # Test C++ examples with NVQC
          for filename in $(find examples/cpp/ -name '*.cpp'); do
            if [[ "$filename" == *"nvqc"* ]]; then
              echo "$filename"
              nvqc_config=""
              # Look for a --nvqc-backend flag to nvq++ in the comment block
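              # sed '/^$/,$d' keeps only the file's leading comment header
              # (it deletes everything from the first blank line to the end).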
              nvqc_backend=$(sed -e '/^$/,$d' "$filename" | grep -oP -m 1 '^//\s*nvq++.+--nvqc-backend\s+\K\S+')
              if [ -n "$nvqc_backend" ]; then
                echo "Intended for execution on '$nvqc_backend' backend."
                nvqc_config="$nvqc_config --nvqc-backend $nvqc_backend"
              fi
              # Look for a --nvqc-nqpus flag to nvq++ in the comment block
              num_qpus=$(sed -e '/^$/,$d' "$filename" | grep -oP -m 1 '^//\s*nvq++.+--nvqc-nqpus\s+\K\S+')
              if [ -n "$num_qpus" ]; then
                echo "Intended to run on '$num_qpus' QPUs."
                nvqc_config="$nvqc_config --nvqc-nqpus $num_qpus"
              fi
              nvq++ -v $filename --target nvqc $nvqc_config
              test_status=$?
              if [ $test_status -eq 0 ]; then
                ./a.out
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
                else
                  echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
              else
                echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
                test_err_sum=$((test_err_sum+1))
              fi
            fi
          done
          # Test NVQC Python examples + Python MLIR execution tests (not IR tests)
          python3 -m pip install pytest
          for ex in $(find examples/python python/tests/mlir/target -name '*.py' -not -path '*/python/tutorials/*'); do
            filename=$(basename -- "$ex")
            filename="${filename%.*}"
            echo "Testing $filename:"
            if [[ "$ex" == *"nvqc"* ]]; then
              # This is an NVQC example
              python3 $ex 1> /dev/null
              test_status=$?
              if [ $test_status -eq 0 ]; then
                echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
              else
                echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
                test_err_sum=$((test_err_sum+1))
              fi
            else
              # Only run examples that are not target-specific (e.g., ionq, iqm)
              if ! grep -q "set_target" "$ex"; then
                # Use the --target command line option to run these examples with nvqc
                python3 $ex --target nvqc 1> /dev/null
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
                else
                  echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
              fi
            fi
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
        shell: bash
  nvqc_integration_wheel_test:
    name: NVQC integration test using Python wheels
    runs-on: ubuntu-latest
    needs: [metadata, build_nvqc_image, deploy_nvqc_test_function]
    environment: ghcr-deployment
    if: inputs.target == 'nvqc' || github.event_name == 'schedule' || inputs.target == 'nightly'
    steps:
      - name: Get code
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.metadata.outputs.cudaq_commit }}
          fetch-depth: 1
      - name: Install wheel
        id: install_wheel
        run: |
          python_version=${{ inputs.python_version || env.python_version }}
          workflow_id=${{ inputs.workflow_id }}
          # Helper to get the *valid* Publishing run Id for a commit hash.
          # Note: runs whose 'CUDA Quantum Python wheels' jobs were skipped are not considered.
          function get_publishing_run_id {
            # Find all Publishing runs; we'll examine their jobs' statuses below
            if [[ -z "$1" ]]; then
              publishing_run_ids=$(gh run list --workflow Publishing --json databaseId --jq .[].databaseId)
            else
              publishing_run_ids=$(gh run list --commit $1 --workflow Publishing --json databaseId --jq .[].databaseId)
            fi
            for run_id in $publishing_run_ids ; do
              # Look at the run's jobs: if the "CUDA Quantum Python wheels" matrix build was
              # performed, there are multiple jobs named like "CUDA Quantum Python wheels (python_arm64...."
              cuda_wheel_build_jobs=$(gh run view $run_id --jq '.jobs.[] | select(.name | startswith("CUDA Quantum Python wheels (python_")).name' --json jobs)
              if [ ! -z "$cuda_wheel_build_jobs" ]; then
                # This is a valid run that produces wheel artifacts
                echo $run_id
                break
              fi
            done
          }
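          # Example (hypothetical): 'get_publishing_run_id 0123abc' prints the Id of the newest
          # Publishing run for commit 0123abc that actually built the Python wheels.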
if [ -z "${workflow_id}" ]; then | |
workflow_id=$(get_publishing_run_id ${{ needs.metadata.outputs.cudaq_commit }}) | |
fi | |
if [ ! -z "$workflow_id" ]; then | |
echo "Using artifacts from workflow id $workflow_id" | |
# Allow error when trying to download wheel artifacts since they might be expired. | |
set +e | |
gh run download $workflow_id --name "x86_64-py$python_version-wheels" | |
retVal=$? | |
set -e | |
if [ $retVal -ne 0 ]; then | |
echo "Failed to download wheels artifact from Publishing workflow run Id $workflow_id. Perhaps the artifacts have been expired." | |
# This is allowed since there might be a period where no Publishing workflow is run (e.g., no PR merged to main). | |
echo "skipped=true" >> $GITHUB_OUTPUT | |
exit 0 | |
fi | |
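            # Wheel filenames embed the CPython tag without the dot (e.g., cp310 for Python 3.10).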
            python_version_filename="${python_version//.}"
            # Install Python and the wheel
            apt-get update && apt-get install -y --no-install-recommends python$python_version python3-pip
            wheelfile=$(find . -name "cuda_quantum*cp$python_version_filename*x86_64.whl")
            python$python_version -m pip install $wheelfile
            echo "skipped=false" >> $GITHUB_OUTPUT
          else
            echo "Failed to retrieve Publishing workflow run Id for commit ${{ needs.metadata.outputs.cudaq_commit }}"
            exit 1
          fi
        env:
          GH_TOKEN: ${{ github.token }}
      - name: Test NVQC
        if: steps.install_wheel.outputs.skipped != 'true'
        run: |
          echo "### Submit to NVQC from Python wheels" >> $GITHUB_STEP_SUMMARY
          python_version=${{ inputs.python_version || env.python_version }}
          export NVQC_API_KEY="${{ secrets.NVQC_SERVICE_KEY }}"
          export NVQC_FUNCTION_ID="$NVQC_FUNCTION_ID"
          export NVQC_FUNCTION_VERSION_ID="${{ needs.deploy_nvqc_test_function.outputs.nvqc_function_version_id }}"
          set +e # Allow script to keep going through errors
          python$python_version -m pip install pytest
          test_err_sum=0
          for ex in $(find examples/python python/tests/mlir/target -name '*.py' -not -path '*/python/tutorials/*'); do
            filename=$(basename -- "$ex")
            filename="${filename%.*}"
            echo "Testing $filename:"
            if [[ "$ex" == *"nvqc"* ]]; then
              python$python_version $ex 1> /dev/null
              test_status=$?
              if [ $test_status -eq 0 ]; then
                echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
              else
                echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
                test_err_sum=$((test_err_sum+1))
              fi
            else
              # Only run examples that are not target-specific (e.g., ionq, iqm)
              if ! grep -q "set_target" "$ex"; then
                # Use the --target command line option to run these examples with nvqc
                python$python_version $ex --target nvqc 1> /dev/null
                test_status=$?
                if [ $test_status -eq 0 ]; then
                  echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
                else
                  echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
                  test_err_sum=$((test_err_sum+1))
                fi
              fi
            fi
          done
          set -e # Re-enable exit code error checking
          if [ $test_err_sum -ne 0 ]; then
            echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
            exit 1
          fi
  cleanup_nvqc_resources:
    name: Cleanup NVQC resources
    runs-on: ubuntu-latest
    needs: [build_nvqc_image, deploy_nvqc_test_function, nvqc_integration_docker_test, nvqc_integration_wheel_test]
    if: (success() || failure()) && (inputs.target == 'nvqc' || github.event_name == 'schedule' || inputs.target == 'nightly')
    environment: ghcr-deployment
    steps:
      - name: Get code
        uses: actions/checkout@v4
      - name: Install NGC CLI
        uses: ./.github/actions/install-ngc-cli
        with:
          version: 3.38.0
          checksum: 427c67684d792b673b63882a6d0cbb8777815095c0f2f31559c1570a91187388
      - name: Cleanup
        env:
          NGC_CLI_API_KEY: ${{ secrets.NGC_CREDENTIALS }}
          NGC_CLI_ORG: ${{ env.NGC_QUANTUM_ORG }}
          NGC_CLI_TEAM: cuda-quantum
        run: |
          echo "Version Id: ${{ needs.deploy_nvqc_test_function.outputs.nvqc_function_version_id }}"
          # Remove deployment (make it inactive)
          ngc-cli/ngc cloud-function function deploy remove $NVQC_FUNCTION_ID:${{ needs.deploy_nvqc_test_function.outputs.nvqc_function_version_id }}
          # Remove the function version
          ngc-cli/ngc cloud-function function remove $NVQC_FUNCTION_ID:${{ needs.deploy_nvqc_test_function.outputs.nvqc_function_version_id }}
          # Remove the docker image
          ngc-cli/ngc registry image remove -y nvcr.io/${{ env.NGC_QUANTUM_ORG }}/${{ env.NGC_QUANTUM_TEAM }}/cuda-quantum:nightly