name: Nightly-OnDemand Tests Rolling
on:
schedule:
# GMT+8 21:30 every workday
- cron: '30 13 * * 0-4'
# GMT+8 0:30 Saturday
- cron: '30 16 * * 5'
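# Note: GitHub Actions evaluates cron schedules in UTC, so 13:30 UTC = 21:30 GMT+8 and 16:30 UTC Friday = 00:30 Saturday GMT+8.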
workflow_dispatch:
inputs:
pytorch:
required: false
type: string
default: 'main'
description: PyTorch branch/commit
keep_torch_xpu_ops:
required: false
type: string
default: 'false'
description: Keep torch-xpu-ops pin. `true` means use the pinned commit
ut:
required: false
type: string
default: 'torch_xpu'
description: UT scope. `op_regression,op_regression_dev1,op_transformers,op_extended,op_ut,torch_xpu,xpu_profiling`. Delimiter is comma
triton:
required: false
type: string
default: ''
description: Triton commit. Use the PyTorch pinned commit by default
suite:
required: true
type: string
default: 'huggingface'
description: Dynamo benchmarks test suite. `huggingface,timm_models,torchbench,pt2e`. Delimiter is comma
dt:
required: true
type: string
default: 'float32'
description: Data precision of the test. `float32,bfloat16,float16,amp_bf16,amp_fp16`. Delimiter is comma
mode:
required: true
type: string
default: 'inference'
description: Test mode. `inference,training`. Delimiter is comma
scenario:
required: true
type: string
default: 'accuracy'
description: Test scenario. `accuracy,performance`. Delimiter is comma
model:
required: false
type: string
default: ''
description: Model. Will only run this one model if set
python:
required: false
type: string
default: '3.10'
description: Python version
permissions: read-all
concurrency:
group: ${{ github.workflow }}-${{ github.sha }}-${{ github.event_name }}-${{ inputs.pytorch }}-${{ inputs.keep_torch_xpu_ops }}-${{ inputs.ut }}-${{ inputs.triton }}-${{ inputs.suite }}-${{ inputs.dt }}-${{ inputs.mode }}-${{ inputs.scenario }}-${{ inputs.model }}-${{ inputs.python }}
cancel-in-progress: ${{ github.event_name != 'schedule' }}
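# Manually dispatched runs with identical inputs cancel any in-progress duplicate; scheduled runs are never cancelled by a newer run.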
jobs:
Linux-Nightly-Ondemand-Build-Rolling:
if: ${{ github.repository_owner == 'intel' }}
name: linux-nightly-ondemand-rolling
secrets: inherit
uses: ./.github/workflows/_linux_build.yml
with:
pytorch: ${{ github.event_name == 'schedule' && 'main' || inputs.pytorch }}
keep_torch_xpu_ops: ${{ github.event_name == 'schedule' && 'false' || inputs.keep_torch_xpu_ops }}
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
driver: rolling
runner: pvc_rolling
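# Note: the "<condition> && 'a' || 'b'" expressions above act as a ternary (safe here because the truthy values are non-empty strings): scheduled runs use the defaults, manual runs use the workflow_dispatch inputs.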
Linux-Nightly-Ondemand-UT-Tests-Rolling:
if: ${{ github.event_name == 'schedule' || inputs.ut != '' }}
name: linux-nightly-ondemand-rolling
needs: Linux-Nightly-Ondemand-Build-Rolling
uses: ./.github/workflows/_linux_ut.yml
with:
keep_torch_xpu_ops: ${{ github.event_name == 'schedule' && 'false' || inputs.keep_torch_xpu_ops }}
ut: ${{ github.event_name == 'schedule' && 'op_regression,op_regression_dev1,op_transformers,op_extended,op_ut' || inputs.ut }}
pytorch: ${{ needs.Linux-Nightly-Ondemand-Build-Rolling.outputs.torch_commit_id }}
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
triton: ${{ github.event_name == 'schedule' && '' || inputs.triton }}
driver: rolling
runner: pvc_rolling
Linux-Nightly-Ondemand-OP-Microbench-Tests-Rolling:
name: linux-nightly-ondemand-rolling / Op_microbench
permissions:
issues: write
needs: Linux-Nightly-Ondemand-Build-Rolling
uses: ./.github/workflows/_linux_op_benchmark.yml
with:
keep_torch_xpu_ops: ${{ github.event_name == 'schedule' && 'false' || inputs.keep_torch_xpu_ops }}
pytorch: ${{ needs.Linux-Nightly-Ondemand-Build-Rolling.outputs.torch_commit_id }}
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
triton: ${{ github.event_name == 'schedule' && '' || inputs.triton }}
driver: rolling
runner: pvc_rolling
Linux-Nightly-Ondemand-E2E-Tests-Rolling:
runs-on: pvc_rolling
name: linux-nightly-ondemand-rolling / e2e_test
needs: Linux-Nightly-Ondemand-Build-Rolling
timeout-minutes: 3600
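# 3600 minutes = 60 hours, presumably sized for the weekly full-suite runs.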
permissions:
issues: write
env:
GH_TOKEN: ${{ github.token }}
reference_issue: 1645
pytorch: ${{ needs.Linux-Nightly-Ondemand-Build-Rolling.outputs.torch_commit_id }}
keep_torch_xpu_ops: ${{ github.event_name == 'schedule' && 'false' || inputs.keep_torch_xpu_ops }}
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
NEOReadDebugKeys: 1
DisableScratchPages: 1
run_type: ${{ (github.event_name == 'schedule' && (github.event.schedule == '30 16 * * 5' && 'weekly' || 'nightly')) || 'on-demand' }}
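# run_type is derived from the trigger: the Saturday cron maps to 'weekly', any other schedule to 'nightly', and workflow_dispatch to 'on-demand'.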
outputs:
TORCH_BRANCH_ID: ${{ steps.pinned.outputs.TORCH_BRANCH_ID }}
TORCH_COMMIT_ID: ${{ steps.pinned.outputs.TORCH_COMMIT_ID }}
DRIVER_VERSION: ${{ steps.pinned.outputs.DRIVER_VERSION }}
KERNEL_VERSION: ${{ steps.pinned.outputs.KERNEL_VERSION }}
BUNDLE_VERSION: ${{ steps.pinned.outputs.BUNDLE_VERSION }}
OS_PRETTY_NAME: ${{ steps.pinned.outputs.OS_PRETTY_NAME }}
GCC_VERSION: ${{ steps.pinned.outputs.GCC_VERSION }}
TORCHBENCH_COMMIT_ID: ${{ steps.pinned.outputs.TORCHBENCH_COMMIT_ID }}
TORCHVISION_COMMIT_ID: ${{ steps.pinned.outputs.TORCHVISION_COMMIT_ID }}
TORCHAUDIO_COMMIT_ID: ${{ steps.pinned.outputs.TORCHAUDIO_COMMIT_ID }}
TRANSFORMERS_VERSION: ${{ steps.pinned.outputs.TRANSFORMERS_VERSION }}
TIMM_COMMIT_ID: ${{ steps.pinned.outputs.TIMM_COMMIT_ID }}
TRITON_COMMIT_ID: ${{ steps.pinned.outputs.TRITON_COMMIT_ID }}
TIMEOUT_MODELS: ${{ steps.summary.outputs.TIMEOUT_MODELS }}
steps:
- name: Checkout torch-xpu-ops
uses: actions/checkout@v4
- name: Prepare Conda ENV
run: |
which conda && conda clean -ay
conda remove --all -y -n e2e_ci || rm -rf $(dirname ${CONDA_EXE})/../envs/e2e_ci
conda create -n e2e_ci python=${{ env.python }} cmake ninja -y
source activate e2e_ci
pip install pandas scipy psutil requests
- name: Download Pytorch wheel
if: ${{ inputs.pytorch != 'nightly_wheel' }}
uses: actions/download-artifact@v4
with:
name: Torch-XPU-Wheel-${{ github.event.pull_request.number || github.sha }}
- name: Prepare Stock Pytorch
run: |
pwd
cd ../
rm -rf pytorch || sudo rm -rf pytorch
source activate e2e_ci
pip install --force-reinstall ${{ github.workspace }}/torch*.whl
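# Check out PyTorch at the exact commit the installed wheel was built from (recorded in torch.version.git_version).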
TORCH_COMMIT_ID=$(python -c 'import torch; print(torch.version.git_version)')
git clone https://github.com/pytorch/pytorch pytorch
cd pytorch
git checkout ${TORCH_COMMIT_ID}
# apply extra PRs for stock pytorch
python ../torch-xpu-ops/.github/scripts/apply_torch_pr.py
git status && git diff && git show -s
- name: Identify pinned versions
id: pinned
run: |
source .github/scripts/env.sh
cd ../pytorch
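# Each value below is written to both GITHUB_OUTPUT (exposed as job outputs) and GITHUB_ENV (available to later steps in this job).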
if [ -z "${{ inputs.triton }}" ]; then
echo "TRITON_COMMIT_ID=$(<.ci/docker/ci_commit_pins/triton-xpu.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
else
echo "TRITON_COMMIT_ID=${{ inputs.triton }}" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
fi
echo "TORCH_BRANCH_ID=${{ inputs.pytorch }}" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TORCH_COMMIT_ID=$(git rev-parse HEAD)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TORCHBENCH_COMMIT_ID=$(<.github/ci_commit_pins/torchbench.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TORCHVISION_COMMIT_ID=$(<.github/ci_commit_pins/vision.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TORCHAUDIO_COMMIT_ID=$(<.github/ci_commit_pins/audio.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TRANSFORMERS_VERSION=$(<.ci/docker/ci_commit_pins/huggingface.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "TIMM_COMMIT_ID=$(<.ci/docker/ci_commit_pins/timm.txt)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "MODEL_ONLY_NAME=${{ inputs.model }}" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "DRIVER_VERSION=$(sycl-ls |grep 'opencl:gpu' |awk '{print $NF}' |sort |uniq -c |sed 's/ //g;s/\[/*[/')" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "KERNEL_VERSION=$(uname -rv 2>&1)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "BUNDLE_VERSION=$(icpx --version 2>&1 |grep 'DPC++/C++' |sed 's/.*(//;s/).*//')" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
. /etc/os-release
echo "OS_PRETTY_NAME=${PRETTY_NAME}" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo "GCC_VERSION=$(gcc -dumpversion)" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}"
echo ${GITHUB_ENV}
- name: Triton Installation
run: |
source activate e2e_ci
cd ../pytorch
pip install cmake ninja pybind11
rm -rf pytorch_triton_xpu-*.whl
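# Read the Triton version string from the pinned commit's __init__.py, then build and install the XPU Triton wheel against it.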
TRITON_VERSION_NAME="$(
curl -sSL https://raw.githubusercontent.com/intel/intel-xpu-backend-for-triton/${TRITON_COMMIT_ID}/python/triton/__init__.py 2>&1 |\
grep '__version__' |head -n 1 |awk -F "'" '{print $2}'
)"
python .github/scripts/build_triton_wheel.py --device xpu --commit-hash ${TRITON_COMMIT_ID} --triton-version ${TRITON_VERSION_NAME}
pip install pytorch_triton_xpu-*.whl
- name: Show GITHUB_ENV
run: |
echo "$GITHUB_ENV"
rm -rf ../pytorch/inductor_log || sudo rm -rf ../pytorch/inductor_log
rm -rf /tmp/torchinductor_* || sudo rm -rf /tmp/torchinductor_*
rm -rf ~/.triton/cache || sudo rm -rf ~/.triton/cache
# Nightly launch
- name: Nightly Huggingface FP32/BF16/FP16 Inference & Training Accuracy Test
if: ${{ env.run_type == 'nightly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: huggingface
env_prepare: true
dt: float32,bfloat16,float16
mode: inference,training
scenario: accuracy
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Nightly Torchbench BF16 Training Accuracy Test
if: ${{ env.run_type == 'nightly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: torchbench
dt: bfloat16
mode: training
scenario: accuracy
env_prepare: true
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Nightly Timm_models FP16 Training Accuracy Test
if: ${{ env.run_type == 'nightly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: timm_models
dt: float16
mode: training
scenario: accuracy
env_prepare: true
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Nightly PT2E Full Test
if: ${{ env.run_type == 'nightly' }}
uses: ./.github/actions/pt2e
with:
dt: float32,int8
scenario: accuracy,performance
env_prepare: true
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
# Weekly launch
- name: Weekly Huggingface Full Test
if: ${{ env.run_type == 'weekly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: huggingface
env_prepare: true
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
mode: inference,training
scenario: accuracy,performance
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Weekly Torchbench Full Test
if: ${{ env.run_type == 'weekly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: torchbench
env_prepare: true
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
mode: inference,training
scenario: accuracy,performance
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Weekly Timm_models Full Test
if: ${{ env.run_type == 'weekly' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: timm_models
env_prepare: true
dt: float32,bfloat16,float16,amp_bf16,amp_fp16
mode: inference,training
scenario: accuracy,performance
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Weekly PT2E Full Test
if: ${{ env.run_type == 'weekly' }}
uses: ./.github/actions/pt2e
with:
env_prepare: true
dt: float32,int8
scenario: accuracy,performance
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
# On-demand launch
- name: OnDemand Test (${{ inputs.suite }} ${{ inputs.dt }} ${{ inputs.mode }} ${{ inputs.scenario }})
if: ${{ github.event_name != 'schedule' && inputs.suite != 'pt2e' }}
uses: ./.github/actions/inductor-xpu-e2e-test
with:
suite: ${{ inputs.suite }}
env_prepare: true
dt: ${{ inputs.dt }}
mode: ${{ inputs.mode }}
scenario: ${{ inputs.scenario }}
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: OnDemand PT2E Test (${{ inputs.suite }} ${{ inputs.dt }} ${{ inputs.mode }} ${{ inputs.scenario }})
if: ${{ github.event_name != 'schedule' && contains(inputs.suite, 'pt2e') }}
uses: ./.github/actions/pt2e
with:
env_prepare: true
dt: ${{ inputs.dt }}
scenario: ${{ inputs.scenario }}
hf_token: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
driver: rolling
- name: Download Reference Artifact
id: reference_id
run: |
set -xe
source activate e2e_ci
conda install gh --channel conda-forge -y
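# The reference run ID is read from the tracking issue body; on-demand runs compare against the latest weekly reference, while nightly/weekly runs compare against their own previous run of the same type.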
if [ "${{ env.run_type }}" == "on-demand" ];then
artifact_type="weekly"
else
artifact_type="${{ env.run_type }}"
fi
REFERENCE_RUN_ID="$(gh --repo ${GITHUB_REPOSITORY} issue view ${reference_issue} \
--json body -q .body |grep "Inductor-${artifact_type}-Rolling-XPU-E2E" |sed 's/.*: *//')"
gh --repo ${GITHUB_REPOSITORY} run download ${REFERENCE_RUN_ID} -p "Inductor-*-XPU-E2E-*"
rm -rf reference && mv Inductor-*-XPU-E2E-* reference
- name: Summarize archive files
id: summary
if: ${{ ! cancelled() }}
run: |
set -x -e -o pipefail
rm -rf ${{ github.workspace }}/upload_files
cp -r ${{ github.workspace }}/../pytorch/inductor_log ${{ github.workspace }}/upload_files
mkdir -p ${{ github.workspace }}/../../_backup/ && cd ${{ github.workspace }}/../../_backup/
find . -type f -name "*.tgz" -mtime +3 -delete # delete files older than 3 days
tar zcf xpu-inductor-${GITHUB_RUN_ID}.tgz -C ${{ github.workspace }}/upload_files/ . # backup logs
# Print summary
if [ "${{ inputs.suite }}" != 'pt2e' ];then
source activate e2e_ci
export LTS_OR_ROLLING='rolling'
bash ${{ github.workspace }}/.github/scripts/e2e_summary.sh \
${{ github.workspace }}/upload_files \
${{ github.workspace }}/reference \
>> ${GITHUB_STEP_SUMMARY}
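# exit_label counts result lines whose second column (assumed to be a failure count) is non-zero; a non-zero total fails this step.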
exit_label=$(awk 'BEGIN{sum=0}{if($2>0){sum++}}END{print sum}' /tmp/tmp-result.txt)
if [ ${exit_label} -ne 0 ];then
grep -E "(Real failed|to passed|Warning timeout).*: [1-9]|Summary for" /tmp/tmp-*.txt |grep -E "failed|passed|timeout" -B 1
echo "There are ${exit_label} cases that need look into!!! Please check them"
exit ${exit_label}
fi
fi
pt2e_summary_csv="$(find ${{ github.workspace }}/upload_files/ -name "summary.csv")"
if [ -f "${pt2e_summary_csv}" ];then
cat ${pt2e_summary_csv}
failed_num=$(grep ',failed' ${pt2e_summary_csv} |wc -l)
if [ ${failed_num} -ne 0 ];then
echo "[Warning] PT2E has failures!"
fi
fi
- name: Upload Inductor XPU E2E Data
if: ${{ ! cancelled() }}
uses: actions/upload-artifact@v4
with:
name: Inductor-${{ env.run_type }}-Rolling-XPU-E2E-Data-${{ github.event.pull_request.number || github.sha }}
path: ${{ github.workspace }}/upload_files
- name: Upload Reference Run ID
if: ${{ env.run_type != 'on-demand' }}
run: |
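# Record this run's ID in the tracking issue body so the next run of the same type uses it as the comparison reference.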
gh --repo ${GITHUB_REPOSITORY} issue view ${reference_issue} --json body -q .body | \
sed "s/Inductor-${{ env.run_type }}-Rolling-XPU-E2E:.*/Inductor-${{ env.run_type }}-Rolling-XPU-E2E: ${GITHUB_RUN_ID}/" | sed '/^$/d' > new_body.txt
gh --repo ${GITHUB_REPOSITORY} issue edit ${reference_issue} --body-file new_body.txt
Tests-Failure-And-Report:
if: ${{ ! cancelled() }}
runs-on: [ self-hosted, Linux ]
permissions:
issues: write
env:
GH_TOKEN: ${{ github.token }}
python: ${{ github.event_name == 'schedule' && '3.10' || inputs.python }}
needs: Linux-Nightly-Ondemand-E2E-Tests-Rolling
steps:
- name: Report github issue for XPU OPS nightly
if: github.repository_owner == 'intel'
run: |
set -xe
# Test env
build_url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
repo="${{ github.repository }}"
TORCH_BRANCH_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TORCH_BRANCH_ID }}"
TORCH_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TORCH_COMMIT_ID }}"
KERNEL_VERSION="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.KERNEL_VERSION }}"
DRIVER_VERSION="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.DRIVER_VERSION }}"
BUNDLE_VERSION="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.BUNDLE_VERSION }}"
OS_PRETTY_NAME="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.OS_PRETTY_NAME }}"
GCC_VERSION="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.GCC_VERSION }}"
TORCHBENCH_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TORCHBENCH_COMMIT_ID }}"
TORCHVISION_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TORCHVISION_COMMIT_ID }}"
TORCHAUDIO_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TORCHAUDIO_COMMIT_ID }}"
TRANSFORMERS_VERSION="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TRANSFORMERS_VERSION }}"
TIMM_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TIMM_COMMIT_ID }}"
TRITON_COMMIT_ID="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TRITON_COMMIT_ID }}"
TIMEOUT_MODELS="${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.outputs.TIMEOUT_MODELS }}"
# Test status
if [ "${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.result }}" == "success" ];then
test_status=Success
elif [ "${{ needs.Linux-Nightly-Ondemand-E2E-Tests-Rolling.result }}" == "failure" ];then
test_status=Failure
cc_comment="CC ${{ secrets.NIGHTLY_EMAIL_LIST }}"
else
test_status=None
exit 0
fi
# Test Type
if [ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ];then
test_type="On-demand"
test_issue_id=426
cc_comment="CC @${GITHUB_TRIGGERING_ACTOR}"
elif [ "${{ github.event.schedule }}" == "30 16 * * 5" ];then
test_type="Weekly"
test_issue_id=432
else
test_type="Nightly"
test_issue_id=432
fi
# Test report
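# Build a Markdown summary (status header, component commit tables, environment table) and post it as a comment on the tracking issue.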
echo -e "**${test_status}** $test_type Rolling Test on $(date +'%F'), See: $build_url\n" > ${{ github.workspace }}/report.txt
printf "Torch-xpu-ops | PyTorch | Triton\n--- | --- | ---\n${GITHUB_WORKFLOW_SHA:0:7} on ${GITHUB_REF_NAME} | " >> ${{ github.workspace }}/report.txt
printf "[${TORCH_COMMIT_ID:0:7}](https://github.com/pytorch/pytorch/commit/${TORCH_COMMIT_ID:0:7}) on $TORCH_BRANCH_ID | " >> ${{ github.workspace }}/report.txt
echo -e "[${TRITON_COMMIT_ID:0:7}](https://github.com/intel/intel-xpu-backend-for-triton/commit/${TRITON_COMMIT_ID:0:7}) \n" >> ${{ github.workspace }}/report.txt
printf "Transformers | Timm | Torchbench | Torchvision | Torchaudio\n--- | --- | --- | --- | ---\n" >> ${{ github.workspace }}/report.txt
printf "[${TRANSFORMERS_VERSION:0:7}](https://github.com/huggingface/transformers/commit/${TRANSFORMERS_VERSION:0:7}) | " >> ${{ github.workspace }}/report.txt
printf "[${TIMM_COMMIT_ID:0:7}](https://github.com/huggingface/pytorch-image-models/commit/${TIMM_COMMIT_ID:0:7}) | " >> ${{ github.workspace }}/report.txt
printf "[${TORCHBENCH_COMMIT_ID:0:7}](https://github.com/pytorch/benchmark/commit/${TORCHBENCH_COMMIT_ID:0:7}) | " >> ${{ github.workspace }}/report.txt
printf "[${TORCHVISION_COMMIT_ID:0:7}](https://github.com/pytorch/vision/commit/${TORCHVISION_COMMIT_ID:0:7}) | " >> ${{ github.workspace }}/report.txt
echo -e "[${TORCHAUDIO_COMMIT_ID:0:7}](https://github.com/pytorch/audio/commit/${TORCHAUDIO_COMMIT_ID:0:7}) \n" >> ${{ github.workspace }}/report.txt
printf "Device | OS | GCC | Python | Driver(DKMS) | Kernel | Bundle(DPCPP)\n--- | --- | --- | --- | --- | --- | ---\n" >> ${{ github.workspace }}/report.txt
echo -e "$RUNNER_NAME | $OS_PRETTY_NAME | $GCC_VERSION | ${{ env.python }} | rolling-$DRIVER_VERSION |$KERNEL_VERSION | $BUNDLE_VERSION \n" >> ${{ github.workspace }}/report.txt
if [ "${GITHUB_EVENT_NAME}" == "workflow_dispatch" ];then
test_scope="${{ inputs.suite }}/${{ inputs.dt }}/${{ inputs.mode }}/${{ inputs.scenario }}"
if [ "${{ inputs.model }}" != "" ];then
test_scope+="; model=${{ inputs.model }}"
fi
echo -e "Inputs | $test_scope\n--- | --- \n" >> ${{ github.workspace }}/report.txt
fi
echo "$TIMEOUT_MODELS" |awk '{printf("%s\\n", $0)}' >> ${{ github.workspace }}/report.txt
echo "$cc_comment" >> ${{ github.workspace }}/report.txt
# Report
report_txt=$(cat ${{ github.workspace }}/report.txt)
gh --repo $repo issue comment $test_issue_id --body "$report_txt"