Compare commits


1 commit

Author          SHA1        Message                       Date
Eric Engestrom  b0de5e98f0  VERSION: bump for 24.1.0-rc1  2024-04-24 22:28:09 +02:00
4498 changed files with 217785 additions and 539427 deletions

View File

@@ -2,7 +2,6 @@
# enforcement in the CI.
src/gallium/drivers/i915
src/gallium/drivers/r300/compiler/*
src/gallium/targets/teflon/**/*
src/amd/vulkan/**/*
src/amd/compiler/**/*

.gitignore (vendored), 1 line changed
View File

@@ -1,4 +1,3 @@
.cache
.vscode*
*.pyc
*.pyo

View File

@@ -33,57 +33,53 @@ workflow:
# merge pipeline
- if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
variables:
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
MESA_CI_PERFORMANCE_ENABLED: 1
VALVE_INFRA_VANGOGH_JOB_PRIORITY: "" # Empty tags are ignored by gitlab
JOB_PRIORITY: 75
# fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes
DEQP_RUNNER_MAX_FAILS: 40
# post-merge pipeline
- if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
variables:
# (some) nightly builds perform LTO, so they take much longer than the
# short timeout allowed in other pipelines.
# Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h
BUILD_JOB_TIMEOUT_OVERRIDE: 0
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
JOB_PRIORITY: 50
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
# pipeline for direct pushes that bypassed the CI
- if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
variables:
JOB_PRIORITY: 70
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
JOB_PRIORITY: 40
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
# pre-merge or fork pipeline
- if: $FORCE_KERNEL_TAG != null
variables:
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${FORCE_KERNEL_TAG}
JOB_PRIORITY: 50
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
- if: $FORCE_KERNEL_TAG == null
variables:
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
JOB_PRIORITY: 50
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
variables:
FDO_UPSTREAM_REPO: mesa/mesa
MESA_TEMPLATES_COMMIT: &ci-templates-commit e195d80f35b45cc73668be3767b923fd76c70ed5
MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
CI_PRE_CLONE_SCRIPT: |-
set -o xtrace
wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
bash download-git-cache.sh
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
CI_JOB_JWT_FILE: /minio_jwt
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
S3_KERNEL_BUCKET: mesa-rootfs
# Bucket for git cache
S3_GITCACHE_BUCKET: git-cache
# Bucket for the pipeline artifacts pushed to S3
S3_ARTIFACTS_BUCKET: artifacts
# Buckets for traces
S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
# per-pipeline artifact storage on MinIO
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
# reference images stored for traces
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/${S3_TRACIE_RESULTS_BUCKET}/$FDO_UPSTREAM_REPO"
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO"
# For individual CI farm status see .ci-farms folder
# Disable farm with `git mv .ci-farms{,-disabled}/$farm_name`
# Re-enable farm with `git mv .ci-farms{-disabled,}/$farm_name`
@@ -95,30 +91,27 @@ variables:
MESA_VK_ABORT_ON_DEVICE_LOSS: 1
# Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
MESA_SPIRV_LOG_LEVEL: error
# Default priority for non-merge pipelines
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
JOB_PRIORITY: 50
default:
id_tokens:
S3_JWT:
aud: https://s3.freedesktop.org
before_script:
- |
if [ -z "${KERNEL_IMAGE_BASE:-}" ]; then
export KERNEL_IMAGE_BASE="https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${EXTERNAL_KERNEL_TAG:-$KERNEL_TAG}"
fi
- >
export SCRIPTS_DIR=$(mktemp -d) &&
curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
. ${SCRIPTS_DIR}/setup-test-env.sh &&
echo -n "${S3_JWT}" > "${S3_JWT_FILE}" &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}" &&
unset CI_JOB_JWT # Unsetting vulnerable env variables
after_script:
# Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
- find -name '*.log' -exec mv {} {}.txt \;
- >
set +x
test -e "${CI_JOB_JWT_FILE}" &&
export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
rm "${CI_JOB_JWT_FILE}"
# Retry when job fails. Failed jobs can be found in the Mesa CI Daily Reports:
# https://gitlab.freedesktop.org/mesa/mesa/-/issues/?sort=created_date&state=opened&label_name%5B%5D=CI%20daily
retry:
@@ -138,28 +131,18 @@ stages:
- sanity
- container
- git-archive
- build-for-tests
- build-only
- build-x86_64
- build-misc
- code-validation
- amd
- amd-postmerge
- intel
- intel-postmerge
- nouveau
- nouveau-postmerge
- arm
- arm-postmerge
- broadcom
- broadcom-postmerge
- freedreno
- freedreno-postmerge
- etnaviv
- etnaviv-postmerge
- software-renderer
- software-renderer-postmerge
- layered-backends
- layered-backends-postmerge
- performance
- deploy
include:
@@ -184,11 +167,12 @@ include:
- local: 'src/**/ci/gitlab-ci.yml'
# Rules applied to every job in the pipeline
.common-rules:
rules:
- if: *is-fork-push
when: manual
# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
# Pre-merge pipeline
- &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
.never-post-merge-rules:
rules:
@@ -198,7 +182,6 @@ include:
.container+build-rules:
rules:
- !reference [.common-rules, rules]
# Run when re-enabling a disabled farm, but not when disabling it
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
@@ -280,7 +263,8 @@ make git archive:
# compress the current folder
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
- ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
# Sanity checks of MR settings and commit logs
sanity:
@@ -299,20 +283,19 @@ sanity:
- |
set -eu
image_tags=(
ALPINE_X86_64_BUILD_TAG
ALPINE_X86_64_LAVA_SSH_TAG
DEBIAN_BASE_TAG
DEBIAN_BUILD_TAG
DEBIAN_PYUTILS_TAG
DEBIAN_TEST_ANDROID_TAG
DEBIAN_TEST_GL_TAG
DEBIAN_TEST_VK_TAG
DEBIAN_X86_64_TEST_ANDROID_TAG
DEBIAN_X86_64_TEST_GL_TAG
DEBIAN_X86_64_TEST_VK_TAG
ALPINE_X86_64_BUILD_TAG
ALPINE_X86_64_LAVA_SSH_TAG
FEDORA_X86_64_BUILD_TAG
KERNEL_ROOTFS_TAG
KERNEL_TAG
PKG_REPO_REV
WINDOWS_X64_BUILD_TAG
WINDOWS_X64_MSVC_TAG
WINDOWS_X64_BUILD_TAG
WINDOWS_X64_TEST_TAG
)
for var in "${image_tags[@]}"
@@ -354,5 +337,3 @@ mr-label-maker-test:
optional: true
- job: rustfmt
optional: true
- job: toml-lint
optional: true

View File

@@ -80,8 +80,3 @@ wayland-dEQP-EGL.functional.render.multi_context.gles2_gles3.other
wayland-dEQP-EGL.functional.render.multi_thread.gles2.other
wayland-dEQP-EGL.functional.render.multi_thread.gles3.other
wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other
# These test the loader more than the implementation and are broken because the
# Vulkan loader in Debian is too old
dEQP-VK.api.get_device_proc_addr.non_enabled
dEQP-VK.api.version_check.unavailable_entry_points

View File

@@ -5,25 +5,17 @@ target:
id: '{{ ci_runner_id }}'
timeouts:
first_console_activity: # This limits the time it can take to receive the first console log
minutes: {{ timeout_first_console_activity_minutes | default(0, true) }}
seconds: {{ timeout_first_console_activity_seconds | default(0, true) }}
retries: {{ timeout_first_console_activity_retries }}
minutes: {{ timeout_first_minutes }}
retries: {{ timeout_first_retries }}
console_activity: # Reset every time we receive a message from the logs
minutes: {{ timeout_console_activity_minutes | default(0, true) }}
seconds: {{ timeout_console_activity_seconds | default(0, true) }}
retries: {{ timeout_console_activity_retries }}
minutes: {{ timeout_minutes }}
retries: {{ timeout_retries }}
boot_cycle:
minutes: {{ timeout_boot_minutes | default(0, true) }}
seconds: {{ timeout_boot_seconds | default(0, true) }}
minutes: {{ timeout_boot_minutes }}
retries: {{ timeout_boot_retries }}
overall: # Maximum time the job can take, not overrideable by the "continue" deployment
minutes: {{ timeout_overall_minutes | default(0, true) }}
seconds: {{ timeout_overall_seconds | default(0, true) }}
minutes: {{ timeout_overall_minutes }}
retries: 0
# no retries possible here
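An editorial aside, not part of the diff: one variant of the timeout block above passes each value through Jinja's `default(0, true)` filter so that unset or empty inputs fall back to 0, while the other renders the variable directly. A minimal Python sketch of the filter's behaviour, assuming the jinja2 package is installed and using hypothetical variable names:

from jinja2 import Template

tmpl = Template("minutes: {{ m | default(0, true) }}, seconds: {{ s | default(0, true) }}")
# The boolean second argument makes the filter replace falsy values (such as an
# empty string) as well, not only undefined variables.
print(tmpl.render(m="", s=45))   # minutes: 0, seconds: 45
print(tmpl.render())             # minutes: 0, seconds: 0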
@@ -39,50 +31,37 @@ console_patterns:
job_success:
regex: >-
{{ job_success_regex }}
{% if job_warn_regex %}
job_warn:
regex: >-
{{ job_warn_regex }}
{% endif %}
# Environment to deploy
deployment:
# Initial boot
start:
storage:
http:
- path: "/b2c-extra-args"
data: >
b2c.pipefail b2c.poweroff_delay={{ poweroff_delay }}
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
{% for volume in volumes %}
b2c.volume={{ volume }}
{% endfor %}
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd | replace('"', '\\\"') }}"
kernel:
{% if kernel_url %}
url: '{{ kernel_url }}'
{% endif %}
# NOTE: b2c.cache_device should not be here, but this works around
# a limitation of b2c which will be removed in the next release
cmdline: >
SALAD.machine_id={{ '{{' }} machine_id }}
console={{ '{{' }} local_tty_device }},115200
b2c.cache_device=auto b2c.ntp_peer=10.42.0.1
b2c.extra_args_url={{ '{{' }} job.http.url }}/b2c-extra-args
console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
loglevel={{ log_level }} no_hash_pointers
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
{% for volume in volumes %}
b2c.volume={{ volume }}
{% endfor %}
b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
{% if kernel_cmdline_extras is defined %}
{{ kernel_cmdline_extras }}
{% endif %}
{% if initramfs_url %}
initramfs:
url: '{{ initramfs_url }}'
{% endif %}
{% if dtb_url %}
{% if dtb_url is defined %}
dtb:
url: '{{ dtb_url }}'
{% endif %}

View File

@@ -5,8 +5,6 @@
# First stage: very basic setup to bring up network and /dev etc
/init-stage1.sh
export CURRENT_SECTION=dut_boot
# Second stage: run jobs
test $? -eq 0 && /init-stage2.sh

View File

@@ -50,10 +50,6 @@ if [ -z "$BM_CMDLINE" ]; then
exit 1
fi
. "${SCRIPTS_DIR}/setup-test-env.sh"
section_start prepare_rootfs "Preparing rootfs components"
set -ex
# Clear out any previous run's artifacts.
@@ -90,7 +86,7 @@ rm -rf /tftp/*
if echo "$BM_KERNEL" | grep -q http; then
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
$BM_KERNEL -o /tftp/vmlinuz
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
elif [ -n "${FORCE_KERNEL_TAG}" ]; then
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
@@ -103,20 +99,16 @@ fi
echo "$BM_CMDLINE" > /tftp/cmdline
set +e
STRUCTURED_LOG_FILE=results/job_detail.json
STRUCTURED_LOG_FILE=job_detail.json
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
section_end prepare_rootfs
python3 $BM/cros_servo_run.py \
--cpu $BM_SERIAL \
--ec $BM_SERIAL_EC \
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20}
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
ret=$?
section_start dut_cleanup "Cleaning up after job"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
set -e
@@ -124,6 +116,9 @@ set -e
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
section_end dut_cleanup
if [ -f "${STRUCTURED_LOG_FILE}" ]; then
cp -p ${STRUCTURED_LOG_FILE} results/
echo "Structured log file is available at https://${CI_PROJECT_ROOT_NAMESPACE}.pages.freedesktop.org/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts/results/${STRUCTURED_LOG_FILE}"
fi
exit $ret

View File

@@ -4,29 +4,21 @@
# SPDX-License-Identifier: MIT
import argparse
import datetime
import math
import os
import re
import sys
from custom_logger import CustomLogger
from serial_buffer import SerialBuffer
ANSI_ESCAPE="\x1b[0K"
ANSI_COLOUR="\x1b[0;36m"
ANSI_RESET="\x1b[0m"
SECTION_START="start"
SECTION_END="end"
class CrosServoRun:
def __init__(self, cpu, ec, test_timeout, logger):
self.cpu_ser = SerialBuffer(
cpu, "results/serial.txt", ": ")
cpu, "results/serial.txt", "R SERIAL-CPU> ")
# Merge the EC serial into the cpu_ser's line stream so that we can
# effectively poll on both at the same time and not have to worry about
self.ec_ser = SerialBuffer(
ec, "results/serial-ec.txt", " EC: ", line_queue=self.cpu_ser.line_queue)
ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
self.test_timeout = test_timeout
self.logger = logger
@@ -35,11 +27,11 @@ class CrosServoRun:
self.cpu_ser.close()
def ec_write(self, s):
print("EC> %s" % s)
print("W SERIAL-EC> %s" % s)
self.ec_ser.serial.write(s.encode())
def cpu_write(self, s):
print("> %s" % s)
print("W SERIAL-CPU> %s" % s)
self.cpu_ser.serial.write(s.encode())
def print_error(self, message):
@@ -48,31 +40,6 @@ class CrosServoRun:
print(RED + message + NO_COLOR)
self.logger.update_status_fail(message)
def get_rel_timestamp(self):
now = datetime.datetime.now(tz=datetime.UTC)
then_env = os.getenv("CI_JOB_STARTED_AT")
if not then_env:
return ""
delta = now - datetime.datetime.fromisoformat(then_env)
return f"[{math.floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}]"
def get_cur_timestamp(self):
return str(int(datetime.datetime.timestamp(datetime.datetime.now())))
def print_gitlab_section(self, action, name, description, collapse=True):
assert action in [SECTION_START, SECTION_END]
out = ANSI_ESCAPE + "section_" + action + ":"
out += self.get_cur_timestamp() + ":"
out += name
if action == "start" and collapse:
out += "[collapsed=true]"
out += "\r" + ANSI_ESCAPE + ANSI_COLOUR
out += self.get_rel_timestamp() + " " + description + ANSI_RESET
print(out)
def boot_section(self, action):
self.print_gitlab_section(action, "dut_boot", "Booting hardware device", True)
def run(self):
# Flush any partial commands in the EC's prompt, then ask for a reboot.
self.ec_write("\n")
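An editorial sketch, not part of the diff: print_gitlab_section() above builds the escape-sequence markers that GitLab renders as collapsible sections in the job log. Roughly what a start marker looks like, with a hypothetical timestamp and elapsed-time prefix:

ANSI_ESCAPE = "\x1b[0K"
ANSI_COLOUR = "\x1b[0;36m"
ANSI_RESET = "\x1b[0m"
# section_start:<unix time>:<name>[collapsed=true], then the coloured description
marker = (ANSI_ESCAPE + "section_start:1714000000:dut_boot[collapsed=true]\r"
          + ANSI_ESCAPE + ANSI_COLOUR + "[02:13] Booting hardware device" + ANSI_RESET)
print(marker)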
@@ -80,7 +47,6 @@ class CrosServoRun:
bootloader_done = False
self.logger.create_job_phase("boot")
self.boot_section(SECTION_START)
tftp_failures = 0
# This is emitted right when the bootloader pauses to check for input.
# Emit a ^N character to request network boot, because we don't have a
@@ -161,18 +127,14 @@ class CrosServoRun:
self.print_error("Detected cheza MMU fail, abandoning run.")
return 1
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
result = re.search("hwci: mesa: (\S*)", line)
if result:
status = result.group(1)
exit_code = int(result.group(2))
if status == "pass":
if result.group(1) == "pass":
self.logger.update_dut_job("status", "pass")
return 0
else:
self.logger.update_status_fail("test fail")
self.logger.update_dut_job("exit_code", exit_code)
return exit_code
return 1
self.print_error(
"Reached the end of the CPU serial log without finding a result")
@@ -189,7 +151,7 @@ def main():
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
args = parser.parse_args()
logger = CustomLogger("results/job_detail.json")
logger = CustomLogger("job_detail.json")
logger.update_dut_time("start", None)
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger)
retval = servo.run()

View File

@@ -55,8 +55,6 @@ if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
BM_FASTBOOT_NFSROOT=1
fi
section_start prepare_rootfs "Preparing rootfs components"
set -ex
# Clear out any previous run's artifacts.
@@ -107,7 +105,7 @@ if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
cat kernel dtb > Image.gz-dtb
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
elif [ -n "${FORCE_KERNEL_TAG}" ]; then
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
@@ -150,12 +148,10 @@ if [ -n "$BM_SERIAL_SCRIPT" ]; then
done
fi
section_end prepare_rootfs
set +e
$BM/fastboot_run.py \
--dev="$BM_SERIAL" \
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} \
--test-timeout ${TEST_PHASE_TIMEOUT:-20} \
--fbserial="$BM_FASTBOOT_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN"

View File

@@ -119,12 +119,12 @@ class FastbootRun:
if print_more_lines == -1:
print_more_lines = 30
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
result = re.search("hwci: mesa: (\S*)", line)
if result:
status = result.group(1)
exit_code = int(result.group(2))
return exit_code
if result.group(1) == "pass":
return 0
else:
return 1
self.print_error(
"Reached the end of the CPU serial log without finding a result, abandoning run.")

View File

@@ -10,7 +10,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then
exit 1
fi
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))"
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
SNMP_OFF="i 2"
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"

View File

@@ -10,7 +10,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then
exit 1
fi
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))"
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
SNMP_ON="i 1"
SNMP_OFF="i 2"
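An editorial sketch, not part of the diff: in both the power-off and power-on scripts above, the two SNMP_KEY lines differ only in where the port offset comes from, either BM_POE_BASE with a default of 0 or a hard-coded 48. With a hypothetical interface number:

import os

interface = 3                                    # hypothetical BM_POE_INTERFACE
base = int(os.environ.get("BM_POE_BASE", "0"))   # mirrors ${BM_POE_BASE:-0}
print(f"SNMPv2-SMI::mib-2.105.1.1.1.3.1.{base + interface}")  # variable offset
print(f"SNMPv2-SMI::mib-2.105.1.1.1.3.1.{48 + interface}")    # fixed offset of 48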

View File

@@ -71,8 +71,6 @@ if [ -z "$BM_CMDLINE" ]; then
exit 1
fi
section_start prepare_rootfs "Preparing rootfs components"
set -ex
date +'%F %T'
@@ -104,7 +102,7 @@ if [ -f "${BM_BOOTFS}" ]; then
fi
# If BM_KERNEL and BM_DTS is present
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
if [ -n "${FORCE_KERNEL_TAG}" ]; then
if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then
echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!"
exit 1
@@ -122,7 +120,7 @@ date +'%F %T'
# Install kernel modules (it could be either in /lib/modules or
# /usr/lib/modules, but we want to install in the latter)
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
if [ -n "${FORCE_KERNEL_TAG}" ]; then
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/
rm modules.tar.zst &
elif [ -n "${BM_BOOTFS}" ]; then
@@ -136,7 +134,7 @@ fi
date +'%F %T'
# Install kernel image + bootloader files
if [ -n "${EXTERNAL_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
if [ -n "${FORCE_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
else # BM_BOOTFS
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
@@ -183,16 +181,13 @@ if [ -n "$BM_BOOTCONFIG" ]; then
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
fi
section_end prepare_rootfs
set +e
STRUCTURED_LOG_FILE=results/job_detail.json
STRUCTURED_LOG_FILE=job_detail.json
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
ATTEMPTS=3
first_attempt=True
while [ $((ATTEMPTS--)) -gt 0 ]; do
section_start dut_boot "Booting hardware device ..."
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
# Update submit time to CI_JOB_STARTED_AT only for the first run
if [ "$first_attempt" = "True" ]; then
@@ -204,22 +199,17 @@ while [ $((ATTEMPTS--)) -gt 0 ]; do
--dev="$BM_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN" \
--boot-timeout-seconds ${BOOT_PHASE_TIMEOUT_SECONDS:-300} \
--test-timeout-minutes ${TEST_PHASE_TIMEOUT_MINUTES:-$((CI_JOB_TIMEOUT/60 - ${TEST_SETUP_AND_UPLOAD_MARGIN_MINUTES:-5}))}
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
ret=$?
if [ $ret -eq 2 ]; then
echo "Did not detect boot sequence, retrying..."
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
first_attempt=False
error "Device failed to boot; will retry"
else
# We're no longer in dut_boot by this point
unset CURRENT_SECTION
ATTEMPTS=0
fi
done
section_start dut_cleanup "Cleaning up after job"
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
set -e
@@ -229,8 +219,11 @@ date +'%F %T'
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
if [ -f "${STRUCTURED_LOG_FILE}" ]; then
cp -p ${STRUCTURED_LOG_FILE} results/
echo "Structured log file is available at ${ARTIFACTS_BASE_URL}/results/${STRUCTURED_LOG_FILE}"
fi
date +'%F %T'
section_end dut_cleanup
exit $ret

View File

@@ -31,12 +31,11 @@ from custom_logger import CustomLogger
from serial_buffer import SerialBuffer
class PoERun:
def __init__(self, args, boot_timeout, test_timeout, logger):
def __init__(self, args, test_timeout, logger):
self.powerup = args.powerup
self.powerdown = args.powerdown
self.ser = SerialBuffer(
args.dev, "results/serial-output.txt", ": ")
self.boot_timeout = boot_timeout
args.dev, "results/serial-output.txt", "")
self.test_timeout = test_timeout
self.logger = logger
@@ -57,7 +56,7 @@ class PoERun:
boot_detected = False
self.logger.create_job_phase("boot")
for line in self.ser.lines(timeout=self.boot_timeout, phase="bootloader"):
for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
if re.search("Booting Linux", line):
boot_detected = True
break
@@ -65,7 +64,7 @@ class PoERun:
if not boot_detected:
self.print_error(
"Something wrong; couldn't detect the boot start up sequence")
return 2
return 1
self.logger.create_job_phase("test")
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
@@ -87,18 +86,14 @@ class PoERun:
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
return 1
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
result = re.search("hwci: mesa: (\S*)", line)
if result:
status = result.group(1)
exit_code = int(result.group(2))
if status == "pass":
if result.group(1) == "pass":
self.logger.update_dut_job("status", "pass")
return 0
else:
self.logger.update_status_fail("test fail")
self.logger.update_dut_job("exit_code", exit_code)
return exit_code
return 1
self.print_error(
"Reached the end of the CPU serial log without finding a result")
@@ -114,14 +109,12 @@ def main():
parser.add_argument('--powerdown', type=str,
help='shell command for powering off', required=True)
parser.add_argument(
'--boot-timeout-seconds', type=int, help='Boot phase timeout (seconds)', required=True)
parser.add_argument(
'--test-timeout-minutes', type=int, help='Test phase timeout (minutes)', required=True)
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
args = parser.parse_args()
logger = CustomLogger("results/job_detail.json")
logger = CustomLogger("job_detail.json")
logger.update_dut_time("start", None)
poe = PoERun(args, args.boot_timeout_seconds, args.test_timeout_minutes * 60, logger)
poe = PoERun(args, args.test_timeout * 60, logger)
retval = poe.run()
poe.logged_system(args.powerdown)
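An editorial sketch, not part of the diff: one variant of poe_run.py returns 2 when the boot sequence is never detected, which matches the `if [ $ret -eq 2 ]` retry branch in the Raspberry Pi shell wrapper shown earlier, while a pass returns 0. A compact restatement of that contract, with hypothetical constant names:

BOOT_NOT_DETECTED = 2   # wrapper retries the whole boot attempt
TEST_FAILED = 1         # wrapper stops and reports the failure
PASSED = 0

def wrapper_should_retry(ret: int) -> bool:
    return ret == BOOT_NOT_DETECTED

print(wrapper_should_retry(2), wrapper_should_retry(1))  # True False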

View File

@@ -13,7 +13,7 @@ date +'%F %T'
# Make JWT token available as file in the bare-metal storage to enable access
# to MinIO
cp "${S3_JWT_FILE}" "${rootfs_dst}${S3_JWT_FILE}"
cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"
date +'%F %T'

View File

@@ -22,7 +22,7 @@
# IN THE SOFTWARE.
import argparse
from datetime import datetime, UTC
from datetime import datetime, timezone
import queue
import serial
import threading
@@ -130,10 +130,9 @@ class SerialBuffer:
if b == b'\n'[0]:
line = line.decode(errors="replace")
ts = datetime.now(tz=UTC)
ts_str = f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}"
print("{endc}{time}{prefix}{line}".format(
time=ts_str, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
print("{endc}{time} {prefix}{line}".format(
time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
self.line_queue.put(line)
line = bytearray()
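An editorial sketch, not part of the diff: the two timestamp styles in this hunk side by side; the first requires Python 3.11+ for datetime.UTC:

from datetime import datetime, UTC

ts = datetime.now(tz=UTC)
# zero-padded wall clock with milliseconds, e.g. 14:03:07.251
print(f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}")
# strftime form with a two-digit date prefix, e.g. 24-04-24 14:03:07
print(datetime.now().strftime('%y-%m-%d %H:%M:%S'))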

View File

@@ -3,17 +3,14 @@
extends: .container+build-rules
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
variables:
# Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner
# without a populated ccache.
# These jobs are never slow: either they finish within a reasonable time or
# something has gone wrong and the job will never terminate, so we should
# instead time out so that the retry mechanism can kick in.
# A few exceptions are made, see overrides in the rest of this file.
BUILD_JOB_TIMEOUT: 15m
timeout: 1h
# We don't want to download any previous job's artifacts
dependencies: []
# Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner
# without a populated ccache.
# These jobs are never slow: either they finish within a reasonable time or
# something has gone wrong and the job will never terminate, so we should
# instead time out so that the retry mechanism can kick in.
# A few exceptions are made, see `timeout:` overrides in the rest of this
# file.
timeout: 30m
artifacts:
name: "mesa_${CI_JOB_NAME}"
when: always
@@ -57,46 +54,17 @@
extends:
- .build-linux
- .use-debian/x86_64_build
stage: build-only
stage: build-x86_64
variables:
LLVM_VERSION: 15
script:
- &meson-build timeout --verbose ${BUILD_JOB_TIMEOUT_OVERRIDE:-$BUILD_JOB_TIMEOUT} .gitlab-ci/meson/build.sh
# Make sure this list stays the same as all the jobs with
# `stage: build-for-tests`, except for the windows job as
# explained below.
.build-for-tests-jobs:
- job: debian-testing
optional: true
- job: debian-testing-asan
optional: true
- job: debian-build-testing
optional: true
- job: debian-arm32
optional: true
- job: debian-arm32-asan
optional: true
- job: debian-arm64
optional: true
- job: debian-arm64-asan
optional: true
# Windows runners don't have more than one build right now, so there is
# no need to wait on the "first one" to be done.
# - job: windows-msvc
# optional: true
- job: python-test
optional: true
- .gitlab-ci/meson/build.sh
debian-testing:
extends:
- .meson-build
- .ci-deqp-artifacts
stage: build-for-tests
variables:
BUILD_JOB_TIMEOUT: 30m
UNWIND: "enabled"
DRI_LOADERS: >
-D glx=dri
@@ -105,10 +73,11 @@ debian-testing:
-D glvnd=disabled
-D platforms=x11,wayland
GALLIUM_ST: >
-D dri3=enabled
-D gallium-nine=true
-D gallium-va=enabled
-D gallium-rusticl=true
GALLIUM_DRIVERS: "llvmpipe,softpipe,virgl,radeonsi,zink,crocus,iris,i915,r300,svga"
GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915,r300,svga"
VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio,nouveau"
BUILDTYPE: "debugoptimized"
EXTRA_OPTION: >
@@ -119,7 +88,7 @@ debian-testing:
S3_ARTIFACT_NAME: mesa-x86_64-default-${BUILDTYPE}
LLVM_VERSION: 15
script:
- *meson-build
- .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
artifacts:
reports:
@@ -128,9 +97,7 @@ debian-testing:
debian-testing-asan:
extends:
- debian-testing
stage: build-for-tests
variables:
BUILD_JOB_TIMEOUT: 30m
C_ARGS: >
-Wno-error=stringop-truncation
EXTRA_OPTION: >
@@ -159,9 +126,7 @@ debian-testing-msan:
# msan cannot fully work until it's used together with msan libc
extends:
- debian-clang
# `needs:` inherited from debian-clang
variables:
BUILD_JOB_TIMEOUT: 30m
# l_undef is incompatible with msan
EXTRA_OPTION:
-D b_sanitize=memory
@@ -173,7 +138,7 @@ debian-testing-msan:
# GLSL has some issues in sexpression reading.
# gtest has issues in its test initialization.
MESON_TEST_ARGS: "--suite glcpp --suite format"
GALLIUM_DRIVERS: "freedreno,iris,nouveau,r300,r600,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
GALLIUM_DRIVERS: "freedreno,iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
VULKAN_DRIVERS: intel,amd,broadcom,virtio
# Do a host build for intel-clc (msan complains about
# uninitialized values in the LLVM libs)
@@ -191,7 +156,6 @@ debian-testing-msan:
debian-build-testing:
extends: .meson-build
stage: build-for-tests
variables:
BUILDTYPE: debug
UNWIND: "enabled"
@@ -202,43 +166,40 @@ debian-build-testing:
-D glvnd=disabled
-D platforms=x11,wayland
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=enabled
-D gallium-omx=bellagio
-D gallium-va=enabled
-D gallium-xa=enabled
-D gallium-nine=false
-D gallium-nine=true
-D gallium-rusticl=false
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
VULKAN_DRIVERS: swrast
EXTRA_OPTION: >
-D spirv-to-dxil=true
-D osmesa=true
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi
-D b_lto=true
LLVM_VERSION: 15
S3_ARTIFACT_NAME: debian-build-testing
script:
- *meson-build
- .gitlab-ci/prepare-artifacts.sh
script: |
section_start lava-pytest "lava-pytest"
.gitlab-ci/lava/lava-pytest.sh
section_switch shellcheck "shellcheck"
.gitlab-ci/run-shellcheck.sh
section_switch yamllint "yamllint"
.gitlab-ci/run-yamllint.sh
section_switch meson "meson"
.gitlab-ci/meson/build.sh
.gitlab-ci/prepare-artifacts.sh
timeout: 15m
shader-db:
stage: code-validation
extends:
- .use-debian/x86_64_build
rules:
- !reference [.never-post-merge-rules, rules]
- !reference [.core-rules, rules]
# Keep this list in sync with the drivers tested in run-shader-db.sh
- !reference [.freedreno-common-rules, rules]
- !reference [.intel-common-rules, rules]
- !reference [.lima-rules, rules]
- !reference [.v3d-rules, rules]
- !reference [.vc4-rules, rules]
- !reference [.nouveau-rules, rules]
- !reference [.r300-rules, rules]
# Also run if this job's own config or script changes
- changes:
- .gitlab-ci/build/gitlab-ci.yml
- .gitlab-ci/run-shader-db.sh
- .container+build-rules
needs:
- debian-build-testing
variables:
@@ -246,6 +207,7 @@ shader-db:
before_script:
- !reference [.download_s3, before_script]
script: |
section_switch shader-db "shader-db"
.gitlab-ci/run-shader-db.sh
artifacts:
paths:
@@ -255,9 +217,6 @@ shader-db:
# Test a release build with -Werror so new warnings don't sneak in.
debian-release:
extends: .meson-build
needs:
- !reference [.meson-build, needs]
- !reference [.build-for-tests-jobs]
variables:
LLVM_VERSION: 15
UNWIND: "enabled"
@@ -270,14 +229,16 @@ debian-release:
-D glvnd=disabled
-D platforms=x11,wayland
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=enabled
-D gallium-omx=disabled
-D gallium-va=enabled
-D gallium-xa=enabled
-D gallium-nine=false
-D gallium-rusticl=false
-D llvm=enabled
GALLIUM_DRIVERS: "i915,iris,nouveau,freedreno,r300,svga,llvmpipe,softpipe,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental"
EXTRA_OPTION: >
-D spirv-to-dxil=true
@@ -289,23 +250,19 @@ debian-release:
BUILDTYPE: "release"
S3_ARTIFACT_NAME: "mesa-x86_64-default-${BUILDTYPE}"
script:
- *meson-build
- .gitlab-ci/meson/build.sh
- 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
alpine-build-testing:
extends:
- .meson-build
- .use-alpine/x86_64_build
needs:
- !reference [.use-alpine/x86_64_build, needs]
- !reference [.build-for-tests-jobs]
stage: build-x86_64
variables:
BUILD_JOB_TIMEOUT: 30m
BUILDTYPE: "release"
C_ARGS: >
-Wno-error=cpp
-Wno-error=array-bounds
-Wno-error=stringop-overflow
-Wno-error=stringop-overread
DRI_LOADERS: >
-D glx=disabled
@@ -313,10 +270,13 @@ alpine-build-testing:
-D egl=enabled
-D glvnd=disabled
-D platforms=wayland
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink"
LLVM_VERSION: "16"
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=enabled
-D gallium-xa=disabled
-D gallium-nine=true
@@ -324,32 +284,23 @@ alpine-build-testing:
-D gles1=disabled
-D gles2=enabled
-D llvm=enabled
-D llvm-orcjit=true
-D microsoft-clc=disabled
-D shared-llvm=enabled
UNWIND: "disabled"
VULKAN_DRIVERS: "amd,asahi,broadcom,freedreno,intel,imagination-experimental"
VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
fedora-release:
extends:
- .meson-build
- .use-fedora/x86_64_build
needs:
- !reference [.use-fedora/x86_64_build, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILDTYPE: "release"
# array-bounds is a bogus warning from non-LTO gcc builds; verify after the bump to F39
C_ARGS: >
-Wno-error=stringop-overflow
-Wno-error=stringop-overread
-Wno-error=array-bounds
CPP_ARGS: >
-Wno-error=dangling-reference
-Wno-error=overloaded-virtual
C_LINK_ARGS: >
-Wno-error=stringop-overflow
-Wno-error=stringop-overread
CPP_ARGS: >
-Wno-error=dangling-reference
-Wno-error=overloaded-virtual
CPP_LINK_ARGS: >
-Wno-error=stringop-overflow
-Wno-error=stringop-overread
@@ -360,16 +311,20 @@ fedora-release:
-D glvnd=enabled
-D platforms=x11,wayland
EXTRA_OPTION: >
-D b_lto=true
-D osmesa=true
-D selinux=true
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
-D vulkan-layers=device-select,overlay
-D intel-rt=enabled
-D imagination-srv=true
-D teflon=true
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink"
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=enabled
-D gallium-omx=disabled
-D gallium-va=enabled
-D gallium-xa=enabled
-D gallium-nine=false
@@ -381,16 +336,13 @@ fedora-release:
-D shared-llvm=enabled
LLVM_VERSION: ""
UNWIND: "disabled"
VULKAN_DRIVERS: "amd,asahi,broadcom,freedreno,imagination-experimental,intel,intel_hasvk"
VULKAN_DRIVERS: "amd,broadcom,freedreno,imagination-experimental,intel,intel_hasvk"
debian-android:
extends:
- .meson-cross
- .use-debian/android_build
- .ci-deqp-artifacts
needs:
- !reference [.use-debian/android_build, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILDTYPE: debug
UNWIND: "disabled"
@@ -419,7 +371,9 @@ debian-android:
-D android-libbacktrace=disabled
-D intel-clc=system
GALLIUM_ST: >
-D dri3=disabled
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=disabled
-D gallium-xa=disabled
-D gallium-nine=false
@@ -440,24 +394,19 @@ debian-android:
ARTIFACTS_DEBUG_SYMBOLS: 1
S3_ARTIFACT_NAME: mesa-x86_64-android-${BUILDTYPE}
script:
- export CROSS=aarch64-linux-android
- export GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d
- export VULKAN_DRIVERS=freedreno,broadcom,virtio
- *meson-build
- CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio .gitlab-ci/meson/build.sh
# x86_64 build:
# Can't do Intel because gen_decoder.c currently requires libexpat, which
# is not a dependency that AOSP wants to accept. Can't do Radeon Gallium
# drivers because they require LLVM, which we don't have an Android build
# of.
- export CROSS=x86_64-linux-android
- export GALLIUM_DRIVERS=iris,virgl
- export VULKAN_DRIVERS=amd,intel
- *meson-build
- CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris,virgl VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
.meson-cross:
extends:
- .meson-build
stage: build-misc
variables:
UNWIND: "disabled"
DRI_LOADERS: >
@@ -467,7 +416,9 @@ debian-android:
-D platforms=x11,wayland
-D osmesa=false
GALLIUM_ST: >
-D dri3=enabled
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=disabled
-D gallium-xa=disabled
-D gallium-nine=false
@@ -479,8 +430,8 @@ debian-android:
needs:
- debian/arm64_build
variables:
VULKAN_DRIVERS: asahi,freedreno,broadcom
GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,llvmpipe,softpipe,tegra,v3d,vc4,zink"
VULKAN_DRIVERS: freedreno,broadcom
GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink"
BUILDTYPE: "debugoptimized"
tags:
- aarch64
@@ -489,14 +440,10 @@ debian-arm32:
extends:
- .meson-arm
- .ci-deqp-artifacts
stage: build-for-tests
variables:
CROSS: armhf
DRI_LOADERS:
-D glvnd=disabled
# remove asahi & llvmpipe from the .meson-arm list because here we have llvm=disabled
VULKAN_DRIVERS: freedreno,broadcom
GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,softpipe,tegra,v3d,vc4,zink"
EXTRA_OPTION: >
-D llvm=disabled
-D valgrind=disabled
@@ -505,13 +452,12 @@ debian-arm32:
# tempfiles in our artifacts.
ARTIFACTS_DEBUG_SYMBOLS: 1
script:
- *meson-build
- .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
debian-arm32-asan:
extends:
- debian-arm32
stage: build-for-tests
variables:
DRI_LOADERS:
-D glvnd=disabled
@@ -528,35 +474,33 @@ debian-arm64:
extends:
- .meson-arm
- .ci-deqp-artifacts
stage: build-for-tests
variables:
C_ARGS: >
-Wno-error=array-bounds
-Wno-error=stringop-truncation
VULKAN_DRIVERS: "asahi,freedreno,broadcom,panfrost,imagination-experimental"
VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental"
DRI_LOADERS:
-D glvnd=disabled
EXTRA_OPTION: >
-D llvm=disabled
-D valgrind=disabled
-D imagination-srv=true
-D perfetto=true
-D freedreno-kmds=msm,virtio
-D teflon=true
GALLIUM_ST:
-D gallium-rusticl=true
S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
script:
- *meson-build
- .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
debian-arm64-asan:
extends:
- debian-arm64
stage: build-for-tests
variables:
DRI_LOADERS:
-D glvnd=disabled
EXTRA_OPTION: >
-D llvm=disabled
-D b_sanitize=address
-D valgrind=disabled
-D tools=dlclose-skip
@@ -568,11 +512,8 @@ debian-arm64-build-test:
extends:
- .meson-arm
- .ci-deqp-artifacts
needs:
- !reference [.meson-arm, needs]
- !reference [.build-for-tests-jobs]
variables:
VULKAN_DRIVERS: "amd,nouveau"
VULKAN_DRIVERS: "amd"
DRI_LOADERS:
-D glvnd=disabled
EXTRA_OPTION: >
@@ -581,10 +522,6 @@ debian-arm64-build-test:
debian-arm64-release:
extends:
- debian-arm64
stage: build-only
needs:
- !reference [debian-arm64, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILDTYPE: release
S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
@@ -593,35 +530,11 @@ debian-arm64-release:
-Wno-error=stringop-truncation
-Wno-error=stringop-overread
script:
- *meson-build
- .gitlab-ci/meson/build.sh
- 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
debian-no-libdrm:
extends:
- .meson-arm
stage: build-only
needs:
- !reference [.meson-arm, needs]
- !reference [.build-for-tests-jobs]
variables:
VULKAN_DRIVERS: freedreno
GALLIUM_DRIVERS: "zink,llvmpipe"
BUILDTYPE: release
C_ARGS: >
-Wno-error=array-bounds
-Wno-error=stringop-truncation
-Wno-error=stringop-overread
EXTRA_OPTION: >
-D freedreno-kmds=kgsl
-D glx=disabled
-D gbm=disabled
-D egl=disabled
debian-clang:
extends: .meson-build
needs:
- !reference [.meson-build, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILDTYPE: debug
LLVM_VERSION: 15
@@ -644,8 +557,10 @@ debian-clang:
-D glvnd=enabled
-D platforms=x11,wayland
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=enabled
-D gallium-omx=bellagio
-D gallium-va=enabled
-D gallium-xa=enabled
-D gallium-nine=true
@@ -656,7 +571,7 @@ debian-clang:
-D shared-llvm=enabled
-D opencl-spirv=true
-D shared-glapi=enabled
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,swrast,panfrost,imagination-experimental,microsoft-experimental,nouveau
EXTRA_OPTION:
-D spirv-to-dxil=true
@@ -674,16 +589,16 @@ debian-clang:
debian-clang-release:
extends: debian-clang
# `needs:` inherited from debian-clang
variables:
BUILD_JOB_TIMEOUT: 30m
BUILDTYPE: "release"
DRI_LOADERS: >
-D glx=xlib
-D platforms=x11,wayland
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=enabled
-D gallium-omx=bellagio
-D gallium-va=enabled
-D gallium-xa=enabled
-D gallium-nine=true
@@ -700,7 +615,7 @@ windows-msvc:
- .build-windows
- .use-windows_build_msvc
- .windows-build-rules
stage: build-for-tests
stage: build-misc
script:
- pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1
artifacts:
@@ -710,11 +625,7 @@ windows-msvc:
debian-vulkan:
extends: .meson-build
needs:
- !reference [.meson-build, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILD_JOB_TIMEOUT: 30m
BUILDTYPE: debug
LLVM_VERSION: 15
UNWIND: "disabled"
@@ -729,7 +640,9 @@ debian-vulkan:
-D platforms=x11,wayland
-D osmesa=false
GALLIUM_ST: >
-D dri3=enabled
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=disabled
-D gallium-xa=disabled
-D gallium-nine=false
@@ -738,7 +651,7 @@ debian-vulkan:
-D c_args=-fno-sanitize-recover=all
-D cpp_args=-fno-sanitize-recover=all
UBSAN_OPTIONS: "print_stacktrace=1"
VULKAN_DRIVERS: amd,asahi,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau
VULKAN_DRIVERS: amd,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau
EXTRA_OPTION: >
-D vulkan-layers=device-select,overlay
-D build-aco-tests=true
@@ -749,14 +662,11 @@ debian-x86_32:
extends:
- .meson-cross
- .use-debian/x86_32_build
needs:
- !reference [.use-debian/x86_32_build, needs]
- !reference [.build-for-tests-jobs]
variables:
BUILDTYPE: debug
CROSS: i386
VULKAN_DRIVERS: intel,amd,swrast,virtio
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,llvmpipe,softpipe,virgl,zink,crocus,d3d12"
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus,d3d12"
LLVM_VERSION: 15
DRI_LOADERS:
-D glvnd=disabled
@@ -775,24 +685,18 @@ debian-x86_32:
-D intel-clc=enabled
-D install-intel-clc=true
# While s390 is dead, s390x is very much alive, and one of the last major
# big-endian platforms, so it provides useful coverage.
# In case of issues with this job, contact @ajax
debian-s390x:
extends:
- .meson-cross
- debian-ppc64el
- .use-debian/s390x_build
needs:
- !reference [.use-debian/s390x_build, needs]
- !reference [.build-for-tests-jobs]
- .s390x-rules
tags:
- kvm
variables:
BUILDTYPE: debug
CROSS: s390x
GALLIUM_DRIVERS: "llvmpipe,virgl,zink"
GALLIUM_DRIVERS: "swrast,zink"
LLVM_VERSION: 15
VULKAN_DRIVERS: "swrast,virtio"
VULKAN_DRIVERS: "swrast"
DRI_LOADERS:
-D glvnd=disabled
@@ -800,28 +704,11 @@ debian-ppc64el:
extends:
- .meson-cross
- .use-debian/ppc64el_build
needs:
- !reference [.use-debian/ppc64el_build, needs]
- !reference [.build-for-tests-jobs]
- .ppc64el-rules
variables:
BUILDTYPE: debug
CROSS: ppc64el
GALLIUM_DRIVERS: "nouveau,radeonsi,llvmpipe,softpipe,virgl,zink"
GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
VULKAN_DRIVERS: "amd,swrast"
DRI_LOADERS:
-D glvnd=disabled
# This job tests our Python scripts, and also emits our scripts into
# artifacts, so they can be reused for job submission to hardware devices.
python-test:
stage: build-for-tests
extends:
- .use-debian/x86_64_pyutils
- .build-common
variables:
GIT_STRATEGY: fetch
S3_ARTIFACT_NAME: mesa-python-test
timeout: 10m
script:
- .gitlab-ci/run-pytest.sh
- .gitlab-ci/prepare-artifacts-python.sh

View File

@@ -7,7 +7,7 @@ while true; do
devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null)
for i in $devcds; do
echo "Found a devcoredump at $i."
if cp $i $RESULTS_DIR/first.devcore; then
if cp $i /results/first.devcore; then
echo 1 > $i
echo "Saved to the job artifacts at /first.devcore"
exit 0
@@ -23,7 +23,7 @@ while true; do
rm "$tmpfile"
else
echo "Found an i915 error state at $i size=$filesize."
if cp "$tmpfile" $RESULTS_DIR/first.i915_error_state; then
if cp "$tmpfile" /results/first.i915_error_state; then
rm "$tmpfile"
echo 1 > "$i"
echo "Saved to the job artifacts at /first.i915_error_state"

View File

@@ -10,7 +10,7 @@ VARS=(
CI_COMMIT_REF_NAME
CI_COMMIT_TITLE
CI_JOB_ID
S3_JWT_FILE
CI_JOB_JWT_FILE
CI_JOB_STARTED_AT
CI_JOB_NAME
CI_JOB_URL
@@ -35,7 +35,7 @@ VARS=(
DEQP_EXPECTED_RENDERER
DEQP_FRACTION
DEQP_HEIGHT
DEQP_RUNNER_MAX_FAILS
DEQP_RESULTS_DIR
DEQP_RUNNER_OPTIONS
DEQP_SUITE
DEQP_TEMP_DIR
@@ -56,6 +56,7 @@ VARS=(
GTEST
GTEST_FAILS
GTEST_FRACTION
GTEST_RESULTS_DIR
GTEST_RUNNER_OPTIONS
GTEST_SKIPS
HWCI_FREQ_MAX
@@ -63,7 +64,6 @@ VARS=(
HWCI_KVM
HWCI_START_WESTON
HWCI_START_XORG
HWCI_TEST_ARGS
HWCI_TEST_SCRIPT
IR3_SHADER_DEBUG
JOB_ARTIFACTS_BASE
@@ -84,9 +84,7 @@ VARS=(
MESA_IMAGE_PATH
MESA_IMAGE_TAG
MESA_LOADER_DRIVER_OVERRIDE
MESA_SPIRV_LOG_LEVEL
MESA_TEMPLATES_COMMIT
MESA_VK_ABORT_ON_DEVICE_LOSS
MESA_VK_IGNORE_CONFORMANCE_WARNING
S3_HOST
S3_RESULTS_UPLOAD
@@ -107,7 +105,6 @@ VARS=(
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE
PIGLIT_REPLAY_SUBCOMMAND
PIGLIT_RESULTS
PIGLIT_RUNNER_OPTIONS
PIGLIT_TESTS
PIGLIT_TRACES_FILE
PIPELINE_ARTIFACTS_BASE
@@ -119,16 +116,17 @@ VARS=(
TU_DEBUG
USE_ANGLE
VIRGL_HOST_API
VIRGL_RENDER_SERVER
WAFFLE_PLATFORM
VK_CPU
VK_DRIVER
# required by virglrender CI
VK_DRIVER_FILES
VKD3D_PROTON_RESULTS
VKD3D_CONFIG
VKD3D_TEST_EXCLUDE
ZINK_DESCRIPTORS
ZINK_DEBUG
LVP_POISON_MEMORY
# Dead code within Mesa CI, but required by virglrender CI
# (because they include our files in their CI)
VK_DRIVER_FILES
)
for var in "${VARS[@]}"; do

View File

@@ -47,13 +47,6 @@ for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; d
done
. "$SCRIPTS_DIR"/setup-test-env.sh
# Flush out anything which might be stuck in a serial buffer
echo
echo
echo
section_switch init_stage2 "Pre-testing hardware setup"
set -ex
# Set up any devices required by the jobs
@@ -165,9 +158,6 @@ if [ -x /capture-devcoredump.sh ]; then
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
fi
ARCH=$(uname -m)
export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json"
# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
# without using -displayfd you can race with Xorg's startup), but xinit will eat
@@ -175,7 +165,8 @@ export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json"
if [ -n "$HWCI_START_XORG" ]; then
echo "touch /xorg-started; sleep 100000" > /xorg-script
env \
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile "$RESULTS_DIR/Xorg.0.log" &
VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
# Wait for xorg to be ready for connections.
@@ -201,24 +192,22 @@ if [ -n "$HWCI_START_WESTON" ]; then
mkdir -p /tmp/.X11-unix
env \
VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
fi
set +x
section_end init_stage2
echo "Running ${HWCI_TEST_SCRIPT} ${HWCI_TEST_ARGS} ..."
set +e
$HWCI_TEST_SCRIPT ${HWCI_TEST_ARGS:-}; EXIT_CODE=$?
bash -c ". $SCRIPTS_DIR/setup-test-env.sh && $HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e
section_start post_test_cleanup "Cleaning up after testing, uploading results"
set -x
# Let's make sure the results are always stored in current working directory
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"
# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
@@ -228,7 +217,7 @@ cleanup
# upload artifacts
if [ -n "$S3_RESULTS_UPLOAD" ]; then
tar --zstd -cf results.tar.zst results/;
ci-fairy s3cp --token-file "${S3_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
fi
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
@@ -236,12 +225,11 @@ fi
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
set +x
section_end post_test_cleanup
# Print the final result; both bare-metal and LAVA look for this string to get
# the result of our run, so try really hard to get it out rather than losing
# the run. The device gets shut down right at this point, and a630 seems to
# enjoy corrupting the last line of serial output before shutdown.
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done
exit $EXIT_CODE

View File

@@ -560,8 +560,7 @@ set_cpu_freq_max() {
read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }
target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
tf_res=$?
[ -z "${target_freq}" ] && { res=$tf_res; continue; }
[ -z "${target_freq}" ] && { res=$?; continue; }
log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
[ -n "${DRY_RUN}" ] && continue

View File

@@ -1,18 +1,24 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091 # the path is created in build-kdl and
# here we only check whether it exists
# shellcheck disable=SC2086 # we want the arguments to be expanded
if ! [ -f /ci-kdl/bin/activate ]; then
echo -e "ci-kdl not installed; not monitoring temperature"
exit 0
terminate() {
echo "ci-kdl.sh caught SIGTERM signal! propagating to child processes"
for job in $(jobs -p)
do
kill -15 "$job"
done
}
trap terminate SIGTERM
if [ -f /ci-kdl.venv/bin/activate ]; then
source /ci-kdl.venv/bin/activate
/ci-kdl.venv/bin/python /ci-kdl.venv/bin/ci-kdl | tee -a /results/kdl.log &
child=$!
wait $child
mv kdl_*.json /results/kdl.json
else
echo -e "Not possible to activate ci-kdl virtual environment"
fi
KDL_ARGS="
--output-file=${RESULTS_DIR}/kdl.json
--log-level=WARNING
--num-samples=-1
"
source /ci-kdl/bin/activate
exec /ci-kdl/bin/ci-kdl ${KDL_ARGS}
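An editorial sketch, not part of the diff: the terminate()/trap handler in this hunk forwards SIGTERM to the background ci-kdl pipeline so it can shut down cleanly before the script exits. The same idea in Python, purely as an illustration:

import signal
import subprocess
import sys

# hypothetical long-running child standing in for the ci-kdl | tee pipeline
child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(3600)"])

def terminate(signum, frame):
    child.send_signal(signal.SIGTERM)  # propagate the signal to the child
    child.wait()
    sys.exit(128 + signal.SIGTERM)     # conventional exit status for SIGTERM

signal.signal(signal.SIGTERM, terminate)
child.wait()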

.gitlab-ci/common/start-x.sh (new executable file, 21 lines)
View File

@@ -0,0 +1,21 @@
#!/bin/sh
set -ex
_XORG_SCRIPT="/xorg-script"
_FLAG_FILE="/xorg-started"
echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
if [ "x$1" != "x" ]; then
export LD_LIBRARY_PATH="${1}/lib"
export LIBGL_DRIVERS_PATH="${1}/lib/dri"
fi
xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &
# Wait for xorg to be ready for connections.
for _ in 1 2 3 4 5; do
if [ -e "${_FLAG_FILE}" ]; then
break
fi
sleep 5
done
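An editorial sketch, not part of the diff: the loop at the end of start-x.sh polls for the /xorg-started flag file that the xinit-launched script touches, giving Xorg up to roughly 25 seconds to come up. Restated in Python:

import os
import time

FLAG_FILE = "/xorg-started"   # touched by the script that xinit runs
for _ in range(5):
    if os.path.exists(FLAG_FILE):
        break
    time.sleep(5)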

View File

@@ -8,6 +8,8 @@
set -e
set -o xtrace
export LLVM_VERSION="${LLVM_VERSION:=16}"
EPHEMERAL=(
)
@@ -16,7 +18,7 @@ DEPS=(
bash
bison
ccache
"clang${LLVM_VERSION}-dev"
clang16-dev
cmake
clang-dev
coreutils
@@ -27,31 +29,23 @@ DEPS=(
git
gettext
glslang
graphviz
linux-headers
"llvm${LLVM_VERSION}-static"
"llvm${LLVM_VERSION}-dev"
llvm16-static
llvm16-dev
meson
mold
musl-dev
expat-dev
elfutils-dev
libclc-dev
libdrm-dev
libselinux-dev
libva-dev
libpciaccess-dev
zlib-dev
python3-dev
py3-clang
py3-cparser
py3-mako
py3-packaging
py3-pip
py3-ply
py3-yaml
vulkan-headers
spirv-tools-dev
spirv-llvm-translator-dev
util-macros
wayland-dev
wayland-protocols
@@ -59,20 +53,15 @@ DEPS=(
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
pip3 install --break-system-packages sphinx===5.1.1 hawkmoth===0.16.0
. .gitlab-ci/container/build-llvm-spirv.sh
. .gitlab-ci/container/build-libclc.sh
. .gitlab-ci/container/container_pre_build.sh
############### Uninstall the build software
# too many vendor binaries, just keep the ones we need
find /usr/share/clc \
\( -type f -o -type l \) \
! -name 'spirv-mesa3d-.spv' \
! -name 'spirv64-mesa3d-.spv' \
-delete
apk del "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,81 +1,41 @@
#!/usr/bin/env bash
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# KERNEL_ROOTFS_TAG
set -ex
set -uex
ANGLE_REV="76025caa1a059f464a2b0e8f879dbd4746f092b9"
SCRIPTS_DIR="$(pwd)/.gitlab-ci"
ANGLE_PATCH_DIR="${SCRIPTS_DIR}/container/patches"
ANGLE_REV="0518a3ff4d4e7e5b2ce8203358f719613a31c118"
# DEPOT tools
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git /depot-tools
export PATH=/depot-tools:$PATH
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git
PWD=$(pwd)
export PATH=$PWD/depot_tools:$PATH
export DEPOT_TOOLS_UPDATE=0
mkdir /angle-build
mkdir /angle
pushd /angle-build
git init
git remote add origin https://chromium.googlesource.com/angle/angle.git
git fetch --depth 1 origin "$ANGLE_REV"
git checkout FETCH_HEAD
angle_patch_files=(
build-angle_deps_Make-more-sources-conditional.patch
)
for patch in "${angle_patch_files[@]}"; do
echo "Apply patch to ANGLE from ${patch}"
GIT_COMMITTER_DATE=$(date -d@0) git am < "${ANGLE_PATCH_DIR}/${patch}"
done
{
echo "ANGLE base version $ANGLE_REV"
echo "The following local patches are applied on top:"
git log --reverse --oneline $ANGLE_REV.. --format='- %s'
} > /angle/version
# source preparation
gclient config --name REPLACE-WITH-A-DOT --unmanaged \
--custom-var='angle_enable_cl=False' \
--custom-var='angle_enable_cl_testing=False' \
--custom-var='angle_enable_vulkan_validation_layers=False' \
--custom-var='angle_enable_wgpu=False' \
--custom-var='build_allow_regenerate=False' \
--custom-var='build_angle_deqp_tests=False' \
--custom-var='build_angle_perftests=False' \
--custom-var='build_with_catapult=False' \
--custom-var='build_with_swiftshader=False' \
https://chromium.googlesource.com/angle/angle.git
sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient
gclient sync -j"${FDO_CI_CONCURRENT:-4}"
python3 scripts/bootstrap.py
mkdir -p build/config
gclient sync
sed -i "/catapult/d" testing/BUILD.gn
mkdir -p out/Release
echo '
angle_build_all=false
angle_build_tests=false
angle_enable_cl=false
angle_enable_cl_testing=false
angle_enable_gl=false
angle_enable_gl_desktop_backend=false
angle_enable_null=false
angle_enable_swiftshader=false
angle_enable_trace=false
angle_enable_wgpu=false
angle_enable_vulkan=true
angle_enable_vulkan_api_dump_layer=false
angle_enable_vulkan_validation_layers=false
angle_has_frame_capture=false
angle_has_histograms=false
angle_use_custom_libvulkan=false
angle_egl_extension="so.1"
angle_glesv2_extension="so.2"
build_angle_deqp_tests=false
is_debug = false
angle_enable_swiftshader = false
angle_enable_null = false
angle_enable_gl = false
angle_enable_vulkan = true
angle_has_histograms = false
build_angle_trace_perf_tests = false
build_angle_deqp_tests = false
angle_use_custom_libvulkan = false
dcheck_always_on=true
enable_expensive_dchecks=false
is_debug=false
' > out/Release/args.gn
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
@@ -85,15 +45,14 @@ fi
gn gen out/Release
# depot_tools overrides ninja with a version that doesn't work. We want
# ninja with FDO_CI_CONCURRENT anyway.
/usr/local/bin/ninja -C out/Release/ libEGL libGLESv2
/usr/local/bin/ninja -C out/Release/
rm -f out/Release/libvulkan.so* out/Release/*.so.TOC
cp out/Release/lib*.so* /angle/
ln -s libEGL.so.1 /angle/libEGL.so
ln -s libGLESv2.so.2 /angle/libGLESv2.so
mkdir /angle
cp out/Release/lib*GL*.so /angle/
ln -s libEGL.so /angle/libEGL.so.1
ln -s libGLESv2.so /angle/libGLESv2.so.2
rm -rf out
popd
rm -rf /depot-tools
rm -rf /angle-build
rm -rf ./depot_tools

View File

@@ -3,11 +3,11 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_GL_TAG
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -uex
set -ex
APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a"
@@ -15,7 +15,7 @@ git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout
pushd /apitrace
git checkout "$APITRACE_VERSION"
git submodule update --init --depth 1 --recursive
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on ${EXTRA_CMAKE_ARGS:-}
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
cmake --build _build --parallel --target apitrace eglretrace
mkdir build
cp _build/apitrace build

View File

@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
BINDGEN_VER=0.65.1
CBINDGEN_VER=0.26.0
# bindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen-cli --version ${BINDGEN_VER} \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local
# cbindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
cbindgen --version ${CBINDGEN_VER} \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
set -uex
set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
@@ -17,7 +17,7 @@ rm -rf third_party/virglrenderer
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
pushd third_party/virglrenderer
git checkout "$VIRGLRENDERER_VERSION"
meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true ${EXTRA_MESON_ARGS:-}
meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true $EXTRA_MESON_ARGS
meson install -C build
popd
@@ -29,7 +29,7 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local \
--version 0.65.1 \
${EXTRA_CARGO_ARGS:-}
$EXTRA_CARGO_ARGS
CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
-j ${FDO_CI_CONCURRENT:-4} \
@@ -37,7 +37,7 @@ CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L nati
--features 'default-no-sandbox gpu x virgl_renderer' \
--path . \
--root /usr/local \
${EXTRA_CARGO_ARGS:-}
$EXTRA_CARGO_ARGS
popd

View File

@@ -3,73 +3,47 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_ANDROID_TAG
# DEBIAN_BASE_TAG
# DEBIAN_X86_64_TEST_ANDROID_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -uex
set -ex
DEQP_RUNNER_VERSION=0.20.2
commits_to_backport=(
)
patch_files=(
)
DEQP_RUNNER_VERSION=0.18.0
DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}"
if [ -n "${DEQP_RUNNER_GIT_TAG:-}" ]; then
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG"
elif [ -n "${DEQP_RUNNER_GIT_REV:-}" ]; then
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV"
if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
# Build and install from source
DEQP_RUNNER_CARGO_ARGS="--git $DEQP_RUNNER_GIT_URL"
if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG"
else
DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV"
fi
DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
else
# Install from package registry
DEQP_RUNNER_CARGO_ARGS="--version ${DEQP_RUNNER_VERSION} ${EXTRA_CARGO_ARGS} -- deqp-runner"
DEQP_RUNNER_GIT_CHECKOUT="v$DEQP_RUNNER_VERSION"
fi
BASE_PWD=$PWD
mkdir -p /deqp-runner
pushd /deqp-runner
mkdir deqp-runner-git
pushd deqp-runner-git
git init
git remote add origin "$DEQP_RUNNER_GIT_URL"
git fetch --depth 1 origin "$DEQP_RUNNER_GIT_CHECKOUT"
git checkout FETCH_HEAD
for commit in "${commits_to_backport[@]}"
do
PATCH_URL="https://gitlab.freedesktop.org/mesa/deqp-runner/-/commit/$commit.patch"
echo "Backport deqp-runner commit $commit from $PATCH_URL"
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | git am
done
for patch in "${patch_files[@]}"
do
echo "Apply patch to deqp-runner from $patch"
git am "$BASE_PWD/.gitlab-ci/container/patches/$patch"
done
if [ -z "${RUST_TARGET:-}" ]; then
RUST_TARGET=""
fi
if [[ "$RUST_TARGET" != *-android ]]; then
# When CC (/usr/lib/ccache/gcc) variable is set, the rust compiler uses
# this variable when cross-compiling arm32 and build fails for zsys-sys.
# So unset the CC variable when cross-compiling for arm32.
SAVEDCC=${CC:-}
if [ "$RUST_TARGET" = "armv7-unknown-linux-gnueabihf" ]; then
unset CC
fi
cargo install --locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local \
${EXTRA_CARGO_ARGS:-} \
--path .
CC=$SAVEDCC
${DEQP_RUNNER_CARGO_ARGS}
else
mkdir -p /deqp-runner
pushd /deqp-runner
git clone --branch "$DEQP_RUNNER_GIT_CHECKOUT" --depth 1 "$DEQP_RUNNER_GIT_URL" deqp-runner-git
pushd deqp-runner-git
cargo install --locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local --version 2.10.0 \
@@ -83,14 +57,14 @@ else
cargo uninstall --locked \
--root /usr/local \
cargo-ndk
fi
popd
rm -rf deqp-runner-git
popd
popd
rm -rf deqp-runner-git
popd
fi
# remove unused test runners to shrink images for the Mesa CI build (not kernel,
# which chooses its own deqp branch)
if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then
if [ -z "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
rm -f /usr/local/bin/igt-runner
fi

View File

@@ -3,12 +3,12 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_ANDROID_TAG
# DEBIAN_TEST_GL_TAG
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_ANDROID_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -uex -o pipefail
set -ex -o pipefail
# See `deqp_build_targets` below for which release is used to produce which
# binary. Unless this comment has bitrotten:
@@ -16,9 +16,9 @@ set -uex -o pipefail
# - the GL release produces `glcts`, and
# - the GLES release produces `deqp-gles*` and `deqp-egl`
DEQP_VK_VERSION=1.3.10.0
DEQP_GL_VERSION=4.6.5.0
DEQP_GLES_VERSION=3.2.11.0
DEQP_VK_VERSION=1.3.8.2
DEQP_GL_VERSION=4.6.4.0
DEQP_GLES_VERSION=3.2.10.0
# Patches to VulkanCTS may come from commits in their repo (listed in
# cts_commits_to_backport) or patch files stored in our repo (in the patch
@@ -28,8 +28,11 @@ DEQP_GLES_VERSION=3.2.11.0
# shellcheck disable=SC2034
vk_cts_commits_to_backport=(
# Remove multi-line test results in DRM format modifier tests
8c95af68a2a85cbdc7e1d9267ab029f73e9427d2
# Fix more ASAN errors due to missing virtual destructors
dd40bcfef1b4035ea55480b6fd4d884447120768
# Remove "unused shader stages" tests
7dac86c6bbd15dec91d7d9a98cd6dd57c11092a7
)
# shellcheck disable=SC2034
@@ -61,6 +64,8 @@ fi
# shellcheck disable=SC2034
# GLES builds also EGL
gles_cts_commits_to_backport=(
# Implement support for the EGL_EXT_config_select_group extension
88ba9ac270db5be600b1ecacbc6d9db0c55d5be4
)
# shellcheck disable=SC2034
@@ -106,20 +111,20 @@ do
PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL"
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \
GIT_COMMITTER_DATE=$(date -d@0) git am -
git am -
done
cts_patch_files="${deqp_api}_cts_patch_files[@]"
for patch in "${!cts_patch_files}"
do
echo "Apply patch to ${DEQP_API} CTS from $patch"
GIT_COMMITTER_DATE=$(date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch
git am < $OLDPWD/.gitlab-ci/container/patches/$patch
done
{
echo "dEQP base version $DEQP_VERSION"
echo "The following local patches are applied on top:"
git log --reverse --oneline $DEQP_VERSION.. --format='- %s'
git log --reverse --oneline $DEQP_VERSION.. --format=%s | sed 's/^/- /'
} > /deqp/version-$deqp_api
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
@@ -138,7 +143,7 @@ if [ "${DEQP_API}" = 'GLES' ]; then
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=android \
-DCMAKE_BUILD_TYPE=Release \
${EXTRA_CMAKE_ARGS:-}
$EXTRA_CMAKE_ARGS
mold --run ninja modules/egl/deqp-egl
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android
else
@@ -147,14 +152,14 @@ if [ "${DEQP_API}" = 'GLES' ]; then
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=x11_egl_glx \
-DCMAKE_BUILD_TYPE=Release \
${EXTRA_CMAKE_ARGS:-}
$EXTRA_CMAKE_ARGS
mold --run ninja modules/egl/deqp-egl
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=wayland \
-DCMAKE_BUILD_TYPE=Release \
${EXTRA_CMAKE_ARGS:-}
$EXTRA_CMAKE_ARGS
mold --run ninja modules/egl/deqp-egl
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland
fi
@@ -163,7 +168,7 @@ fi
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=${DEQP_TARGET} \
-DCMAKE_BUILD_TYPE=Release \
${EXTRA_CMAKE_ARGS:-}
$EXTRA_CMAKE_ARGS
# Make sure `default` doesn't silently stop detecting one of the platforms we care about
if [ "${DEQP_TARGET}" = 'default' ]; then
@@ -206,22 +211,22 @@ if [ "${DEQP_TARGET}" != 'android' ]; then
if [ "${DEQP_API}" = 'GL' ]; then
cp \
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \
/VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-main.txt \
/deqp/mustpass/
cp \
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \
/VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass_single/4.6.1.x/*-single.txt \
/deqp/mustpass/
fi
if [ "${DEQP_API}" = 'GLES' ]; then
cp \
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \
/VK-GL-CTS/external/openglcts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
/deqp/mustpass/
cp \
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \
/VK-GL-CTS/external/openglcts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-main.txt \
/deqp/mustpass/
cp \
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \
/VK-GL-CTS/external/openglcts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-main.txt \
/deqp/mustpass/
fi
@@ -233,10 +238,6 @@ if [ "${DEQP_TARGET}" != 'android' ]; then
mv /deqp/executor.save /deqp/executor
fi
# Compress the caselists, since Vulkan's in particular are gigantic; higher
# compression levels provide no real measurable benefit.
zstd -1 --rm /deqp/mustpass/*.txt
# Remove other mustpass files, since we saved off the ones we wanted to convenient locations above.
rm -rf /deqp/external/**/mustpass/
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-main*

View File

@@ -5,11 +5,11 @@
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BUILD_TAG
set -uex
set -ex
git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1
git clone https://github.com/microsoft/DirectX-Headers -b v1.613.1 --depth 1
pushd DirectX-Headers
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-}
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false $EXTRA_MESON_ARGS
meson install -C build
popd
rm -rf DirectX-Headers

View File

@@ -2,7 +2,7 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -ex

View File

@@ -0,0 +1,16 @@
#!/bin/bash
set -ex
PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
pushd /parallel-deqp-runner
git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
meson . _build
ninja -C _build hang-detection
mkdir -p build/bin
install _build/hang-detection build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd

View File

@@ -3,26 +3,21 @@
set -ex
KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f"
KDL_CHECKOUT_DIR="/tmp/ci-kdl.git"
KDL_REVISION="5056f71b100a68b72b285c6fc845a66a2ed25985"
mkdir -p ${KDL_CHECKOUT_DIR}
pushd ${KDL_CHECKOUT_DIR}
mkdir ci-kdl.git
pushd ci-kdl.git
git init
git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git
git fetch --depth 1 origin ${KDL_REVISION}
git checkout FETCH_HEAD
popd
# Run venv in a subshell, so we don't accidentally leak the venv state into
# calling scripts
(
python3 -m venv /ci-kdl
source /ci-kdl/bin/activate &&
pushd ${KDL_CHECKOUT_DIR} &&
pip install -r requirements.txt &&
pip install . &&
popd
)
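# A minimal sketch (illustrative only) of why the subshell matters: anything
# `activate` exports, such as PATH and VIRTUAL_ENV, disappears when the
# subshell exits, so the calling script keeps its original interpreter.
(
source /ci-kdl/bin/activate
command -v python3   # resolves to the venv's python3
)
command -v python3   # back to the system python3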
python3 -m venv ci-kdl.venv
source ci-kdl.venv/bin/activate
pushd ci-kdl.git
pip install -r requirements.txt
pip install .
popd
rm -rf ${KDL_CHECKOUT_DIR}
rm -rf ci-kdl.git

View File

@@ -2,7 +2,7 @@
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC2153
set -uex
set -ex
mkdir -p kernel
pushd kernel
@@ -11,12 +11,12 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
KERNEL_IMAGE_NAME+=" cheza-kernel"
fi
for image in ${KERNEL_IMAGE_NAME:-}; do
for image in ${KERNEL_IMAGE_NAME}; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-o "/lava-files/${image}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${image}"
done
for dtb in ${DEVICE_TREES:-}; do
for dtb in ${DEVICE_TREES}; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-o "/lava-files/${dtb}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${dtb}"
done

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -uex
set -ex
export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
LLVM_TAG="llvmorg-15.0.7"

View File

@@ -1,17 +1,16 @@
#!/usr/bin/env bash
# Script used for Android and Fedora builds (Debian builds get their libdrm version
# from https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo - see PKG_REPO_REV)
# Script used for Android and Fedora builds
# shellcheck disable=SC2086 # we want word splitting
set -uex
set -ex
export LIBDRM_VERSION=libdrm-2.4.122
export LIBDRM_VERSION=libdrm-2.4.119
curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz
tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz
cd "$LIBDRM_VERSION"
meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled ${EXTRA_MESON_ARGS:-}
meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled $EXTRA_MESON_ARGS
meson install -C build
cd ..
rm -rf "$LIBDRM_VERSION"

View File

@@ -4,20 +4,19 @@ set -ex
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# ALPINE_X86_64_BUILD_TAG
# DEBIAN_BASE_TAG
# DEBIAN_BUILD_TAG
# FEDORA_X86_64_BUILD_TAG
# KERNEL_ROOTFS_TAG
MOLD_VERSION="2.32.0"
MOLD_VERSION="2.4.1"
git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
pushd mold
cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}"
cmake --install . --strip
cmake --build . --parallel
cmake --install .
popd
rm -rf mold

View File

@@ -2,7 +2,7 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_GL_TAG
# DEBIAN_X86_64_TEST_GL_TAG
set -ex -o pipefail

View File

@@ -1,25 +1,24 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting
set -uex
set -ex
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_GL_TAG
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
REV="c2b31333926a6171c3c02d182b756efad7770410"
REV="f7ece74a107a2f99b2f494d978c84f8d51faa703"
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout "$REV"
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS ${EXTRA_CMAKE_ARGS:-}
ninja ${PIGLIT_BUILD_TARGETS:-}
find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) \
! -name 'include_test.h' -exec rm -rf {} \;
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) -exec rm -rf {} \;
rm -rf target_api
if [ "${PIGLIT_BUILD_TARGETS:-}" = "piglit_replayer" ]; then
if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then
find . -depth \
! -regex "^\.$" \
! -regex "^\.\/piglit.*" \

View File

@@ -15,7 +15,7 @@ ln -s /usr/local/bin "$HOME"/.cargo/bin
#
# Also, pick a specific snapshot from rustup so the compiler doesn't drift on
# us.
RUST_VERSION=1.76.0-2024-02-08
RUST_VERSION=1.73.0-2023-10-05
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes
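# A rough sketch of how a pin like RUST_VERSION avoids drift (the exact rustup
# invocation used by this script is not shown here; these two commands are an
# assumption): install and select one specific release instead of "stable".
rustup toolchain install 1.76.0 --profile minimal
rustup default 1.76.0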

View File

@@ -8,31 +8,13 @@
# .gitlab-ci/image-tags.yml tags:
# KERNEL_ROOTFS_TAG
set -uex
SKQP_BRANCH=android-cts-12.1_r5
SCRIPT_DIR="$(pwd)/.gitlab-ci/container"
SKQP_PATCH_DIR="${SCRIPT_DIR}/patches"
BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
case "$DEBIAN_ARCH" in
amd64)
SKQP_ARCH=x64
;;
armhf)
SKQP_ARCH=arm
;;
arm64)
SKQP_ARCH=arm64
;;
esac
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp}
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms)
# hack for skqp: see the clang symlinks below
pushd /usr/bin/
ln -s ../lib/llvm-15/bin/clang clang
ln -s ../lib/llvm-15/bin/clang++ clang++
popd
create_gn_args() {
# gn can be configured to cross-compile skia and its tools
@@ -56,6 +38,19 @@ download_skia_source() {
git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
}
set -ex
SCRIPT_DIR=$(realpath "$(dirname "$0")")
SKQP_PATCH_DIR="${SCRIPT_DIR}/patches"
BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
SKQP_ARCH=${SKQP_ARCH:-x64}
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp}
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms)
download_skia_source
pushd "${SKIA_DIR}"
@@ -64,12 +59,6 @@ pushd "${SKIA_DIR}"
cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
patch -p1
# hack for skqp: see the clang symlinks below
pushd /usr/bin/
ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang" clang
ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang++" clang++
popd
# Fetch some needed build tools needed to build skia/skqp.
# Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS
# directory.

View File

@@ -4,7 +4,7 @@
# .gitlab-ci/image-tags.yml tags:
# KERNEL_ROOTFS_TAG
set -uex
set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
@@ -19,7 +19,7 @@ pushd /va-utils
# Too old libva in Debian 11. TODO: when this PR gets in, refer to the patch.
curl -L https://github.com/intel/libva-utils/pull/329.patch | git am
meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-}
meson setup build -D tests=true -Dprefix=/va $EXTRA_MESON_ARGS
meson install -C build
popd
rm -rf /va-utils

View File

@@ -2,20 +2,21 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -ex
VKD3D_PROTON_COMMIT="59d6d4b5ed23766e69fe252408a3401d2fd52ce8"
VKD3D_PROTON_COMMIT="c3b385606a93baed42482d822805e0d9c2f3f603"
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build"
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION"
function build_arch {
local arch="$1"
shift
meson setup \
meson "$@" \
-Denable_tests=true \
--buildtype release \
--prefix "$VKD3D_PROTON_DST_DIR" \
@@ -36,11 +37,6 @@ git submodule update --init --recursive
git submodule update --recursive
build_arch 64
build_arch 86
mkdir "$VKD3D_PROTON_DST_DIR/tests"
cp \
"tests/test-runner.sh" \
"tests/d3d12_tests.h" \
"$VKD3D_PROTON_DST_DIR/tests/"
popd
rm -rf "$VKD3D_PROTON_BUILD_DIR"

View File

@@ -2,18 +2,17 @@
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_TEST_GL_TAG
# KERNEL_ROOTFS_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# KERNEL_ROOTFS_TAG:
set -uex
set -ex
VALIDATION_TAG="snapshot-2024wk39"
VALIDATION_TAG="v1.3.281"
git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
pushd Vulkan-ValidationLayers
python3 scripts/update_deps.py --dir external --config release --generator Ninja
python3 scripts/update_deps.py --dir external --config debug
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build
ninja -C build
cmake --install build --strip
ninja -C build install
popd
rm -rf Vulkan-ValidationLayers

View File

@@ -1,24 +1,24 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
set -uex
set -ex
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BUILD_TAG
# DEBIAN_TEST_ANDROID_TAG
# DEBIAN_TEST_GL_TAG
# DEBIAN_TEST_VK_TAG
# DEBIAN_X86_64_TEST_ANDROID_TAG
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# FEDORA_X86_64_BUILD_TAG
# KERNEL_ROOTFS_TAG
export LIBWAYLAND_VERSION="1.21.0"
export WAYLAND_PROTOCOLS_VERSION="1.38"
export WAYLAND_PROTOCOLS_VERSION="1.34"
git clone https://gitlab.freedesktop.org/wayland/wayland
cd wayland
git checkout "$LIBWAYLAND_VERSION"
meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build ${EXTRA_MESON_ARGS:-}
meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build $EXTRA_MESON_ARGS
meson install -C _build
cd ..
rm -rf wayland
@@ -26,7 +26,7 @@ rm -rf wayland
git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
cd wayland-protocols
git checkout "$WAYLAND_PROTOCOLS_VERSION"
meson setup -Dtests=false _build ${EXTRA_MESON_ARGS:-}
meson setup _build $EXTRA_MESON_ARGS
meson install -C _build
cd ..
rm -rf wayland-protocols

View File

@@ -13,8 +13,8 @@ if test -x /usr/bin/ccache; then
export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
export CCACHE_DIR="/cache/$CI_PROJECT_NAME/ccache"
export PATH="$CCACHE_PATH:$PATH"
export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache
export PATH=$CCACHE_PATH:$PATH
# CMake ignores $PATH, so we have to force CC/GCC to the ccache versions.
export CC="${CCACHE_PATH}/gcc"
@@ -27,14 +27,9 @@ fi
# linkers to gold, since it's so much faster for building. We can't use
# lld because we're on old debian and it's buggy. mingw fails meson builds
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
if [ -e /usr/bin/ld.gold ]; then
find /usr/bin -name \*-ld -o -name ld | \
find /usr/bin -name \*-ld -o -name ld | \
grep -v mingw | \
xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'
else
echo "ld.gold is missing, not replacing ld with it."
echo "Builds might be slower, consider installing gold."
fi
# Make a wrapper script for ninja to always include the -j flags
{
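# A minimal sketch of such a wrapper (paths and the fallback value are
# assumptions, not necessarily what this script writes): a shim earlier in PATH
# that forwards to the real ninja with the CI's -j setting.
cat > /usr/local/bin/ninja <<'EOF'
#!/bin/sh
exec /usr/bin/ninja -j"${FDO_CI_CONCURRENT:-4}" "$@"
EOF
chmod +x /usr/local/bin/ninja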

View File

@@ -22,7 +22,7 @@ cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_v
c_ld = 'lld'
cpp_ld = 'lld'
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip'
pkg-config = ['/usr/bin/pkgconf']
pkgconfig = ['/usr/bin/pkgconf']
[host_machine]
system = 'android'

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -e
arch=armhf . .gitlab-ci/container/debian/arm_test.sh

View File

@@ -26,7 +26,6 @@ DEPS=(
ccache
cmake
curl
"clang-${LLVM_VERSION}"
fastboot
flatbuffers-compiler
flex
@@ -34,15 +33,11 @@ DEPS=(
git
glslang-tools
kmod
"libclang-${LLVM_VERSION}-dev"
"libclang-cpp${LLVM_VERSION}-dev"
"libclang-common-${LLVM_VERSION}-dev"
libasan8
libdrm-dev
libelf-dev
libexpat1-dev
libflatbuffers-dev
"libllvm${LLVM_VERSION}"
libvulkan-dev
libx11-dev
libx11-xcb-dev
@@ -91,16 +86,6 @@ arch=armhf
. .gitlab-ci/container/build-wayland.sh
. .gitlab-ci/container/build-llvm-spirv.sh
. .gitlab-ci/container/build-libclc.sh
. .gitlab-ci/container/install-meson.sh
. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/build-bindgen.sh
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,4 +0,0 @@
#!/usr/bin/env bash
DEBIAN_ARCH=arm64 \
. .gitlab-ci/container/debian/test-base.sh

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
. .gitlab-ci/container/debian/test-gl.sh
. .gitlab-ci/container/strip-rootfs.sh

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
. .gitlab-ci/container/debian/test-vk.sh
. .gitlab-ci/container/strip-rootfs.sh

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
set -e
arch=arm64 . .gitlab-ci/container/debian/arm_test.sh

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
set -e
arch=armhf . .gitlab-ci/container/debian/baremetal_arm_test.sh

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
set -e
arch=arm64 . .gitlab-ci/container/debian/baremetal_arm_test.sh

View File

@@ -1,187 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BASE_TAG
set -e
. .gitlab-ci/setup-test-env.sh
set -o xtrace
uncollapsed_section_start debian_setup "Base Debian system setup"
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates gnupg2 software-properties-common
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
export LLVM_VERSION="${LLVM_VERSION:=15}"
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
autoconf
automake
bc
bison
bzip2
ccache
cmake
"clang-${LLVM_VERSION}"
dpkg-dev
flex
glslang-tools
g++
libasound2-dev
libcap-dev
"libclang-cpp${LLVM_VERSION}-dev"
libdrm-dev
libegl-dev
libelf-dev
libepoxy-dev
libgbm-dev
libpciaccess-dev
libssl-dev
libvulkan-dev
libwayland-dev
libx11-xcb-dev
libxext-dev
"llvm-${LLVM_VERSION}-dev"
make
meson
openssh-server
patch
pkgconf
protobuf-compiler
python3-dev
python3-pip
python3-setuptools
python3-wheel
spirv-tools
wayland-protocols
xz-utils
)
DEPS=(
apt-utils
curl
git
git-lfs
inetutils-syslogd
iptables
jq
libasan8
libdrm2
libexpat1
"libllvm${LLVM_VERSION}"
liblz4-1
libpng16-16
libpython3.11
libvulkan1
libwayland-client0
libwayland-server0
libxcb-ewmh2
libxcb-randr0
libxcb-xfixes0
libxkbcommon0
libxrandr2
libxrender1
python3-mako
python3-numpy
python3-packaging
python3-pil
python3-requests
python3-six
python3-yaml
socat
vulkan-tools
waffle-utils
xauth
xvfb
zlib1g
zstd
)
apt-get update
apt-get dist-upgrade -y
apt-get install --purge -y \
sysvinit-core libelogind0
apt-get install -y --no-remove "${DEPS[@]}"
apt-get install -y --no-install-recommends "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_pre_build.sh
# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
# Needed for manipulating trace YAML files.
pip3 install --break-system-packages yq
############### Download prebuilt kernel
if [ "$DEBIAN_ARCH" = amd64 ]; then
uncollapsed_section_switch kernel "Downloading kernel"
export KERNEL_IMAGE_NAME=bzImage
mkdir -p /lava-files/
. .gitlab-ci/container/download-prebuilt-kernel.sh
fi
############### Build mold
uncollapsed_section_switch mold "Building mold linker"
. .gitlab-ci/container/build-mold.sh
############### Build LLVM-SPIRV translator
uncollapsed_section_switch llvmspv "Building LLVM-SPIRV-Translator"
. .gitlab-ci/container/build-llvm-spirv.sh
############### Build libclc
uncollapsed_section_switch libclc "Building libclc"
. .gitlab-ci/container/build-libclc.sh
############### Build Wayland
uncollapsed_section_switch wayland "Building Wayland"
. .gitlab-ci/container/build-wayland.sh
############### Install Rust toolchain
uncollapsed_section_switch rust "Installing Rust toolchain"
. .gitlab-ci/container/build-rust.sh
############### Build Crosvm
uncollapsed_section_switch crosvm "Building crosvm"
. .gitlab-ci/container/build-crosvm.sh
############### Build dEQP runner
uncollapsed_section_switch deqpr "Building deqp-runner"
. .gitlab-ci/container/build-deqp-runner.sh
############### Uninstall the build software
uncollapsed_section_switch debian_cleanup "Cleaning up base Debian system"
apt-get purge -y "${EPHEMERAL[@]}"
rm -rf /root/.rustup
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,141 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
set -e
. .gitlab-ci/setup-test-env.sh
set -o xtrace
uncollapsed_section_start debian_setup "Base Debian system setup"
export DEBIAN_FRONTEND=noninteractive
export LLVM_VERSION="${LLVM_VERSION:=15}"
apt-get install -y libelogind0 # this interferes with systemd deps, install separately
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
bzip2
ccache
"clang-${LLVM_VERSION}"
cmake
dpkg-dev
g++
glslang-tools
libasound2-dev
libcap-dev
"libclang-cpp${LLVM_VERSION}-dev"
libdrm-dev
libgles2-mesa-dev
libgtest-dev
libpciaccess-dev
libpng-dev
libudev-dev
libvulkan-dev
libwaffle-dev
libwayland-dev
libx11-xcb-dev
libxcb-dri2-0-dev
libxcb-dri3-dev
libxcb-present-dev
libxfixes-dev
libxkbcommon-dev
libxrandr-dev
libxrender-dev
"llvm-${LLVM_VERSION}-dev"
make
meson
ocl-icd-opencl-dev
patch
pkgconf
python3-distutils
xz-utils
)
DEPS=(
clinfo
iptables
kmod
"libclang-common-${LLVM_VERSION}-dev"
"libclang-cpp${LLVM_VERSION}"
libcap2
libegl1
libepoxy0
libfdt1
libxcb-shm0
ocl-icd-libopencl1
python3-lxml
python3-renderdoc
python3-simplejson
spirv-tools
sysvinit-core
weston
xwayland
)
apt-get update
apt-get install -y --no-remove "${DEPS[@]}" "${EPHEMERAL[@]}" \
$EXTRA_LOCAL_PACKAGES
. .gitlab-ci/container/container_pre_build.sh
############### Build piglit
uncollapsed_section_switch piglit "Building Piglit"
PIGLIT_OPTS="-DPIGLIT_USE_WAFFLE=ON
-DPIGLIT_USE_GBM=ON
-DPIGLIT_USE_WAYLAND=ON
-DPIGLIT_USE_X11=ON
-DPIGLIT_BUILD_GLX_TESTS=ON
-DPIGLIT_BUILD_EGL_TESTS=ON
-DPIGLIT_BUILD_WGL_TESTS=OFF
-DPIGLIT_BUILD_GL_TESTS=ON
-DPIGLIT_BUILD_GLES1_TESTS=ON
-DPIGLIT_BUILD_GLES2_TESTS=ON
-DPIGLIT_BUILD_GLES3_TESTS=ON
-DPIGLIT_BUILD_CL_TESTS=ON
-DPIGLIT_BUILD_VK_TESTS=ON
-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" \
. .gitlab-ci/container/build-piglit.sh
############### Build dEQP GL
uncollapsed_section_switch piglit_gl "Building dEQP for GL"
DEQP_API=GL \
DEQP_TARGET=surfaceless \
. .gitlab-ci/container/build-deqp.sh
uncollapsed_section_switch piglit_gles "Building dEQP for GLES"
DEQP_API=GLES \
DEQP_TARGET=surfaceless \
. .gitlab-ci/container/build-deqp.sh
############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
############### Build validation layer for zink
uncollapsed_section_switch vvl "Building Vulkan validation layers"
. .gitlab-ci/container/build-vulkan-validation.sh
############### Build nine tests
uncollapsed_section_switch nine "Building Nine tests"
. .gitlab-ci/container/build-ninetests.sh
############### Uninstall the build software
uncollapsed_section_switch debian_cleanup "Cleaning up base Debian system"
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,145 +0,0 @@
#!/usr/bin/env bash
# The relative paths in this file only become valid at runtime.
# shellcheck disable=SC1091
# shellcheck disable=SC2086 # we want word splitting
set -e
. .gitlab-ci/setup-test-env.sh
set -o xtrace
uncollapsed_section_start debian_setup "Base Debian system setup"
export DEBIAN_FRONTEND=noninteractive
apt-get install -y libelogind0 # this interferes with systemd deps, install separately
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
ccache
cmake
dpkg-dev
g++
glslang-tools
libexpat1-dev
gnupg2
libdrm-dev
libgbm-dev
libgles2-mesa-dev
liblz4-dev
libpciaccess-dev
libudev-dev
libvulkan-dev
libwaffle-dev
libx11-xcb-dev
libxcb-dri2-0-dev
libxcb-ewmh-dev
libxcb-keysyms1-dev
libxkbcommon-dev
libxrandr-dev
libxrender-dev
libzstd-dev
meson
p7zip
patch
pkgconf
python3-dev
python3-distutils
python3-pip
python3-setuptools
python3-wheel
software-properties-common
wine64-tools
xz-utils
)
DEPS=(
curl
libepoxy0
libxcb-shm0
pciutils
python3-lxml
python3-simplejson
sysvinit-core
weston
xwayland
wine
wine64
xinit
xserver-xorg-video-amdgpu
xserver-xorg-video-ati
)
apt-get update
apt-get install -y --no-remove --no-install-recommends \
"${DEPS[@]}" "${EPHEMERAL[@]}"
############### Building ...
. .gitlab-ci/container/container_pre_build.sh
############### Build piglit replayer
uncollapsed_section_switch piglit "Building Piglit for Vulkan (traces only)"
# We don't run any _piglit_ Vulkan tests in the containers.
PIGLIT_OPTS="-DPIGLIT_USE_WAFFLE=ON
-DPIGLIT_USE_GBM=OFF
-DPIGLIT_USE_WAYLAND=OFF
-DPIGLIT_USE_X11=OFF
-DPIGLIT_BUILD_GLX_TESTS=OFF
-DPIGLIT_BUILD_EGL_TESTS=OFF
-DPIGLIT_BUILD_WGL_TESTS=OFF
-DPIGLIT_BUILD_GL_TESTS=OFF
-DPIGLIT_BUILD_GLES1_TESTS=OFF
-DPIGLIT_BUILD_GLES2_TESTS=OFF
-DPIGLIT_BUILD_GLES3_TESTS=OFF
-DPIGLIT_BUILD_CL_TESTS=OFF
-DPIGLIT_BUILD_VK_TESTS=OFF
-DPIGLIT_BUILD_DMA_BUF_TESTS=OFF" \
PIGLIT_BUILD_TARGETS="piglit_replayer" \
. .gitlab-ci/container/build-piglit.sh
############### Build dEQP VK
uncollapsed_section_switch deqp "Building Vulkan CTS (dEQP)"
DEQP_API=VK \
DEQP_TARGET=default \
. .gitlab-ci/container/build-deqp.sh
############### Build apitrace
uncollapsed_section_switch apitrace "Building apitrace"
. .gitlab-ci/container/build-apitrace.sh
############### Build Fossilize
uncollapsed_section_switch fossilize "Building Fossilize"
. .gitlab-ci/container/build-fossilize.sh
############### Build gfxreconstruct
uncollapsed_section_switch gfxreconstruct "Building gfxreconstruct"
. .gitlab-ci/container/build-gfxreconstruct.sh
############### Build VKD3D-Proton
uncollapsed_section_switch proton "Installing Proton (Wine/VKD3D emulation)"
. .gitlab-ci/container/setup-wine.sh "/vkd3d-proton-wine64"
. .gitlab-ci/container/build-vkd3d-proton.sh
############### Uninstall the build software
uncollapsed_section_switch debian_cleanup "Cleaning up base Debian system"
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -28,7 +28,6 @@ DEPS=(
"clang-${LLVM_VERSION}"
"clang-format-${LLVM_VERSION}"
dpkg-cross
dpkg-dev
findutils
flex
flatbuffers-compiler
@@ -47,6 +46,7 @@ DEPS=(
libflatbuffers-dev
libgtk-3-dev
"libllvm${LLVM_VERSION}"
libomxil-bellagio-dev
libpciaccess-dev
libunwind-dev
libva-dev
@@ -62,7 +62,6 @@ DEPS=(
libxtensor-dev
libxxf86vm-dev
libwayland-egl-backend-dev
"llvm-${LLVM_VERSION}-dev"
make
ninja-build
openssh-server
@@ -74,7 +73,6 @@ DEPS=(
python3-pycparser
python3-requests
python3-setuptools
python3-yaml
qemu-user
valgrind
x11proto-dri2-dev
@@ -97,7 +95,8 @@ apt-get install -y --no-remove "${DEPS[@]}" "${EPHEMERAL[@]}" \
# Needed for ci-fairy, this revision is able to upload files to S3
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
. .gitlab-ci/container/install-meson.sh
# We need at least 1.3.1 for rusticl
pip3 install --break-system-packages 'meson==1.3.1'
. .gitlab-ci/container/build-rust.sh

View File

@@ -81,9 +81,21 @@ rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-directx-headers.sh
. .gitlab-ci/container/build-bindgen.sh
python3 -m pip install --break-system-packages -r .gitlab-ci/lava/requirements.txt
python3 -m pip install --break-system-packages -r bin/ci/requirements.txt
# install bindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen-cli --version 0.62.0 \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local
# install cbindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
cbindgen --version 0.26.0 \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local
############### Uninstall the build software

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_PYUTILS_TAG
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
# Ephemeral packages (installed for this script and removed again at
# the end)
EPHEMERAL=(
binutils
build-essential
cpp
dpkg-dev
g++
gcc
libc6-dev
perl
python3-dev
)
DEPS=(
apt-utils
curl
file
findutils
git
python3-pil
python3-pip
python3-ply
python3-setuptools
python3-venv
python3-yaml
shellcheck
xz-utils
yamllint
zstd
)
apt-get update
apt-get install -y --no-remove --no-install-recommends "${DEPS[@]}" "${EPHEMERAL[@]}" \
"${EXTRA_LOCAL_PACKAGES:-}"
# Needed for ci-fairy, this revision is able to upload files to S3
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
pip3 install --break-system-packages -r bin/ci/test/requirements.txt
############### Uninstall ephemeral packages
apt-get purge -y "${EPHEMERAL[@]}"
apt-get autoremove --purge -y
. .gitlab-ci/container/container_post_build.sh

View File

@@ -108,5 +108,3 @@ rm -rf "/${ndk:?}"
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh
. .gitlab-ci/container/strip-rootfs.sh

View File

@@ -1,4 +1,160 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
DEBIAN_ARCH=amd64 \
. .gitlab-ci/container/debian/test-base.sh
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BASE_TAG
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates gnupg2 software-properties-common
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
export LLVM_VERSION="${LLVM_VERSION:=15}"
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
autoconf
automake
bc
bison
bzip2
ccache
cmake
"clang-${LLVM_VERSION}"
flex
glslang-tools
g++
libasound2-dev
libcap-dev
"libclang-cpp${LLVM_VERSION}-dev"
libdrm-dev
libegl-dev
libelf-dev
libepoxy-dev
libgbm-dev
libpciaccess-dev
libssl-dev
libvulkan-dev
libwayland-dev
libx11-xcb-dev
libxext-dev
"llvm-${LLVM_VERSION}-dev"
make
meson
openssh-server
patch
pkgconf
protobuf-compiler
python3-dev
python3-pip
python3-setuptools
python3-wheel
spirv-tools
wayland-protocols
xz-utils
)
DEPS=(
apt-utils
curl
git
git-lfs
inetutils-syslogd
iptables
jq
libasan8
libdrm2
libexpat1
"libllvm${LLVM_VERSION}"
liblz4-1
libpng16-16
libpython3.11
libvulkan1
libwayland-client0
libwayland-server0
libxcb-ewmh2
libxcb-randr0
libxcb-xfixes0
libxkbcommon0
libxrandr2
libxrender1
python3-mako
python3-numpy
python3-packaging
python3-pil
python3-requests
python3-six
python3-yaml
socat
vulkan-tools
waffle-utils
xauth
xvfb
zlib1g
zstd
)
apt-get update
apt-get dist-upgrade -y
apt-get install --purge -y \
sysvinit-core libelogind0
apt-get install -y --no-remove "${DEPS[@]}"
apt-get install -y --no-install-recommends "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_pre_build.sh
############### Build kernel
export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
export KERNEL_IMAGE_NAME=bzImage
export KERNEL_ARCH=x86_64
export DEBIAN_ARCH=amd64
mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh
# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
# Needed for manipulating trace YAML files.
pip3 install --break-system-packages yq
. .gitlab-ci/container/build-mold.sh
############### Build LLVM-SPIRV translator
. .gitlab-ci/container/build-llvm-spirv.sh
############### Build libclc
. .gitlab-ci/container/build-libclc.sh
############### Build Wayland
. .gitlab-ci/container/build-wayland.sh
############### Build Crosvm
. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/build-crosvm.sh
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
apt-get purge -y "${EPHEMERAL[@]}"
rm -rf /root/.rustup
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,5 +1,109 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
. .gitlab-ci/container/debian/test-gl.sh
set -e
set -o xtrace
. .gitlab-ci/container/strip-rootfs.sh
export DEBIAN_FRONTEND=noninteractive
export LLVM_VERSION="${LLVM_VERSION:=15}"
apt-get install -y libelogind0 # this interferes with systemd deps, install separately
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
bzip2
ccache
"clang-${LLVM_VERSION}"
cmake
g++
glslang-tools
libasound2-dev
libcap-dev
"libclang-cpp${LLVM_VERSION}-dev"
libdrm-dev
libgles2-mesa-dev
libgtest-dev
libpciaccess-dev
libpng-dev
libudev-dev
libvulkan-dev
libwaffle-dev
libwayland-dev
libx11-xcb-dev
libxcb-dri2-0-dev
libxcb-dri3-dev
libxcb-present-dev
libxfixes-dev
libxkbcommon-dev
libxrandr-dev
libxrender-dev
"llvm-${LLVM_VERSION}-dev"
make
meson
ocl-icd-opencl-dev
patch
pkgconf
python3-distutils
xz-utils
)
DEPS=(
clinfo
iptables
kmod
"libclang-common-${LLVM_VERSION}-dev"
"libclang-cpp${LLVM_VERSION}"
libcap2
libegl1
libepoxy0
libfdt1
libxcb-shm0
ocl-icd-libopencl1
python3-lxml
python3-renderdoc
python3-simplejson
spirv-tools
sysvinit-core
weston
xwayland
)
apt-get update
apt-get install -y --no-remove "${DEPS[@]}" "${EPHEMERAL[@]}" \
$EXTRA_LOCAL_PACKAGES
. .gitlab-ci/container/container_pre_build.sh
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_GLX_TESTS=ON -DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
############### Build dEQP GL
DEQP_API=GL \
DEQP_TARGET=surfaceless \
. .gitlab-ci/container/build-deqp.sh
DEQP_API=GLES \
DEQP_TARGET=surfaceless \
. .gitlab-ci/container/build-deqp.sh
############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
############### Build validation layer for zink
. .gitlab-ci/container/build-vulkan-validation.sh
############### Build nine tests
. .gitlab-ci/container/build-ninetests.sh
############### Uninstall the build software
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,5 +1,129 @@
#!/usr/bin/env bash
# The relative paths in this file only become valid at runtime.
# shellcheck disable=SC1091
# shellcheck disable=SC2086 # we want word splitting
. .gitlab-ci/container/debian/test-vk.sh
set -e
set -o xtrace
. .gitlab-ci/container/strip-rootfs.sh
export DEBIAN_FRONTEND=noninteractive
apt-get install -y libelogind0 # this interferes with systemd deps, install separately
# Ephemeral packages (installed for this script and removed again at the end)
EPHEMERAL=(
ccache
cmake
g++
glslang-tools
libexpat1-dev
gnupg2
libdrm-dev
libgbm-dev
libgles2-mesa-dev
liblz4-dev
libpciaccess-dev
libudev-dev
libvulkan-dev
libwaffle-dev
libx11-xcb-dev
libxcb-dri2-0-dev
libxcb-ewmh-dev
libxcb-keysyms1-dev
libxkbcommon-dev
libxrandr-dev
libxrender-dev
libzstd-dev
meson
p7zip
patch
pkgconf
python3-dev
python3-distutils
python3-pip
python3-setuptools
python3-wheel
software-properties-common
wine64-tools
xz-utils
)
DEPS=(
curl
libepoxy0
libxcb-shm0
pciutils
python3-lxml
python3-simplejson
sysvinit-core
weston
xwayland
wine
wine64
xinit
xserver-xorg-video-amdgpu
xserver-xorg-video-ati
)
apt-get update
apt-get install -y --no-remove --no-install-recommends \
"${DEPS[@]}" "${EPHEMERAL[@]}"
############### Install DXVK
. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
. .gitlab-ci/container/install-wine-dxvk.sh
############### Install apitrace binaries for wine
. .gitlab-ci/container/install-wine-apitrace.sh
# Add the apitrace path to the registry
wine \
reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
/v Path \
/t REG_EXPAND_SZ \
/d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \
/f
############### Building ...
. .gitlab-ci/container/container_pre_build.sh
############### Build parallel-deqp-runner's hang-detection tool
. .gitlab-ci/container/build-hang-detection.sh
############### Build piglit replayer
PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh
############### Build Fossilize
. .gitlab-ci/container/build-fossilize.sh
############### Build dEQP VK
DEQP_API=VK \
DEQP_TARGET=default \
. .gitlab-ci/container/build-deqp.sh
############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
############### Build gfxreconstruct
. .gitlab-ci/container/build-gfxreconstruct.sh
############### Build VKD3D-Proton
. .gitlab-ci/container/setup-wine.sh "/vkd3d-proton-wine64"
. .gitlab-ci/container/build-vkd3d-proton.sh
############### Uninstall the build software
apt-get purge -y "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_post_build.sh

View File

@@ -48,6 +48,8 @@ DEPS=(
"pkgconfig(libclc)"
"pkgconfig(libelf)"
"pkgconfig(libglvnd)"
"pkgconfig(libomxil-bellagio)"
"pkgconfig(libselinux)"
"pkgconfig(libva)"
"pkgconfig(pciaccess)"
"pkgconfig(vdpau)"
@@ -76,7 +78,6 @@ DEPS=(
python3-mako
python3-ply
python3-pycparser
python3-yaml
rust-packaging
vulkan-headers
spirv-tools-devel
@@ -100,7 +101,8 @@ tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/install-meson.sh
# We need at least 1.3.1 for rusticl
pip install meson==1.3.1
. .gitlab-ci/container/build-mold.sh

View File

@@ -1,53 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2046 # we want to arg-split FIRMWARE_FILES
# shellcheck disable=SC2086 # as above
# shellcheck disable=SC2116 # as above
set -e
ROOTFS=$1
FIRMWARE_FILES=$2
if [ -n "${FIRMWARE_FILES:-}" ]; then
FIRMWARE=$(jq -s '.' $(echo "$FIRMWARE_FILES"))
else
FIRMWARE=""
fi
if ! echo "$FIRMWARE" | jq empty; then
echo "FIRMWARE contains invalid JSON."
fi
for item in $(echo "$FIRMWARE" | jq -c '.[]'); do
src=$(echo "$item" | jq -r '.src')
git_hash=$(echo "$item" | jq -r '.git_hash')
dst=$(echo "$item" | jq -r '.dst')
if [ "$src" = "null" ] || [ "$dst" = "null" ]; then
echo "Missing src or dst for $item."
continue
fi
# Remove any trailing slashes from src and dst
src=${src%/}
dst=${dst%/}
# Remove any leading slash
dst=${dst#/}
if [ "$(echo "$item" | jq '.files | length')" -eq 0 ]; then
echo "No files specified for $item."
continue
fi
for file in $(echo "$item" | jq -r '.files[]'); do
FIRMWARE_SRC_PATH="${src}/${file}"
if [ "$git_hash" != "null" ]; then
FIRMWARE_SRC_PATH="${FIRMWARE_SRC_PATH}?h=${git_hash}"
fi
FIRMWARE_DST_DIR="${ROOTFS}/${dst}"
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 --create-dirs --output-dir "${FIRMWARE_DST_DIR}" -o "${file}" "${FIRMWARE_SRC_PATH}"
done
done
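# A hypothetical example (file name, URL and firmware name are invented) of the
# JSON shape this loop consumes: one object per source, with the fields read via
# jq above (.src, .git_hash, .dst, .files[]).
cat > /tmp/example-fw.json <<'EOF'
{
"src": "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain",
"git_hash": "main",
"dst": "/lib/firmware/example",
"files": [ "example-gpu.bin" ]
}
EOF
# With FIRMWARE_FILES=/tmp/example-fw.json, jq -s '.' wraps the object in a
# list, and each entry in .files[] is fetched from ${src}/${file}?h=${git_hash}
# into ${ROOTFS}/${dst}.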

View File

@@ -40,16 +40,10 @@
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.
.debian-container-version:
.debian-container:
variables:
FDO_DISTRIBUTION_VERSION: bookworm-slim
.debian-container:
extends:
- .fdo.container-build@debian
- .container
- .debian-container-version
.container:
stage: container
extends:
@@ -69,14 +63,20 @@
# Debian based x86_64 build image base
debian/x86_64_build-base:
extends:
- .fdo.container-build@debian
- .container
- .debian-container
variables:
MESA_IMAGE_TAG: &debian-x86_64_build-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
rules:
# python-test requires debian/x86_64_build, which requires this job
- !reference [python-test, rules]
- !reference [.container, rules]
.use-debian/x86_64_build-base:
extends:
- .fdo.container-build@debian
- .debian-container-version
- .debian-container
- .use-base-image
variables:
MESA_BASE_IMAGE: ${DEBIAN_X86_64_BUILD_BASE_IMAGE}
@@ -91,6 +91,10 @@ debian/x86_64_build:
- .use-debian/x86_64_build-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_build ${DEBIAN_BUILD_TAG}
rules:
# python-test requires this job
- !reference [python-test, rules]
- !reference [.use-debian/x86_64_build-base, rules]
.use-debian/x86_64_build:
extends:
@@ -174,15 +178,14 @@ debian/android_build:
# Debian based x86_64 test image base
debian/x86_64_test-base:
extends:
- .debian-container
extends: debian/x86_64_build-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_test-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}--${KERNEL_TAG}"
.use-debian/x86_64_test-base:
extends:
- .fdo.container-build@debian
- .debian-container-version
- .debian-container
- .use-base-image
variables:
MESA_BASE_IMAGE: ${DEBIAN_X86_64_TEST_BASE_IMAGE}
@@ -190,33 +193,11 @@ debian/x86_64_test-base:
needs:
- debian/x86_64_test-base
# Debian based aarch64 test image base
debian/arm64_test-base:
tags:
- aarch64
extends:
- .debian-container
variables:
MESA_IMAGE_TAG: &debian-arm64_test-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
.use-debian/arm64_test-base:
tags:
- aarch64
extends:
- .fdo.container-build@debian
- .debian-container-version
- .use-base-image
variables:
MESA_BASE_IMAGE: ${DEBIAN_ARM64_TEST_BASE_IMAGE}
MESA_BASE_TAG: *debian-arm64_test-base
needs:
- debian/arm64_test-base
# Debian based x86_64 test image for GL
debian/x86_64_test-gl:
extends: .use-debian/x86_64_test-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_test-gl ${DEBIAN_TEST_GL_TAG}
MESA_IMAGE_TAG: &debian-x86_64_test-gl ${DEBIAN_X86_64_TEST_GL_TAG}
.use-debian/x86_64_test-gl:
extends:
@@ -232,7 +213,7 @@ debian/x86_64_test-gl:
debian/x86_64_test-vk:
extends: .use-debian/x86_64_test-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_test-vk ${DEBIAN_TEST_VK_TAG}
MESA_IMAGE_TAG: &debian-x86_64_test-vk ${DEBIAN_X86_64_TEST_VK_TAG}
.use-debian/x86_64_test-vk:
extends:
@@ -248,7 +229,7 @@ debian/x86_64_test-vk:
.debian/x86_64_test-android:
extends: .use-debian/x86_64_test-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_test-android ${DEBIAN_TEST_ANDROID_TAG}
MESA_IMAGE_TAG: &debian-x86_64_test-android ${DEBIAN_X86_64_TEST_ANDROID_TAG}
ANDROID_NDK: android-ndk-r25b
.use-debian/x86_64_test-android:
@@ -261,74 +242,12 @@ debian/x86_64_test-vk:
needs:
- debian/x86_64_test-android
# Debian-based x86_64 image to run Python utilities
debian/x86_64_pyutils:
extends:
- .debian-container
variables:
MESA_IMAGE_TAG: &debian-x86_64_pyutils "${DEBIAN_PYUTILS_TAG}"
rules:
# python-test requires this job
- !reference [python-test, rules]
- !reference [.container, rules]
.use-debian/x86_64_pyutils:
extends:
- .fdo.container-build@debian
- .debian-container-version
- .set-image
variables:
MESA_IMAGE_PATH: ${DEBIAN_PYUTILS_IMAGE}
MESA_IMAGE_TAG: *debian-x86_64_pyutils
needs:
- debian/x86_64_pyutils
# Debian based aarch64 test image for GL
debian/arm64_test-gl:
tags:
- aarch64
extends: .use-debian/arm64_test-base
variables:
MESA_IMAGE_TAG: &debian-arm64_test-gl ${DEBIAN_TEST_GL_TAG}
.use-debian/arm64_test-gl:
tags:
- aarch64
extends:
- .set-image-base-tag
variables:
MESA_BASE_TAG: *debian-arm64_test-base
MESA_IMAGE_PATH: ${DEBIAN_ARM64_TEST_IMAGE_GL_PATH}
MESA_IMAGE_TAG: *debian-arm64_test-gl
needs:
- debian/arm64_test-gl
# Debian based aarch64 test image for VK
debian/arm64_test-vk:
tags:
- aarch64
extends: .use-debian/arm64_test-base
variables:
MESA_IMAGE_TAG: &debian-arm64_test-vk ${DEBIAN_TEST_VK_TAG}
.use-debian/arm64_test-vk:
tags:
- aarch64
extends:
- .set-image-base-tag
variables:
MESA_BASE_TAG: *debian-arm64_test-base
MESA_IMAGE_PATH: ${DEBIAN_ARM64_TEST_IMAGE_VK_PATH}
MESA_IMAGE_TAG: *debian-arm64_test-vk
needs:
- debian/arm64_test-vk
# Debian based ARM build image
debian/arm64_build:
extends:
- .fdo.container-build@debian
- .container
- .debian-container-version
- .debian-container
tags:
- aarch64
variables:
@@ -360,15 +279,6 @@ alpine/x86_64_build:
- .alpine/x86_64_build-base
variables:
MESA_IMAGE_TAG: &alpine-x86_64_build ${ALPINE_X86_64_BUILD_TAG}
LLVM_VERSION: &alpine-llvm_version 19
rules:
# Note: the next three lines must remain in that order, so that the rules
# in `linkcheck-docs` catch nightly pipelines before the rules in `pages`
# exclude them.
- !reference [linkcheck-docs, rules]
- !reference [pages, rules]
- !reference [test-docs, rules]
- !reference [.container, rules]
.use-alpine/x86_64_build:
extends:
@@ -376,7 +286,6 @@ alpine/x86_64_build:
variables:
MESA_IMAGE_PATH: "alpine/x86_64_build"
MESA_IMAGE_TAG: *alpine-x86_64_build
LLVM_VERSION: *alpine-llvm_version
needs:
- alpine/x86_64_build
@@ -405,29 +314,12 @@ fedora/x86_64_build:
needs:
- fedora/x86_64_build
# Get firmware directly rather than using package versions.
# Change KERNEL_ROOTFS_TAG to add firmware changes.
# FIRMWARE_FILES is a list of json files arranged by vendor in .gitlab-ci/firmware/*
.firmware_x86_64:
variables:
FIRMWARE_FILES: |
.gitlab-ci/firmware/i915/mtl-fw.json
.firmware_arm64:
variables:
FIRMWARE_FILES: |
.gitlab-ci/firmware/arm/mali/arch10.8/mali-fw.json
.firmware_arm32:
variables:
FIRMWARE_FILES: |
.kernel+rootfs:
extends:
- .container+build-rules
- .debian-container-version
- .debian-container
stage: container
timeout: 90m
variables:
GIT_STRATEGY: fetch
MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG}
@@ -439,7 +331,6 @@ kernel+rootfs_x86_64:
extends:
- .use-debian/x86_64_build-base
- .kernel+rootfs
- .firmware_x86_64
image: "$FDO_BASE_IMAGE"
variables:
DEBIAN_ARCH: "amd64"
@@ -449,7 +340,6 @@ kernel+rootfs_arm64:
extends:
- .use-debian/arm64_build
- .kernel+rootfs
- .firmware_arm64
tags:
- aarch64
variables:
@@ -458,7 +348,6 @@ kernel+rootfs_arm64:
kernel+rootfs_arm32:
extends:
- kernel+rootfs_arm64
- .firmware_arm32
variables:
DEBIAN_ARCH: "armhf"
@@ -474,59 +363,59 @@ kernel+rootfs_arm32:
MESA_ROOTFS_TAG: *kernel-rootfs
# x86_64 image with ARM64 & ARM32 kernel & rootfs for baremetal testing
.debian/baremetal_arm_test:
.debian/arm_test:
extends:
- .fdo.container-build@debian
- .container
- .debian-container-version
- .debian-container
# Don't want the .container rules
- .container+build-rules
variables:
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
ARTIFACTS_PREFIX: "https://${S3_HOST}/${S3_KERNEL_BUCKET}"
ARTIFACTS_PREFIX: "https://${S3_HOST}/mesa-lava"
ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
MESA_ARTIFACTS_TAG: *debian-arm64_build
MESA_ROOTFS_TAG: *kernel-rootfs
debian/baremetal_arm32_test:
debian/arm32_test:
extends:
- .debian/baremetal_arm_test
- .debian/arm_test
needs:
- kernel+rootfs_arm32
variables:
MESA_IMAGE_TAG: &debian-arm32_test "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
debian/baremetal_arm64_test:
debian/arm64_test:
extends:
- .debian/baremetal_arm_test
- .debian/arm_test
needs:
- kernel+rootfs_arm64
variables:
MESA_IMAGE_TAG: &debian-arm64_test "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
.use-debian/baremetal_arm_test:
.use-debian/arm_test:
variables:
MESA_ROOTFS_TAG: *kernel-rootfs
.use-debian/baremetal_arm32_test:
.use-debian/arm32_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/baremetal_arm_test
- .use-debian/arm_test
variables:
MESA_IMAGE_PATH: "debian/baremetal_arm32_test"
MESA_IMAGE_PATH: "debian/arm32_test"
MESA_IMAGE_TAG: *debian-arm32_test
needs:
- debian/baremetal_arm_test
- debian/arm_test
.use-debian/baremetal_arm64_test:
.use-debian/arm64_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/baremetal_arm_test
- .use-debian/arm_test
variables:
MESA_IMAGE_PATH: "debian/baremetal_arm64_test"
MESA_IMAGE_PATH: "debian/arm64_test"
MESA_IMAGE_TAG: *debian-arm64_test
needs:
- debian/baremetal_arm_test
- debian/arm_test
# Native Windows docker builds
#
@@ -555,7 +444,6 @@ debian/baremetal_arm64_test:
- .windows-docker-msvc
- .windows-shell-tags
rules:
- !reference [.common-rules, rules]
- !reference [.microsoft-farm-container-rules, rules]
- !reference [.container+build-rules, rules]
variables:
@@ -581,7 +469,6 @@ windows_build_msvc:
extends:
- .windows_container_build
rules:
- !reference [.common-rules, rules]
- !reference [.microsoft-farm-rules, rules]
- !reference [.container+build-rules, rules]
variables:
@@ -600,7 +487,6 @@ windows_test_msvc:
extends:
- .windows_container_build
rules:
- !reference [.common-rules, rules]
- !reference [.microsoft-farm-rules, rules]
- !reference [.container+build-rules, rules]
variables:

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BUILD_TAG
# FEDORA_X86_64_BUILD_TAG
rm -f /usr/lib/python3.*/EXTERNALLY-MANAGED
# We need at least 1.4.0 for rusticl
pip3 install 'meson==1.4.0'

View File

@@ -1,7 +1,5 @@
#!/bin/bash
set -ue
APITRACE_VERSION="11.1"
APITRACE_VERSION_DATE=""

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -ue
set -e
overrideDll() {
if ! wine reg add 'HKEY_CURRENT_USER\Software\Wine\DllOverrides' /v "$1" /d native /f; then

View File

@@ -2,24 +2,19 @@
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
# shellcheck disable=SC2034 # Variables are used in scripts called from here
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC2016 # non-expanded variables are intentional
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# KERNEL_ROOTFS_TAG
set -e
. .gitlab-ci/setup-test-env.sh
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
export LLVM_VERSION="${LLVM_VERSION:=15}"
export FIRMWARE_FILES="${FIRMWARE_FILES}"
check_minio()
{
S3_PATH="${S3_HOST}/${S3_KERNEL_BUCKET}/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
S3_PATH="${S3_HOST}/mesa-lava/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
if curl -L --retry 4 -f --retry-delay 60 -s -X HEAD \
"https://${S3_PATH}/done"; then
echo "Remote files are up-to-date, skip rebuilding them."
@@ -36,10 +31,9 @@ check_minio "${CI_PROJECT_PATH}"
. .gitlab-ci/container/build-rust.sh
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
BUILD_CL="ON"
BUILD_VK="ON"
GCC_ARCH="aarch64-linux-gnu"
KERNEL_ARCH="arm64"
SKQP_ARCH="arm64"
DEFCONFIG="arch/arm64/configs/defconfig"
DEVICE_TREES="rk3399-gru-kevin.dtb"
DEVICE_TREES+=" meson-g12b-a311d-khadas-vim3.dtb"
@@ -58,10 +52,9 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
BUILD_CL="OFF"
BUILD_VK="OFF"
GCC_ARCH="arm-linux-gnueabihf"
KERNEL_ARCH="arm"
SKQP_ARCH="arm"
DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
DEVICE_TREES="rk3288-veyron-jaq.dtb"
DEVICE_TREES+=" sun8i-h3-libretech-all-h3-cc.dtb"
@@ -83,15 +76,14 @@ elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
libxkbcommon-dev:armhf
)
else
BUILD_CL="ON"
BUILD_VK="ON"
GCC_ARCH="x86_64-linux-gnu"
KERNEL_ARCH="x86_64"
SKQP_ARCH="x64"
DEFCONFIG="arch/x86/configs/x86_64_defconfig"
DEVICE_TREES=""
KERNEL_IMAGE_NAME="bzImage"
CONTAINER_ARCH_PACKAGES=(
libasound2-dev libcap-dev libfdt-dev libva-dev p7zip wine
libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols p7zip wine
)
fi
@@ -114,7 +106,6 @@ fi
# no need to remove these at end, image isn't saved at the end
CONTAINER_EPHEMERAL=(
arch-test
automake
bc
"clang-${LLVM_VERSION}"
@@ -123,7 +114,6 @@ CONTAINER_EPHEMERAL=(
mmdebstrap
git
glslang-tools
jq
libdrm-dev
libegl1-mesa-dev
libxext-dev
@@ -154,15 +144,9 @@ CONTAINER_EPHEMERAL=(
python3-serial
python3-venv
unzip
wayland-protocols
zstd
)
[ "$BUILD_CL" == "ON" ] && CONTAINER_EPHEMERAL+=(
ocl-icd-opencl-dev
)
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
apt-get update
@@ -172,7 +156,7 @@ apt-get install -y --no-remove \
"${CONTAINER_ARCH_PACKAGES[@]}" \
${EXTRA_LOCAL_PACKAGES}
export ROOTFS=/lava-files/rootfs-${DEBIAN_ARCH}
ROOTFS=/lava-files/rootfs-${DEBIAN_ARCH}
mkdir -p "$ROOTFS"
# rootfs packages
@@ -206,6 +190,7 @@ PKG_DEP=(
# arch dependent rootfs packages
[ "$DEBIAN_ARCH" = "arm64" ] && PKG_ARCH=(
libgl1 libglu1-mesa
libvulkan-dev
firmware-linux-nonfree firmware-qcom-media
libfontconfig1
)
@@ -218,6 +203,7 @@ PKG_DEP=(
spirv-tools
libelf1 libfdt1 "libllvm${LLVM_VERSION}"
libva2 libva-drm2
libvulkan-dev
socat
sysvinit-core
wine
@@ -226,21 +212,10 @@ PKG_DEP=(
firmware-misc-nonfree
)
[ "$BUILD_CL" == "ON" ] && PKG_ARCH+=(
clinfo
"libclang-cpp${LLVM_VERSION}"
"libclang-common-${LLVM_VERSION}-dev"
ocl-icd-libopencl1
)
[ "$BUILD_VK" == "ON" ] && PKG_ARCH+=(
libvulkan-dev
)
mmdebstrap \
--variant=apt \
--arch="${DEBIAN_ARCH}" \
--components main,contrib,non-free-firmware \
--customize-hook='.gitlab-ci/container/get-firmware-from-source.sh "$ROOTFS" "$FIRMWARE_FILES"' \
--include "${PKG_BASE[*]} ${PKG_CI[*]} ${PKG_DEP[*]} ${PKG_MESA_DEP[*]} ${PKG_ARCH[*]}" \
bookworm \
"$ROOTFS/" \
@@ -250,21 +225,25 @@ mmdebstrap \
############### Install mold
. .gitlab-ci/container/build-mold.sh
############### Setuping
if [ "$DEBIAN_ARCH" = "amd64" ]; then
. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
. .gitlab-ci/container/install-wine-dxvk.sh
mv /dxvk-wine64 $ROOTFS
fi
############### Installing
if [ "$DEBIAN_ARCH" = "amd64" ]; then
. .gitlab-ci/container/install-wine-apitrace.sh
mkdir -p "$ROOTFS/apitrace-msvc-win64"
mv /apitrace-msvc-win64/bin "$ROOTFS/apitrace-msvc-win64"
rm -rf /apitrace-msvc-win64
fi
############### Building
STRIP_CMD="${GCC_ARCH}-strip"
mkdir -p $ROOTFS/usr/lib/$GCC_ARCH
############### Build libclc
if [ "$BUILD_CL" = "ON" ]; then
rm -rf /usr/lib/clc/*
. .gitlab-ci/container/build-libclc.sh
mkdir -p $ROOTFS/usr/{share,lib}/clc
mv /usr/share/clc/spirv*-mesa3d-.spv $ROOTFS/usr/share/clc/
ln -s /usr/share/clc/spirv64-mesa3d-.spv $ROOTFS/usr/lib/clc/
ln -s /usr/share/clc/spirv-mesa3d-.spv $ROOTFS/usr/lib/clc/
fi
############### Build Vulkan validation layer (for zink)
if [ "$DEBIAN_ARCH" = "amd64" ]; then
. .gitlab-ci/container/build-vulkan-validation.sh
@@ -282,7 +261,7 @@ rm -rf /apitrace
############### Build ANGLE
if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
. .gitlab-ci/container/build-angle.sh
mv /angle $ROOTFS/.
mv /angle /lava-files/rootfs-${DEBIAN_ARCH}/.
rm -rf /angle
fi
@@ -301,7 +280,7 @@ DEQP_API=GLES \
DEQP_TARGET=surfaceless \
. .gitlab-ci/container/build-deqp.sh
[ "$BUILD_VK" == "ON" ] && DEQP_API=VK \
DEQP_API=VK \
DEQP_TARGET=default \
. .gitlab-ci/container/build-deqp.sh
@@ -316,21 +295,7 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]] \
fi
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_USE_WAFFLE=ON
-DPIGLIT_USE_GBM=ON
-DPIGLIT_USE_WAYLAND=ON
-DPIGLIT_USE_X11=ON
-DPIGLIT_BUILD_GLX_TESTS=ON
-DPIGLIT_BUILD_EGL_TESTS=ON
-DPIGLIT_BUILD_WGL_TESTS=OFF
-DPIGLIT_BUILD_GL_TESTS=ON
-DPIGLIT_BUILD_GLES1_TESTS=ON
-DPIGLIT_BUILD_GLES2_TESTS=ON
-DPIGLIT_BUILD_GLES3_TESTS=ON
-DPIGLIT_BUILD_CL_TESTS=$BUILD_CL
-DPIGLIT_BUILD_VK_TESTS=$BUILD_VK
-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" \
. .gitlab-ci/container/build-piglit.sh
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON -DPIGLIT_BUILD_GLX_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit $ROOTFS/.
############### Build libva tests
@@ -351,7 +316,7 @@ fi
############### Build ci-kdl
section_start kdl "Prepare a venv for kdl"
. .gitlab-ci/container/build-kdl.sh
mv /ci-kdl $ROOTFS/
mv ci-kdl.venv $ROOTFS
section_end kdl
############### Build local stuff for use by igt and kernel testing, which
@@ -362,8 +327,8 @@ if [[ -e ".gitlab-ci/local/build-rootfs.sh" ]]; then
fi
############### Download prebuilt kernel
. .gitlab-ci/container/download-prebuilt-kernel.sh
############### Build kernel
. .gitlab-ci/container/build-kernel.sh
############### Delete rust, since the tests won't be compiling anything.
rm -rf /root/.cargo
@@ -400,8 +365,8 @@ popd
. .gitlab-ci/container/container_post_build.sh
ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/"${ROOTFSTAR}" \
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/"${ROOTFSTAR}" \
https://${S3_PATH}/"${ROOTFSTAR}"
touch /lava-files/done
ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/done https://${S3_PATH}/done
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/done https://${S3_PATH}/done

View File

@@ -1,144 +0,0 @@
From 2f4a38ecfde470abcd5d3c0ae7337bf780343469 Mon Sep 17 00:00:00 2001
From: Daniel Stone <daniels@collabora.com>
Date: Tue, 15 Oct 2024 16:02:26 +0100
Subject: [PATCH] deps: Make more sources conditional
Fetching all the dependent sources - including at least one copy of LLVM
- can take a surprising amount of time. Mesa needs to build ANGLE as
part of CI, and the cost of downloading all the sources all of the time
is not OK for the number of dependencies we don't need during the build.
---
DEPS | 33 +++++++++++++++++++++++----------
1 file changed, 23 insertions(+), 10 deletions(-)
Submitted upstream at:
https://chromium-review.googlesource.com/c/angle/angle/+/5937820
diff --git a/DEPS b/DEPS
index 61263fb7af..0cff8c3126 100644
--- a/DEPS
+++ b/DEPS
@@ -17,6 +17,17 @@ gclient_gn_args = [
]
vars = {
+ 'angle_enable_cl': True,
+ 'angle_enable_cl_testing': False,
+ 'angle_enable_vulkan': True,
+ 'angle_enable_vulkan_validation_layers': True,
+ 'angle_enable_wgpu': True,
+ 'build_angle_deqp_tests': True,
+ 'build_angle_perftests': True,
+ 'build_with_swiftshader': True,
+ 'use_custom_libcxx': True,
+ 'export_libcxxapi_from_executables': True,
+
'android_git': 'https://android.googlesource.com',
'chromium_git': 'https://chromium.googlesource.com',
'chrome_internal_git': 'https://chrome-internal.googlesource.com',
@@ -673,7 +684,7 @@ deps = {
'third_party/catapult': {
'url': Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
- 'condition': 'not build_with_chromium',
+ 'condition': 'build_with_catapult and not build_with_chromium',
},
# Cherry is a dEQP/VK-GL-CTS management GUI written in Go. We use it for viewing test results.
@@ -689,7 +700,7 @@ deps = {
'third_party/clspv/src': {
'url': Var('chromium_git') + '/external/github.com/google/clspv@a173c052455434a422bcfe5c12ffe44d574fd6e1',
- 'condition': 'not build_with_chromium',
+ 'condition': 'angle_enable_cl and angle_enable_vulkan and not build_with_chromium',
},
'third_party/cpu_features/src': {
@@ -700,7 +711,7 @@ deps = {
'third_party/dawn': {
'url': Var('dawn_git') + '/dawn.git' + '@' + Var('dawn_revision'),
- 'condition': 'not build_with_chromium'
+ 'condition': 'angle_enable_wgpu and not build_with_chromium'
},
'third_party/depot_tools': {
@@ -745,6 +756,7 @@ deps = {
# glmark2 is a GPL3-licensed OpenGL ES 2.0 benchmark. We use it for testing.
'third_party/glmark2/src': {
'url': Var('chromium_git') + '/external/github.com/glmark2/glmark2@ca8de51fedb70bace5351c6b002eb952c747e889',
+ 'condition': 'build_angle_perftests',
},
'third_party/googletest': {
@@ -777,7 +789,7 @@ deps = {
# libjpeg_turbo is used by glmark2.
'third_party/libjpeg_turbo': {
'url': Var('chromium_git') + '/chromium/deps/libjpeg_turbo.git@927aabfcd26897abb9776ecf2a6c38ea5bb52ab6',
- 'condition': 'not build_with_chromium',
+ 'condition': 'build_angle_perftests and not build_with_chromium',
},
'third_party/libpng/src': {
@@ -787,7 +799,7 @@ deps = {
'third_party/llvm/src': {
'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project@d222fa4521531cc4ac14b8e157d231c108c003be',
- 'condition': 'not build_with_chromium',
+ 'condition': '(build_with_swiftshader or (angle_enable_cl and angle_enable_vulkan)) and not build_with_chromium',
},
'third_party/jdk': {
@@ -824,12 +836,12 @@ deps = {
'third_party/libc++/src': {
'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxx.git@6a68fd412b9aecd515a20a7cf84d11b598bfaf96',
- 'condition': 'not build_with_chromium',
+ 'condition': 'use_custom_libcxx and not build_with_chromium',
},
'third_party/libc++abi/src': {
'url': Var('chromium_git') + '/external/github.com/llvm/llvm-project/libcxxabi.git@9a1d90c3b412d5ebeb97a6e33d98e1d0dd923221',
- 'condition': 'not build_with_chromium',
+ 'condition': 'export_libcxxapi_from_executables and not build_with_chromium',
},
'third_party/libunwind/src': {
@@ -872,7 +884,7 @@ deps = {
'third_party/OpenCL-CTS/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenCL-CTS@e0a31a03fc8f816d59fd8b3051ac6a61d3fa50c6',
- 'condition': 'not build_with_chromium',
+ 'condition': 'angle_enable_cl_testing and not build_with_chromium',
},
'third_party/OpenCL-Docs/src': {
@@ -968,7 +980,7 @@ deps = {
'third_party/SwiftShader': {
'url': Var('swiftshader_git') + '/SwiftShader@7a9a492a38b7c701f7c96a15a76046aed8f8c0c3',
- 'condition': 'not build_with_chromium',
+ 'condition': 'build_with_swiftshader and not build_with_chromium',
},
'third_party/turbine/cipd': {
@@ -984,6 +996,7 @@ deps = {
'third_party/VK-GL-CTS/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/VK-GL-CTS' + '@' + Var('vk_gl_cts_revision'),
+ 'condition': 'build_angle_deqp_tests',
},
'third_party/vulkan-deps': {
@@ -1038,7 +1051,7 @@ deps = {
'third_party/vulkan-validation-layers/src': {
'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@b63e9bd51fbd7bf8fea161a4f7c06994abc24b75',
- 'condition': 'not build_with_chromium',
+ 'condition': 'angle_enable_vulkan_validation_layers and not build_with_chromium',
},
'third_party/vulkan_memory_allocator': {
--
2.46.2

View File

@@ -1,7 +1,5 @@
#!/usr/bin/env bash
set -u
export WINEPREFIX="$1"
export WINEDEBUG="-all"

View File

@@ -26,7 +26,7 @@ apt-get autoremove --yes || true
UNNEEDED_PACKAGES=(
apt libapt-pkg6.0
ncurses-bin ncurses-base libncursesw6 libncurses6
perl-base libperl5.36 perl-modules-5.36
perl-base
debconf libdebconfclient0
e2fsprogs e2fslibs libfdisk1
insserv
@@ -40,8 +40,14 @@ UNNEEDED_PACKAGES=(
hostname
adduser
debian-archive-keyring
libgl1-mesa-dri mesa-vulkan-drivers mesa-va-drivers mesa-vdpau-drivers i965-va-driver
intel-media-va-driver
libegl1-mesa-dev # mesa group
libegl-mesa0
libgl1-mesa-dev
libgl1-mesa-dri
libglapi-mesa
libgles2-mesa-dev
libglx-mesa0
mesa-common-dev
gnupg2
software-properties-common
)
@@ -85,7 +91,6 @@ directories=(
/var/lib/usbutils/usb.ids
/root/.pip # pip cache
/root/.cache
/root/.cargo
/etc/apt # configuration archives of apt and dpkg
/etc/dpkg
/var/* # drop non-ostree directories
@@ -109,14 +114,6 @@ directories=(
/usr/lib/*/libdb-5.3.so # libdb-5.3.so that is only used by this pam module ^
/usr/lib/*/libnss_hesiod* # remove NSS support for nis, nisplus and hesiod
/usr/lib/*/libnss_nis*
/usr/lib/*/wine # don't need Wine's implementation, using Proton instead
/usr/local/bin/mold
/usr/local/bin/bindgen
/usr/local/bin/cargo*
/usr/local/bin/clippy*
/usr/local/bin/rust*
/usr/local/bin/rls
/usr/lib/*/dri
)
for directory in "${directories[@]}"; do
@@ -134,24 +131,3 @@ files=(
for files in "${files[@]}"; do
find /usr /etc -name "$files" -prune -exec rm -r {} \;
done
# We purge apt and dpkg to save on space, which is great for runtime and
# bandwidth use etc, but less great for cbuild which wants to run apt-get clean
# when we're done. Install a stub which works for that and is apologetic for
# anyone else.
cat >/usr/bin/apt-get <<EOF
#!/bin/bash
if [ "\${1:-}" != "clean" ]; then
echo "Couldn't run '\$0 \$*', because apt has been cleaned from this container."
echo ""
echo "After .gitlab-ci/container/strip-rootfs.sh has run, you cannot install"
echo "new packages."
echo ""
echo "Sorry."
exit 1
fi
EOF
chmod +x /usr/bin/apt-get
ln -s /usr/bin/apt-get /usr/bin/apt

View File

@@ -1,17 +1,6 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
set -ue
# Instead of starting one dEQP instance per available CPU core, pour our
# concurrency at llvmpipe threads instead. This is mostly useful for VirGL and
# Venus, which serialise quite a bit at the host level. So instead of smashing
# it with a pile of concurrent jobs which don't actually parallelise very well,
# we use that concurrency for llvmpipe/lavapipe's render pipeline.
if [ -n "${PARALLELISE_VIA_LP_THREADS:-}" ]; then
export LP_NUM_THREADS="${FDO_CI_CONCURRENT:-4}"
export FDO_CI_CONCURRENT=1
fi
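# Worked example (values assumed for illustration only): a job that exports
# PARALLELISE_VIA_LP_THREADS=1 with FDO_CI_CONCURRENT=8 ends up with
# LP_NUM_THREADS=8 and FDO_CI_CONCURRENT=1, i.e. a single deqp-runner worker
# feeding eight llvmpipe render threads instead of eight workers with one
# render thread each.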
set -e
# If run outside of a deqp-runner invocation (e.g. piglit trace replay), then act
# the same as the first thread in its threadpool.
@@ -36,7 +25,7 @@ THREAD=${DEQP_RUNNER_THREAD:-0}
# context data towards the guest
#
set_vsock_context() {
[ -n "${CI_JOB_ID:-}" ] || {
[ -n "${CI_JOB_ID}" ] || {
echo "Missing or unset CI_JOB_ID env variable" >&2
exit 1
}
@@ -75,13 +64,12 @@ set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }
# Securely pass the current variables to the crosvm environment
echo "Variables passed through:"
SCRIPTS_DIR=$(readlink -en "${0%/*}")
${SCRIPTS_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
cp ${SCRIPTS_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh
SCRIPT_DIR=$(readlink -en "${0%/*}")
${SCRIPT_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
cp ${SCRIPT_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh
# Set the crosvm-script as the arguments of the current script
echo "export SCRIPTS_DIR=${SCRIPTS_DIR}" > ${VM_TEMP_DIR}/crosvm-script.sh
echo ". ${VM_TEMP_DIR}/setup-test-env.sh" >> ${VM_TEMP_DIR}/crosvm-script.sh
echo ". ${VM_TEMP_DIR}/setup-test-env.sh" > ${VM_TEMP_DIR}/crosvm-script.sh
echo "$@" >> ${VM_TEMP_DIR}/crosvm-script.sh
# Setup networking
@@ -97,20 +85,20 @@ unset DISPLAY
unset XDG_RUNTIME_DIR
CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPTS_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"
[ "${CROSVM_GALLIUM_DRIVER:-}" = "llvmpipe" ] && \
[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \
CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false
set +e -x
# We aren't testing the host driver here, so we don't need to validate NIR on the host
NIR_DEBUG="novalidate" \
LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE:-} \
GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER:-} \
VK_DRIVER_FILES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER:-}_icd.x86_64.json \
LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
VK_DRIVER_FILES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER}_icd.x86_64.json \
crosvm --no-syslog run \
--gpu "${CROSVM_GPU_ARGS:-}" --gpu-render-server "path=${VIRGL_RENDER_SERVER:-/usr/local/libexec/virgl_render_server}" \
--gpu "${CROSVM_GPU_ARGS}" --gpu-render-server "path=/usr/local/libexec/virgl_render_server" \
-m "${CROSVM_MEMORY:-4096}" -c "${CROSVM_CPU:-2}" --disable-sandbox \
--shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
--net "host-ip=192.168.30.1,netmask=255.255.255.0,mac=AA:BB:CC:00:00:12" \

View File

@@ -1,8 +1,5 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime
. "${SCRIPTS_DIR}/setup-test-env.sh"
section_start cuttlefish_setup "cuttlefish: setup"
set -xe
@@ -91,18 +88,18 @@ $ADB shell rm /vendor/lib64/egl/libGLESv2_angle.so
$ADB shell rm /vendor/lib64/egl/libGLESv2_emulation.so
AOSP_RESULTS=/data/results
RESULTS=/data/results
uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"
set +e
$ADB shell "mkdir ${AOSP_RESULTS}; cd ${AOSP_RESULTS}/..; ./deqp-runner \
$ADB shell "mkdir /data/results; cd /data; ./deqp-runner \
suite \
--suite /data/deqp-$DEQP_SUITE.toml \
--output $RESULTS \
--skips /data/all-skips.txt $DEQP_SKIPS \
--flakes /data/$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
--fraction-start ${CI_NODE_INDEX:-1} \
--fraction-start $CI_NODE_INDEX \
--fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS"
@@ -111,11 +108,11 @@ EXIT_CODE=$?
set -e
section_switch cuttlefish_results "cuttlefish: gathering the results"
$ADB pull $RESULTS $RESULTS_DIR
$ADB pull $RESULTS results
cp /cuttlefish/cuttlefish/instances/cvd-1/logs/logcat $RESULTS_DIR
cp /cuttlefish/cuttlefish/instances/cvd-1/kernel.log $RESULTS_DIR
cp /cuttlefish/cuttlefish/instances/cvd-1/logs/launcher.log $RESULTS_DIR
cp /cuttlefish/cuttlefish/instances/cvd-1/logs/logcat results
cp /cuttlefish/cuttlefish/instances/cvd-1/kernel.log results
cp /cuttlefish/cuttlefish/instances/cvd-1/logs/launcher.log results
section_end cuttlefish_results
exit $EXIT_CODE

View File

@@ -1,8 +1,5 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime
. "${SCRIPTS_DIR}/setup-test-env.sh"
section_start test_setup "deqp: preparing test setup"
@@ -21,14 +18,16 @@ INSTALL=$(realpath -s "$PWD"/install)
# Set up the driver environment.
export LD_LIBRARY_PATH="$INSTALL"/lib/:$LD_LIBRARY_PATH
export EGL_PLATFORM=surfaceless
ARCH=$(uname -m)
export VK_DRIVER_FILES="$PWD"/install/share/vulkan/icd.d/"$VK_DRIVER"_icd."$ARCH".json
export VK_DRIVER_FILES="$PWD"/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-$(uname -m)}.json
export OCL_ICD_VENDORS="$PWD"/install/etc/OpenCL/vendors/
if [ -n "$USE_ANGLE" ]; then
export LD_LIBRARY_PATH=/angle:$LD_LIBRARY_PATH
fi
RESULTS="$PWD/${DEQP_RESULTS_DIR:-results}"
mkdir -p "$RESULTS"
# Ensure Mesa Shader Cache resides on tmpfs.
SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}
@@ -60,38 +59,33 @@ if [ -z "$DEQP_SUITE" ]; then
# Generate test case list file.
if [ "$DEQP_VER" = "vk" ]; then
MUSTPASS=/deqp/mustpass/vk-main.txt.zst
MUSTPASS=/deqp/mustpass/vk-main.txt
DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk
elif [ "$DEQP_VER" = "gles2" ] || [ "$DEQP_VER" = "gles3" ] || [ "$DEQP_VER" = "gles31" ] || [ "$DEQP_VER" = "egl" ]; then
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt.zst
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt
DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER
elif [ "$DEQP_VER" = "gles2-khr" ] || [ "$DEQP_VER" = "gles3-khr" ] || [ "$DEQP_VER" = "gles31-khr" ] || [ "$DEQP_VER" = "gles32-khr" ]; then
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt.zst
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt
DEQP=/deqp/external/openglcts/modules/glcts
else
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt.zst
MUSTPASS=/deqp/mustpass/$DEQP_VER-main.txt
DEQP=/deqp/external/openglcts/modules/glcts
fi
[ -z "${DEQP_FRACTION:-}" ] && DEQP_FRACTION=1
[ -z "${CI_NODE_INDEX:-}" ] && CI_NODE_INDEX=1
[ -z "${CI_NODE_TOTAL:-}" ] && CI_NODE_TOTAL=1
cp $MUSTPASS /tmp/case-list.txt
# This ugly sed expression does a single pass across the case list to take
# into account the global fraction and sharding.
#
# First, we select only every n'th line, according to DEQP_FRACTION; for a
# fraction of 3, it will select lines 1, 4, 7, 10, etc.
#
# Then, we select $CI_NODE_INDEX/$CI_NODE_TOTAL for sharding; for a two-way
# shard, the first node will select lines 1 and 7, and the second node will
# select lines 4 and 10.
#
# Sharding like this gives us the best coverage, as sequential tests often
# test very slightly different permutations of the same functionality. So
# by distributing our skips as widely across the set as possible, rather
# than grouping them together, we get the broadest coverage.
zstd -d $MUSTPASS -c | sed -n "$(((CI_NODE_INDEX - 1) * DEQP_FRACTION + 1))~$((DEQP_FRACTION * CI_NODE_TOTAL))p" > /tmp/case-list.txt
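# Worked example (assumed values, for illustration only): with DEQP_FRACTION=3
# and CI_NODE_TOTAL=2 the stride is 3*2=6, so the expression above becomes
#   CI_NODE_INDEX=1  ->  sed -n "1~6p"  ->  lines 1, 7, 13, ...
#   CI_NODE_INDEX=2  ->  sed -n "4~6p"  ->  lines 4, 10, 16, ...
# matching the fraction-then-shard selection described in the comment above.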
# If the caselist is too long to run in a reasonable amount of time, let the job
# specify what fraction (1/n) of the caselist we should run. Note: N~M is a gnu
# sed extension to match every nth line (first line is #1).
if [ -n "$DEQP_FRACTION" ]; then
sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt
fi
# If the job is parallel at the gitab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
fi
if [ ! -s /tmp/case-list.txt ]; then
echo "Caselist generation failed"
@@ -123,10 +117,6 @@ if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
fi
if [ -e "$INSTALL/$GPU_VERSION-slow-skips.txt" ] && [[ $CI_JOB_NAME != *full* ]]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-slow-skips.txt"
fi
if [ "$PIGLIT_PLATFORM" != "gbm" ] ; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/x11-skips.txt"
fi
@@ -135,19 +125,11 @@ if [ "$PIGLIT_PLATFORM" = "gbm" ]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/gbm-skips.txt"
fi
if [ -n "$USE_ANGLE" ]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/angle-skips.txt"
fi
if [ -n "$VK_DRIVER" ] && [ -z "$DEQP_SUITE" ]; then
# Bump the number of tests per group to reduce the startup time of VKCTS.
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --tests-per-group ${DEQP_RUNNER_TESTS_PER_GROUP:-5000}"
fi
if [ -n "${DEQP_RUNNER_MAX_FAILS:-}" ]; then
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --max-fails ${DEQP_RUNNER_MAX_FAILS}"
fi
# Set the path to VK validation layer settings (in case it ends up getting loaded)
# Note: If you change the format of this filename, look through the rest of the
# tree for other places that need to be kept in sync (e.g.
@@ -169,7 +151,7 @@ if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
fi
GALLIUM_DRIVER=llvmpipe \
virgl_test_server $VTEST_ARGS >$RESULTS_DIR/vtest-log.txt 2>&1 &
virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
sleep 1
fi
@@ -187,22 +169,19 @@ fi
uncollapsed_section_switch deqp "deqp: deqp-runner"
# Print the detailed version with the list of backports and local patches
{ set +x; } 2>/dev/null
for api in vk gl gles; do
deqp_version_log=/deqp/version-$api
if [ -r "$deqp_version_log" ]; then
cat "$deqp_version_log"
fi
done
set -x
set +e
deqp-runner -V
if [ -z "$DEQP_SUITE" ]; then
deqp-runner \
run \
--deqp $DEQP \
--output $RESULTS_DIR \
--output $RESULTS \
--caselist /tmp/case-list.txt \
--skips $INSTALL/all-skips.txt $DEQP_SKIPS \
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
@@ -210,7 +189,7 @@ if [ -z "$DEQP_SUITE" ]; then
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS \
-- \
$DEQP_OPTIONS; DEQP_EXITCODE=$?
$DEQP_OPTIONS
else
# If you change the format of the suite toml filenames or the
# $GPU_VERSION-{fails,flakes,skips}.txt filenames, look through the rest
@@ -219,41 +198,42 @@ else
deqp-runner \
suite \
--suite $INSTALL/deqp-$DEQP_SUITE.toml \
--output $RESULTS_DIR \
--output $RESULTS \
--skips $INSTALL/all-skips.txt $DEQP_SKIPS \
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
--fraction-start ${CI_NODE_INDEX:-1} \
--fraction-start $CI_NODE_INDEX \
--fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS; DEQP_EXITCODE=$?
$DEQP_RUNNER_OPTIONS
fi
{ set +x; } 2>/dev/null
DEQP_EXITCODE=$?
set -e
set +x
report_load
section_switch test_post_process "deqp: post-processing test results"
set -x
report_load
# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
find $RESULTS_DIR -name \*.xml | \
find $RESULTS -name \*.xml | \
sort -n |
sed -n '1,+49!p' | \
xargs rm -f
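# (How the pipeline above works: `sort -n` gives a stable ordering of the XML
# paths, `sed -n '1,+49!p'` prints only the lines outside the range 1..50,
# and those, i.e. everything after the first 50 files, are what `xargs rm -f`
# deletes, so only the first 50 XML files are kept as artifacts.)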
# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
find $RESULTS_DIR -name \*.xml \
-exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS_DIR/" ";" \
find $RESULTS -name \*.xml \
-exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
-quit
deqp-runner junit \
--testsuite dEQP \
--results $RESULTS_DIR/failures.csv \
--output $RESULTS_DIR/junit.xml \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
@@ -262,7 +242,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then
python3 $INSTALL/report-flakes.py \
--host irc.oftc.net \
--port 6667 \
--results $RESULTS_DIR/results.csv \
--results $RESULTS/results.csv \
--known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
--channel "$FLAKES_CHANNEL" \
--runner "$CI_RUNNER_DESCRIPTION" \
@@ -275,9 +255,8 @@ fi
# Compress results.csv to save on bandwidth during the upload of artifacts to
# GitLab. This reduces the size in a VKCTS run from 135 MB to 7.6 MB, and takes
# 0.17s on a Ryzen 5950X (16 threads, 0.95s when limited to 1 thread).
zstd --quiet --rm --threads ${FDO_CI_CONCURRENT:-0} -8 "$RESULTS_DIR/results.csv" -o "$RESULTS_DIR/results.csv.zst"
zstd --rm -T0 -8q "$RESULTS/results.csv" -o "$RESULTS/results.csv.zst"
set +x
section_end test_post_process
exit $DEQP_EXITCODE

View File

@@ -18,7 +18,7 @@ TMP_DIR=$(mktemp -d)
echo "$(date +"%F %T") Downloading archived master..."
if ! /usr/bin/wget \
-O "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" \
"https://${S3_HOST}/${S3_GITCACHE_BUCKET}/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz";
"https://${S3_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz";
then
echo "Repository cache not available"
exit

View File

@@ -217,25 +217,25 @@
- !reference [.austriancoder-farm-rules, rules]
.google-freedreno-farm-rules:
.freedreno-farm-rules:
rules:
- exists: [ .ci-farms-disabled/google-freedreno ]
- exists: [ .ci-farms-disabled/freedreno ]
when: never
- changes: [ .ci-farms-disabled/google-freedreno ]
- changes: [ .ci-farms-disabled/freedreno ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.google-freedreno-farm-manual-rules:
.freedreno-farm-manual-rules:
rules:
- exists: [ .ci-farms-disabled/google-freedreno ]
- exists: [ .ci-farms-disabled/freedreno ]
when: never
- changes: [ .ci-farms-disabled/google-freedreno ]
- changes: [ .ci-farms-disabled/freedreno ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.google-freedreno-farm-rules, rules]
- !reference [.freedreno-farm-rules, rules]
.vmware-farm-rules:
rules:
@@ -323,8 +323,8 @@
exists: [ .ci-farms-disabled/austriancoder ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/google-freedreno ]
exists: [ .ci-farms-disabled/google-freedreno ]
changes: [ .ci-farms-disabled/freedreno ]
exists: [ .ci-farms-disabled/freedreno ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/ondracka ]

View File

@@ -1,8 +0,0 @@
{
"src": "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/arm/mali/arch10.8/",
"git_hash": "ad8d5f76c429e5485764a9ecb7a2ce3fbc1386ae",
"files": [
"mali_csffw.bin"
],
"dst": "/lib/firmware/arm/mali/arch10.8/"
}

View File

@@ -1,12 +0,0 @@
{
"src": "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/i915/",
"git_hash": "ad8d5f76c429e5485764a9ecb7a2ce3fbc1386ae",
"files": [
"mtl_dmc.bin",
"mtl_dmc_ver2_10.bin",
"mtl_gsc_1.bin",
"mtl_guc_70.bin",
"mtl_huc_gsc.bin"
],
"dst": "/lib/firmware/i915/"
}

View File

@@ -1,8 +1,5 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime
. "${SCRIPTS_DIR}/setup-test-env.sh"
set -ex
@@ -11,6 +8,9 @@ INSTALL=$PWD/install
# Set up the driver environment.
export LD_LIBRARY_PATH=$INSTALL/lib/
RESULTS="$PWD/${GTEST_RESULTS_DIR:-results}"
mkdir -p "$RESULTS"
export LIBVA_DRIVERS_PATH=$INSTALL/lib/dri/
# libva spams driver open info by default, and that happens per testcase.
export LIBVA_MESSAGING_LEVEL=1
@@ -39,7 +39,7 @@ set +e
gtest-runner \
run \
--gtest $GTEST \
--output ${RESULTS_DIR} \
--output ${RESULTS} \
--jobs ${FDO_CI_CONCURRENT:-4} \
$GTEST_SKIPS \
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
@@ -52,8 +52,8 @@ GTEST_EXITCODE=$?
deqp-runner junit \
--testsuite gtest \
--results $RESULTS_DIR/failures.csv \
--output $RESULTS_DIR/junit.xml \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
@@ -62,7 +62,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then
python3 $INSTALL/report-flakes.py \
--host irc.oftc.net \
--port 6667 \
--results $RESULTS_DIR/results.csv \
--results $RESULTS/results.csv \
--known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
--channel "$FLAKES_CHANNEL" \
--runner "$CI_RUNNER_DESCRIPTION" \

View File

@@ -13,40 +13,34 @@
variables:
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "20241106-wlproto2"
DEBIAN_BASE_TAG: "20240412-pycparser"
DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "20241106-wlproto2"
DEBIAN_BUILD_TAG: "20240408-cbindgen"
DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
DEBIAN_ARM64_TEST_BASE_IMAGE: "debian/arm64_test-base"
DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
DEBIAN_ARM64_TEST_IMAGE_GL_PATH: "debian/arm64_test-gl"
DEBIAN_X86_64_TEST_IMAGE_VK_PATH: "debian/x86_64_test-vk"
DEBIAN_ARM64_TEST_IMAGE_VK_PATH: "debian/arm64_test-vk"
DEBIAN_X86_64_TEST_ANDROID_IMAGE_PATH: "debian/x86_64_test-android"
DEBIAN_TEST_ANDROID_TAG: "20241106-wlproto2"
DEBIAN_TEST_GL_TAG: "20241106-wlproto2"
DEBIAN_TEST_VK_TAG: "20241107-setup"
KERNEL_ROOTFS_TAG: "20241107-setup"
DEBIAN_X86_64_TEST_ANDROID_TAG: "20240423-deqp"
DEBIAN_X86_64_TEST_GL_TAG: "20240423-deqp"
DEBIAN_X86_64_TEST_VK_TAG: "20240423-deqp"
KERNEL_ROOTFS_TAG: "20240423-deqp"
DEBIAN_PYUTILS_IMAGE: "debian/x86_64_pyutils"
DEBIAN_PYUTILS_TAG: "20241002-pyutils"
ALPINE_X86_64_BUILD_TAG: "20241106-wlproto2"
ALPINE_X86_64_LAVA_SSH_TAG: "20241106-wlproto2"
FEDORA_X86_64_BUILD_TAG: "20241106-wlproto2"
KERNEL_TAG: "v6.6.21-mesa-f8ea"
ALPINE_X86_64_BUILD_TAG: "20240412-pycparser"
ALPINE_X86_64_LAVA_SSH_TAG: "20240401-wlproto"
FEDORA_X86_64_BUILD_TAG: "20240412-pycparser"
KERNEL_TAG: "v6.6.21-mesa-19fc"
KERNEL_REPO: "gfx-ci/linux"
PKG_REPO_REV: "bca9635d"
PKG_REPO_REV: "3cc12a2a"
WINDOWS_X64_MSVC_PATH: "windows/x86_64_msvc"
WINDOWS_X64_MSVC_TAG: "20240827-v143"
WINDOWS_X64_MSVC_TAG: "20231222-msvc"
WINDOWS_X64_BUILD_PATH: "windows/x86_64_build"
WINDOWS_X64_BUILD_TAG: "20241107-setup"
WINDOWS_X64_BUILD_TAG: "20240405-vainfo-ci-1"
WINDOWS_X64_TEST_PATH: "windows/x86_64_test"
WINDOWS_X64_TEST_TAG: "20241107-setup"
WINDOWS_X64_TEST_TAG: "20240405-vainfo-ci-1"

View File

@@ -8,24 +8,20 @@ variables:
variables:
GIT_STRATEGY: none # testing doesn't build anything from source
FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions
# the dispatchers use this to cache data locally
LAVA_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
# proxy used to cache data locally
FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
# base system generated by the container build job, shared between many pipelines
BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/${S3_KERNEL_BUCKET}"
BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/mesa-lava"
BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
# per-job build artifacts
JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst"
LAVA_S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized"
S3_ARTIFACT_NAME: "mesa-python-test"
S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized"
S3_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
PIGLIT_NO_WINDOW: 1
VISIBILITY_GROUP: "Collabora+fdo"
before_script:
- !reference [.download_s3, before_script]
script:
- . artifacts/setup-test-env.sh
- ./artifacts/lava/lava-submit.sh
artifacts:
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
@@ -39,15 +35,10 @@ variables:
tags:
- $RUNNER_TAG
after_script:
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --warning=no-timestamp --zstd -x
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --zstd -x
needs:
- alpine/x86_64_lava_ssh_client
- !reference [.required-for-hardware-jobs, needs]
- job: alpine/x86_64_lava_ssh_client
artifacts: false
- job: debian/x86_64_pyutils
artifacts: false
- job: python-test
artifacts: false
.lava-test:arm32:
variables:
@@ -58,15 +49,14 @@ variables:
BOOT_METHOD: u-boot
extends:
- .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm32
- .use-debian/x86_64_pyutils
- .use-debian/x86_64_build
- .lava-test
- .use-kernel+rootfs-arm
needs:
- !reference [.lava-test, needs]
- job: kernel+rootfs_arm32
artifacts: false
- job: debian-arm32
artifacts: false
- kernel+rootfs_arm32
- debian/x86_64_build
- debian-arm32
.lava-test-deqp:arm32:
extends:
@@ -83,15 +73,16 @@ variables:
BOOT_METHOD: u-boot
extends:
- .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64
- .use-debian/x86_64_pyutils
- .use-debian/x86_64_build
- .lava-test
- .use-kernel+rootfs-arm
dependencies:
- debian-arm64
needs:
- !reference [.lava-test, needs]
- job: kernel+rootfs_arm64
artifacts: false
- job: debian-arm64
artifacts: false
- kernel+rootfs_arm64
- debian/x86_64_build
- debian-arm64
.lava-test-deqp:arm64:
variables:
@@ -108,15 +99,13 @@ variables:
BOOT_METHOD: u-boot
extends:
- .use-debian/x86_64_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_x86_64
- .use-debian/x86_64_pyutils
- .use-debian/x86_64_build
- .lava-test
- .use-kernel+rootfs-x86_64
needs:
- !reference [.lava-test, needs]
- job: kernel+rootfs_x86_64
artifacts: false
- job: debian-testing
artifacts: false
- kernel+rootfs_x86_64
- debian-testing
.lava-test-deqp:x86_64:
variables:

.gitlab-ci/lava/lava-pytest.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
# © Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
# This script runs unit/integration tests related with LAVA CI tools
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
set -ex
# Use this script in a python virtualenv for isolation
python3 -m venv .venv
. .venv/bin/activate
python3 -m pip install --break-system-packages -r "${CI_PROJECT_DIR}/.gitlab-ci/lava/requirements-test.txt"
TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests
PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \
pytest "${TEST_DIR}" \
-W ignore::DeprecationWarning \
--junitxml=artifacts/ci_scripts_report.xml \
-m 'not slow'

View File

@@ -1,104 +1,63 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime
# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs if they exist.
_check_artifact_path() {
_url="https://${1}/${2}"
if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
echo -n "${_url}"
fi
}
get_path_to_artifact() {
_mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
if [ -n "${_mainline_artifact}" ]; then
echo -n "${_mainline_artifact}"
return
fi
_fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
if [ -n "${_fork_artifact}" ]; then
echo -n "${_fork_artifact}"
return
fi
set +x
error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
echo "" >&2
echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
echo "" >&2
echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
echo "" >&2
echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
set -x
exit 1
}
. "${SCRIPTS_DIR}/setup-test-env.sh"
section_start prepare_rootfs "Preparing root filesystem"
set -ex
section_switch rootfs "Assembling root filesystem"
ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
[ $? != 1 ] || exit 1
# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs if they exist.
BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
if [ "$CI_PROJECT_PATH" != "$FDO_UPSTREAM_REPO" ]; then
if ! curl -s -X HEAD -L --retry 4 -f --retry-delay 60 \
"https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
echo "Using kernel and rootfs from the fork, cached from mainline is unavailable."
BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
else
echo "Using the cached mainline kernel and rootfs."
fi
fi
rm -rf results
mkdir -p results/job-rootfs-overlay/
artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
cp artifacts/ci-common/kdl.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
cat results/job-rootfs-overlay/set-job-env-vars.sh
section_start variables "Variables passed through:"
artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
section_end variables
section_switch lava_submit "Submitting job for scheduling"
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
ARTIFACT_URL="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME:?}.tar.zst"
touch results/lava.log
tail -f results/lava.log &
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--farm "${FARM}" \
--device-type "${DEVICE_TYPE}" \
--boot-method "${BOOT_METHOD}" \
--job-timeout-min ${JOB_TIMEOUT:-30} \
submit \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--rootfs-url "${ROOTFS_URL}" \
--rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
--kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
--kernel-external "${EXTERNAL_KERNEL_TAG}" \
--kernel-external "${FORCE_KERNEL_TAG}" \
--build-url "${ARTIFACT_URL}" \
--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
--job-timeout-min ${JOB_TIMEOUT:-30} \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir "${CI_PROJECT_DIR}" \
--device-type "${DEVICE_TYPE}" \
--dtb-filename "${DTB}" \
--jwt-file "${S3_JWT_FILE}" \
--jwt-file "${CI_JOB_JWT_FILE}" \
--kernel-image-name "${KERNEL_IMAGE_NAME}" \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
--boot-method "${BOOT_METHOD}" \
--visibility-group "${VISIBILITY_GROUP}" \
--lava-tags "${LAVA_TAGS}" \
--mesa-job-name "$CI_JOB_NAME" \
--structured-log-file "results/lava_job_detail.json" \
--ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
--project-name "${CI_PROJECT_NAME}" \
--starting-section "${CURRENT_SECTION}" \
--job-submitted-at "${CI_JOB_STARTED_AT}" \
- append-overlay \
--name=mesa-build \
--url="https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst" \
--compression=zstd \
--path="${CI_PROJECT_DIR}" \
--format=tar \
- append-overlay \
--name=job-overlay \
--url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
--compression=gz \
--path="/" \
--format=tar \
- submit \
>> results/lava.log

View File

@@ -15,10 +15,10 @@ import pathlib
import sys
import time
from collections import defaultdict
from dataclasses import dataclass, field, fields
from datetime import datetime, timedelta, UTC
from os import environ, getenv
from typing import Any, Optional, Self
from dataclasses import dataclass, fields
from datetime import datetime, timedelta, timezone
from os import environ, getenv, path
from typing import Any, Optional
import fire
from lavacli.utils import flow_yaml as lava_yaml
@@ -51,7 +51,7 @@ from lava.utils import DEFAULT_GITLAB_SECTION_TIMEOUTS as GL_SECTION_TIMEOUTS
STRUCTURAL_LOG = defaultdict(list)
try:
from structured_logger import StructuredLogger
from ci.structured_logger import StructuredLogger
except ImportError as e:
print_log(
f"Could not import StructuredLogger library: {e}. "
@@ -91,7 +91,7 @@ CI_JOB_STARTED_AT_RAW = getenv("CI_JOB_STARTED_AT", "")
CI_JOB_STARTED_AT: datetime = (
datetime.fromisoformat(CI_JOB_STARTED_AT_RAW)
if CI_JOB_STARTED_AT_RAW
else datetime.now(tz=UTC)
else datetime.now(timezone.utc)
)
@@ -136,6 +136,36 @@ def raise_lava_error(job) -> None:
job.status = "fail"
def show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"):
with GitlabSection(
"job_data",
"LAVA job info",
type=LogSectionType.LAVA_POST_PROCESSING,
start_collapsed=True,
colour=colour,
):
wait_post_processing_retries: int = WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
while not job.is_post_processed() and wait_post_processing_retries > 0:
# Wait a little until LAVA finishes processing metadata
time.sleep(WAIT_FOR_LAVA_POST_PROCESSING_SEC)
wait_post_processing_retries -= 1
if not job.is_post_processed():
waited_for_sec: int = (
WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
* WAIT_FOR_LAVA_POST_PROCESSING_SEC
)
print_log(
f"Waited for {waited_for_sec} seconds "
"for LAVA to post-process the job, it haven't finished yet. "
"Dumping it's info anyway"
)
details: dict[str, str] = job.show()
for field, value in details.items():
print(f"{field:<15}: {value}")
job.refresh_log()
def fetch_logs(job, max_idle_time, log_follower) -> None:
is_job_hanging(job, max_idle_time)
@@ -151,13 +181,14 @@ def fetch_logs(job, max_idle_time, log_follower) -> None:
def is_job_hanging(job, max_idle_time):
# Poll to check for new logs, assuming that a prolonged period of
# silence means that the device has died and we should try it again
if datetime.now(tz=UTC) - job.last_log_time > max_idle_time:
if datetime.now() - job.last_log_time > max_idle_time:
max_idle_time_min = max_idle_time.total_seconds() / 60
raise MesaCITimeoutError(
f"{CONSOLE_LOG['FG_BOLD_YELLOW']}"
f"LAVA job {job.job_id} unresponsive for {max_idle_time_min} "
"minutes; retrying the job."
f"{CONSOLE_LOG['BOLD']}"
f"{CONSOLE_LOG['FG_YELLOW']}"
f"LAVA job {job.job_id} does not respond for {max_idle_time_min} "
"minutes. Retry."
f"{CONSOLE_LOG['RESET']}",
timeout_duration=max_idle_time,
)
@@ -205,13 +236,14 @@ def wait_for_job_get_started(job, attempt_no):
print_log(f"Waiting for job {job.job_id} to start.")
while not job.is_started():
current_job_duration_sec: int = int(
(datetime.now(tz=UTC) - CI_JOB_STARTED_AT).total_seconds()
(datetime.now(timezone.utc) - CI_JOB_STARTED_AT).total_seconds()
)
remaining_time_sec: int = max(0, CI_JOB_TIMEOUT_SEC - current_job_duration_sec)
if remaining_time_sec < EXPECTED_JOB_DURATION_SEC:
job.cancel()
raise MesaCIFatalException(
f"{CONSOLE_LOG['FG_BOLD_YELLOW']}"
f"{CONSOLE_LOG['BOLD']}"
f"{CONSOLE_LOG['FG_YELLOW']}"
f"Job {job.job_id} only has {remaining_time_sec} seconds "
"remaining to run, but it is expected to take at least "
f"{EXPECTED_JOB_DURATION_SEC} seconds."
@@ -222,21 +254,15 @@ def wait_for_job_get_started(job, attempt_no):
print_log(f"Job {job.job_id} started.")
def bootstrap_log_follower(main_test_case, timestamp_relative_to) -> LogFollower:
start_section = GitlabSection(
id="dut_boot",
header="Booting hardware device",
def bootstrap_log_follower() -> LogFollower:
gl = GitlabSection(
id="lava_boot",
header="LAVA boot",
type=LogSectionType.LAVA_BOOT,
start_collapsed=True,
suppress_end=True, # init-stage2 prints the end for us
timestamp_relative_to=timestamp_relative_to,
)
print(start_section.start())
return LogFollower(
starting_section=start_section,
main_test_case=main_test_case,
timestamp_relative_to=timestamp_relative_to
)
print(gl.start())
return LogFollower(starting_section=gl)
def follow_job_execution(job, log_follower):
@@ -269,46 +295,23 @@ def structural_log_phases(job, log_follower):
job.log["dut_job_phases"] = phases
def print_job_final_status(job, timestamp_relative_to):
job.refresh_log()
def print_job_final_status(job):
if job.status == "running":
job.status = "hung"
colour = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"])
with GitlabSection(
"job_data",
f"Hardware job info for {job.status} job",
type=LogSectionType.LAVA_POST_PROCESSING,
start_collapsed=True,
colour=colour,
timestamp_relative_to=timestamp_relative_to,
):
wait_post_processing_retries: int = WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
while not job.is_post_processed() and wait_post_processing_retries > 0:
# Wait a little until LAVA finishes processing metadata
time.sleep(WAIT_FOR_LAVA_POST_PROCESSING_SEC)
wait_post_processing_retries -= 1
color = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"])
print_log(
f"{color}"
f"LAVA Job finished with status: {job.status}"
f"{CONSOLE_LOG['RESET']}"
)
if not job.is_post_processed():
waited_for_sec: int = (
WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
* WAIT_FOR_LAVA_POST_PROCESSING_SEC
)
print_log(
"Timed out waiting for LAVA post-processing after "
f"{waited_for_sec} seconds. Printing incomplete information "
"anyway."
)
details: dict[str, str] = job.show()
for field, value in details.items():
print(f"{field:<15}: {value}")
job.refresh_log()
job.refresh_log()
show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{color}")
def execute_job_with_retries(
proxy, job_definition, retry_count, jobs_log, main_test_case,
timestamp_relative_to
proxy, job_definition, retry_count, jobs_log
) -> Optional[LAVAJob]:
last_failed_job = None
for attempt_no in range(1, retry_count + 2):
@@ -319,20 +322,10 @@ def execute_job_with_retries(
job = LAVAJob(proxy, job_definition, job_log)
STRUCTURAL_LOG["dut_attempt_counter"] = attempt_no
try:
job_log["submitter_start_time"] = datetime.now(tz=UTC).isoformat()
job_log["submitter_start_time"] = datetime.now().isoformat()
submit_job(job)
queue_section = GitlabSection(
id="dut_queue",
header="Waiting for hardware device to become available",
type=LogSectionType.LAVA_QUEUE,
start_collapsed=False,
timestamp_relative_to=timestamp_relative_to
)
with queue_section as section:
wait_for_job_get_started(job, attempt_no)
log_follower: LogFollower = bootstrap_log_follower(
main_test_case, timestamp_relative_to
)
wait_for_job_get_started(job, attempt_no)
log_follower: LogFollower = bootstrap_log_follower()
follow_job_execution(job, log_follower)
return job
@@ -340,10 +333,10 @@ def execute_job_with_retries(
job.handle_exception(exception)
finally:
print_job_final_status(job, timestamp_relative_to)
print_job_final_status(job)
# If LAVA takes too long to post process the job, the submitter
# gives up and proceeds.
job_log["submitter_end_time"] = datetime.now(tz=UTC).isoformat()
job_log["submitter_end_time"] = datetime.now().isoformat()
last_failed_job = job
print_log(
f"{CONSOLE_LOG['BOLD']}"
@@ -356,14 +349,11 @@ def execute_job_with_retries(
return last_failed_job
def retriable_follow_job(
proxy, job_definition, main_test_case, timestamp_relative_to
) -> LAVAJob:
def retriable_follow_job(proxy, job_definition) -> LAVAJob:
number_of_retries = NUMBER_OF_RETRIES_TIMEOUT_DETECTION
last_attempted_job = execute_job_with_retries(
proxy, job_definition, number_of_retries, STRUCTURAL_LOG["dut_jobs"],
main_test_case, timestamp_relative_to
proxy, job_definition, number_of_retries, STRUCTURAL_LOG["dut_jobs"]
)
if last_attempted_job.exception is not None:
@@ -396,9 +386,10 @@ class PathResolver:
@dataclass
class LAVAJobSubmitter(PathResolver):
boot_method: str
ci_project_dir: str
device_type: str
farm: str
job_timeout_min: int # The job timeout in minutes
build_url: str = None
dtb_filename: str = None
dump_yaml: bool = False # Whether to dump the YAML payload to stdout
first_stage_init: str = None
@@ -407,68 +398,29 @@ class LAVAJobSubmitter(PathResolver):
kernel_image_type: str = ""
kernel_url_prefix: str = None
kernel_external: str = None
lava_tags: str | tuple[str, ...] = () # Comma-separated LAVA tags for the job
lava_tags: str = "" # Comma-separated LAVA tags for the job
mesa_job_name: str = "mesa_ci_job"
pipeline_info: str = ""
rootfs_url: str = None
rootfs_url_prefix: str = None
validate_only: bool = False # Whether to only validate the job, not execute it
visibility_group: str = None # Only affects LAVA farm maintainers
job_rootfs_overlay_url: str = None
structured_log_file: pathlib.Path = None # Log file path with structured LAVA log
ssh_client_image: str = None # x86_64 SSH client image to follow the job's output
project_name: str = None # Project name to be used in the job name
starting_section: str = None # GitLab section used to start
job_submitted_at: [str | datetime] = None
__structured_log_context = contextlib.nullcontext() # Structured Logger context
_overlays: dict = field(default_factory=dict, init=False)
def __post_init__(self) -> Self:
def __post_init__(self) -> None:
super().__post_init__()
# Remove mesa job names with spaces, which breaks the lava-test-case command
self.mesa_job_name = self.mesa_job_name.split(" ")[0]
if self.structured_log_file:
self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
if not self.structured_log_file:
return
if self.job_submitted_at:
self.job_submitted_at = datetime.fromisoformat(self.job_submitted_at)
self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
self.proxy = setup_lava_proxy()
return self
def append_overlay(
self, compression: str, name: str, path: str, url: str, format: str = "tar"
) -> Self:
"""
Append an overlay to the LAVA job definition.
Args:
compression (str): The compression type of the overlay (e.g., "gz", "xz").
name (str): The name of the overlay.
path (str): The path where the overlay should be applied.
url (str): The URL from where the overlay can be downloaded.
format (str, optional): The format of the overlay (default is "tar").
Returns:
Self: The instance of LAVAJobSubmitter with the overlay appended.
"""
self._overlays[name] = {
"compression": compression,
"format": format,
"path": path,
"url": url,
}
return self
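# Usage sketch (illustrative only; flag values are assumed): python-fire chains
# sub-commands separated by a lone "-", so an invocation along the lines of
#   lava_job_submitter.py --device-type DEV ... - append-overlay --name=mesa-build \
#       --url=URL --compression=zstd --path=DIR - submit
# resolves to roughly
#   LAVAJobSubmitter(device_type="DEV", ...).append_overlay(
#       name="mesa-build", url="URL", compression="zstd", path="DIR").submit()
# which is why this method returns Self.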
def print(self) -> Self:
"""
Prints the dictionary representation of the instance and returns the instance itself.
Returns:
Self: The instance of the class.
"""
print(self.__dict__)
return self
def __prepare_submission(self) -> str:
# Overwrite the timeout for the testcases with the value offered by the
# user. The testcase running time should be at least 4 times greater than
@@ -487,6 +439,7 @@ class LAVAJobSubmitter(PathResolver):
validation_job = LAVAJob(self.proxy, job_definition)
if errors := validation_job.validate():
fatal_err(f"Error in LAVA job definition: {errors}")
print_log("LAVA job definition validated successfully")
return job_definition
@@ -516,24 +469,10 @@ class LAVAJobSubmitter(PathResolver):
if self.validate_only:
return
if self.starting_section:
gl = GitlabSection(
id=self.starting_section,
header="Preparing to submit job for scheduling",
type=LogSectionType.LAVA_SUBMIT,
start_collapsed=True,
timestamp_relative_to=self.job_submitted_at,
)
gl.start()
print(gl.end())
with self.__structured_log_context:
last_attempt_job = None
try:
last_attempt_job = retriable_follow_job(
self.proxy, job_definition,
f'{self.project_name}_{self.mesa_job_name}',
self.job_submitted_at)
last_attempt_job = retriable_follow_job(self.proxy, job_definition)
except MesaCIRetryError as retry_exception:
last_attempt_job = retry_exception.last_job
@@ -545,7 +484,17 @@ class LAVAJobSubmitter(PathResolver):
finally:
self.finish_script(last_attempt_job)
def print_log_artifact_url(self):
relative_log_path = self.structured_log_file.relative_to(pathlib.Path.cwd())
full_path = f"$ARTIFACTS_BASE_URL/{relative_log_path}"
artifact_url = path.expandvars(full_path)
print_log(f"Structural Logging data available at: {artifact_url}")
def finish_script(self, last_attempt_job):
if self.is_under_ci() and self.structured_log_file:
self.print_log_artifact_url()
if not last_attempt_job:
# No job was run, something bad happened
STRUCTURAL_LOG["job_combined_status"] = "script_crash"
@@ -555,10 +504,9 @@ class LAVAJobSubmitter(PathResolver):
raise SystemExit(1)
STRUCTURAL_LOG["job_combined_status"] = last_attempt_job.status
STRUCTURAL_LOG["job_exit_code"] = last_attempt_job.exit_code
if last_attempt_job.status != "pass":
raise SystemExit(last_attempt_job.exit_code)
raise SystemExit(1)
class StructuredLoggerWrapper:
@@ -568,10 +516,8 @@ class StructuredLoggerWrapper:
def _init_logger(self):
STRUCTURAL_LOG["fixed_tags"] = self.__submitter.lava_tags
STRUCTURAL_LOG["dut_job_type"] = self.__submitter.device_type
STRUCTURAL_LOG["farm"] = self.__submitter.farm
STRUCTURAL_LOG["job_combined_fail_reason"] = None
STRUCTURAL_LOG["job_combined_status"] = "not_submitted"
STRUCTURAL_LOG["job_exit_code"] = None
STRUCTURAL_LOG["dut_attempt_counter"] = 0
# Initialize dut_jobs list to enable appends
@@ -608,5 +554,11 @@ if __name__ == "__main__":
# more buffering
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)
# The LAVA farm reports datetimes in UTC, so run this script in UTC as well.
# Setting environ here does not change the system time; os.environ only
# lives for the duration of this script.
environ["TZ"] = "UTC"
time.tzset()
fire.Fire(LAVAJobSubmitter)
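A short, standalone sketch of the TZ/tzset() trick used in the __main__ block above (POSIX-only; it changes how this process interprets time, not the system clock):

import time
from datetime import datetime
from os import environ

environ["TZ"] = "UTC"  # visible only to this process (and its children)
time.tzset()           # POSIX-only: re-read TZ so naive datetimes use UTC
print(datetime.now())  # current wall-clock time, now expressed in UTC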

View File

@@ -0,0 +1,6 @@
-r requirements.txt
freezegun==1.1.0
hypothesis==6.67.1
pytest==7.2.1
pytest-cov==3.0.0
PyYAML==5.3.1

View File

@@ -0,0 +1,2 @@
lavacli==1.5.2
fire==0.5.0

View File

@@ -1,13 +1,8 @@
CONSOLE_LOG = {
"FG_GREEN": "\x1b[0;32m",
"FG_BOLD_GREEN": "\x1b[0;1;32m",
"FG_RED": "\x1b[0;38;5;197m",
"FG_BOLD_RED": "\x1b[0;1;38;5;197m",
"FG_YELLOW": "\x1b[0;33m",
"FG_BOLD_YELLOW": "\x1b[0;1;33m",
"FG_MAGENTA": "\x1b[0;35m",
"FG_BOLD_MAGENTA": "\x1b[0;1;35m",
"FG_CYAN": "\x1b[0;36m",
"FG_GREEN": "\x1b[1;32;5;197m",
"FG_RED": "\x1b[1;38;5;197m",
"FG_YELLOW": "\x1b[1;33;5;197m",
"FG_MAGENTA": "\x1b[1;35;5;197m",
"RESET": "\x1b[0m",
"UNDERLINED": "\x1b[3m",
"BOLD": "\x1b[1m",

View File

@@ -23,10 +23,3 @@ KNOWN_ISSUE_R8152_PATTERNS: tuple[str, ...] = (
# This is considered noise, since LAVA produces this log after receiving a package of feedback
# messages.
LOG_DEBUG_FEEDBACK_NOISE = "Listened to connection for namespace 'dut' done"
A6XX_GPU_RECOVERY_WATCH_PERIOD_MIN = 3
A6XX_GPU_RECOVERY_FAILURE_MAX_COUNT = 30
A6XX_GPU_RECOVERY_FAILURE_MESSAGE = (
"cx gdsc didn't collapse",
"Timeout waiting for GMU OOB",
)

View File

@@ -2,8 +2,7 @@ from __future__ import annotations
import re
from dataclasses import dataclass, field
from datetime import datetime, timedelta, UTC
from math import floor
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Optional
from lava.utils.console_format import CONSOLE_LOG
@@ -19,11 +18,8 @@ class GitlabSection:
header: str
type: LogSectionType
start_collapsed: bool = False
suppress_end: bool = False
suppress_start: bool = False
timestamp_relative_to: Optional[datetime] = None
escape: str = "\x1b[0K"
colour: str = f"{CONSOLE_LOG['FG_CYAN']}"
colour: str = f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"
__start_time: Optional[datetime] = field(default=None, init=False)
__end_time: Optional[datetime] = field(default=None, init=False)
@@ -62,12 +58,7 @@ class GitlabSection:
timestamp = self.get_timestamp(time)
before_header = ":".join([preamble, timestamp, section_id])
if self.timestamp_relative_to:
delta = self.start_time - self.timestamp_relative_to
reltime = f"[{floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}] "
else:
reltime = ""
colored_header = f"{self.colour}{reltime}{header}\x1b[0m" if header else ""
colored_header = f"{self.colour}{header}\x1b[0m" if header else ""
header_wrapper = "\r" + f"{self.escape}{colored_header}"
return f"{before_header}{header_wrapper}"
@@ -91,25 +82,15 @@ class GitlabSection:
def start(self) -> str:
assert not self.has_finished, "Starting an already finished section"
self.__start_time = datetime.now(tz=UTC)
return self.print_start_section()
def print_start_section(self) -> str:
if self.suppress_start:
return ""
self.__start_time = datetime.now()
return self.section(marker="start", header=self.header, time=self.__start_time)
def end(self) -> str:
assert self.has_started, "Ending an uninitialized section"
self.__end_time = datetime.now(tz=UTC)
self.__end_time = datetime.now()
assert (
self.__end_time >= self.__start_time
), "Section execution time will be negative"
return self.print_end_section()
def print_end_section(self) -> str:
if self.suppress_end:
return ""
return self.section(marker="end", header="", time=self.__end_time)
def delta_time(self) -> Optional[timedelta]:
@@ -117,6 +98,6 @@ class GitlabSection:
return self.__end_time - self.__start_time
if self.has_started:
return datetime.now(tz=UTC) - self.__start_time
return datetime.now() - self.__start_time
return None
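For context, a hedged sketch of the collapsible-section markers GitlabSection emits; the section id and header are invented, and the escape layout follows GitLab's documented job-log section format:

from datetime import datetime, UTC

ESCAPE = "\x1b[0K"

def section(marker: str, section_id: str, header: str, when: datetime) -> str:
    # GitLab job logs expect:
    #   \x1b[0Ksection_start:<unix_ts>:<id>\r\x1b[0K<header>
    #   \x1b[0Ksection_end:<unix_ts>:<id>\r\x1b[0K
    timestamp = str(int(when.timestamp()))
    before_header = ":".join([f"{ESCAPE}section_{marker}", timestamp, section_id])
    return f"{before_header}\r{ESCAPE}{header}"

now = datetime.now(tz=UTC)
print(repr(section("start", "dut_boot", "Booting the DUT", now)))
print(repr(section("end", "dut_boot", "", now)))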

View File

@@ -1,7 +1,7 @@
import re
import xmlrpc
from collections import defaultdict
from datetime import datetime, UTC
from datetime import datetime
from typing import Any, Optional
from lava.exceptions import (
@@ -21,9 +21,9 @@ from .lava_proxy import call_proxy
class LAVAJob:
COLOR_STATUS_MAP: dict[str, str] = {
"pass": CONSOLE_LOG["FG_GREEN"],
"hung": CONSOLE_LOG["FG_BOLD_YELLOW"],
"fail": CONSOLE_LOG["FG_BOLD_RED"],
"canceled": CONSOLE_LOG["FG_BOLD_MAGENTA"],
"hung": CONSOLE_LOG["FG_YELLOW"],
"fail": CONSOLE_LOG["FG_RED"],
"canceled": CONSOLE_LOG["FG_MAGENTA"],
}
def __init__(self, proxy, definition, log=defaultdict(str)) -> None:
@@ -35,11 +35,10 @@ class LAVAJob:
self._is_finished = False
self.log: dict[str, Any] = log
self.status = "not_submitted"
self._exit_code = None
self.__exception: Optional[Exception] = None
def heartbeat(self) -> None:
self.last_log_time: datetime = datetime.now(tz=UTC)
self.last_log_time: datetime = datetime.now()
self.status = "running"
@property
@@ -51,15 +50,6 @@ class LAVAJob:
self._status = new_status
self.log["status"] = self._status
@property
def exit_code(self) -> int:
return self._exit_code
@exit_code.setter
def exit_code(self, code: int) -> None:
self._exit_code = code
self.log["exit_code"] = self._exit_code
@property
def job_id(self) -> int:
return self._job_id
@@ -168,12 +158,11 @@ class LAVAJob:
last_line = None # Print all lines. lines[:None] == lines[:]
for idx, line in enumerate(lava_lines):
if result := re.search(r"hwci: mesa: (pass|fail), exit_code: (\d+)", line):
if result := re.search(r"hwci: mesa: (pass|fail)", line):
self._is_finished = True
self.status = result.group(1)
self.exit_code = int(result.group(2))
self.status = result[1]
last_line = idx
last_line = idx + 1
# We reached the log end here. hwci script has finished.
break
return lava_lines[:last_line]
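A tiny sketch of the result-line parsing above; the sample log line is illustrative:

import re

# The newer pattern also captures the exit code; the older one only captured pass/fail.
line = "hwci: mesa: fail, exit_code: 2"  # illustrative log line
if result := re.search(r"hwci: mesa: (pass|fail), exit_code: (\d+)", line):
    status = result.group(1)          # "fail"
    exit_code = int(result.group(2))  # 2
    print(status, exit_code)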
@@ -183,9 +172,6 @@ class LAVAJob:
self.cancel()
self.exception = exception
# Set the exit code to nonzero value
self.exit_code = 1
# Give more accurate status depending on exception
if isinstance(exception, MesaCIKnownIssueException):
self.status = "canceled"

View File

@@ -34,10 +34,6 @@ class LAVAJobDefinition:
def __init__(self, job_submitter: "LAVAJobSubmitter") -> None:
self.job_submitter: "LAVAJobSubmitter" = job_submitter
# NFS args provided by LAVA
self.lava_nfs_args: str = "root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard,v3 ip=dhcp"
# extra_nfsroot_args is appended to the kernel cmdline
self.extra_nfsroot_args: str = " init=/init rootwait usbcore.quirks=0bda:8153:k"
def has_ssh_support(self) -> bool:
if FORCE_UART:
@@ -61,20 +57,18 @@ class LAVAJobDefinition:
actions for the LAVA job submission.
"""
args = self.job_submitter
nfsrootfs = {
"url": f"{args.rootfs_url}",
"compression": "zstd",
"format": "tar",
"overlays": args._overlays,
}
values = self.generate_metadata()
nfsrootfs = {
"url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
"compression": "zstd",
}
init_stage1_steps = self.init_stage1_steps()
jwt_steps = self.jwt_steps()
artifact_download_steps = self.artifact_download_steps()
deploy_actions = []
boot_action = []
test_actions = uart_test_actions(args, init_stage1_steps, jwt_steps)
test_actions = uart_test_actions(args, init_stage1_steps, artifact_download_steps)
if args.boot_method == "fastboot":
deploy_actions = fastboot_deploy_actions(self, nfsrootfs)
@@ -96,7 +90,7 @@ class LAVAJobDefinition:
wrap_boot_action(boot_action)
test_actions = (
generate_dut_test(args, init_stage1_steps),
generate_docker_test(args, jwt_steps),
generate_docker_test(args, artifact_download_steps),
)
values["actions"] = [
@@ -121,22 +115,6 @@ class LAVAJobDefinition:
yaml.dump(self.generate_lava_yaml_payload(), job_stream)
return job_stream.getvalue()
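A minimal sketch of the YAML serialization step above, assuming PyYAML (as pinned in the requirements file in this diff); the payload is a placeholder:

import io

import yaml  # PyYAML

payload = {"device_type": "example-dut", "priority": 75}  # placeholder payload
job_stream = io.StringIO()
yaml.dump(payload, job_stream)
print(job_stream.getvalue())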
def consume_lava_tags_args(self, values: dict[str, Any]):
# python-fire parses --lava-tags without arguments as True
if isinstance(self.job_submitter.lava_tags, tuple):
values["tags"] = self.job_submitter.lava_tags
# python-fire parses "tag-1,tag2" as str and "tag1,tag2" as tuple
# even if the -- --separator is something other than '-'
elif isinstance(self.job_submitter.lava_tags, str):
# Split string tags by comma, removing any trailing commas
values["tags"] = self.job_submitter.lava_tags.rstrip(",").split(",")
# Ensure tags are always a list of non-empty strings
if "tags" in values:
values["tags"] = [tag for tag in values["tags"] if tag]
# Remove empty tags
if "tags" in values and not values["tags"]:
del values["tags"]
def generate_metadata(self) -> dict[str, Any]:
# General metadata and permissions
values = {
@@ -144,7 +122,7 @@ class LAVAJobDefinition:
"device_type": self.job_submitter.device_type,
"visibility": {"group": [self.job_submitter.visibility_group]},
"priority": JOB_PRIORITY,
"context": {"extra_nfsroot_args": self.extra_nfsroot_args},
"context": {"extra_nfsroot_args": " init=/init rootwait usbcore.quirks=0bda:8153:k"},
"timeouts": {
"job": {"minutes": self.job_submitter.job_timeout_min},
"actions": {
@@ -166,7 +144,8 @@ class LAVAJobDefinition:
},
}
self.consume_lava_tags_args(values)
if self.job_submitter.lava_tags:
values["tags"] = self.job_submitter.lava_tags.split(",")
# QEMU lava jobs mandate proper arch value in the context
if self.job_submitter.boot_method == "qemu-nfs":
@@ -190,33 +169,39 @@ class LAVAJobDefinition:
"compression": "zstd"
}
def jwt_steps(self):
def artifact_download_steps(self):
"""
This function is responsible for setting up the SSH server in the DUT and for
exporting the first boot environment to a file.
"""
# Pre-process the JWT
jwt_steps = [
"set -e",
# Put the JWT pre-processing and the mesa download within the init-stage1.sh file,
# as we do with the non-SSH version.
download_steps = [
"set -ex",
"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
f"{self.job_submitter.job_rootfs_overlay_url} | tar -xz -C /",
f"mkdir -p {self.job_submitter.ci_project_dir}",
f"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 {self.job_submitter.build_url} | "
f"tar --zstd -x -C {self.job_submitter.ci_project_dir}",
]
# If the JWT file is provided, we will use it to authenticate with the cloud
# storage provider and will hide it from the job output in Gitlab.
if self.job_submitter.jwt_file:
with open(self.job_submitter.jwt_file) as jwt_file:
jwt_steps += [
download_steps += [
"set +x # HIDE_START",
f'echo -n "{jwt_file.read()}" > "{self.job_submitter.jwt_file}"',
"set -x # HIDE_END",
f'echo "export S3_JWT_FILE={self.job_submitter.jwt_file}" >> /set-job-env-vars.sh',
f'echo "export CI_JOB_JWT_FILE={self.job_submitter.jwt_file}" >> /set-job-env-vars.sh',
]
else:
jwt_steps += [
download_steps += [
"echo Could not find jwt file, disabling S3 requests...",
"sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
]
return jwt_steps
return download_steps
def init_stage1_steps(self) -> list[str]:
run_steps = []
@@ -230,7 +215,7 @@ class LAVAJobDefinition:
# For the vmware farm, patch the nameserver, as 8.8.8.8 is off limits.
# This is temporary and will be reverted once the farm is moved.
if self.job_submitter.mesa_job_name.startswith("vmware-"):
run_steps += [x.rstrip().replace("nameserver 8.8.8.8", "nameserver 192.19.189.10") for x in init_sh if not x.startswith("#") and x.rstrip()]
run_steps += [x.rstrip().replace("nameserver 8.8.8.8", "nameserver 10.25.198.110") for x in init_sh if not x.startswith("#") and x.rstrip()]
else:
run_steps += [x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()]
@@ -243,6 +228,4 @@ class LAVAJobDefinition:
+ '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"'
)
run_steps.append("export CURRENT_SECTION=dut_boot")
return run_steps
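A tiny sketch of the comment/blank-line filtering (and optional nameserver patch) applied to init.sh above; the script content and addresses are illustrative:

init_sh = [
    "#!/bin/sh\n",
    "# set up the job environment\n",
    "\n",
    "echo nameserver 8.8.8.8 > /etc/resolv.conf\n",
]

# Keep non-comment, non-empty lines; patch the nameserver for the vmware farm.
run_steps = [
    x.rstrip().replace("nameserver 8.8.8.8", "nameserver 192.19.189.10")
    for x in init_sh
    if not x.startswith("#") and x.rstrip()
]
print(run_steps)  # ['echo nameserver 192.19.189.10 > /etc/resolv.conf']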

Some files were not shown because too many files have changed in this diff.