Compare commits

..

1 Commit

Author: Dylan Baker | SHA1: 81c239cb85 | Message: VERSION: update to 23.2.0-rc1 | Date: 2023-07-12 14:21:33 -07:00
3070 changed files with 138432 additions and 329097 deletions

View File

@@ -3,7 +3,6 @@
src/**/asahi/**/*
src/**/panfrost/**/*
src/gallium/drivers/i915
src/amd/vulkan/**/*
src/amd/compiler/**/*
src/egl/**/*

View File

@@ -1,6 +1,4 @@
# List of commits to ignore when using `git blame`.
# Enable with:
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# Per git-blame(1):
# Ignore revisions listed in the file, one unabbreviated object name
@@ -47,21 +45,3 @@ c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402
# egl: re-format using clang-format
2f670d89db038d5a29f6b72732fd7ad63dfaf4c6
# panfrost: clang-format the tree
0afd691f29683f6e9dde60f79eca094373521806
# aco: Format.
1e2639026fec7069806449f9ba2a124ce4eb5569
# radv: Format.
59c501ca353f8ec9d2717c98af2bfa1a1dbf4d75
# pvr: clang-format fixes
953c04ebd39c52d457301bdd8ac803949001da2d
# freedreno: Re-indent
2d439343ea1aee146d4ce32800992cd389bd505d
# ir3: Reformat source with clang-format
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54
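
The list above only takes effect once git is pointed at it. A minimal sketch of both ways to enable it, per the comment at the top of the file (the blamed path is illustrative):

# one-time repository setting, as documented above
git config blame.ignoreRevsFile .git-blame-ignore-revs
# or as a one-off flag, without touching the config
git blame --ignore-revs-file .git-blame-ignore-revs src/egl/main/eglapi.c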

View File

@@ -1,8 +1,5 @@
workflow:
rules:
# do not duplicate pipelines on merge pipelines
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
when: never
# merge pipeline
- if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_COMMIT_BRANCH == null
variables:
@@ -11,12 +8,12 @@ workflow:
# post-merge pipeline
- if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_COMMIT_BRANCH
variables:
JOB_PRIORITY: 40
LAVA_JOB_PRIORITY: 40
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
# any other pipeline
- if: $GITLAB_USER_LOGIN != "marge-bot"
variables:
JOB_PRIORITY: 50
LAVA_JOB_PRIORITY: 50
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
- when: always
@@ -35,7 +32,6 @@ variables:
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/${KERNEL_TAG}
# reference images stored for traces
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO"
# For individual CI farm status see .ci-farms folder
@@ -53,9 +49,6 @@ default:
unset CI_JOB_JWT # Unsetting vulnerable env variables
after_script:
# Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
- find -name '*.log' -exec mv {} {}.txt \;
- >
set +x
@@ -78,24 +71,6 @@ default:
- data_integrity_failure
- unknown_failure
stages:
- sanity
- container
- git-archive
- build-x86_64
- build-misc
- lint
- amd
- intel
- nouveau
- arm
- broadcom
- freedreno
- etnaviv
- software-renderer
- layered-backends
- deploy
include:
- project: 'freedesktop/ci-templates'
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
@@ -134,6 +109,24 @@ include:
- local: 'src/panfrost/ci/gitlab-ci.yml'
- local: 'src/virtio/ci/gitlab-ci.yml'
stages:
- sanity
- container
- git-archive
- build-x86_64
- build-misc
- lint
- amd
- intel
- nouveau
- arm
- broadcom
- freedreno
- etnaviv
- software-renderer
- layered-backends
- deploy
# YAML anchors for rule conditions
# --------------------------------
@@ -153,49 +146,29 @@ include:
when: on_success
.container+build-rules:
# When to automatically run the CI for build jobs
.build-rules:
rules:
# Run when re-enabling a disabled farm, but not when disabling it
- !reference [.disable-farm-mr-rules, rules]
# Run pipeline by default in the main project if any CI pipeline
# configuration files were changed, to ensure docker images are up to date
- if: *is-post-merge
changes: &all_paths
- VERSION
- bin/git_sha1_gen.py
- bin/install_megadrivers.py
- bin/symbols-check.py
# GitLab CI
- .gitlab-ci.yml
- .gitlab-ci/**/*
# Meson
- meson*
- build-support/**/*
- subprojects/**/*
# Source code
- include/**/*
- src/**/*
# If any files affecting the pipeline are changed, build/test jobs run
# automatically once all dependency jobs have passed
- changes: &all_paths
- VERSION
- bin/git_sha1_gen.py
- bin/install_megadrivers.py
- bin/symbols-check.py
# GitLab CI
- .gitlab-ci.yml
- .gitlab-ci/**/*
# Meson
- meson*
- build-support/**/*
- subprojects/**/*
# Source code
- include/**/*
- src/**/*
- .ci-farms/*
when: on_success
# Run pipeline by default if it was triggered by Marge Bot, is for a
# merge request, and any files affecting the pipeline were changed
- if: *is-pre-merge-for-marge
changes:
*all_paths
when: on_success
# Run pipeline by default in the main project if it was not triggered by
# Marge Bot, and any files affecting the pipeline were changed
- if: *is-post-merge-not-for-marge
changes:
*all_paths
when: on_success
# Just skip everything for MRs which don't actually change anything in the
# build - the same rules as above, but without the file-change rules
- if: *is-pre-merge-for-marge
when: never
- if: *is-post-merge
when: never
# Always allow user branches etc to trigger jobs manually
- when: manual
# Otherwise, build/test jobs won't run because no rule matched.
.ci-deqp-artifacts:
@@ -210,6 +183,34 @@ include:
- _build/meson-logs/*.txt
- _build/meson-logs/strace
.container-rules:
rules:
# Run pipeline by default in the main project if any CI pipeline
# configuration files were changed, to ensure docker images are up to date
- if: *is-post-merge
changes:
- .gitlab-ci.yml
- .gitlab-ci/**/*
when: on_success
# Run pipeline by default if it was triggered by Marge Bot, is for a
# merge request, and any files affecting the pipeline were changed
- if: *is-pre-merge-for-marge
changes:
*all_paths
when: on_success
# Run pipeline by default in the main project if it was not triggered by
# Marge Bot, and any files affecting the pipeline were changed
- if: *is-post-merge-not-for-marge
changes:
*all_paths
when: on_success
# Allow triggering jobs manually in other cases if any files affecting the
# pipeline were changed
- changes:
*all_paths
when: manual
# Otherwise, container jobs won't run because no rule matched.
# Git archive
make git archive:
@@ -224,8 +225,6 @@ make git archive:
script:
# Compactify the .git directory
- git gc --aggressive
# Download & cache the perfetto subproject as well.
- rm -rf subprojects/perfetto ; mkdir -p subprojects/perfetto && curl https://android.googlesource.com/platform/external/perfetto/+archive/$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3).tar.gz | tar zxf - -C subprojects/perfetto
# compress the current folder
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
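
For readability, the perfetto caching one-liner above unrolls to roughly the following, assuming subprojects/perfetto.wrap carries a `revision =` line:

rm -rf subprojects/perfetto
mkdir -p subprojects/perfetto
# extract the pinned revision from the Meson wrap file
rev=$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3)
curl "https://android.googlesource.com/platform/external/perfetto/+archive/${rev}.tar.gz" \
  | tar zxf - -C subprojects/perfetto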

View File

@@ -2,13 +2,6 @@
# non-zero-length and not starting with '#', will regex match to
# delete lines from the test list. Be careful.
# This test checks the driver's reported conformance version against the
# version of the CTS we're running. This check fails every few months
# and everyone has to go and bump the number in every driver.
# Running this check only makes sense while preparing a conformance
# submission, so skip it in the regular CI.
dEQP-VK.api.driver_properties.conformance_version
# These are tremendously slow (pushing toward a minute), and aren't
# reliable to be run in parallel with other tests due to CPU-side timing.
dEQP-GLES[0-9]*.functional.flush_finish.*
@@ -36,3 +29,6 @@ spec@!opengl 1.1@windowoverlap
# Note that "glx-" tests don't appear in x11-skips.txt because they can be
# run even if PIGLIT_PLATFORM=gbm (for example)
glx@glx-copy-sub-buffer.*
# Reads the front buffer but it doesn't have to.
# https://gitlab.freedesktop.org/mesa/piglit/-/merge_requests/755
glx-swap-copy

View File

@@ -50,8 +50,8 @@ deployment:
SALAD.machine_id={{ '{{' }} machine_id }}
console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
loglevel={{ log_level }} no_hash_pointers
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/telegraf-container:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/machine_registration:latest check"
b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
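
The doubled braces in this template are Jinja escapes: {{ '{{' }} renders as a literal {{, so strings like {{ '{{' }} machine_id }} survive this templating pass and reach the next consumer as {{ machine_id }}. A quick way to verify, assuming python3 with jinja2 installed:

python3 - <<'EOF'
import jinja2
print(jinja2.Template("{{ '{{' }} machine_id }}").render())
# prints: {{ machine_id }}
EOF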

View File

@@ -83,10 +83,11 @@ mkdir -p /nfs/results
rm -rf /tftp/*
if echo "$BM_KERNEL" | grep -q http; then
apt-get install -y curl
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
$BM_KERNEL -o /tftp/vmlinuz
else
cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
cp $BM_KERNEL /tftp/vmlinuz
fi
echo "$BM_CMDLINE" > /tftp/cmdline

View File

@@ -1,13 +1,33 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import queue
import re
import sys
from serial_buffer import SerialBuffer
import sys
import threading
class CrosServoRun:
@@ -43,7 +63,6 @@ class CrosServoRun:
self.ec_write("reboot\n")
bootloader_done = False
tftp_failures = 0
# This is emitted right when the bootloader pauses to check for input.
# Emit a ^N character to request network boot, because we don't have a
# direct-to-netboot firmware on cheza.
@@ -53,17 +72,6 @@ class CrosServoRun:
bootloader_done = True
break
# The Cheza firmware seems to occasionally get stuck looping in
# this error state during TFTP booting, possibly based on amount of
# network traffic around it, but it'll usually recover after a
# reboot. Currently mostly visible on google-freedreno-cheza-14.
if re.search("R8152: Bulk read error 0xffffffbf", line):
tftp_failures += 1
if tftp_failures >= 10:
self.print_error(
"Detected intermittent tftp failure, restarting run.")
return 1
# If the board has a netboot firmware and we made it to booting the
# kernel, proceed to processing of the test run.
if re.search("Booting Linux", line):
@@ -75,29 +83,41 @@ class CrosServoRun:
# in the farm.
if re.search("POWER_GOOD not seen in time", line):
self.print_error(
"Detected intermittent poweron failure, abandoning run.")
return 1
"Detected intermittent poweron failure, restarting run...")
return 2
if not bootloader_done:
print("Failed to make it through bootloader, abandoning run.")
return 1
print("Failed to make it through bootloader, restarting run...")
return 2
tftp_failures = 0
for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
if re.search("---. end Kernel panic", line):
return 1
# The Cheza firmware seems to occasionally get stuck looping in
# this error state during TFTP booting, possibly based on amount of
# network traffic around it, but it'll usually recover after a
# reboot.
if re.search("R8152: Bulk read error 0xffffffbf", line):
tftp_failures += 1
if tftp_failures >= 100:
self.print_error(
"Detected intermittent tftp failure, restarting run...")
return 2
# There are very infrequent bus errors during power management transitions
# on cheza, which we don't expect to be the case on future boards.
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
self.print_error(
"Detected cheza power management bus error, abandoning run.")
return 1
"Detected cheza power management bus error, restarting run...")
return 2
# If the network device dies, it's probably not graphics's fault, just try again.
if re.search("NETDEV WATCHDOG", line):
self.print_error(
"Detected network device failure, abandoning run.")
return 1
"Detected network device failure, restarting run...")
return 2
# These HFI response errors started appearing with the introduction
# of piglit runs. CosmicPenguin says:
@@ -110,17 +130,17 @@ class CrosServoRun:
# break many tests after that, just restart the whole run.
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
self.print_error(
"Detected cheza power management bus error, abandoning run.")
return 1
"Detected cheza power management bus error, restarting run...")
return 2
if re.search("coreboot.*bootblock starting", line):
self.print_error(
"Detected spontaneous reboot, abandoning run.")
return 1
"Detected spontaneous reboot, restarting run...")
return 2
if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
self.print_error("Detected cheza MMU fail, abandoning run.")
return 1
self.print_error("Detected cheza MMU fail, restarting run...")
return 2
result = re.search("hwci: mesa: (\S*)", line)
if result:
@@ -131,7 +151,7 @@ class CrosServoRun:
self.print_error(
"Reached the end of the CPU serial log without finding a result")
return 1
return 2
def main():
@@ -144,14 +164,16 @@ def main():
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
args = parser.parse_args()
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
retval = servo.run()
while True:
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
retval = servo.run()
# power down the CPU on the device
servo.ec_write("power off\n")
servo.close()
# power down the CPU on the device
servo.ec_write("power off\n")
servo.close()
sys.exit(retval)
if retval != 2:
sys.exit(retval)
if __name__ == '__main__':
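
The paired hunks above encode an exit-code convention for these runner scripts: return 1 for a hard failure, return 2 for a transient failure worth retrying, with main() looping until it sees anything other than 2. A minimal shell sketch of the same pattern (run_attempt is a stand-in for servo.run(); the per-attempt power-off is elided):

while true; do
  run_attempt
  status=$?
  [ "$status" -ne 2 ] && break
  echo "transient failure, restarting run..."
done
exit "$status"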

View File

@@ -105,6 +105,8 @@ fi
# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
# fetch them instead of looking in the container.
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
apt-get install -y curl
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
"$BM_KERNEL" -o kernel
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
@@ -113,8 +115,8 @@ if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
cat kernel dtb > Image.gz-dtb
rm kernel
else
cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
cp /baremetal-files/"$BM_DTB".dtb dtb
cat $BM_KERNEL $BM_DTB > Image.gz-dtb
cp $BM_DTB dtb
fi
export PATH=$BM:$PATH
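
The cat kernel dtb > Image.gz-dtb step follows the appended-device-tree convention these fastboot bootloaders expect: the flattened device tree is concatenated directly after the compressed kernel image. Standalone, with placeholder file names:

# my-board.dtb stands in for the board's device tree blob
cat Image.gz my-board.dtb > Image.gz-dtb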

View File

@@ -51,8 +51,8 @@ class FastbootRun:
try:
return subprocess.call(cmd, shell=True, timeout=timeout)
except subprocess.TimeoutExpired:
self.print_error("timeout, abandoning run.")
return 1
self.print_error("timeout, restarting run...")
return 2
def run(self):
if ret := self.logged_system(self.powerup):
@@ -60,20 +60,20 @@ class FastbootRun:
fastboot_ready = False
for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \
if re.search("fastboot: processing commands", line) or \
re.search("Listening for fastboot command on", line):
fastboot_ready = True
break
if re.search("data abort", line):
self.print_error(
"Detected crash during boot, abandoning run.")
return 1
"Detected crash during boot, restarting run...")
return 2
if not fastboot_ready:
self.print_error(
"Failed to get to fastboot prompt, abandoning run.")
return 1
"Failed to get to fastboot prompt, restarting run...")
return 2
if ret := self.logged_system(self.fastboot):
return ret
@@ -81,7 +81,7 @@ class FastbootRun:
print_more_lines = -1
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
if print_more_lines == 0:
return 1
return 2
if print_more_lines > 0:
print_more_lines -= 1
@@ -92,20 +92,20 @@ class FastbootRun:
# when we see a reboot after we got past fastboot.
if re.search("PON REASON", line):
self.print_error(
"Detected spontaneous reboot, abandoning run.")
return 1
"Detected spontaneous reboot, restarting run...")
return 2
# db820c sometimes wedges around iommu fault recovery
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
self.print_error(
"Detected kernel soft lockup, abandoning run.")
return 1
"Detected kernel soft lockup, restarting run...")
return 2
# If the network device dies, it's probably not graphics's fault, just try again.
if re.search("NETDEV WATCHDOG", line):
self.print_error(
"Detected network device failure, abandoning run.")
return 1
"Detected network device failure, restarting run...")
return 2
# A3xx recovery doesn't quite work. Sometimes the GPU will get
# wedged and recovery will fail (because power can't be reset?)
@@ -115,7 +115,7 @@ class FastbootRun:
# of the hang. Once a hang happens, it's pretty chatty.
if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
self.print_error(
"Detected GPU hang, abandoning run.")
"Detected GPU hang, restarting run...")
if print_more_lines == -1:
print_more_lines = 30
@@ -127,8 +127,8 @@ class FastbootRun:
return 1
self.print_error(
"Reached the end of the CPU serial log without finding a result, abandoning run.")
return 1
"Reached the end of the CPU serial log without finding a result, restarting run...")
return 2
def main():
@@ -147,8 +147,13 @@ def main():
fastboot = FastbootRun(args, args.test_timeout * 60)
retval = fastboot.run()
fastboot.close()
while True:
retval = fastboot.run()
fastboot.close()
if retval != 2:
break
fastboot = FastbootRun(args, args.test_timeout * 60)
fastboot.logged_system(args.powerdown)

View File

@@ -91,6 +91,7 @@ date +'%F %T'
# If BM_BOOTFS is an URL, download it
if echo $BM_BOOTFS | grep -q http; then
apt-get install -y curl
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
"${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS" -o /tmp/bootfs.tar
BM_BOOTFS=/tmp/bootfs.tar
@@ -158,7 +159,7 @@ echo "$BM_CMDLINE" > /tftp/cmdline.txt
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
set +e
ATTEMPTS=3
ATTEMPTS=10
while [ $((ATTEMPTS--)) -gt 0 ]; do
python3 $BM/poe_run.py \
--dev="$BM_SERIAL" \
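
This retry loop leans on the $((ATTEMPTS--)) idiom: the arithmetic expansion yields the current value and then decrements it, so the body runs at most ATTEMPTS times. The countdown on its own, with flaky_step as a placeholder for the poe_run.py invocation (the real loop also inspects its exit status):

ATTEMPTS=10
while [ $((ATTEMPTS--)) -gt 0 ]; do
  flaky_step && break
done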

View File

@@ -59,7 +59,7 @@ class PoERun:
if not boot_detected:
self.print_error(
"Something wrong; couldn't detect the boot start up sequence")
return 1
return 2
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
if re.search("---. end Kernel panic", line):
@@ -71,13 +71,13 @@ class PoERun:
return 1
if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
self.print_error("nouveau jetson boot bug, abandoning run.")
return 1
self.print_error("nouveau jetson boot bug, retrying.")
return 2
# network fail on tk1
if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line):
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
return 1
self.print_error("nouveau jetson tk1 network fail, retrying.")
return 2
result = re.search("hwci: mesa: (\S*)", line)
if result:
@@ -88,7 +88,7 @@ class PoERun:
self.print_error(
"Reached the end of the CPU serial log without finding a result")
return 1
return 2
def main():

View File

@@ -19,7 +19,6 @@ date +'%F %T'
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
cp $CI_COMMON/kdl.sh $rootfs_dst/
cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
set +x

View File

@@ -1,16 +1,8 @@
# Shared between windows and Linux
.build-common:
extends: .container+build-rules
extends: .build-rules
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
# Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner
# without a populated ccache.
# These jobs are never slow, either they finish within reasonable time or
# something has gone wrong and the job will never terminate, so we should
# instead timeout so that the retry mechanism can kick in.
# A few exception are made, see `timeout:` overrides in the rest of this
# file.
timeout: 30m
artifacts:
name: "mesa_${CI_JOB_NAME}"
when: always
@@ -43,9 +35,12 @@
- !reference [default, after_script]
.build-windows:
extends:
- .build-common
- .windows-docker-tags
extends: .build-common
tags:
- windows
- docker
- "2022"
- mesa
cache:
key: ${CI_JOB_NAME}
paths:
@@ -85,7 +80,7 @@ debian-testing:
-D dri3=enabled
-D gallium-va=enabled
GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915"
VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio,nouveau-experimental"
VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio"
BUILDTYPE: "debugoptimized"
EXTRA_OPTION: >
-D spirv-to-dxil=true
@@ -152,6 +147,15 @@ debian-testing-msan:
- .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
# TODO: remove together with Clover
.debian-clover-testing:
extends:
- .debian-cl-testing
variables:
GALLIUM_ST: >
-D gallium-opencl=icd
-D opencl-spirv=true
debian-rusticl-testing:
extends:
- .debian-cl-testing
@@ -199,7 +203,6 @@ debian-build-testing:
.gitlab-ci/meson/build.sh
section_switch shader-db "shader-db"
.gitlab-ci/run-shader-db.sh
timeout: 30m
# Test a release build with -Werror so new warnings don't sneak in.
debian-release:
@@ -342,6 +345,7 @@ debian-android:
-Wno-error=enum-conversion
-Wno-error=initializer-overrides
-Wno-error=sometimes-uninitialized
-Wno-error=implicit-const-int-float-conversion
CPP_ARGS: >
-Wno-error=c99-designator
-Wno-error=unused-variable
@@ -458,7 +462,6 @@ debian-arm64:
-D valgrind=disabled
-D imagination-srv=true
-D perfetto=true
-D freedreno-kmds=msm,virtio
S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
script:
- .gitlab-ci/meson/build.sh
@@ -512,10 +515,12 @@ debian-clang:
C_ARGS: >
-Wno-error=constant-conversion
-Wno-error=enum-conversion
-Wno-error=implicit-const-int-float-conversion
-Wno-error=initializer-overrides
-Wno-error=sometimes-uninitialized
CPP_ARGS: >
-Wno-error=c99-designator
-Wno-error=implicit-const-int-float-conversion
-Wno-error=overloaded-virtual
-Wno-error=tautological-constant-out-of-range-compare
-Wno-error=unused-private-field
@@ -604,6 +609,21 @@ windows-vs2019:
EXTRA_OPTION: >
-D valgrind=disabled
# TODO: remove with Clover
.debian-clover:
extends: .debian-cl
variables:
GALLIUM_DRIVERS: "r600,radeonsi,swrast"
GALLIUM_ST: >
-D dri3=disabled
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=disabled
-D gallium-xa=disabled
-D gallium-nine=false
-D gallium-opencl=icd
-D gallium-rusticl=false
debian-rusticl:
extends: .debian-cl
variables:
@@ -645,7 +665,7 @@ debian-vulkan:
-D c_args=-fno-sanitize-recover=all
-D cpp_args=-fno-sanitize-recover=all
UBSAN_OPTIONS: "print_stacktrace=1"
VULKAN_DRIVERS: amd,broadcom,freedreno,intel,intel_hasvk,virtio,imagination-experimental,microsoft-experimental
VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,imagination-experimental,microsoft-experimental
EXTRA_OPTION: >
-D vulkan-layers=device-select,overlay
-D build-aco-tests=true
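
The GALLIUM_DRIVERS and VULKAN_DRIVERS variables in these jobs are consumed by .gitlab-ci/meson/build.sh; as a hedged sketch of how one set of values maps onto Meson options, the configure step is roughly:

meson setup _build \
  -D gallium-drivers=swrast,virgl,radeonsi,zink,crocus,iris,i915 \
  -D vulkan-drivers=swrast,amd,intel,intel_hasvk,virtio \
  -D buildtype=debugoptimized
ninja -C _build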

View File

@@ -69,7 +69,7 @@ for var in \
JOB_ARTIFACTS_BASE \
JOB_RESULTS_PATH \
JOB_ROOTFS_OVERLAY_PATH \
KERNEL_IMAGE_BASE \
KERNEL_IMAGE_BASE_URL \
KERNEL_IMAGE_NAME \
LD_LIBRARY_PATH \
LP_NUM_THREADS \

View File

@@ -86,7 +86,7 @@ if [ "$HWCI_KVM" = "true" ]; then
mkdir -p /lava-files
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-o "/lava-files/${KERNEL_IMAGE_NAME}" \
"${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
"${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
fi
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
@@ -127,15 +127,6 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then
/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi
# Start a little daemon to capture sysfs records and produce a JSON file
if [ -x /kdl.sh ]; then
echo "launch kdl.sh!"
/kdl.sh &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
else
echo "kdl.sh not found!"
fi
# Increase freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
@@ -216,11 +207,7 @@ fi
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
set +x
# Print the final result; both bare-metal and LAVA look for this string to get
# the result of our run, so try really hard to get it out rather than losing
# the run. The device gets shut down right at this point, and a630 seems to
# enjoy corrupting the last line of serial output before shutdown.
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done
echo "hwci: mesa: $RESULT"
# Sleep a bit to avoid kernel dump message interleave from LAVA ENDTC signal
sleep 1
exit $EXIT_CODE

View File

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091 # the path is created in build-kdl and
# here we check that it exists
terminate() {
echo "ci-kdl.sh caught SIGTERM signal! propagating to child processes"
for job in $(jobs -p)
do
kill -15 "$job"
done
}
trap terminate SIGTERM
if [ -f /ci-kdl.venv/bin/activate ]; then
source /ci-kdl.venv/bin/activate
/ci-kdl.venv/bin/python /ci-kdl.venv/bin/ci-kdl | tee -a /results/kdl.log &
child=$!
wait $child
mv kdl_*.json /results/kdl.json
else
echo -e "Not possible to activate ci-kdl virtual environment"
fi

View File

@@ -1,14 +1,18 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# ALPINE_X86_64_BUILD_TAG
set -e
set -o xtrace
EPHEMERAL=(
autoconf
automake
bzip2
libtool
libepoxy-dev
libtbb-dev
make
openssl-dev
unzip
)
@@ -50,6 +54,13 @@ apk add "${DEPS[@]}" "${EPHEMERAL[@]}"
. .gitlab-ci/container/container_pre_build.sh
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
############### Uninstall the build software

View File

@@ -0,0 +1,69 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DRM=y
CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_MFD_RK808=y
CONFIG_REGULATOR_RK808=y
CONFIG_RTC_DRV_RK808=y
CONFIG_COMMON_CLK_RK808=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=n
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
# TK1
CONFIG_ARM_TEGRA_DEVFREQ=y
# 32-bit build failure
CONFIG_DRM_MSM=n
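
A fragment like this is not a complete kernel config; it is merged on top of a defconfig at build time via the merge_config.sh call shown later in this compare. For the 32-bit arm case that is approximately:

./scripts/kconfig/merge_config.sh \
  arch/arm/configs/multi_v7_defconfig \
  ../.gitlab-ci/container/arm.config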

View File

@@ -0,0 +1,197 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_MSM=y
CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_STMMAC_ETH=y
CONFIG_TYPEC_FUSB302=y
CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
# MSM platform bits
# For CONFIG_QCOM_LMH
CONFIG_OF=y
CONFIG_ARM_SMMU_QCOM=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_RPMPD=y
CONFIG_QCOM_OCMEM=y
CONFIG_SDM_GPUCC_845=y
CONFIG_SDM_VIDEOCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SDM_LPASSCC_845=y
CONFIG_SDM_CAMCC_845=y
CONFIG_RESET_QCOM_PDC=y
CONFIG_DRM_TI_SN65DSI86=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI_QCOM_GENI=y
CONFIG_PHY_QCOM_QUSB2=y
CONFIG_PHY_QCOM_QMP=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_LMH=y
CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_QCOM_WDT=y
CONFIG_POWER_RESET_QCOM_PON=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8996=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_MSM8916=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
CONFIG_INTERCONNECT_QCOM_SC7180=y
CONFIG_INTERCONNECT_QCOM_SM8350=y
CONFIG_CRYPTO_DEV_QCOM_RNG=y
CONFIG_SC_DISPCC_7180=y
CONFIG_SC_GPUCC_7180=y
CONFIG_SM_GPUCC_8350=y
CONFIG_QCOM_SPMI_ADC5=y
CONFIG_DRM_PARADE_PS8640=y
CONFIG_DRM_LONTIUM_LT9611UXC=y
CONFIG_PHY_QCOM_USB_HS=y
CONFIG_QCOM_GPI_DMA=y
CONFIG_USB_ONBOARD_HUB=y
CONFIG_NVMEM_QCOM_QFPROM=y
CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
# db410c ethernet
CONFIG_USB_RTL8152=y
# db820c ethernet
CONFIG_ATL1C=y
# Chromebooks ethernet
CONFIG_USB_ONBOARD_HUB=y
# 888 HDK ethernet
CONFIG_USB_LAN78XX=y
CONFIG_ARCH_ALPINE=n
CONFIG_ARCH_BCM2835=n
CONFIG_ARCH_BCM_IPROC=n
CONFIG_ARCH_BERLIN=n
CONFIG_ARCH_BRCMSTB=n
CONFIG_ARCH_EXYNOS=n
CONFIG_ARCH_K3=n
CONFIG_ARCH_LAYERSCAPE=n
CONFIG_ARCH_LG1K=n
CONFIG_ARCH_HISI=n
CONFIG_ARCH_MVEBU=n
CONFIG_ARCH_SEATTLE=n
CONFIG_ARCH_SYNQUACER=n
CONFIG_ARCH_RENESAS=n
CONFIG_ARCH_R8A774A1=n
CONFIG_ARCH_R8A774C0=n
CONFIG_ARCH_R8A7795=n
CONFIG_ARCH_R8A7796=n
CONFIG_ARCH_R8A77965=n
CONFIG_ARCH_R8A77970=n
CONFIG_ARCH_R8A77980=n
CONFIG_ARCH_R8A77990=n
CONFIG_ARCH_R8A77995=n
CONFIG_ARCH_STRATIX10=n
CONFIG_ARCH_TEGRA=n
CONFIG_ARCH_SPRD=n
CONFIG_ARCH_THUNDER=n
CONFIG_ARCH_THUNDER2=n
CONFIG_ARCH_UNIPHIER=n
CONFIG_ARCH_VEXPRESS=n
CONFIG_ARCH_XGENE=n
CONFIG_ARCH_ZX=n
CONFIG_ARCH_ZYNQMP=n
# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_USER_HELPER=n
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
# For amlogic
CONFIG_MESON_GXL_PHY=y
CONFIG_MDIO_BUS_MUX_MESON_G12A=y
CONFIG_DRM_MESON=y
# For Mediatek
CONFIG_DRM_MEDIATEK=y
CONFIG_PWM_MEDIATEK=y
CONFIG_DRM_MEDIATEK_HDMI=y
CONFIG_GNSS=y
CONFIG_GNSS_MTK_SERIAL=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_TEGRA=m
CONFIG_R8169=y
CONFIG_STAGING=y
CONFIG_DRM_TEGRA_STAGING=y
CONFIG_TEGRA_HOST1X=y
CONFIG_ARM_TEGRA_DEVFREQ=y
CONFIG_TEGRA_SOCTHERM=y
CONFIG_DRM_TEGRA_DEBUG=y
CONFIG_PWM_TEGRA=y

View File

@@ -24,11 +24,11 @@ if [[ $arch == "arm64" ]]; then
pushd /baremetal-files
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}"/arm64/Image
-O "${ARTIFACTS_URL}"/Image
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
-O "${ARTIFACTS_URL}"/Image.gz
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel
-O "${ARTIFACTS_URL}"/cheza-kernel
DEVICE_TREES=""
DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
@@ -38,7 +38,7 @@ if [[ $arch == "arm64" ]]; then
for DTB in $DEVICE_TREES; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}/arm64/$DTB"
-O "${ARTIFACTS_URL}/$DTB"
done
popd
@@ -47,7 +47,7 @@ elif [[ $arch == "armhf" ]]; then
pushd /baremetal-files
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}"/armhf/zImage
-O "${ARTIFACTS_URL}"/zImage
DEVICE_TREES=""
DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
@@ -55,7 +55,7 @@ elif [[ $arch == "armhf" ]]; then
for DTB in $DEVICE_TREES; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}/armhf/$DTB"
-O "${ARTIFACTS_URL}/$DTB"
done
popd

View File

@@ -2,7 +2,7 @@
set -ex
ANGLE_REV="0518a3ff4d4e7e5b2ce8203358f719613a31c118"
ANGLE_REV="82f1cee01a9ea24960e8f23b24d348fccbe0aae0"
# DEPOT tools
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git

View File

@@ -6,13 +6,13 @@ set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
CROSVM_VERSION=e3815e62d675ef436956a992e0ed58b7309c759d
CROSVM_VERSION=d0cbf0b23eb4bd2355b011184025c7c5d8749376
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
git submodule update --init
VIRGLRENDERER_VERSION=10120c0d9ebdc00eae1b5c9f7c98fc0d198ba602
VIRGLRENDERER_VERSION=45bb2449b81336b88c267b1c1735f3b4946c7b3a
rm -rf third_party/virglrenderer
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
pushd third_party/virglrenderer
@@ -25,7 +25,6 @@ cargo update -p pkg-config@0.3.26 --precise 0.3.27
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen-cli \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local \
--version 0.65.1 \

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/sh
# shellcheck disable=SC2086 # we want word splitting
set -ex

View File

@@ -14,7 +14,7 @@ git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/KhronosGroup/VK-GL-CTS.git \
-b vulkan-cts-1.3.7.0 \
-b vulkan-cts-1.3.5.2 \
--depth 1 \
/VK-GL-CTS
pushd /VK-GL-CTS
@@ -26,6 +26,46 @@ pushd /VK-GL-CTS
# patches.
cts_commits_to_backport=(
# sync fix for SSBO writes
44f1be32fe6bd2a7de7b9169fc71cc44e0b26124
# sync fix for KHR-GL46.multi_bind.dispatch_bind_image_textures
db6c9e295ab38054ace425cb75ff966719ccc609
# VK robustness barriers fix
6052f21c4d6077438d644f525c10cc58dcdf25bf
# correctness fixes for zink validation fails
1923cbc89ed3969a3afe7c6926124b51157902e1
af3a979c49dc65f8809c27660405ae3a76c7da4a
# GL/GLES vertex_attrib_binding.advanced-largeStrideAndOffsetsNewAndLegacyAPI fix
bdb456dcf85e34fced872ebdaf06f6b73451f99c
# KHR-GLES31.core.compute_shader.max fix
7aa3ebb49d07982f5c44edd4799edb5a894567e9
# GL arrays_of_arrays perf fix
b481dada59734e8e34050fe884ba6d627d9e5c54
# GL shadow samplers require depth compares fix
a8bc242ec234bf8d7df8b4eec1eeccab4e401288
# GL PolygonOffsetClamp fix
1f2feb2388da88b4e46eba55547d50856467cc20
# KHR-GL46.texture_view.view_sampling fix
aca29fb9553ebe28094513ce18bb46bad138cf46
# video validation fails
4cc3980a86ba5b7fe6e76b559cc1a9cb5fd1b253
a7a2ce442db51ca058ce051de7e09d62db44ae81
# Check for robustness before testing it
ee7138d8adf5ed3c4845e5ac2553c4f9697be9d8
# dEQP-VK.wsi.acquire_drm_display.*invalid_fd
98ad9402e7d94030d1689fd59135da7a2f52384c
)
for commit in "${cts_commits_to_backport[@]}"
@@ -83,14 +123,12 @@ cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
-DCMAKE_BUILD_TYPE=Release \
$EXTRA_CMAKE_ARGS
mold --run ninja
ninja
if [ "${DEQP_TARGET}" = 'android' ]; then
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android
fi
git -C /VK-GL-CTS describe --long > /deqp/version
# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
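
The body of the backport loop is cut off by the hunk above; it amounts to cherry-picking each listed fix onto the checked-out CTS tag, roughly as follows (the -x flag is illustrative):

for commit in "${cts_commits_to_backport[@]}"; do
  git cherry-pick -x "$commit"
done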

View File

@@ -1,15 +1,10 @@
#!/bin/bash
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
set -ex
git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
git checkout 16fba1b8b5d9310126bb02323d7bae3227338461
git submodule update --init
mkdir build
cd build

View File

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091 # the path is created by the script
set -ex
KDL_REVISION="5056f71b100a68b72b285c6fc845a66a2ed25985"
git clone \
https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git \
--depth 1 \
ci-kdl.git
pushd ci-kdl.git
git checkout ${KDL_REVISION}
popd
python3 -m venv ci-kdl.venv
source ci-kdl.venv/bin/activate
pushd ci-kdl.git
pip install -r requirements.txt
pip install .
popd
rm -rf ci-kdl.git

View File

@@ -1,30 +1,53 @@
#!/usr/bin/env bash
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC2153
set -ex
mkdir -p kernel
curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 ${KERNEL_URL} \
| tar -xj --strip-components=1 -C kernel
pushd kernel
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
KERNEL_IMAGE_NAME+=" cheza-kernel"
fi
for image in ${KERNEL_IMAGE_NAME}; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-o "/lava-files/${image}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${image}"
# The kernel doesn't like the gold linker (or the old lld in our debians).
# Sneak in some override symlinks during kernel build until we can update
# debian (they'll get blown away by the rm of the kernel dir at the end).
mkdir -p ld-links
for i in /usr/bin/*-ld /usr/bin/ld; do
i=$(basename $i)
ln -sf /usr/bin/$i.bfd ld-links/$i
done
for dtb in ${DEVICE_TREES}; do
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-o "/lava-files/${dtb}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${dtb}"
done
NEWPATH=$(pwd)/ld-links
export PATH=$NEWPATH:$PATH
mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}"
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
-O "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst"
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/lava-files/rootfs-${DEBIAN_ARCH}/"
KERNEL_FILENAME=$(basename $KERNEL_URL)
export LOCALVERSION="$KERNEL_FILENAME"
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make ${KERNEL_IMAGE_NAME}
for image in ${KERNEL_IMAGE_NAME}; do
cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
cp ${DEVICE_TREES} /lava-files/.
fi
make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
make Image.lzma
mkimage \
-f auto \
-A arm \
-O linux \
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
/lava-files/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
fi
popd
rm -rf kernel

View File

@@ -1,14 +1,9 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting
set -ex
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG
REV="f7db20b03de6896d013826c0a731bc4417c1a5a0"
REV="5036601c43fff63f7be5cd8ad7b319a5c1f6652c"
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit

View File

@@ -11,11 +11,11 @@ set -ex
mkdir -p "$HOME"/.cargo
ln -s /usr/local/bin "$HOME"/.cargo/bin
# Rusticl requires at least Rust 1.66.0
# Rusticl requires at least Rust 1.60.0
#
# Also, pick a specific snapshot from rustup so the compiler doesn't drift on
# us.
RUST_VERSION=1.66.1-2023-01-10
RUST_VERSION=1.60.0-2022-04-07
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BUILD_TAG
set -ex
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd

View File

@@ -6,7 +6,7 @@
# KERNEL_ROOTFS_TAG
set -ex
VKD3D_PROTON_COMMIT="2ad6cfdeaacdf47e2689e30a8fb5ac8193725f0d"
VKD3D_PROTON_COMMIT="6365efeba253807beecaed0eaa963295522c6b70"
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"

View File

@@ -7,7 +7,7 @@
set -ex
VALIDATION_TAG="v1.3.263"
VALIDATION_TAG="v1.3.251"
git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
pushd Vulkan-ValidationLayers

View File

@@ -15,7 +15,6 @@ dpkg --add-architecture $arch
apt-get update
apt-get install -y --no-remove \
$EXTRA_LOCAL_PACKAGES \
$STABLE_EPHEMERAL \
crossbuild-essential-$arch \
pkgconf:$arch \

View File

@@ -18,7 +18,6 @@ STABLE_EPHEMERAL=" \
apt-get update
apt-get install -y --no-remove \
$EXTRA_LOCAL_PACKAGES \
$STABLE_EPHEMERAL \
apt-utils \
bison \
@@ -58,6 +57,7 @@ apt-get install -y --no-remove \
libxxf86vm-dev \
libwayland-egl-backend-dev \
make \
meson \
ninja-build \
openssh-server \
pkgconf \
@@ -79,9 +79,6 @@ apt-get install -y --no-remove \
# Needed for ci-fairy, this revision is able to upload files to S3
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
# We need at least 1.2 for Rust's `debug_assertions`
pip3 install --break-system-packages meson==1.2.0
. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/debian/x86_64_build-base-wine.sh

View File

@@ -1,10 +1,6 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BUILD_TAG
set -e
set -o xtrace
@@ -78,7 +74,12 @@ rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-wayland.sh
. .gitlab-ci/container/build-shader-db.sh
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
git clone https://github.com/microsoft/DirectX-Headers -b v1.711.3-preview --depth 1
pushd DirectX-Headers
@@ -92,7 +93,6 @@ python3 -m pip install --break-system-packages -r .gitlab-ci/lava/requirements.t
# install bindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen-cli --version 0.62.0 \
--locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local

View File

@@ -1,10 +1,6 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BASE_TAG
set -e
set -o xtrace
@@ -39,7 +35,6 @@ STABLE_EPHEMERAL=" \
libepoxy-dev \
libgbm-dev \
libpciaccess-dev \
libssl-dev
libvulkan-dev \
libwayland-dev \
libx11-xcb-dev \
@@ -128,8 +123,6 @@ pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedeskt
# Needed for manipulation with traces yaml files.
pip3 install --break-system-packages yq
. .gitlab-ci/container/build-mold.sh
############### Build LLVM-SPIRV translator
. .gitlab-ci/container/build-llvm-spirv.sh

View File

@@ -46,7 +46,6 @@ STABLE_EPHEMERAL=" \
apt-get update
apt-get install -y --no-remove \
$EXTRA_LOCAL_PACKAGES \
$STABLE_EPHEMERAL \
clinfo \
iptables \

View File

@@ -1,10 +1,5 @@
#!/usr/bin/env bash
# shellcheck disable=SC1091
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# FEDORA_X86_64_BUILD_TAG
set -e
set -o xtrace
@@ -19,7 +14,6 @@ EPHEMERAL=(
"pkgconfig(epoxy)"
"pkgconfig(gbm)"
"pkgconfig(openssl)"
python3-pip
unzip
xz
)
@@ -36,7 +30,7 @@ DEPS=(
glslang
kernel-headers
llvm-devel
ninja-build
meson
"pkgconfig(LLVMSPIRVLib)"
"pkgconfig(SPIRV-Tools)"
"pkgconfig(dri2proto)"
@@ -96,15 +90,19 @@ tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION
# We need at least 1.2 for Rust's `debug_assertions`
pip install meson==1.2.0
. .gitlab-ci/container/build-mold.sh
. .gitlab-ci/container/build-libdrm.sh
. .gitlab-ci/container/build-wayland.sh
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
############### Uninstall the build software

View File

@@ -47,7 +47,7 @@
.container:
stage: container
extends:
- .container+build-rules
- .container-rules
- .incorporate-templates-commit
- .use-wine
variables:
@@ -61,6 +61,8 @@
extends:
- .container
- .incorporate-base-tag+templates-commit
# Don't want the .container rules
- .build-rules
# Debian based x86_64 build image base
debian/x86_64_build-base:
@@ -190,7 +192,8 @@ debian/android_build:
debian/x86_64_test-base:
extends: debian/x86_64_build-base
variables:
MESA_IMAGE_TAG: &debian-x86_64_test-base "${DEBIAN_BASE_TAG}--${KERNEL_TAG}"
KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v6.3-for-mesa-ci-43c973a8ff91/linux-v6.3-for-mesa-ci-6fc749a2a59a.tar.bz2"
MESA_IMAGE_TAG: &debian-x86_64_test-base ${DEBIAN_BASE_TAG}
.use-debian/x86_64_test-base:
extends:
@@ -324,12 +327,13 @@ fedora/x86_64_build:
.kernel+rootfs:
extends:
- .container+build-rules
- .build-rules
stage: container
variables:
GIT_STRATEGY: fetch
KERNEL_URL: *kernel-rootfs-url
MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG}
DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
script:
- .gitlab-ci/container/lava_build.sh
@@ -340,7 +344,7 @@ kernel+rootfs_x86_64:
image: "$FDO_BASE_IMAGE"
variables:
DEBIAN_ARCH: "amd64"
DISTRIBUTION_TAG: &distribution-tag-x86_64 "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
DISTRIBUTION_TAG: &distribution-tag-x86_64 "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
kernel+rootfs_arm64:
extends:
@@ -374,11 +378,11 @@ kernel+rootfs_arm32:
- .fdo.container-build@debian
- .container
# Don't want the .container rules
- .container+build-rules
- .build-rules
variables:
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
ARTIFACTS_PREFIX: "https://${S3_HOST}/mesa-lava"
ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
MESA_ARTIFACTS_TAG: *debian-arm64_build
MESA_ROOTFS_TAG: *kernel-rootfs
@@ -403,7 +407,7 @@ debian/arm64_test:
MESA_ROOTFS_TAG: *kernel-rootfs
.use-debian/arm32_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/arm_test
variables:
@@ -413,7 +417,7 @@ debian/arm64_test:
- debian/arm_test
.use-debian/arm64_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/arm_test
variables:
@@ -438,8 +442,6 @@ debian/arm64_test:
variables:
MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}"
MESA_UPSTREAM_IMAGE: "$CI_REGISTRY/$FDO_UPSTREAM_REPO/$MESA_IMAGE_PATH:${MESA_IMAGE_TAG}"
extends:
- .windows-docker-tags
.windows_container_build:
inherit:
@@ -447,13 +449,17 @@ debian/arm64_test:
extends:
- .container
- .windows-docker-vs2019
- .windows-shell-tags
rules:
- !reference [.microsoft-farm-container-rules, rules]
- !reference [.container+build-rules, rules]
- !reference [.container-rules, rules]
variables:
GIT_STRATEGY: fetch # we do actually need the full repository though
MESA_BASE_IMAGE: None
tags:
- windows
- shell
- "2022"
- mesa
script:
- .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE}
@@ -475,7 +481,7 @@ windows_build_vs2019:
- .windows_container_build
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.container+build-rules, rules]
- !reference [.build-rules, rules]
variables:
MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH}
MESA_IMAGE_TAG: &windows_build_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_BUILD_TAG}
@@ -494,7 +500,7 @@ windows_test_vs2019:
- .windows_container_build
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.container+build-rules, rules]
- !reference [.build-rules, rules]
variables:
MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH}
MESA_IMAGE_TAG: &windows_test_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_TEST_TAG}

View File

@@ -35,20 +35,20 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
KERNEL_ARCH="arm64"
SKQP_ARCH="arm64"
DEFCONFIG="arch/arm64/configs/defconfig"
DEVICE_TREES="rk3399-gru-kevin.dtb"
DEVICE_TREES+=" meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" meson-gxl-s805x-libretech-ac.dtb"
DEVICE_TREES+=" meson-gxm-khadas-vim2.dtb"
DEVICE_TREES+=" sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" imx8mq-nitrogen.dtb"
DEVICE_TREES+=" mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" mt8183-kukui-jacuzzi-juniper-sku16.dtb"
DEVICE_TREES+=" tegra210-p3450-0000.dtb"
DEVICE_TREES+=" apq8016-sbc.dtb"
DEVICE_TREES+=" apq8096-db820c.dtb"
DEVICE_TREES+=" sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
DEVICE_TREES+=" sc7180-trogdor-kingoftown.dtb"
DEVICE_TREES+=" sm8350-hdk.dtb"
DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown-r1.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb"
KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
@@ -56,10 +56,10 @@ elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
KERNEL_ARCH="arm"
SKQP_ARCH="arm"
DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
DEVICE_TREES="rk3288-veyron-jaq.dtb"
DEVICE_TREES+=" sun8i-h3-libretech-all-h3-cc.dtb"
DEVICE_TREES+=" imx6q-cubox-i.dtb"
DEVICE_TREES+=" tegra124-jetson-tk1.dtb"
DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb"
DEVICE_TREES+=" arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
DEVICE_TREES+=" arch/arm/boot/dts/imx6q-cubox-i.dtb"
DEVICE_TREES+=" arch/arm/boot/dts/tegra124-jetson-tk1.dtb"
KERNEL_IMAGE_NAME="zImage"
. .gitlab-ci/container/create-cross-file.sh armhf
else
@@ -92,7 +92,6 @@ fi
apt-get update
apt-get install -y --no-remove \
-o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' \
${EXTRA_LOCAL_PACKAGES} \
${ARCH_PACKAGES} \
automake \
bc \
@@ -130,7 +129,6 @@ apt-get install -y --no-remove \
python3-mako \
python3-numpy \
python3-serial \
python3-venv \
unzip \
zstd
@@ -214,9 +212,6 @@ mmdebstrap \
"$ROOTFS/" \
"http://deb.debian.org/debian"
############### Install mold
. .gitlab-ci/container/build-mold.sh
############### Setup
if [ "$DEBIAN_ARCH" = "amd64" ]; then
. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
@@ -295,12 +290,6 @@ if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
mv /usr/local/libexec/virgl* $ROOTFS/usr/local/libexec/
fi
############### Build ci-kdl
section_start kdl "Prepare a venv for kdl"
. .gitlab-ci/container/build-kdl.sh
mv ci-kdl.venv $ROOTFS
section_end kdl
############### Build local stuff for use by igt and kernel testing, which
############### will reuse most of our container build process from a specific
############### hash of the Mesa tree.
@@ -334,16 +323,25 @@ if [ "${DEBIAN_ARCH}" = "arm64" ]; then
KERNEL_IMAGE_NAME+=" Image.gz"
fi
ROOTFSTAR="lava-rootfs.tar.zst"
du -ah "$ROOTFS" | sort -h | tail -100
pushd $ROOTFS
tar --zstd -cf /lava-files/${ROOTFSTAR} .
tar --zstd -cf /lava-files/lava-rootfs.tar.zst .
popd
. .gitlab-ci/container/container_post_build.sh
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/"${ROOTFSTAR}" \
https://${S3_PATH}/"${ROOTFSTAR}"
############### Upload the files!
FILES_TO_UPLOAD="lava-rootfs.tar.zst \
$KERNEL_IMAGE_NAME"
if [[ -n $DEVICE_TREES ]]; then
FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
fi
for f in $FILES_TO_UPLOAD; do
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/$f \
https://${S3_PATH}/$f
done
touch /lava-files/done
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/done https://${S3_PATH}/done
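
In the variant that carries full in-tree device-tree paths, the upload loop needs bare file names, since everything was copied into the flat /lava-files directory; basename -a strips the directory prefix from every operand in one call:

basename -a \
  arch/arm64/boot/dts/qcom/apq8016-sbc.dtb \
  arch/arm/boot/dts/imx6q-cubox-i.dtb
# -> apq8016-sbc.dtb
#    imx6q-cubox-i.dtb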

View File

@@ -1,4 +1,4 @@
From dc97ee83a813f6b170079ddf2a04bbb06221a5a7 Mon Sep 17 00:00:00 2001
From bf8ada0d15f94824ee1643d4e17a66dffdbaf2e5 Mon Sep 17 00:00:00 2001
From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Date: Fri, 26 Aug 2022 18:24:27 +0200
Subject: [PATCH 1/2] Allow running on Android from the command line
@@ -13,6 +13,7 @@ $ cmake -S . -B build/ -DDEQP_TARGET=android -DDEQP_TARGET_TOOLCHAIN=ndk-modern
$ ninja -C build modules/egl/deqp-egl
Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Signed-off-by: David Heidelberg <david.heidelberg@collabora.com>
---
CMakeLists.txt | 36 ++-----------------
.../android/tcuAndroidNativeActivity.cpp | 36 ++++++++++---------
@@ -20,11 +21,11 @@ Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
3 files changed, 33 insertions(+), 51 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f9c61d0db..d6ad2990b 100644
index 1ff2bb9..8c76abb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -272,7 +272,7 @@ include_directories(
external/vulkancts/framework/vulkan
@@ -249,7 +249,7 @@ include_directories(
external/vulkancts/framework/vulkan/generated/vulkan
)
-if (DE_OS_IS_ANDROID OR DE_OS_IS_IOS)
@@ -32,7 +33,7 @@ index f9c61d0db..d6ad2990b 100644
# On Android deqp modules are compiled as libraries and linked into final .so
set(DEQP_MODULE_LIBRARIES )
set(DEQP_MODULE_ENTRY_POINTS )
@@ -316,7 +316,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY)
@@ -293,7 +293,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY)
set(DEQP_MODULE_LIBRARIES ${DEQP_MODULE_LIBRARIES} PARENT_SCOPE)
set(DEQP_MODULE_ENTRY_POINTS ${DEQP_MODULE_ENTRY_POINTS} PARENT_SCOPE)
@@ -41,8 +42,8 @@ index f9c61d0db..d6ad2990b 100644
# Executable target
add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY})
target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}")
@@ -390,37 +390,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL})
add_subdirectory(external/openglcts ${MAYBE_EXCLUDE_FROM_ALL})
@@ -367,37 +367,7 @@ add_subdirectory(external/vulkancts/vkscpc)
add_subdirectory(external/openglcts)
# Single-binary targets
-if (DE_OS_IS_ANDROID)
@@ -81,7 +82,7 @@ index f9c61d0db..d6ad2990b 100644
set(DEQP_IOS_CODE_SIGN_IDENTITY "drawElements" CACHE STRING "Code sign identity for iOS build")
diff --git a/framework/platform/android/tcuAndroidNativeActivity.cpp b/framework/platform/android/tcuAndroidNativeActivity.cpp
index 6f8cd8fc5..b83e30f41 100644
index 6f8cd8f..b83e30f 100644
--- a/framework/platform/android/tcuAndroidNativeActivity.cpp
+++ b/framework/platform/android/tcuAndroidNativeActivity.cpp
@@ -116,23 +116,25 @@ namespace Android
@@ -128,7 +129,7 @@ index 6f8cd8fc5..b83e30f41 100644
NativeActivity::~NativeActivity (void)
diff --git a/framework/platform/android/tcuAndroidPlatform.cpp b/framework/platform/android/tcuAndroidPlatform.cpp
index b8a35898c..cf02e6b70 100644
index 69ab384..d7288f6 100644
--- a/framework/platform/android/tcuAndroidPlatform.cpp
+++ b/framework/platform/android/tcuAndroidPlatform.cpp
@@ -22,6 +22,7 @@
@@ -148,7 +149,7 @@ index b8a35898c..cf02e6b70 100644
return new NativeWindow(window, params.width, params.height, format);
}
@@ -292,6 +293,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity)
@@ -286,6 +287,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity)
try
{
@@ -158,7 +159,7 @@ index b8a35898c..cf02e6b70 100644
const size_t totalMemory = getTotalAndroidSystemMemory(activity);
print("Device has %.2f MiB of system memory\n", static_cast<double>(totalMemory) / static_cast<double>(MiB));
return totalMemory;
@@ -388,3 +392,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const
@@ -382,3 +386,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const
} // Android
} // tcu
@@ -169,5 +170,5 @@ index b8a35898c..cf02e6b70 100644
+ return new tcu::Android::Platform(activity);
+}
--
2.42.0
2.39.1

View File

@@ -1,15 +1,16 @@
From a602822c53e22e985f942f843ccadbfb64613212 Mon Sep 17 00:00:00 2001
From c2d5252f4a8be94720235feb9e358ecb0a2e8e11 Mon Sep 17 00:00:00 2001
From: Helen Koike <helen.koike@collabora.com>
Date: Tue, 27 Sep 2022 12:35:22 -0300
Subject: [PATCH 2/2] Android prints to stdout instead of logcat
Signed-off-by: Helen Koike <helen.koike@collabora.com>
Signed-off-by: David Heidelberg <david.heidelberg@collabora.com>
---
framework/qphelper/qpDebugOut.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/framework/qphelper/qpDebugOut.c b/framework/qphelper/qpDebugOut.c
index 6579e9f48..c200c6f6b 100644
index 6579e9f..c200c6f 100644
--- a/framework/qphelper/qpDebugOut.c
+++ b/framework/qphelper/qpDebugOut.c
@@ -98,7 +98,7 @@ void qpDiev (const char* format, va_list args)
@@ -22,5 +23,5 @@ index 6579e9f48..c200c6f6b 100644
#include <android/log.h>
--
2.42.0
2.39.1

View File

@@ -0,0 +1,111 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_PWM=y
CONFIG_PM_DEVFREQ=y
CONFIG_OF=y
CONFIG_CROS_EC=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
# Strip out some stuff we don't need for graphics testing, to reduce
# the build time.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=y
CONFIG_FW_LOADER_COMPRESS=y
# options for AMD devices
CONFIG_X86_AMD_PLATFORM_DEVICE=y
CONFIG_ACPI_VIDEO=y
CONFIG_X86_AMD_FREQ_SENSITIVITY=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_AMD=y
CONFIG_DRM_AMDGPU=m
CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AMD_ACP=n
CONFIG_ACPI_WMI=y
CONFIG_MXM_WMI=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_SERIAL_8250_DW=y
CONFIG_CHROME_PLATFORMS=y
CONFIG_KVM_AMD=m
# options for Intel devices
CONFIG_MFD_INTEL_LPSS_PCI=y
CONFIG_KVM_INTEL=m
# options for KVM guests
CONFIG_FUSE_FS=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_KVM=y
CONFIG_KVM_GUEST=y
CONFIG_VIRT_DRIVERS=y
CONFIG_VIRTIO_FS=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_PARAVIRT=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTUALIZATION=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_TUN=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_VHOST_VSOCK=m

View File

@@ -92,7 +92,7 @@ RESULTS=/data/results
uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"
set +e
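# (For the strace-wrapped variant below: -f follows forks, -s 1000 widens the
# captured string length, and -o writes the syscall log into the results dir.)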
$ADB shell "mkdir /data/results; cd /data; ./deqp-runner \
$ADB shell "mkdir /data/results; cd /data; strace -o /data/results/out.strace -f -s 1000 ./deqp-runner \
suite \
--suite /data/deqp-$DEQP_SUITE.toml \
--output $RESULTS \

View File

@@ -126,11 +126,6 @@ if [ "$PIGLIT_PLATFORM" = "gbm" ]; then
DEQP_SKIPS="$DEQP_SKIPS $INSTALL/gbm-skips.txt"
fi
if [ -n "$VK_DRIVER" ] && [ -z "$DEQP_SUITE" ]; then
# Bump the number of tests per group to reduce the startup time of VKCTS.
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --tests-per-group ${DEQP_RUNNER_TESTS_PER_GROUP:-5000}"
fi
# Set the path to VK validation layer settings (in case it ends up getting loaded)
export VK_LAYER_SETTINGS_PATH=$INSTALL/$GPU_VERSION-validation-settings.txt
@@ -159,15 +154,13 @@ if [ -z "$DEQP_SUITE" ]; then
export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --renderer-check $DEQP_EXPECTED_RENDERER"
fi
if [ $DEQP_VER != vk ] && [ $DEQP_VER != egl ]; then
VER=$(sed 's/[() ]/./g' "$INSTALL/VERSION")
VER=$(sed 's/[() ]/./g' "$INSTALL/VERSION")
export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check $VER"
fi
fi
uncollapsed_section_switch deqp "deqp: deqp-runner"
echo "deqp $(cat /deqp/version)"
set +e
if [ -z "$DEQP_SUITE" ]; then
deqp-runner \
@@ -179,7 +172,7 @@ if [ -z "$DEQP_SUITE" ]; then
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS \
$DEQP_RUNNER_OPTIONS \
-- \
$DEQP_OPTIONS
else
@@ -191,13 +184,12 @@ else
--flakes $INSTALL/$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
--fraction-start $CI_NODE_INDEX \
--fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
--fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
--jobs ${FDO_CI_CONCURRENT:-4} \
$DEQP_RUNNER_OPTIONS
$DEQP_RUNNER_OPTIONS
fi
DEQP_EXITCODE=$?
set -e
set +x
@@ -237,13 +229,13 @@ if [ -n "$FLAKES_CHANNEL" ]; then
--job "$CI_JOB_ID" \
--url "$CI_JOB_URL" \
--branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
# Compress results.csv to save on bandwidth during the upload of artifacts to
# GitLab. This reduces the size in a VKCTS run from 135 MB to 7.6 MB, and takes
# 0.17s on a Ryzen 5950X (16 threads, 0.95s when limited to 1 thread).
zstd --rm -T0 -8q "$RESULTS/results.csv" -o "$RESULTS/results.csv.zst"
zstd --rm -T0 -8qc $RESULTS/results.csv -o $RESULTS/results.csv.zst
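# (zstd flags: --rm deletes the source file after compressing, -T0 uses all
# cores, -8 selects the compression level, -q silences progress output.)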
section_end test_post_process

View File

@@ -15,7 +15,7 @@ fi
TMP_DIR=$(mktemp -d)
echo "$(date +"%F %T") Downloading archived master..."
echo "Downloading archived master..."
if ! /usr/bin/wget \
-O "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" \
"https://${S3_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz";
@@ -27,10 +27,8 @@ fi
set -e
rm -rf "$CI_PROJECT_DIR"
echo "$(date +"%F %T") Extracting tarball into '$CI_PROJECT_DIR'..."
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"
echo "$(date +"%F %T") Git cache download done"

View File

@@ -1,42 +1,10 @@
# The logic for each $FARM is as follows:
#
# If the disable file exists, we are disabling the farm, or it's already
# disabled:
# - exists: [ .ci-farms-disabled/$FARM ]
# when: never
#
# Otherwise, changing the disable file means removing it, so we are
# re-enabling the farm:
# - changes: [ .ci-farms-disabled/$FARM ]
# if: '$CI_PIPELINE_SOURCE != "schedule"'
# when: on_success
# Note: the "manual" variant of each farm rules changes the above to `never`,
# so that jobs meant to be manual don't run in re-enablement MRs. This is the
# only difference between `.$FARM-farm-rules` and `.$FARM-farm-manual-rules`.
#
# If any other disable file is modified, we are disabling/re-enabling another
# farm:
# - changes: [ .ci-farms-disabled/* ]
# if: '$CI_PIPELINE_SOURCE != "schedule"'
# when: never
#
# The `not schedule` condition is there to make sure scheduled pipelines
# contain all the jobs, as `changes` conditions in scheduled pipelines
# always evaluate to `true`.
#
# The "fallback", if none of these rules match, is usually the list of files
# that are used by a driver. See the various `.$DRIVER-rules` in the
# corresponding `src/**/ci/gitlab-ci.yml`.
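#
# Put together, the generic shape for a hypothetical farm "examplefarm" would
# look like this (illustrative only, not an actual rule set in this file):
#
#   .examplefarm-farm-rules:
#     rules:
#       - exists: [ .ci-farms-disabled/examplefarm ]
#         when: never
#       - changes: [ .ci-farms-disabled/examplefarm ]
#         if: '$CI_PIPELINE_SOURCE != "schedule"'
#         when: on_success
#       - changes: [ .ci-farms-disabled/* ]
#         if: '$CI_PIPELINE_SOURCE != "schedule"'
#         when: never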
.microsoft-farm-rules:
rules:
- exists: [ .ci-farms-disabled/microsoft ] # 1. Is disabled, never run
when: never
- changes: [ .ci-farms-disabled/microsoft ] # 2. Removed from disabled, run
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ] # 3. We touched other farms in MR, do not run
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
# 4. Fall-through (other rules or on_success)
@@ -47,7 +15,6 @@
- exists: [ .ci-farms-disabled/microsoft ]
when: never
- changes: [ .ci-farms-disabled/microsoft ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.microsoft-farm-rules, rules]
@@ -58,41 +25,28 @@
- exists: [ .ci-farms-disabled/microsoft ]
when: never
- changes: [ .ci-farms-disabled/microsoft ]
if: '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
when: on_success
- changes: [ .ci-farms-disabled/microsoft ]
if: '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
- changes: [ .ci-farms-disabled/microsoft ]
if: '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
when: never
- changes: [ .ci-farms-disabled/* ]
if: '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: never
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
when: never
when: manual
- !reference [.microsoft-farm-rules, rules]
.collabora-farm-rules:
rules:
- exists: [ .ci-farms-disabled/collabora ]
- if: '$RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
exists: [ .ci-farms-disabled/collabora ]
when: never
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- if: '$RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
changes: [ .ci-farms-disabled/collabora ]
when: on_success
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- if: '$RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
changes: [ .ci-farms-disabled/* ]
when: never
.collabora-farm-manual-rules:
rules:
- exists: [ .ci-farms-disabled/collabora ]
- if: '$RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
exists: [ .ci-farms-disabled/collabora ]
when: never
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- if: '$RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
changes: [ .ci-farms-disabled/collabora ]
when: never
- !reference [.collabora-farm-rules, rules]
@@ -103,10 +57,8 @@
- exists: [ .ci-farms-disabled/igalia ]
when: never
- changes: [ .ci-farms-disabled/igalia ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.igalia-farm-manual-rules:
@@ -114,7 +66,6 @@
- exists: [ .ci-farms-disabled/igalia ]
when: never
- changes: [ .ci-farms-disabled/igalia ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.igalia-farm-rules, rules]
@@ -124,10 +75,8 @@
- exists: [ .ci-farms-disabled/lima ]
when: never
- changes: [ .ci-farms-disabled/lima ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.lima-farm-manual-rules:
@@ -135,7 +84,6 @@
- exists: [ .ci-farms-disabled/lima ]
when: never
- changes: [ .ci-farms-disabled/lima ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.lima-farm-rules, rules]
@@ -145,10 +93,8 @@
- exists: [ .ci-farms-disabled/anholt ]
when: never
- changes: [ .ci-farms-disabled/anholt ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.anholt-farm-manual-rules:
@@ -156,7 +102,6 @@
- exists: [ .ci-farms-disabled/anholt ]
when: never
- changes: [ .ci-farms-disabled/anholt ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.anholt-farm-rules, rules]
@@ -170,13 +115,12 @@
if: '$RUNNER_FARM_LOCATION == "keywords"'
when: never
- changes: [ .ci-farms-disabled/valve-mupuf ]
if: '$RUNNER_FARM_LOCATION == "mupuf" && $CI_PIPELINE_SOURCE != "schedule"'
if: '$RUNNER_FARM_LOCATION == "mupuf"'
when: on_success
- changes: [ .ci-farms-disabled/valve-kws ]
if: '$RUNNER_FARM_LOCATION == "keywords" && $CI_PIPELINE_SOURCE != "schedule"'
if: '$RUNNER_FARM_LOCATION == "keywords"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.valve-farm-manual-rules:
@@ -188,10 +132,10 @@
if: '$RUNNER_FARM_LOCATION == "keywords"'
when: never
- changes: [ .ci-farms-disabled/valve-mupuf ]
if: '$RUNNER_FARM_LOCATION == "mupuf" && $CI_PIPELINE_SOURCE != "schedule"'
if: '$RUNNER_FARM_LOCATION == "mupuf"'
when: never
- changes: [ .ci-farms-disabled/valve-kws ]
if: '$RUNNER_FARM_LOCATION == "keywords" && $CI_PIPELINE_SOURCE != "schedule"'
if: '$RUNNER_FARM_LOCATION == "keywords"'
when: never
- !reference [.valve-farm-rules, rules]
@@ -201,10 +145,8 @@
- exists: [ .ci-farms-disabled/austriancoder ]
when: never
- changes: [ .ci-farms-disabled/austriancoder ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.austriancoder-farm-manual-rules:
@@ -212,7 +154,6 @@
- exists: [ .ci-farms-disabled/austriancoder ]
when: never
- changes: [ .ci-farms-disabled/austriancoder ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.austriancoder-farm-rules, rules]
@@ -222,10 +163,8 @@
- exists: [ .ci-farms-disabled/freedreno ]
when: never
- changes: [ .ci-farms-disabled/freedreno ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: on_success
- changes: [ .ci-farms-disabled/* ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
.freedreno-farm-manual-rules:
@@ -233,61 +172,6 @@
- exists: [ .ci-farms-disabled/freedreno ]
when: never
- changes: [ .ci-farms-disabled/freedreno ]
if: '$CI_PIPELINE_SOURCE != "schedule"'
when: never
- !reference [.freedreno-farm-rules, rules]
# Skip container & build jobs when disabling any farm, and run them if any
# farm gets re-enabled.
# Only apply these rules in MR context, because otherwise we get a false
# positive on files being 'created' when pushing to a new branch, and break
# our pipeline
.disable-farm-mr-rules:
rules:
# changes(disabled) + exists(disabled) = disabling the farm
# Note: this cannot be simplified into a single `.ci-farms-disabled/*` rule
# because if more than one farm is disabled and we only re-enable
# one, the exists(.ci-farms-disabled/*) would match and what should be
# a farm re-enable pipeline would be detected as a farm disable pipeline.
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/microsoft ]
exists: [ .ci-farms-disabled/microsoft ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/collabora ]
exists: [ .ci-farms-disabled/collabora ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/igalia ]
exists: [ .ci-farms-disabled/igalia ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/lima ]
exists: [ .ci-farms-disabled/lima ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/anholt ]
exists: [ .ci-farms-disabled/anholt ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/valve-mupuf ]
exists: [ .ci-farms-disabled/valve-mupuf ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/valve-kws ]
exists: [ .ci-farms-disabled/valve-kws ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/austriancoder ]
exists: [ .ci-farms-disabled/austriancoder ]
when: never
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
changes: [ .ci-farms-disabled/freedreno ]
exists: [ .ci-farms-disabled/freedreno ]
when: never
# Any other change to ci-farms/* means some farm is getting re-enabled.
# Run jobs in Marge pipelines (and let it fallback to manual otherwise)
- if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $GITLAB_USER_LOGIN == "marge-bot"'
changes: [ .ci-farms/* ]
when: on_success

View File

@@ -69,7 +69,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then
--job "$CI_JOB_ID" \
--url "$CI_JOB_URL" \
--branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
exit $GTEST_EXITCODE

View File

@@ -1,16 +1,9 @@
# Keep the tags below under 25-30 chars each, as they end up combined into
# docker image tags, and docker has a length limit of 128 chars total in tags.
#
# If you update a tag and you get an error like this:
# cannot parse input: "$image:$tag": invalid reference format
# check the length of $tag; if it's > 128 chars you need to shorten your tag.
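# Illustrative only (the exact combination scheme lives elsewhere in the CI
# scripts): a quick shell check for a combined tag would be
#   tag="${DEBIAN_BASE_TAG}--${KERNEL_ROOTFS_TAG}"; [ "${#tag}" -le 128 ] && echo ok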
variables:
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "2023-10-13-rust-1.66"
DEBIAN_BASE_TAG: "2023-07-10-virglrenderer"
DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
DEBIAN_BUILD_TAG: "2023-09-30-shader-db"
DEBIAN_BUILD_TAG: "2023-06-24-agility-711"
DEBIAN_X86_64_BUILD_MINGW_IMAGE_PATH: "debian/x86_64_build-mingw"
DEBIAN_BUILD_MINGW_TAG: "2023-05-25-bookworm"
@@ -21,15 +14,14 @@ variables:
DEBIAN_X86_64_TEST_IMAGE_VK_PATH: "debian/x86_64_test-vk"
DEBIAN_X86_64_TEST_ANDROID_IMAGE_PATH: "debian/x86_64_test-android"
DEBIAN_X86_64_TEST_ANDROID_TAG: "2023-10-15-deqp"
DEBIAN_X86_64_TEST_GL_TAG: "2023-10-15-deqp"
DEBIAN_X86_64_TEST_VK_TAG: "2023-10-15-deqp"
DEBIAN_X86_64_TEST_ANDROID_TAG: "2023-06-07-deqp"
DEBIAN_X86_64_TEST_GL_TAG: "2023-07-02-apitrace-lto"
DEBIAN_X86_64_TEST_VK_TAG: "2023-07-08-weston-0"
ALPINE_X86_64_BUILD_TAG: "2023-10-04-ephemeral"
ALPINE_X86_64_BUILD_TAG: "2023-05-01-3.18-bump-1"
ALPINE_X86_64_LAVA_SSH_TAG: "2023-06-26-first-version"
FEDORA_X86_64_BUILD_TAG: "2023-08-04-shader-db"
KERNEL_ROOTFS_TAG: "2023-10-13-deqp"
KERNEL_TAG: "v6.4.12-for-mesa-ci-f6b4ad45f48d"
FEDORA_X86_64_BUILD_TAG: "2023-05-05-ccache-on"
KERNEL_ROOTFS_TAG: "2023-07-10-virglrenderer"
WINDOWS_X64_VS_PATH: "windows/x64_vs"
WINDOWS_X64_VS_TAG: "2022-10-20-upgrade-zlib"

View File

@@ -38,7 +38,6 @@ variables:
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --zstd -x
needs:
- alpine/x86_64_lava_ssh_client
- !reference [.required-for-hardware-jobs, needs]
.lava-test:arm32:
variables:
@@ -116,8 +115,6 @@ variables:
.lava-traces-base:
variables:
HWCI_TEST_SCRIPT: "/install/piglit/piglit-traces.sh"
# until we overcome infrastructure issues, give traces an extra 5 min before timeout
DEVICE_HANGING_TIMEOUT_SEC: 600
artifacts:
reports:
junit: results/junit.xml

View File

@@ -21,12 +21,12 @@ mkdir -p results/job-rootfs-overlay/
cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
cp artifacts/ci-common/kdl.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
# Prepare env vars for upload.
section_start variables "Variables passed through:"
artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
section_end variables
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
@@ -41,7 +41,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
--kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
--kernel-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
--build-url "${ARTIFACT_URL}" \
--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
--job-timeout-min ${JOB_TIMEOUT:-30} \

View File

@@ -58,7 +58,7 @@ except ImportError as e:
# Timeout in seconds used to decide whether the device running the dispatched
# LAVA job has hung, based on the absence of new log output.
DEVICE_HANGING_TIMEOUT_SEC = int(getenv("DEVICE_HANGING_TIMEOUT_SEC", 5*60))
DEVICE_HANGING_TIMEOUT_SEC = int(getenv("LAVA_DEVICE_HANGING_TIMEOUT_SEC", 5*60))
# How many seconds the script should wait before trying a new polling iteration to
# check if the dispatched LAVA job is running or waiting in the job queue.

View File

@@ -16,7 +16,7 @@ NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3
# Supports any integer in [0, 100].
# The scheduler takes the job priority into account when ordering the queue
# to decide which job should run next.
JOB_PRIORITY = int(getenv("JOB_PRIORITY", 75))
JOB_PRIORITY = int(getenv("LAVA_JOB_PRIORITY", 75))
def has_ssh_support(job_submitter: "LAVAJobSubmitter") -> bool:

View File

@@ -120,7 +120,7 @@ if [ -n "$FLAKES_CHANNEL" ]; then
--job "$CI_JOB_ID" \
--url "$CI_JOB_URL" \
--branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
# Compress results.csv to save on bandwidth during the upload of artifacts to

View File

@@ -16,9 +16,6 @@ mkdir -p "$RESULTS"
if [ "$PIGLIT_REPLAY_SUBCOMMAND" = "profile" ]; then
yq -iY 'del(.traces[][] | select(.label[]? == "no-perf"))' \
"$PIGLIT_REPLAY_DESCRIPTION_FILE"
else
# keep the images for later upload
export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image ${PIGLIT_REPLAY_EXTRA_ARGS}"
fi
# WINE
@@ -125,7 +122,7 @@ if [ -n "$CI_NODE_INDEX" ]; then
fi
# shellcheck disable=SC2317
replay_s3_upload_images() {
replay_minio_upload_images() {
find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
| while read -r line; do
@@ -136,7 +133,7 @@ replay_s3_upload_images() {
fi
__S3_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE"
__DESTINATION_FILE_PATH="${line##*-}"
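# Probe with a real HEAD request (-I): plain `curl -X HEAD` only swaps the
# method string, so curl keeps waiting for a body that never arrives and can
# hang. The content-type grep then confirms the object actually exists.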
if curl -L -s -I "https://${__S3_PATH}/${__DESTINATION_FILE_PATH}" | grep -q "content-type: application/octet-stream" 2>/dev/null; then
if curl -L -s -X HEAD "https://${__S3_PATH}/${__DESTINATION_FILE_PATH}" 2>/dev/null; then
continue
fi
else
@@ -206,7 +203,7 @@ __S3_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
__S3_TRACES_PREFIX="traces"
if [ "$PIGLIT_REPLAY_SUBCOMMAND" != "profile" ]; then
quiet replay_s3_upload_images
quiet replay_minio_upload_images
fi
@@ -224,5 +221,5 @@ find "$RESULTS"/summary -type f -name "*.html" -print0 \
echo "Failures in traces:"
cat $RESULTSFILE
error echo "Review the image changes and get the new checksums at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html "
error echo "Review the image changes and get the new checksums at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html"
exit 1

View File

@@ -19,14 +19,14 @@ for driver in freedreno intel v3d vc4; do
done
# Run shader-db over a number of supported chipsets for nouveau
#for chipset in 40 a3 c0 e4 f0 134 162; do
# section_start shader-db-nouveau-${chipset} "Running shader-db for nouveau - ${chipset}"
# env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \
# NOUVEAU_CHIPSET=${chipset} \
# ./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
# > "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt"
# section_end shader-db-nouveau-${chipset}
#done
for chipset in 40 a3 c0 e4 f0 134 162; do
section_start shader-db-nouveau-${chipset} "Running shader-db for nouveau - ${chipset}"
env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \
NOUVEAU_CHIPSET=${chipset} \
./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
> "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt"
section_end shader-db-nouveau-${chipset}
done
# Run shader-db for r300 (RV370 and RV515)
for chipset in 0x5460 0x7140; do

View File

@@ -6,7 +6,7 @@
rules:
- if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
when: on_success
retry:
retry: &scheduled-pipeline-retries
max: 1
# Don't retry on script_failure, job_execution_timeout, runner_unsupported,
# stale_schedule, archived_failure, or unmet_prerequisites
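# (&scheduled-pipeline-retries is a YAML anchor: rule sets further down reuse
# this retry block via *scheduled-pipeline-retries instead of repeating it.)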
@@ -35,7 +35,7 @@
.restricted-rules:
rules:
# If the triggerer has access to the restricted traces and if it is pre-merge
- if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo|kwg|majanes|llanderwelin|zmike|vigneshraman)$/") &&
- if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo|kwg|majanes|llanderwelin|zmike)$/") &&
($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
when: never
@@ -55,6 +55,7 @@
- src/drm-shim/**/*
- src/gbm/**/*
- src/gtest/**/*
- src/include/**/*
# Some src/util and src/compiler files use headers from mesa/ (e.g.
# mtypes.h). We should clean that up.
- src/mesa/**/*.h
@@ -64,7 +65,7 @@
# Same core dependencies for doing manual runs.
.core-manual-rules:
retry: !reference [.scheduled_pipeline-rules, retry]
retry: *scheduled-pipeline-retries
rules:
# We only want manual jobs to show up when it's not marge's pre-merge CI
# run, otherwise she'll wait until her timeout. The exception is
@@ -97,7 +98,7 @@
extends:
- .performance-rules
variables:
JOB_PRIORITY: 40
LAVA_JOB_PRIORITY: 40
PIGLIT_REPLAY_SUBCOMMAND: "profile"
PIGLIT_REPLAY_EXTRA_ARGS: "--db-path ${CI_PROJECT_DIR}/replayer-db/"
# More than this can hit OOM due to BOs leaked during the replay of the last frame
@@ -105,7 +106,7 @@
# We don't want for more than one workload to be submitted to the GPU at a time
FDO_CI_CONCURRENT: 1
# Piglit is very sparse in its status output and downloads of big traces can take a while
DEVICE_HANGING_TIMEOUT_SEC: 600
LAVA_DEVICE_HANGING_TIMEOUT_SEC: 600
GIT_STRATEGY: none
HWCI_FREQ_MAX: "true"
# Always use the same device
@@ -133,21 +134,6 @@
- debian-release
# Mesa source file dependencies that may impact any GL driver test job.
.gallium-core-rules:
rules:
- !reference [.core-rules, rules]
- changes: &gallium_core_file_list
- src/gallium/*
- src/gallium/auxiliary/**/*
- src/gallium/drivers/*
- src/gallium/include/**/*
- src/gallium/frontends/dri/*
- src/gallium/frontends/glx/**/*
- src/gallium/targets/**/*
- src/gallium/tests/**/*
- src/gallium/winsys/*
when: on_success
.gl-rules:
rules:
- !reference [.core-rules, rules]
@@ -168,10 +154,20 @@
- src/mesa/x86/**/*
- src/mesa/x86-64/**/*
when: on_success
- !reference [.gallium-core-rules, rules]
- changes: &gallium_core_file_list
- src/gallium/*
- src/gallium/auxiliary/**/*
- src/gallium/drivers/*
- src/gallium/include/**/*
- src/gallium/frontends/dri/*
- src/gallium/frontends/glx/**/*
- src/gallium/targets/**/*
- src/gallium/tests/**/*
- src/gallium/winsys/*
when: on_success
.gl-manual-rules:
retry: !reference [.scheduled_pipeline-rules, retry]
retry: *scheduled-pipeline-retries
rules:
- !reference [.core-manual-rules, rules]
- changes:
@@ -190,33 +186,823 @@
when: on_success
.vulkan-manual-rules:
retry: !reference [.scheduled_pipeline-rules, retry]
retry: *scheduled-pipeline-retries
rules:
- !reference [.core-manual-rules, rules]
- changes:
*vulkan_file_list
when: manual
.softpipe-rules:
stage: software-renderer
rules:
- !reference [.gl-rules, rules]
- changes: &softpipe_file_list
- src/gallium/drivers/softpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
.llvmpipe-rules:
stage: software-renderer
rules:
- !reference [.gl-rules, rules]
- changes: &llvmpipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
.lavapipe-rules:
stage: software-renderer
rules:
- !reference [.vulkan-rules, rules]
# One could probably be a little more clever here and skip non-gallium Mesa changes (see also .llvmpipe-cl-rules).
- !reference [.gl-rules, rules]
- changes: &lavapipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/frontends/lavapipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
.lavapipe-manual-rules:
stage: software-renderer
retry: *scheduled-pipeline-retries
rules:
- !reference [.vulkan-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes: *lavapipe_file_list
when: manual
.llvmpipe-cl-rules:
stage: software-renderer
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- changes: &llvmpipe_cl_files
- .gitlab-ci.yml
- .gitlab-ci/**/*
- meson.build
- .gitattributes
- include/**/*
- src/compiler/**/*
- src/include/**/*
- src/util/**/*
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
# TODO: remove together with Clover
.llvmpipe-clover-rules:
rules:
- !reference [.llvmpipe-cl-rules, rules]
- changes:
- src/gallium/frontends/clover/**/*
when: on_success
.llvmpipe-rusticl-rules:
rules:
- !reference [.llvmpipe-cl-rules, rules]
- changes:
- src/gallium/frontends/rusticl/**/*
when: on_success
# Rules for changes that impact either freedreno or turnip.
.freedreno-common-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- !reference [.freedreno-farm-rules, rules]
- changes: &freedreno_core_file_list
- src/freedreno/ci/**/*
- src/freedreno/common/**/*
- src/freedreno/drm/**/*
- src/freedreno/fdl/**/*
- src/freedreno/ir3/**/*
- src/freedreno/isa/**/*
- src/freedreno/registers/**/*
when: on_success
.freedreno-common-manual-rules:
retry: *scheduled-pipeline-retries
rules:
- !reference [.freedreno-farm-manual-rules, rules]
- !reference [.core-manual-rules, rules]
- changes:
*freedreno_core_file_list
when: manual
.freedreno-rules:
stage: freedreno
rules:
- !reference [.freedreno-common-rules, rules]
- !reference [.gl-rules, rules]
- changes: &freedreno_gl_file_list
- src/freedreno/ir2/**/*
- src/gallium/drivers/freedreno/**/*
- src/gallium/winsys/freedreno/**/*
when: on_success
.freedreno-manual-rules:
stage: freedreno
retry: *scheduled-pipeline-retries
rules:
- !reference [.freedreno-common-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*freedreno_gl_file_list
when: manual
.turnip-rules:
stage: freedreno
rules:
- !reference [.freedreno-common-rules, rules]
- !reference [.vulkan-rules, rules]
- changes: &freedreno_vulkan_file_list
- src/freedreno/vulkan/**/*
when: on_success
.turnip-manual-rules:
stage: freedreno
retry: *scheduled-pipeline-retries
rules:
- !reference [.freedreno-common-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- changes:
*freedreno_vulkan_file_list
when: manual
# For piglit and skqp test jobs that run both GL and VK tests.
.freedreno-turnip-rules:
rules:
- !reference [.freedreno-rules, rules]
- !reference [.turnip-rules, rules]
.freedreno-rules-restricted:
stage: freedreno
rules:
- !reference [.restricted-rules, rules]
- !reference [.freedreno-rules, rules]
.freedreno-rules-performance:
stage: freedreno
retry: *scheduled-pipeline-retries
rules:
- !reference [.performance-rules, rules]
- !reference [.freedreno-manual-rules, rules]
allow_failure: true # see comment in .performance-rules, which we don't inherit this line from.
variables:
LAVA_JOB_PRIORITY: 40
# Ensure that we are using the release build artifact
S3_ARTIFACT_NAME: mesa-arm64-default-release
needs:
- debian/arm64_test
- debian-arm64-release
dependencies: null
.nouveau-rules:
stage: nouveau
rules:
- !reference [.anholt-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &nouveau_file_list
- src/nouveau/**/*
- src/gallium/drivers/nouveau/**/*
- src/gallium/winsys/kmsro/**/*
- src/gallium/winsys/nouveau/**/*
when: on_success
.nouveau-manual-rules:
stage: nouveau
retry: *scheduled-pipeline-retries
rules:
- !reference [.anholt-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*nouveau_file_list
when: manual
.panfrost-midgard-rules:
stage: arm
rules:
- !reference [.collabora-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &panfrost_gallium_file_list
- src/gallium/drivers/panfrost/**/*
- src/gallium/winsys/panfrost/**/*
when: on_success
- changes: &panfrost_common_file_list
- src/panfrost/ci/*
- src/panfrost/include/*
- src/panfrost/lib/*
- src/panfrost/shared/*
- src/panfrost/util/*
when: on_success
- changes:
- src/panfrost/midgard/**/*
when: on_success
.panfrost-midgard-manual-rules:
stage: arm
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes: *panfrost_gallium_file_list
when: manual
- changes: *panfrost_common_file_list
when: manual
- changes:
- src/panfrost/midgard/**/*
when: manual
.panfrost-bifrost-rules:
stage: arm
rules:
- !reference [.collabora-farm-rules, rules]
- !reference [.vulkan-rules, rules]
- !reference [.gl-rules, rules]
- changes:
*panfrost_common_file_list
when: on_success
- changes:
*panfrost_gallium_file_list
when: on_success
- changes: &panfrost_vulkan_file_list
- src/panfrost/vulkan/*
when: on_success
- changes: &panfrost_bifrost_file_list
- src/panfrost/compiler/**/*
when: on_success
.panfrost-bifrost-manual-rules:
stage: arm
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*panfrost_common_file_list
when: manual
- changes:
*panfrost_gallium_file_list
when: manual
- changes:
*panfrost_vulkan_file_list
when: manual
- changes:
*panfrost_bifrost_file_list
when: manual
.broadcom-common-rules:
rules:
- changes: &broadcom_file_list
- src/broadcom/meson.build
- src/broadcom/ci/**/*
- src/broadcom/cle/**/*
- src/broadcom/clif/**/*
- src/broadcom/common/**/*
- src/broadcom/compiler/**/*
- src/broadcom/drm-shim/**/*
- src/broadcom/qpu/**/*
- src/broadcom/simulator/**/*
when: on_success
.vc4-rules:
stage: broadcom
rules:
- !reference [.igalia-farm-rules, rules]
- !reference [.gl-rules, rules]
- !reference [.broadcom-common-rules, rules]
- changes:
- src/gallium/drivers/vc4/**/*
- src/gallium/winsys/vc4/**/*
- src/gallium/auxiliary/renderonly/**/*
- src/gallium/winsys/kmsro/**/*
when: on_success
.v3d-rules:
stage: broadcom
rules:
- !reference [.igalia-farm-rules, rules]
- !reference [.gl-rules, rules]
- !reference [.broadcom-common-rules, rules]
- changes: &v3d_file_list
- src/gallium/drivers/v3d/**/*
- src/gallium/winsys/v3d/**/*
- src/gallium/auxiliary/renderonly/**/*
- src/gallium/winsys/kmsro/**/*
when: on_success
.v3d-manual-rules:
stage: broadcom
retry: *scheduled-pipeline-retries
rules:
- !reference [.igalia-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*broadcom_file_list
when: manual
- changes:
*v3d_file_list
when: manual
.v3dv-rules:
stage: broadcom
rules:
- !reference [.igalia-farm-rules, rules]
- !reference [.vulkan-rules, rules]
- changes:
- src/broadcom/**/*
when: on_success
.lima-rules:
stage: arm
rules:
- !reference [.lima-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes:
- src/gallium/drivers/lima/**/*
- src/gallium/winsys/lima/**/*
- src/lima/**/*
when: on_success
.radv-rules:
stage: amd
rules:
- !reference [.vulkan-rules, rules]
- changes: &radv_file_list
- src/amd/**/*
- src/vulkan/**/*
when: on_success
.radv-collabora-rules:
stage: amd
rules:
- !reference [.collabora-farm-rules, rules]
- !reference [.radv-rules, rules]
.radv-valve-rules:
stage: amd
rules:
- !reference [.valve-farm-rules, rules]
- !reference [.radv-rules, rules]
.radv-valve-manual-rules:
stage: amd
retry: *scheduled-pipeline-retries
rules:
- !reference [.valve-farm-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- changes:
*radv_file_list
when: manual
.venus-rules:
stage: layered-backends
rules:
- !reference [.lavapipe-rules, rules]
- changes: &venus_file_list
- src/virtio/**/*
when: on_success
- when: never
.radeonsi-rules:
stage: amd
rules:
- !reference [.collabora-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &radeonsi_file_list
- src/gallium/drivers/radeonsi/**/*
- src/gallium/include/winsys/**/*
- src/gallium/winsys/amdgpu/**/*
- src/amd/*
- src/amd/addrlib/**/*
- src/amd/ci/*
- src/amd/common/**/*
- src/amd/llvm/**/*
- src/amd/registers/**/*
when: on_success
.radeonsi+radv-rules:
stage: amd
rules:
- !reference [.radeonsi-rules, rules]
- !reference [.radv-rules, rules]
.radeonsi-vaapi-rules:
stage: amd
rules:
- !reference [.collabora-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes:
*radeonsi_file_list
when: on_success
- changes: &radeon_vcn_file_list
- src/gallium/frontends/va/**/*
- src/gallium/targets/va/**/*
when: on_success
.radeonsi-vaapi-manual-rules:
stage: amd
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*radeon_vcn_file_list
when: manual
.radeonsi-valve-rules:
stage: amd
rules:
- !reference [.valve-farm-rules, rules]
- !reference [.radeonsi-rules, rules]
.radeonsi-valve-manual-rules:
stage: amd
rules:
- !reference [.valve-farm-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- changes:
*radeonsi_file_list
when: manual
.i915g-rules:
stage: intel
rules:
- !reference [.gl-rules, rules]
- changes: &i915g_file_list
- src/gallium/drivers/i915/**/*
- src/gallium/winsys/i915/**/*
- src/intel/**/*
when: on_success
.i915g-manual-rules:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.gl-manual-rules, rules]
- changes:
*i915g_file_list
when: manual
.crocus-rules:
stage: intel
rules:
- !reference [.anholt-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &crocus_file_list
- src/gallium/drivers/crocus/**/*
- src/gallium/winsys/crocus/**/*
- src/intel/**/*
when: on_success
.crocus-manual-rules:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.anholt-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*crocus_file_list
when: manual
.iris-rules:
stage: intel
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- !reference [.collabora-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &iris_file_list
- src/gallium/drivers/iris/**/*
- src/gallium/winsys/iris/**/*
- src/intel/**/*
when: on_success
.iris-manual-rules:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*iris_file_list
when: manual
# Unfortunately we can't sed the on_success from another rules set, so we have
# to duplicate the file lists to set the job to manual (see
# .performance-rules)
.iris-rules-performance:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.performance-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*iris_file_list
when: manual
allow_failure: true # see comment in .performance-rules, which we don't inherit this line from.
variables:
LAVA_JOB_PRIORITY: 40
S3_ARTIFACT_NAME: "mesa-x86_64-default-release"
needs:
- kernel+rootfs_x86_64
- debian-release
.anv-rules:
stage: intel
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- !reference [.collabora-farm-rules, rules]
- !reference [.vulkan-rules, rules]
- changes:
- src/intel/**/*
when: on_success
.anv-manual-rules:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- changes:
- src/intel/**/*
when: manual
.hasvk-rules:
stage: intel
rules:
- !reference [.anholt-farm-rules, rules]
- !reference [.vulkan-rules, rules]
- changes:
- src/intel/**/*
when: on_success
.hasvk-manual-rules:
stage: intel
retry: *scheduled-pipeline-retries
rules:
- !reference [.anholt-farm-manual-rules, rules]
- !reference [.vulkan-manual-rules, rules]
- changes:
- src/intel/**/*
when: on_success
# ruleset to trigger on changes affecting either anv or iris, for jobs using both (piglit, skqp)
.intel-rules:
stage: intel
rules:
- !reference [.iris-rules, rules]
- !reference [.anv-rules, rules]
.intel-manual-rules:
stage: intel
rules:
- !reference [.iris-manual-rules, rules]
- !reference [.anv-manual-rules, rules]
.virgl-rules:
stage: layered-backends
rules:
- !reference [.gl-rules, rules]
- changes:
*llvmpipe_file_list
when: on_success
- changes: &virgl_file_list
- src/gallium/drivers/virgl/**/*
- src/gallium/winsys/virgl/**/*
when: on_success
.virgl-iris-manual-rules:
stage: layered-backends
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*virgl_file_list
when: manual
- changes:
*iris_file_list
when: manual
.virgl-iris-rules-performance:
stage: layered-backends
retry: *scheduled-pipeline-retries
rules:
- !reference [.collabora-farm-manual-rules, rules]
- !reference [.performance-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*llvmpipe_file_list
when: manual
- changes:
*virgl_file_list
when: manual
allow_failure: true # see comment in .performance-rules, which we don't inherit this line from.
variables:
LAVA_JOB_PRIORITY: 40
S3_ARTIFACT_NAME: "mesa-x86_64-default-release"
needs:
- kernel+rootfs_x86_64
- debian-release
.zink-common-rules:
rules:
- !reference [.gl-rules, rules]
- changes:
- src/gallium/drivers/zink/**/*
when: on_success
.zink-common-manual-rules:
retry: *scheduled-pipeline-retries
rules:
- !reference [.gl-manual-rules, rules]
- changes:
- src/gallium/drivers/zink/**/*
when: manual
.zink-lvp-rules:
stage: layered-backends
rules:
- !reference [.lavapipe-rules, rules]
- !reference [.zink-common-rules, rules]
.zink-anv-rules:
stage: layered-backends
rules:
- !reference [.anv-rules, rules]
- !reference [.zink-common-rules, rules]
.zink-anv-manual-rules:
stage: layered-backends
retry: *scheduled-pipeline-retries
rules:
- !reference [.anv-manual-rules, rules]
- !reference [.zink-common-manual-rules, rules]
.zink-anv-rules-restricted:
stage: layered-backends
rules:
- !reference [.restricted-rules, rules]
- !reference [.anv-rules, rules]
- !reference [.zink-common-rules, rules]
.zink-turnip-rules:
stage: layered-backends
rules:
- !reference [.turnip-rules, rules]
- !reference [.zink-common-rules, rules]
.zink-turnip-manual-rules:
stage: layered-backends
retry: *scheduled-pipeline-retries
rules:
- !reference [.turnip-manual-rules, rules]
- !reference [.zink-common-manual-rules, rules]
.zink-radv-rules:
stage: layered-backends
rules:
- !reference [.radv-valve-rules, rules]
- !reference [.zink-common-rules, rules]
.zink-radv-manual-rules:
stage: layered-backends
retry: *scheduled-pipeline-retries
rules:
- !reference [.radv-valve-manual-rules, rules]
- !reference [.zink-common-manual-rules, rules]
- changes:
- .gitlab-ci/container/build-piglit.sh
when: manual
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.windows-build-rules:
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.zink-common-rules, rules]
- !reference [.vulkan-rules, rules]
- changes:
*softpipe_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes: &d3d12_file_list
- src/gallium/drivers/d3d12/**/*
- src/gallium/frontends/wgl/*
- src/gallium/winsys/d3d12/wgl/*
- src/gallium/targets/libgl-gdi/*
- src/gallium/targets/libgl-d3d12/*
when: on_success
- changes:
- src/microsoft/**/*
- src/gallium/frontends/va/*
- src/gallium/targets/va/*
when: on_success
- changes:
*radv_file_list
when: on_success
.glon12-test-rules:
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: *d3d12_file_list
when: on_success
- changes:
- src/microsoft/compiler/*
when: on_success
.spirv2dxil-test-rules:
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.core-rules, rules]
- changes: &spirv2dxil_file_list
- src/microsoft/ci/*
- src/microsoft/compiler/*
- src/microsoft/spirv_to_dxil/*
when: on_success
.dozen-test-rules:
rules:
- !reference [.microsoft-farm-rules, rules]
- !reference [.vulkan-rules, rules]
- changes:
*spirv2dxil_file_list
when: on_success
- changes:
- src/microsoft/vulkan/*
when: on_success
.etnaviv-rules:
stage: etnaviv
rules:
- !reference [.austriancoder-farm-rules, rules]
- !reference [.gl-rules, rules]
- changes: &etnaviv_file_list
- src/etnaviv/**/*
- src/gallium/drivers/etnaviv/**/*
- src/gallium/winsys/etnaviv/**/*
- src/gallium/auxiliary/renderonly/**/*
- src/gallium/winsys/kmsro/**/*
when: on_success
.etnaviv-manual-rules:
stage: etnaviv
retry: *scheduled-pipeline-retries
rules:
- !reference [.austriancoder-farm-manual-rules, rules]
- !reference [.gl-manual-rules, rules]
- changes:
*etnaviv_file_list
when: manual
# Rules for unusual architectures that only build a subset of drivers
.ppc64el-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- !reference [.zink-common-rules, rules]
- !reference [.softpipe-rules, rules]
- !reference [.llvmpipe-rules, rules]
- !reference [.lavapipe-rules, rules]
- !reference [.radv-rules, rules]
- !reference [.radeonsi-rules, rules]
- !reference [.virgl-rules, rules]
- !reference [.nouveau-rules, rules]
- changes:
*softpipe_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*radv_file_list
when: on_success
- changes:
*radeonsi_file_list
when: on_success
- changes:
*virgl_file_list
when: on_success
- changes:
- src/gallium/drivers/nouveau/**/*
- src/gallium/winsys/nouveau/**/*
when: on_success
.s390x-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
- !reference [.zink-common-rules, rules]
- !reference [.softpipe-rules, rules]
- !reference [.llvmpipe-rules, rules]
- !reference [.lavapipe-rules, rules]
- changes:
*softpipe_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
# Rules for linters
.lint-rustfmt-rules:

View File

@@ -17,45 +17,36 @@
paths:
- results/
.formatting-check:
rustfmt:
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
stage: lint
extends:
- .use-debian/x86_64_build
rules:
# in merge pipeline, don't touch the default settings
- if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_COMMIT_BRANCH == null
# in other pipelines, formatting checks are allowed to fail
- allow_failure: true
- .lint-rustfmt-rules
variables:
GIT_STRATEGY: fetch
timeout: 10m
script:
- git diff --color=always --exit-code # Fails if there are diffs
rustfmt:
extends:
- .formatting-check
- .lint-rustfmt-rules
before_script:
- shopt -s globstar
- rustfmt --version
- rustfmt --verbose src/**/lib.rs
- rustfmt --check --verbose src/**/lib.rs
clang-format:
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
stage: lint
extends:
- .formatting-check
- .use-debian/x86_64_build
- .lint-clang-format-rules
variables:
GIT_STRATEGY: fetch
LLVM_VERSION: 15
before_script:
script:
- shopt -s globstar
# We need a meson build dir, but its config doesn't actually matter, so
# let's just use the default.
- meson setup build
- clang-format-${LLVM_VERSION} --version
- ninja -C build clang-format
- git diff --color=always --exit-code # Fails if there are diffs
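# (`git diff --exit-code` returns non-zero when the formatter modified any
# tracked file, which is what makes the lint job fail.)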
.test-gl:
extends:
@@ -65,8 +56,6 @@ clang-format:
- debian/x86_64_test-gl
- debian-testing
- !reference [.required-for-hardware-jobs, needs]
variables:
DEBIAN_ARCH: amd64
.test-vk:
extends:
@@ -76,8 +65,6 @@ clang-format:
- debian-testing
- debian/x86_64_test-vk
- !reference [.required-for-hardware-jobs, needs]
variables:
DEBIAN_ARCH: amd64
.test-cl:
extends:
@@ -143,9 +130,7 @@ clang-format:
exclude:
- results/*.shader_cache
variables:
PIGLIT_REPLAY_EXTRA_ARGS: --db-path ${CI_PROJECT_DIR}/replayer-db/ --minio_bucket=mesa-tracie-public --jwt-file=${CI_JOB_JWT_FILE}
# until we overcome infrastructure issues, give traces an extra 5 min before timeout
DEVICE_HANGING_TIMEOUT_SEC: 600
PIGLIT_REPLAY_EXTRA_ARGS: --keep-image --db-path ${CI_PROJECT_DIR}/replayer-db/ --minio_bucket=mesa-tracie-public --jwt-file=${CI_JOB_JWT_FILE}
script:
- section_start variables "Variables passed through:"
- install/common/generate-env.sh
@@ -154,7 +139,6 @@ clang-format:
.deqp-test:
script:
- rm -rf results # Clear out old results if the docker container was cached
- ./install/deqp-runner.sh
artifacts:
exclude:
@@ -193,8 +177,6 @@ clang-format:
- rm -rf install
- (set -x; curl -L --retry 4 -f --retry-all-errors --retry-delay 60 ${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME}.tar.zst | tar --zstd -x)
- section_end artifacts_download
variables:
BM_ROOTFS: /rootfs-${DEBIAN_ARCH}
artifacts:
when: always
name: "mesa_${CI_JOB_NAME}"
@@ -212,7 +194,7 @@ clang-format:
- .baremetal-test
- .use-debian/arm32_test
variables:
DEBIAN_ARCH: armhf
BM_ROOTFS: /rootfs-armhf
S3_ARTIFACT_NAME: mesa-arm32-default-debugoptimized
needs:
- debian/arm32_test
@@ -226,7 +208,7 @@ clang-format:
- .baremetal-test
- .use-debian/arm64_test
variables:
DEBIAN_ARCH: arm64
BM_ROOTFS: /rootfs-arm64
S3_ARTIFACT_NAME: mesa-arm64-default-debugoptimized
needs:
- debian/arm64_test
@@ -278,13 +260,13 @@ clang-format:
# like FDO_DISTRIBUTION_TAG for *the* image, there is no way to
# depend on more than one image per job. So, the job container is
# built as part of the CI in the boot2container project.
image: registry.freedesktop.org/gfx-ci/ci-tron/mesa-trigger:2023-06-02.1
image: registry.freedesktop.org/mupuf/valve-infra/mesa-trigger:2023-03-08.1
timeout: 1h 40m
variables:
# No need by default to pull the whole repo
GIT_STRATEGY: none
# boot2container initrd configuration parameters.
B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/gfx-ci/ci-tron/-/package_files/519/download' # Linux 6.1
B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/mupuf/valve-infra/-/package_files/519/download' # Linux 6.1
B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.10/downloads/initramfs.linux_amd64.cpio.xz'
B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$'
B2C_JOB_WARN_REGEX: '\*ERROR\* ring .* timeout'
@@ -299,7 +281,6 @@ clang-format:
B2C_TIMEOUT_MINUTES: 5
B2C_TIMEOUT_OVERALL_MINUTES: 90
B2C_TIMEOUT_RETRIES: 0
B2C_JOB_VOLUME_EXCLUSIONS: "*.shader_cache,install/*,*/install/*,*/vkd3d-proton.cache*,vkd3d-proton.cache*,*.qpa"
# As noted in the top description, we make a distinction between the
# container used by gitlab-runner to queue the work, and the container
@@ -341,7 +322,7 @@ clang-format:
[ -d "$CI_COMMON_SCRIPTS" ] || exit 1
B2C_TEST_SCRIPT="bash -euc 'tar xf ${INSTALL_TARBALL_NAME}; ./install/common/init-stage2.sh'"
B2C_TEST_SCRIPT="bash -c 'tar xf ${INSTALL_TARBALL_NAME}; ./install/common/init-stage2.sh'"
# The Valve CI gateway receives jobs in a YAML format. Create a
# job description from the CI environment.
@@ -374,10 +355,6 @@ clang-format:
rm -rf ${JOB_FOLDER} || true
mkdir -v ${JOB_FOLDER}
# Keep the results path the same as baremetal and LAVA
ln -s "$JOB_FOLDER"/results/ .
# Create a script to regenerate the CI environment when this job
# begins running on the remote DUT.
set +x
@@ -409,9 +386,9 @@ clang-format:
when: always
name: "mesa_${CI_JOB_NAME}"
paths:
- results
- ${JOB_FOLDER}/results
reports:
junit: results/**/junit.xml
junit: ${JOB_FOLDER}/results/**/junit.xml
.b2c-test-vk:
extends:

View File

@@ -0,0 +1,26 @@
#!/bin/bash
set -eu
function execute_testsuite {
local RESULTS_FOLDER EXEC_DONE_FILE
RESULTS_FOLDER="results/$1"
EXEC_DONE_FILE="$RESULTS_FOLDER/.done"
if [ ! -f "$EXEC_DONE_FILE" ]; then
DEQP_RESULTS_DIR="$RESULTS_FOLDER" PIGLIT_RESULTS_DIR="$RESULTS_FOLDER" $2
touch "$EXEC_DONE_FILE"
else
echo "--> Skipped, as it already was executed"
fi
}
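# The .done sentinel makes retries idempotent: a re-run of this script skips
# suites that already completed and only executes the missing ones.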
echo -e "\n# GL CTS testing"
DEQP_VER=gl46 execute_testsuite gl ./install/deqp-runner.sh
echo -e "\n# GLES CTS testing"
DEQP_SUITE=zink-radv execute_testsuite gles ./install/deqp-runner.sh
echo -e "\n# Piglit testing"
execute_testsuite piglit ./install/piglit/piglit-runner.sh

View File

@@ -55,11 +55,7 @@ quiet printf "%s\n" "Running vkd3d-proton testsuite..."
set +e
if ! /vkd3d-proton-tests/x64/bin/d3d12 > "$RESULTS/vkd3d-proton.log";
then
# Check if the executable finished (i.e. no segfault).
if ! grep "tests executed" "$RESULTS/vkd3d-proton.log" > /dev/null; then
error printf "%s\n" "Failed, see vkd3d-proton.log!"
exit 1
fi
error printf "%s\n" "Failed, see vkd3d-proton.log!"
# Collect all the failures
VKD3D_PROTON_RESULTS="${VKD3D_PROTON_RESULTS:-vkd3d-proton-results}"
@@ -79,8 +75,9 @@ then
if ! diff -q ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"; then
error printf "%s\n" "Changes found, see vkd3d-proton.log!"
quiet diff --color=always -u ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"
exit 1
fi
exit 1
fi
printf "%s\n" "vkd3d-proton execution: SUCCESS"

View File

@@ -66,7 +66,7 @@ Did it used to work in a previous Mesa version? It can greatly help to know when
### API captures (if applicable, optional)
Consider recording a [GFXReconstruct](https://github.com/LunarG/gfxreconstruct/blob/dev/USAGE_desktop_Vulkan.md) (preferred), [RenderDoc](https://renderdoc.org/), or [apitrace](https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown) capture of the issue with the RADV driver active. This can tremendously help when debugging issues, but you're still encouraged to report issues if you can't provide a capture file.
Consider recording a [GFXReconstruct](https://github.com/LunarG/gfxreconstruct/blob/dev/USAGE_desktop.md) (preferred), [RenderDoc](https://renderdoc.org/), or [apitrace](https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown) capture of the issue with the RADV driver active. This can tremendously help when debugging issues, but you're still encouraged to report issues if you can't provide a capture file.
### Further information (optional)

File diff suppressed because it is too large

View File

@@ -144,10 +144,6 @@ meson.build @dbaker @eric
# R300
/src/gallium/drivers/r300/ @ondracka @gawin
# VirGL - Video
/src/gallium/drivers/virgl/virgl_video.* @flynnjiang
/src/virtio/virtio-gpu/virgl_video_hw.h @flynnjiang
# VMware
/src/gallium/drivers/svga/ @brianp @charmainel
/src/gallium/winsys/svga/ @thomash @drawat

View File

@@ -1 +1 @@
23.3.0
23.2.0-rc1

View File

@@ -41,8 +41,8 @@ include $(CLEAR_VARS)
LOCAL_SHARED_LIBRARIES := libc libdl libdrm libm liblog libcutils libz libc++ libnativewindow libsync libhardware
LOCAL_STATIC_LIBRARIES := libexpat libarect libelf
LOCAL_HEADER_LIBRARIES := libnativebase_headers hwvulkan_headers
MESON_GEN_PKGCONFIGS := cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf
LOCAL_HEADER_LIBRARIES := libnativebase_headers hwvulkan_headers libbacktrace_headers
MESON_GEN_PKGCONFIGS := backtrace cutils expat hardware libdrm:$(LIBDRM_VERSION) nativewindow sync zlib:1.2.11 libelf
LOCAL_CFLAGS += $(BOARD_MESA3D_CFLAGS)
ifneq ($(filter swrast,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),)
@@ -61,12 +61,9 @@ LOCAL_SHARED_LIBRARIES += libdrm_intel
MESON_GEN_PKGCONFIGS += libdrm_intel:$(LIBDRM_VERSION)
endif
ifneq ($(filter radeonsi,$(BOARD_MESA3D_GALLIUM_DRIVERS)),)
ifneq ($(filter radeonsi amd,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),)
MESON_GEN_LLVM_STUB := true
LOCAL_CFLAGS += -DFORCE_BUILD_AMDGPU # instructs LLVM to declare LLVMInitializeAMDGPU* functions
endif
ifneq ($(filter radeonsi amd,$(BOARD_MESA3D_GALLIUM_DRIVERS) $(BOARD_MESA3D_VULKAN_DRIVERS)),)
LOCAL_SHARED_LIBRARIES += libdrm_amdgpu
MESON_GEN_PKGCONFIGS += libdrm_amdgpu:$(LIBDRM_VERSION)
endif
@@ -161,7 +158,6 @@ include $(BUILD_PREBUILT)
endif
endef
ifneq ($(strip $(BOARD_MESA3D_GALLIUM_DRIVERS)),)
# Module 'libgallium_dri', produces '/vendor/lib{64}/dri/libgallium_dri.so'
# This module also trigger DRI symlinks creation process
$(eval $(call mesa3d-lib,libgallium_dri,.so.0,dri,MESA3D_GALLIUM_DRI_BIN))
@@ -174,7 +170,6 @@ $(eval $(call mesa3d-lib,libEGL_mesa,.so.1,egl,MESA3D_LIBEGL_BIN))
$(eval $(call mesa3d-lib,libGLESv1_CM_mesa,.so.1,egl,MESA3D_LIBGLESV1_BIN))
# Module 'libGLESv2_mesa', produces '/vendor/lib{64}/egl/libGLESv2_mesa.so'
$(eval $(call mesa3d-lib,libGLESv2_mesa,.so.2,egl,MESA3D_LIBGLESV2_BIN))
endif
# Modules 'vulkan.{driver_name}', produces '/vendor/lib{64}/hw/vulkan.{driver_name}.so' HAL
$(foreach driver,$(BOARD_MESA3D_VULKAN_DRIVERS), \

View File

@@ -88,11 +88,9 @@ MESON_GEN_NINJA := \
-Dgallium-drivers=$(subst $(space),$(comma),$(BOARD_MESA3D_GALLIUM_DRIVERS)) \
-Dvulkan-drivers=$(subst $(space),$(comma),$(subst radeon,amd,$(BOARD_MESA3D_VULKAN_DRIVERS))) \
-Dgbm=enabled \
-Degl=$(if $(BOARD_MESA3D_GALLIUM_DRIVERS),enabled,disabled) \
-Dllvm=$(if $(MESON_GEN_LLVM_STUB),enabled,disabled) \
-Degl=enabled \
-Dcpp_rtti=false \
-Dlmsensors=disabled \
-Dandroid-libbacktrace=disabled \
MESON_BUILD := PATH=/usr/bin:/bin:/sbin:$$PATH ninja -C $(MESON_OUT_DIR)/build
@@ -204,9 +202,7 @@ define m-c-flags
endef
define filter-c-flags
$(filter-out -std=gnu++17 -std=gnu++14 -std=gnu99 -fno-rtti \
-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang \
-ftrivial-auto-var-init=zero,
$(filter-out -std=gnu++17 -std=gnu++14 -std=gnu99 -fno-rtti, \
$(patsubst -W%,, $1))
endef
@@ -292,7 +288,7 @@ $(MESON_OUT_DIR)/install/.install.timestamp: $(MESON_OUT_DIR)/.build.timestamp
rm -rf $(dir $@)
mkdir -p $(dir $@)
DESTDIR=$(call relative-to-absolute,$(dir $@)) $(MESON_BUILD) install
$(if $(BOARD_MESA3D_GALLIUM_DRIVERS),$(MESON_COPY_LIBGALLIUM))
$(MESON_COPY_LIBGALLIUM)
touch $@
$($(M_TARGET_PREFIX)MESA3D_LIBGBM_BIN) $(MESA3D_GLES_BINS): $(MESON_OUT_DIR)/install/.install.timestamp

View File

@@ -17,20 +17,14 @@ import re
from subprocess import check_output
import sys
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import chain
from typing import Literal, Optional
from typing import Optional
import gitlab
from colorama import Fore, Style
from gitlab_common import (
get_gitlab_project,
read_token,
wait_for_pipeline,
pretty_duration,
)
from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline
from gitlab_gql import GitlabGQL, create_job_needs_dag, filter_dag, print_dag
GITLAB_URL = "https://gitlab.freedesktop.org"
@@ -55,24 +49,34 @@ STATUS_COLORS = {
COMPLETED_STATUSES = ["success", "failed"]
def print_job_status(job, new_status=False) -> None:
def print_job_status(job) -> None:
"""It prints a nice, colored job status with a link to the job."""
if job.status == "canceled":
return
if job.duration:
duration = job.duration
elif job.started_at:
duration = time.perf_counter() - time.mktime(job.started_at.timetuple())
print(
STATUS_COLORS[job.status]
+ "🞋 job "
+ URL_START
+ f"{job.web_url}\a{job.name}"
+ URL_END
+ (f" has new status: {job.status}" if new_status else f" :: {job.status}")
+ (f" ({pretty_duration(duration)})" if job.started_at else "")
+ f" :: {job.status}"
+ Style.RESET_ALL
)
def print_job_status_change(job) -> None:
"""It reports job status changes."""
if job.status == "canceled":
return
print(
STATUS_COLORS[job.status]
+ "🗘 job "
+ URL_START
+ f"{job.web_url}\a{job.name}"
+ URL_END
+ f" has new status: {job.status}"
+ Style.RESET_ALL
)
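The `URL_START`/`URL_END` constants used above are not shown in this hunk; a self-contained sketch of the terminal-hyperlink trick, assuming they are the standard OSC 8 escape sequences (URL and job name below are illustrative):

```python
# OSC 8 escape sequences make the job name clickable in supporting terminals.
# URL_START/URL_END are assumed values matching the usage in the hunk above.
URL_START = "\033]8;;"   # start of link: OSC 8, then "url\a" and visible text
URL_END = "\033]8;;\a"   # empty URL terminates the link

def hyperlink(url: str, text: str) -> str:
    return f"{URL_START}{url}\a{text}{URL_END}"

print("🞋 job " + hyperlink("https://gitlab.freedesktop.org/mesa/mesa/-/jobs/123",
                            "debian-build-testing"))
```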
@@ -87,80 +91,82 @@ def pretty_wait(sec: int) -> None:
def monitor_pipeline(
project,
pipeline,
target_job: str,
target_job: Optional[str],
dependencies,
force_manual: bool,
stress: int,
stress: bool,
) -> tuple[Optional[int], Optional[int]]:
"""Monitors pipeline and delegate canceling jobs"""
statuses: dict[str, str] = defaultdict(str)
target_statuses: dict[str, str] = defaultdict(str)
stress_status_counter = defaultdict(lambda: defaultdict(int))
target_id = None
statuses = {}
target_statuses = {}
stress_succ = 0
stress_fail = 0
target_jobs_regex = re.compile(target_job.strip())
if target_job:
target_jobs_regex = re.compile(target_job.strip())
while True:
to_cancel = []
for job in pipeline.jobs.list(all=True, sort="desc"):
# target jobs
if target_jobs_regex.match(job.name):
target_id = job.id
if target_job and target_jobs_regex.match(job.name):
if force_manual and job.status == "manual":
enable_job(project, job, True)
if stress and job.status in ["success", "failed"]:
if (
stress < 0
or sum(stress_status_counter[job.name].values()) < stress
):
enable_job(project, job, "retry", force_manual)
stress_status_counter[job.name][job.status] += 1
else:
enable_job(project, job, "target", force_manual)
if job.status == "success":
stress_succ += 1
if job.status == "failed":
stress_fail += 1
retry_job(project, job)
if (job.id not in target_statuses) or (
job.status not in target_statuses[job.id]
):
print_job_status_change(job)
target_statuses[job.id] = job.status
else:
print_job_status(job)
print_job_status(job, job.status not in target_statuses[job.name])
target_statuses[job.name] = job.status
continue
# all jobs
if job.status != statuses[job.name]:
print_job_status(job, True)
statuses[job.name] = job.status
if (job.id not in statuses) or (job.status not in statuses[job.id]):
print_job_status_change(job)
statuses[job.id] = job.status
# run dependencies and cancel the rest
# dependencies and cancelling the rest
if job.name in dependencies:
enable_job(project, job, "dep", True)
else:
if job.status == "manual":
enable_job(project, job, False)
elif target_job and job.status not in [
"canceled",
"success",
"failed",
"skipped",
]:
to_cancel.append(job)
cancel_jobs(project, to_cancel)
if target_job:
cancel_jobs(project, to_cancel)
if stress:
enough = True
for job_name, status in stress_status_counter.items():
print(
f"{job_name}\tsucc: {status['success']}; "
f"fail: {status['failed']}; "
f"total: {sum(status.values())} of {stress}",
flush=False,
)
if stress < 0 or sum(status.values()) < stress:
enough = False
if not enough:
pretty_wait(REFRESH_WAIT_JOBS)
continue
print(
"∑ succ: " + str(stress_succ) + "; fail: " + str(stress_fail),
flush=False,
)
pretty_wait(REFRESH_WAIT_JOBS)
continue
print("---------------------------------", flush=False)
if len(target_statuses) == 1 and {"running"}.intersection(
target_statuses.values()
):
return target_id, None
return next(iter(target_statuses)), None
if (
{"failed"}.intersection(target_statuses.values())
and not set(["running", "pending"]).intersection(target_statuses.values())
):
if {"failed", "canceled"}.intersection(target_statuses.values()):
return None, 1
if {"success", "manual"}.issuperset(target_statuses.values()):
@@ -169,43 +175,27 @@ def monitor_pipeline(
pretty_wait(REFRESH_WAIT_JOBS)
def enable_job(
project, job, action_type: Literal["target", "dep", "retry"], force_manual: bool
) -> None:
"""enable job"""
if (
(job.status in ["success", "failed"] and action_type != "retry")
or (job.status == "manual" and not force_manual)
or job.status in ["skipped", "running", "created", "pending"]
):
return
def enable_job(project, job, target: bool) -> None:
"""enable manual job"""
pjob = project.jobs.get(job.id, lazy=True)
if job.status in ["success", "failed", "canceled"]:
pjob.retry()
else:
pjob.play()
if action_type == "target":
pjob.play()
if target:
jtype = "🞋 "
elif action_type == "retry":
jtype = ""
else:
jtype = "(dependency)"
print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)
def retry_job(project, job) -> None:
"""retry job"""
pjob = project.jobs.get(job.id, lazy=True)
pjob.retry()
jtype = ""
print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)
def cancel_job(project, job) -> None:
"""Cancel GitLab job"""
if job.status in [
"canceled",
"success",
"failed",
"skipped",
]:
return
pjob = project.jobs.get(job.id, lazy=True)
pjob.cancel()
print(f"{job.name}", end=" ")
@@ -248,11 +238,13 @@ def parse_args() -> None:
epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
+ '--target ".*traces" ',
)
parser.add_argument("--target", metavar="target-job", help="Target job")
parser.add_argument(
"--target",
metavar="target-job",
help="Target job regex. For multiple targets, separate with pipe | character",
required=True,
"--rev", metavar="revision", help="repository git revision (default: HEAD)"
)
parser.add_argument(
"--pipeline-url",
help="URL of the pipeline to use, instead of auto-detecting it.",
)
parser.add_argument(
"--token",
@@ -262,37 +254,8 @@ def parse_args() -> None:
parser.add_argument(
"--force-manual", action="store_true", help="Force jobs marked as manual"
)
parser.add_argument(
"--stress",
default=0,
type=int,
help="Stresstest job(s). Number or repetitions or -1 for infinite.",
)
parser.add_argument(
"--project",
default="mesa",
help="GitLab project in the format <user>/<project> or just <project>",
)
mutex_group1 = parser.add_mutually_exclusive_group()
mutex_group1.add_argument(
"--rev", default="HEAD", metavar="revision", help="repository git revision (default: HEAD)"
)
mutex_group1.add_argument(
"--pipeline-url",
help="URL of the pipeline to use, instead of auto-detecting it.",
)
args = parser.parse_args()
# argparse doesn't support groups inside add_mutually_exclusive_group(),
# which means we can't just put `--project` and `--rev` in a group together,
# we have to do this by hand instead.
if args.pipeline_url and args.project != parser.get_default("project"):
# weird phrasing but it's the error add_mutually_exclusive_group() gives
parser.error("argument --project: not allowed with argument --pipeline-url")
return args
parser.add_argument("--stress", action="store_true", help="Stresstest job(s)")
return parser.parse_args()
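A minimal, runnable sketch of the hand-rolled exclusivity check above: since argparse cannot nest `--project` inside the `--rev`/`--pipeline-url` mutually exclusive group, the conflict is detected manually after parsing (argument values below are illustrative):

```python
# Manual mutual-exclusion check between --project and --pipeline-url.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--project", default="mesa")
group = parser.add_mutually_exclusive_group()
group.add_argument("--rev", default="HEAD")
group.add_argument("--pipeline-url")

args = parser.parse_args(["--project", "mesa/mesa",
                          "--pipeline-url", "https://example.org/p/1"])
if args.pipeline_url and args.project != parser.get_default("project"):
    # Mirrors the error add_mutually_exclusive_group() would give;
    # parser.error() prints to stderr and exits with status 2.
    parser.error("argument --project: not allowed with argument --pipeline-url")
```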
def find_dependencies(target_job: str, project_path: str, sha: str) -> set[str]:
@@ -326,6 +289,9 @@ if __name__ == "__main__":
retry_transient_errors=True)
REV: str = args.rev
if not REV:
REV = check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
print(f"Revision: {REV}")
if args.pipeline_url:
assert args.pipeline_url.startswith(GITLAB_URL)
@@ -337,15 +303,9 @@ if __name__ == "__main__":
pipeline_id = int(url_path_components[5])
cur_project = gl.projects.get(project_name)
pipe = cur_project.pipelines.get(pipeline_id)
REV = pipe.sha
else:
REV = check_output(['git', 'rev-parse', REV]).decode('ascii').strip()
mesa_project = gl.projects.get("mesa/mesa")
user_project = get_gitlab_project(gl, args.project)
(pipe, cur_project) = wait_for_pipeline([mesa_project, user_project], REV)
print(f"Revision: {REV}")
cur_project = get_gitlab_project(gl, "mesa")
pipe = wait_for_pipeline(cur_project, REV)
print(f"Pipeline: {pipe.web_url}")
deps = set()

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/ci_run_n_monitor.py" "$@"

View File

@@ -12,26 +12,11 @@ import time
from typing import Optional
def pretty_duration(seconds):
"""Pretty print duration"""
hours, rem = divmod(seconds, 3600)
minutes, seconds = divmod(rem, 60)
if hours:
return f"{hours:0.0f}h{minutes:0.0f}m{seconds:0.0f}s"
if minutes:
return f"{minutes:0.0f}m{seconds:0.0f}s"
return f"{seconds:0.0f}s"
def get_gitlab_project(glab, name: str):
"""Finds a specified gitlab project for given user"""
if "/" in name:
project_path = name
else:
glab.auth()
username = glab.user.username
project_path = f"{username}/{name}"
return glab.projects.get(project_path)
glab.auth()
username = glab.user.username
return glab.projects.get(f"{username}/{name}")
def read_token(token_arg: Optional[str]) -> str:
@@ -45,19 +30,13 @@ def read_token(token_arg: Optional[str]) -> str:
)
def wait_for_pipeline(projects, sha: str, timeout=None):
def wait_for_pipeline(project, sha: str):
"""await until pipeline appears in Gitlab"""
project_names = [project.path_with_namespace for project in projects]
print(f"⏲ for the pipeline to appear in {project_names}..", end="")
start_time = time.time()
print("⏲ for the pipeline to appear..", end="")
while True:
for project in projects:
pipelines = project.pipelines.list(sha=sha)
if pipelines:
print("", flush=True)
return (pipelines[0], project)
pipelines = project.pipelines.list(sha=sha)
if pipelines:
print("", flush=True)
return pipelines[0]
print("", end=".", flush=True)
if timeout and time.time() - start_time > timeout:
print(" not found", flush=True)
return (None, None)
time.sleep(1)
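A quick usage check for `pretty_duration()` above (the function is copied here so the snippet runs standalone): `divmod()` splits the seconds into hour/minute buckets, and the largest non-zero unit selects the format string:

```python
# Self-contained copy of pretty_duration() with sample outputs.
def pretty_duration(seconds):
    """Pretty print duration"""
    hours, rem = divmod(seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    if hours:
        return f"{hours:0.0f}h{minutes:0.0f}m{seconds:0.0f}s"
    if minutes:
        return f"{minutes:0.0f}m{seconds:0.0f}s"
    return f"{seconds:0.0f}s"

print(pretty_duration(3723))  # 1h2m3s
print(pretty_duration(145))   # 2m25s
print(pretty_duration(9))     # 9s
```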

View File

@@ -3,8 +3,6 @@
import re
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from os import getenv
from pathlib import Path
@@ -16,7 +14,7 @@ from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from graphql import DocumentNode
Dag = dict[str, set[str]]
Dag = dict[str, list[str]]
TOKEN_DIR = Path(getenv("XDG_CONFIG_HOME") or Path.home() / ".config")
@@ -87,7 +85,7 @@ def create_job_needs_dag(
) -> tuple[Dag, dict[str, dict[str, Any]]]:
result = gl_gql.query("pipeline_details.gql", params)
incomplete_dag = defaultdict(set)
dag = {}
jobs = {}
pipeline = result["project"]["pipeline"]
if not pipeline:
@@ -98,23 +96,20 @@ def create_job_needs_dag(
for job in stage_job["jobs"]["nodes"]:
needs = job.pop("needs")["nodes"]
jobs[job["name"]] = job
incomplete_dag[job["name"]] = {node["name"] for node in needs}
# ensure that all needed nodes are in the graph
[incomplete_dag[node["name"]] for node in needs]
dag[job["name"]] = {node["name"] for node in needs}
final_dag: Dag = {}
for job, needs in incomplete_dag.items():
final_needs: set = deepcopy(needs)
for job, needs in dag.items():
needs: set
partial = True
while partial:
next_depth = {n for dn in final_needs for n in incomplete_dag[dn]}
partial = not final_needs.issuperset(next_depth)
final_needs = final_needs.union(next_depth)
next_depth = {n for dn in needs for n in dag[dn]}
partial = not needs.issuperset(next_depth)
needs = needs.union(next_depth)
final_dag[job] = final_needs
dag[job] = needs
return final_dag, jobs
return dag, jobs
def filter_dag(dag: Dag, regex: Pattern) -> Dag:
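A standalone sketch of the fixed-point loop in `create_job_needs_dag()` above: each job's needs-set is repeatedly expanded with the needs of its needs until no new names appear, turning direct dependencies into a transitive closure (the three-job DAG is illustrative):

```python
# Transitive closure over a needs DAG, mirroring the loop above.
Dag = dict[str, set[str]]

dag: Dag = {
    "container": set(),
    "build": {"container"},
    "test": {"build"},
}

for job, needs in dag.items():
    partial = True
    while partial:
        next_depth = {n for dn in needs for n in dag[dn]}
        partial = not needs.issuperset(next_depth)
        needs = needs.union(next_depth)
    dag[job] = needs

assert dag["test"] == {"build", "container"}  # indirect need now explicit
```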

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/gitlab_gql.py" "$@"

View File

@@ -1,67 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2023 Collabora Ltd.
# Authors:
# David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT
"""
Monitors Marge-bot and returns the number of assigned MRs.
"""
import argparse
import time
import sys
from datetime import datetime, timezone
from dateutil import parser
import gitlab
from gitlab_common import read_token, pretty_duration
REFRESH_WAIT = 30
MARGE_BOT_USER_ID = 9716
def parse_args() -> None:
"""Parse args"""
parse = argparse.ArgumentParser(
description="Tool to show merge requests assigned to the marge-bot",
)
parse.add_argument(
"--wait", action="store_true", help="wait until CI is free",
)
parse.add_argument(
"--token",
metavar="token",
help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
)
return parse.parse_args()
if __name__ == "__main__":
args = parse_args()
token = read_token(args.token)
gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)
project = gl.projects.get("mesa/mesa")
while True:
mrs = project.mergerequests.list(assignee_id=MARGE_BOT_USER_ID, scope="all", state="opened", get_all=True)
jobs_num = len(mrs)
for mr in mrs:
updated = parser.parse(mr.updated_at)
now = datetime.now(timezone.utc)
diff = (now - updated).total_seconds()
print(
f"\u001b]8;;{mr.web_url}\u001b\\{mr.title}\u001b]8;;\u001b\\ ({pretty_duration(diff)})"
)
print("Job waiting: " + str(jobs_num))
if jobs_num == 0:
sys.exit(0)
if not args.wait:
sys.exit(min(jobs_num, 127))
time.sleep(REFRESH_WAIT)

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/marge_queue.py" "$@"

View File

@@ -2,8 +2,7 @@ aiohttp==3.8.3
colorama==0.4.5
filecache==0.81
gql==3.4.0
python-dateutil==2.8.2
python-gitlab==3.5.0
PyYAML==6.0.1
PyYAML==6.0
ruamel.yaml.clib==0.2.7
ruamel.yaml==0.17.21

View File

@@ -70,7 +70,7 @@ def gather_results(
# parse artifact
results_json_bz2 = cur_job.artifact(path="results/results.json.bz2", streamed=False)
results_json = bz2.decompress(results_json_bz2).decode("utf-8", errors="replace")
results_json = bz2.decompress(results_json_bz2).decode("utf-8")
results = json.loads(results_json)
for _, value in results["tests"].items():
@@ -134,7 +134,7 @@ if __name__ == "__main__":
cur_project = get_gitlab_project(gl, "mesa")
print(f"Revision: {args.rev}")
(pipe, cur_project) = wait_for_pipeline([cur_project], args.rev)
pipe = wait_for_pipeline(cur_project, args.rev)
print(f"Pipeline: {pipe.web_url}")
gather_results(cur_project, pipe)
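The `errors="replace"` change above matters because artifact JSON occasionally contains bytes that are not valid UTF-8, and strict decoding would abort the whole run; a tiny sketch with a simulated stray byte:

```python
# Invalid UTF-8 bytes become U+FFFD instead of raising UnicodeDecodeError.
import bz2

blob = bz2.compress(b'{"tests": {}}' + b"\xff")  # simulate a corrupt tail byte
text = bz2.decompress(blob).decode("utf-8", errors="replace")
print(text)  # -> {"tests": {}}\ufffd
```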

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/../python-venv.sh" \
"$this_dir/requirements.txt" \
"$this_dir/update_traces_checksum.py" "$@"

View File

@@ -168,7 +168,6 @@ class Inliner(states.Inliner):
break
# Quote all original backslashes
checked = re.sub('\x00', "\\\x00", checked)
checked = re.sub('@', '\\@', checked)
return docutils.utils.unescape(checked, 1)
inliner = Inliner();

View File

@@ -198,8 +198,3 @@ async def test_parse_issues(content: str, bugs: typing.List[str]) -> None:
mock.patch('bin.gen_release_notes.gather_commits', mock.AsyncMock(return_value='sha\n')):
ids = await parse_issues('1234 not used')
assert set(ids) == set(bugs)
@pytest.mark.asyncio
async def test_rst_escape():
out = inliner.quoteInline('foo@bar')
assert out == 'foo\@bar'

View File

@@ -143,11 +143,11 @@ SOURCES = [
{
'api': 'spirv',
'sources': [
Source('src/compiler/spirv/spirv.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/spirv.h'),
Source('src/compiler/spirv/spirv.core.grammar.json', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/spirv.core.grammar.json'),
Source('src/compiler/spirv/OpenCL.std.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/OpenCL.std.h'),
Source('src/compiler/spirv/GLSL.std.450.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/main/include/spirv/unified1/GLSL.std.450.h'),
Source('src/compiler/spirv/GLSL.ext.AMD.h', 'https://github.com/KhronosGroup/glslang/raw/main/SPIRV/GLSL.ext.AMD.h'), # FIXME: is this the canonical source?
Source('src/compiler/spirv/spirv.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/spirv.h'),
Source('src/compiler/spirv/spirv.core.grammar.json', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/spirv.core.grammar.json'),
Source('src/compiler/spirv/OpenCL.std.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/OpenCL.std.h'),
Source('src/compiler/spirv/GLSL.std.450.h', 'https://github.com/KhronosGroup/SPIRV-Headers/raw/master/include/spirv/unified1/GLSL.std.450.h'),
Source('src/compiler/spirv/GLSL.ext.AMD.h', 'https://github.com/KhronosGroup/glslang/raw/master/SPIRV/GLSL.ext.AMD.h'), # FIXME: is this the canonical source?
],
},

View File

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -eu
this_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
readonly this_dir
exec \
"$this_dir/python-venv.sh" \
"$this_dir/pick/requirements.txt" \
"$this_dir/pick-ui.py" "$@"

View File

@@ -51,8 +51,6 @@ IS_FIX = re.compile(r'^\s*fixes:\s*([a-f0-9]{6,40})', flags=re.MULTILINE | re.IG
IS_CC = re.compile(r'^\s*cc:\s*["\']?([0-9]{2}\.[0-9])?["\']?\s*["\']?([0-9]{2}\.[0-9])?["\']?\s*\<?mesa-stable',
flags=re.MULTILINE | re.IGNORECASE)
IS_REVERT = re.compile(r'This reverts commit ([0-9a-f]{40})')
IS_BACKPORT = re.compile(r'^\s*backport-to:\s*(\d{2}\.\d),?\s*(\d{2}\.\d)?',
flags=re.MULTILINE | re.IGNORECASE)
# XXX: hack
SEM = asyncio.Semaphore(50)
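A quick demonstration of the `IS_BACKPORT` pattern removed above, run against the trailer spellings it accepts (single branch, and comma- or space-separated pairs):

```python
# The optional ",?\s*" between the two capture groups accepts both
# "19.1, 19.2" and "19.1 19.2"; a lone branch leaves group 2 as None.
import re

IS_BACKPORT = re.compile(r'^\s*backport-to:\s*(\d{2}\.\d),?\s*(\d{2}\.\d)?',
                         flags=re.MULTILINE | re.IGNORECASE)

assert IS_BACKPORT.search("Backport-to: 19.2").groups() == ('19.2', None)
assert IS_BACKPORT.search("Backport-to: 19.1, 19.2").groups() == ('19.1', '19.2')
assert IS_BACKPORT.search("Backport-to: 19.1 19.2").groups() == ('19.1', '19.2')
```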
@@ -75,7 +73,6 @@ class NominationType(enum.Enum):
FIXES = 1
REVERT = 2
NONE = 3
BACKPORT = 4
@enum.unique
@@ -279,11 +276,13 @@ async def resolve_nomination(commit: 'Commit', version: str) -> 'Commit':
out = _out.decode()
# We give precedence to fixes and cc tags over revert tags.
if fix_for_commit := IS_FIX.search(out):
# XXX: not having the walrus operator available makes me sad :=
m = IS_FIX.search(out)
if m:
# We set the nomination_type and because_sha here so that we can later
# check to see if this fixes another staged commit.
try:
commit.because_sha = fixed = await full_sha(fix_for_commit.group(1))
commit.because_sha = fixed = await full_sha(m.group(1))
except PickUIException:
pass
else:
@@ -292,22 +291,18 @@ async def resolve_nomination(commit: 'Commit', version: str) -> 'Commit':
commit.nominated = True
return commit
if backport_to := IS_BACKPORT.search(out):
if version in backport_to.groups():
commit.nominated = True
commit.nomination_type = NominationType.BACKPORT
return commit
if cc_to := IS_CC.search(out):
if cc_to.groups() == (None, None) or version in cc_to.groups():
m = IS_CC.search(out)
if m:
if m.groups() == (None, None) or version in m.groups():
commit.nominated = True
commit.nomination_type = NominationType.CC
return commit
if revert_of := IS_REVERT.search(out):
m = IS_REVERT.search(out)
if m:
# See comment for IS_FIX path
try:
commit.because_sha = reverted = await full_sha(revert_of.group(1))
commit.because_sha = reverted = await full_sha(m.group(1))
except PickUIException:
pass
else:
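The hunk above swaps the two-step match-then-check pattern for the walrus operator (Python 3.8+), which binds and tests the match in one expression; a runnable sketch using the `IS_FIX` pattern from earlier in this file:

```python
# ":=" assigns the match object and evaluates it for truthiness in one go.
import re

IS_FIX = re.compile(r'^\s*fixes:\s*([a-f0-9]{6,40})',
                    flags=re.MULTILINE | re.IGNORECASE)
out = "Fixes: 3d09bb390a39 (etnaviv: GC7000: State changes for HALTI3..5)"

if fix_for_commit := IS_FIX.search(out):
    print(fix_for_commit.group(1))  # -> 3d09bb390a39
```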

View File

@@ -94,9 +94,9 @@ class TestRE:
Reviewed-by: Jonathan Marek <jonathan@marek.ca>
""")
fix_for_commit = core.IS_FIX.search(message)
assert fix_for_commit is not None
assert fix_for_commit.group(1) == '3d09bb390a39'
m = core.IS_FIX.search(message)
assert m is not None
assert m.group(1) == '3d09bb390a39'
class TestCC:
@@ -114,9 +114,9 @@ class TestRE:
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '19.2'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '19.2'
def test_multiple_branches(self):
"""Tests commit with more than one branch specified"""
@@ -130,10 +130,10 @@ class TestRE:
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '19.1'
assert cc_to.group(2) == '19.2'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '19.1'
assert m.group(2) == '19.2'
def test_no_branch(self):
"""Tests commit with no branch specification"""
@@ -148,8 +148,8 @@ class TestRE:
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
m = core.IS_CC.search(message)
assert m is not None
def test_quotes(self):
"""Tests commit with quotes around the versions"""
@@ -162,9 +162,9 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
def test_multiple_quotes(self):
"""Tests commit with quotes around the versions"""
@@ -177,10 +177,10 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
assert cc_to.group(2) == '20.1'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
assert m.group(2) == '20.1'
def test_single_quotes(self):
"""Tests commit with quotes around the versions"""
@@ -193,9 +193,9 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
def test_multiple_single_quotes(self):
"""Tests commit with quotes around the versions"""
@@ -208,10 +208,10 @@ class TestRE:
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3454>
""")
cc_to = core.IS_CC.search(message)
assert cc_to is not None
assert cc_to.group(1) == '20.0'
assert cc_to.group(2) == '20.1'
m = core.IS_CC.search(message)
assert m is not None
assert m.group(1) == '20.0'
assert m.group(2) == '20.1'
class TestRevert:
@@ -232,61 +232,9 @@ class TestRE:
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
revert_of = core.IS_REVERT.search(message)
assert revert_of is not None
assert revert_of.group(1) == '2ca8629fa9b303e24783b76a7b3b0c2513e32fbd'
class TestBackportTo:
def test_single_release(self):
"""Tests commit meant for a single branch, ie, 19.1"""
message = textwrap.dedent("""\
radv: fix DCC fast clear code for intensity formats
This fixes a rendering issue with DiRT 4 on GFX10. Only GFX10 was
affected because intensity formats are different.
Backport-to: 19.2
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1923
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.2', None)
def test_multiple_release_space(self):
"""Tests commit with more than one branch specified"""
message = textwrap.dedent("""\
radeonsi: enable zerovram for Rocket League
Fixes corruption on game startup.
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1888
Backport-to: 19.1 19.2
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.1', '19.2')
def test_multiple_release_comma(self):
"""Tests commit with more than one branch specified"""
message = textwrap.dedent("""\
radeonsi: enable zerovram for Rocket League
Fixes corruption on game startup.
Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/1888
Backport-to: 19.1, 19.2
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
""")
backport_to = core.IS_BACKPORT.search(message)
assert backport_to is not None
assert backport_to.groups() == ('19.1', '19.2')
m = core.IS_REVERT.search(message)
assert m is not None
assert m.group(1) == '2ca8629fa9b303e24783b76a7b3b0c2513e32fbd'
class TestResolveNomination:
@@ -375,28 +323,6 @@ class TestResolveNomination:
assert not c.nominated
assert c.nomination_type is None
@pytest.mark.asyncio
async def test_backport_is_nominated(self):
s = self.FakeSubprocess(b'Backport-to: 16.2')
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
await core.resolve_nomination(c, '16.2')
assert c.nominated
assert c.nomination_type is core.NominationType.BACKPORT
@pytest.mark.asyncio
async def test_backport_is_not_nominated(self):
s = self.FakeSubprocess(b'Backport-to: 16.2')
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
await core.resolve_nomination(c, '16.1')
assert not c.nominated
assert c.nomination_type is None
@pytest.mark.asyncio
async def test_revert_is_nominated(self):
s = self.FakeSubprocess(b'This reverts commit 1234567890123456789012345678901234567890.')
@@ -421,21 +347,6 @@ class TestResolveNomination:
assert not c.nominated
assert c.nomination_type is core.NominationType.REVERT
@pytest.mark.asyncio
async def test_is_fix_and_backport(self):
s = self.FakeSubprocess(
b'Fixes: 3d09bb390a39 (etnaviv: GC7000: State changes for HALTI3..5)\n'
b'Backport-to: 16.1'
)
c = core.Commit('abcdef1234567890', 'a commit')
with mock.patch('bin.pick.core.asyncio.create_subprocess_exec', s.mock):
with mock.patch('bin.pick.core.is_commit_in_branch', self.return_true):
await core.resolve_nomination(c, '16.1')
assert c.nominated
assert c.nomination_type is core.NominationType.FIXES
@pytest.mark.asyncio
async def test_is_fix_and_cc(self):
s = self.FakeSubprocess(

View File

@@ -1,2 +0,0 @@
attrs==23.1.0
urwid==2.1.2

View File

@@ -1,27 +0,0 @@
#!/usr/bin/env bash
set -eu
readonly requirements_file=$1
shift
venv_dir="$(dirname "$requirements_file")"/.venv
readonly venv_dir
readonly venv_req=$venv_dir/requirements.txt
if ! [ -r "$venv_dir/bin/activate" ]
then
echo "Creating Python environment..."
python -m venv "$venv_dir"
fi
# shellcheck disable=1091
source "$venv_dir/bin/activate"
if ! cmp --quiet "$requirements_file" "$venv_req"
then
echo "$(realpath --relative-to="$PWD" "$requirements_file") has changed, re-installing..."
pip --disable-pip-version-check install --requirement "$requirements_file"
cp "$requirements_file" "$venv_req"
fi
python "$@"

Some files were not shown because too many files have changed in this diff