Compare commits
176 Commits
mesa-23.0...mesa-22.3
SHA1
e16ab1b4cb
11f33e6198
4b38684f60
a4eeeb8f78
532521adbc
ac303c5d5b
3982e3aca7
174236d2ed
46bb7eaf56
33ebcd5d10
654d0cebb8
562e8a17d5
fb51066b48
c0bc23f194
1e46b971c9
8090e45618
569c230e22
2946c2b3b3
b92f135377
17b00ea533
b3754a5b17
e32d17def7
77a9b631db
46517e0b65
73580de2e8
aee1c4ca00
949c4ec5b7
3b282d2c3d
d567ac1dc8
fbc9f23653
c0c581ec34
1a60fb52be
a8477c373d
4bb8973deb
399eacfce1
6c19c36288
6bf4a48155
7f33a0b997
4e0f9c36e0
8f6c7cb351
b2b0770690
35c695882d
d0cc462008
8718187b22
df7dc583e7
d6b2c77fac
84ada12002
9476566032
9a31d05b01
054a971ecd
00c3949310
d496e5e4ff
76e3938fec
3de72d74c8
7964efaa97
bbcde41cb2
e566cd6add
7d19be3eb5
1f23f529d1
578f84373b
df117562bf
30ea71b9b0
a5c8473a59
09623eb2fd
449eb0798f
d09f780bf3
a44264e3c0
e39b25b788
7f9e923959
64b775c972
559530f151
62a6fd3983
86e7eb1830
2aae392bba
f55b090c84
4734048d51
053f0f5934
34319c7d84
76dc28e3ff
935ba13ee4
03d1edf3c8
f3c55ddd42
e51b0b1060
f100c77d0e
de5ebe433b
5421dd52dd
2a28762dff
ae63ea0631
be102fede4
6e4a46e2a8
5abbb4131d
84381034a7
8747b27202
c06dff600b
fcb0d2fa31
97a017ed25
5824068393
0bcf214da3
aa63bf1834
18a8b0a122
f77567942a
e0867504d1
fc57b9ac44
61ad5a811f
d7ca6ccee2
238c58e7d1
d9d4c97ca1
a90e68cb04
782af9b02a
e1415b0d5a
e127adf5ec
cf6a913796
7db1912a2a
f68e25315f
986a55f9ba
aececb9c4e
077e04bd86
fd23e5b22e
91dfc02570
57e8d21fff
5f5821232a
0dad87e413
674e20d1d0
dc7b34983e
38cb64bfe9
80b0483ea9
7701bf1228
d580ab8898
dd860e4bb9
6c24336ea4
d812245daa
e56be98d93
f4a7f28608
e54150d6e1
87e7794d7b
4eea1cb8cf
1391564493
935aaef351
19711e41c5
a23360cd0f
a7482cfa89
f4fac39ded
7110d632c1
7ba025d528
c0c453e5ca
68b469ba1a
4acfc6a9be
6b4e3cce45
29c6d79a5f
5a5c33ee95
cdecc19acc
15aae04df5
ab1a7fe377
d4a9ef1b21
ebd65d011f
614ae3ba0b
7331880ab4
7f3657fbe3
ef0210c517
78b5a25430
abedacb9c6
0f99923b54
424e3c10ce
dea1b80747
8ebbb2f04b
d34e913aef
32a7d9b892
b4ba437ea7
a6bf520407
bf46cebc0c
cfa0a22315
4be9ceb5b8
ec5bc021fe
76906af371
41de25b2f0
f1beb9bfb3
@@ -8,7 +8,7 @@ charset = utf-8
insert_final_newline = true
tab_width = 8

[*.{c,h,cpp,hpp,cc,hh,y,yy}]
[*.{c,h,cpp,hpp,cc,hh}]
indent_style = space
indent_size = 3
max_line_length = 78
3 .github/workflows/macos.yml vendored
@@ -26,7 +26,6 @@ jobs:
brew "libxcb"
brew "libxdamage"
brew "libxext"
brew "molten-vk"
brew "ninja"
brew "pkg-config"
brew "python@3.10"
@@ -42,7 +41,7 @@ jobs:
[binaries]
llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
EOL
$MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
$MESON_EXEC . build --native-file=native_config -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast -Dglx=${{ matrix.glx_option }}
- name: Build
run: $MESON_EXEC compile -C build
- name: Test
1 .gitignore vendored
@@ -1,4 +1,3 @@
.vscode*
*.pyc
*.pyo
*.out
@@ -23,8 +23,6 @@ variables:
LIMA_FARM: "online"
IGALIA_FARM: "online"
ANHOLT_FARM: "online"
VALVE_FARM: "online"
AUSTRIANCODER_FARM: "online" # only etnaviv GPUs

default:
before_script:
@@ -55,7 +53,6 @@ include:
- project: 'freedesktop/ci-templates'
ref: *ci-templates-commit
file:
- '/templates/alpine.yml'
- '/templates/debian.yml'
- '/templates/fedora.yml'
- local: '.gitlab-ci/image-tags.yml'
@@ -130,24 +127,21 @@ stages:


.docs-base:
variables:
BUILDER: html
extends:
- .fdo.ci-fairy
- .build-rules
artifacts:
expose_as: 'Documentation preview'
paths:
- public/
script:
- apk --no-cache add graphviz doxygen
- pip3 install sphinx===5.1.1 breathe===4.34.0 mako===1.2.3 sphinx_rtd_theme===1.0.0
- docs/doxygen-wrapper.py --out-dir=docs/doxygen_xml
- sphinx-build -W -b $BUILDER docs public
- sphinx-build -W -b html docs public

pages:
extends: .docs-base
stage: deploy
artifacts:
paths:
- public
needs: []
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
@@ -176,24 +170,16 @@ test-docs-mr:
- test-docs
needs:
- sanity
artifacts:
expose_as: 'Documentation preview'
paths:
- public/
rules:
- if: *is-pre-merge
changes: *docs-or-ci
when: on_success
# Other cases default to never

lincheck-docs:
extends: .docs-base
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
stage: deploy
needs: []
rules:
- !reference [.scheduled_pipeline-rules, rules]
allow_failure: true
variables:
BUILDER: linkcheck

# When to automatically run the CI for build jobs
.build-rules:
rules:
@@ -52,7 +52,7 @@ deployment:
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/machine_registration:latest check"
b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},expiration=pipeline_end,preserve"
{% for volume in volumes %}
b2c.volume={{ volume }}
{% endfor %}
@@ -92,12 +92,10 @@ if args.mount_volume is not None:
values['working_dir'] = args.working_dir

assert(len(args.local_container) > 0)

# Use the gateway's pull-through registry caches to reduce load on fd.o.
values['local_container'] = args.local_container
for url, replacement in [('registry.freedesktop.org', '{{ fdo_proxy_registry }}'),
('harbor.freedesktop.org', '{{ harbor_fdo_registry }}')]:
values['local_container'] = values['local_container'].replace(url, replacement)
values['local_container'] = args.local_container.replace(
# Use the gateway's pull-through registry cache to reduce load on fd.o.
'registry.freedesktop.org', '{{ fdo_proxy_registry }}'
)

if 'B2C_KERNEL_CMDLINE_EXTRAS' in environ:
values['cmdline_extras'] = environ['B2C_KERNEL_CMDLINE_EXTRAS']
26 .gitlab-ci/bare-metal/arm64_a630_egl.sh Executable file
@@ -0,0 +1,26 @@
#!/bin/sh

# This test script groups together a bunch of fast dEQP variant runs
# to amortize the cost of rebooting the board.

set -ex

EXIT=0

# Run reset tests without parallelism:
if ! env \
DEQP_RESULTS_DIR=results/reset \
FDO_CI_CONCURRENT=1 \
DEQP_CASELIST_FILTER='.*reset.*' \
/install/deqp-runner.sh; then
EXIT=1
fi

# Then run everything else with parallelism:
if ! env \
DEQP_RESULTS_DIR=results/nonrobustness \
DEQP_CASELIST_INV_FILTER='.*reset.*' \
/install/deqp-runner.sh; then
EXIT=1
fi
@@ -1 +0,0 @@
../bin/ci
@@ -13,7 +13,6 @@ and show the job(s) logs.

import argparse
import re
from subprocess import check_output
import sys
import time
from concurrent.futures import ThreadPoolExecutor
@@ -215,7 +214,7 @@ def print_log(project, job_id) -> None:
job = project.jobs.get(job_id)

# GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
lines = job.trace().decode("raw_unicode_escape").splitlines()
lines = job.trace().decode("unicode_escape").splitlines()
for line in lines[printed_lines:]:
print(line)
printed_lines = len(lines)
@@ -236,7 +235,7 @@ def parse_args() -> None:
)
parser.add_argument("--target", metavar="target-job", help="Target job")
parser.add_argument(
"--rev", metavar="revision", help="repository git revision (default: HEAD)"
"--rev", metavar="revision", help="repository git revision", required=True
)
parser.add_argument(
"--token",
@@ -257,9 +256,6 @@ def find_dependencies(target_job: str, project_path: str, sha: str) -> set[str]:
)

target_dep_dag = filter_dag(dag, target_job)
if not target_dep_dag:
print(Fore.RED + "The job(s) were not found in the pipeline." + Fore.RESET)
sys.exit(1)
print(Fore.YELLOW)
print("Detected job dependencies:")
print()
@@ -276,23 +272,18 @@ if __name__ == "__main__":

token = read_token(args.token)

gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org",
private_token=token,
retry_transient_errors=True)
gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)

cur_project = get_gitlab_project(gl, "mesa")

REV: str = args.rev
if not REV:
REV = check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
print(f"Revision: {REV}")
pipe = wait_for_pipeline(cur_project, REV)
print(f"Revision: {args.rev}")
pipe = wait_for_pipeline(cur_project, args.rev)
print(f"Pipeline: {pipe.web_url}")
deps = set()
if args.target:
print("🞋 job: " + Fore.BLUE + args.target + Style.RESET_ALL)
deps = find_dependencies(
target_job=args.target, sha=REV, project_path=cur_project
target_job=args.target, sha=args.rev, project_path=cur_project
)
target_job_id, ret = monitor_pipeline(
cur_project, pipe, args.target, deps, args.force_manual, args.stress
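The print_log hunk above leans on a quirk worth spelling out: GitLab's REST API has no pagination for job logs, so every poll refetches the whole trace and slices off what was already printed. A minimal sketch of that pattern, assuming python-gitlab; the function name `tail_job_log` and the poll interval are illustrative, not part of the script:

```python
# Sketch only: poll a GitLab job trace and print just the new lines.
import time
import gitlab  # python-gitlab

def tail_job_log(project, job_id, poll_seconds=5):
    printed = 0
    while True:
        job = project.jobs.get(job_id)
        # No log pagination in the API: refetch everything, then slice.
        lines = job.trace().decode("raw_unicode_escape").splitlines()
        for line in lines[printed:]:
            print(line)
        printed = len(lines)
        if job.status in ("success", "failed", "canceled"):
            return job.status
        time.sleep(poll_seconds)
```

The slice `lines[printed:]` is what keeps the output incremental even though each request returns the full log.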
@@ -18,7 +18,6 @@ import sys
from ruamel.yaml import YAML

import gitlab
from colorama import Fore, Style
from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline


@@ -38,8 +37,8 @@ def gather_results(
if target_jobs_regex.match(job.name) and job.status == "failed":
cur_job = project.jobs.get(job.id)
# get variables
print(f"👁 {job.name}...")
log: list[str] = cur_job.trace().decode("unicode_escape").splitlines()
print(f"👁 Looking through logs for the device variable and traces.yml file in {job.name}...")
log = cur_job.trace().decode("unicode_escape").splitlines()
filename: str = ''
dev_name: str = ''
for logline in log:
@@ -51,7 +50,7 @@ def gather_results(
dev_name = device_name.group(1)

if not filename or not dev_name:
print(Fore.RED + "Couldn't find device name or YML file in the logs!" + Style.RESET_ALL)
print("! Couldn't find device name or YML file in the logs!")
return

print(f"👁 Found {dev_name} and file {filename}")
@@ -84,20 +83,19 @@ def gather_results(
checksum: str = value['images'][0]['checksum_render']

if not checksum:
print(Fore.RED + f"{dev_name}: {trace}: checksum is missing! Crash?" + Style.RESET_ALL)
print(f"Trace {trace} checksum is missing! Abort.")
continue

if checksum == "error":
print(Fore.RED + f"{dev_name}: {trace}: crashed" + Style.RESET_ALL)
print(f"Trace {trace} crashed")
continue

if target['traces'][trace][dev_name].get('checksum') == checksum:
continue

if "label" in target['traces'][trace][dev_name]:
print(f'{dev_name}: {trace}: please verify that label {Fore.BLUE}{target["traces"][trace][dev_name]["label"]}{Style.RESET_ALL} is still valid')
print(f'{trace}: {dev_name}: has label: {target["traces"][trace][dev_name]["label"]}, is it still right?')

print(Fore.GREEN + f'{dev_name}: {trace}: checksum updated' + Style.RESET_ALL)
target['traces'][trace][dev_name]['checksum'] = checksum

with open(traces_file[0], 'w', encoding='utf-8') as target_file:
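The gather_results hunks above boil down to a YAML round-trip: load traces.yml, overwrite the per-device checksum, write the file back. A sketch of that core step, assuming ruamel.yaml as imported by the script; `update_checksum` and its arguments are illustrative names:

```python
# Sketch only: ruamel.yaml round-trips the document, so unrelated
# formatting in traces.yml survives the checksum update.
from ruamel.yaml import YAML

def update_checksum(traces_file, trace, dev_name, checksum):
    yaml = YAML()  # default round-trip mode
    with open(traces_file, encoding='utf-8') as f:
        target = yaml.load(f)
    # Same structure the script walks: traces -> <trace> -> <device>
    target['traces'][trace][dev_name]['checksum'] = checksum
    with open(traces_file, 'w', encoding='utf-8') as f:
        yaml.dump(target, f)
```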
@@ -183,8 +183,6 @@ debian-build-testing:
-D spirv-to-dxil=true
-D osmesa=true
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi
-D b_lto=true
LLVM_VERSION: 13
script:
- .gitlab-ci/lava/lava-pytest.sh
- .gitlab-ci/run-shellcheck.sh
@@ -226,44 +224,6 @@ debian-release:
script:
- .gitlab-ci/meson/build.sh

alpine-build-testing:
extends:
- .meson-build
- .use-alpine/x86_build
stage: build-x86_64
variables:
BUILDTYPE: "release"
C_ARGS: >
-Wno-error=cpp
-Wno-error=array-bounds
-Wno-error=stringop-overread
DRI_LOADERS: >
-D glx=disabled
-D gbm=enabled
-D egl=enabled
-D glvnd=false
-D platforms=wayland
LLVM_VERSION: ""
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
GALLIUM_ST: >
-D dri3=enabled
-D gallium-extra-hud=true
-D gallium-vdpau=disabled
-D gallium-omx=disabled
-D gallium-va=enabled
-D gallium-xa=disabled
-D gallium-nine=true
-D gallium-rusticl=false
-D gles1=disabled
-D gles2=enabled
-D llvm=enabled
-D microsoft-clc=disabled
-D shared-llvm=enabled
UNWIND: "disabled"
VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
script:
- .gitlab-ci/meson/build.sh

fedora-release:
extends:
- .meson-build
@@ -273,6 +233,9 @@ fedora-release:
C_ARGS: >
-Wno-error=array-bounds
-Wno-error=stringop-overread
-Wno-error=uninitialized
CPP_ARGS: >
-Wno-error=array-bounds
DRI_LOADERS: >
-D glx=dri
-D gbm=enabled
@@ -322,6 +285,9 @@ debian-android:
-Wno-error=initializer-overrides
-Wno-error=missing-braces
-Wno-error=sometimes-uninitialized
-Wno-error=unused-function
CPP_ARGS: >
-Wno-error=deprecated-declarations
DRI_LOADERS: >
-D glx=disabled
-D gbm=disabled
@@ -362,7 +328,7 @@ debian-android:
-D glx=dri
-D gbm=enabled
-D egl=enabled
-D platforms=x11,wayland
-D platforms=x11
-D osmesa=false
GALLIUM_ST: >
-D dri3=enabled
@@ -395,9 +361,6 @@ debian-armhf:
-D llvm=disabled
-D valgrind=false
MINIO_ARTIFACT_NAME: mesa-armhf
# The strip command segfaults, failing to strip the binary and leaving
# tempfiles in our artifacts.
ARTIFACTS_DEBUG_SYMBOLS: 1
script:
- .gitlab-ci/meson/build.sh
- .gitlab-ci/prepare-artifacts.sh
@@ -458,6 +421,7 @@ debian-clang:
-Wno-error=implicit-const-int-float-conversion
-Wno-error=overloaded-virtual
-Wno-error=tautological-constant-out-of-range-compare
-Wno-error=unused-const-variable
-Wno-error=unused-private-field
DRI_LOADERS: >
-D glx=dri
@@ -28,6 +28,8 @@ for var in \
CROSVM_GALLIUM_DRIVER \
CROSVM_GPU_ARGS \
DEQP_BIN_DIR \
DEQP_CASELIST_FILTER \
DEQP_CASELIST_INV_FILTER \
DEQP_CONFIG \
DEQP_EXPECTED_RENDERER \
DEQP_FRACTION \
@@ -60,7 +62,6 @@ for var in \
HWCI_FREQ_MAX \
HWCI_KERNEL_MODULES \
HWCI_KVM \
HWCI_START_WESTON \
HWCI_START_XORG \
HWCI_TEST_SCRIPT \
IR3_SHADER_DEBUG \
@@ -45,16 +45,6 @@ set -ex
echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
}

# Set up ZRAM
HWCI_ZRAM_SIZE=2G
if zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
mkswap /dev/zram0
swapon /dev/zram0
echo "zram: $HWCI_ZRAM_SIZE activated"
else
echo "zram: skipping, not supported"
fi

#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
@@ -128,7 +118,6 @@ BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
if [ -n "$HWCI_START_XORG" ]; then
echo "touch /xorg-started; sleep 100000" > /xorg-script
env \
VK_ICD_FILENAMES=/install/share/vulkan/icd.d/${VK_DRIVER}_icd.`uname -m`.json \
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

@@ -142,21 +131,6 @@ if [ -n "$HWCI_START_XORG" ]; then
export DISPLAY=:0
fi

if [ -n "$HWCI_START_WESTON" ]; then
export XDG_RUNTIME_DIR=/run/user
mkdir -p $XDG_RUNTIME_DIR

# Xwayland to be used when HWCI_START_XORG is not set
export DISPLAY=:0
mkdir -p /tmp/.X11-unix

env \
VK_ICD_FILENAMES=/install/share/vulkan/icd.d/${VK_DRIVER}_icd.`uname -m`.json \
weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland &
export WAYLAND_DISPLAY=wayland-0
sleep 1
fi

RESULT=fail
set +e
sh -c "$HWCI_TEST_SCRIPT"
@@ -1,70 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace


EPHEMERAL="
autoconf
automake
bzip2
cmake
git
libtool
libepoxy-dev
libtbb-dev
make
openssl-dev
unzip
wget
xz
zstd-dev
"

apk add \
bison \
ccache \
clang-dev \
flex \
gcc \
g++ \
gettext \
glslang \
linux-headers \
llvm15-dev \
meson \
expat-dev \
elfutils-dev \
libselinux-dev \
libva-dev \
libpciaccess-dev \
zlib-dev \
python3-dev \
py3-mako \
py3-ply \
vulkan-headers \
spirv-tools-dev \
util-macros \
$EPHEMERAL


. .gitlab-ci/container/container_pre_build.sh

. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/build-wayland.sh

pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd


############### Uninstall the build software

apk del $EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
@@ -1,12 +1,6 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y

CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y

# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n

@@ -1,12 +1,6 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y

CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y

# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n

@@ -66,7 +60,6 @@ CONFIG_POWER_RESET_QCOM_PON=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8996=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_MSM8916=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
@@ -74,9 +67,6 @@ CONFIG_INTERCONNECT_QCOM_SC7180=y
CONFIG_CRYPTO_DEV_QCOM_RNG=y
CONFIG_SC_DISPCC_7180=y
CONFIG_SC_GPUCC_7180=y
CONFIG_QCOM_SPMI_ADC5=y
CONFIG_DRM_PARADE_PS8640=y
CONFIG_PHY_QCOM_USB_HS=y

# db410c ethernet
CONFIG_USB_RTL8152=y
@@ -6,13 +6,13 @@ set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"

CROSVM_VERSION=504899212d626ecf42b1c459e5592891dde5bf91
CROSVM_VERSION=acd262cb42111c53b580a67355e795775545cced
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
git submodule update --init

VIRGLRENDERER_VERSION=3f2685355f71201f22b98c19aa778b43732c8435
VIRGLRENDERER_VERSION=3c5a9bbb7464e0e91e446991055300f4f989f6a9
rm -rf third_party/virglrenderer
git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
pushd third_party/virglrenderer
@@ -16,7 +16,7 @@ if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
else
# Install from package registry
DEQP_RUNNER_CARGO_ARGS="--version 0.16.0 ${EXTRA_CARGO_ARGS} -- deqp-runner"
DEQP_RUNNER_CARGO_ARGS="--version 0.15.0 ${EXTRA_CARGO_ARGS} -- deqp-runner"
fi

cargo install --locked \
@@ -40,12 +40,6 @@ cmake -S /VK-GL-CTS -B . -G Ninja \
ninja modules/egl/deqp-egl
cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11

cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=wayland \
-DCMAKE_BUILD_TYPE=Release \
$EXTRA_CMAKE_ARGS
ninja modules/egl/deqp-egl
cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland

cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
@@ -1,13 +1,12 @@
#!/usr/bin/env bash
#!/bin/bash

set -ex

MOLD_VERSION="1.9.0"
MOLD_VERSION="1.6.0"

git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
pushd mold
cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
cmake --build . --parallel
cmake --install .
popd
cd mold
make
make install
cd ..
rm -rf mold
@@ -5,7 +5,12 @@ set -ex

git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout 1cd716180cfb6ef0c1fc54702460ef49e5115791
git checkout 591c91865012de4224bea551eac5d2274acf06ad

# TODO: Remove the following patch when piglit commit got past
# 1cd716180cfb6ef0c1fc54702460ef49e5115791
git apply $OLDPWD/.gitlab-ci/piglit/build-piglit_backport-s3-migration.diff

patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
@@ -2,7 +2,7 @@

set -ex

VKD3D_PROTON_COMMIT="804751ee1cb108a2ec59e182ce0c052bafef268e"
VKD3D_PROTON_COMMIT="5b73139f182d86cd58a757e4b5f0d4cfad96d319"

VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -9,7 +8,7 @@ export WAYLAND_PROTOCOLS_VERSION="1.24"
git clone https://gitlab.freedesktop.org/wayland/wayland
cd wayland
git checkout "$LIBWAYLAND_VERSION"
meson -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build $EXTRA_MESON_ARGS
meson -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build
ninja -C _build install
cd ..
rm -rf wayland
@@ -17,7 +16,7 @@ rm -rf wayland
git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
cd wayland-protocols
git checkout "$WAYLAND_PROTOCOLS_VERSION"
meson _build $EXTRA_MESON_ARGS
meson _build
ninja -C _build install
cd ..
rm -rf wayland-protocols
@@ -2,8 +2,6 @@

if test -f /etc/debian_version; then
CCACHE_PATH=/usr/lib/ccache
elif test -f /etc/alpine-release; then
CCACHE_PATH=/usr/lib/ccache/bin
else
CCACHE_PATH=/usr/lib64/ccache
fi
@@ -45,6 +43,4 @@ export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"
echo -e "retry_connrefused = on\n" \
"read_timeout = 300\n" \
"tries = 4\n" \
"retry_on_host_error = on\n" \
"retry_on_http_error = 429,500,502,503,504\n" \
"wait_retry = 32" >> /etc/wgetrc
@@ -2,18 +2,19 @@

arch=$1
cross_file="/cross_file-$arch.txt"
meson env2mfile --cross --debarch "$arch" -o "$cross_file"

/usr/share/meson/debcrossgen --arch "$arch" -o "$cross_file"
# Explicitly set ccache path for cross compilers
sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"

if [ "$arch" = "i386" ]; then
# Work around a bug in debcrossgen that should be fixed in the next release
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file"
fi
# Rely on qemu-user being configured in binfmt_misc on the host
# shellcheck disable=SC1003 # how this sed doesn't seems to work for me locally
sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"

# Add a line for rustc, which meson env2mfile is missing.
cc=$(sed -n "s|^c\s*=\s*\[?'\(.*\)'\]?|\1|p" < "$cross_file")

# Add a line for rustc, which debcrossgen is missing.
cc=$(sed -n 's|c = .\(.*\).|\1|p' < "$cross_file")
if [[ "$arch" = "arm64" ]]; then
rust_target=aarch64-unknown-linux-gnu
elif [[ "$arch" = "armhf" ]]; then
@@ -27,7 +28,6 @@ elif [[ "$arch" = "s390x" ]]; then
else
echo "Needs rustc target mapping"
fi

# shellcheck disable=SC1003 # how this sed doesn't seems to work for me locally
sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"
@@ -102,11 +102,9 @@ apt-get -y install --no-install-recommends \
sntp \
strace \
waffle-utils \
weston \
wget \
xinit \
xserver-xorg-core \
xwayland \
zstd
@@ -18,7 +18,6 @@ apt-get install -y --no-remove \
crossbuild-essential-$arch \
libelf-dev:$arch \
libexpat1-dev:$arch \
libffi-dev:$arch \
libpciaccess-dev:$arch \
libstdc++6:$arch \
libvulkan-dev:$arch \
@@ -36,7 +35,6 @@ apt-get install -y --no-remove \
libxrandr-dev:$arch \
libxshmfence-dev:$arch \
libxxf86vm-dev:$arch \
libwayland-dev:$arch \
wget

if [[ $arch != "armhf" ]]; then
@@ -52,6 +50,7 @@ if [[ $arch != "armhf" ]]; then
# around this.
apt-get install -y --no-remove --no-install-recommends \
libclang-cpp${LLVM}:$arch \
libffi-dev:$arch \
libgcc-s1:$arch \
libtinfo-dev:$arch \
libz3-dev:$arch \
@@ -69,8 +68,6 @@ fi
EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)"
. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/build-wayland.sh

apt-get purge -y \
$STABLE_EPHEMERAL
@@ -1,5 +0,0 @@
#!/usr/bin/env bash

set -e

arch=arm64 . .gitlab-ci/container/debian/arm_test.sh
@@ -50,9 +50,8 @@ apt-get -y install \
libxrandr-dev \
libxshmfence-dev \
libxxf86vm-dev \
libwayland-dev \
llvm-11-dev \
ninja-build \
meson \
pkg-config \
python3-mako \
python3-pil \
@@ -71,9 +70,6 @@ apt-get install -y --no-remove -t buster \

pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2

# We need at least 0.61.4 for proper Rust; 0.62 for modern meson env2mfile
pip3 install meson==0.63.3

arch=armhf
. .gitlab-ci/container/cross_build.sh

@@ -85,8 +81,6 @@ arch=armhf
EXTRA_MESON_ARGS=
. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/build-wayland.sh

apt-get purge -y $STABLE_EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2154 # arch is assigned in previous scripts

set -e
set -o xtrace
@@ -26,13 +25,12 @@ apt-get install -y --no-remove \
wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
-O /usr/share/snmp/mibs/SNMPv2-SMI.txt

. .gitlab-ci/container/baremetal_build.sh
arch=arm64 . .gitlab-ci/container/baremetal_build.sh
arch=armhf . .gitlab-ci/container/baremetal_build.sh

if [[ "$arch" == "arm64" ]]; then
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
-O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw
fi
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
-O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw

mkdir -p /baremetal-files/jetson-nano/boot/
ln -s \
@@ -1,5 +0,0 @@
#!/usr/bin/env bash

set -e

arch=armhf . .gitlab-ci/container/debian/arm_test.sh
@@ -59,7 +59,7 @@ apt-get install -y --no-remove \
libxshmfence-dev \
libxxf86vm-dev \
make \
ninja-build \
meson \
pkg-config \
python3-mako \
python3-pil \
@@ -78,8 +78,8 @@ apt-get install -y --no-remove \
# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2

# We need at least 1.0.0 for proper Rust; 0.62 for modern meson env2mfile
pip3 install meson==1.0.0
# We need at least 0.61.4 for proper Rust
pip3 install meson==0.61.5

. .gitlab-ci/container/build-rust.sh
@@ -27,8 +27,9 @@ popd
# Building libva
git clone https://github.com/intel/libva
pushd libva/
# libva-win32 is released with libva version 2.17 (see https://github.com/intel/libva/releases/tag/2.17.0)
git checkout 2.17.0
# Checking out commit hash with libva-win32 support
# This feature will be released with libva version 2.17
git checkout 2579eb0f77897dc01a02c1e43defc63c40fd2988
popd
# libva already has a build dir in their repo, use builddir instead
mkdir -p libva/builddir
@@ -92,7 +92,7 @@ ninja install
popd
rm -rf DirectX-Headers

pip3 install lavacli==1.5.2
pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7

# install bindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
@@ -25,7 +25,6 @@ STABLE_EPHEMERAL=" \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxkbcommon-dev \
@@ -60,8 +59,7 @@ apt-get install -y --no-remove \
python3-lxml \
python3-renderdoc \
python3-simplejson \
spirv-tools \
weston
spirv-tools


. .gitlab-ci/container/container_pre_build.sh
@@ -45,6 +45,7 @@ STABLE_EPHEMERAL=" \
python3-setuptools \
python3-wheel \
software-properties-common \
wget \
wine64-tools \
xz-utils \
"
@@ -56,7 +57,6 @@ apt-get install -y --no-remove --no-install-recommends \
pciutils \
python3-lxml \
python3-simplejson \
wget \
xinit \
xserver-xorg-video-amdgpu \
xserver-xorg-video-ati
@@ -66,6 +66,10 @@ apt-key add .gitlab-ci/container/debian/winehq.gpg.key
apt-add-repository https://dl.winehq.org/wine-builds/debian/
apt-get update -q

# Needed for Valve's tracing jobs to collect information about the graphics
# hardware on the test devices.
pip3 install gfxinfo-mupuf==0.0.9

# workaround wine needing 32-bit
# https://bugs.winehq.org/show_bug.cgi?id=53393
apt-get install -y --no-remove wine-stable-amd64 # a requirement for wine-stable
@@ -31,6 +31,7 @@ dnf install -y --setopt=install_weak_deps=False \
glslang \
kernel-headers \
llvm-devel \
clang-devel \
meson \
"pkgconfig(dri2proto)" \
"pkgconfig(expat)" \
@@ -64,6 +65,8 @@ dnf install -y --setopt=install_weak_deps=False \
python-unversioned-command \
python3-devel \
python3-mako \
python3-devel \
python3-mako \
python3-ply \
vulkan-headers \
spirv-tools-devel \
@@ -189,7 +189,7 @@ debian/android_build:
debian/x86_test-base:
extends: debian/x86_build-base
variables:
KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v6.0-for-mesa-ci-93bd820c433b/linux-v6.0-for-mesa-ci-93bd820c433b.tar.bz2"
KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.19-for-mesa-ci-d4efddaec194/linux-v5.17-for-mesa-ci-b78f7870d97b.tar.bz2"
MESA_IMAGE_TAG: &debian-x86_test-base ${DEBIAN_BASE_TAG}

.use-debian/x86_test-base:
@@ -213,7 +213,7 @@ debian/x86_test-gl:
- .set-image-base-tag
variables:
MESA_BASE_TAG: *debian-x86_test-base
MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_GL_PATH}
MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_PATH}
MESA_IMAGE_TAG: *debian-x86_test-gl
needs:
- debian/x86_test-gl
@@ -229,7 +229,7 @@ debian/x86_test-vk:
- .set-image-base-tag
variables:
MESA_BASE_TAG: *debian-x86_test-base
MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_VK_PATH}
MESA_IMAGE_PATH: "debian/x86_test-vk"
MESA_IMAGE_TAG: *debian-x86_test-vk
needs:
- debian/x86_test-vk
@@ -255,24 +255,6 @@ debian/arm_build:
- debian/arm_build


# Alpine based x86 build image
alpine/x86_build:
extends:
- .fdo.container-build@alpine
- .container
variables:
FDO_DISTRIBUTION_VERSION: "3.16"
MESA_IMAGE_TAG: &alpine-x86_build ${ALPINE_X86_BUILD_TAG}

.use-alpine/x86_build:
extends:
- .set-image
variables:
MESA_IMAGE_PATH: "alpine/x86_build"
MESA_IMAGE_TAG: *alpine-x86_build
needs:
- alpine/x86_build

# Fedora 34 based x86 build image
fedora/x86_build:
extends:
@@ -340,56 +322,30 @@ kernel+rootfs_armhf:
MESA_ROOTFS_TAG: *kernel-rootfs

# x86 image with ARM64 & armhf kernel & rootfs for baremetal testing
.debian/arm_test:
debian/arm_test:
extends:
- .fdo.container-build@debian
- .container
# Don't want the .container rules
- .build-rules
variables:
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
ARTIFACTS_PREFIX: "https://${MINIO_HOST}/mesa-lava"
ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
MESA_ARTIFACTS_TAG: *debian-arm_build
MESA_ROOTFS_TAG: *kernel-rootfs

debian/armhf_test:
extends:
- .debian/arm_test
needs:
- kernel+rootfs_armhf
variables:
MESA_IMAGE_TAG: &debian-armhf_test ${DEBIAN_BASE_TAG}

debian/arm64_test:
extends:
- .debian/arm_test
needs:
- kernel+rootfs_arm64
- kernel+rootfs_armhf
variables:
MESA_IMAGE_TAG: &debian-arm64_test ${DEBIAN_BASE_TAG}

.use-debian/arm_test:
variables:
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
ARTIFACTS_PREFIX: "https://${MINIO_HOST}/mesa-lava"
ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
MESA_ARM_BUILD_TAG: *debian-arm_build
MESA_IMAGE_TAG: &debian-arm_test ${DEBIAN_BASE_TAG}
MESA_ROOTFS_TAG: *kernel-rootfs

.use-debian/armhf_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/arm_test
.use-debian/arm_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
variables:
MESA_IMAGE_PATH: "debian/armhf_test"
MESA_IMAGE_TAG: *debian-armhf_test
needs:
- debian/arm_test

.use-debian/arm64_test:
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_TEMPLATES_COMMIT}"
extends:
- .use-debian/arm_test
variables:
MESA_IMAGE_PATH: "debian/arm64_test"
MESA_IMAGE_TAG: *debian-arm64_test
MESA_ARM_BUILD_TAG: *debian-arm_build
MESA_IMAGE_PATH: "debian/arm_test"
MESA_IMAGE_TAG: *debian-arm_test
MESA_ROOTFS_TAG: *kernel-rootfs
needs:
- debian/arm_test
@@ -40,7 +40,6 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown-r1.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dtb"
KERNEL_IMAGE_NAME="Image"

@@ -110,7 +109,6 @@ apt-get install -y --no-remove \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxkbcommon-dev \
libwayland-dev \
ninja-build \
patch \
protobuf-compiler \
@@ -200,8 +198,6 @@ if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
. .gitlab-ci/container/build-crosvm.sh
mv /usr/local/bin/crosvm /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
mv /usr/local/lib/$GCC_ARCH/libvirglrenderer.* /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/local/libexec/
mv /usr/local/libexec/virgl* /lava-files/rootfs-${DEBIAN_ARCH}/usr/local/libexec/
fi

############### Build libdrm
@@ -244,7 +240,6 @@ cp .gitlab-ci/container/debian/winehq.gpg.key /lava-files/rootfs-${DEBIAN_ARCH}/
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/{llvm-snapshot,winehq}.gpg.key
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
cp /etc/wgetrc /lava-files/rootfs-${DEBIAN_ARCH}/etc/.


############### Install the built libdrm
@@ -1,12 +1,6 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y

CONFIG_CRYPTO_ZSTD=y
CONFIG_ZRAM_MEMORY_TRACKING=y
CONFIG_ZRAM_WRITEBACK=y
CONFIG_ZRAM=y
CONFIG_ZSMALLOC_STAT=y

CONFIG_PWM=y
CONFIG_PM_DEVFREQ=y
CONFIG_OF=y
@@ -96,8 +96,7 @@ LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
VK_ICD_FILENAMES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER}_icd.x86_64.json \
crosvm --no-syslog run \
--gpu "${CROSVM_GPU_ARGS}" --gpu-render-server "path=/usr/local/libexec/virgl_render_server" \
-m "${CROSVM_MEMORY:-4096}" -c 2 --disable-sandbox \
--gpu "${CROSVM_GPU_ARGS}" -m "${CROSVM_MEMORY:-4096}" -c 2 --disable-sandbox \
--shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
--host-ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \
-s $VM_SOCKET \
@@ -18,7 +18,6 @@ INSTALL=`pwd`/install
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json
export OCL_ICD_VENDORS=`pwd`/install/etc/OpenCL/vendors/

RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS
@@ -86,6 +85,14 @@ if [ -z "$DEQP_SUITE" ]; then
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
fi

if [ -n "$DEQP_CASELIST_FILTER" ]; then
sed -ni "/$DEQP_CASELIST_FILTER/p" /tmp/case-list.txt
fi

if [ -n "$DEQP_CASELIST_INV_FILTER" ]; then
sed -ni "/$DEQP_CASELIST_INV_FILTER/!p" /tmp/case-list.txt
fi

if [ ! -s /tmp/case-list.txt ]; then
echo "Caselist generation failed"
exit 1
@@ -1,31 +1,27 @@
variables:
DEBIAN_X86_BUILD_BASE_IMAGE: "debian/x86_build-base"
DEBIAN_BASE_TAG: "2023-01-31-rust-valgrind-23-stable"
DEBIAN_BASE_TAG: "2022-11-15-ci-fairy"

DEBIAN_X86_BUILD_IMAGE_PATH: "debian/x86_build"
DEBIAN_BUILD_TAG: "2023-01-09-lavacli"
DEBIAN_BUILD_TAG: "2022-11-15-ci-fairy"

DEBIAN_X86_BUILD_MINGW_IMAGE_PATH: "debian/x86_build-mingw"
DEBIAN_BUILD_MINGW_TAG: "2023-01-03-ci-libva-2.17"
DEBIAN_BUILD_MINGW_TAG: "2022-10-18-dx-headers-va"

DEBIAN_X86_TEST_BASE_IMAGE: "debian/x86_test-base"

DEBIAN_X86_TEST_IMAGE_GL_PATH: "debian/x86_test-gl"
DEBIAN_X86_TEST_IMAGE_VK_PATH: "debian/x86_test-vk"
DEBIAN_X86_TEST_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_GL_PATH}
DEBIAN_X86_TEST_IMAGE_PATH: "debian/x86_test-gl"
DEBIAN_X86_TEST_GL_TAG: "2022-11-15-ci-fairy"
DEBIAN_X86_TEST_VK_TAG: "2022-11-15-ci-fairy"

DEBIAN_X86_TEST_GL_TAG: "2023-01-08-weston-xwayland"
DEBIAN_X86_TEST_VK_TAG: "2022-12-12-vkd3d-proton-uprev"

ALPINE_X86_BUILD_TAG: "2023-01-10-robust-wget"
FEDORA_X86_BUILD_TAG: "2023-01-10-robust-wget"
KERNEL_ROOTFS_TAG: "2023-01-10-virglrenderer"
FEDORA_X86_BUILD_TAG: "2022-09-22-python3-ply-2"
KERNEL_ROOTFS_TAG: "2022-11-03-piglit_mesa-22.3"

WINDOWS_X64_VS_PATH: "windows/x64_vs"
WINDOWS_X64_VS_TAG: "2022-10-20-upgrade-zlib"

WINDOWS_X64_BUILD_PATH: "windows/x64_build"
WINDOWS_X64_BUILD_TAG: "2023-01-03-ci-libva-2.17"
WINDOWS_X64_BUILD_TAG: "2022-10-18-wrap-nodownload-va"

WINDOWS_X64_TEST_PATH: "windows/x64_test"
WINDOWS_X64_TEST_TAG: "2023-01-03-piglit-waffle"
WINDOWS_X64_TEST_TAG: "2022-08-17-bump"
@@ -20,11 +20,11 @@ import traceback
import urllib.parse
import xmlrpc.client
from datetime import datetime, timedelta
from io import StringIO
from os import getenv
from typing import Any, Optional

import lavacli
import yaml
from lava.exceptions import (
MesaCIException,
MesaCIKnownIssueException,
@@ -42,7 +42,7 @@ from lava.utils import (
hide_sensitive_data,
print_log,
)
from lavacli.utils import flow_yaml as lava_yaml
from lavacli.utils import loader

# Timeout in seconds to decide if the device from the dispatched LAVA job has
# hung or not due to the lack of new log output.
@@ -62,7 +62,7 @@ NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT
NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3))


def generate_lava_yaml_payload(args) -> dict[str, Any]:
def generate_lava_yaml(args):
# General metadata and permissions, plus also inexplicably kernel arguments
values = {
'job_name': 'mesa: {}'.format(args.pipeline_info),
@@ -74,20 +74,11 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
},
"timeouts": {
"job": {"minutes": args.job_timeout},
"action": {"minutes": 3},
"actions": {
"depthcharge-retry": {
# Could take between 1 and 1.5 min in slower boots
"minutes": 2
},
"depthcharge-start": {
# Should take less than 1 min.
"minutes": 1,
},
"depthcharge-action": {
# This timeout englobes the entire depthcharge timing,
# including retries
"minutes": 2 * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
},
"minutes": 3 * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
}
}
},
}
@@ -156,13 +147,8 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
# - fetch and unpack per-job environment from lava-submit.sh
# - exec .gitlab-ci/common/init-stage2.sh

with open(args.first_stage_init, "r") as init_sh:
run_steps += [
x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()
]
run_steps.append(
f"wget -S --progress=dot:giga -O- {args.job_rootfs_overlay_url} | tar -xz -C /",
)
with open(args.first_stage_init, 'r') as init_sh:
run_steps += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]

if args.jwt_file:
with open(args.jwt_file) as jwt_file:
@@ -181,6 +167,7 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
run_steps += [
'mkdir -p {}'.format(args.ci_project_dir),
'wget -S --progress=dot:giga -O- {} | tar --zstd -x -C {}'.format(args.build_url, args.ci_project_dir),
'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),

# Sleep a bit to give time for bash to dump shell xtrace messages into
# console which may cause interleaving with LAVA_SIGNAL_STARTTC in some
@@ -198,7 +185,7 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
{ 'test': test },
]

return values
return yaml.dump(values, width=10000000)


def setup_lava_proxy():
@@ -285,12 +272,8 @@ class LAVAJob:

def _load_log_from_data(self, data) -> list[str]:
lines = []
if isinstance(data, xmlrpc.client.Binary):
# We are dealing with xmlrpc.client.Binary
# Let's extract the data
data = data.data
# When there is no new log data, the YAML is empty
if loaded_lines := lava_yaml.load(data):
if loaded_lines := yaml.load(str(data), Loader=loader(False)):
lines = loaded_lines
self.last_log_line += len(lines)
return lines
@@ -355,7 +338,7 @@ def find_exception_from_metadata(metadata, job_id):
def find_lava_error(job) -> None:
# Look for infrastructure errors and retry if we see them.
results_yaml = _call_proxy(job.proxy.results.get_testjob_results_yaml, job.job_id)
results = lava_yaml.load(results_yaml)
results = yaml.load(results_yaml, Loader=loader(False))
for res in results:
metadata = res["metadata"]
find_exception_from_metadata(metadata, job.job_id)
@@ -365,17 +348,16 @@ def find_lava_error(job) -> None:
job.status = "fail"


def show_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"):
def show_job_data(job):
with GitlabSection(
"job_data",
"LAVA job info",
type=LogSectionType.LAVA_POST_PROCESSING,
start_collapsed=True,
colour=colour,
):
show = _call_proxy(job.proxy.scheduler.jobs.show, job.job_id)
for field, value in show.items():
print(f"{field:<15}: {value}")
print("{}\t: {}".format(field, value))


def fetch_logs(job, max_idle_time, log_follower) -> None:
@@ -451,6 +433,8 @@ def follow_job_execution(job):
while not job.is_finished:
fetch_logs(job, max_idle_time, lf)

show_job_data(job)

# Mesa Developers expect to have a simple pass/fail job result.
# If this does not happen, it probably means a LAVA infrastructure error
# happened.
@@ -469,7 +453,6 @@ def print_job_final_status(job):
f"{CONSOLE_LOG['RESET']}"
)

show_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{color}")

def retriable_follow_job(proxy, job_definition) -> LAVAJob:
retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION
@@ -522,9 +505,7 @@ def main(args):
# script section timeout with a reasonable delay.
GL_SECTION_TIMEOUTS[LogSectionType.TEST_CASE] = timedelta(minutes=args.job_timeout)

job_definition_stream = StringIO()
lava_yaml.dump(generate_lava_yaml_payload(args), job_definition_stream)
job_definition = job_definition_stream.getvalue()
job_definition = generate_lava_yaml(args)

if args.dump_yaml:
with GitlabSection(
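On the newer (mesa-23.0) side of these hunks, generate_lava_yaml_payload returns a plain dict and serialization moves to the caller via lavacli's flow-style YAML helper, exactly as main() shows. A reduced sketch of that dict-to-string step; `dump_job_definition` is an illustrative name, and `flow_yaml` is used only as the script itself imports it:

```python
# Sketch of the serialization flow from main() above.
from io import StringIO
from lavacli.utils import flow_yaml as lava_yaml

def dump_job_definition(values: dict) -> str:
    stream = StringIO()
    lava_yaml.dump(values, stream)  # flow-style YAML for the LAVA submission
    return stream.getvalue()
```

Keeping the payload a dict until the last moment leaves the structure inspectable and testable; only the caller decides how it is rendered, replacing the older `yaml.dump(values, width=10000000)` one-liner.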
@@ -3,6 +3,7 @@ from .gitlab_section import GitlabSection
from .log_follower import (
LogFollower,
fatal_err,
fix_lava_color_log,
fix_lava_gitlab_section_log,
hide_sensitive_data,
print_log,
@@ -49,10 +49,6 @@ class LogFollower:
|
||||
section_is_created == section_has_started
|
||||
), "Can't follow logs beginning from uninitialized GitLab sections."
|
||||
|
||||
# Initialize fix_lava_gitlab_section_log generator
|
||||
self.gl_section_fix_gen = fix_lava_gitlab_section_log()
|
||||
next(self.gl_section_fix_gen)
|
||||
|
||||
@property
|
||||
def phase(self) -> LogSectionType:
|
||||
return (
|
||||
@@ -142,7 +138,7 @@ class LogFollower:
|
||||
# job is progressing
|
||||
is_job_healthy = True
|
||||
self.manage_gl_sections(line)
|
||||
if parsed_line := self.parse_lava_line(line):
|
||||
if parsed_line := parse_lava_line(line):
|
||||
self._buffer.append(parsed_line)
|
||||
|
||||
self.log_hints.detect_failure(new_lines)
|
||||
@@ -154,61 +150,55 @@ class LogFollower:
|
||||
self._buffer = []
|
||||
return buffer
|
||||
|
||||
def parse_lava_line(self, line) -> Optional[str]:
|
||||
prefix = ""
|
||||
suffix = ""
|
||||
|
||||
if line["lvl"] in ["results", "feedback", "debug"]:
|
||||
return
|
||||
elif line["lvl"] in ["warning", "error"]:
|
||||
prefix = CONSOLE_LOG["FG_RED"]
|
||||
suffix = CONSOLE_LOG["RESET"]
|
||||
elif line["lvl"] == "input":
|
||||
prefix = "$ "
|
||||
suffix = ""
|
||||
elif line["lvl"] == "target":
|
||||
# gl_section_fix_gen will output the stored line if it can't find a
|
||||
# match for the first split line
|
||||
# So we can recover it and put it back to the buffer
|
||||
if recovered_first_line := self.gl_section_fix_gen.send(line):
|
||||
self._buffer.append(recovered_first_line)
|
||||
def fix_lava_color_log(line):
|
||||
"""This function is a temporary solution for the color escape codes mangling
|
||||
problem. There is some problem in message passing between the LAVA
|
||||
dispatcher and the device under test (DUT). Here \x1b character is missing
|
||||
before `[:digit::digit:?:digit:?m` ANSI TTY color codes, or the more
|
||||
complicated ones with number values for text format before background and
|
||||
foreground colors.
|
||||
When this problem is fixed on the LAVA side, one should remove this function.
|
||||
"""
|
||||
line["msg"] = re.sub(r"(\[(\d+;){0,2}\d{1,3}m)", "\x1b" + r"\1", line["msg"])
|
||||
|
||||
return f'{prefix}{line["msg"]}{suffix}'
|
||||
|
||||
def fix_lava_gitlab_section_log():
|
||||
def fix_lava_gitlab_section_log(line):
|
||||
"""This function is a temporary solution for the Gitlab section markers
|
||||
splitting problem. Gitlab parses the following lines to define a collapsible
|
||||
mangling problem. Gitlab parses the following lines to define a collapsible
|
||||
gitlab section in their log:
|
||||
- \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
|
||||
- \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
|
||||
There is some problem in message passing between the LAVA dispatcher and the
|
||||
device under test (DUT), that replaces \r control characters into \n. When
|
||||
this problem is fixed on the LAVA side, one should remove this function.
|
||||
device under test (DUT), that digests \x1b and \r control characters
|
||||
incorrectly. When this problem is fixed on the LAVA side, one should remove
|
||||
this function.
|
||||
"""
|
||||
while True:
|
||||
line = yield False
|
||||
first_line = None
|
||||
split_line_pattern = re.compile(r"\x1b\[0K(section_\w+):(\d+):([^\s\r]+)$")
|
||||
second_line_pattern = re.compile(r"\x1b\[0K([\S ]+)?")
|
||||
if match := re.match(r"\[0K(section_\w+):(\d+):(\S+)\[0K([\S ]+)?", line["msg"]):
|
||||
marker, timestamp, id_collapsible, header = match.groups()
|
||||
# The above regex serves for both section start and end lines.
|
||||
# When the header is None, it means we are dealing with `section_end` line
|
||||
header = header or ""
|
||||
line["msg"] = f"\x1b[0K{marker}:{timestamp}:{id_collapsible}\r\x1b[0K{header}"
|
||||
|
||||
if not re.search(split_line_pattern, line["msg"]):
|
||||
continue
|
||||
|
||||
first_line = line["msg"]
|
||||
# Delete the current line and hold this log line stream to be able to
|
||||
# possibly merge it with the next line.
|
||||
line["msg"] = ""
|
||||
line = yield False
|
||||
def parse_lava_line(line) -> Optional[str]:
|
||||
prefix = ""
|
||||
suffix = ""
|
||||
|
||||
# This code reached when we detect a possible first split line
|
||||
if re.search(second_line_pattern, line["msg"]):
|
||||
assert first_line
|
||||
line["msg"] = f"{first_line}\r{line['msg']}"
|
||||
else:
|
||||
# The current line doesn't match with the previous one, send back the
|
||||
# latter to give the user the chance to recover it.
|
||||
yield first_line
|
||||
if line["lvl"] in ["results", "feedback", "debug"]:
|
||||
return
|
||||
elif line["lvl"] in ["warning", "error"]:
|
||||
prefix = CONSOLE_LOG["FG_RED"]
|
||||
suffix = CONSOLE_LOG["RESET"]
|
||||
elif line["lvl"] == "input":
|
||||
prefix = "$ "
|
||||
suffix = ""
|
||||
elif line["lvl"] == "target":
|
||||
fix_lava_color_log(line)
|
||||
fix_lava_gitlab_section_log(line)
|
||||
|
||||
return f'{prefix}{line["msg"]}{suffix}'
|
||||
|
||||
|
||||
def print_log(msg):
|
||||
@@ -224,5 +214,5 @@ def fatal_err(msg):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def hide_sensitive_data(yaml_data: str, hide_tag: str ="HIDEME"):
|
||||
def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
|
||||
return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)
|
||||
|
@@ -24,7 +24,7 @@ fi
|
||||
# tests in their meson.build with:
|
||||
#
|
||||
# test(...,
|
||||
# should_fail: meson.get_external_property('xfail', '').contains(t),
|
||||
# should_fail: meson.get_cross_property('xfail', '').contains(t),
|
||||
# )
|
||||
#
|
||||
# where t is the name of the test, and the '' is the string to search when
|
||||
|
89
.gitlab-ci/piglit/build-piglit_backport-s3-migration.diff
Normal file
@@ -0,0 +1,89 @@
|
||||
diff --git a/framework/replay/download_utils.py b/framework/replay/download_utils.py
|
||||
index 36322b000..5c3fe140d 100644
|
||||
--- a/framework/replay/download_utils.py
|
||||
+++ b/framework/replay/download_utils.py
|
||||
@@ -27,20 +27,20 @@ import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import xml.etree.ElementTree as ET
|
||||
-
|
||||
-from typing import Dict
|
||||
from email.utils import formatdate
|
||||
from os import path
|
||||
from time import time
|
||||
+from typing import Dict
|
||||
+from urllib.parse import urlparse
|
||||
+
|
||||
import requests
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
-from framework.replay.local_file_adapter import LocalFileAdapter
|
||||
from requests.utils import requote_uri
|
||||
|
||||
from framework import core, exceptions
|
||||
+from framework.replay.local_file_adapter import LocalFileAdapter
|
||||
from framework.replay.options import OPTIONS
|
||||
|
||||
-
|
||||
__all__ = ['ensure_file']
|
||||
|
||||
minio_credentials = None
|
||||
@@ -90,7 +90,7 @@ def get_minio_credentials(url):
|
||||
minio_credentials['SessionToken'])
|
||||
|
||||
|
||||
-def get_authorization_headers(url, resource):
|
||||
+def get_minio_authorization_headers(url, resource):
|
||||
minio_key, minio_secret, minio_token = get_minio_credentials(url)
|
||||
|
||||
date = formatdate(timeval=None, localtime=False, usegmt=True)
|
||||
@@ -107,6 +107,17 @@ def get_authorization_headers(url, resource):
|
||||
return headers
|
||||
|
||||
|
||||
+def get_jwt_authorization_headers(url, resource):
|
||||
+ date = formatdate(timeval=None, localtime=False, usegmt=True)
|
||||
+ jwt = OPTIONS.download['jwt']
|
||||
+ host = urlparse(url).netloc
|
||||
+
|
||||
+ headers = {'Host': host,
|
||||
+ 'Date': date,
|
||||
+ 'Authorization': 'Bearer %s' % (jwt)}
|
||||
+ return headers
|
||||
+
|
||||
+
|
||||
def download(url: str, file_path: str, headers: Dict[str, str], attempts: int = 2) -> None:
|
||||
"""Downloads a URL content into a file
|
||||
|
||||
@@ -178,7 +189,9 @@ def ensure_file(file_path):
|
||||
assert OPTIONS.download['minio_bucket']
|
||||
assert OPTIONS.download['role_session_name']
|
||||
assert OPTIONS.download['jwt']
|
||||
- headers = get_authorization_headers(url, file_path)
|
||||
+ headers = get_minio_authorization_headers(url, file_path)
|
||||
+ elif OPTIONS.download['jwt']:
|
||||
+ headers = get_jwt_authorization_headers(url, file_path)
|
||||
else:
|
||||
headers = None
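With this patch, ensure_file picks credentials in order: MinIO signing when a minio_host is configured, a bare JWT bearer token otherwise, and anonymous access as the fallback. A sketch of the resulting call into download() (the URL and path are placeholders; the function signatures come from this patch):

    url = "https://trace-server.example/trace1.rdc"   # placeholder
    if OPTIONS.download['minio_host']:
        headers = get_minio_authorization_headers(url, 'trace1.rdc')
    elif OPTIONS.download['jwt']:
        headers = get_jwt_authorization_headers(url, 'trace1.rdc')
    else:
        headers = None
    download(url, '/tmp/trace1.rdc', headers)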
|
||||
|
||||
diff --git a/unittests/framework/replay/test_download_utils.py b/unittests/framework/replay/test_download_utils.py
|
||||
index 1e78b26e7..749c5d835 100644
|
||||
--- a/unittests/framework/replay/test_download_utils.py
|
||||
+++ b/unittests/framework/replay/test_download_utils.py
|
||||
@@ -195,3 +195,17 @@ class TestDownloadUtils(object):
|
||||
get_request = requests_mock.request_history[1]
|
||||
assert(get_request.method == 'GET')
|
||||
assert(requests_mock.request_history[1].headers['Authorization'].startswith('AWS Key'))
|
||||
+
|
||||
+ def test_jwt_authorization(self, requests_mock):
|
||||
+ """download_utils.ensure_file: Check we send the authentication headers to the server"""
|
||||
+ # reset minio_host from previous tests
|
||||
+ OPTIONS.download['minio_host'] = ''
|
||||
+ OPTIONS.download['jwt'] = 'jwt'
|
||||
+
|
||||
+ assert not self.trace_file.check()
|
||||
+ download_utils.ensure_file(self.trace_path)
|
||||
+ TestDownloadUtils.check_same_file(self.trace_file, "remote")
|
||||
+
|
||||
+ get_request = requests_mock.request_history[0]
|
||||
+ assert(get_request.method == 'GET')
|
||||
+ assert(requests_mock.request_history[0].headers['Authorization'].startswith('Bearer'))
|
75
.gitlab-ci/piglit/run_cl.sh
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -o xtrace
|
||||
|
||||
VERSION=`head -1 install/VERSION`
|
||||
ROOTDIR=`pwd`
|
||||
|
||||
if [ -d results ]; then
|
||||
cd results && rm -rf ..?* .[!.]* *
|
||||
fi
|
||||
cd /piglit
|
||||
|
||||
export OCL_ICD_VENDORS=$ROOTDIR/install/etc/OpenCL/vendors/
|
||||
|
||||
set +e
|
||||
unset DISPLAY
|
||||
export LD_LIBRARY_PATH=$ROOTDIR/install/lib
|
||||
clinfo
|
||||
|
||||
# If the job is parallel at the gitlab job level, will take the corresponding
|
||||
# fraction of the caselist.
|
||||
if [ -n "$CI_NODE_INDEX" ]; then
|
||||
|
||||
if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
|
||||
echo "Can't parallelize piglit with multiple profiles"
|
||||
exit 1
|
||||
fi
|
||||
USE_CASELIST=1
|
||||
fi
|
||||
|
||||
if [ -n "$USE_CASELIST" ]; then
|
||||
./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format "{name}" > /tmp/case-list.txt
|
||||
|
||||
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
|
||||
|
||||
PIGLIT_TESTS="--test-list /tmp/case-list.txt"
|
||||
fi
|
||||
|
||||
./piglit run -c -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES $ROOTDIR/results
|
||||
retVal=$?
|
||||
if [ $retVal -ne 0 ]; then
|
||||
echo "Found $(cat /tmp/version.txt), expected $VERSION"
|
||||
fi
|
||||
set -e
|
||||
|
||||
PIGLIT_RESULTS=${PIGLIT_RESULTS:-$PIGLIT_PROFILES}
|
||||
mkdir -p .gitlab-ci/piglit
|
||||
./piglit summary console $ROOTDIR/results \
|
||||
| tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
|
||||
| head -n -1 \
|
||||
| grep -v ": pass" \
|
||||
| sed '/^summary:/Q' \
|
||||
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt
|
||||
|
||||
if [ -n "$USE_CASELIST" ]; then
|
||||
# Just filter the expected results based on the tests that were actually
|
||||
# executed, and switch to the version with no summary
|
||||
cat .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig | sed '/^summary:/Q' | rev \
|
||||
| cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
|
||||
grep -F -f /tmp/executed.txt $ROOTDIR/install/$PIGLIT_RESULTS.txt \
|
||||
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline || true
|
||||
else
|
||||
cp $ROOTDIR/install/$PIGLIT_RESULTS.txt .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline
|
||||
fi
|
||||
|
||||
if diff -q .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
./piglit summary html --exclude-details=pass $ROOTDIR/results/summary $ROOTDIR/results
|
||||
|
||||
echo Unexpected change in results:
|
||||
diff -u .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}
|
||||
exit 1
|
@@ -10,7 +10,7 @@ rm -rf install/bin install/include
|
||||
|
||||
# Strip the drivers in the artifacts to cut 80% of the artifacts size.
|
||||
if [ -n "$CROSS" ]; then
|
||||
STRIP=$(sed -n -E "s/strip\s*=\s*\[?'(.*)'\]?/\1/p" "$CROSS_FILE")
|
||||
STRIP=`sed -n -E "s/strip\s*=\s*'(.*)'/\1/p" "$CROSS_FILE"`
|
||||
if [ -z "$STRIP" ]; then
|
||||
echo "Failed to find strip command in cross file"
|
||||
exit 1
|
||||
|
@@ -1,41 +1,27 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
set -v
|
||||
|
||||
echo -e "\e[0Ksection_start:$(date +%s):shader-db-prepare[collapsed=true]\r\e[0KPreparing shader-db"
|
||||
ARTIFACTSDIR=$(pwd)/shader-db
|
||||
mkdir -p "$ARTIFACTSDIR"
|
||||
ARTIFACTSDIR=`pwd`/shader-db
|
||||
mkdir -p $ARTIFACTSDIR
|
||||
export DRM_SHIM_DEBUG=true
|
||||
|
||||
LIBDIR=$(pwd)/install/lib
|
||||
LIBDIR=`pwd`/install/lib
|
||||
export LD_LIBRARY_PATH=$LIBDIR
|
||||
|
||||
cd /usr/local/shader-db
|
||||
echo -e "\e[0Ksection_end:$(date +%s):shader-db-prepare\r\e[0K"
|
||||
|
||||
for driver in freedreno intel v3d vc4; do
|
||||
echo -e "\e[0Ksection_start:$(date +%s):shader-db-${driver}[collapsed=true]\r\e[0KRunning shader-db for $driver"
|
||||
env LD_PRELOAD="$LIBDIR/lib${driver}_noop_drm_shim.so" \
|
||||
./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
|
||||
> "$ARTIFACTSDIR/${driver}-shader-db.txt"
|
||||
echo -e "\e[0Ksection_end:$(date +%s):shader-db-${driver}\r\e[0K"
|
||||
for driver in freedreno intel v3d; do
|
||||
echo "Running drm-shim for $driver"
|
||||
env LD_PRELOAD=$LIBDIR/lib${driver}_noop_drm_shim.so \
|
||||
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
|
||||
> $ARTIFACTSDIR/${driver}-shader-db.txt
|
||||
done
|
||||
|
||||
# Run shader-db over a number of supported chipsets for nouveau
|
||||
for chipset in 40 a3 c0 e4 f0 134 162; do
|
||||
echo -e "\e[0Ksection_start:$(date +%s):shader-db-nouveau-${chipset}[collapsed=true]\r\e[0KRunning shader-db for nouveau - ${chipset}"
|
||||
env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \
|
||||
echo "Running drm-shim for nouveau - $chipset"
|
||||
env LD_PRELOAD=$LIBDIR/libnouveau_noop_drm_shim.so \
|
||||
NOUVEAU_CHIPSET=${chipset} \
|
||||
./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
|
||||
> "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt"
|
||||
echo -e "\e[0Ksection_end:$(date +%s):shader-db-nouveau-${chipset}\r\e[0K"
|
||||
done
|
||||
|
||||
# Run shader-db for r300 (RV370 and RV515)
|
||||
for chipset in 0x5460 0x7140; do
|
||||
echo -e "\e[0Ksection_start:$(date +%s):shader-db-r300-${chipset}[collapsed=true]\r\e[0KRunning shader-db for r300 - ${chipset}"
|
||||
env LD_PRELOAD="$LIBDIR/libradeon_noop_drm_shim.so" \
|
||||
RADEON_GPU_ID=${chipset} \
|
||||
./run -j"${FDO_CI_CONCURRENT:-4}" -o r300 ./shaders \
|
||||
> "$ARTIFACTSDIR/r300-${chipset}-shader-db.txt"
|
||||
echo -e "\e[0Ksection_end:$(date +%s):shader-db-r300-${chipset}\r\e[0K"
|
||||
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
|
||||
> $ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt
|
||||
done
|
||||
|
371
.gitlab-ci/skqp-runner.sh
Executable file
@@ -0,0 +1,371 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2022 Collabora Limited
|
||||
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
# Args:
|
||||
# $1: section id
|
||||
# $2: section header
|
||||
gitlab_section_start() {
|
||||
echo -e "\e[0Ksection_start:$(date +%s):$1[collapsed=${GL_COLLAPSED:-false}]\r\e[0K\e[32;1m$2\e[0m"
|
||||
}
|
||||
|
||||
# Args:
|
||||
# $1: section id
|
||||
gitlab_section_end() {
|
||||
echo -e "\e[0Ksection_end:$(date +%s):$1\r\e[0K"
|
||||
}
|
||||
|
||||
|
||||
# sponge allows piping to files that are being used as input.
|
||||
# E.g.: sort file.txt | sponge file.txt
|
||||
# In order to avoid installing moreutils just to have the sponge binary, we can
|
||||
# use a bash function for it
|
||||
# Source https://unix.stackexchange.com/a/561346/310927
|
||||
sponge () (
|
||||
set +x
|
||||
append=false
|
||||
|
||||
while getopts 'a' opt; do
|
||||
case $opt in
|
||||
a) append=true ;;
|
||||
*) echo error; exit 1
|
||||
esac
|
||||
done
|
||||
shift "$(( OPTIND - 1 ))"
|
||||
|
||||
outfile=$1
|
||||
|
||||
tmpfile=$(mktemp "$(dirname "$outfile")/tmp-sponge.XXXXXXXX") &&
|
||||
cat >"$tmpfile" &&
|
||||
if "$append"; then
|
||||
cat "$tmpfile" >>"$outfile"
|
||||
else
|
||||
if [ -f "$outfile" ]; then
|
||||
chmod --reference="$outfile" "$tmpfile"
|
||||
fi
|
||||
if [ -f "$outfile" ]; then
|
||||
mv "$tmpfile" "$outfile"
|
||||
elif [ -n "$outfile" ] && [ ! -e "$outfile" ]; then
|
||||
cat "$tmpfile" >"$outfile"
|
||||
else
|
||||
cat "$tmpfile"
|
||||
fi
|
||||
fi &&
|
||||
rm -f "$tmpfile"
|
||||
)
|
||||
|
||||
remove_comments_from_files() (
|
||||
INPUT_FILES="$*"
|
||||
for INPUT_FILE in ${INPUT_FILES}
|
||||
do
|
||||
[ -f "${INPUT_FILE}" ] || continue
|
||||
sed -i '/#/d' "${INPUT_FILE}"
|
||||
sed -i '/^\s*$/d' "${INPUT_FILE}"
|
||||
done
|
||||
)
|
||||
|
||||
subtract_test_lists() (
|
||||
MINUEND=$1
|
||||
sort "${MINUEND}" | sponge "${MINUEND}"
|
||||
shift
|
||||
for SUBTRAHEND in "$@"
|
||||
do
|
||||
sort "${SUBTRAHEND}" | sponge "${SUBTRAHEND}"
|
||||
join -v 1 "${MINUEND}" "${SUBTRAHEND}" |
|
||||
sponge "${MINUEND}"
|
||||
done
|
||||
)
|
||||
|
||||
merge_rendertests_files() {
|
||||
BASE_FILE=$1
|
||||
shift
|
||||
FILES="$*"
|
||||
# shellcheck disable=SC2086
|
||||
cat $FILES "$BASE_FILE" |
|
||||
sort --unique --stable --field-separator=, --key=1,1 |
|
||||
sponge "$BASE_FILE"
|
||||
}
|
||||
|
||||
assure_files() (
|
||||
for CASELIST_FILE in $*
|
||||
do
|
||||
>&2 echo "Looking for ${CASELIST_FILE}..."
|
||||
[ -f ${CASELIST_FILE} ] || (
|
||||
>&2 echo "Not found. Creating empty."
|
||||
touch ${CASELIST_FILE}
|
||||
)
|
||||
done
|
||||
)
|
||||
|
||||
# Generate rendertests from scratch, customizing with fails/flakes/crashes files
|
||||
generate_rendertests() (
|
||||
set -e
|
||||
GENERATED_FILE=$(mktemp)
|
||||
TESTS_FILE_PREFIX="${SKQP_FILE_PREFIX}-${SKQP_BACKEND}_rendertests"
|
||||
FLAKES_FILE="${TESTS_FILE_PREFIX}-flakes.txt"
|
||||
FAILS_FILE="${TESTS_FILE_PREFIX}-fails.txt"
|
||||
CRASHES_FILE="${TESTS_FILE_PREFIX}-crashes.txt"
|
||||
RENDER_TESTS_FILE="${TESTS_FILE_PREFIX}.txt"
|
||||
|
||||
# Default to an empty known flakes file if it doesn't exist.
|
||||
assure_files ${FLAKES_FILE} ${FAILS_FILE} ${CRASHES_FILE}
|
||||
|
||||
# skqp does not support comments in rendertests.txt file
|
||||
remove_comments_from_files "${FLAKES_FILE}" "${FAILS_FILE}" "${CRASHES_FILE}"
|
||||
|
||||
# create an exhaustive rendertest list
|
||||
"${SKQP_BIN_DIR}"/list_gms | sort > "$GENERATED_FILE"
|
||||
|
||||
# Remove undesirable tests from the list
|
||||
subtract_test_lists "${GENERATED_FILE}" "${CRASHES_FILE}" "${FLAKES_FILE}"
|
||||
|
||||
# Add ",0" to each test to set the expected diff sum to zero
|
||||
sed -i 's/$/,0/g' "$GENERATED_FILE"
|
||||
|
||||
merge_rendertests_files "$GENERATED_FILE" "${FAILS_FILE}"
|
||||
|
||||
mv "${GENERATED_FILE}" "${RENDER_TESTS_FILE}"
|
||||
|
||||
echo "${RENDER_TESTS_FILE}"
|
||||
)
|
||||
|
||||
generate_unittests() (
|
||||
set -e
|
||||
GENERATED_FILE=$(mktemp)
|
||||
TESTS_FILE_PREFIX="${SKQP_FILE_PREFIX}_unittests"
|
||||
FLAKES_FILE="${TESTS_FILE_PREFIX}-flakes.txt"
|
||||
FAILS_FILE="${TESTS_FILE_PREFIX}-fails.txt"
|
||||
CRASHES_FILE="${TESTS_FILE_PREFIX}-crashes.txt"
|
||||
UNIT_TESTS_FILE="${TESTS_FILE_PREFIX}.txt"
|
||||
|
||||
# Default to an empty known flakes file if it doesn't exist.
|
||||
assure_files ${FLAKES_FILE} ${FAILS_FILE} ${CRASHES_FILE}
|
||||
|
||||
# Remove unitTest_ prefix
|
||||
for UT_FILE in "${FAILS_FILE}" "${CRASHES_FILE}" "${FLAKES_FILE}"; do
|
||||
sed -i 's/^unitTest_//g' "${UT_FILE}"
|
||||
done
|
||||
|
||||
# create an exhaustive unittests list
|
||||
"${SKQP_BIN_DIR}"/list_gpu_unit_tests > "${GENERATED_FILE}"
|
||||
|
||||
# Remove undesirable tests from the list
|
||||
subtract_test_lists "${GENERATED_FILE}" "${CRASHES_FILE}" "${FLAKES_FILE}" "${FAILS_FILE}"
|
||||
|
||||
remove_comments_from_files "${GENERATED_FILE}"
|
||||
mv "${GENERATED_FILE}" "${UNIT_TESTS_FILE}"
|
||||
|
||||
echo "${UNIT_TESTS_FILE}"
|
||||
)
|
||||
|
||||
run_all_tests() {
|
||||
rm -f "${SKQP_ASSETS_DIR}"/skqp/*.txt
|
||||
}
|
||||
|
||||
copy_tests_files() (
|
||||
# Copy either unit test or render test files from a specific driver given by
|
||||
# GPU VERSION variable.
|
||||
# If there is no test file at the expected location, this function will
|
||||
# return error code 1
|
||||
SKQP_BACKEND="${1}"
|
||||
SKQP_FILE_PREFIX="${INSTALL}/${GPU_VERSION}-skqp"
|
||||
|
||||
if echo "${SKQP_BACKEND}" | grep -qE 'vk|gl(es)?'
|
||||
then
|
||||
echo "Generating rendertests.txt file"
|
||||
GENERATED_RENDERTESTS=$(generate_rendertests)
|
||||
cp "${GENERATED_RENDERTESTS}" "${SKQP_ASSETS_DIR}"/skqp/rendertests.txt
|
||||
mkdir -p "${SKQP_RESULTS_DIR}/${SKQP_BACKEND}"
|
||||
cp "${GENERATED_RENDERTESTS}" "${SKQP_RESULTS_DIR}/${SKQP_BACKEND}/generated_rendertests.txt"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# The unittests.txt path is hardcoded inside the assets directory,
|
||||
# that is why it needs to be a special case.
|
||||
if echo "${SKQP_BACKEND}" | grep -qE "unitTest"
|
||||
then
|
||||
echo "Generating unittests.txt file"
|
||||
GENERATED_UNITTESTS=$(generate_unittests)
|
||||
cp "${GENERATED_UNITTESTS}" "${SKQP_ASSETS_DIR}"/skqp/unittests.txt
|
||||
mkdir -p "${SKQP_RESULTS_DIR}/${SKQP_BACKEND}"
|
||||
cp "${GENERATED_UNITTESTS}" "${SKQP_RESULTS_DIR}/${SKQP_BACKEND}/generated_unittests.txt"
|
||||
fi
|
||||
)
|
||||
|
||||
resolve_tests_files() {
|
||||
if [ -n "${RUN_ALL_TESTS}" ]
|
||||
then
|
||||
run_all_tests
|
||||
return
|
||||
fi
|
||||
|
||||
SKQP_BACKEND=${1}
|
||||
if ! copy_tests_files "${SKQP_BACKEND}"
|
||||
then
|
||||
echo "No override test file found for ${SKQP_BACKEND}. Using the default one."
|
||||
fi
|
||||
}
|
||||
|
||||
test_vk_backend() {
|
||||
if echo "${SKQP_BACKENDS:?}" | grep -qE 'vk'
|
||||
then
|
||||
if [ -n "$VK_DRIVER" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "VK_DRIVER environment variable is missing."
|
||||
# shellcheck disable=SC2012
|
||||
VK_DRIVERS=$(ls "$INSTALL"/share/vulkan/icd.d/ | cut -f 1 -d '_')
|
||||
if [ -n "${VK_DRIVERS}" ]
|
||||
then
|
||||
echo "Please set VK_DRIVER to the correct driver from the list:"
|
||||
echo "${VK_DRIVERS}"
|
||||
fi
|
||||
echo "No Vulkan tests will be executed, but it was requested in SKQP_BACKENDS variable. Exiting."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Vulkan environment is not configured, but it was not requested by the job
|
||||
return 1
|
||||
}
|
||||
|
||||
setup_backends() {
|
||||
if test_vk_backend
|
||||
then
|
||||
export VK_ICD_FILENAMES="$INSTALL"/share/vulkan/icd.d/"$VK_DRIVER"_icd."${VK_CPU:-$(uname -m)}".json
|
||||
fi
|
||||
}
|
||||
|
||||
show_reports() (
|
||||
set +xe
|
||||
|
||||
# Unit tests produce empty HTML reports, guide the user to check the TXT file.
|
||||
if echo "${SKQP_BACKENDS}" | grep -qE "unitTest"
|
||||
then
|
||||
# Remove the empty HTML report to avoid confusion
|
||||
rm -f "${SKQP_RESULTS_DIR}"/unitTest/report.html
|
||||
|
||||
echo "See skqp unit test results at:"
|
||||
echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts${SKQP_RESULTS_DIR}/unitTest/unit_tests.txt"
|
||||
fi
|
||||
|
||||
REPORT_FILES=$(mktemp)
|
||||
find "${SKQP_RESULTS_DIR}"/**/report.html -type f > "${REPORT_FILES}"
|
||||
while read -r REPORT
|
||||
do
|
||||
# shellcheck disable=SC2001
|
||||
BACKEND_NAME=$(echo "${REPORT}" | sed 's@.*/\([^/]*\)/report.html@\1@')
|
||||
echo "See skqp ${BACKEND_NAME} render tests report at:"
|
||||
echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts${REPORT}"
|
||||
done < "${REPORT_FILES}"
|
||||
|
||||
# If there is no report available, tell the user that something is wrong.
|
||||
if [ ! -s "${REPORT_FILES}" ]
|
||||
then
|
||||
echo "No skqp report available. Probably some fatal error has occured during the skqp execution."
|
||||
fi
|
||||
)
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage: $(basename "$0") [-a]
|
||||
|
||||
Arguments:
|
||||
-a: Run all unit tests and render tests, useful when introducing a new driver to skqp.
|
||||
EOF
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while getopts ':ah' opt; do
|
||||
case "$opt" in
|
||||
a)
|
||||
echo "Running all skqp tests"
|
||||
export RUN_ALL_TESTS=1
|
||||
shift
|
||||
;;
|
||||
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
|
||||
?)
|
||||
echo "Invalid command option."
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
set -e
|
||||
|
||||
parse_args "${@}"
|
||||
|
||||
# Needed so configuration files can contain paths to files in /install
|
||||
INSTALL="$CI_PROJECT_DIR"/install
|
||||
|
||||
if [ -z "$GPU_VERSION" ]; then
|
||||
echo 'GPU_VERSION must be set to something like "llvmpipe" or
|
||||
"freedreno-a630" (it will serve as a component to find the path for files
|
||||
residing in src/**/ci/*.txt)'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
LD_LIBRARY_PATH=$INSTALL:$LD_LIBRARY_PATH
|
||||
setup_backends
|
||||
|
||||
SKQP_BIN_DIR=${SKQP_BIN_DIR:-/skqp}
|
||||
SKQP_ASSETS_DIR="${SKQP_BIN_DIR}"/assets
|
||||
SKQP_RESULTS_DIR="${SKQP_RESULTS_DIR:-${PWD}/results}"
|
||||
|
||||
mkdir -p "${SKQP_ASSETS_DIR}"/skqp
|
||||
|
||||
# Show the reports on exit, even when a test crashes
|
||||
trap show_reports INT TERM EXIT
|
||||
|
||||
SKQP_EXITCODE=0
|
||||
for SKQP_BACKEND in ${SKQP_BACKENDS}
|
||||
do
|
||||
resolve_tests_files "${SKQP_BACKEND}"
|
||||
SKQP_BACKEND_RESULTS_DIR="${SKQP_RESULTS_DIR}"/"${SKQP_BACKEND}"
|
||||
mkdir -p "${SKQP_BACKEND_RESULTS_DIR}"
|
||||
BACKEND_EXITCODE=0
|
||||
|
||||
GL_COLLAPSED=true gitlab_section_start "skqp_${SKQP_BACKEND}" "skqp logs for ${SKQP_BACKEND}"
|
||||
"${SKQP_BIN_DIR}"/skqp "${SKQP_ASSETS_DIR}" "${SKQP_BACKEND_RESULTS_DIR}" "${SKQP_BACKEND}_" ||
|
||||
BACKEND_EXITCODE=$?
|
||||
gitlab_section_end "skqp_${SKQP_BACKEND}"
|
||||
|
||||
if [ ! $BACKEND_EXITCODE -eq 0 ]
|
||||
then
|
||||
echo "skqp failed on ${SKQP_BACKEND} tests with exit code: ${BACKEND_EXITCODE}."
|
||||
else
|
||||
echo "skqp succeeded on ${SKQP_BACKEND}."
|
||||
fi
|
||||
|
||||
# Propagate error codes to leverage the final job result
|
||||
SKQP_EXITCODE=$(( SKQP_EXITCODE | BACKEND_EXITCODE ))
|
||||
done
|
||||
|
||||
exit $SKQP_EXITCODE
|
@@ -14,20 +14,6 @@
|
||||
- if: *is-scheduled-pipeline
|
||||
when: never
|
||||
|
||||
# Rule for restricted traces jobs to only run for users with access to those
|
||||
# traces (both https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db-private
|
||||
# for trace access, and minio bucket access for viewing result images from CI).
|
||||
#
|
||||
# This is a compromise, allowing some marked developers to have their MRs
|
||||
# blocked on regressions to non-redistributable traces, while not blocking
|
||||
# merges for other devs who would be unable to debug changes to them.
|
||||
.restricted-rules:
|
||||
rules:
|
||||
# If the triggerer has access to the restricted traces and if it is pre-merge
|
||||
- if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo)$/") &&
|
||||
($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
|
||||
when: never
|
||||
|
||||
# Mesa core source file dependencies that may impact any test job
|
||||
# ---------------------------------------------------------------
|
||||
.core-rules:
|
||||
@@ -174,21 +160,6 @@
|
||||
- if: '$ANHOLT_FARM == "offline"'
|
||||
when: never
|
||||
|
||||
.valve-farm-rules:
|
||||
rules:
|
||||
- if: '$VALVE_FARM == "offline"'
|
||||
when: never
|
||||
# The Valve CI Farm uses allow lists for projects/users to prevent abuse,
|
||||
# so only enable automatic testing when running in the context of Mesa to
|
||||
# prevent failures in forks.
|
||||
- if: '$CI_PROJECT_NAMESPACE != "mesa"'
|
||||
when: never
|
||||
|
||||
.austriancoder-farm-rules:
|
||||
rules:
|
||||
- if: '$AUSTRIANCODER_FARM == "offline"'
|
||||
when: never
|
||||
|
||||
# Skips freedreno jobs if either of the farms we use are offline.
|
||||
.freedreno-farm-rules:
|
||||
rules:
|
||||
@@ -199,7 +170,6 @@
|
||||
# Rules for changes that impact either freedreno or turnip.
|
||||
.freedreno-common-rules:
|
||||
rules:
|
||||
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||
- !reference [.freedreno-farm-rules, rules]
|
||||
- changes: &freedreno_core_file_list
|
||||
- src/freedreno/ci/**/*
|
||||
@@ -240,7 +210,10 @@
|
||||
.freedreno-rules-restricted:
|
||||
stage: freedreno
|
||||
rules:
|
||||
- !reference [.restricted-rules, rules]
|
||||
# If the triggerer has access to the restricted traces and if it is pre-merge
|
||||
- if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo)$/") &&
|
||||
($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
|
||||
when: never
|
||||
- !reference [.freedreno-rules, rules]
|
||||
|
||||
# Rules for GL driver performance tracking. We want them to run as code is
|
||||
@@ -322,7 +295,7 @@
|
||||
- src/panfrost/vulkan/*
|
||||
when: on_success
|
||||
- changes:
|
||||
- src/panfrost/compiler/**/*
|
||||
- src/panfrost/bifrost/**/*
|
||||
when: on_success
|
||||
|
||||
.broadcom-common-rules:
|
||||
@@ -387,24 +360,13 @@
|
||||
.radv-rules:
|
||||
stage: amd
|
||||
rules:
|
||||
- !reference [.collabora-farm-rules, rules]
|
||||
- !reference [.vulkan-rules, rules]
|
||||
- changes: &radv_file_list
|
||||
- src/amd/**/*
|
||||
- src/vulkan/**/*
|
||||
when: on_success
|
||||
|
||||
.radv-collabora-rules:
|
||||
stage: amd
|
||||
rules:
|
||||
- !reference [.collabora-farm-rules, rules]
|
||||
- !reference [.radv-rules, rules]
|
||||
|
||||
.radv-valve-rules:
|
||||
stage: amd
|
||||
rules:
|
||||
- !reference [.valve-farm-rules, rules]
|
||||
- !reference [.radv-rules, rules]
|
||||
|
||||
.virgl-rules:
|
||||
stage: layered-backends
|
||||
rules:
|
||||
@@ -568,13 +530,6 @@
|
||||
- !reference [.anv-rules, rules]
|
||||
- !reference [.zink-common-rules, rules]
|
||||
|
||||
.zink-anv-rules-restricted:
|
||||
stage: layered-backends
|
||||
rules:
|
||||
- !reference [.restricted-rules, rules]
|
||||
- !reference [.anv-rules, rules]
|
||||
- !reference [.zink-common-rules, rules]
|
||||
|
||||
.zink-turnip-rules:
|
||||
stage: layered-backends
|
||||
rules:
|
||||
@@ -649,7 +604,6 @@
|
||||
.etnaviv-rules:
|
||||
stage: etnaviv
|
||||
rules:
|
||||
- !reference [.austriancoder-farm-rules, rules]
|
||||
- !reference [.gl-rules, rules]
|
||||
- changes:
|
||||
- src/etnaviv/**/*
|
||||
|
@@ -109,6 +109,11 @@ rustfmt:
|
||||
variables:
|
||||
DEQP_VER: vk
|
||||
|
||||
.skqp-test:
|
||||
variables:
|
||||
HWCI_START_XORG: 1
|
||||
HWCI_TEST_SCRIPT: "/install/skqp-runner.sh"
|
||||
|
||||
.fossilize-test:
|
||||
script:
|
||||
- ./install/fossilize-runner.sh
|
||||
@@ -130,8 +135,6 @@ rustfmt:
|
||||
# improve it even more (see https://docs.mesa3d.org/ci/bare-metal.html for
|
||||
# setup).
|
||||
- echo -e "\e[0Ksection_start:$(date +%s):artifacts_download[collapsed=true]\r\e[0KDownloading artifacts from minio"
|
||||
# Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
|
||||
- rm -rf install
|
||||
- wget ${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}.tar.zst -S --progress=dot:giga -O- | tar --zstd -x
|
||||
- echo -e "\e[0Ksection_end:$(date +%s):artifacts_download\r\e[0K"
|
||||
artifacts:
|
||||
@@ -149,12 +152,12 @@ rustfmt:
|
||||
.baremetal-test-armhf:
|
||||
extends:
|
||||
- .baremetal-test
|
||||
- .use-debian/armhf_test
|
||||
- .use-debian/arm_test
|
||||
variables:
|
||||
BM_ROOTFS: /rootfs-armhf
|
||||
MINIO_ARTIFACT_NAME: mesa-armhf
|
||||
needs:
|
||||
- debian/armhf_test
|
||||
- debian/arm_test
|
||||
- job: debian-armhf
|
||||
artifacts: false
|
||||
|
||||
@@ -162,12 +165,12 @@ rustfmt:
|
||||
.baremetal-test-arm64:
|
||||
extends:
|
||||
- .baremetal-test
|
||||
- .use-debian/arm64_test
|
||||
- .use-debian/arm_test
|
||||
variables:
|
||||
BM_ROOTFS: /rootfs-arm64
|
||||
MINIO_ARTIFACT_NAME: mesa-arm64
|
||||
needs:
|
||||
- debian/arm64_test
|
||||
- debian/arm_test
|
||||
- job: debian-arm64
|
||||
artifacts: false
|
||||
|
||||
@@ -175,12 +178,12 @@ rustfmt:
|
||||
.baremetal-arm64-asan-test:
|
||||
extends:
|
||||
- .baremetal-test
|
||||
- .use-debian/arm64_test
|
||||
- .use-debian/arm_test
|
||||
variables:
|
||||
DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.6:/install/lib/libdlclose-skip.so"
|
||||
MINIO_ARTIFACT_NAME: mesa-arm64-asan
|
||||
needs:
|
||||
- debian/arm64_test
|
||||
- debian/arm_test
|
||||
- job: debian-arm64-asan
|
||||
artifacts: false
|
||||
|
||||
@@ -201,13 +204,13 @@ rustfmt:
|
||||
# like FDO_DISTRIBUTION_TAG for *the* image, there is no way to
|
||||
# depend on more than one image per job. So, the job container is
|
||||
# built as part of the CI in the boot2container project.
|
||||
image: registry.freedesktop.org/mupuf/valve-infra/mesa-trigger:2022-12-08.1
|
||||
image: registry.freedesktop.org/mupuf/valve-infra/mesa-trigger:2022-03-03.2
|
||||
timeout: 1h 40m
|
||||
variables:
|
||||
# No need by default to pull the whole repo
|
||||
GIT_STRATEGY: none
|
||||
# boot2container initrd configuration parameters.
|
||||
B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/mupuf/valve-infra/-/package_files/519/download' # Linux 6.1
|
||||
B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/mupuf/valve-infra/-/package_files/144/download' # 5.17.1
|
||||
B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.8/downloads/initramfs.linux_amd64.cpio.xz'
|
||||
B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$'
|
||||
B2C_JOB_WARN_REGEX: '\*ERROR\* ring .* timeout, but soft recovered'
|
||||
@@ -236,11 +239,9 @@ rustfmt:
|
||||
GENERATE_ENV_SCRIPT: "${CI_COMMON_SCRIPTS}/generate-env.sh"
|
||||
B2C_JOB_TEMPLATE: "${CI_B2C_ARTIFACTS}/b2c.yml.jinja2.jinja2"
|
||||
JOB_FOLDER: "job_folder"
|
||||
|
||||
before_script:
|
||||
# We don't want the tarball unpacking of .test, but will take the JWT bits.
|
||||
- !reference [default, before_script]
|
||||
|
||||
- |
|
||||
set -x
|
||||
|
||||
@@ -320,6 +321,7 @@ rustfmt:
|
||||
env PYTHONUNBUFFERED=1 executorctl \
|
||||
run -w b2c.yml.jinja2 -j $(slugify "$CI_JOB_NAME") -s ${JOB_FOLDER}
|
||||
|
||||
ls -l
|
||||
# Anything our job places in results/ will be collected by the
|
||||
# Gitlab coordinator for status presentation. results/junit.xml
|
||||
# will be parsed by the UI for more detailed explanations of
|
||||
@@ -330,7 +332,7 @@ rustfmt:
|
||||
paths:
|
||||
- ${JOB_FOLDER}/results
|
||||
reports:
|
||||
junit: ${JOB_FOLDER}/results/**/junit.xml
|
||||
junit: ${JOB_FOLDER}/results/junit.xml
|
||||
|
||||
.b2c-test-vk:
|
||||
extends:
|
||||
|
@@ -1,22 +1,15 @@
|
||||
from contextlib import nullcontext as does_not_raise
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
from itertools import cycle
|
||||
from typing import Any, Callable, Generator, Iterable, Optional, Tuple, Union
|
||||
from typing import Callable, Generator, Iterable, Optional, Tuple, Union
|
||||
|
||||
import yaml
|
||||
from freezegun import freeze_time
|
||||
from lava.utils.log_section import (
|
||||
DEFAULT_GITLAB_SECTION_TIMEOUTS,
|
||||
FALLBACK_GITLAB_SECTION_TIMEOUT,
|
||||
LogSectionType,
|
||||
)
|
||||
from lavacli.utils import flow_yaml as lava_yaml
|
||||
|
||||
|
||||
def yaml_dump(data: dict[str, Any]) -> str:
|
||||
stream = StringIO()
|
||||
lava_yaml.dump(data, stream)
|
||||
return stream.getvalue()
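lavacli's flow_yaml object exposes dump(data, stream) rather than a dumps()-style shortcut, hence the StringIO wrapper above. A quick round-trip check using the load() call that appears later in this file (the log record is a made-up sample):

    record = [{"dt": "2022-12-08T00:00:00", "lvl": "target", "msg": "hello"}]
    assert lava_yaml.load(yaml_dump(record)) == record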
|
||||
|
||||
|
||||
def section_timeout(section_type: LogSectionType) -> int:
|
||||
@@ -53,7 +46,7 @@ def jobs_logs_response(
|
||||
|
||||
logs = [timed_msg] if msg is None else msg
|
||||
|
||||
return finished, yaml_dump(logs)
|
||||
return finished, yaml.safe_dump(logs)
|
||||
|
||||
|
||||
def section_aware_message_generator(
|
||||
|
@@ -298,9 +298,11 @@ def test_parse_job_result_from_log(message, expectation, mock_proxy):
|
||||
reason="Slow and sketchy test. Needs a LAVA log raw file at /tmp/log.yaml"
|
||||
)
|
||||
def test_full_yaml_log(mock_proxy, frozen_time):
|
||||
import itertools
|
||||
import random
|
||||
from datetime import datetime
|
||||
|
||||
from lavacli.utils import flow_yaml as lava_yaml
|
||||
import yaml
|
||||
|
||||
def time_travel_from_log_chunk(data_chunk):
|
||||
if not data_chunk:
|
||||
@@ -319,28 +321,26 @@ def test_full_yaml_log(mock_proxy, frozen_time):
|
||||
# the same as from the job submitter execution
|
||||
with open("/tmp/log.yaml", "r") as f:
|
||||
first_log = f.readline()
|
||||
first_log_time = lava_yaml.load(first_log)[0]["dt"]
|
||||
first_log_time = yaml.safe_load(first_log)[0]["dt"]
|
||||
frozen_time.move_to(first_log_time)
|
||||
|
||||
def load_lines() -> list:
|
||||
with open("/tmp/log.yaml", "r") as f:
|
||||
# data = yaml.safe_load(f)
|
||||
data = f.readlines()
|
||||
stream = chain(data)
|
||||
data = yaml.safe_load(f)
|
||||
chain = itertools.chain(data)
|
||||
try:
|
||||
while True:
|
||||
data_chunk = [next(stream) for _ in range(random.randint(0, 50))]
|
||||
serial_message = "".join(data_chunk)
|
||||
data_chunk = [next(chain) for _ in range(random.randint(0, 50))]
|
||||
# Suppose that the first message timestamp is the same as the
|
||||
# log fetch RPC call
|
||||
time_travel_from_log_chunk(data_chunk)
|
||||
yield False, "[]"
|
||||
yield False, []
|
||||
# Travel to the same datetime of the last fetched log line
|
||||
# in the chunk
|
||||
time_travel_from_log_chunk(data_chunk)
|
||||
yield False, serial_message
|
||||
yield False, data_chunk
|
||||
except StopIteration:
|
||||
yield True, serial_message
|
||||
yield True, data_chunk
|
||||
return
|
||||
|
||||
proxy = mock_proxy()
|
||||
|
@@ -8,16 +8,18 @@
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from lava.exceptions import MesaCIKnownIssueException, MesaCITimeoutError
|
||||
from lava.utils import (
|
||||
GitlabSection,
|
||||
LogFollower,
|
||||
LogSectionType,
|
||||
fix_lava_color_log,
|
||||
fix_lava_gitlab_section_log,
|
||||
hide_sensitive_data,
|
||||
)
|
||||
|
||||
from ..lava.helpers import create_lava_yaml_msg, does_not_raise, lava_yaml, yaml_dump
|
||||
from ..lava.helpers import create_lava_yaml_msg, does_not_raise
|
||||
|
||||
GITLAB_SECTION_SCENARIOS = {
|
||||
"start collapsed": (
|
||||
@@ -156,49 +158,91 @@ SENSITIVE_DATA_SCENARIOS = {
|
||||
ids=SENSITIVE_DATA_SCENARIOS.keys(),
|
||||
)
|
||||
def test_hide_sensitive_data(input, expectation, tag):
|
||||
yaml_data = yaml_dump(input)
|
||||
yaml_data = yaml.safe_dump(input)
|
||||
yaml_result = hide_sensitive_data(yaml_data, tag)
|
||||
result = lava_yaml.load(yaml_result)
|
||||
result = yaml.safe_load(yaml_result)
|
||||
|
||||
assert result == expectation
|
||||
|
||||
|
||||
GITLAB_SECTION_SPLIT_SCENARIOS = {
|
||||
"Split section_start at target level": (
|
||||
"\x1b[0Ksection_start:1668454947:test_post_process[collapsed=true]\r\x1b[0Kpost-processing test results",
|
||||
(
|
||||
"\x1b[0Ksection_start:1668454947:test_post_process[collapsed=true]",
|
||||
"\x1b[0Kpost-processing test results",
|
||||
COLOR_MANGLED_SCENARIOS = {
|
||||
"Mangled error message at target level": (
|
||||
create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
|
||||
"\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
|
||||
),
|
||||
"Mangled pass message at target level": (
|
||||
create_lava_yaml_msg(
|
||||
msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
|
||||
lvl="target",
|
||||
),
|
||||
"\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
|
||||
),
|
||||
"Split section_end at target level": (
|
||||
"\x1b[0Ksection_end:1666309222:test_post_process\r\x1b[0K",
|
||||
("\x1b[0Ksection_end:1666309222:test_post_process", "\x1b[0K"),
|
||||
"Mangled error message with bold formatting at target level": (
|
||||
create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
|
||||
"\x1b[1;31mReview the image changes...",
|
||||
),
|
||||
"Second line is not split from the first": (
|
||||
("\x1b[0Ksection_end:1666309222:test_post_process", "Any message"),
|
||||
("\x1b[0Ksection_end:1666309222:test_post_process", "Any message"),
|
||||
"Mangled error message with high intensity background at target level": (
|
||||
create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
|
||||
"\x1b[100mReview the image changes...",
|
||||
),
|
||||
"Mangled error message with underline+bg color+fg color at target level": (
|
||||
create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
|
||||
"\x1b[4;41;97mReview the image changes...",
|
||||
),
|
||||
"Bad input for color code.": (
|
||||
create_lava_yaml_msg(
|
||||
msg="[4;97 This message is missing the `m`.", lvl="target"
|
||||
),
|
||||
"[4;97 This message is missing the `m`.",
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"expected_message, messages",
|
||||
GITLAB_SECTION_SPLIT_SCENARIOS.values(),
|
||||
ids=GITLAB_SECTION_SPLIT_SCENARIOS.keys(),
|
||||
"message, fixed_message",
|
||||
COLOR_MANGLED_SCENARIOS.values(),
|
||||
ids=COLOR_MANGLED_SCENARIOS.keys(),
|
||||
)
|
||||
def test_fix_lava_gitlab_section_log(expected_message, messages):
|
||||
fixed_messages = []
|
||||
gen = fix_lava_gitlab_section_log()
|
||||
next(gen)
|
||||
def test_fix_lava_color_log(message, fixed_message):
|
||||
fix_lava_color_log(message)
|
||||
|
||||
for message in messages:
|
||||
lava_log = create_lava_yaml_msg(msg=message, lvl="target")
|
||||
if recovered_line := gen.send(lava_log):
|
||||
fixed_messages.append((recovered_line, lava_log["msg"]))
|
||||
fixed_messages.append(lava_log["msg"])
|
||||
assert message["msg"] == fixed_message
|
||||
|
||||
assert expected_message in fixed_messages
|
||||
|
||||
GITLAB_SECTION_MANGLED_SCENARIOS = {
|
||||
"Mangled section_start at target level": (
|
||||
create_lava_yaml_msg(
|
||||
msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
|
||||
lvl="target",
|
||||
),
|
||||
"\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
|
||||
),
|
||||
"Mangled section_start at target level with header with spaces": (
|
||||
create_lava_yaml_msg(
|
||||
msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
|
||||
lvl="target",
|
||||
),
|
||||
"\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
|
||||
),
|
||||
"Mangled section_end at target level": (
|
||||
create_lava_yaml_msg(
|
||||
msg="[0Ksection_end:1652658415:test_setup[0K",
|
||||
lvl="target",
|
||||
),
|
||||
"\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"message, fixed_message",
|
||||
GITLAB_SECTION_MANGLED_SCENARIOS.values(),
|
||||
ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
|
||||
)
|
||||
def test_fix_lava_gitlab_section_log(message, fixed_message):
|
||||
fix_lava_gitlab_section_log(message)
|
||||
|
||||
assert message["msg"] == fixed_message
|
||||
|
||||
|
||||
WATCHDOG_SCENARIOS = {
|
||||
|
@@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu
|
||||
|
||||
function execute_testsuite {
|
||||
local RESULTS_FOLDER EXEC_DONE_FILE
|
||||
|
||||
RESULTS_FOLDER="results/$1"
|
||||
EXEC_DONE_FILE="$RESULTS_FOLDER/.done"
|
||||
|
||||
if [ ! -f "$EXEC_DONE_FILE" ]; then
|
||||
DEQP_RESULTS_DIR="$RESULTS_FOLDER" PIGLIT_RESULTS_DIR="$RESULTS_FOLDER" $2
|
||||
touch "$EXEC_DONE_FILE"
|
||||
else
|
||||
echo "--> Skipped, as it already was executed"
|
||||
fi
|
||||
}
|
||||
|
||||
echo -e "\n# GL CTS testing"
|
||||
DEQP_VER=gl46 execute_testsuite gl ./install/deqp-runner.sh
|
||||
|
||||
echo -e "\n# GLES CTS testing"
|
||||
DEQP_SUITE=zink-radv execute_testsuite gles ./install/deqp-runner.sh
|
||||
|
||||
echo -e "\n# Piglit testing"
|
||||
execute_testsuite piglit ./install/piglit/piglit-runner.sh
|
@@ -1,3 +1,10 @@
|
||||
$dxil_dll = cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 -no_logo && where dxil.dll" 2>&1
|
||||
if ($dxil_dll -notmatch "dxil.dll$") {
|
||||
Write-Output "Couldn't get path to dxil.dll"
|
||||
exit 1
|
||||
}
|
||||
$env:Path = "$(Split-Path $dxil_dll);$env:Path"
|
||||
|
||||
# VK_ICD_FILENAMES environment variable is not used when running with
|
||||
# elevated privileges. Add a key to the registry instead.
|
||||
$hkey_path = "HKLM:\SOFTWARE\Khronos\Vulkan\Drivers\"
|
||||
|
@@ -83,5 +83,6 @@ Copy-Item ".\.gitlab-ci\windows\spirv2dxil_check.ps1" -Destination $installdir
|
||||
Copy-Item ".\.gitlab-ci\windows\spirv2dxil_run.ps1" -Destination $installdir
|
||||
|
||||
Copy-Item ".\.gitlab-ci\windows\deqp_runner_run.ps1" -Destination $installdir
|
||||
Copy-Item ".\src\microsoft\ci\deqp-dozen.toml" -Destination $installdir
|
||||
|
||||
Get-ChildItem -Recurse -Filter "ci" | Get-ChildItem -Include "*.txt","*.toml" | Copy-Item -Destination $installdir
|
||||
Get-ChildItem -Recurse -Filter "ci" | Get-ChildItem -Filter "*.txt" | Copy-Item -Destination $installdir
|
||||
|
@@ -64,9 +64,10 @@ if (!$?) {
|
||||
}
|
||||
|
||||
Push-Location -Path ".\deps\libva"
|
||||
Write-Host "Checking out libva df3c584bb79d1a1e521372d62fa62e8b1c52ce6c"
|
||||
# libva-win32 is released with libva version 2.17 (see https://github.com/intel/libva/releases/tag/2.17.0)
|
||||
git checkout 2.17.0
|
||||
Write-Host "Checking out libva commit 2579eb0f77897dc01a02c1e43defc63c40fd2988"
|
||||
# Checking out commit hash with libva-win32 support
|
||||
# This feature will be released with libva version 2.17
|
||||
git checkout 2579eb0f77897dc01a02c1e43defc63c40fd2988
|
||||
Pop-Location
|
||||
|
||||
Write-Host "Building libva"
|
||||
|
@@ -1,36 +1,33 @@
|
||||
Get-Date
|
||||
Write-Host "Cloning Waffle"
|
||||
Write-Host "Downloading Freeglut"
|
||||
|
||||
$freeglut_zip = 'freeglut-MSVC.zip'
|
||||
$freeglut_url = "https://www.transmissionzero.co.uk/files/software/development/GLUT/$freeglut_zip"
|
||||
|
||||
For ($i = 0; $i -lt 5; $i++) {
|
||||
Invoke-WebRequest -Uri $freeglut_url -OutFile $freeglut_zip
|
||||
$freeglut_downloaded = $?
|
||||
if ($freeglut_downloaded) {
|
||||
Break
|
||||
}
|
||||
}
|
||||
|
||||
if (!$freeglut_downloaded) {
|
||||
Write-Host "Failed to download Freeglut"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
Get-Date
|
||||
Write-Host "Installing Freeglut"
|
||||
Expand-Archive $freeglut_zip -DestinationPath C:\
|
||||
if (!$?) {
|
||||
Write-Host "Failed to install Freeglut"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
$MyPath = $MyInvocation.MyCommand.Path | Split-Path -Parent
|
||||
. "$MyPath\mesa_vs_init.ps1"
|
||||
|
||||
git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/waffle.git 'C:\src\waffle'
|
||||
if (!$?) {
|
||||
Write-Host "Failed to clone Waffle repository"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
Push-Location -Path C:\src\waffle
|
||||
git checkout 950a1f35a718bc2a8e1dda75845e52651bb331a7
|
||||
Pop-Location
|
||||
|
||||
Get-Date
|
||||
$waffle_build = New-Item -ItemType Directory -Path "C:\src\waffle" -Name "build"
|
||||
Push-Location -Path $waffle_build.FullName
|
||||
Write-Host "Compiling Waffle"
|
||||
meson setup `
|
||||
--buildtype=release `
|
||||
--default-library=static `
|
||||
--prefix="C:\Waffle" && `
|
||||
ninja -j32 install
|
||||
$buildstatus = $?
|
||||
Pop-Location
|
||||
Remove-Item -Recurse -Path $waffle_build
|
||||
if (!$buildstatus) {
|
||||
Write-Host "Failed to compile or install Waffle"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
Get-Date
|
||||
Write-Host "Downloading glext.h"
|
||||
New-Item -ItemType Directory -Path ".\glext" -Name "GL"
|
||||
@@ -39,31 +36,39 @@ Invoke-WebRequest -Uri 'https://www.khronos.org/registry/OpenGL/api/GL/glext.h'
|
||||
|
||||
Get-Date
|
||||
Write-Host "Cloning Piglit"
|
||||
git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\piglit'
|
||||
git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\src\piglit'
|
||||
if (!$?) {
|
||||
Write-Host "Failed to clone Piglit repository"
|
||||
Exit 1
|
||||
}
|
||||
Push-Location -Path C:\piglit
|
||||
git checkout b41accc83689966f91217fc5b57dbe06202b8c8c
|
||||
Push-Location -Path C:\src\piglit
|
||||
git checkout f7f2a6c2275cae023a27b6cc81be3dda8c99492d
|
||||
Pop-Location
|
||||
|
||||
Get-Date
|
||||
$piglit_build = New-Item -ItemType Directory -Path "C:\src\piglit" -Name "build"
|
||||
Push-Location -Path $piglit_build.FullName
|
||||
Write-Host "Compiling Piglit"
|
||||
cmake -S . -B . `
|
||||
cmake .. `
|
||||
-GNinja `
|
||||
-DCMAKE_BUILD_TYPE=Release `
|
||||
-DPIGLIT_USE_WAFFLE=ON `
|
||||
-DWaffle_INCLUDE_DIRS=C:\Waffle\include\waffle-1 `
|
||||
-DWaffle_LDFLAGS=C:\Waffle\lib\libwaffle-1.a `
|
||||
-DCMAKE_INSTALL_PREFIX="C:\Piglit" `
|
||||
-DGLUT_INCLUDE_DIR=C:\freeglut\include `
|
||||
-DGLUT_glut_LIBRARY_RELEASE=C:\freeglut\lib\x64\freeglut.lib `
|
||||
-DGLEXT_INCLUDE_DIR=.\glext && `
|
||||
ninja -j32
|
||||
$buildstatus = $?
|
||||
ninja -j32 install | Out-Null
|
||||
$installstatus = $?
|
||||
Pop-Location
|
||||
if (!$buildstatus) {
|
||||
Write-Host "Failed to compile Piglit"
|
||||
Remove-Item -Recurse -Path $piglit_build
|
||||
if (!$buildstatus -Or !$installstatus) {
|
||||
Write-Host "Failed to compile or install Piglit"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
Copy-Item -Path C:\freeglut\bin\x64\freeglut.dll -Destination C:\Piglit\lib\piglit\bin\freeglut.dll
|
||||
|
||||
Get-Date
|
||||
Write-Host "Cloning spirv-samples"
|
||||
git clone --no-progress --single-branch --no-checkout https://github.com/dneto0/spirv-samples.git C:\spirv-samples\
|
||||
@@ -99,8 +104,8 @@ cmake -S $($deqp_source) `
|
||||
ninja -j32
|
||||
$buildstatus = $?
|
||||
Pop-Location
|
||||
if (!$buildstatus) {
|
||||
Write-Host "Failed to compile deqp"
|
||||
if (!$buildstatus -Or !$installstatus) {
|
||||
Write-Host "Failed to compile or install deqp"
|
||||
Exit 1
|
||||
}
|
||||
|
||||
@@ -130,50 +135,5 @@ Write-Host "Installing deqp-runner"
|
||||
$env:Path += ";$($env:USERPROFILE)\.cargo\bin"
|
||||
cargo install --git https://gitlab.freedesktop.org/anholt/deqp-runner.git
|
||||
|
||||
Get-Date
|
||||
Write-Host "Downloading DirectX 12 Agility SDK"
|
||||
Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12/1.706.3-preview -OutFile 'agility.zip'
|
||||
Expand-Archive -Path 'agility.zip' -DestinationPath 'C:\agility'
|
||||
Remove-Item 'agility.zip'
|
||||
|
||||
$piglit_bin = 'C:\Piglit\bin'
|
||||
$vk_cts_bin = "$deqp_build\external\vulkancts\modules\vulkan"
|
||||
|
||||
# Copy Agility SDK into subfolder of piglit and Vulkan CTS
|
||||
$agility_dest = New-Item -ItemType Directory -Path $piglit_bin -Name 'D3D12'
|
||||
Copy-Item 'C:\agility\build\native\bin\x64\*.dll' -Destination $agility_dest
|
||||
$agility_dest = New-Item -ItemType Directory -Path $vk_cts_bin -Name 'D3D12'
|
||||
Copy-Item 'C:\agility\build\native\bin\x64\*.dll' -Destination $agility_dest
|
||||
Remove-Item -Recurse 'C:\agility'
|
||||
|
||||
Get-Date
|
||||
Write-Host "Downloading Updated WARP"
|
||||
Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.WARP/1.0.2 -OutFile 'warp.zip'
|
||||
Expand-Archive -Path 'warp.zip' -DestinationPath 'C:\warp'
|
||||
Remove-Item 'warp.zip'
|
||||
|
||||
# Copy WARP next to piglit and Vulkan CTS
|
||||
Copy-Item 'C:\warp\build\native\amd64\d3d10warp.dll' -Destination $piglit_bin
|
||||
Copy-Item 'C:\warp\build\native\amd64\d3d10warp.dll' -Destination $vk_cts_bin
|
||||
Remove-Item -Recurse 'C:\warp'
|
||||
|
||||
Get-Date
|
||||
Write-Host "Downloading DirectXShaderCompiler release"
|
||||
Invoke-WebRequest -Uri https://github.com/microsoft/DirectXShaderCompiler/releases/download/v1.7.2207/dxc_2022_07_18.zip -OutFile 'DXC.zip'
|
||||
Expand-Archive -Path 'DXC.zip' -DestinationPath 'C:\DXC'
|
||||
# No more need to get dxil.dll from the VS install
|
||||
Copy-Item 'C:\DXC\bin\x64\*.dll' -Destination 'C:\Windows\System32'
|
||||
|
||||
Get-Date
|
||||
Write-Host "Enabling developer mode"
|
||||
# Create AppModelUnlock if it doesn't exist, required for enabling Developer Mode
|
||||
$RegistryKeyPath = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\AppModelUnlock"
|
||||
if (-not(Test-Path -Path $RegistryKeyPath)) {
|
||||
New-Item -Path $RegistryKeyPath -ItemType Directory -Force
|
||||
}
|
||||
|
||||
# Add registry value to enable Developer Mode
|
||||
New-ItemProperty -Path $RegistryKeyPath -Name AllowDevelopmentWithoutDevLicense -PropertyType DWORD -Value 1 -Force
|
||||
|
||||
Get-Date
|
||||
Write-Host "Complete"
|
||||
|
@@ -1,13 +1,28 @@
|
||||
$env:PIGLIT_NO_FAST_SKIP = 1
|
||||
|
||||
Copy-Item -Path _install\bin\opengl32.dll -Destination C:\Piglit\bin\opengl32.dll
|
||||
Copy-Item -Path _install\bin\libgallium_wgl.dll -Destination C:\Piglit\bin\libgallium_wgl.dll
|
||||
Copy-Item -Path _install\bin\libglapi.dll -Destination C:\Piglit\bin\libglapi.dll
|
||||
Copy-Item -Path _install\bin\opengl32.dll -Destination C:\Piglit\lib\piglit\bin\opengl32.dll
|
||||
Copy-Item -Path _install\bin\libgallium_wgl.dll -Destination C:\Piglit\lib\piglit\bin\libgallium_wgl.dll
|
||||
Copy-Item -Path _install\bin\libglapi.dll -Destination C:\Piglit\lib\piglit\bin\libglapi.dll
|
||||
|
||||
deqp-runner suite --output .\logs --suite "_install/$env:PIGLIT_SUITE" `
|
||||
--skips "_install/$env:PIGLIT_SKIPS" `
|
||||
--baseline "_install/$env:PIGLIT_BASELINE" `
|
||||
--flakes "_install/$env:PIGLIT_FLAKES"
|
||||
if (!$?) {
|
||||
# Run this using VsDevCmd.bat to ensure DXIL.dll is in %PATH%
|
||||
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && py -3 C:\Piglit\bin\piglit.py run `"$env:PIGLIT_PROFILE`" $env:PIGLIT_OPTIONS $env:PIGLIT_TESTS .\results"
|
||||
|
||||
py -3 C:\Piglit\bin\piglit.py summary console .\results | Select -SkipLast 1 | Select-String -NotMatch -Pattern ': pass' | Set-Content -Path .\result.txt
|
||||
|
||||
$reference = Get-Content ".\_install\$env:PIGLIT_RESULTS.txt"
|
||||
$result = Get-Content .\result.txt
|
||||
if (-Not ($reference -And $result)) {
|
||||
Exit 1
|
||||
}
|
||||
|
||||
$diff = Compare-Object -ReferenceObject $reference -DifferenceObject $result
|
||||
if (-Not $diff) {
|
||||
Exit 0
|
||||
}
|
||||
|
||||
py -3 C:\Piglit\bin\piglit.py summary html --exclude-details=pass .\summary .\results
|
||||
|
||||
Write-Host "Unexpected change in results:"
|
||||
Write-Output $diff | Format-Table -Property SideIndicator,InputObject -Wrap
|
||||
|
||||
Exit 1
|
||||
|
@@ -1,3 +1,11 @@
|
||||
# Ensure that dxil.dll is on the %PATH%
|
||||
$dxil_dll = cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 -no_logo && where dxil.dll" 2>&1
|
||||
if ($dxil_dll -notmatch "dxil.dll$") {
|
||||
Write-Output "Couldn't get path to dxil.dll"
|
||||
exit 1
|
||||
}
|
||||
$env:Path = "$(Split-Path $dxil_dll);$env:Path"
|
||||
|
||||
$exec_mode_to_stage = @{ Fragment = "fragment"; Vertex = "vertex"; GLCompute = "compute" }
|
||||
|
||||
$spvasm_files = (Get-ChildItem C:\spirv-samples\spvasm\*.spvasm) | Sort-Object Name
|
||||
|
37779
.pick_status.json
File diff suppressed because it is too large
@@ -123,8 +123,8 @@ meson.build @dbaker @eric
|
||||
/src/gallium/drivers/freedreno/ @robclark
|
||||
|
||||
# Imagination
|
||||
/include/drm-uapi/pvr_drm.h @CreativeCylon @frankbinns @MTCoster
|
||||
/src/imagination/ @CreativeCylon @frankbinns @MTCoster
|
||||
/include/drm-uapi/pvr_drm.h @CreativeCylon @frankbinns
|
||||
/src/imagination/ @CreativeCylon @frankbinns
|
||||
/src/imagination/rogue/ @simon-perretta-img
|
||||
|
||||
# Intel
|
||||
|
@@ -30,7 +30,6 @@ LIBDRM_VERSION = $(shell cat external/libdrm/meson.build | grep -o "\<version\>\
|
||||
|
||||
MESA_VK_LIB_SUFFIX_amd := radeon
|
||||
MESA_VK_LIB_SUFFIX_intel := intel
|
||||
MESA_VK_LIB_SUFFIX_intel_hasvk := intel_hasvk
|
||||
MESA_VK_LIB_SUFFIX_freedreno := freedreno
|
||||
MESA_VK_LIB_SUFFIX_broadcom := broadcom
|
||||
MESA_VK_LIB_SUFFIX_panfrost := panfrost
|
||||
|
@@ -45,26 +45,24 @@ def is_commit_valid(commit: str) -> bool:
|
||||
return ret == 0
|
||||
|
||||
|
||||
def branch_has_commit(upstream_branch: str, commit: str) -> bool:
|
||||
def branch_has_commit(upstream: str, branch: str, commit: str) -> bool:
|
||||
"""
|
||||
Returns True if the commit is actually present in the branch
|
||||
"""
|
||||
ret = subprocess.call(['git', 'merge-base', '--is-ancestor',
|
||||
commit, upstream_branch],
|
||||
commit, upstream + '/' + branch],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL)
|
||||
return ret == 0
|
||||
|
||||
|
||||
def branch_has_backport_of_commit(upstream_branch: str, commit: str) -> str:
|
||||
def branch_has_backport_of_commit(upstream: str, branch: str, commit: str) -> str:
|
||||
"""
|
||||
Returns the commit hash if the commit has been backported to the branch,
|
||||
or an empty string if it hasn't
|
||||
"""
|
||||
upstream, _ = upstream_branch.split('/', 1)
|
||||
|
||||
out = subprocess.check_output(['git', 'log', '--format=%H',
|
||||
upstream + '..' + upstream_branch,
|
||||
branch + '-branchpoint..' + upstream + '/' + branch,
|
||||
'--grep', 'cherry picked from commit ' + commit],
|
||||
stderr=subprocess.DEVNULL)
|
||||
return out.decode().strip()
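For instance, per the parametrized expectations near the end of this file, a commit that was cherry-picked from main into 20.1 resolves to the backport's hash. This uses the single upstream_branch argument form shown first above, and get_upstream() as defined by the tests; the hashes are copied from the test table:

    backport = branch_has_backport_of_commit(
        get_upstream() + '/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd')
    assert backport == 'd043d24654c851f0be57dbbf48274b5373dea42b'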
|
||||
@@ -91,7 +89,7 @@ def validate_branch(branch: str) -> str:
|
||||
out = subprocess.check_output(['git', 'remote', '--verbose'],
|
||||
stderr=subprocess.DEVNULL)
|
||||
remotes = out.decode().splitlines()
|
||||
upstream, _ = branch.split('/', 1)
|
||||
(upstream, _) = branch.split('/')
|
||||
valid_remote = False
|
||||
for line in remotes:
|
||||
if line.startswith(upstream + '\t'):
|
||||
@@ -127,15 +125,17 @@ if __name__ == "__main__":
|
||||
help='colorize output (default: true if stdout is a terminal)')
|
||||
args = parser.parse_args()
|
||||
|
||||
if branch_has_commit(args.branch, args.commit):
|
||||
print_(args, True, 'Commit ' + args.commit + ' is in branch ' + args.branch)
|
||||
(upstream, branch) = args.branch.split('/')
|
||||
|
||||
if branch_has_commit(upstream, branch, args.commit):
|
||||
print_(args, True, 'Commit ' + args.commit + ' is in branch ' + branch)
|
||||
exit(0)
|
||||
|
||||
backport = branch_has_backport_of_commit(args.branch, args.commit)
|
||||
backport = branch_has_backport_of_commit(upstream, branch, args.commit)
|
||||
if backport:
|
||||
print_(args, True,
|
||||
'Commit ' + args.commit + ' was backported to branch ' + args.branch + ' as commit ' + backport)
|
||||
'Commit ' + args.commit + ' was backported to branch ' + branch + ' as commit ' + backport)
|
||||
exit(0)
|
||||
|
||||
print_(args, False, 'Commit ' + args.commit + ' is NOT in branch ' + args.branch)
|
||||
print_(args, False, 'Commit ' + args.commit + ' is NOT in branch ' + branch)
|
||||
exit(1)
|
||||
|
@@ -46,7 +46,6 @@ def test_canonicalize_commit(commit: str, expected: bool) -> None:
    'commit, expected',
    [
        (get_upstream() + '/20.1', True),
        (get_upstream() + '/staging/20.1', True),
        (get_upstream() + '/main', True),
        ('20.1', False),
        ('main', False),
@@ -74,7 +73,6 @@ def test_validate_branch(commit: str, expected: bool) -> None:
        ('20.1-branchpoint', True),
        ('20.1', False),
        (get_upstream() + '/20.1', True),
        (get_upstream() + '/staging/20.1', True),
        ('e58a10af640ba58b6001f5c5ad750b782547da76', True),
        ('d043d24654c851f0be57dbbf48274b5373dea42b', True),
        ('dd2bd68fa69124c86cd008b256d06f44fab8e6cd', True),
@@ -88,31 +86,31 @@ def test_is_commit_valid(commit: str, expected: bool) -> None:
@pytest.mark.parametrize(
    'branch, commit, expected',
    [
        (get_upstream() + '/20.1', '20.1-branchpoint', True),
        (get_upstream() + '/20.1', '20.0', False),
        (get_upstream() + '/20.1', 'main', False),
        (get_upstream() + '/20.1', 'e58a10af640ba58b6001f5c5ad750b782547da76', True),
        (get_upstream() + '/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
        (get_upstream() + '/staging/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
        (get_upstream() + '/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', False),
        (get_upstream() + '/main', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', True),
        (get_upstream() + '/20.0', 'd043d24654c851f0be57dbbf48274b5373dea42b', False),
        ('20.1', '20.1-branchpoint', True),
        ('20.1', '20.0', False),
        ('20.1', 'main', False),
        ('20.1', 'e58a10af640ba58b6001f5c5ad750b782547da76', True),
        ('20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', True),
        ('20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', False),
        ('main', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', True),
        ('20.0', 'd043d24654c851f0be57dbbf48274b5373dea42b', False),
    ])
def test_branch_has_commit(branch: str, commit: str, expected: bool) -> None:
    assert branch_has_commit(branch, commit) == expected
    upstream = get_upstream()
    assert branch_has_commit(upstream, branch, commit) == expected


@pytest.mark.parametrize(
    'branch, commit, expected',
    [
        (get_upstream() + '/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
        (get_upstream() + '/staging/20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
        (get_upstream() + '/20.1', '20.1-branchpoint', ''),
        (get_upstream() + '/20.1', '20.0', ''),
        (get_upstream() + '/20.1', '20.2', 'abac4859618e02aea00f705b841a7c5c5007ad1a'),
        (get_upstream() + '/20.1', 'main', ''),
        (get_upstream() + '/20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', ''),
        (get_upstream() + '/20.0', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', '8cd4f57381cefe69019a3282d457d5bda3644030'),
        ('20.1', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', 'd043d24654c851f0be57dbbf48274b5373dea42b'),
        ('20.1', '20.1-branchpoint', ''),
        ('20.1', '20.0', ''),
        ('20.1', '20.2', ''),
        ('20.1', 'main', ''),
        ('20.1', 'd043d24654c851f0be57dbbf48274b5373dea42b', ''),
        ('20.0', 'dd2bd68fa69124c86cd008b256d06f44fab8e6cd', ''),
    ])
def test_branch_has_backport_of_commit(branch: str, commit: str, expected: bool) -> None:
    assert branch_has_backport_of_commit(branch, commit) == expected
    upstream = get_upstream()
    assert branch_has_backport_of_commit(upstream, branch, commit) == expected
@@ -175,7 +175,7 @@ inliner = Inliner();

async def gather_commits(version: str) -> str:
    p = await asyncio.create_subprocess_exec(
        'git', 'log', '--oneline', f'mesa-{version}..', '-i', '--grep', r'\(Closes\|Fixes\): \(https\|#\).*',
        'git', 'log', '--oneline', f'mesa-{version}..', '--grep', r'Closes: \(https\|#\).*',
        stdout=asyncio.subprocess.PIPE)
    out, _ = await p.communicate()
    assert p.returncode == 0, f"git log didn't work: {version}"
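One detail in the hunk above is easy to miss: ``git log --grep`` takes POSIX basic regular expressions, so ``\(``/``\|`` are escaped grouping and alternation, and the 23.0 side additionally passes ``-i`` for case-insensitive matching. A rough Python ``re`` equivalent of the 23.0 pattern, just to illustrate what it accepts (the sample lines are made up):

.. code-block:: python

   import re

   # BRE '\(Closes\|Fixes\): \(https\|#\).*' with -i, in Python syntax.
   pattern = re.compile(r'(Closes|Fixes): (https|#).*', re.IGNORECASE)

   for line in ('Closes: #123',
                'fixes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/456'):
       assert pattern.search(line)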
@@ -193,34 +193,22 @@ async def parse_issues(commits: str) -> typing.List[str]:
    out = _out.decode().split('\n')

    for line in reversed(out):
        if not line.lower().startswith(('closes:', 'fixes:')):
            continue
        bug = line.split(':', 1)[1].strip()
        if (bug.startswith('https://gitlab.freedesktop.org/mesa/mesa')
                # Avoid parsing "merge_requests" URL. Note that a valid issue
                # URL may or may not contain the "/-/" text, so we check if
                # the word "issues" is contained in URL.
                and '/issues' in bug):
            # This means we have a bug in the form "Closes: https://..."
            issues.append(os.path.basename(urllib.parse.urlparse(bug).path))
        elif ',' in bug:
            multiple_bugs = [b.strip().lstrip('#') for b in bug.split(',')]
            if not all(b.isdigit() for b in multiple_bugs):
                # this is likely a "Fixes" tag that refers to a commit name
                continue
            issues.extend(multiple_bugs)
        elif bug.startswith('#'):
            issues.append(bug.lstrip('#'))
        if line.startswith('Closes:'):
            bug = line.lstrip('Closes:').strip()
            if bug.startswith('https://gitlab.freedesktop.org/mesa/mesa'):
                # This means we have a bug in the form "Closes: https://..."
                issues.append(os.path.basename(urllib.parse.urlparse(bug).path))
            elif ',' in bug:
                issues.extend([b.strip().lstrip('#') for b in bug.split(',')])
            elif bug.startswith('#'):
                issues.append(bug.lstrip('#'))

    return issues


async def gather_bugs(version: str) -> typing.List[str]:
    commits = await gather_commits(version)
    if commits:
        issues = await parse_issues(commits)
    else:
        issues = []
    issues = await parse_issues(commits)

    loop = asyncio.get_event_loop()
    async with aiohttp.ClientSession(loop=loop) as session:
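The URL branch on both sides of this hunk relies on the same trick: for an issue URL, the last path component is the issue number, so ``os.path.basename`` of the parsed path yields the id. A quick self-contained check (the URL is an arbitrary example):

.. code-block:: python

   import os
   import urllib.parse

   bug = 'https://gitlab.freedesktop.org/mesa/mesa/-/issues/36'
   assert os.path.basename(urllib.parse.urlparse(bug).path) == '36'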
@@ -239,12 +227,7 @@ async def get_bug(session: aiohttp.ClientSession, bug_id: str) -> str:
    params = {'iids[]': bug_id}
    async with session.get(url, params=params) as response:
        content = await response.json()
    if not content:
        # issues marked as "confidential" look like "404" page for
        # unauthorized users
        return f'Confidential issue #{bug_id}'
    else:
        return content[0]['title']
    return content[0]['title']


async def get_shortlog(version: str) -> str:
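For context on ``params = {'iids[]': bug_id}`` in the hunk above: GitLab's issue-listing API accepts ``iids[]`` filters and returns a JSON list, which is why the code indexes ``content[0]``. A hedged synchronous sketch of the same request, assuming the third-party ``requests`` package instead of aiohttp (the project path and issue id are examples; error handling omitted):

.. code-block:: python

   import requests

   url = 'https://gitlab.freedesktop.org/api/v4/projects/mesa%2Fmesa/issues'
   content = requests.get(url, params={'iids[]': '36'}).json()
   if content:
       print(content[0]['title'])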
@@ -298,45 +281,22 @@ def calculate_previous_version(version: str, is_point: bool) -> str:


def get_features(is_point_release: bool) -> typing.Generator[str, None, None]:
    p = pathlib.Path('docs') / 'relnotes' / 'new_features.txt'
    if p.exists() and p.stat().st_size > 0:
    p = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / 'new_features.txt'
    if p.exists():
        if is_point_release:
            print("WARNING: new features being introduced in a point release", file=sys.stderr)
        with p.open('rt') as f:
            for line in f:
                yield line.rstrip()
                yield line
    else:
        yield "None"
        p.unlink()
    else:
        yield "None"


def update_release_notes_index(version: str) -> None:
    relnotes_index_path = pathlib.Path('docs') / 'relnotes.rst'

    with relnotes_index_path.open('r') as f:
        relnotes = f.readlines()

    new_relnotes = []
    first_list = True
    second_list = True
    for line in relnotes:
        if first_list and line.startswith('-'):
            first_list = False
            new_relnotes.append(f'- :doc:`{version} release notes <relnotes/{version}>`\n')
        if not first_list and second_list and line.startswith(' relnotes/'):
            second_list = False
            new_relnotes.append(f' relnotes/{version}\n')
        new_relnotes.append(line)

    with relnotes_index_path.open('w') as f:
        for line in new_relnotes:
            f.write(line)

    subprocess.run(['git', 'add', relnotes_index_path])


async def main() -> None:
    v = pathlib.Path('VERSION')
    v = pathlib.Path(__file__).parent.parent / 'VERSION'
    with v.open('rt') as f:
        raw_version = f.read().strip()
    is_point_release = '-rc' not in raw_version
@@ -353,7 +313,7 @@ async def main() -> None:
        gather_bugs(previous_version),
    )

    final = pathlib.Path('docs') / 'relnotes' / f'{this_version}.rst'
    final = pathlib.Path(__file__).parent.parent / 'docs' / 'relnotes' / f'{this_version}.rst'
    with final.open('wt') as f:
        try:
            f.write(TEMPLATE.render(
@@ -371,12 +331,8 @@ async def main() -> None:
            ))
        except:
            print(exceptions.text_error_template().render())
            return

    subprocess.run(['git', 'add', final])

    update_release_notes_index(this_version)

    subprocess.run(['git', 'commit', '-m',
                    f'docs: add release notes for {this_version}'])
@@ -148,45 +148,6 @@ async def test_gather_commits():
        ''',
        ['3456', '3457', '3458'],
    ),
    (
        '''\
        Without /-/

        Closes: https://gitlab.freedesktop.org/mesa/mesa/issues/36
        ''',
        ['36'],
    ),
    (
        '''\
        Ignore merge_requests

        Closes: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20241
        ''',
        [],
    ),
    (
        '''\
        Parse "Fixes:" tag too

        Fixes: https://gitlab.freedesktop.org/mesa/mesa/issues/36
        Fixes: 142565a3bc2
        Fixes: 142565a3bc2 ("docs: do something very useful")
        Fixes: 142565a3bc2 ("docs: fix #1234, have a comma")
        Fixes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/37
        ''',
        ['36', '37'],
    ),
    (
        '''\
        Parse Fixes/Closes in weird cases

        fixes: https://gitlab.freedesktop.org/mesa/mesa/issues/36
        fiXES: https://gitlab.freedesktop.org/mesa/mesa/issues/37
        closes: https://gitlab.freedesktop.org/mesa/mesa/issues/38
        cloSES: https://gitlab.freedesktop.org/mesa/mesa/issues/39
        ''',
        ['36', '37', '38', '39'],
    ),
    ])
async def test_parse_issues(content: str, bugs: typing.List[str]) -> None:
    mock_com = mock.AsyncMock(return_value=(textwrap.dedent(content).encode(), ''))
@@ -118,25 +118,24 @@ SOURCES = [
        'api': 'opencl',
        'inc_folder': 'CL',
        'sources': [
            Source('include/CL/opencl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/opencl.h'),
            Source('include/CL/cl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl.h'),
            Source('include/CL/cl_platform.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_platform.h'),
            Source('include/CL/cl_gl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_gl.h'),
            Source('include/CL/cl_gl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_gl_ext.h'),
            Source('include/CL/cl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_ext.h'),
            Source('include/CL/cl_version.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_version.h'),
            Source('include/CL/cl_icd.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_icd.h'),
            Source('include/CL/cl_egl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_egl.h'),
            Source('include/CL/cl_d3d10.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_d3d10.h'),
            Source('include/CL/cl_d3d11.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_d3d11.h'),
            Source('include/CL/cl_dx9_media_sharing.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_dx9_media_sharing.h'),
            Source('include/CL/cl_dx9_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_dx9_media_sharing_intel.h'),
            Source('include/CL/cl_ext_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_ext_intel.h'),
            Source('include/CL/cl_va_api_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/main/CL/cl_va_api_media_sharing_intel.h'),
            Source('include/CL/opencl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/opencl.h'),
            Source('include/CL/cl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl.h'),
            Source('include/CL/cl_platform.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_platform.h'),
            Source('include/CL/cl_gl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_gl.h'),
            Source('include/CL/cl_gl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_gl_ext.h'),
            Source('include/CL/cl_ext.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_ext.h'),
            Source('include/CL/cl_version.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_version.h'),
            Source('include/CL/cl_icd.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_icd.h'),
            Source('include/CL/cl_egl.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_egl.h'),
            Source('include/CL/cl_d3d10.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_d3d10.h'),
            Source('include/CL/cl_d3d11.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_d3d11.h'),
            Source('include/CL/cl_dx9_media_sharing.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_dx9_media_sharing.h'),
            Source('include/CL/cl_dx9_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_dx9_media_sharing_intel.h'),
            Source('include/CL/cl_ext_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_ext_intel.h'),
            Source('include/CL/cl_va_api_media_sharing_intel.h', 'https://github.com/KhronosGroup/OpenCL-Headers/raw/master/CL/cl_va_api_media_sharing_intel.h'),

            Source('include/CL/cl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/5f3cc41df821a3e5988490232082a3e3b82c0283/include/CL/cl.hpp'),
            Source('include/CL/cl2.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/main/include/CL/cl2.hpp'),
            Source('include/CL/opencl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/main/include/CL/opencl.hpp'),
            Source('include/CL/cl.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/master/include/CL/cl.hpp'),
            Source('include/CL/cl2.hpp', 'https://github.com/KhronosGroup/OpenCL-CLHPP/raw/master/include/CL/cl2.hpp'),
        ],
    },
@@ -20,16 +20,11 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import argparse
import os


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('version_dir', help="Directory with VERSION file")
    args = parser.parse_args()

    filename = os.path.join(args.version_dir, 'VERSION')
    filename = os.path.join(os.environ['MESON_SOURCE_ROOT'], 'VERSION')
    with open(filename) as f:
        version = f.read().strip()
    print(version, end='')
@@ -27,6 +27,31 @@ import pathlib
import subprocess


def update_release_notes(version: str) -> None:
    p = pathlib.Path('docs') / 'relnotes.rst'

    with open(p, 'r') as f:
        relnotes = f.readlines()

    new_relnotes = []
    first_list = True
    second_list = True
    for line in relnotes:
        if first_list and line.startswith('-'):
            first_list = False
            new_relnotes.append(f'- :doc:`{version} release notes <relnotes/{version}>`\n')
        if not first_list and second_list and line.startswith(' relnotes/'):
            second_list = False
            new_relnotes.append(f' relnotes/{version}\n')
        new_relnotes.append(line)

    with open(p, 'w') as f:
        for line in new_relnotes:
            f.write(line)

    subprocess.run(['git', 'add', p])


def update_calendar(version: str) -> None:
    p = pathlib.Path('docs') / 'release-calendar.csv'

@@ -56,9 +81,14 @@ def main() -> None:
    args = parser.parse_args()

    update_calendar(args.version)
    done = 'update calendar'

    if 'rc' not in args.version:
        update_release_notes(args.version)
        done += ' and link releases notes'

    subprocess.run(['git', 'commit', '-m',
                    f'docs: update calendar for {args.version}'])
                    f'docs: {done} for {args.version}'])


if __name__ == "__main__":
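The ``first_list``/``second_list`` walk in ``update_release_notes`` above is compact but subtle: it inserts one new line before the first bullet and one before the first ``relnotes/`` toctree entry, leaving everything else untouched. A minimal sketch of the same transformation on a fabricated ``relnotes.rst`` fragment (the input text and version number are made up for illustration):

.. code-block:: python

   relnotes = """\
   Release notes
   =============

   - :doc:`22.3.1 release notes <relnotes/22.3.1>`

   .. toctree::

      relnotes/22.3.1
   """.splitlines(keepends=True)

   version = '22.3.2'
   new_relnotes = []
   first_list = True
   second_list = True
   for line in relnotes:
       if first_list and line.startswith('-'):
           first_list = False
           new_relnotes.append(f'- :doc:`{version} release notes <relnotes/{version}>`\n')
       if not first_list and second_list and line.startswith('   relnotes/'):
           second_list = False
           new_relnotes.append(f'   relnotes/{version}\n')
       new_relnotes.append(line)

   print(''.join(new_relnotes))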
@@ -6,8 +6,17 @@
import docutils.nodes
import sphinx.addnodes

from sphinx.util.nodes import split_explicit_title
from docutils import nodes, utils
def parse_envvar(env, sig, signode):
    envvar, t, default = sig.split(" ", 2)
    envvar = envvar.strip().upper()
    t = "Type: %s" % t.strip(" <>").lower()
    default = "Default: %s" % default.strip(" ()")
    signode += sphinx.addnodes.desc_name(envvar, envvar)
    signode += docutils.nodes.Text(' ')
    signode += sphinx.addnodes.desc_type(t, t)
    signode += docutils.nodes.Text(', ')
    signode += sphinx.addnodes.desc_annotation(default, default)
    return envvar

def parse_opcode(env, sig, signode):
    opcode, desc = sig.split("-", 1)
@@ -17,23 +26,8 @@ def parse_opcode(env, sig, signode):
    signode += sphinx.addnodes.desc_annotation(desc, desc)
    return opcode


def ext_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    text = utils.unescape(text)
    has_explicit_title, title, ext = split_explicit_title(text)

    parts = ext.split('_', 2)
    if parts[0] == 'VK':
        full_url = f'https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/{ext}.html'
    elif parts[0] == 'GL':
        full_url = f'https://registry.khronos.org/OpenGL/extensions/{parts[1]}/{parts[1]}_{parts[2]}.txt'
    else:
        raise Exception(f'Unexpected API: {parts[0]}')

    pnode = nodes.reference(title, title, internal=False, refuri=full_url)
    return [pnode], []

def setup(app):
    app.add_object_type("envvar", "envvar", "%s (environment variable)",
                        parse_envvar)
    app.add_object_type("opcode", "opcode", "%s (TGSI opcode)",
                        parse_opcode)
    app.add_role('ext', ext_role)
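The ``ext_role`` shown in this diff maps extension names to registry URLs purely by splitting on underscores: ``VK_*`` names go to the Vulkan man pages, while ``GL_VENDOR_name`` splits into a vendor directory plus file name. A standalone illustration of just that mapping logic, mirroring the function above (the extension names are examples):

.. code-block:: python

   def ext_url(ext: str) -> str:
       parts = ext.split('_', 2)
       if parts[0] == 'VK':
           return f'https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/{ext}.html'
       elif parts[0] == 'GL':
           return f'https://registry.khronos.org/OpenGL/extensions/{parts[1]}/{parts[1]}_{parts[2]}.txt'
       raise Exception(f'Unexpected API: {parts[0]}')

   print(ext_url('VK_KHR_multiview'))
   print(ext_url('GL_ARB_texture_float'))  # -> .../extensions/ARB/ARB_texture_float.txt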
@@ -3,7 +3,7 @@ Amber Branch

After Mesa 21.3, all non-Gallium DRI drivers were removed from the Mesa
source-tree. These drivers are still being maintained to some degree,
but only on the ``amber`` branch, and only for critical fixes.
but only on the 21.3.x branch, and only for critical fixes.

These drivers include:

@@ -39,8 +39,8 @@ enable that logic, you need to pass the ``-Damber=true`` flag to Meson.
Documentation
-------------

On `docs.mesa3d.org <https://docs.mesa3d.org/>`__, we currently only
On `docs.mesa3d.org <https://docs.mesa3d.org/>`, we currently only
publish the documentation from our main branch. But you can view the
documentation for the Amber branch `here
<https://gitlab.freedesktop.org/mesa/mesa/-/tree/amber/docs>`__.
<https://gitlab.freedesktop.org/mesa/mesa/-/tree/21.3/docs>`_.
@@ -1,13 +1,13 @@
LAVA CI
=======

`LAVA <https://www.lavasoftware.org/>`__ is a system for functional
testing of boards including deploying custom bootloaders and kernels.
This is particularly relevant to testing Mesa because we often need
to change kernels for UAPI changes (and this lets us do full testing
of a new kernel during development), and our workloads can easily
take down boards when mistakes are made (kernel oopses, OOMs that
take out critical system services).
`LAVA <https://lavasoftware.org/>`_ is a system for functional testing
of boards including deploying custom bootloaders and kernels. This is
particularly relevant to testing Mesa because we often need to change
kernels for UAPI changes (and this lets us do full testing of a new
kernel during development), and our workloads can easily take down
boards when mistakes are made (kernel oopses, OOMs that take out
critical system services).

Mesa-LAVA software architecture
-------------------------------

@@ -34,7 +34,7 @@ initramfs) for trace replay testing. Given that we need networking already, and
our dEQP/Piglit/etc. payload is large, we use NFS from the x86 runner system
rather than initramfs.

See ``src/freedreno/ci/gitlab-ci.yml`` for an example of fastboot on DB410c and
See `src/freedreno/ci/gitlab-ci.yml` for an example of fastboot on DB410c and
DB820c (freedreno-a306 and freedreno-a530).

Requirements (Servo)
@@ -74,7 +74,7 @@ call "servo"::
   dhcp-option=tag:cheza1,option:root-path,/srv/nfs/cheza1
   dhcp-option=tag:cheza2,option:root-path,/srv/nfs/cheza2

See ``src/freedreno/ci/gitlab-ci.yml`` for an example of Servo on cheza. Note
See `src/freedreno/ci/gitlab-ci.yml` for an example of Servo on cheza. Note
that other Servo boards in CI are managed using LAVA.

Requirements (POE)
@@ -124,12 +124,12 @@ With that set up, you should be able to power on/off a port with something like:
Note that the "1.3.6..." SNMP OID changes between switches. The last digit
above is the interface id (port number). You can probably find the right OID by
google, that was easier than figuring it out from finding the switch's MIB
database. You can query the POE status from the switch serial using the ``show
power inline`` command.
database. You can query the POE status from the switch serial using the `show
power inline` command.

Other than that, find the dnsmasq/tftp/NFS setup for your boards "servo" above.

See ``src/broadcom/ci/gitlab-ci.yml`` and ``src/nouveau/ci/gitlab-ci.yml`` for an
See `src/broadcom/ci/gitlab-ci.yml` and `src/nouveau/ci/gitlab-ci.yml` for an
examples of POE for Raspberry Pi 3/4, and Jetson Nano.

Setup
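The actual "power on/off a port" snippet referenced in the POE hunk above sits outside this diff's context lines and is not shown here. As a hedged reconstruction of the idea only, driving such a switch typically reduces to an ``snmpset`` write on a per-port OID; in a Python wrapper it could look like this (the OID prefix, community string ``private``, switch address, and 1/2 on/off values are all placeholders that differ per switch model, as the text above warns):

.. code-block:: python

   import subprocess

   def set_poe_port(switch: str, oid_prefix: str, port: int, on: bool) -> None:
       # Hypothetical wrapper around net-snmp's snmpset; the last OID digit
       # is the interface id (port number), per the documentation above.
       subprocess.run(['snmpset', '-v2c', '-c', 'private', switch,
                       f'{oid_prefix}.{port}', 'i', '1' if on else '2'],
                      check=True)

   set_poe_port('10.42.0.2', '1.3.6.1.4.1.9.9.402.1.2.1.1.1', 3, False)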
@@ -3,7 +3,7 @@ Docker CI

For LLVMpipe and Softpipe CI, we run tests in a container containing
VK-GL-CTS, on the shared GitLab runners provided by `freedesktop
<https://freedesktop.org>`__
<http://freedesktop.org>`_

Software architecture
---------------------
@@ -53,7 +53,7 @@ step across multiple test runs. Since the images are large and change
approximately weekly, the DUTs also need to be running some script to
prune stale Docker images periodically in order to not run out of disk
space as we rev those containers (perhaps `this script
<https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2980#note_169233611>`__).
<https://gitlab.com/gitlab-org/gitlab-runner/issues/2980#note_169233611>`_).

Note that Docker doesn't allow containers to be stored on NFS, and
doesn't allow multiple Docker daemons to interact with the same

@@ -59,7 +59,7 @@ The CI replays application traces with various drivers in two different jobs. The first
job replays traces listed in ``src/<driver>/ci/traces-<driver>.yml`` files and if any
of those traces fail the pipeline fails as well. The second job replays traces listed in
``src/<driver>/ci/restricted-traces-<driver>.yml`` and it is allowed to fail. This second
job is only created when the pipeline is triggered by ``marge-bot`` or any other user that
job is only created when the pipeline is triggered by `marge-bot` or any other user that
has been granted access to these traces.

A traces YAML file also includes a ``download-url`` pointing to a MinIO
@@ -173,7 +173,7 @@ faster personal machine as a runner. You can find the gitlab-runner
package in Debian, or use GitLab's own builds.

To do so, follow `GitLab's instructions
<https://docs.gitlab.com/ee/ci/runners/runners_scope.html#create-a-specific-runner>`__ to
<https://docs.gitlab.com/ce/ci/runners/#create-a-specific-runner>`__ to
register your personal GitLab runner in your Mesa fork. Then, tell
Mesa how many jobs it should serve (``concurrent=``) and how many
cores those jobs should use (``FDO_CI_CONCURRENT=``) by editing these
@@ -191,7 +191,7 @@ Docker caching
The CI system uses Docker images extensively to cache
infrequently-updated build content like the CTS. The `freedesktop.org
CI templates
<https://gitlab.freedesktop.org/freedesktop/ci-templates/>`__ help us
<https://gitlab.freedesktop.org/freedesktop/ci-templates/>`_ help us
manage the building of the images to reduce how frequently rebuilds
happen, and trim down the images (stripping out manpages, cleaning the
apt cache, and other such common pitfalls of building Docker images).
@@ -199,7 +199,7 @@ apt cache, and other such common pitfalls of building Docker images).
When running a container job, the templates will look for an existing
build of that image in the container registry under
``MESA_IMAGE_TAG``. If it's found it will be reused, and if
not, the associated ``.gitlab-ci/containers/<jobname>.sh`` will be run
not, the associated `.gitlab-ci/containers/<jobname>.sh`` will be run
to build it. So, when developing any change to container build
scripts, you need to update the associated ``MESA_IMAGE_TAG`` to
a new unique string. We recommend using the current date plus some
@@ -211,7 +211,7 @@ When developing a given change to your Docker image, you would have to
bump the tag on each ``git commit --amend`` to your development
branch, which can get tedious. Instead, you can navigate to the
`container registry
<https://gitlab.freedesktop.org/mesa/mesa/container_registry>`__ for
<https://gitlab.freedesktop.org/mesa/mesa/container_registry>`_ for
your repository and delete the tag to force a rebuild. When your code
is eventually merged to main, a full image rebuild will occur again
(forks inherit images from the main repo, but MRs don't propagate

@@ -18,8 +18,8 @@ Linux mainline, that is why Mesa has its own kernel version which should be used
as the base for newer kernels.

So, one should base the kernel uprev from the last tag used in the Mesa CI,
please refer to ``.gitlab-ci/container/gitlab-ci.yml`` ``KERNEL_URL`` variable.
Every tag has a standard naming: ``vX.YZ-for-mesa-ci-<commit_short_SHA>``, which
please refer to `.gitlab-ci/container/gitlab-ci.yml` `KERNEL_URL` variable.
Every tag has a standard naming: `vX.YZ-for-mesa-ci-<commit_short_SHA>`, which
can be created via the command:

:code:`git tag vX.YZ-for-mesa-ci-$(git rev-parse --short HEAD)`
@@ -28,7 +28,7 @@ Building Kernel
---------------

When Mesa CI generates a new rootfs image, the Linux Kernel is built based on
the script located at ``.gitlab-ci/container/build-kernel.sh``.
the script located at `.gitlab-ci/container/build-kernel.sh`.

Updating Kconfigs
^^^^^^^^^^^^^^^^^
@@ -36,7 +36,7 @@ Updating Kconfigs
When a Kernel uprev happens, it is worth compiling and cross-compiling the
Kernel locally, in order to update the Kconfigs accordingly. Remember that the
resulting Kconfig is a merge between *Mesa CI Kconfig* and *Linux tree
defconfig* made via ``merge_config.sh`` script located at Linux Kernel tree.
defconfig* made via `merge_config.sh` script located at Linux Kernel tree.

Kconfigs location
"""""""""""""""""
@@ -70,9 +70,9 @@ Development routine

1. Compile the newer kernel locally for each platform.
2. Compile device trees for ARM platforms
3. Update Kconfigs. Are new Kconfigs necessary? Is CONFIG_XYZ_BLA deprecated? Does the ``merge_config.sh`` override an important config?
3. Update Kconfigs. Are new Kconfigs necessary? Is CONFIG_XYZ_BLA deprecated? Does the `merge_config.sh` override an important config?
4. Push a new development branch to `Kernel repository`_ based on the latest kernel tag used in GitLab CI
5. Hack ``build-kernel.sh`` script to clone kernel from your development branch
5. Hack `build-kernel.sh` script to clone kernel from your development branch
6. Update image tags. See `Updating image tags`_
7. Run the entire CI pipeline, all the automatic jobs should be green. If some job is red or taking too long, you will need to investigate it and probably ask for help.

@@ -80,7 +80,7 @@ When the Kernel uprev is stable
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

1. Push a new tag to Mesa CI `Kernel repository`_
2. Update KERNEL_URL ``debian/x86_test-gl`` job definition
2. Update KERNEL_URL `debian/x86_test-gl` job definition
3. Open a merge request, if it is not opened yet

Tips and Tricks
@@ -107,15 +107,15 @@ Bare-metal custom kernels
Some CI jobs have support to plug in a custom kernel by simply changing a variable.
This is great, since rebuilding the kernel and rootfs may takes dozens of minutes.

For example, Freedreno jobs ``gitlab.yml`` manifest support a variable named
``BM_KERNEL``. If one puts a gz-compressed kernel URL there, the job will use that
kernel to boot the Freedreno bare-metal devices. The same works for ``BM_DTB`` in
For example, Freedreno jobs `gitlab.yml` manifest support a variable named
`BM_KERNEL`. If one puts a gz-compressed kernel URL there, the job will use that
kernel to boot the Freedreno bare-metal devices. The same works for `BM_DTB` in
the case of device tree binaries.

Careful reading of the job logs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Sometimes a job may turn to red for reasons unrelated to the kernel update, e.g.
LAVA ``tftp`` timeout, problems with the freedesktop servers etc.
LAVA `tftp` timeout, problems with the freedesktop servers etc.
So it is important to see the reason why the job turned red, and retry it if an
infrastructure error has happened.

@@ -3,10 +3,10 @@ Running traces on a local machine

Prerequisites
-------------
- Install `Apitrace <https://apitrace.github.io/>`__
- Install `Renderdoc <https://renderdoc.org/>`__ (only needed for some traces)
- Download and compile `Piglit <https://gitlab.freedesktop.org/mesa/piglit>`__ and install his `dependencies <https://gitlab.freedesktop.org/mesa/piglit#2-setup>`__
- Download traces you want to replay from `traces-db <https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db/>`__
- Install `Apitrace <https://apitrace.github.io/>`_
- Install `Renderdoc <https://renderdoc.org/>`_ (only needed for some traces)
- Download and compile `Piglit <https://gitlab.freedesktop.org/mesa/piglit>`_ and install his `dependencies <https://gitlab.freedesktop.org/mesa/piglit#2-setup>`_
- Download traces you want to replay from `traces-db <https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db/>`_

Running single trace
--------------------
@@ -16,7 +16,7 @@ A simple run to see the output of the trace can be done with

   apitrace replay -w name_of_trace.trace

For more information, look into the `Apitrace documentation <https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown>`__.
For more information, look into the `Apitrace documentation <https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown>`_.

For comparing checksums use:

@@ -32,7 +32,7 @@ Simulating CI trace job

Sometimes it's useful to be able to test traces on your local machine instead of the Mesa CI runner. To simulate the CI environment as closely as possible.

Download the YAML file from your driver's ``ci/`` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)
Download the YAML file from your driver's `ci/` directory and then change the path in the YAML file from local proxy or MinIO to the local directory (url-like format ``file://``)

.. code-block:: console


docs/ci/skqp.rst

@@ -1,33 +1,101 @@
SkQP
====

`SkQP <https://skia.org/docs/dev/testing/skqp/>`__ stands for SKIA Quality
`SkQP <https://skia.org/docs/dev/testing/skqp/>`_ stands for SKIA Quality
Program conformance tests. Basically, it has sets of rendering tests and unit
tests to ensure that `SKIA <https://skia.org/>`__ is meeting its design specifications on a specific
tests to ensure that `SKIA <https://skia.org/>`_ is meeting its design specifications on a specific
device.

The rendering tests have support for GL, GLES and Vulkan backends and test some
rendering scenarios.
And the unit tests check the GPU behavior without rendering images, using any of the GL/GLES or Vulkan drivers.
And the unit tests check the GPU behavior without rendering images.

Tests
-----

Render tests design
^^^^^^^^^^^^^^^^^^^

It is worth noting that `rendertests.txt` can bring some detail about each test
expectation, so each test can have a max pixel error count, to tell SkQP that it
is OK to have at most that number of errors for that test. See also:
https://github.com/google/skia/blob/c29454d1c9ebed4758a54a69798869fa2e7a36e0/tools/skqp/README_ALGORITHM.md

.. _test-location:

Location
^^^^^^^^

Each `rendertests.txt` and `unittest.txt` file must be located inside a specific
subdirectory inside SkQP assets directory.

+--------------+--------------------------------------------+
| Test type    | Location                                   |
+==============+============================================+
| Render tests | `${SKQP_ASSETS_DIR}/skqp/rendertests.txt`  |
+--------------+--------------------------------------------+
| Unit tests   | `${SKQP_ASSETS_DIR}/skqp/unittests.txt`    |
+--------------+--------------------------------------------+

The `skqp-runner.sh` script will make the necessary modifications to separate
`rendertests.txt` for each backend-driver combination. As long as the test files are located in the expected place:

+--------------+----------------------------------------------------------------------------------------------+
| Test type    | Location                                                                                     |
+==============+==============================================================================================+
| Render tests | `${MESA_REPOSITORY_DIR}/src/${GPU_DRIVER}/ci/${GPU_VERSION}-${SKQP_BACKEND}_rendertests.txt` |
+--------------+----------------------------------------------------------------------------------------------+
| Unit tests   | `${MESA_REPOSITORY_DIR}/src/${GPU_DRIVER}/ci/${GPU_VERSION}_unittests.txt`                   |
+--------------+----------------------------------------------------------------------------------------------+

Where `SKQP_BACKEND` can be:

- gl: for GL backend
- gles: for GLES backend
- vk: for Vulkan backend

Example file
""""""""""""

.. code-block:: console

   src/freedreno/ci/freedreno-a630-skqp-gl_rendertests.txt

- GPU_DRIVER: `freedreno`
- GPU_VERSION: `freedreno-a630`
- SKQP_BACKEND: `gl`

.. _rendertests-design:

SkQP reports
------------

SkQP generates reports after finishing its execution, and deqp-runner collects
them in the job artifacts results directory under the test name. Click the
'Browse' button from a failing job to get to them.
SkQP generates reports after finishing its execution, they are located at the job
artifacts results directory and are divided in subdirectories by rendering tests
backends and unit
tests. The job log has links to every generated report in order to facilitate
the SkQP debugging.

SkQP failing tests
------------------
Maintaining SkQP on Mesa CI
---------------------------

SkQP rendering tests will have a range of pixel values allowed for the driver's
rendering for a given test. This can make the "expected" image in the result
output look rather strange, but you should be able to make sense of it knowing
that.
SkQP is built alongside with another binary, namely `list_gpu_unit_tests`, it is
located in the same folder where `skqp` binary is.

In SkQP itself, testcases can have increased failing pixel thresholds added to
them to keep CI green when the rendering is "correct" but out of normal range.
However, we don't support changing the thresholds in our testing. Because any
driver rendering not meeting the normal thresholds will trigger Android CTS
failures, we treat them as failures and track them as expected failures the
``*-fails.txt`` file.
This binary will generate the expected `unittests.txt` for the target GPU, so
ideally it should be executed on every SkQP update and when a new device
receives SkQP CI jobs.

1. Generate target unit tests for the current GPU with :code:`./list_gpu_unit_tests > unittests.txt`

2. Run SkQP job

3. If there is a failing or crashing unit test, remove it from the corresponding `unittests.txt`

4. If there is a crashing render test, remove it from the corresponding `rendertests.txt`

5. If there is a failing render test, visually inspect the result from the HTML report
   - If the render result is OK, update the max error count for that test
   - Otherwise, or put `-1` in the same threshold, as seen in :ref:`rendertests-design`

6. Remember to put the new tests files to the locations cited in :ref:`test-location`
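To make the per-test error threshold described above concrete: according to the README_ALGORITHM document linked in both versions of this page, `rendertests.txt` pairs a test name with a maximum pixel error count on each line. A hedged sketch of reading such a file (the entries below are invented examples, not real expectations):

.. code-block:: python

   # Hypothetical rendertests.txt contents: one "test_name,max_error" per line.
   sample = """\
   aaclip,16
   bigblurs,0
   bleed,-1
   """

   for line in sample.splitlines():
       name, max_error = line.rsplit(',', 1)
       # A negative threshold marks a test whose image should not be scored.
       print(name, int(max_error))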
@@ -36,7 +36,7 @@ Basic formatting guidelines

- Use comments wherever you think it would be helpful for other
  developers. Several specific cases and style examples follow. Note
  that we roughly follow `Doxygen <https://www.doxygen.nl>`__
  that we roughly follow `Doxygen <http://www.doxygen.nl>`__
  conventions.

Single-line comments:

docs/conf.py

@@ -89,9 +89,8 @@ todo_include_todos = False

# Disable highlighting unless a language is specified, otherwise we'll get
# python keywords highlit in literal blocks.
highlight_language = 'none'
highlight_language = "none"

default_role = 'c:expr'

# -- Options for HTML output ----------------------------------------------

@@ -100,7 +99,7 @@ default_role = 'c:expr'
#
html_theme = 'sphinx_rtd_theme'

html_favicon = 'favicon.ico'
html_favicon = "favicon.ico"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -140,19 +139,6 @@ html_redirects = [
]


# -- Options for linkcheck ------------------------------------------------

linkcheck_ignore = [
    r'specs/.*\.spec', # gets copied during the build process
    r'news:.*', # seems linkcheck doesn't like the news: URI-scheme...
    r'http://mesa-ci-results.jf.intel.com', # only available for Intel employees
    r'https://gitlab.com/.*#.*', # needs JS eval
    r'https://gitlab.freedesktop.org/.*#.*', # needs JS eval
    r'https://github.com/.*#.*', # needs JS eval
]
linkcheck_exclude_documents = [r'relnotes/.*']


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.

@@ -14,4 +14,4 @@ In your debugger you can set a breakpoint in ``_mesa_error()`` to trap
Mesa errors.

There is a display list printing/debugging facility. See the end of
``src/mesa/main/dlist.c`` for details.
``src/dlist.c`` for details.

@@ -71,7 +71,7 @@ This can be implemented in just a few lines of C code. The file
   {
      const struct _glapi_table * const dispatch = GET_DISPATCH();

      dispatch->Vertex3f(x, y, z);
      (*dispatch->Vertex3f)(x, y, z);
   }

The problem with this simple implementation is the large amount of
@@ -94,10 +94,10 @@ where each can or cannot be used are listed.

Starting with the 2.4.20 Linux kernel, each thread is allocated an area
of per-thread, global storage. Variables can be put in this area using
some extensions to GCC that called ``ELF TLS``. By storing the dispatch table
some extensions to GCC that called `ELF TLS`. By storing the dispatch table
pointer in this area, the expensive call to ``pthread_getspecific`` and
the test of ``_glapi_Dispatch`` can be avoided. As we don't support for
Linux kernel earlier than 2.4.20, so we can always using ``ELF TLS``.
Linux kernel earlier than 2.4.20, so we can always using `ELF TLS`.

The dispatch table pointer is stored in a new variable called
``_glapi_tls_Dispatch``. A new variable name is used so that a single

@@ -5,7 +5,7 @@ Downloading
-----------

You can download the released versions of Mesa via
`HTTPS <https://archive.mesa3d.org/>`__ or
`HTTPS <https://mesa.freedesktop.org/archive/>`__ or
`FTP <ftp://ftp.freedesktop.org/pub/mesa/>`__.

Our release tarballs are GPG-signed, and the public keys are available

@@ -7,9 +7,6 @@ Debugging
Here are a few environment variable debug environment variables
specific to ANV:

:envvar:`ANV_ENABLE_GENERATED_INDIRECT_DRAWS`
   If defined to ``0`` or ``false``, this will disable the generated
   indirect draw optimization in Anv. This will only affect Gfx11+.
:envvar:`ANV_ENABLE_PIPELINE_CACHE`
   If defined to ``0`` or ``false``, this will disable pipeline
   caching, forcing ANV to reparse and recompile any VkShaderModule
@@ -17,7 +14,7 @@ specific to ANV:
:envvar:`ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS`
   If defined to ``1`` or ``true``, this will prevent usage of self
   modifying command buffers to implement ``vkCmdExecuteCommands``. As
   a result of this, it will also disable :ext:`VK_KHR_performance_query`.
   a result of this, it will also disable ``VK_KHR_performance_query``.
:envvar:`ANV_ALWAYS_BINDLESS`
   If defined to ``1`` or ``true``, this forces all descriptor sets to
   use the internal `Bindless model`_.
@@ -31,7 +28,7 @@ specific to ANV:
   Haswell, Cherryview).
:envvar:`ANV_PRIMITIVE_REPLICATION_MAX_VIEWS`
   Specifies up to how many view shaders can be lowered to handle
   :ext:`VK_KHR_multiview`. Beyond this number, multiview is implemented
   VK_KHR_multiview. Beyond this number, multiview is implemented
   using instanced rendering. If unspecified, the value default to
   ``2``.

@@ -41,7 +38,7 @@ Experimental features

:envvar:`ANV_EXPERIMENTAL_NV_MESH_SHADER`
   If defined to ``1`` or ``true``, this advertise support for
   :ext:`VK_NV_mesh_shader` extension for platforms that have hardware
   VK_NV_mesh_shader extension for platforms that have hardware
   support for it.


@@ -275,34 +272,3 @@ checking for ``ANV_CMD_DIRTY_PIPELINE``. It should only do so if it
requires to know some value that is coming from the
``anv_graphics_pipeline`` object that is not available from
``anv_dynamic_state``.


Generated indirect draws optimization
-------------------------------------

Indirect draws have traditionally been implemented on Intel HW by
loading the indirect parameters from memory into HW registers using
the command streamer's ``MI_LOAD_REGISTER_MEM`` instruction before
dispatching a draw call to the 3D pipeline.

On recent products, it was found that the command streamer is showing
as performance bottleneck, because it cannot dispatch draw calls fast
enough to keep the 3D pipeline busy.

The solution to this problem is to change the way we deal with
indirect draws. Instead of loading HW registers with values using the
command streamer, we generate entire set of ``3DPRIMITIVE``
instructions using a shader. The generated instructions contain the
entire draw call parameters. This way the command streamer executes
only ``3DPRIMITIVE`` instructions and doesn´t do any data loading from
memory or touch HW registers, feeding the 3D pipeline as fast as it
can.

In Anv this implemented by using a side batch buffer. When Anv
encounters the first indirect draws, it generates a jump into the side
batch, the side batch contains a draw call using a generation shader
for each indirect draw. We keep adding on more generation draws into
the batch until we have to stop due to command buffer end, secondary
command buffer calls or a barrier containing the access flag
``VK_ACCESS_INDIRECT_COMMAND_READ_BIT``. The side batch buffer jump
back right after the instruction where it was called.

@@ -12,8 +12,6 @@ On macOS, the experimental Asahi driver may built with options:

To use, set the ``DYLD_LIBRARY_PATH`` environment variable:

.. code-block:: console

   DYLD_LIBRARY_PATH=/Users/nobody/mesa/build/src/gallium/targets/libgl-xlib/ glmark2 --reuse-context

Only X11 apps are supported. XQuartz must be setup separately.
@@ -38,7 +36,7 @@ The library is only built if ``-Dtools=asahi`` is passed. It builds a single

For example, to trace an app ``./app``, run:

   DYLD_INSERT_LIBRARIES=~/mesa/build/src/asahi/lib/libwrap.dylib ./app
   DYLD_INSERT_LIBRARIES=~/mesa/build/src/asahi/lib/libwrap.dylib ./app

Hardware varyings
-----------------
@@ -152,15 +150,15 @@ within the compiler.
Fragment shader
```````````````

In the fragment shader, coefficient registers, identified by the prefix ``cf``
In the fragment shader, coefficient registers, identified by the prefix `cf`
followed by a decimal index, act as opaque handles to varyings. For flat
shading, coefficient registers may be loaded into general registers with the
``ldcf`` instruction. For smooth shading, the coefficient register corresponding
`ldcf` instruction. For smooth shading, the coefficient register corresponding
to the desired varying is passed as an argument to the "iterate" instruction
``iter`` in order to "iterate" (interpolate) a varying. As perspective correct
`iter` in order to "iterate" (interpolate) a varying. As perspective correct
interpolation also requires the W component of the fragment coordinate, the
coefficient register for W is passed as a second argument. As an example, if
there's a single varying to interpolate, an instruction like ``iter r0, cf1, cf0``
there's a single varying to interpolate, an instruction like `iter r0, cf1, cf0`
is used.

Iterator
@@ -277,30 +275,3 @@ logically. These extra levels pad out layers of 3D images to the size of the
first layer, simplifying layout calculations for both software and hardware.
Although the padding is logically unnecessary, it wastes little space compared
to the sizes of large mipmapped 3D textures.

drm-shim (Linux only)
---------------------

Mesa includes a library that mocks out the DRM UABI used by the Asahi driver
stack, allowing the Mesa driver to run on non-M1 Linux hardware. This can be
useful for exercising the compiler. To build, use options:

::

   -Dgallium-drivers=asahi -Dtools=drm-shim

Then run an OpenGL workload with environment variable:

.. code-block:: console

   LD_PRELOAD=~/mesa/build/src/asahi/drm-shim/libasahi_noop_drm_shim.so

For example to compile a shader with shaderdb and print some statistics along
with the IR:

.. code-block:: console

   ~/shader-db$ AGX_MESA_DEBUG=shaders,shaderdb ASAHI_MESA_DEBUG=precompile LIBGL_DRIVERS_PATH=~/lib/dri/ LD_PRELOAD=~/mesa/build/src/asahi/drm-shim/libasahi_noop_drm_shim.so ./run shaders/glmark/1-12.shader_test

The drm-shim implementation for Asahi is located in ``src/asahi/drm-shim``. The
drm-shim implementation there should be updated as new UABI is added.

@@ -15,51 +15,47 @@ Debugging
There's a few tools that are useful for debugging D3D12, such as these
environment variables:

.. envvar:: D3D12_DEBUG
.. envvar:: D3D12_DEBUG <flags> ("")

   Accepts the following comma-separated list of flags:

   ``verbose``
      Enable verbose output to stdout
   ``blit``
      Trace blit and copy resource calls
   ``experimental``
      Enable experimental shader models feature
   ``dxil``
      Dump DXIL during program compile
   ``disass``
      Dump disassambly of created DXIL shader
   ``res``
      Debug resources
   ``debuglayer``
      Enable `debug layer`_
   ``gpuvalidator``
      Enable `GPU validator`_

   ``verbose``
      Enable verbose output to stdout
   ``blit``
      Trace blit and copy resource calls
   ``experimental``
      Enable experimental shader models feature
   ``dxil``
      Dump DXIL during program compile
   ``disass``
      Dump disassambly of created DXIL shader
   ``res``
      Debug resources
   ``debuglayer``
      Enable `debug layer`_
   ``gpuvalidator``
      Enable `GPU validator`_
.. envvar:: DXIL_DEBUG <flags> ("")

.. envvar:: DXIL_DEBUG
   ``verbose``
      Enable verbose output to stdout
   ``dump_blob``
      Write shader blobs
   ``trace``
      Trace instruction conversion
   ``dump_module``
      dump module tree to stderr

   Accepts the following comma-separated list of flags:

   ``verbose``
      Enable verbose output to stdout
   ``dump_blob``
      Write shader blobs
   ``trace``
      Trace instruction conversion
   ``dump_module``
      dump module tree to stderr

.. _debug layer: https://learn.microsoft.com/en-us/windows/win32/direct3d12/understanding-the-d3d12-debug-layer
.. _GPU validator: https://learn.microsoft.com/en-us/windows/win32/direct3d12/using-d3d12-debug-layer-gpu-based-validation
.. _debug layer: https://docs.microsoft.com/en-us/windows/win32/direct3d12/understanding-the-d3d12-debug-layer
.. _GPU validator: https://docs.microsoft.com/en-us/windows/win32/direct3d12/using-d3d12-debug-layer-gpu-based-validation

Utilities
---------

Environment variables that control the behavior of the D3D12 driver.

.. envvar:: MESA_D3D12_DEFAULT_ADAPTER_NAME
.. envvar:: MESA_D3D12_DEFAULT_ADAPTER_NAME <string> ("")

   Specifies a substring to search for when choosing a default adapter to
   run on. The first adapter matching the substring is chosen. The substring
   is not case sensitive.
   Specifies a substring to search for when choosing a default adapter to
   run on. The first adapter matching the substring is chosen. The substring
   is not case sensitive.
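Both versions of the D3D12 page describe the same comma-separated flag convention, so for illustration, a small sketch of launching an app with those variables set (the chosen flags and the ``warp`` substring are arbitrary examples, and any GL/Vulkan client works in place of ``glxinfo``):

.. code-block:: python

   import os
   import subprocess

   env = dict(os.environ)
   env['D3D12_DEBUG'] = 'verbose,debuglayer'        # comma-separated flags from the list above
   env['MESA_D3D12_DEFAULT_ADAPTER_NAME'] = 'warp'  # case-insensitive adapter substring
   subprocess.run(['glxinfo', '-B'], env=env)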
@@ -1,574 +1,12 @@
|
||||
Freedreno
|
||||
=========
|
||||
|
||||
Freedreno GLES and GL driver for Adreno 2xx-6xx GPUs. It implements up to
|
||||
OpenGL ES 3.2 and desktop OpenGL 4.5.
|
||||
|
||||
See the `Freedreno Wiki
|
||||
<https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/home>`__ for more
|
||||
details.
|
||||
|
||||
Turnip
|
||||
======
|
||||
|
||||
Turnip is a Vulkan 1.3 driver for Adreno 6xx GPUs.
|
||||
|
||||
The current set of specific chip versions supported can be found in
|
||||
:file:`src/freedreno/common/freedreno_devices.py`. The current set of features
|
||||
supported can be found rendered at `Mesa Matrix <https://mesamatrix.net/>`__.
|
||||
There are no plans to port to a5xx or earlier GPUs.
|
||||
|
||||
Hardware architecture
|
||||
---------------------
|
||||
|
||||
Adreno is a mostly tile-mode renderer, but with the option to bypass tiling
|
||||
("gmem") and render directly to system memory ("sysmem"). It is UMA, using
|
||||
mostly write combined memory but with the ability to map some buffers as cache
|
||||
coherent with the CPU.
|
||||
|
||||
.. toctree::
|
||||
:glob:
|
||||
|
||||
freedreno/hw/*
|
||||
|
||||
Hardware acronyms
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. glossary::
|
||||
|
||||
Cluster
|
||||
A group of hardware registers, often with multiple copies to allow
|
||||
pipelining. There is an M:N relationship between hardware blocks that do
|
||||
work and the clusters of registers for the state that hardware blocks use.
|
||||
|
||||
CP
|
||||
Command Processor. Reads the stream of statechanges and draw commands
|
||||
generated by the driver.
|
||||
|
||||
PFP
|
||||
Prefetch Parser. Adreno 2xx-4xx CP component.
|
||||
|
||||
ME
|
||||
Micro Engine. Adreno 2xx-4xx CP component after PFP, handles most PM4 commands.
|
||||
|
||||
SQE
|
||||
a6xx+ replacement for PFP/ME. This is the microcontroller that runs the
|
||||
microcode (loaded from Linux) which actually processes the command stream
|
||||
and writes to the hardware registers. See `afuc
|
||||
<https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/freedreno/afuc/README.rst>`__.
|
||||
|
||||
ROQ
|
||||
DMA engine used by the SQE for reading memory, with some prefetch buffering.
|
||||
Mostly reads in the command stream, but also serves for
|
||||
``CP_MEMCPY``/``CP_MEM_TO_REG`` and visibility stream reads.
|
||||
|
||||
SP
|
||||
Shader Processor. Unified, scalar shader engine. One or more, depending on
|
||||
GPU and tier.
|
||||
|
||||
TP
|
||||
Texture Processor.
|
||||
|
||||
UCHE
|
||||
Unified L2 Cache. 32KB on A330, unclear how big now.
|
||||
|
||||
CCU
|
||||
Color Cache Unit.
|
||||
|
||||
VSC
|
||||
Visibility Stream Compressor
|
||||
|
||||
PVS
|
||||
Primitive Visibiliy Stream
|
||||
|
||||
FE
|
||||
Front End? Index buffer and vertex attribute fetch cluster. Includes PC,
|
||||
VFD, VPC.
|
||||
|
||||
VFD
|
||||
Vertex Fetch and Decode
|
||||
|
||||
VPC
|
||||
Varying/Position Cache? Hardware block that stores shaded vertex data for
|
||||
primitive assembly.
|
||||
|
||||
HLSQ
|
||||
High Level Sequencer. Manages state for the SPs, batches up PS invocations
|
||||
between primitives, is involved in preemption.
|
||||
|
||||
PC_VS
|
||||
Cluster where varyings are read from VPC and assembled into primitives to
|
||||
feed GRAS.
|
||||
|
||||
VS
|
||||
Vertex Shader. Responsible for generating VS/GS/tess invocations
|
||||
|
||||
GRAS
|
||||
Rasterizer. Responsible for generating PS invocations from primitives, also
|
||||
does LRZ
|
||||
|
||||
PS
|
||||
Pixel Shader.
|
||||
|
||||
RB
|
||||
Render Backend. Performs both early and late Z testing, blending, and
|
||||
attachment stores of output of the PS.
|
||||
|
||||
GMEM
|
||||
Roughly 128KB-1MB of memory on the GPU (SKU-dependent), used to store
|
||||
attachments during tiled rendering
|
||||
|
||||
LRZ
|
||||
Low Resolution Z. A low resolution area of the depth buffer that can be
|
||||
initialized during the binning pass to contain the worst-case (farthest) Z
|
||||
values in a block, and then used to early reject fragments during
|
||||
rasterization.
|
||||
|
||||
Cache hierarchy
^^^^^^^^^^^^^^^

The a6xx GPUs have two main caches: CCU and UCHE.

UCHE (Unified L2 Cache) is the cache behind the vertex fetch, VSC writes,
texture L1, LRZ, and storage image accesses (``ldib``/``stib``). Misses and
flushes access system memory.

The CCU is the separate cache used by 2D blits and sysmem render target access
(and also for resolves to system memory when in GMEM mode). Its memory comes
from a carveout of GMEM controlled by ``RB_CCU_CNTL``, with a varying amount
reserved based on whether we're in a render pass using GMEM for attachment
storage, or we're doing sysmem rendering. Cache entries have the attachment
number and layer mixed into the cache tag in some way, likely so that a
fragment's access is spread through the cache even if the attachments have the
same size and alignment in address space. This means that the cache must be
flushed and invalidated between memory being used for one attachment and
another (notably depth vs color, but also MRT color).

The Texture Processors (TP) additionally have a small L1 cache (1KB on A330,
unclear how big now) before accessing UCHE. This cache is used for normal
sampling like ``sam`` and ``isam`` (and the compiler will make read-only
storage image accesses through it as well). It is not coherent with UCHE (you
may get stale results when you ``sam`` after ``stib``), but it must get flushed
at least per draw, because you don't need a manual invalidate between draws
storing to an image and draws sampling from a texture.

The command processor (CP) does not read from either of these caches, and
instead uses FIFOs in the ROQ to avoid stalls reading from system memory.

Draw states
^^^^^^^^^^^

Since the SQE is not a fast processor, and tiled rendering means that many
draws won't even be used in many bins, state updates can (since a5xx) be
batched up into "draw states" that point to a fragment of CP packets. At draw
time, if the draw call is going to actually execute (some primitive is visible
in the current tile), the SQE goes through the ``GROUP_ID``\s and, for any with
an update since the last time they were executed, executes the corresponding
fragment.

Starting with a6xx, states can be tagged with whether they should be executed
at draw time for any of sysmem, binning, or tile rendering. This allows a
single command stream to be generated which can be executed in any of the
modes, unlike pre-a6xx where we had to generate separate command lists for the
binning and rendering phases.

Note that this means that the generated draw state always has to update all of
the state you have chosen to pack into that ``GROUP_ID``, since any of your
previous state changes in a previous draw state command may have been skipped.

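As a rough illustration of the a6xx mechanism, a draw state group that should
only be executed during the binning pass might be emitted like this (the field
names follow the ``CP_SET_DRAW_STATE`` packet definitions in the register
database; the ``emit_pkt7()``/``emit()`` helpers and the variables are
placeholders, not the actual driver code):

.. code-block:: c

   /* Hypothetical sketch: register one draw state group with the SQE that is
    * skipped for sysmem and gmem rendering and only executed while binning.
    */
   emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
   emit(cs, CP_SET_DRAW_STATE__0_COUNT(state_dwords) |
            CP_SET_DRAW_STATE__0_BINNING |
            CP_SET_DRAW_STATE__0_GROUP_ID(group_id));
   emit(cs, lower_32_bits(state_iova)); /* address of the CP packet fragment */
   emit(cs, upper_32_bits(state_iova));
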
Pipelining (a6xx+)
^^^^^^^^^^^^^^^^^^

Most CP commands write to registers. In a6xx+, the registers are located in
clusters corresponding to the stage of the pipeline they are used from (see
``enum tu_stage`` for a list). To pipeline state updates and drawing, registers
generally have two copies ("contexts") in their cluster, so previous draws can
be working on the previous set of register state while the next draw's state is
being set up. You can find which registers go into which clusters by looking at
:command:`crashdec` output in the ``regs-name: CP_MEMPOOL`` section.

As the SQE processes register writes in the command stream, it sends them into
a per-cluster queue stored in ``CP_MEMPOOL``. This allows the pipeline stages
to process their stream of register updates and events independently of each
other (so even with just 2 contexts in a stage, earlier stages can proceed on
to later draws before later stages have caught up).

Each cluster has a per-context bit indicating that the context is done/free.
Register writes will stall on the context being done.

During a 3D draw command, the SQE generates several internal events that flow
through the pipeline:

- ``CP_EVENT_START`` clears the done bit for the context when written to the
  cluster.
- ``PC_EVENT_CMD``/``PC_DRAW_CMD``/``HLSQ_EVENT_CMD``/``HLSQ_DRAW_CMD`` kick off
  the actual event/drawing.
- ``CONTEXT_DONE`` event completes after the event/draw is complete and sets
  the done flag.
- ``CP_EVENT_END`` waits for the done flag on the next context, then copies all
  the registers that were dirtied in this context to that one.

The 2D blit engine has its own ``CP_2D_EVENT_START``, ``CP_2D_EVENT_END``, and
``CONTEXT_DONE_2D``, so 2D and 3D register contexts can do separate context
rollover.

Because the clusters proceed independently of each other even across draws, if
you need to synchronize an earlier cluster to the output of a later one, then
you will need to ``CP_WAIT_FOR_IDLE`` after flushing and invalidating any
necessary caches.

Also, note that some registers are not banked at all, and will require a
``CP_WAIT_FOR_IDLE`` for any previous usage of the register to complete.

In a2xx-a4xx, there weren't per-stage clusters; instead there were two register
banks that were flipped between per draw.

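A sketch of the synchronization described above, assuming placeholder emit
helpers (the opcode and event names are from the pm4 definitions, but the
exact flush/invalidate events needed depend on which caches are involved):

.. code-block:: c

   /* Hypothetical sketch: invalidate UCHE so earlier clusters re-read memory,
    * then stall command processing until the pipeline has drained.
    */
   emit_pkt7(cs, CP_EVENT_WRITE, 1);
   emit(cs, CACHE_INVALIDATE);
   emit_pkt7(cs, CP_WAIT_FOR_IDLE, 0);
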
Bindless/Bindful Descriptors (a6xx+)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Starting with a6xx, cat5 (texture) and cat6 (image/ssbo/ubo) instructions are
extended to support bindless descriptors.

In the old bindful model, descriptors are separate for textures, samplers,
UBOs, and IBOs (combined descriptor for images and SSBOs), with separate
registers for the memory containing the array of descriptors, and/or different
``STATE_TYPE`` and ``STATE_BLOCK`` for ``CP_LOAD_STATE``/``_FRAG``/``_GEOM``
to pre-load the descriptors into cache.

- textures - per-shader-stage

  - registers: ``SP_xS_TEX_CONST``/``SP_xS_TEX_COUNT``
  - state-type: ``ST6_CONSTANTS``
  - state-block: ``SB6_xS_TEX``

- samplers - per-shader-stage

  - registers: ``SP_xS_TEX_SAMP``
  - state-type: ``ST6_SHADER``
  - state-block: ``SB6_xS_TEX``

- UBOs - per-shader-stage

  - registers: none
  - state-type: ``ST6_UBO``
  - state-block: ``SB6_xS_SHADER``

- IBOs - global across 3d shader stages, separate for compute shaders

  - registers: ``SP_IBO``/``SP_IBO_COUNT`` or ``SP_CS_IBO``/``SP_CS_IBO_COUNT``
  - state-type: ``ST6_SHADER``
  - state-block: ``SB6_IBO``, or ``SB6_CS_IBO`` for compute shaders
  - Note: unlike the per-shader-stage descriptors, plain ``CP_LOAD_STATE6`` is
    used, as opposed to ``CP_LOAD_STATE6_GEOM`` or ``CP_LOAD_STATE6_FRAG``
    depending on shader stage.

.. note::
   For the per-shader-stage registers and state-blocks, the ``xS`` notation
   refers to per-shader-stage names, e.g. ``SP_FS_TEX_CONST`` or ``SB6_DS_TEX``.

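For instance, pre-loading the fragment shader's texture descriptors might look
roughly like the following (the state-type/state-block values are the ones
listed above; the ``emit_pkt7()``/``emit()`` helpers and the ``SS6_INDIRECT``
source field are approximations of the packet definitions, not the actual
driver code):

.. code-block:: c

   /* Hypothetical sketch: point the descriptor cache at num_textures texture
    * descriptors for the FS, stored in memory at tex_const_iova.
    */
   emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
   emit(cs, CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
            CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
            CP_LOAD_STATE6_0_NUM_UNIT(num_textures));
   emit(cs, lower_32_bits(tex_const_iova));
   emit(cs, upper_32_bits(tex_const_iova));
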
Textures and IBOs (images) use *basically* the same 64-byte descriptor format,
with some exceptions (for example, for IBOs cubemaps are handled as 2D arrays).
SSBOs are just untyped buffers, but otherwise use the same descriptors and
instructions as images. Samplers use a 16-byte descriptor, and UBOs use an
8-byte descriptor which packs the size in the upper 15 bits of the UBO address.

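As a sketch of the UBO descriptor layout just described (the split into 49
address bits plus 15 size bits follows from "upper 15 bits"; the units of the
size field are an assumption of this sketch):

.. code-block:: c

   #include <stdint.h>

   /* Hypothetical sketch: pack an 8-byte UBO descriptor, with the size stored
    * in the 15 bits above the 49 address bits.
    */
   static inline uint64_t
   pack_ubo_descriptor(uint64_t ubo_iova, uint64_t size)
   {
      return (ubo_iova & ((UINT64_C(1) << 49) - 1)) | (size << 49);
   }
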
In the bindless model, descriptors are split into 5 descriptor sets, which are
global across shader stages (but, as with bindful IBO descriptors, separate for
3d stages vs the compute stage). Each hw descriptor set is an array of
descriptors of configurable size (each descriptor set can be configured for a
descriptor pitch of 8 bytes or 64 bytes). Each descriptor can be of arbitrary
format (i.e. UBOs/IBOs/textures/samplers interleaved); its interpretation by
the hw is determined by the instruction that references the descriptor. Each
descriptor set can contain at least 2^16 descriptors.

The hw is configured with the base address of each descriptor set via an array
of "BINDLESS_BASE" registers, i.e. ``SP_BINDLESS_BASE[n]``/``HLSQ_BINDLESS_BASE[n]``
for 3d shader stages, or ``SP_CS_BINDLESS_BASE[n]``/``HLSQ_CS_BINDLESS_BASE[n]``
for compute shaders, with the descriptor pitch encoded in the low bits.
Which of the descriptor sets is referenced is encoded via three bits in the
instruction. The address of the descriptor is calculated as::

   descriptor_addr = (BINDLESS_BASE[n] & ~0x3) +
                     (idx * 4 * (2 << (BINDLESS_BASE[n] & 0x3)))

.. note::
   Turnip reserves one descriptor set for internal use and exposes the other
   four for the application via the Vulkan API.

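Transcribing that formula into C as a minimal sketch (the function and
parameter names are local to this example):

.. code-block:: c

   #include <stdint.h>

   /* Compute a descriptor's address from the BINDLESS_BASE register value and
    * the descriptor index.  The low 2 bits of the register select the
    * descriptor pitch: 0 gives 4 * (2 << 0) = 8 bytes, 3 gives
    * 4 * (2 << 3) = 64 bytes.
    */
   static inline uint64_t
   bindless_descriptor_addr(uint64_t bindless_base, uint32_t idx)
   {
      uint64_t base  = bindless_base & ~(uint64_t)0x3;
      uint64_t pitch = 4 * (UINT64_C(2) << (bindless_base & 0x3));
      return base + (uint64_t)idx * pitch;
   }
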
Software Architecture
---------------------

Freedreno and Turnip use a shared core for the shader compiler, image layout,
and register and command stream definitions, but implement separate state
management and command stream generation.

Freedreno driver-specific docs:

.. toctree::
   :glob:

   freedreno/*

GPU devcoredump
^^^^^^^^^^^^^^^

A kernel message from DRM of "gpu fault" can mean any sort of error reported by
the GPU (including its internal hang detection). If a fault in GPU address
space happened, you should expect to find a message from the iommu with the
faulting address and the hardware unit involved:

.. code-block:: console

   *** gpu fault: ttbr0=000000001c941000 iova=000000010066a000 dir=READ type=TRANSLATION source=TP|VFD (0,0,0,1)

On a GPU fault or hang, a GPU core dump is taken by the DRM driver and saved to
``/sys/devices/virtual/devcoredump/**/data``. You can copy that file to a
:file:`crash.devcore` to save it; otherwise the kernel will expire it
eventually. Echo 1 to the file to free the core early, as another core won't be
taken until then.

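For example (the ``devcd1`` instance name below is an assumption; the actual
directory name varies per device and incident):

.. code-block:: console

   cp /sys/devices/virtual/devcoredump/devcd1/data crash.devcore
   echo 1 > /sys/devices/virtual/devcoredump/devcd1/data
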
Once you have your core file, you can use :command:`crashdec -f crash.devcore`
to decode it. The output will have an ``ESTIMATED CRASH LOCATION`` where we
estimate the CP to have stopped. Note that it is expected that this will be
some distance past whatever state triggered the fault, given GPU pipelining,
and will often be at some ``CP_REG_TO_MEM`` (which waits on previous WFIs) or
``CP_WAIT_FOR_ME`` (which waits for all register writes to land) or similar
event. You can try running the workload with ``TU_DEBUG=flushall`` or
``FD_MESA_DEBUG=flush`` to try to close in on the failing commands.

You can also find what commands were queued up to each cluster in the
``regs-name: CP_MEMPOOL`` section.

If there is no ``ESTIMATED CRASH LOCATION``, you could look at ``CP_SQE_STAT``,
though this is a last resort and likely won't be helpful.

.. code-block::

   indexed-registers:
     - regs-name: CP_SQE_STAT
       dwords: 51
       PC: 00d7 <-------------
       PKT: CP_LOAD_STATE6_FRAG
       $01: 70348003  $11: 00000000
       $02: 20000000  $12: 00000022

The ``PC`` value is an instruction address in the current firmware. You would
need to disassemble the firmware (:file:`/lib/firmware/qcom/aXXX_sqe.fw`) via:

.. code-block:: console

   afuc-disasm -v a650_sqe.fw > a650_sqe.fw.disasm

Now you should search for the PC value in the disassembly, e.g.:

.. code-block::

   l018: 00d1: 08dd0001 add $addr, $06, 0x0001
         00d2: 981ff806 mov $data, $data
         00d3: 8a080001 mov $08, 0x0001 << 16
         00d4: 3108ffff or $08, $08, 0xffff
         00d5: 9be8f805 and $data, $data, $08
         00d6: 9806e806 mov $addr, $06
         00d7: 9803f806 mov $data, $03 <------------- HERE
         00d8: d8000000 waitin
         00d9: 981f0806 mov $01, $data

Command Stream Capture
^^^^^^^^^^^^^^^^^^^^^^

During Mesa development, it's often useful to look at the command streams we
send to the kernel. Mesa itself doesn't implement a way to stream them out
(though it maybe should!). Instead, we have an interface for the kernel to
capture all submitted command streams:

.. code-block:: console

   cat /sys/kernel/debug/dri/0/rd > cmdstream &

By default, command stream capture does not capture texture/vertex/etc. data.
You can enable capturing all the BOs with:

.. code-block:: console

   echo Y > /sys/module/msm/parameters/rd_full

Note that, since all command streams get captured, it is easy to run the system
out of memory doing this, so you probably don't want to enable it during play
of a heavyweight game. Instead, to capture a command stream within a game, you
probably want to cause a crash in the GPU during a frame of interest so that a
single GPU core dump is generated. Emitting ``0xdeadbeef`` in the CS should be
enough to cause a fault.

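In Freedreno, a sketch of such an intentional fault could be as simple as
emitting the magic value into the ring at the point of interest (assuming a
spot in the driver where a command stream is under construction; ``0xdeadbeef``
is not a valid packet header, so the CP should fault when it reaches it):

.. code-block:: c

   /* Hypothetical sketch: poison the command stream so the CP faults here and
    * the kernel takes a devcoredump of this submit.
    */
   OUT_RING(ring, 0xdeadbeef);
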
Capturing Hang RD
+++++++++++++++++

The devcore file doesn't contain all submitted command streams, only the
hanging one. Additionally, it is geared towards analyzing the GPU state at the
moment of the crash.

Alternatively, it's possible to obtain the whole submission with all command
streams via ``/sys/kernel/debug/dri/0/hangrd``:

.. code-block:: console

   sudo cat /sys/kernel/debug/dri/0/hangrd > logfile.rd  # do the cat _before_ the expected hang

The format of hangrd is the same as in ordinary command stream capture, and
``rd_full`` has the same effect on it.

Replaying Command Stream
^^^^^^^^^^^^^^^^^^^^^^^^

The ``replay`` tool allows replaying ``rd`` captures to reproduce GPU faults.
It is especially useful for transient GPU issues, since replaying has a much
higher chance of reproducing them.

Dumping rendering results or even just memory is currently unsupported.

- Replaying command streams requires a kernel with ``MSM_INFO_SET_IOVA``
  support.
- It requires the ``rd`` capture to have full snapshots of memory (``rd_full``
  enabled).

Replaying is done via the ``replay`` tool:

.. code-block:: console

   ./replay test_replay.rd

More examples:

.. code-block:: console

   ./replay --first=start_submit_n --last=last_submit_n test_replay.rd

.. code-block:: console

   ./replay --override=0 --generator=./generate_rd test_replay.rd

Editing Command Stream (a6xx+)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While replaying a fault is useful in itself, modifying the capture to
understand what causes the fault can be even more useful.

``rddecompiler`` decompiles a single cmdstream from an ``rd`` into compilable C
source. Given the address space bounds, the generated program creates a new
``rd`` which can be used to override the cmdstream with ``replay``. The
generated ``rd`` is not replayable on its own and depends on buffers provided
by the source ``rd``.

The C source can be compiled using rdcompiler-meson.build as an example.

The workflow would look like this:

1. Find the number of the cmdstream you want to edit;
2. Decompile it:

   .. code-block:: console

      ./rddecompiler -s %cmd_stream_n% example.rd > generate_rd.c

3. Edit the command stream;
4. Compile it back, see rdcompiler-meson.build for the instructions;
5. Plug the generator into cmdstream replay:

   .. code-block:: console

      ./replay --override=%cmd_stream_n% --generator=~/generate_rd

6. Repeat 3-5.

GPU Hang Debugging
^^^^^^^^^^^^^^^^^^

This is not a guide for how to debug hangs, but mostly an enumeration of
methods.

Useful ``TU_DEBUG`` (for Turnip) options to narrow down the hang cause:

``sysmem``, ``gmem``, ``nobin``, ``forcebin``, ``noubwc``, ``nolrz``,
``flushall``, ``syncdraw``, ``rast_order``

Useful ``FD_MESA_DEBUG`` (for Freedreno) options:

``sysmem``, ``gmem``, ``nobin``, ``noubwc``, ``nolrz``, ``notile``, ``dclear``,
``ddraw``, ``flush``, ``inorder``, ``noblit``

Useful ``IR3_SHADER_DEBUG`` options:

``nouboopt``, ``spillall``, ``nopreamble``, ``nofp16``

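For example, to check whether LRZ or binning is involved in a Turnip hang
(``my_app`` is a placeholder for whatever reproduces it):

.. code-block:: console

   TU_DEBUG=nolrz,nobin ./my_app
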
Use the Graphics Flight Recorder to narrow down the place which hangs, or use
our own breadcrumbs implementation in the case of unrecoverable hangs.

In case of faults, use RenderDoc to find the problematic command. If it's a
draw call, edit the shader in RenderDoc to find out whether the culprit is the
shader; if it is, bisect the shader.

If editing the shader perturbs the assembly too much and the issue becomes
unreproducible, try editing the assembly itself via
``IR3_SHADER_OVERRIDE_PATH``.

If the culprit is not a shader and the fault or hang is transient, try
capturing an ``rd`` and replaying it. If the issue reproduces, bisect the GPU
packets until the culprit is found.

The hang recovery mechanism in the kernel is not perfect; in the case of
unrecoverable hangs, check whether the kernel is up to date and look for
unmerged patches which could improve the recovery.

GPU Breadcrumbs
+++++++++++++++

The breadcrumbs described below are available only in Turnip.

Freedreno has simpler breadcrumbs: in debug builds it writes breadcrumbs into
``CP_SCRATCH_REG[6]`` and per-tile breadcrumbs into ``CP_SCRATCH_REG[7]``,
which makes them available in the devcoredump. TODO: generalize Turnip's
breadcrumbs implementation.

This is a simple implementation of breadcrumb tracking of GPU progress,
intended to be a last resort when debugging unrecoverable hangs. For best
results use Vulkan traces, to have a predictable place of hang.

For ordinary hangs, a more user-friendly solution is GFR ("Graphics Flight
Recorder").

Our breadcrumbs implementation aims to handle cases where nothing can be done
after the hang. In-driver breadcrumbs also allow more precise tracking, since
we can target a single GPU packet.

While breadcrumbs support gmem, try to reproduce the hang in sysmem mode,
because it requires far fewer breadcrumb writes and syncs.

Breadcrumbs settings:

.. code-block:: console

   TU_BREADCRUMBS=%IP%:%PORT%,break=%BREAKPOINT%:%BREAKPOINT_HITS%

``BREAKPOINT``
   The breadcrumb from which we start requiring an explicit ack.

``BREAKPOINT_HITS``
   How many times the breakpoint should be reached for the break to occur.
   Necessary for gmem mode and for reusable cmdbuffers, in both of which the
   same cmdstream may be executed several times.

A typical workflow would be:

- Start listening for breadcrumbs on a remote host:

  .. code-block:: console

     nc -lvup $PORT | stdbuf -o0 xxd -pc -c 4 | awk -Wposix '{printf("%u:%u\n", "0x" $0, a[$0]++)}'

- Start capturing the command stream;
- Replay the hanging trace with:

  .. code-block:: console

     TU_BREADCRUMBS=$IP:$PORT,break=-1:0

- Increase the hangcheck period:

  .. code-block:: console

     echo -n 60000 > /sys/kernel/debug/dri/0/hangcheck_period_ms

- After the GPU hang, note the last breadcrumb and relaunch the trace with:

  .. code-block:: console

     TU_BREADCRUMBS=%IP%:%PORT%,break=%LAST_BREADCRUMB%:%HITS%

- After the breakpoint is reached, each breadcrumb requires an explicit ack
  from the user. This way it's possible to find the last packet which didn't
  hang.
- Find the packet in the decoded cmdstream.

See the `Freedreno Wiki <https://github.com/freedreno/freedreno/wiki>`__
for more details.