Compare commits
243 Commits
mesa-22.2. ... mesa-22.0.
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 4a8d3189fd |  |
|  | 7591d62248 |  |
|  | b66aad0909 |  |
|  | 3e37848f50 |  |
|  | 847a5dceea |  |
|  | 81573439e2 |  |
|  | 77699bb3bf |  |
|  | 420717c6eb |  |
|  | e7398410b5 |  |
|  | b25f04acbe |  |
|  | 1178741fd0 |  |
|  | 93f5f9b75f |  |
|  | 3348844bc4 |  |
|  | 29960e0b4a |  |
|  | 8bd46a976b |  |
|  | 4286b5c02a |  |
|  | 8c4d047a73 |  |
|  | e5af048e71 |  |
|  | d743f9589d |  |
|  | 28922eabe0 |  |
|  | 15ecd5a917 |  |
|  | 6d31e112e3 |  |
|  | d842baf7bc |  |
|  | 9fef791f47 |  |
|  | 8f64820495 |  |
|  | 3b0d0fbe4b |  |
|  | 7559a9f8f6 |  |
|  | 157e3046bd |  |
|  | 08fd564c16 |  |
|  | 3c03823941 |  |
|  | 553c60c6ff |  |
|  | d63a6f52a7 |  |
|  | ecf3675b31 |  |
|  | 1dc2900731 |  |
|  | 9c93c285ff |  |
|  | f254c2ebf4 |  |
|  | 7976ceda55 |  |
|  | ab468e1e0b |  |
|  | c457e5b793 |  |
|  | 76b8a28d5a |  |
|  | bbdcc0d1e4 |  |
|  | 0185719f0a |  |
|  | 3aca53b5dd |  |
|  | 6822ea6304 |  |
|  | 41a8a3b0a0 |  |
|  | 3c04049345 |  |
|  | b82412510b |  |
|  | f00548eeac |  |
|  | deb0225609 |  |
|  | 8bd9c64fef |  |
|  | 63e9e33987 |  |
|  | a46dbbdc12 |  |
|  | c2541d65c8 |  |
|  | 4656dbee08 |  |
|  | 17836a6d64 |  |
|  | 42bf17f6cc |  |
|  | bf12c7cde4 |  |
|  | 7412747fe0 |  |
|  | 9de53fe559 |  |
|  | 7c997acb9e |  |
|  | 919b08343a |  |
|  | d6c9119926 |  |
|  | 37d4a31eba |  |
|  | 092f43be3a |  |
|  | f7ee7575a4 |  |
|  | b651d7525f |  |
|  | e6e021dc5f |  |
|  | 3fc09b282a |  |
|  | b799d764e2 |  |
|  | acdab738d2 |  |
|  | 4b45d80f05 |  |
|  | cc1bd5c987 |  |
|  | a82920af6e |  |
|  | 9848a7b7fc |  |
|  | 5bb4e9a087 |  |
|  | db9c1251f1 |  |
|  | e6f159d626 |  |
|  | c75e89962e |  |
|  | d8e5624984 |  |
|  | 2caf7b788f |  |
|  | da86354624 |  |
|  | 4f1a3be5f6 |  |
|  | eadc089854 |  |
|  | 5f326287c9 |  |
|  | e86cc99649 |  |
|  | 085a5d96e5 |  |
|  | 909b675c19 |  |
|  | 8a22cc32af |  |
|  | 4a60a6be41 |  |
|  | c3497b6e45 |  |
|  | 1ab727add7 |  |
|  | 03082e84c5 |  |
|  | 8821f6cd24 |  |
|  | aa43f9816e |  |
|  | afbc24a234 |  |
|  | ec045339f6 |  |
|  | e5a966781d |  |
|  | 517d35c5da |  |
|  | dbc62f271c |  |
|  | e8685e4b3f |  |
|  | 3f1c460c91 |  |
|  | f3ff1dceff |  |
|  | 7991189f9a |  |
|  | 0c5203423b |  |
|  | 2254dcedfc |  |
|  | 7d3ff8ec16 |  |
|  | ee42fa621d |  |
|  | 65a55ea2a8 |  |
|  | d45eaf087d |  |
|  | 31e12f38d0 |  |
|  | f2f6902a48 |  |
|  | 1a31377e63 |  |
|  | 716ca39489 |  |
|  | 3386e2a3ed |  |
|  | f2a263bb12 |  |
|  | 29822cf29a |  |
|  | dcb32ae3a5 |  |
|  | 9dcf2c0f3c |  |
|  | 9175d1a2b0 |  |
|  | fea4ef272e |  |
|  | 1aa5f5e575 |  |
|  | b4245e3893 |  |
|  | 01db75635a |  |
|  | 6961f75931 |  |
|  | 962197c5fd |  |
|  | db716caf8c |  |
|  | a5dfdd54c0 |  |
|  | 4d5eea6732 |  |
|  | 381fe26b50 |  |
|  | 711b82e6ae |  |
|  | 43be854eef |  |
|  | 85a5ec74f9 |  |
|  | 379697197a |  |
|  | bfa73c568e |  |
|  | 148aac1b3b |  |
|  | 949b25d6fd |  |
|  | 1dcb8306b1 |  |
|  | f0ba075c41 |  |
|  | 8facc12855 |  |
|  | 5cc0bcf6d7 |  |
|  | b3f333348d |  |
|  | 4fee724faf |  |
|  | f47fdc49a9 |  |
|  | 76b498bd80 |  |
|  | 9db0bfb073 |  |
|  | 23032ee9db |  |
|  | 94f5c307cb |  |
|  | de55b090e4 |  |
|  | 38c88f36ff |  |
|  | ace12c4da3 |  |
|  | 383369f1bd |  |
|  | e4f1fafea9 |  |
|  | bf5e7bcebb |  |
|  | 0b51f5e3ca |  |
|  | 9c94e7e724 |  |
|  | 60ad4707d4 |  |
|  | a736424506 |  |
|  | 07a9bc76d3 |  |
|  | e8d25b50c9 |  |
|  | bd29a0689b |  |
|  | 183e19f23a |  |
|  | 9b9bc8ad64 |  |
|  | ea23bb04f4 |  |
|  | e280130626 |  |
|  | d1e61f595d |  |
|  | 6f56bad4dd |  |
|  | c454ff42f0 |  |
|  | 0906f9f7d2 |  |
|  | 6353a869ca |  |
|  | fdd351cc81 |  |
|  | 5998d19a96 |  |
|  | 4aa73d5310 |  |
|  | 1d25a226fa |  |
|  | fa4d9f5934 |  |
|  | 2a03c296fd |  |
|  | 527bac358d |  |
|  | 277d7fe83d |  |
|  | e76b049b2b |  |
|  | bd23929d01 |  |
|  | 47c9c42764 |  |
|  | db7f26a944 |  |
|  | d191aa607c |  |
|  | c9215c391a |  |
|  | 53f532bac2 |  |
|  | 7885a3d1b1 |  |
|  | 081ea7fc89 |  |
|  | fc450c2f54 |  |
|  | 1c3a61979b |  |
|  | 0c22f3ffaf |  |
|  | 295022cb75 |  |
|  | 1e7d39f9db |  |
|  | 95f87609de |  |
|  | 0fd825b4f4 |  |
|  | fcd93800bb |  |
|  | 3241c61b68 |  |
|  | e791cc3ca5 |  |
|  | 21dfddc2f6 |  |
|  | 1830da60e8 |  |
|  | ea3afa5690 |  |
|  | 8cca40e8a3 |  |
|  | 2b73963911 |  |
|  | 8d42870798 |  |
|  | cc1511acd7 |  |
|  | a32927b187 |  |
|  | 1758082912 |  |
|  | d653651b49 |  |
|  | 7ba68c86f7 |  |
|  | 166fc8e9f3 |  |
|  | c1822e7222 |  |
|  | fb8a9be463 |  |
|  | b0ad08de15 |  |
|  | c820bbced0 |  |
|  | b8e9c345d0 |  |
|  | 3b1563dcfb |  |
|  | e93350a833 |  |
|  | 716fc5280a |  |
|  | 2e1387c752 |  |
|  | a910e58ad8 |  |
|  | 54f49993d1 |  |
|  | 2b282fb3b5 |  |
|  | 4e67d2aad4 |  |
|  | 8f5fb1eb10 |  |
|  | 4587268d2b |  |
|  | a04818a500 |  |
|  | 59b2c1ddde |  |
|  | 2ce020120a |  |
|  | ba2d22e95f |  |
|  | 5ff5f3cbf7 |  |
|  | 5a7a564d7c |  |
|  | 2c7d0e1b49 |  |
|  | 83eef372a2 |  |
|  | 0a1f166f4d |  |
|  | 68242654f8 |  |
|  | c7bd1f0720 |  |
|  | 88762cf59b |  |
|  | 6420dc86cf |  |
|  | a58a01050c |  |
|  | b6e296f823 |  |
|  | fabb6b5c5e |  |
|  | 0ec3de0563 |  |
|  | b2be43a192 |  |
|  | 9e17fcbed2 |  |
|  | c69a870f86 |  |
.editorconfig

@@ -16,14 +16,26 @@ max_line_length = 78

[{Makefile*,*.mk}]
indent_style = tab

[*.py]
[{*.py,SCons*}]
indent_style = space
indent_size = 4

[*.pl]
indent_style = space
indent_size = 4

[*.m4]
indent_style = space
indent_size = 2

[*.yml]
indent_style = space
indent_size = 2

[*.html]
indent_style = space
indent_size = 2

[*.rst]
indent_style = space
indent_size = 3

@@ -34,8 +46,3 @@ trim_trailing_whitespace = false

[{meson.build,meson_options.txt}]
indent_style = space
indent_size = 2


[*.ps1]
indent_style = space
indent_size = 2
39 .github/workflows/ci.yml vendored

@@ -1,39 +0,0 @@

name: CI
on: push

permissions:
  contents: read

jobs:
  CI:
    runs-on: macos-latest
    steps:
    - name: Checkout
      uses: actions/checkout@v3
    - name: Install Dependencies
      run: |
        cat > Brewfile <<EOL
        brew "bison"
        brew "expat"
        brew "gettext"
        brew "libx11"
        brew "libxcb"
        brew "libxdamage"
        brew "libxext"
        brew "meson"
        brew "pkg-config"
        brew "python@3.10"
        EOL

        brew update
        brew bundle --verbose
    - name: Install Mako
      run: pip3 install --user mako
    - name: Configure
      run: meson . build -Dbuild-tests=true -Dosmesa=true
    - name: Build
      run: meson compile -C build
    - name: Test
      run: meson test -C build --print-errorlogs
    - name: Install
      run: meson install -C build
2 .gitignore vendored

@@ -1,4 +1,4 @@

*.pyc
*.pyo
*.out
/build
build
1189 .gitlab-ci.yml

File diff suppressed because it is too large
@@ -6,6 +6,9 @@

# reliable to be run in parallel with other tests due to CPU-side timing.
dEQP-GLES[0-9]*.functional.flush_finish.*

# https://gitlab.freedesktop.org/mesa/mesa/-/issues/4575
dEQP-VK.wsi.display.get_display_plane_capabilities

# piglit: WGL is Windows-only
wgl@.*
@@ -1,67 +0,0 @@

version: 1

# Rules to match for a machine to qualify
target:
  {% if tags %}
  {% set b2ctags = tags.split(',') %}
  tags:
  {% for tag in b2ctags %}
    - '{{ tag | trim }}'
  {% endfor %}
  {% endif %}

timeouts:
  first_console_activity:  # This limits the time it can take to receive the first console log
    minutes: {{ timeout_first_minutes }}
    retries: {{ timeout_first_retries }}
  console_activity:        # Reset every time we receive a message from the logs
    minutes: {{ timeout_minutes }}
    retries: {{ timeout_retries }}
  boot_cycle:
    minutes: {{ timeout_boot_minutes }}
    retries: {{ timeout_boot_retries }}
  overall:                 # Maximum time the job can take, not overrideable by the "continue" deployment
    minutes: {{ timeout_overall_minutes }}
    retries: 0
    # no retries possible here

console_patterns:
  session_end:
    regex: >-
      {{ session_end_regex }}
  session_reboot:
    regex: >-
      {{ session_reboot_regex }}
  job_success:
    regex: >-
      {{ job_success_regex }}
  job_warn:
    regex: >-
      {{ job_warn_regex }}

# Environment to deploy
deployment:
  # Initial boot
  start:
    kernel:
      url: '{{ kernel_url }}'
      cmdline: >
        SALAD.machine_id={{ '{{' }} machine_id }}
        console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
        loglevel={{ log_level }} no_hash_pointers
        b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/telegraf-container:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
        b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/machine_registration:latest check"
        b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
        b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
        b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},expiration=pipeline_end,preserve"
        {% for volume in volumes %}
        b2c.volume={{ volume }}
        {% endfor %}
        b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
        {% if cmdline_extras is defined %}
        {{ cmdline_extras }}
        {% endif %}

    initramfs:
      url: '{{ initramfs_url }}'
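The repeated `{{ '{{' }}` constructs in the cmdline above are worth unpacking: this file is itself a Jinja2 template that, once rendered, apparently still needs to contain literal `{{ ... }}` placeholders (machine_id, local_tty_device, and so on) for a later templating pass on the executor side. A minimal sketch (not mesa's code) of the escaping trick:

```python
# Rendering a Jinja2 template whose *output* must itself contain literal
# "{{ ... }}" placeholders for a second templating pass.
from jinja2 import Template

# "{{ '{{' }}" is an expression that renders the two-character string "{{",
# so " machine_id }}" survives into the output as plain text instead of
# being expanded now. A bare "}}" outside an expression is literal in Jinja2.
t = Template("SALAD.machine_id={{ '{{' }} machine_id }} loglevel={{ log_level }}")
print(t.render(log_level=7))
# -> SALAD.machine_id={{ machine_id }} loglevel=7
```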
@@ -1,101 +0,0 @@

#!/usr/bin/env python3

# Copyright © 2022 Valve Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from jinja2 import Environment, FileSystemLoader
from argparse import ArgumentParser
from os import environ, path


parser = ArgumentParser()
parser.add_argument('--ci-job-id')
parser.add_argument('--container-cmd')
parser.add_argument('--initramfs-url')
parser.add_argument('--job-success-regex')
parser.add_argument('--job-warn-regex')
parser.add_argument('--kernel-url')
parser.add_argument('--log-level', type=int)
parser.add_argument('--poweroff-delay', type=int)
parser.add_argument('--session-end-regex')
parser.add_argument('--session-reboot-regex')
parser.add_argument('--tags', nargs='?', default='')
parser.add_argument('--template', default='b2c.yml.jinja2.jinja2')
parser.add_argument('--timeout-boot-minutes', type=int)
parser.add_argument('--timeout-boot-retries', type=int)
parser.add_argument('--timeout-first-minutes', type=int)
parser.add_argument('--timeout-first-retries', type=int)
parser.add_argument('--timeout-minutes', type=int)
parser.add_argument('--timeout-overall-minutes', type=int)
parser.add_argument('--timeout-retries', type=int)
parser.add_argument('--job-volume-exclusions', nargs='?', default='')
parser.add_argument('--volume', action='append')
parser.add_argument('--mount-volume', action='append')
parser.add_argument('--local-container', default=environ.get('B2C_LOCAL_CONTAINER', 'alpine:latest'))
parser.add_argument('--working-dir')
args = parser.parse_args()

env = Environment(loader=FileSystemLoader(path.dirname(args.template)),
                  trim_blocks=True, lstrip_blocks=True)

template = env.get_template(path.basename(args.template))

values = {}
values['ci_job_id'] = args.ci_job_id
values['container_cmd'] = args.container_cmd
values['initramfs_url'] = args.initramfs_url
values['job_success_regex'] = args.job_success_regex
values['job_warn_regex'] = args.job_warn_regex
values['kernel_url'] = args.kernel_url
values['log_level'] = args.log_level
values['poweroff_delay'] = args.poweroff_delay
values['session_end_regex'] = args.session_end_regex
values['session_reboot_regex'] = args.session_reboot_regex
values['tags'] = args.tags
values['template'] = args.template
values['timeout_boot_minutes'] = args.timeout_boot_minutes
values['timeout_boot_retries'] = args.timeout_boot_retries
values['timeout_first_minutes'] = args.timeout_first_minutes
values['timeout_first_retries'] = args.timeout_first_retries
values['timeout_minutes'] = args.timeout_minutes
values['timeout_overall_minutes'] = args.timeout_overall_minutes
values['timeout_retries'] = args.timeout_retries
if len(args.job_volume_exclusions) > 0:
    exclusions = args.job_volume_exclusions.split(",")
    values['job_volume_exclusions'] = [excl for excl in exclusions if len(excl) > 0]
if args.volume is not None:
    values['volumes'] = args.volume
if args.mount_volume is not None:
    values['mount_volumes'] = args.mount_volume
values['working_dir'] = args.working_dir

assert(len(args.local_container) > 0)
values['local_container'] = args.local_container.replace(
    # Use the gateway's pull-through registry cache to reduce load on fd.o.
    'registry.freedesktop.org', '{{ fdo_proxy_registry }}'
)

if 'B2C_KERNEL_CMDLINE_EXTRAS' in environ:
    values['cmdline_extras'] = environ['B2C_KERNEL_CMDLINE_EXTRAS']

f = open(path.splitext(path.basename(args.template))[0], "w")
f.write(template.render(values))
f.close()
@@ -1,17 +0,0 @@

#!/bin/bash

if [ -z "$BM_POE_INTERFACE" ]; then
  echo "Must supply the PoE Interface to power down"
  exit 1
fi

if [ -z "$BM_POE_ADDRESS" ]; then
  echo "Must supply the PoE Switch host"
  exit 1
fi

SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
SNMP_ON="i 1"
SNMP_OFF="i 4"

snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
@@ -1,21 +0,0 @@

#!/bin/bash

if [ -z "$BM_POE_INTERFACE" ]; then
  echo "Must supply the PoE Interface to power up"
  exit 1
fi

if [ -z "$BM_POE_ADDRESS" ]; then
  echo "Must supply the PoE Switch host"
  exit 1
fi

set -ex

SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
SNMP_ON="i 1"
SNMP_OFF="i 4"

snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
sleep 3s
snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON
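Both scripts write the same SNMP object: the fixed OID prefix `1.3.6.1.4.1.9.9.402.1.2.1.1.1` (a Cisco enterprise-tree object, per the `1.3.6.1.4.1.9` prefix) plus the switch interface index, with integer 1 for on and 4 for off. A minimal sketch (not mesa's code) of the same power-cycle in Python, assuming the net-snmp `snmpset` CLI is installed; the host name and interface index here are made-up examples, and the `mesaci` community string is taken from the scripts above:

```python
import subprocess
import time

def set_poe_port(host, interface, value, community="mesaci"):
    # Cisco PoE port-state object for this interface index.
    oid = "1.3.6.1.4.1.9.9.402.1.2.1.1.1.%s" % interface
    # -r 3: three retries, -t 10: 10 s timeout, mirroring the shell scripts.
    subprocess.run(["snmpset", "-v2c", "-r", "3", "-t", "10",
                    "-c", community, host, oid, "i", str(value)], check=True)

set_poe_port("poe-switch.local", "17", 4)  # power the port off
time.sleep(3)
set_poe_port("poe-switch.local", "17", 1)  # power it back on
```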
@@ -90,8 +90,7 @@ echo "$BM_CMDLINE" > /tftp/cmdline

set +e
python3 $BM/cros_servo_run.py \
        --cpu $BM_SERIAL \
        --ec $BM_SERIAL_EC \
        --test-timeout ${TEST_PHASE_TIMEOUT:-20}
        --ec $BM_SERIAL_EC
ret=$?
set -e
@@ -31,18 +31,52 @@ import threading


class CrosServoRun:
    def __init__(self, cpu, ec, test_timeout):
    def __init__(self, cpu, ec):
        # Merged FIFO for the two serial buffers, fed by threads.
        self.serial_queue = queue.Queue()
        self.sentinel = object()
        self.threads_done = 0

        self.ec_ser = SerialBuffer(
            ec, "results/serial-ec.txt", "R SERIAL-EC> ")
        self.cpu_ser = SerialBuffer(
            cpu, "results/serial.txt", "R SERIAL-CPU> ")
        # Merge the EC serial into the cpu_ser's line stream so that we can
        # effectively poll on both at the same time and not have to worry about
        self.ec_ser = SerialBuffer(
            ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
        self.test_timeout = test_timeout

        self.iter_feed_ec = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
        self.iter_feed_ec.start()

        self.iter_feed_cpu = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
        self.iter_feed_cpu.start()

    def close(self):
        self.ec_ser.close()
        self.cpu_ser.close()
        self.iter_feed_ec.join()
        self.iter_feed_cpu.join()

    # Feed lines from our serial queues into the merged queue, marking when our
    # input is done.
    def iter_feed_queue(self, it):
        for i in it:
            self.serial_queue.put(i)
        self.serial_queue.put(self.sentinel)

    # Return the next line from the queue, counting how many threads have
    # terminated and joining when done
    def get_serial_queue_line(self):
        line = self.serial_queue.get()
        if line == self.sentinel:
            self.threads_done = self.threads_done + 1
            if self.threads_done == 2:
                self.iter_feed_cpu.join()
                self.iter_feed_ec.join()
        return line

    # Returns an iterator for getting the next line.
    def serial_queue_lines(self):
        return iter(self.get_serial_queue_line, self.sentinel)

    def ec_write(self, s):
        print("W SERIAL-EC> %s" % s)

@@ -62,36 +96,23 @@ class CrosServoRun:

        self.ec_write("\n")
        self.ec_write("reboot\n")

        bootloader_done = False
        # This is emitted right when the bootloader pauses to check for input.
        # Emit a ^N character to request network boot, because we don't have a
        # direct-to-netboot firmware on cheza.
        for line in self.cpu_ser.lines(timeout=120, phase="bootloader"):
        for line in self.serial_queue_lines():
            if re.search("load_archive: loading locale_en.bin", line):
                self.cpu_write("\016")
                bootloader_done = True
                break

            # If the board has a netboot firmware and we made it to booting the
            # kernel, proceed to processing of the test run.
            if re.search("Booting Linux", line):
                bootloader_done = True
                break

            # The Cheza boards have issues with failing to bring up power to
            # the system sometimes, possibly dependent on ambient temperature
            # in the farm.
            if re.search("POWER_GOOD not seen in time", line):
                self.print_error(
                    "Detected intermittent poweron failure, restarting run...")
                self.print_error("Detected intermittent poweron failure, restarting run...")
                return 2

        if not bootloader_done:
            print("Failed to make it through bootloader, restarting run...")
            return 2

        tftp_failures = 0
        for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.serial_queue_lines():
            if re.search("---. end Kernel panic", line):
                return 1

@@ -102,15 +123,13 @@ class CrosServoRun:

            if re.search("R8152: Bulk read error 0xffffffbf", line):
                tftp_failures += 1
                if tftp_failures >= 100:
                    self.print_error(
                        "Detected intermittent tftp failure, restarting run...")
                    self.print_error("Detected intermittent tftp failure, restarting run...")
                    return 2

            # There are very infrequent bus errors during power management transitions
            # on cheza, which we don't expect to be the case on future boards.
            if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
                self.print_error(
                    "Detected cheza power management bus error, restarting run...")
                self.print_error("Detected cheza power management bus error, restarting run...")
                return 2

            # If the network device dies, it's probably not graphics's fault, just try again.

@@ -129,8 +148,7 @@ class CrosServoRun:

            # Given that it seems to trigger randomly near a GPU fault and then
            # break many tests after that, just restart the whole run.
            if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
                self.print_error(
                    "Detected cheza power management bus error, restarting run...")
                self.print_error("Detected cheza power management bus error, restarting run...")
                return 2

            if re.search("coreboot.*bootblock starting", line):

@@ -138,10 +156,6 @@ class CrosServoRun:

                    "Detected spontaneous reboot, restarting run...")
                return 2

            if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
                self.print_error("Detected cheza MMU fail, restarting run...")
                return 2

            result = re.search("hwci: mesa: (\S*)", line)
            if result:
                if result.group(1) == "pass":

@@ -149,9 +163,8 @@ class CrosServoRun:

                else:
                    return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result")
        return 2
        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 1


def main():

@@ -160,11 +173,9 @@ def main():

                        help='CPU Serial device', required=True)
    parser.add_argument(
        '--ec', type=str, help='EC Serial device', required=True)
    parser.add_argument(
        '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
    args = parser.parse_args()

    servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
    servo = CrosServoRun(args.cpu, args.ec)

    while True:
        retval = servo.run()
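The merged-FIFO machinery in CrosServoRun above follows a classic pattern: two producer threads feed one queue, each posting a shared sentinel object when its stream ends, and the consumer stops once it has seen one sentinel per producer. A minimal self-contained sketch (not mesa's code) of that pattern:

```python
import queue
import threading

merged = queue.Queue()
SENTINEL = object()

def feed(lines):
    """Producer: push every line, then mark this stream as finished."""
    for line in lines:
        merged.put(line)
    merged.put(SENTINEL)

# Stand-ins for the CPU and EC serial readers.
streams = [["cpu: boot", "cpu: test"], ["ec: power ok"]]
threads = [threading.Thread(target=feed, args=(s,), daemon=True) for s in streams]
for t in threads:
    t.start()

done = 0
while done < len(streams):   # stop after one sentinel per producer
    item = merged.get()
    if item is SENTINEL:
        done += 1
        continue
    print(item)               # both streams interleaved on a single consumer

for t in threads:
    t.join()
```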
@@ -133,7 +133,6 @@ fi

set +e
$BM/fastboot_run.py \
  --dev="$BM_SERIAL" \
  --test-timeout ${TEST_PHASE_TIMEOUT:-20} \
  --fbserial="$BM_FASTBOOT_SERIAL" \
  --powerup="$BM_POWERUP" \
  --powerdown="$BM_POWERDOWN"
@@ -22,21 +22,19 @@

# IN THE SOFTWARE.

import argparse
import subprocess
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading


class FastbootRun:
    def __init__(self, args, test_timeout):
    def __init__(self, args):
        self.powerup = args.powerup
        self.ser = SerialBuffer(
            args.dev, "results/serial-output.txt", "R SERIAL> ")
        self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(
            ser=args.fbserial)
        self.test_timeout = test_timeout
        # We would like something like a 1 minute timeout, but the piglit traces
        # jobs stall out for long periods of time.
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
        self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)

    def close(self):
        self.ser.close()

@@ -46,40 +44,34 @@ class FastbootRun:

        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd, timeout=60):
    def logged_system(self, cmd):
        print("Running '{}'".format(cmd))
        try:
            return subprocess.call(cmd, shell=True, timeout=timeout)
        except subprocess.TimeoutExpired:
            self.print_error("timeout, restarting run...")
            return 2
        return os.system(cmd)

    def run(self):
        if ret := self.logged_system(self.powerup):
            return ret
        if self.logged_system(self.powerup) != 0:
            return 1

        fastboot_ready = False
        for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
        for line in self.ser.lines():
            if re.search("fastboot: processing commands", line) or \
                    re.search("Listening for fastboot command on", line):
                re.search("Listening for fastboot command on", line):
                fastboot_ready = True
                break

            if re.search("data abort", line):
                self.print_error(
                    "Detected crash during boot, restarting run...")
                self.print_error("Detected crash during boot, restarting run...")
                return 2

        if not fastboot_ready:
            self.print_error(
                "Failed to get to fastboot prompt, restarting run...")
            self.print_error("Failed to get to fastboot prompt, restarting run...")
            return 2

        if ret := self.logged_system(self.fastboot):
            return ret
        if self.logged_system(self.fastboot) != 0:
            return 1

        print_more_lines = -1
        for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.ser.lines():
            if print_more_lines == 0:
                return 2
            if print_more_lines > 0:

@@ -91,8 +83,7 @@ class FastbootRun:

            # The db820c boards intermittently reboot. Just restart the run
            # when if we see a reboot after we got past fastboot.
            if re.search("PON REASON", line):
                self.print_error(
                    "Detected spontaneous reboot, restarting run...")
                self.print_error("Detected spontaneous reboot, restarting run...")
                return 2

            # db820c sometimes wedges around iommu fault recovery

@@ -126,26 +117,18 @@ class FastbootRun:

            else:
                return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result, restarting run...")
        self.print_error("Reached the end of the CPU serial log without finding a result, restarting run...")
        return 2


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str,
                        help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str,
                        help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str,
                        help='fastboot serial number of the board', required=True)
    parser.add_argument('--test-timeout', type=int,
                        help='Test phase timeout (minutes)', required=True)
    parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
    args = parser.parse_args()

    fastboot = FastbootRun(args, args.test_timeout * 60)
    fastboot = FastbootRun(args)

    while True:
        retval = fastboot.run()

@@ -153,12 +136,11 @@ def main():

        if retval != 2:
            break

        fastboot = FastbootRun(args, args.test_timeout * 60)
        fastboot = FastbootRun(args)

    fastboot.logged_system(args.powerdown)

    sys.exit(retval)


if __name__ == '__main__':
    main()
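The interesting difference between the two logged_system variants above is the timeout handling: `subprocess.call()` accepts a timeout and raises `subprocess.TimeoutExpired` (killing the child first), while `os.system()` has no equivalent. A minimal sketch (not mesa's code) of the timeout-capable runner:

```python
import subprocess

def run_with_timeout(cmd, timeout=60):
    """Run a shell command; return its exit status, or 2 on timeout."""
    try:
        # On timeout, subprocess.call kills the child and raises.
        return subprocess.call(cmd, shell=True, timeout=timeout)
    except subprocess.TimeoutExpired:
        print("timeout running {!r}".format(cmd))
        return 2

print(run_with_timeout("sleep 5", timeout=1))  # -> 2 after about 1 second
```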
@@ -8,8 +8,8 @@ relay = sys.argv[2]

# our relays are "off" means "board is powered".
mode_swap = {
    "on": "off",
    "off": "on",
    "on" : "off",
    "off" : "on",
}
mode = mode_swap[mode]
@@ -20,6 +20,18 @@ if [ -z "$BM_POE_ADDRESS" ]; then

  exit 1
fi

if [ -z "$BM_POE_USERNAME" ]; then
  echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the PoE switch username."
  exit 1
fi

if [ -z "$BM_POE_PASSWORD" ]; then
  echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the PoE switch password."
  exit 1
fi

if [ -z "$BM_POE_INTERFACE" ]; then
  echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the PoE switch interface where the device is connected."

@@ -95,25 +107,11 @@ fi

# Install kernel modules (it could be either in /lib/modules or
# /usr/lib/modules, but we want to install in the latter)
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a --delete $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
[ -d $BM_BOOTFS/lib/modules ] && rsync -a --delete $BM_BOOTFS/lib/modules/ /nfs/usr/lib/modules/

# Install kernel image + bootloader files
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/

# Set up the pxelinux config for Jetson Nano
mkdir -p /tftp/pxelinux.cfg
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
PROMPT 0
TIMEOUT 30
DEFAULT primary
MENU TITLE jetson nano boot options
LABEL primary
  MENU LABEL CI kernel on TFTP
  LINUX Image
  FDT tegra210-p3450-0000.dtb
  APPEND \${cbootargs} $BM_CMDLINE
EOF
rsync -a --delete $BM_BOOTFS/boot/ /tftp/

# Create the rootfs in the NFS directory
mkdir -p /nfs/results

@@ -125,13 +123,13 @@ echo "$BM_CMDLINE" > /tftp/cmdline.txt

printf "$BM_BOOTCONFIG" >> /tftp/config.txt

set +e
ATTEMPTS=10
ATTEMPTS=2
while [ $((ATTEMPTS--)) -gt 0 ]; do
  python3 $BM/poe_run.py \
    --dev="$BM_SERIAL" \
    --powerup="$BM_POWERUP" \
    --powerdown="$BM_POWERDOWN" \
    --test-timeout ${TEST_PHASE_TIMEOUT:-20}
    --timeout="${BM_POE_TIMEOUT:-60}"
  ret=$?

  if [ $ret -eq 2 ]; then
@@ -28,14 +28,11 @@ from serial_buffer import SerialBuffer

import sys
import threading


class PoERun:
    def __init__(self, args, test_timeout):
    def __init__(self, args):
        self.powerup = args.powerup
        self.powerdown = args.powerdown
        self.ser = SerialBuffer(
            args.dev, "results/serial-output.txt", "")
        self.test_timeout = test_timeout
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)

    def print_error(self, message):
        RED = '\033[0;31m'

@@ -51,17 +48,16 @@ class PoERun:

            return 1

        boot_detected = False
        for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
        for line in self.ser.lines():
            if re.search("Booting Linux", line):
                boot_detected = True
                break

        if not boot_detected:
            self.print_error(
                "Something wrong; couldn't detect the boot start up sequence")
            self.print_error("Something wrong; couldn't detect the boot start up sequence")
            return 2

        for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.ser.lines():
            if re.search("---. end Kernel panic", line):
                return 1

@@ -70,10 +66,6 @@ class PoERun:

                self.print_error("Memory overflow in the binner; GPU hang")
                return 1

            if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
                self.print_error("nouveau jetson boot bug, retrying.")
                return 2

            result = re.search("hwci: mesa: (\S*)", line)
            if result:
                if result.group(1) == "pass":

@@ -81,30 +73,24 @@ class PoERun:

            else:
                return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result")
        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 2


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str,
                        help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str,
                        help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str,
                        help='shell command for powering off', required=True)
    parser.add_argument(
        '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
    parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--timeout', type=int, default=60,
                        help='time in seconds to wait for activity', required=False)
    args = parser.parse_args()

    poe = PoERun(args, args.test_timeout * 60)
    poe = PoERun(args)
    retval = poe.run()

    poe.logged_system(args.powerdown)

    sys.exit(retval)


if __name__ == '__main__':
    main()
@@ -13,7 +13,6 @@ cp $CI_COMMON/init*.sh $rootfs_dst/

cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"

cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/

set +x
@@ -28,9 +28,8 @@ import serial

import threading
import time


class SerialBuffer:
    def __init__(self, dev, filename, prefix, timeout=None, line_queue=None):
    def __init__(self, dev, filename, prefix, timeout = None):
        self.filename = filename
        self.dev = dev

@@ -42,13 +41,7 @@ class SerialBuffer:

            self.serial = None

        self.byte_queue = queue.Queue()
        # allow multiple SerialBuffers to share a line queue so you can merge
        # servo's CPU and EC streams into one thing to watch the boot/test
        # progress on.
        if line_queue:
            self.line_queue = line_queue
        else:
            self.line_queue = queue.Queue()
        self.line_queue = queue.Queue()
        self.prefix = prefix
        self.timeout = timeout
        self.sentinel = object()

@@ -137,30 +130,14 @@ class SerialBuffer:

                self.line_queue.put(line)
                line = bytearray()

    def lines(self, timeout=None, phase=None):
        start_time = time.monotonic()
        while True:
            read_timeout = None
            if timeout:
                read_timeout = timeout - (time.monotonic() - start_time)
                if read_timeout <= 0:
                    print("read timeout waiting for serial during {}".format(phase))
                    self.close()
                    break
    def get_line(self):
        line = self.line_queue.get()
        if line == self.sentinel:
            self.lines_thread.join()
        return line

            try:
                line = self.line_queue.get(timeout=read_timeout)
            except queue.Empty:
                print("read timeout waiting for serial during {}".format(phase))
                self.close()
                break

            if line == self.sentinel:
                print("End of serial output")
                self.lines_thread.join()
                break

            yield line
    def lines(self):
        return iter(self.get_line, self.sentinel)


def main():
@@ -28,8 +28,8 @@

import sys
import telnetlib

host = sys.argv[1]
port = sys.argv[2]
host=sys.argv[1]
port=sys.argv[2]

tn = telnetlib.Telnet(host, port, 1000000)
@@ -1,303 +0,0 @@

#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
#   Tomeu Vizoso <tomeu.vizoso@collabora.com>
#   David Heidelberg <david.heidelberg@collabora.com>
#
# TODO GraphQL for dependencies
# SPDX-License-Identifier: MIT

"""
Helper script to restrict running only required CI jobs
and show the job(s) logs.
"""

from typing import Optional
from functools import partial
from concurrent.futures import ThreadPoolExecutor

import os
import re
import time
import argparse
import sys
import gitlab

from colorama import Fore, Style

REFRESH_WAIT_LOG = 10
REFRESH_WAIT_JOBS = 6

URL_START = "\033]8;;"
URL_END = "\033]8;;\a"

STATUS_COLORS = {
    "created": "",
    "running": Fore.BLUE,
    "success": Fore.GREEN,
    "failed": Fore.RED,
    "canceled": Fore.MAGENTA,
    "manual": "",
    "pending": "",
    "skipped": "",
}

# TODO: This hardcoded list should be replaced by querying the pipeline's
# dependency graph to see which jobs the target jobs need
DEPENDENCIES = [
    "debian/x86_build-base",
    "debian/x86_build",
    "debian/x86_test-base",
    "debian/x86_test-gl",
    "debian/arm_build",
    "debian/arm_test",
    "kernel+rootfs_amd64",
    "kernel+rootfs_arm64",
    "kernel+rootfs_armhf",
    "debian-testing",
    "debian-arm64",
]

COMPLETED_STATUSES = ["success", "failed"]


def get_gitlab_project(glab, name: str):
    """Finds a specified gitlab project for given user"""
    glab.auth()
    username = glab.user.username
    return glab.projects.get(f"{username}/mesa")


def wait_for_pipeline(project, sha: str):
    """await until pipeline appears in Gitlab"""
    print("⏲ for the pipeline to appear..", end="")
    while True:
        pipelines = project.pipelines.list(sha=sha)
        if pipelines:
            print("", flush=True)
            return pipelines[0]
        print("", end=".", flush=True)
        time.sleep(1)


def print_job_status(job) -> None:
    """It prints a nice, colored job status with a link to the job."""
    if job.status == "canceled":
        return

    print(
        STATUS_COLORS[job.status]
        + "🞋 job "
        + URL_START
        + f"{job.web_url}\a{job.name}"
        + URL_END
        + f" :: {job.status}"
        + Style.RESET_ALL
    )


def print_job_status_change(job) -> None:
    """It reports job status changes."""
    if job.status == "canceled":
        return

    print(
        STATUS_COLORS[job.status]
        + "🗘 job "
        + URL_START
        + f"{job.web_url}\a{job.name}"
        + URL_END
        + f" has new status: {job.status}"
        + Style.RESET_ALL
    )


def pretty_wait(sec: int) -> None:
    """shows progressbar in dots"""
    for val in range(sec, 0, -1):
        print(f"⏲ {val} seconds", end="\r")
        time.sleep(1)


def monitor_pipeline(
    project, pipeline, target_job: Optional[str], dependencies, force_manual: bool
) -> tuple[Optional[int], Optional[int]]:
    """Monitors pipeline and delegate canceling jobs"""
    statuses = {}
    target_statuses = {}

    if not dependencies:
        dependencies = []
    dependencies.extend(DEPENDENCIES)

    if target_job:
        target_jobs_regex = re.compile(target_job.strip())

    while True:
        to_cancel = []
        for job in pipeline.jobs.list(all=True, sort="desc"):
            # target jobs
            if target_job and target_jobs_regex.match(job.name):
                if force_manual and job.status == "manual":
                    enable_job(project, job, True)

                if (job.id not in target_statuses) or (
                    job.status not in target_statuses[job.id]
                ):
                    print_job_status_change(job)
                    target_statuses[job.id] = job.status
                else:
                    print_job_status(job)

                continue

            # all jobs
            if (job.id not in statuses) or (job.status not in statuses[job.id]):
                print_job_status_change(job)
                statuses[job.id] = job.status

            # dependencies and cancelling the rest
            if job.name in dependencies:
                if job.status == "manual":
                    enable_job(project, job, False)

            elif target_job and job.status not in [
                "canceled",
                "success",
                "failed",
                "skipped",
            ]:
                to_cancel.append(job)

        if target_job:
            cancel_jobs(project, to_cancel)

        print("---------------------------------", flush=False)

        if len(target_statuses) == 1 and {"running"}.intersection(
            target_statuses.values()
        ):
            return next(iter(target_statuses)), None

        if {"failed", "canceled"}.intersection(target_statuses.values()):
            return None, 1

        if {"success", "manual"}.issuperset(target_statuses.values()):
            return None, 0

        pretty_wait(REFRESH_WAIT_JOBS)


def enable_job(project, job, target: bool) -> None:
    """enable manual job"""
    pjob = project.jobs.get(job.id, lazy=True)
    pjob.play()
    if target:
        jtype = "🞋 "
    else:
        jtype = "(dependency)"
    print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)


def cancel_job(project, job) -> None:
    """Cancel GitLab job"""
    pjob = project.jobs.get(job.id, lazy=True)
    pjob.cancel()
    print(f"♲ {job.name}")


def cancel_jobs(project, to_cancel) -> None:
    """Cancel unwanted GitLab jobs"""
    if not to_cancel:
        return

    with ThreadPoolExecutor(max_workers=6) as exe:
        part = partial(cancel_job, project)
        exe.map(part, to_cancel)


def print_log(project, job_id) -> None:
    """Print job log into output"""
    printed_lines = 0
    while True:
        job = project.jobs.get(job_id)

        # GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
        lines = job.trace().decode("unicode_escape").splitlines()
        for line in lines[printed_lines:]:
            print(line)
        printed_lines = len(lines)

        if job.status in COMPLETED_STATUSES:
            print(Fore.GREEN + f"Job finished: {job.web_url}" + Style.RESET_ALL)
            return
        pretty_wait(REFRESH_WAIT_LOG)


def parse_args() -> None:
    """Parse args"""
    parser = argparse.ArgumentParser(
        description="Tool to trigger a subset of container jobs "
        + "and monitor the progress of a test job",
        epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
        + '--target ".*traces" ',
    )
    parser.add_argument("--target", metavar="target-job", help="Target job")
    parser.add_argument("--deps", nargs="+", help="Job dependencies")
    parser.add_argument(
        "--rev", metavar="revision", help="repository git revision", required=True
    )
    parser.add_argument(
        "--token",
        metavar="token",
        help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
    )
    parser.add_argument(
        "--force-manual", action="store_true", help="Force jobs marked as manual"
    )
    return parser.parse_args()


def read_token(token_arg: Optional[str]) -> str:
    """pick token from args or file"""
    if token_arg:
        return token_arg
    return (
        open(os.path.expanduser("~/.config/gitlab-token"), encoding="utf-8")
        .readline()
        .rstrip()
    )


if __name__ == "__main__":
    try:
        t_start = time.perf_counter()

        args = parse_args()

        token = read_token(args.token)

        gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)

        cur_project = get_gitlab_project(gl, "mesa")

        print(f"Revision: {args.rev}")
        pipe = wait_for_pipeline(cur_project, args.rev)
        print(f"Pipeline: {pipe.web_url}")
        if args.target:
            print("🞋 job: " + Fore.BLUE + args.target + Style.RESET_ALL)
        print(f"Extra dependencies: {args.deps}")
        target_job_id, ret = monitor_pipeline(
            cur_project, pipe, args.target, args.deps, args.force_manual
        )

        if target_job_id:
            print_log(cur_project, target_job_id)

        t_end = time.perf_counter()
        spend_minutes = (t_end - t_start) / 60
        print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes")

        sys.exit(ret)
    except KeyboardInterrupt:
        sys.exit(1)
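The `URL_START`/`URL_END` constants above are OSC 8 escape sequences: terminals that support OSC 8 render the text between the two sequences as a clickable hyperlink, which is how the script makes job names link to their GitLab pages. A minimal sketch (not mesa's code):

```python
URL_START = "\033]8;;"   # OSC 8, followed by the URL, terminated by BEL (\a)
URL_END = "\033]8;;\a"   # an empty URL closes the hyperlink

def link(url, text):
    """Wrap text so supporting terminals show it as a hyperlink to url."""
    return URL_START + url + "\a" + text + URL_END

print("see " + link("https://gitlab.freedesktop.org/mesa/mesa", "the mesa repo"))
```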
@@ -1,2 +0,0 @@

colorama==0.4.5
python-gitlab==3.5.0
@@ -1,561 +0,0 @@
|
||||
# Shared between windows and Linux
|
||||
.build-common:
|
||||
extends: .build-rules
|
||||
# Cancel job if a newer commit is pushed to the same branch
|
||||
interruptible: true
|
||||
artifacts:
|
||||
name: "mesa_${CI_JOB_NAME}"
|
||||
when: always
|
||||
paths:
|
||||
- _build/meson-logs/*.txt
|
||||
- _build/meson-logs/strace
|
||||
- shader-db
|
||||
|
||||
# Just Linux
|
||||
.build-linux:
|
||||
extends: .build-common
|
||||
variables:
|
||||
CCACHE_COMPILERCHECK: "content"
|
||||
CCACHE_COMPRESS: "true"
|
||||
CCACHE_DIR: /cache/mesa/ccache
|
||||
# Use ccache transparently, and print stats before/after
|
||||
before_script:
|
||||
- !reference [default, before_script]
|
||||
- export PATH="/usr/lib/ccache:$PATH"
|
||||
- export CCACHE_BASEDIR="$PWD"
|
||||
- echo -e "\e[0Ksection_start:$(date +%s):ccache_before[collapsed=true]\r\e[0Kccache stats before build"
|
||||
- ccache --show-stats
|
||||
- echo -e "\e[0Ksection_end:$(date +%s):ccache_before\r\e[0K"
|
||||
after_script:
|
||||
- echo -e "\e[0Ksection_start:$(date +%s):ccache_after[collapsed=true]\r\e[0Kccache stats after build"
|
||||
- ccache --show-stats
|
||||
- echo -e "\e[0Ksection_end:$(date +%s):ccache_after\r\e[0K"
|
||||
- !reference [default, after_script]
|
||||
|
||||
.build-windows:
|
||||
extends: .build-common
|
||||
tags:
|
||||
- windows
|
||||
- docker
|
||||
- "2022"
|
||||
- mesa
|
||||
cache:
|
||||
key: ${CI_JOB_NAME}
|
||||
paths:
|
||||
- subprojects/packagecache
|
||||
|
||||
.meson-build:
|
||||
extends:
|
||||
- .build-linux
|
||||
- .use-debian/x86_build
|
||||
stage: build-x86_64
|
||||
variables:
|
||||
LLVM_VERSION: 11
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
|
||||
.meson-build_mingw:
|
||||
extends:
|
||||
- .build-linux
|
||||
- .use-debian/x86_build_mingw
|
||||
- .use-wine
|
||||
stage: build-x86_64
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
|
||||
debian-testing:
|
||||
extends:
|
||||
- .meson-build
|
||||
- .ci-deqp-artifacts
|
||||
variables:
|
||||
UNWIND: "enabled"
|
||||
DRI_LOADERS: >
|
||||
-D glx=dri
|
||||
-D gbm=enabled
|
||||
-D egl=enabled
|
||||
-D platforms=x11
|
||||
GALLIUM_ST: >
|
||||
-D dri3=enabled
|
||||
-D gallium-va=enabled
|
||||
GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915"
|
||||
VULKAN_DRIVERS: "swrast,amd,intel"
|
||||
BUILDTYPE: "debugoptimized"
|
||||
EXTRA_OPTION: >
|
||||
-D spirv-to-dxil=true
|
||||
-D valgrind=false
|
||||
MINIO_ARTIFACT_NAME: mesa-amd64
|
||||
LLVM_VERSION: "13"
|
||||
script:
|
||||
- .gitlab-ci/lava/lava-pytest.sh
|
||||
- .gitlab-ci/meson/build.sh
|
||||
- .gitlab-ci/prepare-artifacts.sh
|
||||
artifacts:
|
||||
reports:
|
||||
junit: artifacts/ci_scripts_report.xml
|
||||
|
||||
debian-testing-asan:
|
||||
extends:
|
||||
- debian-testing
|
||||
variables:
|
||||
C_ARGS: >
|
||||
-Wno-error=stringop-truncation
|
||||
EXTRA_OPTION: >
|
||||
-D b_sanitize=address
|
||||
-D valgrind=false
|
||||
-D tools=dlclose-skip
|
||||
MINIO_ARTIFACT_NAME: ""
|
||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
||||
|
||||
debian-testing-msan:
|
||||
extends:
|
||||
- debian-clang
|
||||
variables:
|
||||
# l_undef is incompatible with msan
|
||||
EXTRA_OPTION:
|
||||
-D b_sanitize=memory
|
||||
-D b_lundef=false
|
||||
MINIO_ARTIFACT_NAME: ""
|
||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
||||
# Don't run all the tests yet:
|
||||
# GLSL has some issues in sexpression reading.
|
||||
# gtest has issues in its test initialization.
|
||||
MESON_TEST_ARGS: "--suite glcpp --suite gallium --suite format"
|
||||
# Freedreno dropped because freedreno tools fail at msan.
|
||||
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
|
||||
VULKAN_DRIVERS: intel,amd,broadcom,virtio-experimental
|
||||
|
||||
debian-clover-testing:
|
||||
extends:
|
||||
- .meson-build
|
||||
- .ci-deqp-artifacts
|
||||
variables:
|
||||
UNWIND: "enabled"
|
||||
DRI_LOADERS: >
|
||||
-D glx=disabled
|
||||
-D egl=disabled
|
||||
-D gbm=disabled
|
||||
GALLIUM_ST: >
|
||||
-D gallium-opencl=icd
|
||||
-D opencl-spirv=true
|
||||
GALLIUM_DRIVERS: "swrast"
|
||||
BUILDTYPE: "debugoptimized"
|
||||
EXTRA_OPTION: >
|
||||
-D valgrind=false
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
- .gitlab-ci/prepare-artifacts.sh
|
||||
|
||||
debian-gallium:
|
||||
extends: .meson-build
|
||||
variables:
|
||||
UNWIND: "enabled"
|
||||
DRI_LOADERS: >
|
||||
-D glx=dri
|
||||
-D gbm=enabled
|
||||
-D egl=enabled
|
||||
-D platforms=x11,wayland
|
||||
GALLIUM_ST: >
|
||||
-D dri3=enabled
|
||||
-D gallium-extra-hud=true
|
||||
-D gallium-vdpau=enabled
|
||||
-D gallium-xvmc=enabled
|
||||
-D gallium-omx=bellagio
|
||||
-D gallium-va=enabled
|
||||
-D gallium-xa=enabled
|
||||
-D gallium-nine=true
|
||||
-D gallium-opencl=disabled
|
||||
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
|
||||
VULKAN_DRIVERS: swrast
|
||||
EXTRA_OPTION: >
|
||||
-D spirv-to-dxil=true
|
||||
-D osmesa=true
|
||||
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,xvmc,lima,panfrost,asahi
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
- .gitlab-ci/run-shader-db.sh
|
||||
|
||||
# Test a release build with -Werror so new warnings don't sneak in.
|
||||
debian-release:
|
||||
extends: .meson-build
|
||||
variables:
|
||||
UNWIND: "enabled"
|
||||
DRI_LOADERS: >
|
||||
-D glx=dri
|
||||
-D gbm=enabled
|
||||
-D egl=enabled
|
||||
-D platforms=x11,wayland
|
||||
GALLIUM_ST: >
|
||||
-D dri3=enabled
|
||||
-D gallium-extra-hud=true
|
||||
-D gallium-vdpau=enabled
|
||||
-D gallium-xvmc=disabled
|
||||
-D gallium-omx=disabled
|
||||
-D gallium-va=enabled
|
||||
-D gallium-xa=enabled
|
||||
-D gallium-nine=false
|
||||
-D gallium-opencl=disabled
|
||||
-D llvm=enabled
|
||||
GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
|
||||
VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental"
|
||||
BUILDTYPE: "release"
|
||||
EXTRA_OPTION: >
|
||||
-D spirv-to-dxil=true
|
||||
-D osmesa=true
|
||||
-D tools=all
|
||||
-D intel-clc=enabled
|
||||
-D imagination-srv=true
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
|
||||
fedora-release:
|
||||
extends:
|
||||
- .meson-build
|
||||
- .use-fedora/x86_build
|
||||
variables:
|
||||
BUILDTYPE: "release"
|
||||
C_ARGS: >
|
||||
-Wno-error=array-bounds
|
||||
-Wno-error=stringop-overread
|
||||
-Wno-error=uninitialized
|
||||
CPP_ARGS: >
|
||||
-Wno-error=array-bounds
|
||||
DRI_LOADERS: >
|
||||
-D glx=dri
|
||||
-D gbm=enabled
|
||||
-D egl=enabled
|
||||
-D glvnd=true
|
||||
-D platforms=x11,wayland
|
||||
EXTRA_OPTION: >
|
||||
-D osmesa=true
|
||||
-D selinux=true
|
||||
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
|
||||
-D intel-clc=enabled
|
||||
-D imagination-srv=true
|
||||
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
|
||||
GALLIUM_ST: >
|
||||
-D dri3=enabled
|
||||
-D gallium-extra-hud=true
|
||||
-D gallium-vdpau=enabled
|
||||
-D gallium-xvmc=disabled
|
||||
-D gallium-omx=disabled
|
||||
-D gallium-va=enabled
|
||||
-D gallium-xa=enabled
|
||||
-D gallium-nine=false
|
||||
-D gallium-opencl=icd
|
||||
-D gles1=disabled
|
||||
-D gles2=enabled
|
||||
-D llvm=enabled
|
||||
-D microsoft-clc=disabled
|
||||
-D shared-llvm=enabled
|
||||
-D vulkan-device-select-layer=true
|
||||
LLVM_VERSION: ""
|
||||
UNWIND: "disabled"
|
||||
VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
|
||||
debian-android:
|
||||
extends:
|
||||
- .meson-cross
|
||||
- .use-debian/android_build
|
||||
variables:
|
||||
UNWIND: "disabled"
|
||||
C_ARGS: >
|
||||
-Wno-error=asm-operand-widths
|
||||
-Wno-error=constant-conversion
|
||||
-Wno-error=enum-conversion
|
||||
-Wno-error=initializer-overrides
|
||||
-Wno-error=missing-braces
|
||||
-Wno-error=sometimes-uninitialized
|
||||
-Wno-error=unused-function
|
||||
CPP_ARGS: >
|
||||
-Wno-error=deprecated-declarations
|
||||
DRI_LOADERS: >
|
||||
-D glx=disabled
|
||||
-D gbm=disabled
|
||||
-D egl=enabled
|
||||
-D platforms=android
|
||||
EXTRA_OPTION: >
|
||||
-D android-stub=true
|
||||
-D llvm=disabled
|
||||
-D platform-sdk-version=29
|
||||
-D valgrind=false
|
||||
GALLIUM_ST: >
|
||||
-D dri3=disabled
|
||||
-D gallium-vdpau=disabled
|
||||
-D gallium-xvmc=disabled
|
||||
-D gallium-omx=disabled
|
||||
-D gallium-va=disabled
|
||||
-D gallium-xa=disabled
|
||||
-D gallium-nine=false
|
||||
-D gallium-opencl=disabled
|
||||
LLVM_VERSION: ""
|
||||
PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files"
|
||||
script:
|
||||
- PKG_CONFIG_PATH=/usr/local/lib/aarch64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/pkgconfig/ CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio-experimental .gitlab-ci/meson/build.sh
|
||||
# x86_64 build:
|
||||
# Can't do Intel because gen_decoder.c currently requires libexpat, which
|
||||
# is not a dependency that AOSP wants to accept. Can't do Radeon Gallium
|
||||
# drivers because they requires LLVM, which we don't have an Android build
|
||||
# of.
|
||||
- PKG_CONFIG_PATH=/usr/local/lib/x86_64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/x86_64-linux-android/pkgconfig/ CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh
|
||||
|
||||
.meson-cross:
|
||||
extends:
|
||||
- .meson-build
|
||||
stage: build-misc
|
||||
variables:
|
||||
UNWIND: "disabled"
|
||||
DRI_LOADERS: >
|
||||
-D glx=dri
|
||||
-D gbm=enabled
|
||||
-D egl=enabled
|
||||
-D platforms=x11
|
||||
-D osmesa=false
|
||||
GALLIUM_ST: >
|
||||
-D dri3=enabled
|
||||
-D gallium-vdpau=disabled
|
||||
-D gallium-xvmc=disabled
|
||||
-D gallium-omx=disabled
|
||||
-D gallium-va=disabled
|
||||
-D gallium-xa=disabled
|
||||
-D gallium-nine=false
|
||||
|
||||
.meson-arm:
|
||||
extends:
|
||||
- .meson-cross
|
||||
- .use-debian/arm_build
|
||||
needs:
|
||||
- debian/arm_build
|
||||
variables:
|
||||
VULKAN_DRIVERS: freedreno,broadcom
|
||||
GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink"
|
||||
BUILDTYPE: "debugoptimized"
|
||||
tags:
|
||||
- aarch64
|
||||
|
||||
debian-armhf:
|
||||
extends:
|
||||
- .meson-arm
|
||||
- .ci-deqp-artifacts
|
||||
variables:
|
||||
CROSS: armhf
|
||||
EXTRA_OPTION: >
|
||||
-D llvm=disabled
|
||||
-D valgrind=false
|
||||
MINIO_ARTIFACT_NAME: mesa-armhf
|
||||
script:
|
||||
- .gitlab-ci/meson/build.sh
|
||||
- .gitlab-ci/prepare-artifacts.sh
|
||||
|
||||
debian-arm64:
  extends:
    - .meson-arm
    - .ci-deqp-artifacts
  variables:
    VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental"
    EXTRA_OPTION: >
      -D llvm=disabled
      -D valgrind=false
      -D imagination-srv=true
    MINIO_ARTIFACT_NAME: mesa-arm64
  script:
    - .gitlab-ci/meson/build.sh
    - .gitlab-ci/prepare-artifacts.sh

debian-arm64-asan:
  extends:
    - debian-arm64
  variables:
    C_ARGS: >
      -Wno-error=stringop-truncation
    EXTRA_OPTION: >
      -D llvm=disabled
      -D b_sanitize=address
      -D valgrind=false
      -D tools=dlclose-skip
    ARTIFACTS_DEBUG_SYMBOLS: 1
    MINIO_ARTIFACT_NAME: mesa-arm64-asan
    MESON_TEST_ARGS: "--no-suite mesa:compiler"

debian-arm64-build-test:
  extends:
    - .meson-arm
    - .ci-deqp-artifacts
  variables:
    VULKAN_DRIVERS: "amd"
    EXTRA_OPTION: >
      -Dtools=panfrost,imagination
  script:
    - .gitlab-ci/meson/build.sh

debian-clang:
  extends: .meson-build
  variables:
    UNWIND: "enabled"
    C_ARGS: >
      -Wno-error=constant-conversion
      -Wno-error=enum-conversion
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=initializer-overrides
      -Wno-error=sometimes-uninitialized
      -Wno-error=unused-function
    CPP_ARGS: >
      -Wno-error=c99-designator
      -Wno-error=deprecated-declarations
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=missing-braces
      -Wno-error=overloaded-virtual
      -Wno-error=tautological-constant-out-of-range-compare
      -Wno-error=unused-const-variable
      -Wno-error=unused-private-field
    DRI_LOADERS: >
      -D glvnd=true
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,swrast,panfrost,imagination-experimental,microsoft-experimental
    EXTRA_OPTIONS:
      -D spirv-to-dxil=true
      -D imagination-srv=true
    CC: clang
    CXX: clang++

windows-vs2019:
  extends:
    - .build-windows
    - .use-windows_build_vs2019
    - .windows-build-rules
  stage: build-misc
  script:
    - pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1
  artifacts:
    paths:
      - _build/meson-logs/*.txt
      - _install/

debian-clover:
  extends: .meson-build
  variables:
    UNWIND: "enabled"
    DRI_LOADERS: >
      -D glx=disabled
      -D egl=disabled
      -D gbm=disabled
    GALLIUM_DRIVERS: "r600,radeonsi"
    GALLIUM_ST: >
      -D dri3=disabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=icd
    EXTRA_OPTION: >
      -D valgrind=false
  script:
    - LLVM_VERSION=9 GALLIUM_DRIVERS=r600,swrast .gitlab-ci/meson/build.sh
    - .gitlab-ci/meson/build.sh

debian-vulkan:
  extends: .meson-build
  variables:
    UNWIND: "disabled"
    DRI_LOADERS: >
      -D glx=disabled
      -D gbm=disabled
      -D egl=disabled
      -D platforms=x11,wayland
      -D osmesa=false
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D b_sanitize=undefined
      -D c_args=-fno-sanitize-recover=all
      -D cpp_args=-fno-sanitize-recover=all
    UBSAN_OPTIONS: "print_stacktrace=1"
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,imagination-experimental,microsoft-experimental
    EXTRA_OPTION: >
      -D vulkan-layers=device-select,overlay
      -D build-aco-tests=true
      -D intel-clc=enabled
      -D imagination-srv=true

debian-i386:
  extends:
    - .meson-cross
    - .use-debian/i386_build
  variables:
    CROSS: i386
    VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
    GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
    EXTRA_OPTION: >
      -D vulkan-layers=device-select,overlay

debian-s390x:
  extends:
    - debian-ppc64el
    - .use-debian/s390x_build
    - .s390x-rules
  tags:
    - kvm
  variables:
    CROSS: s390x
    GALLIUM_DRIVERS: "swrast,zink"
    # The lp_test_blend test times out with LLVM 11
    LLVM_VERSION: 9
    VULKAN_DRIVERS: "swrast"

debian-ppc64el:
  extends:
    - .meson-cross
    - .use-debian/ppc64el_build
    - .ppc64el-rules
  variables:
    CROSS: ppc64el
    GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
    VULKAN_DRIVERS: "amd,swrast"

debian-mingw32-x86_64:
  extends: .meson-build_mingw
  stage: build-misc
  variables:
    UNWIND: "disabled"
    C_ARGS: >
      -Wno-error=format
      -Wno-error=format-extra-args
      -Wno-error=deprecated-declarations
      -Wno-error=unused-function
      -Wno-error=unused-variable
      -Wno-error=unused-but-set-variable
      -Wno-error=unused-value
      -Wno-error=switch
      -Wno-error=parentheses
      -Wno-error=missing-prototypes
      -Wno-error=sign-compare
      -Wno-error=narrowing
      -Wno-error=overflow
    CPP_ARGS: $C_ARGS
    GALLIUM_DRIVERS: "swrast,d3d12,zink"
    VULKAN_DRIVERS: "swrast,amd,microsoft-experimental"
    GALLIUM_ST: >
      -D gallium-opencl=icd
      -D opencl-native=false
      -D opencl-spirv=true
      -D microsoft-clc=enabled
      -D static-libclc=all
      -D llvm=enabled
    EXTRA_OPTION: >
      -D spirv-to-dxil=true
      -D gles1=enabled
      -D gles2=enabled
      -D osmesa=true
      -D cpp_rtti=true
      -D shared-glapi=enabled
      -D zlib=enabled
      --cross-file=.gitlab-ci/x86_64-w64-mingw32

@@ -1,7 +1,6 @@
#!/bin/bash

for var in \
    ACO_DEBUG \
    ASAN_OPTIONS \
    BASE_SYSTEM_FORK_HOST_PREFIX \
    BASE_SYSTEM_MAINLINE_HOST_PREFIX \
@@ -27,6 +26,7 @@ for var in \
    CI_SERVER_URL \
    CROSVM_GALLIUM_DRIVER \
    CROSVM_GPU_ARGS \
    CROSVM_TEST_SCRIPT \
    DEQP_BIN_DIR \
    DEQP_CASELIST_FILTER \
    DEQP_CASELIST_INV_FILTER \
@@ -49,7 +49,6 @@ for var in \
    FDO_UPSTREAM_REPO \
    FD_MESA_DEBUG \
    FLAKES_CHANNEL \
    FREEDRENO_HANGCHECK_MS \
    GALLIUM_DRIVER \
    GALLIVM_PERF \
    GPU_VERSION \
@@ -61,15 +60,12 @@ for var in \
    GTEST_SKIPS \
    HWCI_FREQ_MAX \
    HWCI_KERNEL_MODULES \
    HWCI_KVM \
    HWCI_START_XORG \
    HWCI_TEST_SCRIPT \
    IR3_SHADER_DEBUG \
    JOB_ARTIFACTS_BASE \
    JOB_RESULTS_PATH \
    JOB_ROOTFS_OVERLAY_PATH \
    KERNEL_IMAGE_BASE_URL \
    KERNEL_IMAGE_NAME \
    LD_LIBRARY_PATH \
    LP_NUM_THREADS \
    MESA_BASE_TAG \
@@ -81,12 +77,9 @@ for var in \
    MESA_IMAGE \
    MESA_IMAGE_PATH \
    MESA_IMAGE_TAG \
    MESA_LOADER_DRIVER_OVERRIDE \
    MESA_TEMPLATES_COMMIT \
    MESA_VK_IGNORE_CONFORMANCE_WARNING \
    MESA_SPIRV_LOG_LEVEL \
    MINIO_HOST \
    MINIO_RESULTS_UPLOAD \
    NIR_DEBUG \
    PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
    PAN_MESA_DEBUG \
@@ -99,14 +92,11 @@ for var in \
    PIGLIT_REPLAY_DESCRIPTION_FILE \
    PIGLIT_REPLAY_DEVICE_NAME \
    PIGLIT_REPLAY_EXTRA_ARGS \
    PIGLIT_REPLAY_LOOP_TIMES \
    PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
    PIGLIT_REPLAY_SUBCOMMAND \
    PIGLIT_RESULTS \
    PIGLIT_TESTS \
    PIPELINE_ARTIFACTS_BASE \
    RADV_DEBUG \
    RADV_PERFTEST \
    SKQP_ASSETS_DIR \
    SKQP_BACKENDS \
    TU_DEBUG \
@@ -114,7 +104,6 @@ for var in \
    VK_CPU \
    VK_DRIVER \
    VK_ICD_FILENAMES \
    VKD3D_PROTON_RESULTS \
    ; do
    if [ -n "${!var+x}" ]; then
        echo "export $var=${!var@Q}"

@@ -9,7 +9,6 @@ cd /

mount -t proc none /proc
mount -t sysfs none /sys
mount -t debugfs none /sys/kernel/debug
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts

@@ -1,38 +1,5 @@
#!/bin/sh

# Make sure to kill this script and all of its child processes on exit, since
# any console output may interfere with LAVA signal handling, which is based
# on the console log.
cleanup() {
    if [ "$BACKGROUND_PIDS" = "" ]; then
        return 0
    fi

    set +x
    echo "Killing all child processes"
    for pid in $BACKGROUND_PIDS
    do
        kill "$pid" 2>/dev/null || true
    done

    # Sleep just a little to give enough time for subprocesses to be gracefully
    # killed. Then apply a SIGKILL if necessary.
    sleep 5
    for pid in $BACKGROUND_PIDS
    do
        kill -9 "$pid" 2>/dev/null || true
    done

    BACKGROUND_PIDS=
    set -x
}
trap cleanup INT TERM EXIT

# Space-separated list of the PIDs of the processes started in the background
# by this script
BACKGROUND_PIDS=


# Second-stage init, used to set up devices and our job environment before
# running tests.

@@ -41,31 +8,7 @@ BACKGROUND_PIDS=
set -ex

# Set up any devices required by the jobs
[ -z "$HWCI_KERNEL_MODULES" ] || {
    echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
}

#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
# - svm for AMD-V
#
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
#
if [ "$HWCI_KVM" = "true" ]; then
    unset KVM_KERNEL_MODULE
    grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || {
        grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
    }

    [ -z "${KVM_KERNEL_MODULE}" ] && \
        echo "WARNING: Failed to detect CPU virtualization extensions" || \
        modprobe ${KVM_KERNEL_MODULE}

    mkdir -p /lava-files
    wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \
        "${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
fi
[ -z "$HWCI_KERNEL_MODULES" ] || (echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe)

# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
@@ -93,23 +36,11 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then
    # Disable GPU runtime power management
    GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
    test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
    # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
    # and enable throttling detection & reporting.
    # Additionally, set the upper limit for CPU scaling frequency to 65% of the
    # maximum permitted, as an additional measure to mitigate thermal throttling.
    ./intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi

# Increase the freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
    echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
fi

# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them.)
/capture-devcoredump.sh &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
./capture-devcoredump.sh &

# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
@@ -119,7 +50,6 @@ if [ -n "$HWCI_START_XORG" ]; then
    echo "touch /xorg-started; sleep 100000" > /xorg-script
    env \
        xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

    # Wait for xorg to be ready for connections.
    for i in 1 2 3 4 5; do
@@ -132,34 +62,17 @@ if [ -n "$HWCI_START_XORG" ]; then
fi

RESULT=fail
set +e
sh -c "$HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e

# Let's make sure the results are always stored in the current working directory
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true

[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"

# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup

# upload artifacts
if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
    tar -czf results.tar.gz results/;
    ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}";
    ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz;
if sh $HWCI_TEST_SCRIPT; then
    RESULT=pass
    rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME
fi

# We still need to echo the hwci: mesa message, as some scripts rely on it, such
# as the python ones inside the bare-metal folder
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass
# upload artifacts
MINIO=$(cat /proc/cmdline | tr ' ' '\n' | grep minio_results | cut -d '=' -f 2 || true)
if [ -n "$MINIO" ]; then
    tar -czf results.tar.gz results/;
    ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}";
    ci-fairy minio cp results.tar.gz minio://"$MINIO"/results.tar.gz;
fi

set +x
echo "hwci: mesa: $RESULT"
# Sleep a bit to avoid kernel dump message interleave from LAVA ENDTC signal
sleep 1
exit $EXIT_CODE

@@ -1,758 +0,0 @@
#!/bin/sh
#
# This is a utility script to manage Intel GPU frequencies.
# It can be used for debugging performance problems or trying to obtain a stable
# frequency while benchmarking.
#
# Note the Intel i915 GPU driver allows changing the minimum, maximum and boost
# frequencies in steps of 50 MHz via:
#
# /sys/class/drm/card<n>/<freq_info>
#
# Where <n> is the DRM card index and <freq_info> one of the following:
#
# - gt_max_freq_mhz (enforced maximum freq)
# - gt_min_freq_mhz (enforced minimum freq)
# - gt_boost_freq_mhz (enforced boost freq)
#
# The hardware capabilities can be accessed via:
#
# - gt_RP0_freq_mhz (supported maximum freq)
# - gt_RPn_freq_mhz (supported minimum freq)
# - gt_RP1_freq_mhz (most efficient freq)
#
# The current frequency can be read from:
# - gt_act_freq_mhz (the actual GPU freq)
# - gt_cur_freq_mhz (the last requested freq)
#
# Also note that in addition to GPU management, the script offers the
# possibility to adjust CPU operating frequencies. However, this is currently
# limited to just setting the maximum scaling frequency as a percentage of the
# maximum frequency allowed by the hardware.
#
# Copyright (C) 2022 Collabora Ltd.
# Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
#
# SPDX-License-Identifier: MIT
#
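#
# A quick way to eyeball these interfaces (an illustrative snippet, not part
# of this script) is to read the files directly, e.g. for card0:
#
#   for f in act cur min max boost RP0 RP1 RPn; do
#       cat /sys/class/drm/card0/gt_${f}_freq_mhz
#   done
#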

#
# Constants
#

# GPU
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
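# E.g. expanding the pattern with card index 0 and info "max" gives:
#   /sys/class/drm/card0/gt_max_freq_mhz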
ENF_FREQ_INFO="max min boost"
CAP_FREQ_INFO="RP0 RPn RP1"
ACT_FREQ_INFO="act cur"
THROTT_DETECT_SLEEP_SEC=2
THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid

# CPU
CPU_SYSFS_PREFIX=/sys/devices/system/cpu
CPU_PSTATE_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/intel_pstate/%s"
CPU_FREQ_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/cpu%s/cpufreq/%s_freq"
CAP_CPU_FREQ_INFO="cpuinfo_max cpuinfo_min"
ENF_CPU_FREQ_INFO="scaling_max scaling_min"
ACT_CPU_FREQ_INFO="scaling_cur"
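# E.g. expanding CPU_FREQ_SYSFS_PATTERN with CPU index 0 and info "scaling_max"
# gives: /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq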

#
# Global variables.
#
unset INTEL_DRM_CARD_INDEX
unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ
unset SET_MIN_FREQ SET_MAX_FREQ
unset MONITOR_FREQ
unset CPU_SET_MAX_FREQ
unset DETECT_THROTT
unset DRY_RUN

#
# Simple printf based stderr logger.
#
log() {
    local msg_type=$1

    shift
    printf "%s: %s: " "${msg_type}" "${0##*/}" >&2
    printf "$@" >&2
    printf "\n" >&2
}

#
# Helper to print sysfs path for the given card index and freq info.
#
# arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above
# arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX
#
print_freq_sysfs_path() {
    printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1"
}

#
# Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card.
#
identify_intel_gpu() {
    local i=0 vendor path

    while [ ${i} -lt 16 ]; do
        [ -c "/dev/dri/card$i" ] || {
            i=$((i + 1))
            continue
        }

        path=$(print_freq_sysfs_path "" ${i})
        path=${path%/*}/device/vendor

        [ -r "${path}" ] && read vendor < "${path}" && \
            [ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0

        i=$((i + 1))
    done

    return 1
}

#
# Read the specified freq info from sysfs.
#
# arg1: Flag (y/n) to also enable printing the freq info.
# arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above
# return: Global variable(s) FREQ_${arg} containing the requested information
#
read_freq_info() {
    local var val info path print=0 ret=0

    [ "$1" = "y" ] && print=1
    shift

    while [ $# -gt 0 ]; do
        info=$1
        shift
        var=FREQ_${info}
        path=$(print_freq_sysfs_path "${info}")

        [ -r ${path} ] && read ${var} < ${path} || {
            log ERROR "Failed to read freq info from: %s" "${path}"
            ret=1
            continue
        }

        # Check the value that was read, not the variable name, which is
        # never empty.
        eval val=\$${var}
        [ -n "${val}" ] || {
            log ERROR "Got empty freq info from: %s" "${path}"
            ret=1
            continue
        }

        [ ${print} -eq 1 ] && printf "%6s: %4s MHz\n" "${info}" "${val}"
    done

    return ${ret}
}

#
# Display requested info.
#
print_freq_info() {
    local req_freq

    [ -n "${GET_CAP_FREQ}" ] && {
        printf "* Hardware capabilities\n"
        read_freq_info y ${CAP_FREQ_INFO}
        printf "\n"
    }

    [ -n "${GET_ENF_FREQ}" ] && {
        printf "* Enforcements\n"
        read_freq_info y ${ENF_FREQ_INFO}
        printf "\n"
    }

    [ -n "${GET_ACT_FREQ}" ] && {
        printf "* Actual\n"
        read_freq_info y ${ACT_FREQ_INFO}
        printf "\n"
    }
}

#
# Helper to print frequency value as requested by user via '-s, --set' option.
# arg1: user requested freq value
#
compute_freq_set() {
    local val

    case "$1" in
    +)
        val=${FREQ_RP0}
        ;;
    -)
        val=${FREQ_RPn}
        ;;
    *%)
        val=$((${1%?} * ${FREQ_RP0} / 100))
        # Adjust freq to comply with 50 MHz increments
        val=$((val / 50 * 50))
        ;;
    *[!0-9]*)
        log ERROR "Cannot set freq to invalid value: %s" "$1"
        return 1
        ;;
    "")
        log ERROR "Cannot set freq to unspecified value"
        return 1
        ;;
    *)
        # Adjust freq to comply with 50 MHz increments
        val=$(($1 / 50 * 50))
        ;;
    esac

    printf "%s" "${val}"
}
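
#
# Worked example for the rounding above (hypothetical values): with
# FREQ_RP0=1100, compute_freq_set "64%" yields 1100 * 64 / 100 = 704, which
# the 50 MHz adjustment truncates to 700.
#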

#
# Helper for set_freq().
#
set_freq_max() {
    log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}"

    read_freq_info n min || return $?

    [ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && {
        log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
            "${SET_MAX_FREQ}" "${FREQ_RP0}"
        return 1
    }

    [ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && {
        log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
            "${SET_MAX_FREQ}" "${FREQ_RPn}"
        return 1
    }

    [ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && {
        log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \
            "${SET_MAX_FREQ}" "${FREQ_min}"
        return 1
    }

    [ -z "${DRY_RUN}" ] || return 0

    printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
        $(print_freq_sysfs_path boost) > /dev/null
    [ $? -eq 0 ] || {
        log ERROR "Failed to set GPU max frequency"
        return 1
    }
}

#
# Helper for set_freq().
#
set_freq_min() {
    log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}"

    read_freq_info n max || return $?

    [ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && {
        log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \
            "${SET_MIN_FREQ}" "${FREQ_max}"
        return 1
    }

    [ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && {
        log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
            "${SET_MIN_FREQ}" "${FREQ_RPn}"
        return 1
    }

    [ -z "${DRY_RUN}" ] || return 0

    printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min)
    [ $? -eq 0 ] || {
        log ERROR "Failed to set GPU min frequency"
        return 1
    }
}

#
# Set min or max or both GPU frequencies to the user indicated values.
#
set_freq() {
    # Get hw max & min frequencies
    read_freq_info n RP0 RPn || return $?

    [ -z "${SET_MAX_FREQ}" ] || {
        SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
        [ -z "${SET_MAX_FREQ}" ] && return 1
    }

    [ -z "${SET_MIN_FREQ}" ] || {
        SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}")
        [ -z "${SET_MIN_FREQ}" ] && return 1
    }

    #
    # Ensure correct operation order, to avoid setting min freq
    # to a value which is larger than max freq.
    #
    # E.g.:
    # crt_min=crt_max=600; new_min=new_max=700
    # > operation order: max=700; min=700
    #
    # crt_min=crt_max=600; new_min=new_max=500
    # > operation order: min=500; max=500
    #
    if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then
        [ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && {
            log ERROR "Cannot set GPU max freq to be less than min freq"
            return 1
        }

        read_freq_info n min || return $?

        if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then
            set_freq_min || return $?
            set_freq_max
        else
            set_freq_max || return $?
            set_freq_min
        fi
    elif [ -n "${SET_MAX_FREQ}" ]; then
        set_freq_max
    elif [ -n "${SET_MIN_FREQ}" ]; then
        set_freq_min
    else
        log ERROR "Unexpected call to set_freq()"
        return 1
    fi
}

#
# Helper for detect_throttling().
#
get_thrott_detect_pid() {
    [ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0

    local pid
    read pid < ${THROTT_DETECT_PID_FILE_PATH} || {
        log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}"
        return 1
    }

    local proc_path=/proc/${pid:-invalid}/cmdline
    [ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && {
        printf "%s" "${pid}"
        return 0
    }

    # Remove orphaned PID file
    rm -rf ${THROTT_DETECT_PID_FILE_PATH}
    return 1
}

#
# Control detection and reporting of GPU throttling events.
# arg1: start - run throttle detector in background
#       stop - stop throttle detector process, if any
#       status - verify if throttle detector is running
#
detect_throttling() {
    local pid
    pid=$(get_thrott_detect_pid)

    case "$1" in
    status)
        printf "Throttling detector is "
        [ -z "${pid}" ] && printf "not running\n" && return 0
        printf "running (pid=%s)\n" ${pid}
        ;;

    stop)
        [ -z "${pid}" ] && return 0

        log INFO "Stopping throttling detector (pid=%s)" "${pid}"
        kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid}
        rm -rf ${THROTT_DETECT_PID_FILE_PATH}
        ;;

    start)
        [ -n "${pid}" ] && {
            log WARN "Throttling detector is already running (pid=%s)" ${pid}
            return 0
        }

        (
            read_freq_info n RPn || exit $?

            while true; do
                sleep ${THROTT_DETECT_SLEEP_SEC}
                read_freq_info n act min cur || exit $?

                #
                # Throttling seems to occur when act freq goes below min.
                # However, it's necessary to exclude the idle states, where
                # act freq normally reaches RPn and cur goes below min.
                #
                [ ${FREQ_act} -lt ${FREQ_min} ] && \
                    [ ${FREQ_act} -gt ${FREQ_RPn} ] && \
                    [ ${FREQ_cur} -ge ${FREQ_min} ] && \
                    printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \
                        ${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn}
            done
        ) &

        pid=$!
        log INFO "Started GPU throttling detector (pid=%s)" ${pid}

        printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \
            log WARN "Failed to write throttle detector PID file"
        ;;
    esac
}
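
# When a throttling event is caught, the detector prints a line of the form
# (values are illustrative):
#   GPU throttling detected: act=350 min=400 cur=450 RPn=300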

#
# Retrieve the list of online CPUs.
#
get_online_cpus() {
    local path cpu_index

    printf "0"
    for path in $(grep 1 ${CPU_SYSFS_PREFIX}/cpu*/online); do
        cpu_index=${path##*/cpu}
        printf " %s" ${cpu_index%%/*}
    done
}
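# E.g. on a fully online 4-CPU machine this prints "0 1 2 3". The leading "0"
# is printed unconditionally because cpu0 typically has no online file.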

#
# Helper to print sysfs path for the given CPU index and freq info.
#
# arg1: Frequency info sysfs name, one of *_CPU_FREQ_INFO constants above
# arg2: CPU index
#
print_cpu_freq_sysfs_path() {
    printf ${CPU_FREQ_SYSFS_PATTERN} "$2" "$1"
}

#
# Read the specified CPU freq info from sysfs.
#
# arg1: CPU index
# arg2: Flag (y/n) to also enable printing the freq info.
# arg3...: Frequency info sysfs name(s), see *_CPU_FREQ_INFO constants above
# return: Global variable(s) CPU_FREQ_${arg} containing the requested information
#
read_cpu_freq_info() {
    local var val info path cpu_index print=0 ret=0

    cpu_index=$1
    [ "$2" = "y" ] && print=1
    shift 2

    while [ $# -gt 0 ]; do
        info=$1
        shift
        var=CPU_FREQ_${info}
        path=$(print_cpu_freq_sysfs_path "${info}" ${cpu_index})

        [ -r ${path} ] && read ${var} < ${path} || {
            log ERROR "Failed to read CPU freq info from: %s" "${path}"
            ret=1
            continue
        }

        # As in read_freq_info(), check the value that was read rather than
        # the variable name.
        eval val=\$${var}
        [ -n "${val}" ] || {
            log ERROR "Got empty CPU freq info from: %s" "${path}"
            ret=1
            continue
        }

        [ ${print} -eq 1 ] && printf "%6s: %4s Hz\n" "${info}" "${val}"
    done

    return ${ret}
}

#
# Helper to print freq. value as requested by user via '--cpu-set-max' option.
# arg1: user requested freq value
#
compute_cpu_freq_set() {
    local val

    case "$1" in
    +)
        val=${CPU_FREQ_cpuinfo_max}
        ;;
    -)
        val=${CPU_FREQ_cpuinfo_min}
        ;;
    *%)
        val=$((${1%?} * ${CPU_FREQ_cpuinfo_max} / 100))
        ;;
    *[!0-9]*)
        log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
        return 1
        ;;
    "")
        log ERROR "Cannot set CPU freq to unspecified value"
        return 1
        ;;
    *)
        log ERROR "Cannot set CPU freq to custom value; use +, -, or % instead"
        return 1
        ;;
    esac

    printf "%s" "${val}"
}

#
# Adjust CPU max scaling frequency.
#
set_cpu_freq_max() {
    local target_freq res=0
    case "${CPU_SET_MAX_FREQ}" in
    +)
        target_freq=100
        ;;
    -)
        target_freq=1
        ;;
    *%)
        target_freq=${CPU_SET_MAX_FREQ%?}
        ;;
    *)
        log ERROR "Invalid CPU freq"
        return 1
        ;;
    esac

    local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
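    # i.e. /sys/devices/system/cpu/intel_pstate/max_perf_pct, which only
    # exists when the intel_pstate scaling driver is in use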
    [ -e "${pstate_info}" ] && {
        log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
        printf "%s" "${target_freq}" > "${pstate_info}"
        [ $? -eq 0 ] || {
            log ERROR "Failed to set intel_pstate max perf"
            res=1
        }
    }

    local cpu_index
    for cpu_index in $(get_online_cpus); do
        read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }

        target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
        [ -z "${target_freq}" ] && { res=$?; continue; }

        log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
        [ -n "${DRY_RUN}" ] && continue

        printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index})
        [ $? -eq 0 ] || {
            res=1
            log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
        }
    done

    return ${res}
}

#
# Show help message.
#
print_usage() {
    cat <<EOF
Usage: ${0##*/} [OPTION]...

A script to manage Intel GPU frequencies. Can be used for debugging performance
problems or trying to obtain a stable frequency while benchmarking.

Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz.

Options:
  -g, --get [act|enf|cap|all]
        Get frequency information: active (default), enforced,
        hardware capabilities or all of them.

  -s, --set [{min|max}=]{FREQUENCY[%]|+|-}
        Set min or max frequency to the given value (MHz).
        Append '%' to interpret FREQUENCY as % of hw max.
        Use '+' or '-' to set frequency to hardware max or min.
        Omit min/max prefix to set both frequencies.

  -r, --reset
        Reset frequencies to hardware defaults.

  -m, --monitor [act|enf|cap|all]
        Monitor the indicated frequencies via 'watch' utility.
        See '-g, --get' option for more details.

  -d, --detect-thrott [start|stop|status]
        Start (default operation) the throttling detector
        as a background process. Use 'stop' or 'status' to
        terminate the detector process or verify its status.

  --cpu-set-max {FREQUENCY%|+|-}
        Set CPU max scaling frequency as % of hw max.
        Use '+' or '-' to set frequency to hardware max or min.

  --dry-run
        See what the script will do without applying any
        frequency changes.

  -h, --help
        Display this help text and exit.
EOF
}
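
#
# Typical invocations (illustrative, derived from the options above):
#   intel-gpu-freq.sh -g all            # show actual/enforced/capability info
#   intel-gpu-freq.sh -s 70% -d         # set min and max to 70% of hw max and
#                                       # start the throttling detector
#   intel-gpu-freq.sh --cpu-set-max 65% --dry-run
#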

#
# Parse user input for '-g, --get' option.
# Returns 0 if a value has been provided, otherwise 1.
#
parse_option_get() {
    local ret=0

    case "$1" in
    act) GET_ACT_FREQ=1;;
    enf) GET_ENF_FREQ=1;;
    cap) GET_CAP_FREQ=1;;
    all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;;
    -*|"")
        # No value provided, using default.
        GET_ACT_FREQ=1
        ret=1
        ;;
    *)
        print_usage
        exit 1
        ;;
    esac

    return ${ret}
}

#
# Validate user input for '-s, --set' option.
# arg1: input value to be validated
# arg2: optional flag indicating input is restricted to %
#
validate_option_set() {
    case "$1" in
    +|-|[0-9]%|[0-9][0-9]%)
        return 0
        ;;
    *[!0-9]*|"")
        print_usage
        exit 1
        ;;
    esac

    [ -z "$2" ] || { print_usage; exit 1; }
}

#
# Parse script arguments.
#
[ $# -eq 0 ] && { print_usage; exit 1; }

while [ $# -gt 0 ]; do
    case "$1" in
    -g|--get)
        parse_option_get "$2" && shift
        ;;

    -s|--set)
        shift
        case "$1" in
        min=*)
            SET_MIN_FREQ=${1#min=}
            validate_option_set "${SET_MIN_FREQ}"
            ;;
        max=*)
            SET_MAX_FREQ=${1#max=}
            validate_option_set "${SET_MAX_FREQ}"
            ;;
        *)
            SET_MIN_FREQ=$1
            validate_option_set "${SET_MIN_FREQ}"
            SET_MAX_FREQ=${SET_MIN_FREQ}
            ;;
        esac
        ;;

    -r|--reset)
        RESET_FREQ=1
        SET_MIN_FREQ="-"
        SET_MAX_FREQ="+"
        ;;

    -m|--monitor)
        MONITOR_FREQ=act
        parse_option_get "$2" && MONITOR_FREQ=$2 && shift
        ;;

    -d|--detect-thrott)
        DETECT_THROTT=start
        case "$2" in
        start|stop|status)
            DETECT_THROTT=$2
            shift
            ;;
        esac
        ;;

    --cpu-set-max)
        shift
        CPU_SET_MAX_FREQ=$1
        validate_option_set "${CPU_SET_MAX_FREQ}" restricted
        ;;

    --dry-run)
        DRY_RUN=1
        ;;

    -h|--help)
        print_usage
        exit 0
        ;;

    *)
        print_usage
        exit 1
        ;;
    esac

    shift
done

#
# Main
#
RET=0

identify_intel_gpu || {
    log INFO "No Intel GPU detected"
    exit 0
}

[ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; }
print_freq_info

[ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT}

[ -n "${CPU_SET_MAX_FREQ}" ] && { set_cpu_freq_max || RET=$?; }

[ -n "${MONITOR_FREQ}" ] && {
    log INFO "Entering frequency monitoring mode"
    sleep 2
    exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}"
}

exit ${RET}
@@ -157,16 +157,3 @@ CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y

# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_TEGRA=m
CONFIG_R8169=y
CONFIG_STAGING=y
CONFIG_DRM_TEGRA_STAGING=y
CONFIG_TEGRA_HOST1X=y
CONFIG_ARM_TEGRA_DEVFREQ=y
CONFIG_TEGRA_SOCTHERM=y
CONFIG_DRM_TEGRA_DEBUG=y
CONFIG_PWM_TEGRA=y

@@ -25,10 +25,7 @@ if [[ $arch == "arm64" ]]; then
    wget ${ARTIFACTS_URL}/Image.gz
    wget ${ARTIFACTS_URL}/cheza-kernel

    DEVICE_TREES=""
    DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
    DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
    DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
    DEVICE_TREES="apq8016-sbc.dtb apq8096-db820c.dtb"

    for DTB in $DEVICE_TREES; do
        wget ${ARTIFACTS_URL}/$DTB

@@ -2,14 +2,14 @@

set -ex

APITRACE_VERSION="790380e05854d5c9d315555444ffcc7acb8f4037"
APITRACE_VERSION="170424754bb46002ba706e16ee5404b61988d74a"

git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
pushd /apitrace
git checkout "$APITRACE_VERSION"
git submodule update --init --depth 1 --recursive
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
cmake --build _build --parallel --target apitrace eglretrace
ninja -C _build
mkdir build
cp _build/apitrace build
cp _build/eglretrace build

@@ -2,18 +2,13 @@

set -ex

SCRIPT_DIR="$(pwd)"

CROSVM_VERSION=c7cd0e0114c8363b884ba56d8e12adee718dcc93
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
CROSVM_VERSION=d2b6a64dd31c92a284a905c0f2483d0b222b1220
git clone --single-branch -b for-mesa-ci --no-checkout https://gitlab.freedesktop.org/tomeu/crosvm.git /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
git submodule update --init
# Apply all crosvm patches for Mesa CI
cat "$SCRIPT_DIR"/.gitlab-ci/container/build-crosvm_*.patch |
    patch -p1

VIRGLRENDERER_VERSION=dd301caf7e05ec9c09634fb7872067542aad89b7
VIRGLRENDERER_VERSION=e420a5aab92de8fb42fad50762f0ac3b5fcb3bfb
rm -rf third_party/virglrenderer
git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
pushd third_party/virglrenderer
@@ -38,4 +33,4 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \

popd

rm -rf /platform/crosvm
rm -rf $PLATFORM2_ROOT $AOSP_EXTERNAL_ROOT/minijail $THIRD_PARTY_ROOT/adhd $THIRD_PARTY_ROOT/rust-vmm /platform/crosvm

@@ -1,43 +0,0 @@
From 3c57ec558bccc67fd53363c23deea20646be5c47 Mon Sep 17 00:00:00 2001
From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Date: Wed, 17 Nov 2021 10:18:04 +0100
Subject: [PATCH] Hack syslog out

It's causing stability problems when running several Crosvm instances in
parallel.

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
---
 base/src/unix/linux/syslog.rs       | 2 +-
 common/sys_util/src/linux/syslog.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/base/src/unix/linux/syslog.rs b/base/src/unix/linux/syslog.rs
index 05972a3a..f0db3781 100644
--- a/base/src/unix/linux/syslog.rs
+++ b/base/src/unix/linux/syslog.rs
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
 impl Syslog for PlatformSyslog {
     fn new() -> Result<Self, Error> {
         Ok(Self {
-            socket: Some(openlog_and_get_socket()?),
+            socket: None,
         })
     }

diff --git a/common/sys_util/src/linux/syslog.rs b/common/sys_util/src/linux/syslog.rs
index 05972a3a..f0db3781 100644
--- a/common/sys_util/src/linux/syslog.rs
+++ b/common/sys_util/src/linux/syslog.rs
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
 impl Syslog for PlatformSyslog {
     fn new() -> Result<Self, Error> {
         Ok(Self {
-            socket: Some(openlog_and_get_socket()?),
+            socket: None,
         })
     }

--
2.25.1

@@ -4,21 +4,19 @@

if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
    # Build and install from source
    DEQP_RUNNER_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git}"
    EXTRA_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git} ${EXTRA_CARGO_ARGS}"

    if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
        DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
        EXTRA_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${EXTRA_CARGO_ARGS}"
    else
        DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
        EXTRA_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${EXTRA_CARGO_ARGS}"
    fi

    DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
else
    # Install from package registry
    DEQP_RUNNER_CARGO_ARGS="--version 0.13.1 ${EXTRA_CARGO_ARGS} -- deqp-runner"
    EXTRA_CARGO_ARGS="--version 0.11.0 ${EXTRA_CARGO_ARGS} -- deqp-runner"
fi
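# For illustration (hypothetical values): with DEQP_RUNNER_GIT_TAG=v0.13.1 and
# FDO_CI_CONCURRENT unset, the install below amounts to:
#   cargo install --locked -j 4 --root /usr/local \
#       --tag v0.13.1 --git https://gitlab.freedesktop.org/anholt/deqp-runner.git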

cargo install --locked \
    -j ${FDO_CI_CONCURRENT:-4} \
    --root /usr/local \
    ${DEQP_RUNNER_CARGO_ARGS}
    ${EXTRA_CARGO_ARGS}

@@ -6,7 +6,7 @@ git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
    https://github.com/KhronosGroup/VK-GL-CTS.git \
    -b vulkan-cts-1.3.3.0 \
    -b vulkan-cts-1.2.7.2 \
    --depth 1 \
    /VK-GL-CTS
pushd /VK-GL-CTS
@@ -43,8 +43,8 @@ mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl

# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
    cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt) ; do
    cat /VK-GL-CTS/external/vulkancts/mustpass/master/$mustpass \
        >> /deqp/mustpass/vk-master.txt
done

@@ -4,7 +4,7 @@

git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
git checkout 16fba1b8b5d9310126bb02323d7bae3227338461
git checkout 72088685d90bc814d14aad5505354ffa8a642789
git submodule update --init
mkdir build
cd build

@@ -2,18 +2,18 @@

set -ex

GFXRECONSTRUCT_VERSION=5ed3caeecc46e976c4df31e263df8451ae176c26
GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1

git clone https://github.com/LunarG/gfxreconstruct.git \
    --single-branch \
    -b master \
    --no-checkout \
    /gfxreconstruct
git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct
pushd /gfxreconstruct
git checkout "$GFXRECONSTRUCT_VERSION"
git submodule update --init
git submodule update
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=/gfxreconstruct/build -DBUILD_WERROR=OFF
cmake --build _build --parallel --target tools/{replay,info}/install/strip
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C _build gfxrecon-replay gfxrecon-info
mkdir -p build/bin
install _build/tools/replay/gfxrecon-replay build/bin
install _build/tools/info/gfxrecon-info build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd

@@ -2,7 +2,7 @@

set -ex

PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
PARALLEL_DEQP_RUNNER_VERSION=6596b71cf37a7efb4d54acd48c770ed2d4ad6b7e

git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
pushd /parallel-deqp-runner

@@ -28,7 +28,7 @@ if [[ -n ${DEVICE_TREES} ]]; then
    cp ${DEVICE_TREES} /lava-files/.
fi

if [[ ${DEBIAN_ARCH} = "amd64" || ${DEBIAN_ARCH} = "arm64" ]]; then
if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
    make modules
    INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi

@@ -2,7 +2,7 @@

set -ex

export LIBDRM_VERSION=libdrm-2.4.110
export LIBDRM_VERSION=libdrm-2.4.109

wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz

@@ -4,7 +4,7 @@ set -ex

git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout b2c9d8f56b45d79f804f4cb5ac62520f0edd8988
git checkout af1785f31f65622d9b1ca1c08c75cf140bc7ed22
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS

@@ -1,76 +1,61 @@
#!/bin/bash
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


create_gn_args() {
    # gn can be configured to cross-compile skia and its tools
    # It is important to set the target_cpu to guarantee the intended target
    # machine
    cp "${BASE_ARGS_GN_FILE}" "${SKQP_OUT_DIR}"/args.gn
    echo "target_cpu = \"${SKQP_ARCH}\"" >> "${SKQP_OUT_DIR}"/args.gn
}
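
# With SKQP_ARCH=arm64, for instance, the args.gn generated above would end
# with the line: target_cpu = "arm64"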

download_skqp_models() (
    # The download_model.py script needs a checksum file to know what model
    # version to download.

download_skia_source() {
    if [ -z ${SKIA_DIR+x} ]
    then
        return 1
    fi
    # This is the most recent commit available in the skia repository with a
    # valid files.checksum
    SKIA_LAST_SKQP_CUT_COMMIT_SHA=ccf5f0d75b6a6b54756f2c62d57e3730eed8aa45
    git fetch origin "${SKIA_LAST_SKQP_CUT_COMMIT_SHA}:refs/remotes/origin/${SKIA_LAST_SKQP_CUT_COMMIT_SHA}"
    git checkout "${SKIA_LAST_SKQP_CUT_COMMIT_SHA}" -- \
        platform_tools/android/apps/skqp/src/main/assets/files.checksum

    # Skia cloned from https://android.googlesource.com/platform/external/skqp
    # has all needed assets tracked on git-fs
    SKQP_REPO=https://android.googlesource.com/platform/external/skqp
    SKQP_BRANCH=android-cts-11.0_r7
    # The following patch transforms download_model.py from python2 to python3.
    git apply "${DOWNLOAD_MODEL_PATCH_FILE}"
    python3 tools/skqp/download_model.py

    git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
}
    # Copy resources from skia to skqp directory
    python3 tools/skqp/setup_resources
)

set -ex

SCRIPT_DIR=$(realpath "$(dirname "$0")")
SKQP_PATCH_DIR="${SCRIPT_DIR}"
FETCH_GN_PATCH_FILE="${SCRIPT_DIR}/build-skqp_fetch-gn.patch"
BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
DOWNLOAD_MODEL_PATCH_FILE="${SCRIPT_DIR}/build-skqp_download_model.patch"

SKQP_ARCH=${SKQP_ARCH:-x64}
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
SKQP_DIR=${SKQP_DIR:-$(mktemp -d)}
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
SKQP_INSTALL_DIR=/skqp
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
SKQP_BINARIES=(skqp)
# Build list_gpu_unit_tests to update the unittests.txt file for the target
# hardware.
SKQP_BINARIES=(skqp list_gpu_unit_tests)

download_skia_source
# Using a recent release version to mitigate instability during the test phase
SKIA_COMMIT_SHA="canvaskit/0.32.0"

git clone 'https://skia.googlesource.com/skia/' \
    --single-branch \
    -b "${SKIA_COMMIT_SHA}" \
    "${SKIA_DIR}"

pushd "${SKIA_DIR}"

# Apply all skqp patches for Mesa CI
cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
    patch -p1

# Fetch some build tools needed to build skia/skqp.
# Basically, it clones repositories with commit SHAs from ${SKIA_DIR}/DEPS
# directory.
python tools/git-sync-deps
git apply "${FETCH_GN_PATCH_FILE}"
# Fetch some build tools needed to build skia/skqp.
# Basically, it clones repositories with commit SHAs from
# ${SKIA_DIR}/DEPS directory
python3 tools/git-sync-deps

mkdir -p "${SKQP_OUT_DIR}"
mkdir -p "${SKQP_INSTALL_DIR}"
@@ -83,15 +68,15 @@ bin/gn gen "${SKQP_OUT_DIR}"

for BINARY in "${SKQP_BINARIES[@]}"
do
    /usr/bin/ninja -C "${SKQP_OUT_DIR}" "${BINARY}"
    # Strip binary, since gn is not stripping it even when `is_debug == false`
    ${STRIP_CMD:-strip} "${SKQP_OUT_DIR}/${BINARY}"
    install -m 0755 "${SKQP_OUT_DIR}/${BINARY}" "${SKQP_INSTALL_DIR}"
done

# Move assets to the target directory, which will reside in rootfs.
# Acquire assets and move them to the target directory.
download_skqp_models
mv platform_tools/android/apps/skqp/src/main/assets/ "${SKQP_ASSETS_DIR}"

popd
rm -Rf "${SKQP_DIR}"
rm -Rf "${SKIA_DIR}"

set +ex

@@ -1,13 +0,0 @@
diff --git a/BUILD.gn b/BUILD.gn
index d2b1407..7b60c90 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -144,7 +144,7 @@ config("skia_public") {

 # Skia internal APIs, used by Skia itself and a few test tools.
 config("skia_private") {
-  visibility = [ ":*" ]
+  visibility = [ "*" ]

   include_dirs = [
     "include/private",
@@ -1,30 +1,8 @@
cc = "clang"
cxx = "clang++"
cc = "gcc"
cxx = "g++"

extra_cflags = [ "-DSK_ENABLE_DUMP_GPU", "-DSK_BUILD_FOR_SKQP" ]
extra_cflags_cc = [
    "-Wno-error",

    # The skqp build process produces a lot of compilation warnings; most of
    # them are silenced here to remove clutter and to keep the CI job log from
    # exceeding the maximum size.

    # GCC flags
    "-Wno-redundant-move",
    "-Wno-suggest-override",
    "-Wno-class-memaccess",
    "-Wno-deprecated-copy",
    "-Wno-uninitialized",

    # Clang flags
    "-Wno-macro-redefined",
    "-Wno-anon-enum-enum-conversion",
    "-Wno-suggest-destructor-override",
    "-Wno-return-std-move-in-c++11",
    "-Wno-extra-semi-stmt",
]

cc_wrapper = "ccache"
extra_cflags_cc = [ "-static", "-Wno-error", "-Wno-macro-redefined", "-Wno-suggest-destructor-override", "-Wno-suggest-override" ]

is_debug = false

.gitlab-ci/container/build-skqp_download_model.patch (new file, 22 lines)
@@ -0,0 +1,22 @@
diff --git a/tools/skqp/download_model.py b/tools/skqp/download_model.py
index fb0020e481..a5d8a03754 100755
--- a/tools/skqp/download_model.py
+++ b/tools/skqp/download_model.py
@@ -10,7 +10,7 @@ import os
 import shutil
 import sys
 import tempfile
-import urllib2
+import urllib.request, urllib.error, urllib.parse

 def checksum(path):
   if not os.path.exists(path):
@@ -33,7 +33,7 @@ def download(md5, path):
     pass  # ignore race condition
   url = 'https://storage.googleapis.com/skia-skqp-assets/' + md5
   with open(path, 'wb') as o:
-    shutil.copyfileobj(urllib2.urlopen(url), o)
+    shutil.copyfileobj(urllib.request.urlopen(url), o)

 def tmp(prefix):
   fd, path = tempfile.mkstemp(prefix=prefix)
.gitlab-ci/container/build-skqp_fetch-gn.patch (new file, 13 lines)
@@ -0,0 +1,13 @@
diff --git a/bin/fetch-gn b/bin/fetch-gn
index b4bb14c630..59c4591a30 100755
--- a/bin/fetch-gn
+++ b/bin/fetch-gn
@@ -23,7 +23,7 @@ os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
 gnzip = os.path.join(tempfile.mkdtemp(), 'gn.zip')
 with open(gnzip, 'wb') as f:
   OS = {'darwin': 'mac', 'linux': 'linux', 'linux2': 'linux', 'win32': 'windows'}[sys.platform]
-  cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64'}[platform.machine().lower()]
+  cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64', 'aarch64': 'arm64'}[platform.machine().lower()]

   rev = 'd62642c920e6a0d1756316d225a90fd6faa9e21e'
   url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/{}-{}/+/git_revision:{}'.format(
@@ -1,68 +0,0 @@
diff --git a/bin/fetch-gn b/bin/fetch-gn
index d5e94a2..59c4591 100755
--- a/bin/fetch-gn
+++ b/bin/fetch-gn
@@ -5,39 +5,44 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-import hashlib
 import os
+import platform
 import shutil
 import stat
 import sys
-import urllib2
+import tempfile
+import zipfile
+
+if sys.version_info[0] < 3:
+    from urllib2 import urlopen
+else:
+    from urllib.request import urlopen

 os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))

-dst = 'bin/gn.exe' if 'win32' in sys.platform else 'bin/gn'
+gnzip = os.path.join(tempfile.mkdtemp(), 'gn.zip')
+with open(gnzip, 'wb') as f:
+  OS = {'darwin': 'mac', 'linux': 'linux', 'linux2': 'linux', 'win32': 'windows'}[sys.platform]
+  cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64', 'aarch64': 'arm64'}[platform.machine().lower()]

-sha1 = '2f27ff0b6118e5886df976da5effa6003d19d1ce' if 'linux' in sys.platform else \
-    '9be792dd9010ce303a9c3a497a67bcc5ac8c7666' if 'darwin' in sys.platform else \
-    'eb69be2d984b4df60a8c21f598135991f0ad1742' # Windows
+  rev = 'd62642c920e6a0d1756316d225a90fd6faa9e21e'
+  url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/{}-{}/+/git_revision:{}'.format(
+    OS,cpu,rev)
+  f.write(urlopen(url).read())

-def sha1_of_file(path):
-  h = hashlib.sha1()
-  if os.path.isfile(path):
-    with open(path, 'rb') as f:
-      h.update(f.read())
-  return h.hexdigest()
+gn = 'gn.exe' if 'win32' in sys.platform else 'gn'
+with zipfile.ZipFile(gnzip, 'r') as f:
+  f.extract(gn, 'bin')

-if sha1_of_file(dst) != sha1:
-  with open(dst, 'wb') as f:
-    f.write(urllib2.urlopen('https://chromium-gn.storage-download.googleapis.com/' + sha1).read())
+gn = os.path.join('bin', gn)

-  os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-           stat.S_IRGRP | stat.S_IXGRP |
-           stat.S_IROTH | stat.S_IXOTH )
+os.chmod(gn, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
+         stat.S_IRGRP | stat.S_IXGRP |
+         stat.S_IROTH | stat.S_IXOTH )

 # We'll also copy to a path that depot_tools' GN wrapper will expect to find the binary.
 copy_path = 'buildtools/linux64/gn' if 'linux' in sys.platform else \
     'buildtools/mac/gn' if 'darwin' in sys.platform else \
     'buildtools/win/gn.exe'
 if os.path.isdir(os.path.dirname(copy_path)):
-  shutil.copy(dst, copy_path)
+  shutil.copy(gn, copy_path)
@@ -1,142 +0,0 @@
Patch based on a diff against the skia repository at commit
013397884c73959dc07cb0a26ee742b1cdfbda8a

Adds support for Python3, but removes the constraint of only SHA-based refs in
DEPS
diff --git a/tools/git-sync-deps b/tools/git-sync-deps
index c7379c0b5c..f63d4d9ccf 100755
--- a/tools/git-sync-deps
+++ b/tools/git-sync-deps
@@ -43,7 +43,7 @@ def git_executable():
    A string suitable for passing to subprocess functions, or None.
  """
  envgit = os.environ.get('GIT_EXECUTABLE')
-  searchlist = ['git']
+  searchlist = ['git', 'git.bat']
  if envgit:
    searchlist.insert(0, envgit)
  with open(os.devnull, 'w') as devnull:
@@ -94,21 +94,25 @@ def is_git_toplevel(git, directory):
  try:
    toplevel = subprocess.check_output(
      [git, 'rev-parse', '--show-toplevel'], cwd=directory).strip()
-    return os.path.realpath(directory) == os.path.realpath(toplevel)
+    return os.path.realpath(directory) == os.path.realpath(toplevel.decode())
  except subprocess.CalledProcessError:
    return False


-def status(directory, checkoutable):
-  def truncate(s, length):
+def status(directory, commithash, change):
+  def truncate_beginning(s, length):
+    return s if len(s) <= length else '...' + s[-(length-3):]
+  def truncate_end(s, length):
    return s if len(s) <= length else s[:(length - 3)] + '...'
+
  dlen = 36
-  directory = truncate(directory, dlen)
-  checkoutable = truncate(checkoutable, 40)
-  sys.stdout.write('%-*s @ %s\n' % (dlen, directory, checkoutable))
+  directory = truncate_beginning(directory, dlen)
+  commithash = truncate_end(commithash, 40)
+  symbol = '>' if change else '@'
+  sys.stdout.write('%-*s %s %s\n' % (dlen, directory, symbol, commithash))


-def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
+def git_checkout_to_directory(git, repo, commithash, directory, verbose):
  """Checkout (and clone if needed) a Git repository.

  Args:
@@ -117,8 +121,7 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
    repo (string) the location of the repository, suitable
         for passing to `git clone`.

-    checkoutable (string) a tag, branch, or commit, suitable for
-                  passing to `git checkout`
+    commithash (string) a commit, suitable for passing to `git checkout`

    directory (string) the path into which the repository
              should be checked out.
@@ -129,7 +132,12 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
  """
  if not os.path.isdir(directory):
    subprocess.check_call(
-      [git, 'clone', '--quiet', repo, directory])
+      [git, 'clone', '--quiet', '--no-checkout', repo, directory])
+    subprocess.check_call([git, 'checkout', '--quiet', commithash],
+                          cwd=directory)
+    if verbose:
+      status(directory, commithash, True)
+    return

  if not is_git_toplevel(git, directory):
    # if the directory exists, but isn't a git repo, you will modify
@@ -145,11 +153,11 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
  with open(os.devnull, 'w') as devnull:
    # If this fails, we will fetch before trying again. Don't spam user
    # with error information.
-    if 0 == subprocess.call([git, 'checkout', '--quiet', checkoutable],
+    if 0 == subprocess.call([git, 'checkout', '--quiet', commithash],
                            cwd=directory, stderr=devnull):
      # if this succeeds, skip slow `git fetch`.
      if verbose:
-        status(directory, checkoutable)  # Success.
+        status(directory, commithash, False)  # Success.
      return

  # If the repo has changed, always force use of the correct repo.
@@ -159,18 +167,24 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):

  subprocess.check_call([git, 'fetch', '--quiet'], cwd=directory)

-  subprocess.check_call([git, 'checkout', '--quiet', checkoutable], cwd=directory)
+  subprocess.check_call([git, 'checkout', '--quiet', commithash], cwd=directory)

  if verbose:
-    status(directory, checkoutable)  # Success.
+    status(directory, commithash, True)  # Success.


def parse_file_to_dict(path):
  dictionary = {}
-  execfile(path, dictionary)
+  with open(path) as f:
+    exec('def Var(x): return vars[x]\n' + f.read(), dictionary)
  return dictionary


+def is_sha1_sum(s):
+  """SHA1 sums are 160 bits, encoded as lowercase hexadecimal."""
+  return len(s) == 40 and all(c in '0123456789abcdef' for c in s)
+
+
def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
  """Grab dependencies, with optional platform support.

@@ -204,19 +218,19 @@ def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
      raise Exception('%r is parent of %r' % (other_dir, directory))
  list_of_arg_lists = []
  for directory in sorted(dependencies):
-    if not isinstance(dependencies[directory], basestring):
+    if not isinstance(dependencies[directory], str):
      if verbose:
-        print 'Skipping "%s".' % directory
+        sys.stdout.write( 'Skipping "%s".\n' % directory)
      continue
    if '@' in dependencies[directory]:
-      repo, checkoutable = dependencies[directory].split('@', 1)
+      repo, commithash = dependencies[directory].split('@', 1)
    else:
-      raise Exception("please specify commit or tag")
+      raise Exception("please specify commit")

    relative_directory = os.path.join(deps_file_directory, directory)

    list_of_arg_lists.append(
-      (git, repo, checkoutable, relative_directory, verbose))
+      (git, repo, commithash, relative_directory, verbose))

  multithread(git_checkout_to_directory, list_of_arg_lists)
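
The parse_file_to_dict change above swaps Python 2's execfile for a portable open/exec pair that also injects a Var() lookup helper before evaluating the DEPS file. A hedged, self-contained sketch of how a DEPS file is evaluated under that scheme; the sample DEPS content is invented for illustration:

import os
import tempfile

def parse_file_to_dict(path):
    # Mirrors the patched helper: evaluate the DEPS file as Python,
    # defining Var() first so templated entries keep working.
    dictionary = {}
    with open(path) as f:
        exec('def Var(x): return vars[x]\n' + f.read(), dictionary)
    return dictionary

def is_sha1_sum(s):
    """SHA1 sums are 160 bits, encoded as lowercase hexadecimal."""
    return len(s) == 40 and all(c in '0123456789abcdef' for c in s)

# Hypothetical DEPS file, for illustration only:
deps_text = '''
vars = {"sha": "013397884c73959dc07cb0a26ee742b1cdfbda8a"}
deps = {"third_party/example": "https://example.org/repo.git@" + Var("sha")}
'''
path = os.path.join(tempfile.mkdtemp(), 'DEPS')
with open(path, 'w') as f:
    f.write(deps_text)

d = parse_file_to_dict(path)
repo, commithash = d['deps']['third_party/example'].split('@', 1)
assert is_sha1_sum(commithash)
print(repo, commithash)
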
@@ -1,41 +0,0 @@
diff --git a/tools/skqp/src/skqp.cpp b/tools/skqp/src/skqp.cpp
index 50ed9db01d..938217000d 100644
--- a/tools/skqp/src/skqp.cpp
+++ b/tools/skqp/src/skqp.cpp
@@ -448,7 +448,7 @@ inline void write(SkWStream* wStream, const T& text) {

void SkQP::makeReport() {
    SkASSERT_RELEASE(fAssetManager);
-    int glesErrorCount = 0, vkErrorCount = 0, gles = 0, vk = 0;
+    int glErrorCount = 0, glesErrorCount = 0, vkErrorCount = 0, gl = 0, gles = 0, vk = 0;

    if (!sk_isdir(fReportDirectory.c_str())) {
        SkDebugf("Report destination does not exist: '%s'\n", fReportDirectory.c_str());
@@ -460,6 +460,7 @@ void SkQP::makeReport() {
    htmOut.writeText(kDocHead);
    for (const SkQP::RenderResult& run : fRenderResults) {
        switch (run.fBackend) {
+            case SkQP::SkiaBackend::kGL: ++gl; break;
            case SkQP::SkiaBackend::kGLES: ++gles; break;
            case SkQP::SkiaBackend::kVulkan: ++vk; break;
            default: break;
@@ -477,15 +478,17 @@ void SkQP::makeReport() {
        }
        write(&htmOut, SkStringPrintf("    f(%s);\n", str.c_str()));
        switch (run.fBackend) {
+            case SkQP::SkiaBackend::kGL: ++glErrorCount; break;
            case SkQP::SkiaBackend::kGLES: ++glesErrorCount; break;
            case SkQP::SkiaBackend::kVulkan: ++vkErrorCount; break;
            default: break;
        }
    }
    htmOut.writeText(kDocMiddle);
-    write(&htmOut, SkStringPrintf("<p>gles errors: %d (of %d)</br>\n"
+    write(&htmOut, SkStringPrintf("<p>gl errors: %d (of %d)</br>\n"
+                                  "gles errors: %d (of %d)</br>\n"
                                  "vk errors: %d (of %d)</p>\n",
-                                  glesErrorCount, gles, vkErrorCount, vk));
+                                  glErrorCount, gl, glesErrorCount, gles, vkErrorCount, vk));
    htmOut.writeText(kDocTail);
    SkFILEWStream unitOut(SkOSPath::Join(fReportDirectory.c_str(), kUnitTestReportPath).c_str());
    SkASSERT_RELEASE(unitOut.isValid());
@@ -1,13 +0,0 @@
diff --git a/gn/BUILDCONFIG.gn b/gn/BUILDCONFIG.gn
index 454334a..1797594 100644
--- a/gn/BUILDCONFIG.gn
+++ b/gn/BUILDCONFIG.gn
@@ -80,7 +80,7 @@ if (current_cpu == "") {
is_clang = is_android || is_ios || is_mac ||
           (cc == "clang" && cxx == "clang++") || clang_win != ""
if (!is_clang && !is_win) {
-  is_clang = exec_script("gn/is_clang.py",
+  is_clang = exec_script("//gn/is_clang.py",
                         [
                           cc,
                           cxx,
@@ -2,7 +2,8 @@

set -ex

VKD3D_PROTON_COMMIT="5b73139f182d86cd58a757e4b5f0d4cfad96d319"
VKD3D_PROTON_VERSION="2.3.1"
VKD3D_PROTON_COMMIT="3ed3526332f53d7d35cf1b685fa8096b01f26ff0"

VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
@@ -15,7 +16,7 @@ function build_arch {
    meson "$@" \
        -Denable_tests=true \
        --buildtype release \
        --prefix "$VKD3D_PROTON_DST_DIR" \
        --prefix "$VKD3D_PROTON_BUILD_DIR" \
        --strip \
        --bindir "x${arch}" \
        --libdir "x${arch}" \
@@ -23,17 +24,20 @@ function build_arch {

    ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install

    install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/d3d12"
    install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/"*.exe
}

git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR"
git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b "v$VKD3D_PROTON_VERSION" --no-checkout "$VKD3D_PROTON_SRC_DIR"
pushd "$VKD3D_PROTON_SRC_DIR"
git checkout "$VKD3D_PROTON_COMMIT"
git submodule update --init --recursive
git submodule update --recursive
build_arch 64
build_arch 86
build_arch 64 --cross-file build-win64.txt
build_arch 86 --cross-file build-win32.txt
cp "setup_vkd3d_proton.sh" "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
chmod +x "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
popd

"$VKD3D_PROTON_BUILD_DIR"/setup_vkd3d_proton.sh install
rm -rf "$VKD3D_PROTON_BUILD_DIR"
rm -rf "$VKD3D_PROTON_SRC_DIR"

@@ -10,7 +10,7 @@ fi

export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache
export CCACHE_DIR=/cache/mesa/ccache
export PATH=$CCACHE_PATH:$PATH

# CMake ignores $PATH, so we have to force CC/GCC to the ccache versions.
@@ -34,9 +34,3 @@ chmod +x /usr/local/bin/ninja

# Set MAKEFLAGS so that all make invocations in container builds include the
# flags (doesn't apply to non-container builds, but we don't run make there)
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"

# Make wget try more than once when a download fails or times out.
echo -e "retry_connrefused = on\n" \
        "read_timeout = 300\n" \
        "tries = 4\n" \
        "wait_retry = 32" >> /etc/wgetrc

@@ -4,35 +4,16 @@

if [ $DEBIAN_ARCH = arm64 ]; then
    ARCH_PACKAGES="firmware-qcom-media
                   firmware-linux-nonfree
                   libfontconfig1
                   libgl1
                   libglu1-mesa
                   libvulkan-dev
                  "
elif [ $DEBIAN_ARCH = amd64 ]; then
    # Add llvm 13 to the build image
    apt-get -y install --no-install-recommends wget gnupg2 software-properties-common
    apt-key add /llvm-snapshot.gpg.key
    add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"

    ARCH_PACKAGES="firmware-amd-graphics
                   inetutils-syslogd
                   iptables
                   libcap2
                   libfontconfig1
                   libelf1
                   libfdt1
                   libgl1
                   libglu1-mesa
                   libllvm13
                   libllvm11
                   libva2
                   libva-drm2
                   libvulkan-dev
                   socat
                   spirv-tools
                   sysvinit-core
                  "
fi

@@ -43,12 +24,9 @@ INSTALL_CI_FAIRY_PACKAGES="git
                           python3-wheel
                          "

apt-get update
apt-get -y install --no-install-recommends \
    $ARCH_PACKAGES \
    $INSTALL_CI_FAIRY_PACKAGES \
    $EXTRA_LOCAL_PACKAGES \
    bash \
    ca-certificates \
    firmware-realtek \
    initramfs-tools \
@@ -92,7 +70,8 @@ apt-get -y install --no-install-recommends \
    waffle-utils \
    wget \
    xinit \
    xserver-xorg-core
    xserver-xorg-core \
    xz-utils

# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
@@ -115,6 +94,10 @@ chmod +x /init
# Strip the image to a small minimal system without removing the debian
# toolchain.

# xz compress firmware so it doesn't waste RAM at runtime on ramdisk systems
find /lib/firmware -type f -print0 | \
    xargs -0r -P4 -n4 xz -T1 -C crc32

# Copy timezone file and remove tzdata package
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
@@ -183,7 +166,9 @@ UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"insserv "\
"udev "\
"init-system-helpers "\
"bash "\
"cpio "\
"xz-utils "\
"passwd "\
"libsemanage1 libsemanage-common "\
"libsepol1 "\
@@ -199,8 +184,6 @@ UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"libgles2-mesa-dev "\
"libglx-mesa0 "\
"mesa-common-dev "\
"gnupg2 "\
"software-properties-common " \

# Removing unneeded packages
for PACKAGE in ${UNNEEDED_PACKAGES}
@@ -230,7 +213,7 @@ rm -rf var/* opt srv share
# ca-certificates are in /etc drop the source
rm -rf usr/share/ca-certificates

# No need for completions
# No bash, no need for completions
rm -rf usr/share/bash-completion

# No zsh, no need for completions
@@ -30,7 +30,7 @@ sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi

# Not using build-libdrm.sh because we don't want its cleanup after building
# each arch. Fetch and extract now.
export LIBDRM_VERSION=libdrm-2.4.110
export LIBDRM_VERSION=libdrm-2.4.109
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz

@@ -9,7 +9,6 @@ echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/bu
apt-get update

apt-get -y install \
    ${EXTRA_LOCAL_PACKAGES} \
    abootimg \
    autoconf \
    automake \
@@ -22,13 +21,11 @@ apt-get -y install \
    flex \
    g++ \
    git \
    glslang-tools \
    kmod \
    libasan6 \
    libdrm-dev \
    libelf-dev \
    libexpat1-dev \
    libvulkan-dev \
    libx11-dev \
    libx11-xcb-dev \
    libxcb-dri2-0-dev \
@@ -31,9 +31,3 @@ arch=armhf . .gitlab-ci/container/baremetal_build.sh
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
    -O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw

mkdir -p /baremetal-files/jetson-nano/boot/
ln -s \
    /baremetal-files/Image \
    /baremetal-files/tegra210-p3450-0000.dtb \
    /baremetal-files/jetson-nano/boot/

@@ -1,52 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)

mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM
EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM
R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2
B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY
Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT
DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1
G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/
ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU
cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq
7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc
Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB
tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz
dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE
FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC
9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR
udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX
wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn
l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv
gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W
R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg
hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx
K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya
KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B
MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7
BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g
zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc
bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC
DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw
F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta
RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/
21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV
ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+
M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa
xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ
d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/
fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X
OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB
pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML
PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL
wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd
oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l
tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG
5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP
LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov
1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3
krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN
bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw==
=j+4q
-----END PGP PUBLIC KEY BLOCK-----
@@ -1,53 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQGNBFwOmrgBDAC9FZW3dFpew1hwDaqRfdQQ1ABcmOYu1NKZHwYjd+bGvcR2LRGe
R5dfRqG1Uc/5r6CPCMvnWxFprymkqKEADn8eFn+aCnPx03HrhA+lNEbciPfTHylt
NTTuRua7YpJIgEOjhXUbxXxnvF8fhUf5NJpJg6H6fPQARUW+5M//BlVgwn2jhzlW
U+uwgeJthhiuTXkls9Yo3EoJzmkUih+ABZgvaiBpr7GZRw9GO1aucITct0YDNTVX
KA6el78/udi5GZSCKT94yY9ArN4W6NiOFCLV7MU5d6qMjwGFhfg46NBv9nqpGinK
3NDjqCevKouhtKl2J+nr3Ju3Spzuv6Iex7tsOqt+XdZCoY+8+dy3G5zbJwBYsMiS
rTNF55PHtBH1S0QK5OoN2UR1ie/aURAyAFEMhTzvFB2B2v7C0IKIOmYMEG+DPMs9
FQs/vZ1UnAQgWk02ZiPryoHfjFO80+XYMrdWN+RSo5q9ODClloaKXjqI/aWLGirm
KXw2R8tz31go3NMAEQEAAbQnV2luZUhRIHBhY2thZ2VzIDx3aW5lLWRldmVsQHdp
bmVocS5vcmc+iQHOBBMBCgA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEE
1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmyUACgkQdvGiD/mHZy/zkwv7B+nKFlDY
Bzz/7j0gqIODbs5FRZRtuf/IuPP3vZdWlNfAW/VyaLtVLJCM/mmaf/O6/gJ+D+E9
BBoSmHdHzBBOQHIj5IbRedynNcHT5qXsdBeU2ZPR50sdE+jmukvw3Wa5JijoDgUu
LGLGtU48Z3JsBXQ54OlnTZXQ2SMFhRUa10JANXSJQ+QY2Wo2Pi2+MEAHcrd71A2S
0mT2DQSSBQ92c6WPfUpOSBawd8P0ipT7rVFNLJh8HVQGyEWxPl8ecDEHoVfG2rdV
D0ADbNLx9031UUwpUicO6vW/2Ec7c3VNG1cpOtyNTw/lEgvsXOh3GQs/DvFvMy/h
QzaeF3Qq6cAPlKuxieJe4lLYFBTmCAT4iB1J8oeFs4G7ScfZH4+4NBe3VGoeCD/M
Wl+qxntAroblxiFuqtPJg+NKZYWBzkptJNhnrBxcBnRinGZLw2k/GR/qPMgsR2L4
cP+OUuka+R2gp9oDVTZTyMowz+ROIxnEijF50pkj2VBFRB02rfiMp7q6iQIzBBAB
CgAdFiEE2iNXmnTUrZr50/lFzvrI6q8XUZ0FAlwOm3AACgkQzvrI6q8XUZ3KKg/+
MD8CgvLiHEX90fXQ23RZQRm2J21w3gxdIen/N8yJVIbK7NIgYhgWfGWsGQedtM7D
hMwUlDSRb4rWy9vrXBaiZoF3+nK9AcLvPChkZz28U59Jft6/l0gVrykey/ERU7EV
w1Ie1eRu0tRSXsKvMZyQH8897iHZ7uqoJgyk8U8CvSW+V80yqLB2M8Tk8ECZq34f
HqUIGs4Wo0UZh0vV4+dEQHBh1BYpmmWl+UPf7nzNwFWXu/EpjVhkExRqTnkEJ+Ai
OxbtrRn6ETKzpV4DjyifqQF639bMIem7DRRf+mkcrAXetvWkUkE76e3E9KLvETCZ
l4SBfgqSZs2vNngmpX6Qnoh883aFo5ZgVN3v6uTS+LgTwMt/XlnDQ7+Zw+ehCZ2R
CO21Y9Kbw6ZEWls/8srZdCQ2LxnyeyQeIzsLnqT/waGjQj35i4exzYeWpojVDb3r
tvvOALYGVlSYqZXIALTx2/tHXKLHyrn1C0VgHRnl+hwv7U49f7RvfQXpx47YQN/C
PWrpbG69wlKuJptr+olbyoKAWfl+UzoO8vLMo5njWQNAoAwh1H8aFUVNyhtbkRuq
l0kpy1Cmcq8uo6taK9lvYp8jak7eV8lHSSiGUKTAovNTwfZG2JboGV4/qLDUKvpa
lPp2xVpF9MzA8VlXTOzLpSyIVxZnPTpL+xR5P9WQjMS5AY0EXA6auAEMAMReKL89
0z0SL+/i/geB/agfG/k6AXiG2a9kVWeIjAqFwHKl9W/DTNvOqCDgAt51oiHGRRjt
1Xm3XZD4p+GM1uZWn9qIFL49Gt5x94TqdrsKTVCJr0Kazn2mKQc7aja0zac+WtZG
OFn7KbniuAcwtC780cyikfmmExLI1/Vjg+NiMlMtZfpK6FIW+ulPiDQPdzIhVppx
w9/KlR2Fvh4TbzDsUqkFQSSAFdQ65BWgvzLpZHdKO/ILpDkThLbipjtvbBv/pHKM
O/NFTNoYkJ3cNW/kfcynwV+4AcKwdRz2A3Mez+g5TKFYPZROIbayOo01yTMLfz2p
jcqki/t4PACtwFOhkAs+MYPPyZDUkTFcEJQCPDstkAgmJWI3K2qELtDOLQyps3WY
Mfp+mntOdc8bKjFTMcCEk1zcm14K4Oms+w6dw2UnYsX1FAYYhPm8HUYwE4kP8M+D
9HGLMjLqqF/kanlCFZs5Avx3mDSAx6zS8vtNdGh+64oDNk4x4A2j8GTUuQARAQAB
iQG8BBgBCgAmFiEE1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmrgCGwwFCQPCZwAA
CgkQdvGiD/mHZy9FnAwAgfUkxsO53Pm2iaHhtF4+BUc8MNJj64Jvm1tghr6PBRtM
hpbvvN8SSOFwYIsS+2BMsJ2ldox4zMYhuvBcgNUlix0G0Z7h1MjftDdsLFi1DNv2
J9dJ9LdpWdiZbyg4Sy7WakIZ/VvH1Znd89Imo7kCScRdXTjIw2yCkotE5lK7A6Ns
NbVuoYEN+dbGioF4csYehnjTdojwF/19mHFxrXkdDZ/V6ZYFIFxEsxL8FEuyI4+o
LC3DFSA4+QAFdkjGFXqFPlaEJxWt5d7wk0y+tt68v+ulkJ900BvR+OOMqQURwrAi
iP3I28aRrMjZYwyqHl8i/qyIv+WRakoDKV+wWteR5DmRAPHmX2vnlPlCmY8ysR6J
2jUAfuDFVu4/qzJe6vw5tmPJMdfvy0W5oogX6sEdin5M5w2b3WrN8nXZcjbWymqP
6jCdl6eoCCkKNOIbr/MMSkd2KqAqDVM5cnnlQ7q+AXzwNpj3RGJVoBxbS0nn9JWY
QNQrWh9rAcMIGT+b1le0
=4lsa
-----END PGP PUBLIC KEY BLOCK-----
@@ -1,19 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

# Installing wine, need this for testing mingw or nine

# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update
apt-get install -y --no-remove \
    wine \
    wine32 \
    wine64 \
    xvfb

# Used to initialize the Wine environment to reduce build time
wine64 whoami.exe

@@ -5,11 +5,7 @@ set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates gnupg2 software-properties-common

# Add llvm 13 to the build image
apt-key add .gitlab-ci/container/debian/llvm-snapshot.gpg.key
add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"
apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

@@ -29,12 +25,11 @@ apt-get install -y --no-remove \
    dpkg-cross \
    flex \
    g++ \
    cmake \
    g++-mingw-w64-x86-64 \
    gcc \
    git \
    glslang-tools \
    kmod \
    libclang-13-dev \
    libclang-11-dev \
    libclang-9-dev \
    libclc-dev \
@@ -42,7 +37,6 @@ apt-get install -y --no-remove \
    libepoxy-dev \
    libexpat1-dev \
    libgtk-3-dev \
    libllvm13 \
    libllvm11 \
    libllvm9 \
    libomxil-bellagio-dev \
@@ -60,6 +54,7 @@ apt-get install -y --no-remove \
    libxshmfence-dev \
    libxvmc-dev \
    libxxf86vm-dev \
    libz-mingw-w64-dev \
    make \
    meson \
    pkg-config \
@@ -69,6 +64,7 @@ apt-get install -y --no-remove \
    qemu-user \
    valgrind \
    wget \
    wine64 \
    x11proto-dri2-dev \
    x11proto-gl-dev \
    x11proto-randr-dev \
@@ -78,8 +74,6 @@ apt-get install -y --no-remove \

# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125

. .gitlab-ci/container/debian/x86_build-base-wine.sh

############### Uninstall ephemeral packages

apt-get purge -y $STABLE_EPHEMERAL

@@ -1,74 +0,0 @@
#!/bin/bash

# Pull packages from the msys2 repository that can be used directly.
# We can use https://packages.msys2.org/ to retrieve the newest packages.
mkdir ~/tmp
pushd ~/tmp
MINGW_PACKET_LIST="
mingw-w64-x86_64-headers-git-10.0.0.r14.ga08c638f8-1-any.pkg.tar.zst
mingw-w64-x86_64-vulkan-loader-1.3.211-1-any.pkg.tar.zst
mingw-w64-x86_64-libelf-0.8.13-6-any.pkg.tar.zst
mingw-w64-x86_64-zlib-1.2.12-1-any.pkg.tar.zst
mingw-w64-x86_64-zstd-1.5.2-2-any.pkg.tar.zst
"

for i in $MINGW_PACKET_LIST
do
    wget -q https://mirror.msys2.org/mingw/mingw64/$i
    tar xf $i --strip-components=1 -C /usr/x86_64-w64-mingw32/
done
popd
rm -rf ~/tmp

mkdir -p /usr/x86_64-w64-mingw32/bin

# The output of `wine64 llvm-config --system-libs --cxxflags mcdisassembler`
# contains absolute paths like '-IZ:'.
# The sed is used to replace `-IZ:/usr/x86_64-w64-mingw32/include`
# with `-I/usr/x86_64-w64-mingw32/include`.

# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix it ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/x86_64-w64-mingw32/bin/pkg-config <<EOF
#!/bin/sh

PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig:/usr/x86_64-w64-mingw32/share/pkgconfig pkg-config \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/pkg-config

cat >/usr/x86_64-w64-mingw32/bin/llvm-config <<EOF
#!/bin/sh
wine64 llvm-config \$@ | sed -e "s,Z:/,/,gi"
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-config

cat >/usr/x86_64-w64-mingw32/bin/clang <<EOF
#!/bin/sh
wine64 clang \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/clang

cat >/usr/x86_64-w64-mingw32/bin/llvm-as <<EOF
#!/bin/sh
wine64 llvm-as \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-as

cat >/usr/x86_64-w64-mingw32/bin/llvm-link <<EOF
#!/bin/sh
wine64 llvm-link \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-link

cat >/usr/x86_64-w64-mingw32/bin/opt <<EOF
#!/bin/sh
wine64 opt \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/opt

cat >/usr/x86_64-w64-mingw32/bin/llvm-spirv <<EOF
#!/bin/sh
wine64 llvm-spirv \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-spirv
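
The wrapper idea above generalizes: each MinGW-side tool is a tiny shim that forwards its arguments to the Windows binary under wine64, optionally rewriting Wine's Z: drive prefix back to a Unix path. A hedged Python sketch of a generator for such shims; the write_wine_shim helper and the /tmp output directory are our own illustration, not part of these scripts:

import os
import stat

def write_wine_shim(directory, tool, fix_paths=False):
    # Write a /bin/sh shim that runs `tool` under wine64, mirroring
    # the heredoc wrappers in the script above (illustrative helper).
    body = '#!/bin/sh\nwine64 {} "$@"'.format(tool)
    if fix_paths:
        # llvm-config prints Wine's Z: drive; map it back to /.
        body += ' | sed -e "s,Z:/,/,gi"'
    path = os.path.join(directory, tool)
    with open(path, 'w') as f:
        f.write(body + '\n')
    mode = os.stat(path).st_mode
    os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

# Hypothetical prefix; the real scripts write into /usr/x86_64-w64-mingw32/bin.
prefix = '/tmp/mingw-shims'
os.makedirs(prefix, exist_ok=True)
for tool in ('clang', 'llvm-as', 'llvm-link', 'opt', 'llvm-spirv'):
    write_wine_shim(prefix, tool)
write_wine_shim(prefix, 'llvm-config', fix_paths=True)
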
@@ -1,100 +0,0 @@
#!/bin/bash
wd=$PWD
CMAKE_TOOLCHAIN_MINGW_PATH=$wd/.gitlab-ci/container/debian/x86_mingw-toolchain.cmake
mkdir -p ~/tmp
pushd ~/tmp

# Building DirectX-Headers
git clone https://github.com/microsoft/DirectX-Headers -b v1.606.3 --depth 1
mkdir -p DirectX-Headers/build
pushd DirectX-Headers/build
meson .. \
    --backend=ninja \
    --buildtype=release -Dbuild-test=false \
    -Dprefix=/usr/x86_64-w64-mingw32/ \
    --cross-file=$wd/.gitlab-ci/x86_64-w64-mingw32

ninja install
popd

export VULKAN_SDK_VERSION=1.3.211.0

# Building SPIRV Tools
git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
    https://github.com/KhronosGroup/SPIRV-Tools SPIRV-Tools

git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
    https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers

mkdir -p SPIRV-Tools/build
pushd SPIRV-Tools/build
cmake .. \
    -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
    -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
    -GNinja -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_CROSSCOMPILING=1 \
    -DCMAKE_POLICY_DEFAULT_CMP0091=NEW

ninja install
popd

# Building LLVM
git clone -b release/14.x --depth=1 \
    https://github.com/llvm/llvm-project llvm-project

git clone -b v14.0.0 --depth=1 \
    https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator

mkdir llvm-project/build
pushd llvm-project/build
cmake ../llvm \
    -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
    -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
    -GNinja -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_CROSSCOMPILING=1 \
    -DLLVM_ENABLE_RTTI=ON \
    -DCROSS_TOOLCHAIN_FLAGS_NATIVE=-DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
    -DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
    -DLLVM_ENABLE_PROJECTS="clang" \
    -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" \
    -DLLVM_OPTIMIZED_TABLEGEN=TRUE \
    -DLLVM_ENABLE_ASSERTIONS=TRUE \
    -DLLVM_INCLUDE_UTILS=OFF \
    -DLLVM_INCLUDE_RUNTIMES=OFF \
    -DLLVM_INCLUDE_TESTS=OFF \
    -DLLVM_INCLUDE_EXAMPLES=OFF \
    -DLLVM_INCLUDE_GO_TESTS=OFF \
    -DLLVM_INCLUDE_BENCHMARKS=OFF \
    -DLLVM_BUILD_LLVM_C_DYLIB=OFF \
    -DLLVM_ENABLE_DIA_SDK=OFF \
    -DCLANG_BUILD_TOOLS=ON \
    -DLLVM_SPIRV_INCLUDE_TESTS=OFF

ninja install
popd

# Building libclc
mkdir llvm-project/build-libclc
pushd llvm-project/build-libclc
cmake ../libclc \
    -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
    -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
    -GNinja -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_CROSSCOMPILING=1 \
    -DCMAKE_POLICY_DEFAULT_CMP0091=NEW \
    -DCMAKE_CXX_FLAGS="-m64" \
    -DLLVM_CONFIG="/usr/x86_64-w64-mingw32/bin/llvm-config" \
    -DLLVM_CLANG="/usr/x86_64-w64-mingw32/bin/clang" \
    -DLLVM_AS="/usr/x86_64-w64-mingw32/bin/llvm-as" \
    -DLLVM_LINK="/usr/x86_64-w64-mingw32/bin/llvm-link" \
    -DLLVM_OPT="/usr/x86_64-w64-mingw32/bin/opt" \
    -DLLVM_SPIRV="/usr/x86_64-w64-mingw32/bin/llvm-spirv" \
    -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-"

ninja install
popd

popd # ~/tmp

# Cleanup ~/tmp
rm -rf ~/tmp
@@ -1,13 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

apt-get update
apt-get install -y --no-remove \
    zstd \
    g++-mingw-w64-i686 \
    g++-mingw-w64-x86-64

. .gitlab-ci/container/debian/x86_build-mingw-patch.sh
. .gitlab-ci/container/debian/x86_build-mingw-source-deps.sh
@@ -15,15 +15,17 @@ STABLE_EPHEMERAL=" \
|
||||
python3-pip \
|
||||
"
|
||||
|
||||
# We need multiarch for Wine
|
||||
dpkg --add-architecture i386
|
||||
apt-get update
|
||||
|
||||
apt-get install -y --no-remove \
|
||||
$STABLE_EPHEMERAL \
|
||||
check \
|
||||
clang \
|
||||
cmake \
|
||||
libasan6 \
|
||||
libarchive-dev \
|
||||
libclang-cpp13-dev \
|
||||
libclang-cpp11-dev \
|
||||
libgbm-dev \
|
||||
libglvnd-dev \
|
||||
@@ -39,20 +41,31 @@ apt-get install -y --no-remove \
|
||||
libxcb-xfixes0-dev \
|
||||
libxcb1-dev \
|
||||
libxml2-dev \
|
||||
llvm-13-dev \
|
||||
llvm-11-dev \
|
||||
llvm-9-dev \
|
||||
ocl-icd-opencl-dev \
|
||||
python3-freezegun \
|
||||
python3-pytest \
|
||||
procps \
|
||||
spirv-tools \
|
||||
strace \
|
||||
time
|
||||
time \
|
||||
wine \
|
||||
wine32
|
||||
|
||||
|
||||
. .gitlab-ci/container/container_pre_build.sh
|
||||
|
||||
|
||||
# Debian's pkg-config wrapers for mingw are broken, and there's no sign that
|
||||
# they're going to be fixed, so we'll just have to fix it ourselves
|
||||
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
|
||||
cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF
|
||||
#!/bin/sh
|
||||
|
||||
PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@
|
||||
EOF
|
||||
chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config
|
||||
|
||||
|
||||
# dependencies where we want a specific version
|
||||
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
|
||||
|
||||
@@ -74,9 +87,10 @@ cd shader-db
|
||||
make
|
||||
popd
|
||||
|
||||
git clone https://github.com/microsoft/DirectX-Headers -b v1.606.3 --depth 1
|
||||
mkdir -p DirectX-Headers/build
|
||||
pushd DirectX-Headers/build
|
||||
git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1
|
||||
pushd DirectX-Headers
|
||||
mkdir build
|
||||
cd build
|
||||
meson .. --backend=ninja --buildtype=release -Dbuild-test=false
|
||||
ninja
|
||||
ninja install
|
||||
|
@@ -1,8 +0,0 @@
set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR x86_64)

set(CMAKE_SYSROOT /usr/x86_64-w64-mingw32/)
set(ENV{PKG_CONFIG} /usr/x86_64-w64-mingw32/bin/pkg-config)

set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc-posix)
set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++-posix)
@@ -5,7 +5,7 @@ set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates gnupg2 software-properties-common
apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

@@ -19,10 +19,6 @@ STABLE_EPHEMERAL=" \
    python3-wheel \
    "

# Add llvm 13 to the build image
apt-key add .gitlab-ci/container/debian/llvm-snapshot.gpg.key
add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"

apt-get update
apt-get dist-upgrade -y

@@ -31,7 +27,6 @@ apt-get install -y --no-remove \
    git-lfs \
    libasan6 \
    libexpat1 \
    libllvm13 \
    libllvm11 \
    libllvm9 \
    liblz4-1 \
@@ -13,7 +13,6 @@ STABLE_EPHEMERAL=" \
|
||||
bison \
|
||||
bzip2 \
|
||||
ccache \
|
||||
clang-13 \
|
||||
clang-11 \
|
||||
cmake \
|
||||
flex \
|
||||
@@ -21,7 +20,6 @@ STABLE_EPHEMERAL=" \
|
||||
glslang-tools \
|
||||
libasound2-dev \
|
||||
libcap-dev \
|
||||
libclang-cpp13-dev \
|
||||
libclang-cpp11-dev \
|
||||
libelf-dev \
|
||||
libexpat1-dev \
|
||||
@@ -39,7 +37,6 @@ STABLE_EPHEMERAL=" \
|
||||
libxext-dev \
|
||||
libxkbcommon-dev \
|
||||
libxrender-dev \
|
||||
llvm-13-dev \
|
||||
llvm-11-dev \
|
||||
llvm-spirv \
|
||||
make \
|
||||
@@ -51,15 +48,11 @@ STABLE_EPHEMERAL=" \
|
||||
xz-utils \
|
||||
"
|
||||
|
||||
apt-get update
|
||||
|
||||
apt-get install -y --no-remove \
|
||||
$STABLE_EPHEMERAL \
|
||||
clinfo \
|
||||
iptables \
|
||||
libclang-common-13-dev \
|
||||
libclang-common-11-dev \
|
||||
libclang-cpp13 \
|
||||
libclang-cpp11 \
|
||||
libcap2 \
|
||||
libegl1 \
|
||||
@@ -71,7 +64,6 @@ apt-get install -y --no-remove \
|
||||
python3-lxml \
|
||||
python3-renderdoc \
|
||||
python3-simplejson \
|
||||
socat \
|
||||
spirv-tools \
|
||||
sysvinit-core \
|
||||
wget
|
||||
|
@@ -1,6 +1,4 @@
#!/bin/bash
# The relative paths in this file only become valid at runtime.
# shellcheck disable=SC1091

set -e
set -o xtrace
@@ -16,7 +14,6 @@ STABLE_EPHEMERAL=" \
    g++-mingw-w64-x86-64-posix \
    glslang-tools \
    libexpat1-dev \
    gnupg2 \
    libgbm-dev \
    libgles2-mesa-dev \
    liblz4-dev \
@@ -38,21 +35,14 @@ STABLE_EPHEMERAL=" \
    p7zip \
    patch \
    pkg-config \
    python3-dev \
    python3-distutils \
    python3-pip \
    python3-setuptools \
    python3-wheel \
    software-properties-common \
    wget \
    wine64-tools \
    xz-utils \
    "

apt-get install -y --no-remove \
    $STABLE_EPHEMERAL \
    libxcb-shm0 \
    pciutils \
    python3-lxml \
    python3-simplejson \
    xinit \
@@ -62,16 +52,12 @@ apt-get install -y --no-remove \
# We need multiarch for Wine
dpkg --add-architecture i386

# Install a more recent version of Wine than exists in Debian.
apt-key add .gitlab-ci/container/debian/winehq.gpg.key
apt-add-repository https://dl.winehq.org/wine-builds/debian/
apt update -qyy
apt-get update

# Needed for Valve's tracing jobs to collect information about the graphics
# hardware on the test devices.
pip3 install gfxinfo-mupuf==0.0.9

apt install -y --no-remove --install-recommends winehq-stable
apt-get install -y --no-remove \
    wine \
    wine32 \
    wine64

function setup_wine() {
    export WINEDEBUG="-all"
@@ -100,44 +86,28 @@ EOF

############### Install DXVK

dxvk_install_release() {
    local DXVK_VERSION=${1:-"1.10.1"}
DXVK_VERSION="1.8.1"

    wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
    tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
    "dxvk-${DXVK_VERSION}"/setup_dxvk.sh install
    rm -rf "dxvk-${DXVK_VERSION}"
    rm dxvk-"${DXVK_VERSION}".tar.gz
}

# Install from a Github PR number
dxvk_install_pr() {
    local __prnum=$1

    # NOTE: Clone the entire history of the repo so as not to think
    # harder about cloning just enough for 'git describe' to work. 'git
    # describe' is used by the dxvk build system to generate a
    # dxvk_version Meson variable, which is nice-to-have.
    git clone https://github.com/doitsujin/dxvk
    pushd dxvk
    git fetch origin pull/"$__prnum"/head:pr
    git checkout pr
    ./package-release.sh pr ../dxvk-build --no-package
    popd
    pushd ./dxvk-build/dxvk-pr
    ./setup_dxvk.sh install
    popd
    rm -rf ./dxvk-build ./dxvk
}

# Sets up the WINEPREFIX for the DXVK installation commands below.
setup_wine "/dxvk-wine64"
dxvk_install_release "1.10.1"
#dxvk_install_pr 2359

############### Install apitrace binaries for wine
wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install
rm -rf dxvk-"${DXVK_VERSION}"
rm dxvk-"${DXVK_VERSION}".tar.gz

############### Install Windows' apitrace binaries

APITRACE_VERSION="10.0"
APITRACE_VERSION_DATE=""

wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
    "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
    "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"

. .gitlab-ci/container/install-wine-apitrace.sh
# Add the apitrace path to the registry
wine \
    reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
@@ -174,10 +144,6 @@ PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh

. .gitlab-ci/container/build-deqp.sh

############### Build apitrace

. .gitlab-ci/container/build-apitrace.sh

############### Build gfxreconstruct

. .gitlab-ci/container/build-gfxreconstruct.sh
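
The dxvk_install_release helper above follows a common fetch-unpack-install pattern for versioned GitHub release tarballs. For illustration only, a hedged Python rendering of the same flow; the function mirrors the shell helper but is not part of Mesa's CI, and actually running setup_dxvk.sh still requires a Wine environment:

import subprocess
import tarfile
import urllib.request

def dxvk_install_release(version="1.10.1"):
    # Fetch a DXVK release tarball, unpack it, and run its bundled
    # setup script, mirroring the shell helper above (sketch only).
    name = "dxvk-{}".format(version)
    url = ("https://github.com/doitsujin/dxvk/releases/download/"
           "v{}/{}.tar.gz".format(version, name))
    urllib.request.urlretrieve(url, name + ".tar.gz")
    with tarfile.open(name + ".tar.gz") as tar:
        tar.extractall()
    subprocess.check_call(["./{}/setup_dxvk.sh".format(name), "install"])
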
@@ -25,10 +25,8 @@ dnf install -y --setopt=install_weak_deps=False \
    gcc \
    gcc-c++ \
    gettext \
    glslang \
    kernel-headers \
    llvm-devel \
    clang-devel \
    meson \
    "pkgconfig(dri2proto)" \
    "pkgconfig(expat)" \
@@ -65,8 +63,6 @@ dnf install -y --setopt=install_weak_deps=False \
    python3-devel \
    python3-mako \
    vulkan-headers \
    spirv-tools-devel \
    spirv-llvm-translator-devel \
    $EPHEMERAL

@@ -1,458 +0,0 @@
# Docker image tag helper templates

.incorporate-templates-commit:
  variables:
    FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"

.incorporate-base-tag+templates-commit:
  variables:
    FDO_BASE_IMAGE: "${CI_REGISTRY_IMAGE}/${MESA_BASE_IMAGE}:${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
    FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"

.set-image:
  extends:
    - .incorporate-templates-commit
  variables:
    MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
  image: "$MESA_IMAGE"

.set-image-base-tag:
  extends:
    - .set-image
    - .incorporate-base-tag+templates-commit
  variables:
    MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"

.use-wine:
  variables:
    WINEPATH: "/usr/x86_64-w64-mingw32/bin;/usr/x86_64-w64-mingw32/lib;/usr/lib/gcc/x86_64-w64-mingw32/10-posix;c:/windows;c:/windows/system32"

# Build the CI docker images.
#
# MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the
# image doesn't exist yet, the container stage job generates it.
#
# In order to generate a new image, one should generally change the tag.
# While removing the image from the registry would also work, that's not
# recommended except for ephemeral images during development: Replacing
# an image after a significant amount of time might pull in newer
# versions of gcc/clang or other packages, which might break the build
# with older commits using the same tag.
#
# After merging a change resulting in generating a new image to the
# main repository, it's recommended to remove the image from the source
# repository's container registry, so that the image from the main
# repository's registry will be used there as well.

.container:
  stage: container
  extends:
    - .container-rules
    - .incorporate-templates-commit
    - .use-wine
  variables:
    FDO_DISTRIBUTION_VERSION: bullseye-slim
    FDO_REPO_SUFFIX: $CI_JOB_NAME
    FDO_DISTRIBUTION_EXEC: 'env "WINEPATH=${WINEPATH}" FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    # no need to pull the whole repo to build the container image
    GIT_STRATEGY: none

.use-base-image:
  extends:
    - .container
    - .incorporate-base-tag+templates-commit
    # Don't want the .container rules
    - .build-rules

# Debian 11 based x86 build image base
debian/x86_build-base:
  extends:
    - .fdo.container-build@debian
    - .container
  variables:
    MESA_IMAGE_TAG: &debian-x86_build-base ${DEBIAN_BASE_TAG}

.use-debian/x86_build-base:
  extends:
    - .fdo.container-build@debian
    - .use-base-image
  variables:
    MESA_BASE_IMAGE: ${DEBIAN_X86_BUILD_BASE_IMAGE}
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_ARTIFACTS_BASE_TAG: *debian-x86_build-base
  needs:
    - debian/x86_build-base

# Debian 11 based x86 main build image
debian/x86_build:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-x86_build ${DEBIAN_BUILD_TAG}

.use-debian/x86_build:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_IMAGE_PATH}
    MESA_IMAGE_TAG: *debian-x86_build
  needs:
    - debian/x86_build

# Debian 11 based i386 cross-build image
debian/i386_build:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-i386_build ${DEBIAN_BUILD_TAG}

.use-debian/i386_build:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: "debian/i386_build"
    MESA_IMAGE_TAG: *debian-i386_build
  needs:
    - debian/i386_build

# Debian 11 based x86-mingw cross main build image
debian/x86_build-mingw:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-x86_build_mingw ${DEBIAN_BUILD_MINGW_TAG}

.use-debian/x86_build_mingw:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_MINGW_IMAGE_PATH}
    MESA_IMAGE_TAG: *debian-x86_build_mingw
  needs:
    - debian/x86_build-mingw

# Debian 11 based ppc64el cross-build image
debian/ppc64el_build:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-ppc64el_build ${DEBIAN_BUILD_TAG}

.use-debian/ppc64el_build:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: "debian/ppc64el_build"
    MESA_IMAGE_TAG: *debian-ppc64el_build
  needs:
    - debian/ppc64el_build

# Debian 11 based s390x cross-build image
debian/s390x_build:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-s390x_build ${DEBIAN_BUILD_TAG}

.use-debian/s390x_build:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: "debian/s390x_build"
    MESA_IMAGE_TAG: *debian-s390x_build
  needs:
    - debian/s390x_build

# Android NDK cross-build image
debian/android_build:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-android_build ${DEBIAN_BUILD_TAG}

.use-debian/android_build:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: "debian/android_build"
    MESA_IMAGE_TAG: *debian-android_build
  needs:
    - debian/android_build

# Debian 11 based x86 test image base
debian/x86_test-base:
  extends: debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-x86_test-base ${DEBIAN_BASE_TAG}

.use-debian/x86_test-base:
  extends:
    - .fdo.container-build@debian
    - .use-base-image
  variables:
    MESA_BASE_IMAGE: ${DEBIAN_X86_TEST_BASE_IMAGE}
    MESA_BASE_TAG: *debian-x86_test-base
  needs:
    - debian/x86_test-base

# Debian 11 based x86 test image for GL
debian/x86_test-gl:
  extends: .use-debian/x86_test-base
  variables:
    FDO_DISTRIBUTION_EXEC: 'env KERNEL_URL=${KERNEL_URL} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.17-for-mesa-ci-b78f7870d97b/linux-v5.17-for-mesa-ci-b78f7870d97b.tar.bz2"
    MESA_IMAGE_TAG: &debian-x86_test-gl ${DEBIAN_X86_TEST_GL_TAG}

.use-debian/x86_test-gl:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_test-base
    MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_PATH}
    MESA_IMAGE_TAG: *debian-x86_test-gl
  needs:
    - debian/x86_test-gl

# Debian 11 based x86 test image for VK
debian/x86_test-vk:
  extends: .use-debian/x86_test-base
  variables:
    MESA_IMAGE_TAG: &debian-x86_test-vk ${DEBIAN_X86_TEST_VK_TAG}

.use-debian/x86_test-vk:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_test-base
    MESA_IMAGE_PATH: "debian/x86_test-vk"
    MESA_IMAGE_TAG: *debian-x86_test-vk
  needs:
    - debian/x86_test-vk

# Debian 11 based ARM build image
debian/arm_build:
  extends:
    - .fdo.container-build@debian
    - .container
  tags:
    - aarch64
  variables:
    MESA_IMAGE_TAG: &debian-arm_build ${DEBIAN_BASE_TAG}

.use-debian/arm_build:
  extends:
    - .set-image
  variables:
    MESA_IMAGE_PATH: "debian/arm_build"
    MESA_IMAGE_TAG: *debian-arm_build
    MESA_ARTIFACTS_TAG: *debian-arm_build
  needs:
    - debian/arm_build


# Fedora 34 based x86 build image
fedora/x86_build:
  extends:
    - .fdo.container-build@fedora
    - .container
  variables:
    FDO_DISTRIBUTION_VERSION: 34
    MESA_IMAGE_TAG: &fedora-x86_build ${FEDORA_X86_BUILD_TAG}

.use-fedora/x86_build:
  extends:
    - .set-image
  variables:
    MESA_IMAGE_PATH: "fedora/x86_build"
    MESA_IMAGE_TAG: *fedora-x86_build
  needs:
    - fedora/x86_build


.kernel+rootfs:
  extends:
    - .build-rules
  stage: container
  variables:
    GIT_STRATEGY: fetch
    KERNEL_URL: *kernel-rootfs-url
    MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG}
    DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
  script:
    - .gitlab-ci/container/lava_build.sh

kernel+rootfs_amd64:
  extends:
    - .use-debian/x86_build-base
    - .kernel+rootfs
  image: "$FDO_BASE_IMAGE"
  variables:
    DEBIAN_ARCH: "amd64"
    DISTRIBUTION_TAG: &distribution-tag-amd64 "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"

kernel+rootfs_arm64:
  extends:
    - .use-debian/arm_build
    - .kernel+rootfs
  tags:
    - aarch64
  variables:
    DEBIAN_ARCH: "arm64"

kernel+rootfs_armhf:
  extends:
    - kernel+rootfs_arm64
  variables:
    DEBIAN_ARCH: "armhf"

# Cannot use anchors defined here from included files, so use extends: instead
.use-kernel+rootfs-arm:
  variables:
    DISTRIBUTION_TAG: *distribution-tag-arm
    MESA_ROOTFS_TAG: *kernel-rootfs

.use-kernel+rootfs-amd64:
  variables:
    DISTRIBUTION_TAG: *distribution-tag-amd64
    MESA_ROOTFS_TAG: *kernel-rootfs

# x86 image with ARM64 & armhf kernel & rootfs for baremetal testing
debian/arm_test:
  extends:
    - .fdo.container-build@debian
    - .container
    # Don't want the .container rules
    - .build-rules
  needs:
    - kernel+rootfs_arm64
    - kernel+rootfs_armhf
  variables:
    FDO_DISTRIBUTION_EXEC: 'env ARTIFACTS_PREFIX=https://${MINIO_HOST}/mesa-lava ARTIFACTS_SUFFIX=${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT} CI_PROJECT_PATH=${CI_PROJECT_PATH} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} FDO_UPSTREAM_REPO=${FDO_UPSTREAM_REPO} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
    MESA_ARM_BUILD_TAG: *debian-arm_build
    MESA_IMAGE_TAG: &debian-arm_test ${DEBIAN_BASE_TAG}
    MESA_ROOTFS_TAG: *kernel-rootfs

.use-debian/arm_test:
  image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
  variables:
    MESA_ARM_BUILD_TAG: *debian-arm_build
    MESA_IMAGE_PATH: "debian/arm_test"
    MESA_IMAGE_TAG: *debian-arm_test
    MESA_ROOTFS_TAG: *kernel-rootfs
  needs:
    - debian/arm_test

# Native Windows docker builds
#
# Unlike the above Linux-based builds - including MinGW builds which
# cross-compile for Windows - which use the freedesktop ci-templates, we
# cannot use the same scheme here. As Windows lacks support for
# Docker-in-Docker, and Podman does not run natively on Windows, we have
# to open-code much of the same ourselves.
#
# This is achieved by first running in a native Windows shell instance
# (host PowerShell) in the container stage to build and push the image,
# then in the build stage by executing inside Docker.

.windows-docker-vs2019:
  variables:
    MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}"
    MESA_UPSTREAM_IMAGE: "$CI_REGISTRY/$FDO_UPSTREAM_REPO/$MESA_IMAGE_PATH:${MESA_IMAGE_TAG}"

.windows_container_build:
  inherit:
    default: [retry]
  extends:
    - .container
    - .windows-docker-vs2019
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.container-rules, rules]
  variables:
    GIT_STRATEGY: fetch # we do actually need the full repository though
    MESA_BASE_IMAGE: None
  tags:
    - windows
    - shell
    - "2022"
    - mesa
  script:
    - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE}

windows_vs2019:
  inherit:
    default: [retry]
  extends:
    - .windows_container_build
  variables:
    MESA_IMAGE_PATH: &windows_vs_image_path ${WINDOWS_X64_VS_PATH}
    MESA_IMAGE_TAG: &windows_vs_image_tag ${WINDOWS_X64_VS_TAG}
    DOCKERFILE: Dockerfile_vs
    MESA_BASE_IMAGE: "mcr.microsoft.com/windows/server:ltsc2022"

windows_build_vs2019:
  inherit:
    default: [retry]
  extends:
    - .windows_container_build
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.build-rules, rules]
  variables:
    MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH}
    MESA_IMAGE_TAG: &windows_build_image_tag ${WINDOWS_X64_BUILD_TAG}
    DOCKERFILE: Dockerfile_build
    MESA_BASE_IMAGE_PATH: *windows_vs_image_path
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
    MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
  timeout: 2h 30m # LLVM takes ages
  needs:
    - windows_vs2019

windows_test_vs2019:
  inherit:
    default: [retry]
  extends:
    - .windows_container_build
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.build-rules, rules]
  variables:
    MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH}
    MESA_IMAGE_TAG: &windows_test_image_tag ${WINDOWS_X64_BUILD_TAG}--${WINDOWS_X64_TEST_TAG}
    DOCKERFILE: Dockerfile_test
    MESA_BASE_IMAGE_PATH: *windows_vs_image_path
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
    MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
  needs:
    - windows_vs2019

.use-windows_build_vs2019:
  inherit:
    default: [retry]
  extends: .windows-docker-vs2019
  image: "$MESA_IMAGE"
  variables:
    MESA_IMAGE_PATH: *windows_build_image_path
    MESA_IMAGE_TAG: *windows_build_image_tag
  needs:
    - windows_build_vs2019

.use-windows_test_vs2019:
  inherit:
    default: [retry]
  extends: .windows-docker-vs2019
  image: "$MESA_IMAGE"
  variables:
    MESA_IMAGE_PATH: *windows_test_image_path
    MESA_IMAGE_TAG: *windows_test_image_tag
@@ -1,13 +0,0 @@
#!/bin/bash

APITRACE_VERSION="11.1"
APITRACE_VERSION_DATE=""

wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
    "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
    "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
@@ -25,7 +25,6 @@ check_minio "${CI_PROJECT_PATH}"
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    GCC_ARCH="aarch64-linux-gnu"
    KERNEL_ARCH="arm64"
    SKQP_ARCH="arm64"
    DEFCONFIG="arch/arm64/configs/defconfig"
    DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
@@ -35,14 +34,11 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dtb"
    KERNEL_IMAGE_NAME="Image"

elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
    GCC_ARCH="arm-linux-gnueabihf"
    KERNEL_ARCH="arm"
    SKQP_ARCH="arm"
    DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
    DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
@@ -52,11 +48,10 @@ elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
else
    GCC_ARCH="x86_64-linux-gnu"
    KERNEL_ARCH="x86_64"
    SKQP_ARCH="x64"
    DEFCONFIG="arch/x86/configs/x86_64_defconfig"
    DEVICE_TREES=""
    KERNEL_IMAGE_NAME="bzImage"
    ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols"
    ARCH_PACKAGES="libva-dev"
fi

# Determine if we're in a cross build.
@@ -81,14 +76,12 @@ apt-get install -y --no-remove \
    ${ARCH_PACKAGES} \
    automake \
    bc \
    clang \
    cmake \
    debootstrap \
    git \
    glslang-tools \
    libdrm-dev \
    libegl1-mesa-dev \
    libxext-dev \
    libfontconfig-dev \
    libgbm-dev \
    libgl-dev \
@@ -133,7 +126,7 @@ fi

############### Building
STRIP_CMD="${GCC_ARCH}-strip"
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}


############### Build apitrace
@@ -156,12 +149,12 @@ mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.


############### Build SKQP
if [[ "$DEBIAN_ARCH" = "arm64" ]] \
  || [[ "$DEBIAN_ARCH" = "amd64" ]]; then
    . .gitlab-ci/container/build-skqp.sh
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    SKQP_ARCH="arm64" . .gitlab-ci/container/build-skqp.sh
    mv /skqp /lava-files/rootfs-${DEBIAN_ARCH}/.
fi


############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.
@@ -172,26 +165,10 @@ if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
    mv /va/bin/* /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
fi

############### Build Crosvm
if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
    . .gitlab-ci/container/build-crosvm.sh
    mv /usr/local/bin/crosvm /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
    mv /usr/local/lib/$GCC_ARCH/libvirglrenderer.* /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/
fi

############### Build libdrm
EXTRA_MESON_ARGS+=" -D prefix=/libdrm"
. .gitlab-ci/container/build-libdrm.sh


############### Build local stuff for use by igt and kernel testing, which
############### will reuse most of our container build process from a specific
############### hash of the Mesa tree.
if [[ -e ".gitlab-ci/local/build-rootfs.sh" ]]; then
    . .gitlab-ci/local/build-rootfs.sh
fi


############### Build kernel
. .gitlab-ci/container/build-kernel.sh

@@ -214,9 +191,7 @@ fi
set -e

cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/.
cp .gitlab-ci/container/debian/llvm-snapshot.gpg.key /lava-files/rootfs-${DEBIAN_ARCH}/.
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/llvm-snapshot.gpg.key
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh


@@ -224,6 +199,7 @@ rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has already been
# created.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/
cp -Rp /libdrm/share /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/share
@@ -72,11 +72,9 @@ CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_SERIAL_8250_DW=y
CONFIG_CHROME_PLATFORMS=y
CONFIG_KVM_AMD=m

#options for Intel devices
CONFIG_MFD_INTEL_LPSS_PCI=y
CONFIG_KVM_INTEL=m

#options for KVM guests
CONFIG_FUSE_FS=y
@@ -100,6 +98,3 @@ CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_TUN=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_VHOST_VSOCK=m
@@ -2,9 +2,7 @@

set -e

VSOCK_STDOUT=$1
VSOCK_STDERR=$2
VM_TEMP_DIR=$3
export DEQP_TEMP_DIR="$1"

mount -t proc none /proc
mount -t sysfs none /sys
@@ -12,31 +10,29 @@ mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp

. ${VM_TEMP_DIR}/crosvm-env.sh
. $DEQP_TEMP_DIR/crosvm-env.sh

# The .gitlab-ci.yml script variable uses paths relative to the install
# directory, so change to that dir before running `crosvm-script`
cd "${CI_PROJECT_DIR}"

# The exception is the dEQP binary, as it needs to run from its own directory
[ -z "${DEQP_BIN_DIR}" ] || cd "${DEQP_BIN_DIR}"
# The exception is the dEQP binary, since it needs to run from the directory
# it's in
if [ -d "${DEQP_BIN_DIR}" ]
then
    cd "${DEQP_BIN_DIR}"
fi

# Use a FIFO to collect relevant error messages
STDERR_FIFO=/tmp/crosvm-stderr.fifo
mkfifo -m 600 ${STDERR_FIFO}
dmesg --level crit,err,warn -w >> $DEQP_TEMP_DIR/stderr &

dmesg --level crit,err,warn -w > ${STDERR_FIFO} &
DMESG_PID=$!

# Transfer the errors and crosvm-script output via a pair of virtio-vsocks
socat -d -u pipe:${STDERR_FIFO} vsock-listen:${VSOCK_STDERR} &
socat -d -U vsock-listen:${VSOCK_STDOUT} \
    system:"stdbuf -eL sh ${VM_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VM_TEMP_DIR}/exit_code",nofork

kill ${DMESG_PID}
wait
set +e
stdbuf -oL sh $DEQP_TEMP_DIR/crosvm-script.sh 2>> $DEQP_TEMP_DIR/stderr >> $DEQP_TEMP_DIR/stdout
echo $? > $DEQP_TEMP_DIR/exit_code
set -e

sync
sleep 1

poweroff -d -n -f || true

sleep 1 # Just in case init would exit before the kernel shuts down the VM
@@ -1,121 +1,58 @@
#!/bin/sh

set -e
set -ex

# If run outside of a deqp-runner invocation (e.g. piglit trace replay), then act
# the same as the first thread in its threadpool.
THREAD=${DEQP_RUNNER_THREAD:-0}

#
# Helper to generate CIDs for virtio-vsock based communication with processes
# running inside crosvm guests.
#
# A CID is a 32-bit Context Identifier to be assigned to a crosvm instance
# and must be unique across the host system. For this purpose, let's take
# the least significant 25 bits from CI_JOB_ID as a base and generate a 7-bit
# prefix number to handle up to 128 concurrent crosvm instances per job runner.
#
# As a result, the following variables are set:
# - VSOCK_CID: the crosvm unique CID to be passed as a run argument
#
# - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept
#   vsock connections on in order to transfer output messages
#
# - VM_TEMP_DIR: the temporary directory path used to pass additional
#   context data towards the guest
#
set_vsock_context() {
    [ -n "${CI_JOB_ID}" ] || {
        echo "Missing or unset CI_JOB_ID env variable" >&2
        exit 1
    }

    VM_TEMP_DIR="/tmp-vm.${THREAD}"
    # Clear out any leftover files from a previous run.
    rm -rf $VM_TEMP_DIR
    mkdir $VM_TEMP_DIR || return 1

    VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((${THREAD} & 0x7f) << 25)))
    VSOCK_STDOUT=5001
    VSOCK_STDERR=5002

    return 0
}
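
# A quick sanity check of the packing above, with hypothetical values:
# CI_JOB_ID=123456789 keeps only its low 25 bits (123456789 & 0x1ffffff ==
# 22793493) and thread 5 contributes the prefix (5 << 25 == 167772160), so
#   echo $(( (123456789 & 0x1ffffff) | ((5 & 0x7f) << 25) ))   # -> 190565653
# Distinct threads of the same job therefore always map to distinct CIDs.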
# This script can be called concurrently; pass arguments and env in a
# per-instance tmp dir
DEQP_TEMP_DIR=$(mktemp -d /tmp.XXXXXXXXXX)
export DEQP_TEMP_DIR

# The dEQP binary needs to run from the directory it's in
if [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then
    DEQP_BIN_DIR=$(dirname "$1")
    export DEQP_BIN_DIR
fi

VM_SOCKET=crosvm-${THREAD}.sock

# Terminate any existing crosvm, if a previous invocation of this shell script
# was terminated due to timeouts. This "vm stop" may fail if the crosvm died
# without cleaning itself up.
if [ -e $VM_SOCKET ]; then
    crosvm stop $VM_SOCKET || rm -rf $VM_SOCKET
    # Wait for socats from that invocation to drain
    sleep 5
fi

set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }

# Securely pass the current variables to the crosvm environment
CI_COMMON="$CI_PROJECT_DIR"/install/common
echo "Variables passed through:"
SCRIPT_DIR=$(readlink -en "${0%/*}")
${SCRIPT_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
"${CI_COMMON}"/generate-env.sh | tee ${DEQP_TEMP_DIR}/crosvm-env.sh

# Set the crosvm-script as the arguments of the current script
echo "$@" > ${VM_TEMP_DIR}/crosvm-script.sh
CROSVM_KERNEL_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs init=$CI_PROJECT_DIR/install/crosvm-init.sh ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0 -- $DEQP_TEMP_DIR"

# Setup networking
/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
# Set the crosvm-script as the arguments of the current script.
echo "$@" > $DEQP_TEMP_DIR/crosvm-script.sh

# Start background processes to receive output from guest
socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr &
socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout &

# Prepare to start crosvm
unset DISPLAY
unset XDG_RUNTIME_DIR

CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"
/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward

[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \
    CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false
# Send output from guest to host
touch $DEQP_TEMP_DIR/stderr $DEQP_TEMP_DIR/stdout
tail -f $DEQP_TEMP_DIR/stderr >> /dev/stderr &
ERR_TAIL_PID=$!
tail -f $DEQP_TEMP_DIR/stdout >> /dev/stdout &
OUT_TAIL_PID=$!

set +e -x
trap "exit \$exit_code" INT TERM
trap "exit_code=\$?; kill $ERR_TAIL_PID $OUT_TAIL_PID; rm -rf $DEQP_TEMP_DIR" EXIT

# We aren't testing the host driver here, so we don't need to validate NIR on the host
NIR_DEBUG="novalidate" \
    LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
    GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
    crosvm run \
    --gpu "${CROSVM_GPU_ARGS}" -m 4096 -c 2 --disable-sandbox \
    --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
    --host_ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \
    -s $VM_SOCKET \
    --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \
    /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VM_TEMP_DIR}/crosvm 2>&1
# We aren't testing LLVMPipe here, so we don't need to validate NIR on the host
NIR_DEBUG="novalidate" LIBGL_ALWAYS_SOFTWARE="true" GALLIUM_DRIVER="$CROSVM_GALLIUM_DRIVER" crosvm run \
    --gpu "$CROSVM_GPU_ARGS" \
    -m 4096 \
    -c 2 \
    --disable-sandbox \
    --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
    --host_ip=192.168.30.1 --netmask=255.255.255.0 --mac "AA:BB:CC:00:00:12" \
    -p "$CROSVM_KERNEL_ARGS" \
    /lava-files/bzImage > $DEQP_TEMP_DIR/crosvm 2>&1

CROSVM_RET=$?
RET=$(cat $DEQP_TEMP_DIR/exit_code || true)

[ ${CROSVM_RET} -eq 0 ] && {
    # The actual return code is the crosvm guest script's exit code
    CROSVM_RET=$(cat ${VM_TEMP_DIR}/exit_code 2>/dev/null)
    # Force error when the guest script's exit code is not available
    CROSVM_RET=${CROSVM_RET:-1}
}
# Got no exit code from the script, show crosvm output to help with debugging
[ -n "$RET" ] || cat $DEQP_TEMP_DIR/crosvm || true

# Show crosvm output on error to help with debugging
[ ${CROSVM_RET} -eq 0 ] || {
    set +x
    echo "Dumping crosvm output.." >&2
    cat ${VM_TEMP_DIR}/crosvm >&2
    set -x
}

exit ${CROSVM_RET}
exit ${RET:-1}

@@ -1,6 +1,4 @@
#!/bin/bash

echo -e "\e[0Ksection_start:$(date +%s):test_setup[collapsed=true]\r\e[0Kpreparing test setup"
#!/bin/sh

set -ex

@@ -22,15 +20,6 @@ export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_C
RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS

# Ensure Mesa Shader Cache resides on tmpfs.
SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}

findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
    mkdir -p ${SHADER_CACHE_DIR}
    mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
}
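# Note: `findmnt -n tmpfs <dir>` exits non-zero when no tmpfs is mounted at
# <dir>, so the block above only creates and mounts the cache directory when
# neither location is already tmpfs-backed.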

HANG_DETECTION_CMD=""

if [ -z "$DEQP_SUITE" ]; then
@@ -123,6 +112,8 @@ if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
fi

set +e

report_load() {
    echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
    echo "# of CPU cores: $(cat /proc/cpuinfo | grep processor | wc -l)"
@@ -157,16 +148,7 @@ if [ -z "$DEQP_SUITE" ]; then
    if [ $DEQP_VER != vk -a $DEQP_VER != egl ]; then
        export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check `cat $INSTALL/VERSION | sed 's/[() ]/./g'`"
    fi
fi

set +x
echo -e "\e[0Ksection_end:$(date +%s):test_setup\r\e[0K"

echo -e "\e[0Ksection_start:$(date +%s):deqp[collapsed=false]\r\e[0Kdeqp-runner"
set -x

set +e
if [ -z "$DEQP_SUITE" ]; then
    deqp-runner \
        run \
        --deqp $DEQP \
@@ -188,20 +170,14 @@ else
        --flakes $INSTALL/$GPU_VERSION-flakes.txt \
        --testlog-to-xml /deqp/executor/testlog-to-xml \
        --fraction-start $CI_NODE_INDEX \
        --fraction `expr $CI_NODE_TOTAL \* ${DEQP_FRACTION:-1}` \
        --fraction $CI_NODE_TOTAL \
        --jobs ${FDO_CI_CONCURRENT:-4} \
        $DEQP_RUNNER_OPTIONS
fi
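# For the fraction bookkeeping above, an illustrative case: with
# CI_NODE_TOTAL=4 parallel nodes and DEQP_FRACTION=2, `expr 4 \* 2` passes
# --fraction 8, so each node runs every 8th test starting at its own
# --fraction-start offset, i.e. half of its usual quarter of the suite.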

DEQP_EXITCODE=$?

set +x
echo -e "\e[0Ksection_end:$(date +%s):deqp\r\e[0K"

report_load

echo -e "\e[0Ksection_start:$(date +%s):test_post_process[collapsed=true]\r\e[0Kpost-processing test results"
set -x
quiet report_load

# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
@@ -237,6 +213,4 @@ if [ -n "$FLAKES_CHANNEL" ]; then
        --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi

echo -e "\e[0Ksection_end:$(date +%s):test_post_process\r\e[0K"

exit $DEQP_EXITCODE

@@ -5,7 +5,7 @@ set -o xtrace

# if we run this script outside of gitlab-ci for testing, ensure
# we got meaningful variables
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/$CI_PROJECT_NAME}
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/mesa}

if [[ -e $CI_PROJECT_DIR/.git ]]
then
@@ -16,8 +16,8 @@ fi
TMP_DIR=$(mktemp -d)

echo "Downloading archived master..."
/usr/bin/wget -O $TMP_DIR/$CI_PROJECT_NAME.tar.gz \
    https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz
/usr/bin/wget -O $TMP_DIR/mesa.tar.gz \
    https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/mesa.tar.gz

# check wget error code
if [[ $? -ne 0 ]]
@@ -31,6 +31,6 @@ set -e
rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" -C "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/mesa.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"
.gitlab-ci/gtest-runner.sh: Executable file → Normal file (no content changes)
@@ -1,27 +1,21 @@
variables:
  DEBIAN_X86_BUILD_BASE_IMAGE: "debian/x86_build-base"
  DEBIAN_BASE_TAG: "2022-07-01-bb-llvm13"
  DEBIAN_BASE_TAG: "2022-01-02-deqp-runner"

  DEBIAN_X86_BUILD_IMAGE_PATH: "debian/x86_build"
  DEBIAN_BUILD_TAG: "2022-07-14-directx-headers"

  DEBIAN_X86_BUILD_MINGW_IMAGE_PATH: "debian/x86_build-mingw"
  DEBIAN_BUILD_MINGW_TAG: "2022-07-14-directx-headers"
  DEBIAN_BUILD_TAG: "2021-12-31-keep-cmake"

  DEBIAN_X86_TEST_BASE_IMAGE: "debian/x86_test-base"

  DEBIAN_X86_TEST_IMAGE_PATH: "debian/x86_test-gl"
  DEBIAN_X86_TEST_GL_TAG: "2022-07-06-virgl-update"
  DEBIAN_X86_TEST_VK_TAG: "2022-07-18-apitrace-11-1"
  DEBIAN_X86_TEST_GL_TAG: "2022-01-18-kernel"
  DEBIAN_X86_TEST_VK_TAG: "2022-01-02-deqp-runner"

  FEDORA_X86_BUILD_TAG: "2022-04-24-spirv-tools-5"
  KERNEL_ROOTFS_TAG: "2022-07-06-virgl-update"

  WINDOWS_X64_VS_PATH: "windows/x64_vs"
  WINDOWS_X64_VS_TAG: "2022-06-15-vs-winsdk"
  FEDORA_X86_BUILD_TAG: "2021-12-31-refactor"
  KERNEL_ROOTFS_TAG: "2022-01-18-kernel"

  WINDOWS_X64_BUILD_PATH: "windows/x64_build"
  WINDOWS_X64_BUILD_TAG: "2022-06-15-vs-winsdk"
  WINDOWS_X64_BUILD_TAG: "2022-20-02-base_split"

  WINDOWS_X64_TEST_PATH: "windows/x64_test"
  WINDOWS_X64_TEST_TAG: "2022-06-15-vs-winsdk"
  WINDOWS_X64_TEST_TAG: "2022-20-02-base_split"

@@ -1,28 +0,0 @@
from datetime import timedelta


class MesaCIException(Exception):
    pass


class MesaCITimeoutError(MesaCIException):
    def __init__(self, *args, timeout_duration: timedelta) -> None:
        super().__init__(*args)
        self.timeout_duration = timeout_duration


class MesaCIRetryError(MesaCIException):
    def __init__(self, *args, retry_count: int) -> None:
        super().__init__(*args)
        self.retry_count = retry_count


class MesaCIParseException(MesaCIException):
    pass


class MesaCIKnownIssueException(MesaCIException):
    """Exception raised when the Mesa CI script finds something in the logs that
    is known to cause the LAVA job to eventually fail"""

    pass
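

# A minimal usage sketch (illustrative): callers raise the specific subclass
# and attach whatever context the retry logic needs, e.g.
#
#   raise MesaCITimeoutError(
#       "no new log output", timeout_duration=timedelta(minutes=5)
#   )
#
# while outer code can catch every CI failure at once via MesaCIException.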

@@ -1,4 +1,6 @@
.lava-test:
  extends:
    - .ci-run-policy
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  variables:
@@ -12,23 +14,20 @@
    BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}"
    BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}"
    # per-job build artifacts
    BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/${CI_PROJECT_NAME}-${ARCH}.tar.gz"
    MESA_BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/mesa-${ARCH}.tar.gz"
    JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
    JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz"
    MINIO_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
    PIGLIT_NO_WINDOW: 1
    VISIBILITY_GROUP: "Collabora+fdo"
  script:
    - ./artifacts/lava/lava-submit.sh
  artifacts:
    name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
    name: "mesa_${CI_JOB_NAME}"
    when: always
    paths:
      - results/
    exclude:
      - results/*.shader_cache
  tags:
    - $RUNNER_TAG
  after_script:
    - wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz

@@ -16,11 +16,7 @@ mkdir -p results/job-rootfs-overlay/

cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/

# Prepare env vars for upload.
KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
    artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh

tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
@@ -28,13 +24,13 @@ ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}"

touch results/lava.log
tail -f results/lava.log &
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
artifacts/lava/lava_job_submitter.py \
    --dump-yaml \
    --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
    --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
    --kernel-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
    --build-url "${FDO_HTTP_CACHE_URI:-}https://${BUILD_PATH}" \
    --base-system-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
    --mesa-build-url "${FDO_HTTP_CACHE_URI:-}https://${MESA_BUILD_PATH}" \
    --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
    --job-artifacts-base ${JOB_ARTIFACTS_BASE} \
    --job-timeout ${JOB_TIMEOUT:-30} \
    --first-stage-init artifacts/ci-common/init-stage1.sh \
    --ci-project-dir ${CI_PROJECT_DIR} \
@@ -45,6 +41,4 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
    --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
    --boot-method ${BOOT_METHOD} \
    --visibility-group ${VISIBILITY_GROUP} \
    --lava-tags "${LAVA_TAGS}" \
    --mesa-job-name "$CI_JOB_NAME" \
    >> results/lava.log
    --lava-tags "${LAVA_TAGS}" >> results/lava.log

@@ -1,64 +1,74 @@
#!/usr/bin/env python3
#
# Copyright (C) 2020 - 2022 Collabora Limited
# Authors:
#     Gustavo Padovan <gustavo.padovan@collabora.com>
#     Guilherme Gallo <guilherme.gallo@collabora.com>
# Copyright (C) 2020, 2021 Collabora Limited
# Author: Gustavo Padovan <gustavo.padovan@collabora.com>
#
# SPDX-License-Identifier: MIT
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Send a job to LAVA, track it and collect log back"""


import argparse
import contextlib
import pathlib
import re
import sys
import time
import traceback
import urllib.parse
import xmlrpc.client
import xmlrpc
from datetime import datetime, timedelta
from os import getenv
from typing import Any, Optional

import lavacli
import yaml
from lava.exceptions import (
    MesaCIException,
    MesaCIKnownIssueException,
    MesaCIParseException,
    MesaCIRetryError,
    MesaCITimeoutError,
)
from lava.utils import (
    CONSOLE_LOG,
    GitlabSection,
    LogFollower,
    LogSectionType,
    fatal_err,
    hide_sensitive_data,
    print_log,
)
from lavacli.utils import loader

# Timeout in seconds to decide if the device from the dispatched LAVA job has
# Timeout in minutes to decide if the device from the dispatched LAVA job has
# hung or not due to the lack of new log output.
DEVICE_HANGING_TIMEOUT_SEC = int(getenv("LAVA_DEVICE_HANGING_TIMEOUT_SEC", 5*60))
DEVICE_HANGING_TIMEOUT_MIN = 5

# How many seconds the script should wait before trying a new polling iteration to
# check if the dispatched LAVA job is running or waiting in the job queue.
WAIT_FOR_DEVICE_POLLING_TIME_SEC = int(getenv("LAVA_WAIT_FOR_DEVICE_POLLING_TIME_SEC", 10))
WAIT_FOR_DEVICE_POLLING_TIME_SEC = 10

# How many seconds to wait between log output LAVA RPC calls.
LOG_POLLING_TIME_SEC = int(getenv("LAVA_LOG_POLLING_TIME_SEC", 5))
LOG_POLLING_TIME_SEC = 5

# How many retries should be made when a timeout happens.
NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT_DETECTION", 2))
NUMBER_OF_RETRIES_TIMEOUT_DETECTION = 2

# How many attempts should be made when a timeout happens during LAVA device boot.
NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3))


def print_log(msg):
    print("{}: {}".format(datetime.now(), msg))


def fatal_err(msg):
    print_log(msg)
    sys.exit(1)


def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
    out_data = ""

    for line in yaml_data.splitlines(True):
        if hide_tag in line:
            continue
        out_data += line

    return out_data
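

# A minimal usage sketch for hide_sensitive_data, with hypothetical input:
# any line carrying the hide tag is dropped before the YAML is echoed to the
# public job log, e.g.
#   hide_sensitive_data('token: s3cr3t # HIDEME\nname: mesa\n') == 'name: mesa\n'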


def generate_lava_yaml(args):
@@ -69,15 +79,11 @@ def generate_lava_yaml(args):
        'visibility': { 'group': [ args.visibility_group ] },
        'priority': 75,
        'context': {
            'extra_nfsroot_args': ' init=/init rootwait usbcore.quirks=0bda:8153:k'
            'extra_nfsroot_args': ' init=/init rootwait minio_results={}'.format(args.job_artifacts_base)
        },
        "timeouts": {
            "job": {"minutes": args.job_timeout},
            "action": {"minutes": 3},
            "actions": {
                "depthcharge-action": {
                    "minutes": 3 * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
                }
        'timeouts': {
            'job': {
                'minutes': args.job_timeout
            }
        },
    }
@@ -92,10 +98,10 @@ def generate_lava_yaml(args):
        'to': 'tftp',
        'os': 'oe',
        'kernel': {
            'url': '{}/{}'.format(args.kernel_url_prefix, args.kernel_image_name),
            'url': '{}/{}'.format(args.base_system_url_prefix, args.kernel_image_name),
        },
        'nfsrootfs': {
            'url': '{}/lava-rootfs.tgz'.format(args.rootfs_url_prefix),
            'url': '{}/lava-rootfs.tgz'.format(args.base_system_url_prefix),
            'compression': 'gz',
        }
    }
@@ -103,27 +109,25 @@ def generate_lava_yaml(args):
        deploy['kernel']['type'] = args.kernel_image_type
    if args.dtb:
        deploy['dtb'] = {
            'url': '{}/{}.dtb'.format(args.kernel_url_prefix, args.dtb)
            'url': '{}/{}.dtb'.format(args.base_system_url_prefix, args.dtb)
        }

    # always boot over NFS
    boot = {
        "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
        "method": args.boot_method,
        "commands": "nfs",
        "prompts": ["lava-shell:"],
        'timeout': { 'minutes': 25 },
        'method': args.boot_method,
        'commands': 'nfs',
        'prompts': ['lava-shell:'],
    }

    # skeleton test definition: only declaring each job as a single 'test'
    # since LAVA's test parsing is not useful to us
    run_steps = []
    test = {
        'timeout': { 'minutes': args.job_timeout },
        'failure_retry': 1,
        'definitions': [ {
            'name': 'mesa',
            'from': 'inline',
            'lava-signal': 'kmsg',
            'path': 'inline/mesa.yaml',
            'repository': {
                'metadata': {
@@ -133,8 +137,10 @@ def generate_lava_yaml(args):
                    'scope': [ 'functional' ],
                    'format': 'Lava-Test Test Definition 1.0',
                },
                'parse': {
                    'pattern': r'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
                },
                'run': {
                    "steps": run_steps
                },
            },
        } ],
@@ -144,39 +150,27 @@ def generate_lava_yaml(args):
    # - inline .gitlab-ci/common/init-stage1.sh
    # - fetch and unpack per-pipeline build artifacts from build job
    # - fetch and unpack per-job environment from lava-submit.sh
    # - exec .gitlab-ci/common/init-stage2.sh
    init_lines = []

    with open(args.first_stage_init, 'r') as init_sh:
        run_steps += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]
        init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]

    if args.jwt_file:
        with open(args.jwt_file) as jwt_file:
            run_steps += [
                "set +x",
                f'echo -n "{jwt_file.read()}" > "{args.jwt_file}" # HIDEME',
                "set -x",
                f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
            ]
    else:
        run_steps += [
            "echo Could not find jwt file, disabling MINIO requests...",
            "sed -i '/MINIO_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
    with open(args.jwt_file) as jwt_file:
        init_lines += [
            "set +x",
            f'echo -n "{jwt_file.read()}" > "{args.jwt_file}" # HIDEME',
            "set -x",
        ]

    run_steps += [
    init_lines += [
        'mkdir -p {}'.format(args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.build_url, args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.mesa_build_url, args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),

        # Sleep a bit to give bash time to dump shell xtrace messages into the
        # console, which may otherwise interleave with LAVA_SIGNAL_STARTTC on
        # some devices like a618.
        'sleep 1',

        # Use the CI_JOB name as the testcase name; it may help LAVA farm
        # maintainers with monitoring
        f"lava-test-case 'mesa-ci_{args.mesa_job_name}' --shell /init-stage2.sh",
        f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
        'exec /init-stage2.sh',
    ]
    test['definitions'][0]['repository']['run']['steps'] = init_lines

    values['actions'] = [
        { 'deploy': deploy },
@@ -217,318 +211,138 @@ def _call_proxy(fn, *args):
                fatal_err("A protocol error occurred (Err {} {})".format(err.errcode, err.errmsg))
            else:
                time.sleep(15)
                pass
        except xmlrpc.client.Fault as err:
            traceback.print_exc()
            fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode))


class LAVAJob:
    COLOR_STATUS_MAP = {
        "pass": CONSOLE_LOG["FG_GREEN"],
        "hung": CONSOLE_LOG["FG_YELLOW"],
        "fail": CONSOLE_LOG["FG_RED"],
        "canceled": CONSOLE_LOG["FG_MAGENTA"],
    }

    def __init__(self, proxy, definition):
        self.job_id = None
        self.proxy = proxy
        self.definition = definition
        self.last_log_line = 0
        self.last_log_time = None
        self.is_finished = False
        self.status = "created"

    def heartbeat(self):
        self.last_log_time = datetime.now()
        self.status = "running"

    def validate(self) -> Optional[dict]:
        """Returns a dict with errors, if the validation fails.

        Returns:
            Optional[dict]: a dict with the validation errors, if any
        """
        return _call_proxy(self.proxy.scheduler.jobs.validate, self.definition, True)

    def submit(self):
        try:
            self.job_id = _call_proxy(self.proxy.scheduler.jobs.submit, self.definition)
        except MesaCIException:
            return False
        return True

    def cancel(self):
        if self.job_id:
            self.proxy.scheduler.jobs.cancel(self.job_id)

    def is_started(self) -> bool:
        waiting_states = ["Submitted", "Scheduling", "Scheduled"]
        job_state: dict[str, str] = _call_proxy(
            self.proxy.scheduler.job_state, self.job_id
        )
        return job_state["job_state"] not in waiting_states

    def _load_log_from_data(self, data) -> list[str]:
        lines = []
        # When there is no new log data, the YAML is empty
        if loaded_lines := yaml.load(str(data), Loader=loader(False)):
            lines = loaded_lines
            self.last_log_line += len(lines)
        return lines

    def get_logs(self) -> list[str]:
        try:
            (finished, data) = _call_proxy(
                self.proxy.scheduler.jobs.logs, self.job_id, self.last_log_line
            )
            self.is_finished = finished
            return self._load_log_from_data(data)

        except Exception as mesa_ci_err:
            raise MesaCIParseException(
                f"Could not get LAVA job logs. Reason: {mesa_ci_err}"
            ) from mesa_ci_err

    def parse_job_result_from_log(
        self, lava_lines: list[dict[str, str]]
    ) -> list[dict[str, str]]:
        """Use the console log to catch if the job has completed successfully or
        not. Returns the list of log lines until the result line."""

        last_line = None  # Print all lines. lines[:None] == lines[:]

        for idx, line in enumerate(lava_lines):
            if result := re.search(r"hwci: mesa: (pass|fail)", line):
                self.is_finished = True
                self.status = result.group(1)

                last_line = idx + 1
                # We reached the log end here. hwci script has finished.
                break
        return lava_lines[:last_line]
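
    # For example, with hypothetical log lines
    #   ["deqp: ...", "hwci: mesa: pass", "post-processing output"]
    # the method marks the job finished with status "pass" and returns only
    # the first two lines; output after the verdict line is dropped.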


def find_exception_from_metadata(metadata, job_id):
    if "result" not in metadata or metadata["result"] != "fail":
        return
    if "error_type" in metadata:
        error_type = metadata["error_type"]
        if error_type == "Infrastructure":
            raise MesaCIException(
                f"LAVA job {job_id} failed with Infrastructure Error. Retry."
            )
        if error_type == "Job":
            # This happens when LAVA assumes that the job cannot terminate, or
            # with malformed job definitions. As we always validate the jobs,
            # only the former is likely to happen. E.g.: when some LAVA action
            # timed out more times than expected in the job definition.
            raise MesaCIException(
                f"LAVA job {job_id} failed with JobError "
                "(possible LAVA timeout misconfiguration/bug). Retry."
            )
    if "case" in metadata and metadata["case"] == "validate":
        raise MesaCIException(
            f"LAVA job {job_id} failed validation (possible download error). Retry."
        )
    return metadata
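

# The metadata inspected above comes from LAVA's per-test results; a failing
# entry looks roughly like this (illustrative):
#   {"result": "fail", "error_type": "Infrastructure", "case": "validate"}
# Entries that match no known-retryable shape are returned unchanged.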


def find_lava_error(job) -> None:
def get_job_results(proxy, job_id, test_suite, test_case):
    # Look for infrastructure errors and retry if we see them.
    results_yaml = _call_proxy(job.proxy.results.get_testjob_results_yaml, job.job_id)
    results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
    results = yaml.load(results_yaml, Loader=loader(False))
    for res in results:
        metadata = res["metadata"]
        find_exception_from_metadata(metadata, job.job_id)
        metadata = res['metadata']
        if not 'result' in metadata or metadata['result'] != 'fail':
            continue
        if 'error_type' in metadata and metadata['error_type'] == "Infrastructure":
            print_log("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id))
            return False
        if 'case' in metadata and metadata['case'] == "validate":
            print_log("LAVA job {} failed validation (possible download error). Retry.".format(job_id))
            return False

    # If we reach this far, it means that the job ended without hwci script
    # result and no LAVA infrastructure problem was found
    job.status = "fail"
    results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case)
    results = yaml.load(results_yaml, Loader=loader(False))
    if not results:
        fatal_err("LAVA: no result for test_suite '{}', test_case '{}'".format(test_suite, test_case))

    print_log("LAVA: result for test_suite '{}', test_case '{}': {}".format(test_suite, test_case, results[0]['result']))
    if results[0]['result'] != 'pass':
        fatal_err("FAIL")

def show_job_data(job):
    with GitlabSection(
        "job_data",
        "LAVA job info",
        type=LogSectionType.LAVA_POST_PROCESSING,
        start_collapsed=True,
    ):
        show = _call_proxy(job.proxy.scheduler.jobs.show, job.job_id)
        for field, value in show.items():
            print("{}\t: {}".format(field, value))
    return True

def wait_until_job_is_started(proxy, job_id):
    print_log(f"Waiting for job {job_id} to start.")
    current_state = "Submitted"
    waiting_states = ["Submitted", "Scheduling", "Scheduled"]
    while current_state in waiting_states:
        job_state = _call_proxy(proxy.scheduler.job_state, job_id)
        current_state = job_state["job_state"]

def fetch_logs(job, max_idle_time, log_follower) -> None:
    # Poll to check for new logs, assuming that a prolonged period of
    # silence means that the device has died and we should try it again
    if datetime.now() - job.last_log_time > max_idle_time:
        max_idle_time_min = max_idle_time.total_seconds() / 60

        raise MesaCITimeoutError(
            f"{CONSOLE_LOG['BOLD']}"
            f"{CONSOLE_LOG['FG_YELLOW']}"
            f"LAVA job {job.job_id} does not respond for {max_idle_time_min} "
            "minutes. Retry."
            f"{CONSOLE_LOG['RESET']}",
            timeout_duration=max_idle_time,
        )

    time.sleep(LOG_POLLING_TIME_SEC)

    # The XMLRPC binary packet may be corrupted, causing a YAML scanner error.
    # Retry the log fetching several times before exposing the error.
    for _ in range(5):
        with contextlib.suppress(MesaCIParseException):
            new_log_lines = job.get_logs()
            break
    else:
        raise MesaCIParseException

    if log_follower.feed(new_log_lines):
        # If we had non-empty log data, we can assure that the device is alive.
        job.heartbeat()
    parsed_lines = log_follower.flush()

    # Only parse job results when the script reaches the end of the logs.
    # Depending on how much payload the RPC scheduler.jobs.logs gets, it may
    # reach the LAVA_POST_PROCESSING phase.
    if log_follower.current_section.type in (
        LogSectionType.TEST_CASE,
        LogSectionType.LAVA_POST_PROCESSING,
    ):
        parsed_lines = job.parse_job_result_from_log(parsed_lines)

    for line in parsed_lines:
        print_log(line)


def follow_job_execution(job):
    try:
        job.submit()
    except Exception as mesa_ci_err:
        raise MesaCIException(
            f"Could not submit LAVA job. Reason: {mesa_ci_err}"
        ) from mesa_ci_err

    print_log(f"Waiting for job {job.job_id} to start.")
    while not job.is_started():
        time.sleep(WAIT_FOR_DEVICE_POLLING_TIME_SEC)
    print_log(f"Job {job.job_id} started.")
    print_log(f"Job {job_id} started.")

    gl = GitlabSection(
        id="lava_boot",
        header="LAVA boot",
        type=LogSectionType.LAVA_BOOT,
        start_collapsed=True,
    )
    print(gl.start())
    max_idle_time = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
    with LogFollower(current_section=gl) as lf:
def follow_job_execution(proxy, job_id):
    line_count = 0
    finished = False
    last_time_logs = datetime.now()
    while not finished:
        (finished, data) = _call_proxy(proxy.scheduler.jobs.logs, job_id, line_count)
        logs = yaml.load(str(data), Loader=loader(False))
        if logs:
            # Reset the timeout
            last_time_logs = datetime.now()
            for line in logs:
                print("{} {}".format(line["dt"], line["msg"]))

        max_idle_time = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
        # Start to check job's health
        job.heartbeat()
        while not job.is_finished:
            fetch_logs(job, max_idle_time, lf)
            line_count += len(logs)

    show_job_data(job)
        else:
            time_limit = timedelta(minutes=DEVICE_HANGING_TIMEOUT_MIN)
            if datetime.now() - last_time_logs > time_limit:
                print_log("LAVA job {} doesn't advance (machine got hung?). Retry.".format(job_id))
                return False

    # Mesa Developers expect to have a simple pass/fail job result.
    # If this does not happen, it probably means a LAVA infrastructure error
    # happened.
    if job.status not in ["pass", "fail"]:
        find_lava_error(job)
        # `proxy.scheduler.jobs.logs` does not block, even when there is no
        # new log to be fetched. To avoid DoSing the LAVA dispatcher
        # machine, let's add a sleep to save them some stamina.
        time.sleep(LOG_POLLING_TIME_SEC)

    return True

def show_job_data(proxy, job_id):
    show = _call_proxy(proxy.scheduler.jobs.show, job_id)
    for field, value in show.items():
        print("{}\t: {}".format(field, value))


def print_job_final_status(job):
    if job.status == "running":
        job.status = "hung"
def validate_job(proxy, job_file):
    try:
        return _call_proxy(proxy.scheduler.jobs.validate, job_file, True)
    except:
        return False

    color = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"])
    print_log(
        f"{color}"
        f"LAVA Job finished with status: {job.status}"
        f"{CONSOLE_LOG['RESET']}"
    )


def retriable_follow_job(proxy, job_definition) -> LAVAJob:
    retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION

    for attempt_no in range(1, retry_count + 2):
        job = LAVAJob(proxy, job_definition)
        try:
            follow_job_execution(job)
            return job
        except MesaCIKnownIssueException as found_issue:
            print_log(found_issue)
            job.status = "canceled"
        except MesaCIException as mesa_exception:
            print_log(mesa_exception)
            job.cancel()
        except KeyboardInterrupt as e:
            print_log("LAVA job submitter was interrupted. Cancelling the job.")
            job.cancel()
            raise e
        finally:
            print_log(
                f"{CONSOLE_LOG['BOLD']}"
                f"Finished executing LAVA job in the attempt #{attempt_no}"
                f"{CONSOLE_LOG['RESET']}"
            )
            print_job_final_status(job)

    raise MesaCIRetryError(
        f"{CONSOLE_LOG['BOLD']}"
        f"{CONSOLE_LOG['FG_RED']}"
        "Job failed after it exceeded the number of "
        f"{retry_count} retries."
        f"{CONSOLE_LOG['RESET']}",
        retry_count=retry_count,
    )


def treat_mesa_job_name(args):
    # Keep only the part of the mesa job name before the first space, since
    # spaces break the lava-test-case command
    args.mesa_job_name = args.mesa_job_name.split(" ")[0]
def submit_job(proxy, job_file):
    return _call_proxy(proxy.scheduler.jobs.submit, job_file)


def main(args):
    proxy = setup_lava_proxy()

    job_definition = generate_lava_yaml(args)
    yaml_file = generate_lava_yaml(args)

    if args.dump_yaml:
        with GitlabSection(
            "yaml_dump",
            "LAVA job definition (YAML)",
            type=LogSectionType.LAVA_BOOT,
            start_collapsed=True,
        ):
            print(hide_sensitive_data(job_definition))
    job = LAVAJob(proxy, job_definition)

    if errors := job.validate():
        fatal_err(f"Error in LAVA job definition: {errors}")
    print_log("LAVA job definition validated successfully")
        print(hide_sensitive_data(generate_lava_yaml(args)))

    if args.validate_only:
        ret = validate_job(proxy, yaml_file)
        if not ret:
            fatal_err("Error in LAVA job definition")
        print("LAVA job definition validated successfully")
        return

    finished_job = retriable_follow_job(proxy, job_definition)
    exit_code = 0 if finished_job.status == "pass" else 1
    sys.exit(exit_code)
    retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION

    while retry_count >= 0:
        job_id = submit_job(proxy, yaml_file)

        print_log("LAVA job id: {}".format(job_id))

        wait_until_job_is_started(proxy, job_id)

        if not follow_job_execution(proxy, job_id):
            print_log(f"Job {job_id} has timed out. Cancelling it.")
            # Cancel the job as it is considered unreachable by Mesa CI.
            proxy.scheduler.jobs.cancel(job_id)

            retry_count -= 1
            continue

        show_job_data(proxy, job_id)

        if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
            break


def create_parser():
    parser = argparse.ArgumentParser("LAVA job submitter")

    parser.add_argument("--pipeline-info")
    parser.add_argument("--rootfs-url-prefix")
    parser.add_argument("--kernel-url-prefix")
    parser.add_argument("--build-url")
    parser.add_argument("--base-system-url-prefix")
    parser.add_argument("--mesa-build-url")
    parser.add_argument("--job-rootfs-overlay-url")
    parser.add_argument("--job-artifacts-base")
    parser.add_argument("--job-timeout", type=int)
    parser.add_argument("--first-stage-init")
    parser.add_argument("--ci-project-dir")
@@ -542,11 +356,9 @@ def create_parser():
    parser.add_argument("--validate-only", action='store_true')
    parser.add_argument("--dump-yaml", action='store_true')
    parser.add_argument("--visibility-group")
    parser.add_argument("--mesa-job-name")

    return parser


if __name__ == "__main__":
    # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
    # GitLab runner -> GitLab primary -> user, safe to say we don't need any
@@ -558,5 +370,4 @@ if __name__ == "__main__":

    parser.set_defaults(func=main)
    args = parser.parse_args()
    treat_mesa_job_name(args)
    args.func(args)

@@ -1,11 +0,0 @@
from .console_format import CONSOLE_LOG
from .gitlab_section import GitlabSection
from .log_follower import (
    LogFollower,
    fatal_err,
    fix_lava_color_log,
    fix_lava_gitlab_section_log,
    hide_sensitive_data,
    print_log,
)
from .log_section import LogSection, LogSectionType
@@ -1,10 +0,0 @@
CONSOLE_LOG = {
    "FG_GREEN": "\x1b[1;32;5;197m",
    "FG_RED": "\x1b[1;38;5;197m",
    "FG_YELLOW": "\x1b[1;33;5;197m",
    "FG_MAGENTA": "\x1b[1;35;5;197m",
    "RESET": "\x1b[0m",
    "UNDERLINED": "\x1b[3m",
    "BOLD": "\x1b[1m",
    "DIM": "\x1b[2m",
}
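

# A minimal usage sketch: wrap a message in a colour and always reset it
# afterwards, e.g.
#   print(f"{CONSOLE_LOG['FG_GREEN']}pass{CONSOLE_LOG['RESET']}")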

@@ -1,84 +0,0 @@
from __future__ import annotations

import re
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Optional

from lava.utils.console_format import CONSOLE_LOG

if TYPE_CHECKING:
    from lava.utils.log_section import LogSectionType


@dataclass
class GitlabSection:
    id: str
    header: str
    type: LogSectionType
    start_collapsed: bool = False
    escape: str = "\x1b[0K"
    colour: str = f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"
    __start_time: Optional[datetime] = field(default=None, init=False)
    __end_time: Optional[datetime] = field(default=None, init=False)

    @classmethod
    def section_id_filter(cls, value) -> str:
        return str(re.sub(r"[^\w_-]+", "-", value))

    def __post_init__(self):
        self.id = self.section_id_filter(self.id)

    @property
    def has_started(self) -> bool:
        return self.__start_time is not None

    @property
    def has_finished(self) -> bool:
        return self.__end_time is not None

    def get_timestamp(self, time: datetime) -> str:
        unix_ts = datetime.timestamp(time)
        return str(int(unix_ts))

    def section(self, marker: str, header: str, time: datetime) -> str:
        preamble = f"{self.escape}section_{marker}"
        collapse = marker == "start" and self.start_collapsed
        collapsed = "[collapsed=true]" if collapse else ""
        section_id = f"{self.id}{collapsed}"

        timestamp = self.get_timestamp(time)
        before_header = ":".join([preamble, timestamp, section_id])
        colored_header = f"{self.colour}{header}\x1b[0m" if header else ""
        header_wrapper = "\r" + f"{self.escape}{colored_header}"

        return f"{before_header}{header_wrapper}"

    def __enter__(self):
        print(self.start())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print(self.end())

    def start(self) -> str:
        assert not self.has_finished, "Starting an already finished section"
        self.__start_time = datetime.now()
        return self.section(marker="start", header=self.header, time=self.__start_time)

    def end(self) -> str:
        assert self.has_started, "Ending an uninitialized section"
        self.__end_time = datetime.now()
        assert (
            self.__end_time >= self.__start_time
        ), "Section execution time will be negative"
        return self.section(marker="end", header="", time=self.__end_time)

    def delta_time(self) -> Optional[timedelta]:
        if self.__start_time and self.__end_time:
            return self.__end_time - self.__start_time

        if self.has_started:
            return datetime.now() - self.__start_time

        return None
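

# A usage sketch (assuming a LogSectionType value is at hand): entering the
# context prints GitLab's section-start escape sequence and leaving it prints
# the matching end marker, e.g.
#
#   with GitlabSection("lava_boot", "LAVA boot",
#                      type=LogSectionType.LAVA_BOOT, start_collapsed=True):
#       ...  # emits \x1b[0Ksection_start:<unix-ts>:lava_boot[collapsed=true]...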

@@ -1,43 +0,0 @@
from __future__ import annotations

import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from lava.utils import LogFollower

from lava.exceptions import MesaCIKnownIssueException
from lava.utils.console_format import CONSOLE_LOG
from lava.utils.log_section import LogSectionType


@dataclass
class LAVALogHints:
    log_follower: LogFollower
    has_r8152_issue_history: bool = field(default=False, init=False)

    def detect_failure(self, new_lines: list[dict[str, Any]]):
        for line in new_lines:
            self.detect_r8152_issue(line)

    def detect_r8152_issue(self, line):
        if (
            self.log_follower.phase == LogSectionType.TEST_CASE
            and line["lvl"] == "target"
        ):
            if re.search(r"r8152 \S+ eth0: Tx status -71", line["msg"]):
                self.has_r8152_issue_history = True
                return

            if self.has_r8152_issue_history and re.search(
                r"nfs: server \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} not responding, still trying",
                line["msg"],
            ):
                raise MesaCIKnownIssueException(
                    f"{CONSOLE_LOG['FG_MAGENTA']}"
                    "Probable network issue failure encountered, retrying the job"
                    f"{CONSOLE_LOG['RESET']}"
                )

        self.has_r8152_issue_history = False
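

# An illustrative log sequence (hypothetical device/address strings) that
# raises the known-issue exception above: first the USB NIC error, then the
# NFS stall.
#   {"lvl": "target", "msg": "r8152 2-1.1:1.0 eth0: Tx status -71"}
#   {"lvl": "target", "msg": "nfs: server 192.168.0.1 not responding, still trying"}
# Any other target-level line in between resets has_r8152_issue_history.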
|
@@ -1,218 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# SPDX-License-Identifier: MIT

"""
Some utilities to analyse logs, create gitlab sections and other quality of life
improvements
"""

import logging
import re
import sys
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Optional, Union

from lava.exceptions import MesaCITimeoutError
from lava.utils.console_format import CONSOLE_LOG
from lava.utils.gitlab_section import GitlabSection
from lava.utils.lava_log_hints import LAVALogHints
from lava.utils.log_section import (
    DEFAULT_GITLAB_SECTION_TIMEOUTS,
    FALLBACK_GITLAB_SECTION_TIMEOUT,
    LOG_SECTIONS,
    LogSectionType,
)


@dataclass
class LogFollower:
    current_section: Optional[GitlabSection] = None
    timeout_durations: dict[LogSectionType, timedelta] = field(
        default_factory=lambda: DEFAULT_GITLAB_SECTION_TIMEOUTS,
    )
    fallback_timeout: timedelta = FALLBACK_GITLAB_SECTION_TIMEOUT
    _buffer: list[str] = field(default_factory=list, init=False)
    log_hints: LAVALogHints = field(init=False)

    def __post_init__(self):
        section_is_created = bool(self.current_section)
        section_has_started = bool(
            self.current_section and self.current_section.has_started
        )
        self.log_hints = LAVALogHints(self)
        assert (
            section_is_created == section_has_started
        ), "Can't follow logs beginning from uninitialized GitLab sections."

    @property
    def phase(self) -> LogSectionType:
        return (
            self.current_section.type
            if self.current_section
            else LogSectionType.UNKNOWN
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Cleanup existing buffer if this object gets out from the context"""
        self.clear_current_section()
        last_lines = self.flush()
        for line in last_lines:
            print(line)

    def watchdog(self):
        if not self.current_section:
            return

        timeout_duration = self.timeout_durations.get(
            self.current_section.type, self.fallback_timeout
        )

        if self.current_section.delta_time() > timeout_duration:
            raise MesaCITimeoutError(
                f"Gitlab Section {self.current_section} has timed out",
                timeout_duration=timeout_duration,
            )
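A quick sketch of the watchdog contract under an assumed setup (some_started_boot_section stands in for a GitlabSection of type LAVA_BOOT whose start() was already emitted; the 9-minute budget comes from DEFAULT_GITLAB_SECTION_TIMEOUTS in log_section.py below):

    follower = LogFollower(current_section=some_started_boot_section)
    # If more than 9 minutes elapse without that section ending, the next
    # poll trips the watchdog instead of letting the job hang silently:
    follower.watchdog()  # raises MesaCITimeoutError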
    def clear_current_section(self):
        if self.current_section and not self.current_section.has_finished:
            self._buffer.append(self.current_section.end())
            self.current_section = None

    def update_section(self, new_section: GitlabSection):
        # Sections can have redundant regex to find them to mitigate LAVA
        # interleaving kmsg and stderr/stdout issue.
        if self.current_section and self.current_section.id == new_section.id:
            return
        self.clear_current_section()
        self.current_section = new_section
        self._buffer.append(new_section.start())

    def manage_gl_sections(self, line):
        if isinstance(line["msg"], list):
            logging.debug("Ignoring messages as list. Kernel dumps.")
            return

        for log_section in LOG_SECTIONS:
            if new_section := log_section.from_log_line_to_section(line):
                self.update_section(new_section)

    def detect_kernel_dump_line(self, line: dict[str, Union[str, list]]) -> bool:
        # line["msg"] can be a list[str] when there is a kernel dump
        if isinstance(line["msg"], list):
            return line["lvl"] == "debug"

        # result level has dict line["msg"]
        if not isinstance(line["msg"], str):
            return False

        # we have a line, check if it is a kernel message
        if re.search(r"\[[\d\s]{5}\.[\d\s]{6}\] +\S{2,}", line["msg"]):
            print_log(f"{CONSOLE_LOG['BOLD']}{line['msg']}{CONSOLE_LOG['RESET']}")
            return True

        return False

    def feed(self, new_lines: list[dict[str, str]]) -> bool:
        """Input data to be processed by LogFollower instance
        Returns true if the DUT (device under test) seems to be alive.
        """

        self.watchdog()

        # No signal of job health in the log
        is_job_healthy = False

        for line in new_lines:
            if self.detect_kernel_dump_line(line):
                continue

            # At least we are fed with a non-kernel dump log, it seems that the
            # job is progressing
            is_job_healthy = True
            self.manage_gl_sections(line)
            if parsed_line := parse_lava_line(line):
                self._buffer.append(parsed_line)

        self.log_hints.detect_failure(new_lines)

        return is_job_healthy

    def flush(self) -> list[str]:
        buffer = self._buffer
        self._buffer = []
        return buffer
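A sketch of the intended consumption loop for LogFollower (new_log_lines() and job_is_running() are hypothetical stand-ins for the LAVA polling code that feeds it):

    with LogFollower() as follower:
        while job_is_running():
            is_healthy = follower.feed(new_log_lines())
            for line in follower.flush():
                print(line)
            if not is_healthy:
                break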

def fix_lava_color_log(line):
    """This function is a temporary solution for the color escape codes mangling
    problem. There is some problem in message passing between the LAVA
    dispatcher and the device under test (DUT). Here \x1b character is missing
    before `[:digit::digit:?:digit:?m` ANSI TTY color codes, or the more
    complicated ones with number values for text format before background and
    foreground colors.
    When this problem is fixed on the LAVA side, one should remove this function.
    """
    line["msg"] = re.sub(r"(\[(\d+;){0,2}\d{1,3}m)", "\x1b" + r"\1", line["msg"])
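Sketch of the repair on an assumed sample line (the ESC byte was eaten in transit, so the raw message arrives as plain "[31m...[0m"):

    line = {"msg": "[31mFAIL[0m"}
    fix_lava_color_log(line)
    assert line["msg"] == "\x1b[31mFAIL\x1b[0m"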
def fix_lava_gitlab_section_log(line):
    """This function is a temporary solution for the Gitlab section markers
    mangling problem. Gitlab parses the following lines to define a collapsible
    gitlab section in their log:
    - \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
    - \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
    There is some problem in message passing between the LAVA dispatcher and the
    device under test (DUT), that digests \x1b and \r control characters
    incorrectly. When this problem is fixed on the LAVA side, one should remove
    this function.
    """
    if match := re.match(r"\[0K(section_\w+):(\d+):(\S+)\[0K([\S ]+)?", line["msg"]):
        marker, timestamp, id_collapsible, header = match.groups()
        # The above regex serves for both section start and end lines.
        # When the header is None, it means we are dealing with `section_end` line
        header = header or ""
        line["msg"] = f"\x1b[0K{marker}:{timestamp}:{id_collapsible}\r\x1b[0K{header}"
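Sketch of the marker repair on an assumed mangled input (the timestamp and section id are made up):

    line = {"msg": "[0Ksection_start:1651000000:dut_boot[0KBooting hardware device"}
    fix_lava_gitlab_section_log(line)
    # line["msg"] is now:
    # "\x1b[0Ksection_start:1651000000:dut_boot\r\x1b[0KBooting hardware device"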
def parse_lava_line(line) -> Optional[str]:
    prefix = ""
    suffix = ""

    if line["lvl"] in ["results", "feedback", "debug"]:
        return
    elif line["lvl"] in ["warning", "error"]:
        prefix = CONSOLE_LOG["FG_RED"]
        suffix = CONSOLE_LOG["RESET"]
    elif line["lvl"] == "input":
        prefix = "$ "
        suffix = ""
    elif line["lvl"] == "target":
        fix_lava_color_log(line)
        fix_lava_gitlab_section_log(line)

    return f'{prefix}{line["msg"]}{suffix}'
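Sketch of parse_lava_line on assumed inputs (the exact escape values come from CONSOLE_LOG, so only the shape is shown):

    parse_lava_line({"lvl": "debug", "msg": "dispatcher chatter"})
    # -> None: debug/results/feedback lines are dropped from the job log
    parse_lava_line({"lvl": "input", "msg": "cd /deqp"})
    # -> "$ cd /deqp"
    parse_lava_line({"lvl": "error", "msg": "boot failed"})
    # -> "boot failed" wrapped in CONSOLE_LOG red/reset escapes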

def print_log(msg):
    # Reset color from timestamp, since `msg` can tint the terminal color
    print(f"{CONSOLE_LOG['RESET']}{datetime.now()}: {msg}")


def fatal_err(msg):
    # Parenthesized so the f-strings form one implicitly concatenated literal;
    # as three bare statements the color codes would be silently dropped.
    colored_msg = (
        f"{CONSOLE_LOG['FG_RED']}"
        f"{msg}"
        f"{CONSOLE_LOG['RESET']}"
    )
    print_log(colored_msg)
    sys.exit(1)


def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
    return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)
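Sketch of hide_sensitive_data on a made-up YAML snippet (any line carrying the HIDEME tag is dropped before the job definition is echoed to the log):

    yaml_job = "device_type: rpi4\nsecrets: token  # HIDEME\npriority: 75\n"
    print(hide_sensitive_data(yaml_job))
    # -> "device_type: rpi4\npriority: 75\n"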
@@ -1,88 +0,0 @@
import re
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum, auto
from typing import Optional, Pattern, Union

from lava.utils.gitlab_section import GitlabSection


class LogSectionType(Enum):
    UNKNOWN = auto()
    LAVA_BOOT = auto()
    TEST_SUITE = auto()
    TEST_CASE = auto()
    LAVA_POST_PROCESSING = auto()


FALLBACK_GITLAB_SECTION_TIMEOUT = timedelta(minutes=10)
DEFAULT_GITLAB_SECTION_TIMEOUTS = {
    # Empirically, successful device boot in LAVA time takes less than 3
    # minutes.
    # LAVA itself is configured to attempt thrice to boot the device,
    # summing up to 9 minutes.
    # It is better to retry the boot than cancel the job and re-submit to avoid
    # the enqueue delay.
    LogSectionType.LAVA_BOOT: timedelta(minutes=9),
    # Test suite phase is where the initialization happens.
    LogSectionType.TEST_SUITE: timedelta(minutes=5),
    # Test cases may take a long time, this script has no right to interrupt
    # them. But if the test case takes almost 1h, it will never succeed due to
    # Gitlab job timeout.
    LogSectionType.TEST_CASE: timedelta(minutes=60),
    # LAVA post processing may refer to a test suite teardown, or the
    # adjustments to start the next test_case
    LogSectionType.LAVA_POST_PROCESSING: timedelta(minutes=5),
}


@dataclass(frozen=True)
class LogSection:
    regex: Union[Pattern, str]
    levels: tuple[str]
    section_id: str
    section_header: str
    section_type: LogSectionType
    collapsed: bool = False

    def from_log_line_to_section(
        self, lava_log_line: dict[str, str]
    ) -> Optional[GitlabSection]:
        if lava_log_line["lvl"] not in self.levels:
            return

        if match := re.search(self.regex, lava_log_line["msg"]):
            section_id = self.section_id.format(*match.groups())
            section_header = self.section_header.format(*match.groups())
            return GitlabSection(
                id=section_id,
                header=section_header,
                type=self.section_type,
                start_collapsed=self.collapsed,
            )


LOG_SECTIONS = (
    LogSection(
        regex=re.compile(r"<?STARTTC>? ([^>]*)"),
        levels=("target", "debug"),
        section_id="{}",
        section_header="test_case {}",
        section_type=LogSectionType.TEST_CASE,
    ),
    LogSection(
        regex=re.compile(r"<?STARTRUN>? ([^>]*)"),
        levels=("target", "debug"),
        section_id="{}",
        section_header="test_suite {}",
        section_type=LogSectionType.TEST_SUITE,
    ),
    LogSection(
        regex=re.compile(r"ENDTC>? ([^>]+)"),
        levels=("target", "debug"),
        section_id="post-{}",
        section_header="Post test_case {}",
        collapsed=True,
        section_type=LogSectionType.LAVA_POST_PROCESSING,
    ),
)
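Sketch of how a test-case start marker in the log turns into a GitLab section (the dEQP case name is a hypothetical example):

    line = {"lvl": "target", "msg": "<STARTTC> dEQP-VK.api.smoke"}
    section = LOG_SECTIONS[0].from_log_line_to_section(line)
    # -> GitlabSection(id="dEQP-VK.api.smoke",
    #                  header="test_case dEQP-VK.api.smoke",
    #                  type=LogSectionType.TEST_CASE, start_collapsed=False)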
@@ -48,8 +48,6 @@ case $CI_JOB_NAME in
if test -f /usr/bin/time; then
MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
fi
Xvfb :0 -screen 0 1024x768x16 &
export DISPLAY=:0.0
;;
*)
if test -f /usr/bin/time -a -f /usr/bin/strace; then
@@ -65,7 +63,7 @@ meson _build --native-file=native.file \
-D prefix=`pwd`/install \
-D libdir=lib \
-D buildtype=${BUILDTYPE:-debug} \
-D build-tests=false \
-D build-tests=true \
-D c_args="$(echo -n $C_ARGS)" \
-D cpp_args="$(echo -n $CPP_ARGS)" \
-D libunwind=${UNWIND} \
@@ -73,12 +71,11 @@ meson _build --native-file=native.file \
${GALLIUM_ST} \
-D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
-D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
-D video-codecs=h264dec,h264enc,h265dec,h265enc,vc1dec \
-D werror=true \
${EXTRA_OPTION}
cd _build
meson configure
ninja
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} --print-errorlogs ${MESON_TEST_ARGS}
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} ${MESON_TEST_ARGS}
ninja install
cd ..
@@ -17,15 +17,6 @@ export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_C
RESULTS=`pwd`/${PIGLIT_RESULTS_DIR:-results}
mkdir -p $RESULTS

# Ensure Mesa Shader Cache resides on tmpfs.
SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}

findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
mkdir -p ${SHADER_CACHE_DIR}
mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
}

if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
# deqp is to use virpipe, and virgl_test_server llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
@@ -13,7 +13,6 @@ mkdir -p "$RESULTS"
# using a command wrapper. Hence, we will just set it when running the
# command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.${VK_CPU:-`uname -m`}.json"

# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
@@ -34,48 +33,96 @@ quiet() {
set -x
}

# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"
if [ "$VK_DRIVER" ]; then

# Our rootfs may not have "less", which apitrace uses during
# apitrace dump
export PAGER=cat
### VULKAN ###

SANITY_MESA_VERSION_CMD="wflinfo"
# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"

HANG_DETECTION_CMD=""
# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/dxvk-wine64"
export WINEESYNC=1

# Set environment for DXVK.
export DXVK_LOG_LEVEL="none"
export DXVK_STATE_CACHE=0

# Set environment for gfxreconstruct executables.
export PATH="/gfxreconstruct/build/bin:$PATH"

SANITY_MESA_VERSION_CMD="vulkaninfo"

HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"


# Set up the platform windowing system.
# Set up the Window System Interface (WSI)

if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
# Use the surfaceless EGL platform.
export DISPLAY=
export WAFFLE_PLATFORM="surfaceless_egl"

SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"

if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
# piglit is to use virpipe, and virgl_test_server llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"

LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
GALLIUM_DRIVER=llvmpipe \
VTEST_USE_EGL_SURFACELESS=1 \
VTEST_USE_GLES=1 \
virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &

sleep 1
if [ ${TEST_START_XORG:-0} -eq 1 ]; then
"$INSTALL"/common/start-x.sh "$INSTALL"
export DISPLAY=:0
else
# Run vulkan against the host's running X server (xvfb doesn't
# have DRI3 support).
# Set the DISPLAY env variable in each gitlab-runner's
# configuration file:
# https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
quiet printf "%s%s\n" "Running against the hosts' X server. " \
"DISPLAY is \"$DISPLAY\"."
fi
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
# It is assumed that you have already brought up your X server before
# calling this script.
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
else
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"

### GL/ES ###

# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"

# Our rootfs may not have "less", which apitrace uses during
# apitrace dump
export PAGER=cat

SANITY_MESA_VERSION_CMD="wflinfo"

HANG_DETECTION_CMD=""


# Set up the platform windowing system.

if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then

# Use the surfaceless EGL platform.
export DISPLAY=
export WAFFLE_PLATFORM="surfaceless_egl"

SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"

if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
# piglit is to use virpipe, and virgl_test_server llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"

LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
GALLIUM_DRIVER=llvmpipe \
VTEST_USE_EGL_SURFACELESS=1 \
VTEST_USE_GLES=1 \
virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &

sleep 1
fi
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
# It is assumed that you have already brought up your X server before
# calling this script.
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
else
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
fi
fi

if [ "$ZINK_USE_LAVAPIPE" ]; then
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
fi

# If the job is parallel at the gitlab job level, will take the corresponding
@@ -131,7 +178,7 @@ PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")

PIGLIT_CMD="./piglit run -l verbose --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")
PIGLIT_CMD="./piglit run --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")

RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"

@@ -141,13 +188,6 @@ fi

ci-fairy minio login $MINIO_ARGS --token-file "${CI_JOB_JWT_FILE}"

# The replayer doesn't do any size or checksum verification for the traces in
# the replayer db, so if we had to restart the system due to intermittent device
# errors (or tried to cache replayer-db between runs, which would be nice to
# have), you could get a corrupted local trace that would spuriously fail the
# run.
rm -rf replayer-db

eval $RUN_CMD

if [ $? -ne 0 ]; then
@@ -34,7 +34,6 @@ cp -Rp .gitlab-ci/fossilize-runner.sh install/
cp -Rp .gitlab-ci/crosvm-init.sh install/
cp -Rp .gitlab-ci/*.txt install/
cp -Rp .gitlab-ci/report-flakes.py install/
cp -Rp .gitlab-ci/valve install/
cp -Rp .gitlab-ci/vkd3d-proton install/
cp -Rp .gitlab-ci/*-runner.sh install/
find . -path \*/ci/\*.txt \
@@ -48,7 +47,6 @@ mkdir -p artifacts/
tar -cf artifacts/install.tar install
cp -Rp .gitlab-ci/common artifacts/ci-common
cp -Rp .gitlab-ci/lava artifacts/
cp -Rp .gitlab-ci/b2c artifacts/

if [ -n "$MINIO_ARTIFACT_NAME" ]; then
# Pass needed files to the test stage
@@ -16,12 +16,3 @@ for driver in freedreno intel v3d; do
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
> $ARTIFACTSDIR/${driver}-shader-db.txt
done

# Run shader-db over a number of supported chipsets for nouveau
for chipset in 40 a3 c0 e4 f0 134 162; do
echo "Running drm-shim for nouveau - $chipset"
env LD_PRELOAD=$LIBDIR/libnouveau_noop_drm_shim.so \
NOUVEAU_CHIPSET=${chipset} \
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
> $ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt
done
@@ -1,40 +1,12 @@
#!/bin/sh
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


copy_tests_files() (
# Copy either unit test or render test files from a specific driver given by
# the GPU_VERSION variable.
# If there is no test file at the expected location, this function will
# return error_code 1
SKQP_BACKEND="${1}"
SKQP_FILE_PREFIX="${INSTALL}/${GPU_VERSION}-skqp"

if echo "${SKQP_BACKEND}" | grep -qE 'vk|gl(es)?'
if echo "${SKQP_BACKEND}" | grep -qE 'gl(es)?'
then
SKQP_RENDER_TESTS_FILE="${SKQP_FILE_PREFIX}-${SKQP_BACKEND}_rendertests.txt"
[ -f "${SKQP_RENDER_TESTS_FILE}" ] || return 1
cp "${SKQP_RENDER_TESTS_FILE}" "${SKQP_ASSETS_DIR}"/skqp/rendertests.txt
return 0
fi
@@ -43,75 +15,37 @@ copy_tests_files() (
# that is why it needs to be a special case.
if echo "${SKQP_BACKEND}" | grep -qE "unitTest"
then
SKQP_UNIT_TESTS_FILE="${SKQP_FILE_PREFIX}_unittests.txt"
[ -f "${SKQP_UNIT_TESTS_FILE}" ] || return 1
cp "${SKQP_UNIT_TESTS_FILE}" "${SKQP_ASSETS_DIR}"/skqp/unittests.txt
cp "${SKQP_FILE_PREFIX}_unittests.txt" "${SKQP_ASSETS_DIR}"/skqp/unittests.txt
fi
)

test_vk_backend() {
if echo "${SKQP_BACKENDS}" | grep -qE 'vk'
then
if [ -n "$VK_DRIVER" ]; then
return 0
fi

echo "VK_DRIVER environment variable is missing."
VK_DRIVERS=$(ls "$INSTALL"/share/vulkan/icd.d/ | cut -f 1 -d '_')
if [ -n "${VK_DRIVERS}" ]
then
echo "Please set VK_DRIVER to the correct driver from the list:"
echo "${VK_DRIVERS}"
fi
echo "No Vulkan tests will be executed, but it was requested in SKQP_BACKENDS variable. Exiting."
exit 2
fi

# Vulkan environment is not configured, but it was not requested by the job
return 1
}

setup_backends() {
if test_vk_backend
then
export VK_ICD_FILENAMES="$INSTALL"/share/vulkan/icd.d/"$VK_DRIVER"_icd."${VK_CPU:-$(uname -m)}".json
fi
}

set -ex

# Needed so configuration files can contain paths to files in /install
ln -sf "$CI_PROJECT_DIR"/install /install

INSTALL=${PWD}/install

if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or
"freedreno-a630" (it will serve as a component to find the path for files
residing in src/**/ci/*.txt)'
exit 1
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/gpu-version-*.txt)'
exit 1
fi

LD_LIBRARY_PATH=$INSTALL:$LD_LIBRARY_PATH
setup_backends

SKQP_ASSETS_DIR=/skqp/assets
SKQP_RESULTS_DIR="${SKQP_RESULTS_DIR:-$PWD/results}"
SKQP_RESULTS_DIR="${SKQP_RESULTS_DIR:-results}"

mkdir -p "${SKQP_ASSETS_DIR}"/skqp
mkdir "${SKQP_ASSETS_DIR}"/skqp

SKQP_EXITCODE=0
for SKQP_BACKEND in ${SKQP_BACKENDS}
do
set -e
if ! copy_tests_files "${SKQP_BACKEND}"
then
echo "No override test file found for ${SKQP_BACKEND}. Using the default one."
fi
copy_tests_files "${SKQP_BACKEND}"

set +e
SKQP_BACKEND_RESULTS_DIR="${SKQP_RESULTS_DIR}"/"${SKQP_BACKEND}"
mkdir -p "${SKQP_BACKEND_RESULTS_DIR}"
/skqp/skqp "${SKQP_ASSETS_DIR}" "${SKQP_BACKEND_RESULTS_DIR}" "${SKQP_BACKEND}_"
/skqp/skqp "${SKQP_ASSETS_DIR}" '' "${SKQP_BACKEND_RESULTS_DIR}" "${SKQP_BACKEND}_"
BACKEND_EXITCODE=$?

if [ ! $BACKEND_EXITCODE -eq 0 ]
@@ -135,17 +69,15 @@ then
echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/${SKQP_RESULTS_DIR}/unitTest/unit_tests.txt"
fi

REPORT_FILES=$(mktemp)
find "${SKQP_RESULTS_DIR}"/**/report.html -type f > "${REPORT_FILES}"
while read -r REPORT
for REPORT in "${SKQP_RESULTS_DIR}"/**/report.html
do
BACKEND_NAME=$(echo "${REPORT}" | sed 's@.*/\([^/]*\)/report.html@\1@')
echo "See skqp ${BACKEND_NAME} render tests report at:"
echo "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/${REPORT}"
done < "${REPORT_FILES}"
done

# If there is no report available, tell the user that something is wrong.
if [ ! -s "${REPORT_FILES}" ]
if [ ! -f "${REPORT}" ]
then
echo "No skqp report available. Probably some fatal error has occurred during the skqp execution."
fi
@@ -1,25 +1,19 @@
# This file lists source dependencies to avoid creating/running jobs
# whose outcome cannot be changed by the modifications from a branch.

# Rule to filter for only scheduled pipelines.
.scheduled_pipeline-rules:
  rules:
    - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
      when: on_success
# Generic rule to not run the job during scheduled pipelines
# ----------------------------------------------------------
.scheduled_pipelines-rules:
  rules: &ignore_scheduled_pipelines
    if: '$CI_PIPELINE_SOURCE == "schedule"'
    when: never

# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
# something like a nightly run should include this rule.
.no_scheduled_pipelines-rules:
# Mesa core source file dependencies
# ----------------------------------
.mesa-rules:
  rules:
    - if: *is-scheduled-pipeline
      when: never

# Mesa core source file dependencies that may impact any test job
# ---------------------------------------------------------------
.core-rules:
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - changes: &core_file_list
    - *ignore_scheduled_pipelines
    - changes: &mesa_core_file_list
      - .gitlab-ci.yml
      - .gitlab-ci/**/*
      - include/**/*
@@ -28,24 +22,12 @@
      - src/*
      - src/compiler/**/*
      - src/drm-shim/**/*
      - src/egl/**/*
      - src/gbm/**/*
      - src/glx/**/*
      - src/gtest/**/*
      - src/hgl/**/*
      - src/include/**/*
      # Some src/util and src/compiler files use headers from mesa/ (e.g.
      # mtypes.h). We should clean that up.
      - src/mesa/**/*.h
      - src/tool/**/*
      - src/util/**/*
      when: on_success

# Mesa source file dependencies that may impact any GL driver test job.
.gl-rules:
  rules:
    - !reference [.core-rules, rules]
    - changes: &mesa_core_file_list
      - src/egl/**/*
      - src/glx/**/*
      - src/loader/**/*
      - src/mapi/**/*
      - src/mesa/*
@@ -59,7 +41,22 @@
      - src/mesa/vbo/**/*
      - src/mesa/x86/**/*
      - src/mesa/x86-64/**/*
      - src/tool/**/*
      - src/util/**/*

.vulkan-rules:
  rules:
    - *ignore_scheduled_pipelines
    - changes: &vulkan_file_list
      - src/vulkan/**/*
      when: on_success
    - when: never

# Gallium core source file dependencies
# -------------------------------------
.gallium-rules:
  rules:
    - *ignore_scheduled_pipelines
    - changes: &gallium_core_file_list
      - src/gallium/*
      - src/gallium/auxiliary/**/*
@@ -70,50 +67,63 @@
      - src/gallium/targets/**/*
      - src/gallium/tests/**/*
      - src/gallium/winsys/*
      when: on_success

# Source file dependencies that may impact any Vulkan driver build or test
.vulkan-rules:
  rules:
    - !reference [.core-rules, rules]
    - changes: &vulkan_file_list
      - src/vulkan/**/*
      when: on_success

.softpipe-rules:
  stage: software-renderer
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &softpipe_file_list
      - src/gallium/drivers/softpipe/**/*
      - src/gallium/winsys/sw/**/*
      when: on_success
    - when: never

.llvmpipe-rules:
  stage: software-renderer
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &llvmpipe_file_list
      - src/gallium/drivers/llvmpipe/**/*
      - src/gallium/winsys/sw/**/*
      when: on_success
    - when: never

.lavapipe-rules:
  stage: software-renderer
  rules:
    - !reference [.vulkan-rules, rules]
    # One could probably be a little more clever here and skip non-gallium Mesa changes (see also .llvmpipe-cl-rules).
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &lavapipe_file_list
      - src/gallium/drivers/llvmpipe/**/*
      - src/gallium/frontends/lavapipe/**/*
      - src/gallium/winsys/sw/**/*
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - when: never

.llvmpipe-cl-rules:
  stage: software-renderer
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
      - .gitlab-ci.yml
      - .gitlab-ci/**/*
@@ -133,82 +143,69 @@
    - changes: &clover_file_list
      - src/gallium/frontends/clover/**/*
      when: on_success

.collabora-farm-rules:
  rules:
    - if: '$COLLABORA_FARM == "offline" && $RUNNER_TAG =~ /^mesa-ci-x86-64-lava-/'
      when: never

.igalia-farm-rules:
  rules:
    - if: '$IGALIA_FARM == "offline"'
      when: never

# Skips freedreno jobs if either of the farms we use is offline.
.freedreno-farm-rules:
  rules:
    - if: '$FD_FARM == "offline"'
      when: never
    - !reference [.collabora-farm-rules, rules]

# Rules for changes that impact either freedreno or turnip.
.freedreno-common-rules:
  rules:
    - !reference [.freedreno-farm-rules, rules]
    - changes: &freedreno_core_file_list
      - src/freedreno/ci/**/*
      - src/freedreno/common/**/*
      - src/freedreno/drm/**/*
      - src/freedreno/fdl/**/*
      - src/freedreno/ir3/**/*
      - src/freedreno/isa/**/*
      - src/freedreno/registers/**/*
      when: on_success
    - when: never

.freedreno-rules:
  stage: freedreno
  rules:
    - !reference [.gl-rules, rules]
    - !reference [.freedreno-common-rules, rules]
    - changes: &freedreno_gl_file_list
      - src/freedreno/ir2/**/*
    - if: '$FD_FARM == "offline"'
      when: never
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: &freedreno_file_list
      # Note: when https://gitlab.com/gitlab-org/gitlab/-/issues/198688
      # is supported, we can change the src/freedreno/ rule to explicitly
      # exclude tools
      - src/freedreno/**/*
      - src/gallium/drivers/freedreno/**/*
      - src/gallium/winsys/freedreno/**/*
      when: on_success
    - when: never

.turnip-rules:
  stage: freedreno
  rules:
    - !reference [.vulkan-rules, rules]
    - !reference [.freedreno-common-rules, rules]
    - changes:
      - src/freedreno/vulkan/**/*
      when: on_success

# For piglit and skqp test jobs that run both GL and VK tests.
.freedreno-turnip-rules:
  rules:
    - !reference [.freedreno-rules, rules]
    - !reference [.turnip-rules, rules]

# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.freedreno-rules-restricted:
  stage: freedreno
  rules:
    # If the triggerer has access to the restricted traces and if it is pre-merge
    - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias)$/") &&
           ($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
    - if: '$FD_FARM == "offline"'
      when: never
    - !reference [.freedreno-rules, rules]
    # If the triggerer has access to the restricted traces and if it is pre-merge
    - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu)$/") &&
           ($GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME)'
      when: never
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes:
        *freedreno_file_list
      when: on_success
    - when: never

# Rules for GL driver performance tracking. We want them to run as code is
# merged to main, but we don't want them to block marge. So, they need to have
# only when: never or when: manual, and a separate script maintained by
# Collabora triggers the manual job after merge to main.
.gl-rules-performance:
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.freedreno-rules-performance:
  stage: freedreno
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - if: '$FD_FARM == "offline"'
      when: never
    - *ignore_scheduled_pipelines
    # Run only on pre-merge pipelines from Marge
    - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH'
    - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
      when: never
    - changes:
        *mesa_core_file_list
@@ -216,37 +213,24 @@
    - changes:
        *gallium_core_file_list
      when: manual

# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.freedreno-rules-performance:
  stage: freedreno
  rules:
    - !reference [.freedreno-farm-rules, rules]
    - !reference [.gl-rules-performance, rules]
    - changes:
        *freedreno_core_file_list
        *vulkan_file_list
      when: manual
    - changes:
        *freedreno_gl_file_list
        *freedreno_file_list
      when: manual

.nouveau-rules:
  stage: nouveau
  rules:
    - !reference [.gl-rules, rules]
    - changes:
      - src/nouveau/**/*
      - src/gallium/drivers/nouveau/**/*
      - src/gallium/winsys/kmsro/**/*
      - src/gallium/winsys/nouveau/**/*
      when: on_success
    - when: never

.panfrost-midgard-rules:
  stage: arm
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &panfrost_gallium_file_list
      - src/gallium/drivers/panfrost/**/*
      - src/gallium/winsys/panfrost/**/*
@@ -261,85 +245,126 @@
    - changes:
      - src/panfrost/midgard/**/*
      when: on_success
    - when: never

.panfrost-bifrost-rules:
  stage: arm
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.vulkan-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *panfrost_common_file_list
      when: on_success
    - changes:
        *panfrost_gallium_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: &panfrost_vulkan_file_list
      - src/panfrost/vulkan/*
      when: on_success
    - changes:
      - src/panfrost/bifrost/**/*
      when: on_success
    - when: never

.vc4-rules:
  stage: broadcom
  rules:
    - !reference [.igalia-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/broadcom/**/*
      - src/gallium/drivers/vc4/**/*
      - src/gallium/winsys/kmsro/**/*
      - src/gallium/winsys/vc4/**/*
      when: on_success
    - when: never

.v3d-rules:
  stage: broadcom
  rules:
    - !reference [.igalia-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/broadcom/**/*
      - src/gallium/drivers/v3d/**/*
      - src/gallium/winsys/kmsro/**/*
      - src/gallium/winsys/v3d/**/*
      when: on_success
    - when: never

.v3dv-rules:
  stage: broadcom
  rules:
    - !reference [.igalia-farm-rules, rules]
    - !reference [.vulkan-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes:
      - src/broadcom/**/*
      when: on_success
    - when: never

.lima-rules:
  stage: arm
  rules:
    - if: '$LIMA_FARM == "offline"'
      when: never
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/gallium/drivers/lima/**/*
      - src/gallium/winsys/lima/**/*
      - src/lima/**/*
      when: on_success
    - when: never

.radv-rules:
  stage: amd
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.vulkan-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes: &radv_file_list
      - src/amd/**/*
      - src/vulkan/**/*
      when: on_success
    - when: never

.virgl-rules:
  stage: layered-backends
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *llvmpipe_file_list
      when: on_success
@@ -347,30 +372,20 @@
      - src/gallium/drivers/virgl/**/*
      - src/gallium/winsys/virgl/**/*
      when: on_success

# Unfortunately we can't sed the on_success from another rules set, so we have
# to duplicate the file lists to set the job to manual (see
# .gl-rules-performance)
.virgl-iris-rules-performance:
  stage: layered-backends
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules-performance, rules]
    - changes:
        *llvmpipe_file_list
      when: manual
    - changes:
        *virgl_file_list
      when: manual
    - when: never

.radeonsi-rules:
  stage: amd
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &radeonsi_file_list
      - src/gallium/drivers/radeonsi/**/*
      - src/gallium/include/winsys/**/*
      - src/gallium/winsys/amdgpu/**/*
      - src/amd/*
      - src/amd/addrlib/**/*
@@ -378,178 +393,196 @@
      - src/amd/llvm/**/*
      - src/amd/registers/**/*
      when: on_success
    - when: never

.radeonsi-vaapi-rules:
  stage: amd
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *radeonsi_file_list
      when: on_success
    - changes: &radeon_vcn_file_list
      - src/gallium/frontends/va/**/*
      - src/gallium/drivers/radeon/**/*
      when: on_success
    - when: never

.i915g-rules:
  stage: intel
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/gallium/drivers/i915/**/*
      - src/gallium/winsys/i915/**/*
      - src/intel/**/*
      when: on_success
    - when: never

.crocus-rules:
  stage: intel
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/gallium/drivers/crocus/**/*
      - src/gallium/winsys/crocus/**/*
      - src/intel/**/*
      when: on_success
    - when: never

.iris-rules:
  stage: intel
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes: &iris_file_list
      - src/gallium/drivers/iris/**/*
      - src/gallium/winsys/iris/**/*
      - src/intel/**/*
      when: on_success
    - when: never

# Unfortunately we can't sed the on_success from another rules set, so we have
# to duplicate the file lists to set the job to manual (see
# .gl-rules-performance)
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.iris-rules-performance:
  stage: intel
  rules:
    - !reference [.collabora-farm-rules, rules]
    - !reference [.gl-rules-performance, rules]
    - *ignore_scheduled_pipelines
    # Run only on pre-merge pipelines from Marge
    - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
      when: never
    - changes:
        *mesa_core_file_list
      when: manual
    - changes:
        *gallium_core_file_list
      when: manual
    - changes:
        *iris_file_list
      when: manual
    - when: never

.anv-rules:
  stage: intel
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - !reference [.collabora-farm-rules, rules]
    - !reference [.vulkan-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes:
      - src/intel/**/*
      when: on_success
    - when: never

.intel-rules:
  stage: intel
.zink-rules:
  stage: layered-backends
  rules:
    - !reference [.collabora-farm-rules, rules]
    # Note that we trigger on changes both anv and iris, because piglit and skqp jobs test both.
    - !reference [.vulkan-rules, rules]
    - !reference [.gl-rules, rules]
    - changes: &iris_file_list
      - src/gallium/drivers/iris/**/*
      - src/gallium/winsys/iris/**/*
      - src/intel/**/*
      when: on_success

.zink-common-rules:
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes: &zink_file_list
      - src/gallium/drivers/zink/**/*
      when: on_success

.zink-lvp-rules:
  stage: layered-backends
  rules:
    - !reference [.zink-common-rules, rules]
    - !reference [.lavapipe-rules, rules]

.zink-anv-rules:
  stage: layered-backends
  rules:
    - !reference [.zink-common-rules, rules]
    - !reference [.anv-rules, rules]

.zink-turnip-rules:
  stage: layered-backends
  rules:
    - !reference [.zink-common-rules, rules]
    - !reference [.turnip-rules, rules]
    - when: never

# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.windows-build-rules:
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.zink-common-rules, rules]
    - !reference [.vulkan-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *softpipe_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: &d3d12_file_list
      - src/gallium/drivers/d3d12/**/*
      - src/microsoft/**/*
      - src/gallium/frontends/wgl/*
      - src/gallium/winsys/d3d12/wgl/*
      - src/gallium/targets/libgl-gdi/*
      - src/gallium/targets/libgl-d3d12/*
      when: on_success
    - changes:
      - src/microsoft/**/*
        *zink_file_list
      when: on_success
    - changes:
        *radv_file_list
      when: on_success
    - when: never

.glon12-test-rules:
.windows-test-rules:
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: *d3d12_file_list
      when: on_success
    - changes:
      - src/microsoft/compiler/*
      when: on_success

.spirv2dxil-test-rules:
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.no_scheduled_pipelines-rules, rules]
    - !reference [.core-rules, rules]
    - changes: &spirv2dxil_file_list
      - src/microsoft/ci/*
      - src/microsoft/compiler/*
      - src/microsoft/spirv_to_dxil/*
      when: on_success

.dozen-test-rules:
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.vulkan-rules, rules]
    - changes:
        *spirv2dxil_file_list
      when: on_success
    - changes:
      - src/microsoft/vulkan/*
      when: on_success
    - when: never

.etnaviv-rules:
  stage: etnaviv
  rules:
    - !reference [.gl-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/etnaviv/**/*
      - src/gallium/drivers/etnaviv/**/*
@@ -558,12 +591,18 @@
      - src/gallium/winsys/kmsro/**/*
      - src/gallium/winsys/etnaviv/**/*
      when: on_success
    - when: never

# Rules for unusual architectures that only build a subset of drivers
.ppc64el-rules:
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - !reference [.zink-common-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *softpipe_file_list
      when: on_success
@@ -579,6 +618,9 @@
    - changes:
        *radeonsi_file_list
      when: on_success
    - changes:
        *zink_file_list
      when: on_success
    - changes:
        *virgl_file_list
      when: on_success
@@ -586,11 +628,17 @@
      - src/gallium/drivers/nouveau/**/*
      - src/gallium/winsys/nouveau/**/*
      when: on_success
    - when: never

.s390x-rules:
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - !reference [.zink-common-rules, rules]
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *softpipe_file_list
      when: on_success
@@ -600,3 +648,7 @@
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *zink_file_list
      when: on_success
    - when: never
@@ -1,345 +0,0 @@
.test:
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  variables:
    GIT_STRATEGY: none # testing doesn't build anything from source
  before_script:
    - !reference [default, before_script]
    # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
    - rm -rf install
    - tar -xf artifacts/install.tar
    - echo -e "\e[0Ksection_start:$(date +%s):ldd_section[collapsed=true]\r\e[0KChecking ldd on driver build"
    - LD_LIBRARY_PATH=install/lib find install/lib -name "*.so" -print -exec ldd {} \;
    - echo -e "\e[0Ksection_end:$(date +%s):ldd_section\r\e[0K"
  artifacts:
    when: always
    name: "mesa_${CI_JOB_NAME}"
    paths:
      - results/

.test-gl:
  extends:
    - .test
    - .use-debian/x86_test-gl
  needs:
    - debian/x86_test-gl
    - debian-testing

.test-vk:
  extends:
    - .test
    - .use-debian/x86_test-vk
  needs:
    - debian-testing
    - debian/x86_test-vk

.test-cl:
  extends:
    - .test
    - .use-debian/x86_test-gl
  needs:
    - debian/x86_test-gl
    - debian-clover-testing

.vkd3d-proton-test:
  artifacts:
    when: on_failure
    name: "mesa_${CI_JOB_NAME}"
    paths:
      - results/vkd3d-proton.log
  script:
    - ./install/vkd3d-proton/run.sh

.piglit-test:
  artifacts:
    name: "mesa_${CI_JOB_NAME}"
    paths:
      - results
    reports:
      junit: results/junit.xml
  variables:
    PIGLIT_NO_WINDOW: 1
    HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"
  script:
    - install/piglit/piglit-runner.sh

.piglit-traces-test:
  extends:
    - .piglit-test
  cache:
    key: ${CI_JOB_NAME}
    paths:
      - replayer-db/
  artifacts:
    when: on_failure
    name: "mesa_${CI_JOB_NAME}"
    reports:
      junit: results/junit.xml
    paths:
      - results/summary/
      - results/*.txt
  variables:
    PIGLIT_REPLAY_EXTRA_ARGS: --keep-image --db-path ${CI_PROJECT_DIR}/replayer-db/ --minio_host=minio-packet.freedesktop.org --minio_bucket=mesa-tracie-public --role-session-name=${CI_PROJECT_PATH}:${CI_JOB_ID} --jwt-file=${CI_JOB_JWT_FILE}
  script:
    - install/piglit/piglit-traces.sh

.deqp-test:
  script:
    - ./install/deqp-runner.sh
  artifacts:
    exclude:
      - results/*.shader_cache
    reports:
      junit: results/junit.xml

.deqp-test-vk:
  extends:
    - .deqp-test
  variables:
    DEQP_VER: vk

.skqp-test:
  variables:
    HWCI_START_XORG: 1
    HWCI_TEST_SCRIPT: "/install/skqp-runner.sh"

.fossilize-test:
  script:
    - ./install/fossilize-runner.sh
  artifacts:
    when: on_failure
    name: "mesa_${CI_JOB_NAME}"
    paths:
      - results/

.baremetal-test:
  extends:
    - .test
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  before_script:
    - !reference [default, before_script]
    # Use this instead of gitlab's artifacts download because it hits packet.net
    # instead of fd.o. Set FDO_HTTP_CACHE_URI to an http cache for your test lab to
    # improve it even more (see https://docs.mesa3d.org/ci/bare-metal.html for
    # setup).
    - echo -e "\e[0Ksection_start:$(date +%s):artifacts_download[collapsed=true]\r\e[0KDownloading artifacts from minio"
    - wget ${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}.tar.gz -S --progress=dot:giga -O- | tar -xz
    - echo -e "\e[0Ksection_end:$(date +%s):artifacts_download\r\e[0K"
  artifacts:
    when: always
    name: "mesa_${CI_JOB_NAME}"
    paths:
      - results/
      - serial*.txt
    exclude:
      - results/*.shader_cache
    reports:
      junit: results/junit.xml

# ARM testing of bare-metal boards attached to an x86 gitlab-runner system
.baremetal-test-armhf:
  extends:
    - .baremetal-test
    - .use-debian/arm_test
  variables:
    BM_ROOTFS: /rootfs-armhf
    MINIO_ARTIFACT_NAME: mesa-armhf
  needs:
    - debian/arm_test
    - job: debian-armhf
      artifacts: false

# ARM64 testing of bare-metal boards attached to an x86 gitlab-runner system
.baremetal-test-arm64:
  extends:
    - .baremetal-test
    - .use-debian/arm_test
  variables:
    BM_ROOTFS: /rootfs-arm64
    MINIO_ARTIFACT_NAME: mesa-arm64
  needs:
    - debian/arm_test
    - job: debian-arm64
      artifacts: false

# ARM64 testing of bare-metal boards attached to an x86 gitlab-runner system, using an asan mesa build
.baremetal-arm64-asan-test:
  extends:
    - .baremetal-test
    - .use-debian/arm_test
  variables:
    DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.6:/install/lib/libdlclose-skip.so"
    MINIO_ARTIFACT_NAME: mesa-arm64-asan
  needs:
    - debian/arm_test
    - job: debian-arm64-asan
      artifacts: false

.baremetal-deqp-test:
  variables:
    HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
    FDO_CI_CONCURRENT: 0 # Default to number of CPUs

.baremetal-skqp-test:
  variables:
    HWCI_START_XORG: 1
    HWCI_TEST_SCRIPT: "/install/skqp-runner.sh"

# For Valve's bare-metal testing farm jobs.
.b2c-test:
  # It would be nice to use ci-templates within Mesa CI for this job's
  # image:, but the integration is not possible for the current
  # use-case. Within this job, two containers are managed. 1) the
  # gitlab runner container from which the job is submitted to the
  # DUT, and 2) the test container (e.g. debian/x86_test-vk) within
  # which the test cases will run on the DUT. Since ci-templates and
  # the associated image setting macros in this file rely on variables
  # like FDO_DISTRIBUTION_TAG for *the* image, there is no way to
  # depend on more than one image per job. So, the job container is
  # built as part of the CI in the boot2container project.
  image: registry.freedesktop.org/mupuf/valve-infra/mesa-trigger:2022-03-03.2
  timeout: 1h 40m
  variables:
    # No need by default to pull the whole repo
    GIT_STRATEGY: none
    # boot2container initrd configuration parameters.
    B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/mupuf/valve-infra/-/package_files/144/download' # 5.17.1
    B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.6/downloads/initramfs.linux_amd64.cpio.xz'
    B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$'
    B2C_JOB_WARN_REGEX: '\*ERROR\* ring .* timeout, but soft recovered'
    B2C_LOG_LEVEL: 6
    B2C_POWEROFF_DELAY: 15
    B2C_SESSION_END_REGEX: '^.*It''s now safe to turn off your computer\r$'
    B2C_SESSION_REBOOT_REGEX: '(GPU hang detected!|\*ERROR\* ring [^\s]+ timeout(?!, but soft recovered)|The CS has been cancelled because the context is lost)'
    B2C_TIMEOUT_BOOT_MINUTES: 45
    B2C_TIMEOUT_BOOT_RETRIES: 1
    B2C_TIMEOUT_FIRST_MINUTES: 5
    B2C_TIMEOUT_FIRST_RETRIES: 3
    B2C_TIMEOUT_MINUTES: 2
    B2C_TIMEOUT_OVERALL_MINUTES: 90
    B2C_TIMEOUT_RETRIES: 0

    # As noted in the top description, we make a distinction between the
    # container used by gitlab-runner to queue the work, and the container
    # used by the DUTs/test machines. To make this distinction quite clear,
    # we rename the MESA_IMAGE variable into IMAGE_UNDER_TEST.
    IMAGE_UNDER_TEST: "$MESA_IMAGE"

    INSTALL_TARBALL_NAME: "install.tar"
    INSTALL_TARBALL: "./artifacts/${INSTALL_TARBALL_NAME}"
    CI_B2C_ARTIFACTS: "./artifacts/b2c"
    CI_COMMON_SCRIPTS: "./artifacts/ci-common"
    GENERATE_ENV_SCRIPT: "${CI_COMMON_SCRIPTS}/generate-env.sh"
    B2C_JOB_TEMPLATE: "${CI_B2C_ARTIFACTS}/b2c.yml.jinja2.jinja2"
    JOB_FOLDER: "job_folder"
  before_script:
    # We don't want the tarball unpacking of .test, but will take the JWT bits.
    - !reference [default, before_script]
    - |
      set -x

      # Useful as a hook point for runner admins. You may edit the
      # config.toml for the Gitlab runner and use a bind-mount to
      # populate the hook script with some executable commands. This
      # allows quicker feedback than resubmitting pipelines and
      # potentially having to wait for a debug build of Mesa to
      # complete.
      if [ -x /runner-before-script.sh ]; then
        echo "Executing runner before-script hook..."
        sh /runner-before-script.sh
        if [ $? -ne 0 ]; then
          echo "Runner hook failed, goodbye"
          exit $?
        fi
      fi

      [ -s "$INSTALL_TARBALL" ] || exit 1
      [ -d "$CI_B2C_ARTIFACTS" ] || exit 1
      [ -d "$CI_COMMON_SCRIPTS" ] || exit 1


      B2C_TEST_SCRIPT="bash -c 'source ./set-job-env-vars.sh; tar xf ${INSTALL_TARBALL_NAME}; ${B2C_TEST_SCRIPT}'"
# The Valve CI gateway receives jobs in a YAML format. Create a
|
||||
# job description from the CI environment.
|
||||
python3 "$CI_B2C_ARTIFACTS"/generate_b2c.py \
|
||||
--ci-job-id "${CI_JOB_ID}" \
|
||||
--container-cmd "${B2C_TEST_SCRIPT}" \
|
||||
--initramfs-url "${B2C_INITRAMFS_URL}" \
|
||||
--job-success-regex "${B2C_JOB_SUCCESS_REGEX}" \
|
||||
--job-warn-regex "${B2C_JOB_WARN_REGEX}" \
|
||||
--kernel-url "${B2C_KERNEL_URL}" \
|
||||
--log-level "${B2C_LOG_LEVEL}" \
|
||||
--poweroff-delay "${B2C_POWEROFF_DELAY}" \
|
||||
--session-end-regex "${B2C_SESSION_END_REGEX}" \
|
||||
--session-reboot-regex "${B2C_SESSION_REBOOT_REGEX}" \
|
||||
--tags "${CI_RUNNER_TAGS}" \
|
||||
--template "${B2C_JOB_TEMPLATE}" \
|
||||
--timeout-boot-minutes "${B2C_TIMEOUT_BOOT_MINUTES}" \
|
||||
--timeout-boot-retries "${B2C_TIMEOUT_BOOT_RETRIES}" \
|
||||
--timeout-first-minutes "${B2C_TIMEOUT_FIRST_MINUTES}" \
|
||||
--timeout-first-retries "${B2C_TIMEOUT_FIRST_RETRIES}" \
|
||||
--timeout-minutes "${B2C_TIMEOUT_MINUTES}" \
|
||||
--timeout-overall-minutes "${B2C_TIMEOUT_OVERALL_MINUTES}" \
|
||||
--timeout-retries "${B2C_TIMEOUT_RETRIES}" \
|
||||
--job-volume-exclusions "${B2C_JOB_VOLUME_EXCLUSIONS}" \
|
||||
--local-container "${IMAGE_UNDER_TEST}" \
|
||||
${B2C_EXTRA_VOLUME_ARGS} \
|
||||
--working-dir "$CI_PROJECT_DIR"
|
||||
|
||||
cat b2c.yml.jinja2
|
||||
|
||||
rm -rf ${JOB_FOLDER} || true
|
||||
mkdir -v ${JOB_FOLDER}
|
||||
# Create a script to regenerate the CI environment when this job
|
||||
# begins running on the remote DUT.
|
||||
set +x
|
||||
"$CI_COMMON_SCRIPTS"/generate-env.sh > ${JOB_FOLDER}/set-job-env-vars.sh
|
||||
chmod +x ${JOB_FOLDER}/set-job-env-vars.sh
|
||||
echo "Variables passed through:"
|
||||
cat ${JOB_FOLDER}/set-job-env-vars.sh
|
||||
echo "export CI_JOB_JWT=${CI_JOB_JWT}" >> ${JOB_FOLDER}/set-job-env-vars.sh
|
||||
set -x
|
||||
|
||||
# Copy the mesa install tarball to the job folder, for later extraction
|
||||
mv "${INSTALL_TARBALL}" "${JOB_FOLDER}"
|
||||
|
||||
script: |
|
||||
slugify () {
|
||||
echo "$1" | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z
|
||||
}
|
||||
|
||||
# Submit the job to Valve's CI gateway service with the CI
|
||||
# provisioned job_folder.
|
||||
env PYTHONUNBUFFERED=1 executorctl \
|
||||
run -w b2c.yml.jinja2 -j $(slugify "$CI_JOB_NAME") -s ${JOB_FOLDER}
|
||||
|
||||
ls -l
|
||||
# Anything our job places in results/ will be collected by the
|
||||
# Gitlab coordinator for status presentation. results/junit.xml
|
||||
# will be parsed by the UI for more detailed explanations of
|
||||
# test execution.
|
||||
artifacts:
|
||||
when: always
|
||||
name: "mesa_${CI_JOB_NAME}"
|
||||
paths:
|
||||
- ${JOB_FOLDER}/results
|
||||
reports:
|
||||
junit: ${JOB_FOLDER}/results/junit.xml
|
||||
|
||||
.b2c-test-vk:
|
||||
extends:
|
||||
- .use-debian/x86_test-vk
|
||||
- .b2c-test
|
||||
needs:
|
||||
- debian/x86_test-vk
|
||||
- debian-testing
|
||||
|
||||
.b2c-test-gl:
|
||||
extends:
|
||||
- .use-debian/x86_test-gl
|
||||
- .b2c-test
|
||||
needs:
|
||||
- debian/x86_test-gl
|
||||
- debian-testing
|
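The three regexes in the .b2c-test variables above encode the whole session policy: B2C_JOB_SUCCESS_REGEX ends the job as a pass, B2C_JOB_WARN_REGEX downgrades soft-recovered ring timeouts to warnings, and B2C_SESSION_REBOOT_REGEX reboots the DUT on real hangs. The subtle part is the negative lookahead, which keeps a soft-recovered timeout from also counting as a hang. A minimal Python sketch of how the patterns classify console output; the sample lines below are invented for illustration and are not real job logs:

import re

# Patterns copied verbatim from the .b2c-test variables above.
JOB_SUCCESS = re.compile(r'\[.*\]: Execution is over, pipeline status: 0\r$')
JOB_WARN = re.compile(r'\*ERROR\* ring .* timeout, but soft recovered')
SESSION_REBOOT = re.compile(
    r'(GPU hang detected!'
    r'|\*ERROR\* ring [^\s]+ timeout(?!, but soft recovered)'
    r'|The CS has been cancelled because the context is lost)'
)

# Hypothetical console lines, for illustration only.
soft = '[drm] *ERROR* ring gfx timeout, but soft recovered'
hard = '[drm] *ERROR* ring gfx timeout'
over = '[b2c]: Execution is over, pipeline status: 0\r'

assert JOB_WARN.search(soft) and not SESSION_REBOOT.search(soft)  # warn only
assert SESSION_REBOOT.search(hard)                                # reboot the DUT
assert JOB_SUCCESS.search(over)                                   # job passed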
@@ -1,56 +0,0 @@
from unittest.mock import MagicMock, patch

import pytest
import yaml
from freezegun import freeze_time

from .lava.helpers import generate_testsuite_result, jobs_logs_response


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "slow: marks tests as slow (deselect with '-m \"not slow\"')"
    )


@pytest.fixture
def mock_sleep():
    """Mock time.sleep to make tests run faster"""
    with patch("time.sleep", return_value=None):
        yield


@pytest.fixture
def frozen_time(mock_sleep):
    with freeze_time() as frozen_time:
        yield frozen_time


RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]


@pytest.fixture
def mock_proxy():
    def create_proxy_mock(
        job_results=RESULT_GET_TESTJOB_RESULTS,
        testsuite_results=[generate_testsuite_result()],
        **kwargs
    ):
        proxy_mock = MagicMock()
        proxy_submit_mock = proxy_mock.scheduler.jobs.submit
        proxy_submit_mock.return_value = "1234"

        proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
        proxy_results_mock.return_value = yaml.safe_dump(job_results)

        proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
        proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)

        proxy_logs_mock = proxy_mock.scheduler.jobs.logs
        proxy_logs_mock.return_value = jobs_logs_response()

        for key, value in kwargs.items():
            setattr(proxy_logs_mock, key, value)

        return proxy_mock

    yield create_proxy_mock
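The conftest above wires the whole suite: mock_sleep neutralizes real waits, frozen_time pins the clock via freezegun, and mock_proxy yields a factory for XML-RPC proxy doubles whose extra kwargs land as attributes (such as side_effect) on proxy.scheduler.jobs.logs. A minimal sketch of how a test would consume these fixtures; the test body itself is illustrative and not part of the suite:

# Illustrative only: a test consuming the fixtures defined above.
def test_sketch_submit_returns_job_id(mock_sleep, mock_proxy):
    # mock_proxy yields a factory, so each test builds its own double.
    proxy = mock_proxy()
    # The factory pre-programs submit() to hand back a fake job id.
    assert proxy.scheduler.jobs.submit() == "1234"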
@@ -1,139 +0,0 @@
from contextlib import nullcontext as does_not_raise
from datetime import datetime
from itertools import cycle
from typing import Callable, Generator, Iterable, Optional, Tuple, Union

import yaml
from freezegun import freeze_time
from lava.utils.log_section import (
    DEFAULT_GITLAB_SECTION_TIMEOUTS,
    FALLBACK_GITLAB_SECTION_TIMEOUT,
    LogSectionType,
)


def section_timeout(section_type: LogSectionType) -> int:
    return int(
        DEFAULT_GITLAB_SECTION_TIMEOUTS.get(
            section_type, FALLBACK_GITLAB_SECTION_TIMEOUT
        ).total_seconds()
    )


def create_lava_yaml_msg(
    dt: Callable = datetime.now, msg="test", lvl="target"
) -> dict[str, str]:
    return {"dt": str(dt()), "msg": msg, "lvl": lvl}


def generate_testsuite_result(
    name="test-mesa-ci", result="pass", metadata_extra=None, extra=None
):
    if metadata_extra is None:
        metadata_extra = {}
    if extra is None:
        extra = {}
    return {"metadata": {"result": result, **metadata_extra}, "name": name}


def jobs_logs_response(
    finished=False, msg=None, lvl="target", result=None
) -> Tuple[bool, str]:
    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
    if result:
        timed_msg["lvl"] = "target"
        timed_msg["msg"] = f"hwci: mesa: {result}"

    logs = [timed_msg] if msg is None else msg

    return finished, yaml.safe_dump(logs)


def section_aware_message_generator(
    messages: dict[LogSectionType, Iterable[int]], result: Optional[str] = None
) -> Iterable[tuple[dict, Iterable[int]]]:
    default = [1]

    result_message_section = LogSectionType.TEST_CASE

    for section_type in LogSectionType:
        delay = messages.get(section_type, default)
        yield mock_lava_signal(section_type), delay
        if result and section_type == result_message_section:
            # To consider the job finished, the result `echo` should be
            # produced in the correct section.
            yield create_lava_yaml_msg(msg=f"hwci: mesa: {result}"), delay


def message_generator():
    for section_type in LogSectionType:
        yield mock_lava_signal(section_type)


def level_generator():
    # Tests all known levels by default
    yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))


def generate_n_logs(
    n=1,
    tick_fn: Union[Generator, Iterable[int], int] = 1,
    level_fn=level_generator,
    result="pass",
):
    """Simulate a log partitioned into n components"""
    level_gen = level_fn()

    # Normalize tick_fn into a generator of tick durations (see
    # to_iterable below).
    tick_gen = to_iterable(tick_fn)

    with freeze_time(datetime.now()) as time_travel:
        tick_sec: int = next(tick_gen)
        while True:
            # Simulate a scenario where the target job is waiting to be started
            for _ in range(n - 1):
                level: str = next(level_gen)

                time_travel.tick(tick_sec)
                yield jobs_logs_response(finished=False, msg=[], lvl=level)

            time_travel.tick(tick_sec)
            yield jobs_logs_response(finished=True, result=result)


def to_iterable(tick_fn):
    if isinstance(tick_fn, Generator):
        return tick_fn
    elif isinstance(tick_fn, Iterable):
        return cycle(tick_fn)
    else:
        return cycle((tick_fn,))


def mock_logs(messages=None, result=None):
    if messages is None:
        messages = {}
    with freeze_time(datetime.now()) as time_travel:
        # Simulate a complete run given by message_fn
        for msg, tick_list in section_aware_message_generator(messages, result):
            for tick_sec in tick_list:
                yield jobs_logs_response(finished=False, msg=[msg])
                time_travel.tick(tick_sec)


def mock_lava_signal(type: LogSectionType) -> dict[str, str]:
    return {
        LogSectionType.TEST_CASE: create_lava_yaml_msg(
            msg="<STARTTC> case", lvl="debug"
        ),
        LogSectionType.TEST_SUITE: create_lava_yaml_msg(
            msg="<STARTRUN> suite", lvl="debug"
        ),
        LogSectionType.LAVA_POST_PROCESSING: create_lava_yaml_msg(
            msg="<LAVA_SIGNAL_ENDTC case>", lvl="target"
        ),
    }.get(type, create_lava_yaml_msg())
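These helpers emulate the LAVA scheduler's log endpoint, which hands back one (finished, yaml_text) tuple per poll. A standalone sketch of the shape mock_logs produces, assuming it runs inside the same test package (note the relative import) with freezegun and PyYAML installed:

import yaml

from .lava.helpers import mock_logs

# Each item mimics one scheduler.jobs.logs poll: a (finished, yaml_text) pair
# whose YAML payload is a list of {"dt", "msg", "lvl"} log entries.
for finished, raw in mock_logs(result="pass"):
    entries = yaml.safe_load(raw)
    assert isinstance(finished, bool)
    assert all({"dt", "msg", "lvl"} <= entry.keys() for entry in entries)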
@@ -1,354 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# SPDX-License-Identifier: MIT

import xmlrpc.client
from contextlib import nullcontext as does_not_raise
from datetime import datetime
from itertools import chain, repeat

import pytest
from lava.exceptions import MesaCIException, MesaCIRetryError
from lava.lava_job_submitter import (
    DEVICE_HANGING_TIMEOUT_SEC,
    NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
    LAVAJob,
    follow_job_execution,
    retriable_follow_job,
)
from lava.utils import LogSectionType

from .lava.helpers import (
    generate_n_logs,
    generate_testsuite_result,
    jobs_logs_response,
    mock_lava_signal,
    mock_logs,
    section_timeout,
)

NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1


@pytest.fixture
def mock_proxy_waiting_time(mock_proxy):
    def update_mock_proxy(frozen_time, **kwargs):
        wait_time = kwargs.pop("wait_time", 1)
        proxy_mock = mock_proxy(**kwargs)
        proxy_job_state = proxy_mock.scheduler.job_state
        proxy_job_state.return_value = {"job_state": "Running"}
        proxy_job_state.side_effect = frozen_time.tick(wait_time)

        return proxy_mock

    return update_mock_proxy


@pytest.mark.parametrize("exception", [RuntimeError, SystemError, KeyError])
def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception):
    with pytest.raises(MesaCIException):
        proxy = mock_proxy(side_effect=exception)
        job = LAVAJob(proxy, "")
        follow_job_execution(job)


NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")

PROXY_SCENARIOS = {
    "simple pass case": (mock_logs(result="pass"), does_not_raise(), "pass", {}),
    "simple fail case": (mock_logs(result="fail"), does_not_raise(), "fail", {}),
    "simple hung case": (
        mock_logs(
            messages={
                LogSectionType.TEST_CASE: [
                    section_timeout(LogSectionType.TEST_CASE) + 1
                ]
                * 1000
            },
            result="fail",
        ),
        pytest.raises(MesaCIRetryError),
        "hung",
        {},
    ),
    "leftover dump from last job in boot section": (
        (
            mock_lava_signal(LogSectionType.LAVA_BOOT),
            jobs_logs_response(finished=False, msg=None, result="fail"),
        ),
        pytest.raises(MesaCIRetryError),
        "hung",
        {},
    ),
    "boot works at last retry": (
        mock_logs(
            messages={
                LogSectionType.LAVA_BOOT: [
                    section_timeout(LogSectionType.LAVA_BOOT) + 1
                ]
                * NUMBER_OF_RETRIES_TIMEOUT_DETECTION
                + [1]
            },
            result="pass",
        ),
        does_not_raise(),
        "pass",
        {},
    ),
    "test case took too long": pytest.param(
        mock_logs(
            messages={
                LogSectionType.TEST_CASE: [
                    section_timeout(LogSectionType.TEST_CASE) + 1
                ]
                * (NUMBER_OF_MAX_ATTEMPTS + 1)
            },
            result="pass",
        ),
        pytest.raises(MesaCIRetryError),
        "pass",
        {},
    ),
    "timed out more times than retry attempts": (
        generate_n_logs(n=4, tick_fn=9999999),
        pytest.raises(MesaCIRetryError),
        "fail",
        {},
    ),
    "long log case, no silence": (
        mock_logs(
            messages={LogSectionType.TEST_CASE: [1] * 1000},
            result="pass",
        ),
        does_not_raise(),
        "pass",
        {},
    ),
    "no retries, testsuite succeed": (
        mock_logs(result="pass"),
        does_not_raise(),
        "pass",
        {
            "testsuite_results": [
                generate_testsuite_result(result="pass")
            ]
        },
    ),
    "no retries, but testsuite fails": (
        mock_logs(result="fail"),
        does_not_raise(),
        "fail",
        {
            "testsuite_results": [
                generate_testsuite_result(result="fail")
            ]
        },
    ),
    "no retries, one testsuite fails": (
        generate_n_logs(n=1, tick_fn=0, result="fail"),
        does_not_raise(),
        "fail",
        {
            "testsuite_results": [
                generate_testsuite_result(result="fail"),
                generate_testsuite_result(result="pass"),
            ]
        },
    ),
    "very long silence": (
        generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS + 1, tick_fn=100000),
        pytest.raises(MesaCIRetryError),
        "fail",
        {},
    ),
    # If a protocol error happens, _call_proxy will retry without affecting timeouts
    "unstable connection, ProtocolError followed by final message": (
        (NETWORK_EXCEPTION, *list(mock_logs(result="pass"))),
        does_not_raise(),
        "pass",
        {},
    ),
    # After an arbitrary number of retries, _call_proxy should call sys.exit
    "unreachable case, subsequent ProtocolErrors": (
        repeat(NETWORK_EXCEPTION),
        pytest.raises(SystemExit),
        "fail",
        {},
    ),
    "XMLRPC Fault": ([XMLRPC_FAULT], pytest.raises(SystemExit, match="1"), False, {}),
}


@pytest.mark.parametrize(
    "test_log, expectation, job_result, proxy_args",
    PROXY_SCENARIOS.values(),
    ids=PROXY_SCENARIOS.keys(),
)
def test_retriable_follow_job(
    mock_sleep,
    test_log,
    expectation,
    job_result,
    proxy_args,
    mock_proxy,
):
    with expectation:
        proxy = mock_proxy(side_effect=test_log, **proxy_args)
        job: LAVAJob = retriable_follow_job(proxy, "")
        assert job_result == job.status


WAIT_FOR_JOB_SCENARIOS = {"one log run taking (sec):": mock_logs(result="pass")}


@pytest.mark.parametrize("wait_time", (DEVICE_HANGING_TIMEOUT_SEC * 2,))
@pytest.mark.parametrize(
    "side_effect",
    WAIT_FOR_JOB_SCENARIOS.values(),
    ids=WAIT_FOR_JOB_SCENARIOS.keys(),
)
def test_simulate_a_long_wait_to_start_a_job(
    frozen_time,
    wait_time,
    side_effect,
    mock_proxy_waiting_time,
):
    start_time = datetime.now()
    job: LAVAJob = retriable_follow_job(
        mock_proxy_waiting_time(
            frozen_time, side_effect=side_effect, wait_time=wait_time
        ),
        "",
    )

    end_time = datetime.now()
    delta_time = end_time - start_time

    assert job.status == "pass"
    assert delta_time.total_seconds() >= wait_time


CORRUPTED_LOG_SCENARIOS = {
    "too much subsequent corrupted data": (
        [(False, "{'msg': 'Incomplete}")] * 100 + [jobs_logs_response(True)],
        pytest.raises(MesaCIRetryError),
    ),
    "one subsequent corrupted data": (
        [(False, "{'msg': 'Incomplete}")] * 2 + [jobs_logs_response(True)],
        does_not_raise(),
    ),
}


@pytest.mark.parametrize(
    "data_sequence, expected_exception",
    CORRUPTED_LOG_SCENARIOS.values(),
    ids=CORRUPTED_LOG_SCENARIOS.keys(),
)
def test_log_corruption(mock_sleep, data_sequence, expected_exception, mock_proxy):
    proxy_mock = mock_proxy()
    proxy_logs_mock = proxy_mock.scheduler.jobs.logs
    proxy_logs_mock.side_effect = data_sequence
    with expected_exception:
        retriable_follow_job(proxy_mock, "")


LAVA_RESULT_LOG_SCENARIOS = {
    # the submitter should accept xtrace logs
    "Bash xtrace echo with kmsg interleaving": (
        "echo hwci: mesa: pass[ 737.673352] <LAVA_SIGNAL_ENDTC mesa-ci>",
        "pass",
    ),
    # the submitter should accept kmsg logs
    "kmsg result print": (
        "[ 737.673352] hwci: mesa: pass",
        "pass",
    ),
    # with enough bad luck, the job result echo can still be interleaved
    # with kmsg
    "echo output with kmsg interleaving": (
        "hwci: mesa: pass[ 737.673352] <LAVA_SIGNAL_ENDTC mesa-ci>",
        "pass",
    ),
    "fail case": (
        "hwci: mesa: fail",
        "fail",
    ),
}


@pytest.mark.parametrize(
    "message, expectation",
    LAVA_RESULT_LOG_SCENARIOS.values(),
    ids=LAVA_RESULT_LOG_SCENARIOS.keys(),
)
def test_parse_job_result_from_log(message, expectation, mock_proxy):
    job = LAVAJob(mock_proxy(), "")
    job.parse_job_result_from_log([message])

    assert job.status == expectation


@pytest.mark.slow(
    reason="Slow and sketchy test. Needs a LAVA log raw file at /tmp/log.yaml"
)
def test_full_yaml_log(mock_proxy, frozen_time):
    import itertools
    import random
    from datetime import datetime

    import yaml

    def time_travel_from_log_chunk(data_chunk):
        if not data_chunk:
            return

        first_log_time = data_chunk[0]["dt"]
        frozen_time.move_to(first_log_time)
        yield

        last_log_time = data_chunk[-1]["dt"]
        frozen_time.move_to(last_log_time)
        return

    def time_travel_to_test_time():
        # Suppose that the first message timestamp of the entire LAVA job log
        # is the same as the one from the job submitter's execution
        with open("/tmp/log.yaml", "r") as f:
            first_log = f.readline()
            first_log_time = yaml.safe_load(first_log)[0]["dt"]
            frozen_time.move_to(first_log_time)

    def load_lines() -> list:
        with open("/tmp/log.yaml", "r") as f:
            data = yaml.safe_load(f)
            chain = itertools.chain(data)
            try:
                while True:
                    data_chunk = [next(chain) for _ in range(random.randint(0, 50))]
                    # Suppose that the first message timestamp is the same as
                    # the one of the log fetch RPC call
                    time_travel_from_log_chunk(data_chunk)
                    yield False, []
                    # Travel to the same datetime of the last fetched log line
                    # in the chunk
                    time_travel_from_log_chunk(data_chunk)
                    yield False, data_chunk
            except StopIteration:
                yield True, data_chunk
                return

    proxy = mock_proxy()

    def reset_logs(*args):
        proxy.scheduler.jobs.logs.side_effect = load_lines()

    proxy.scheduler.jobs.submit = reset_logs
    with pytest.raises(MesaCIRetryError):
        time_travel_to_test_time()
        retriable_follow_job(proxy, "")
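The PROXY_SCENARIOS table above pins down the retry contract of retriable_follow_job: hangs and timeouts are retried up to NUMBER_OF_MAX_ATTEMPTS and then surface as MesaCIRetryError, while ProtocolErrors are absorbed inside _call_proxy until it gives up with sys.exit. The real implementation lives in lava/lava_job_submitter.py; the following is only a simplified sketch of the retry shape being tested, with stand-in exception types:

# Simplified sketch of the retry contract exercised above; not the real code.
def retriable_follow_job_sketch(follow, attempts):
    for _ in range(attempts):
        try:
            return follow()           # returns a finished LAVAJob on success
        except TimeoutError:          # stand-in for MesaCITimeoutError
            continue                  # resubmit the job and follow it again
    raise RuntimeError("retries exhausted")  # stand-in for MesaCIRetryError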
@@ -1,349 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# SPDX-License-Identifier: MIT

from datetime import datetime, timedelta

import pytest
import yaml
from lava.exceptions import MesaCIKnownIssueException, MesaCITimeoutError
from lava.utils import (
    GitlabSection,
    LogFollower,
    LogSectionType,
    fix_lava_color_log,
    fix_lava_gitlab_section_log,
    hide_sensitive_data,
)

from ..lava.helpers import create_lava_yaml_msg, does_not_raise

GITLAB_SECTION_SCENARIOS = {
    "start collapsed": (
        "start",
        True,
        f"\x1b[0Ksection_start:mock_date:my_first_section[collapsed=true]\r\x1b[0K{GitlabSection.colour}my_header\x1b[0m",
    ),
    "start non_collapsed": (
        "start",
        False,
        f"\x1b[0Ksection_start:mock_date:my_first_section\r\x1b[0K{GitlabSection.colour}my_header\x1b[0m",
    ),
    "end collapsed": (
        "end",
        True,
        "\x1b[0Ksection_end:mock_date:my_first_section\r\x1b[0K",
    ),
    "end non_collapsed": (
        "end",
        False,
        "\x1b[0Ksection_end:mock_date:my_first_section\r\x1b[0K",
    ),
}


@pytest.mark.parametrize(
    "method, collapsed, expectation",
    GITLAB_SECTION_SCENARIOS.values(),
    ids=GITLAB_SECTION_SCENARIOS.keys(),
)
def test_gitlab_section(method, collapsed, expectation):
    gs = GitlabSection(
        id="my_first_section",
        header="my_header",
        type=LogSectionType.TEST_CASE,
        start_collapsed=collapsed,
    )
    gs.get_timestamp = lambda x: "mock_date"
    gs.start()
    result = getattr(gs, method)()
    assert result == expectation


def test_gl_sections():
    lines = [
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTRUN> 0_mesa 5971831_1.3.2.3.1",
        },
        # Redundant log message which triggers the same Gitlab section; it
        # should be ignored, unless the id is different
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "[ 7.778836] <LAVA_SIGNAL_STARTRUN 0_mesa 5971831_1.3.2.3.1>",
        },
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
        },
        # Another redundant log message
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "[ 16.997829] <LAVA_SIGNAL_STARTTC mesa-ci_iris-kbl-traces>",
        },
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
        },
    ]
    lf = LogFollower()
    for line in lines:
        lf.manage_gl_sections(line)

    parsed_lines = lf.flush()
    assert "section_start" in parsed_lines[0]
    assert "collapsed=true" not in parsed_lines[0]
    assert "section_end" in parsed_lines[1]
    assert "section_start" in parsed_lines[2]
    assert "collapsed=true" not in parsed_lines[2]
    assert "section_end" in parsed_lines[3]
    assert "section_start" in parsed_lines[4]
    assert "collapsed=true" in parsed_lines[4]


def test_log_follower_flush():
    lines = [
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
        },
        {
            "dt": datetime.now(),
            "lvl": "target",
            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
        },
    ]
    lf = LogFollower()
    lf.feed(lines)
    parsed_lines = lf.flush()
    empty = lf.flush()
    lf.feed(lines)
    repeated_parsed_lines = lf.flush()

    assert parsed_lines
    assert not empty
    assert repeated_parsed_lines


SENSITIVE_DATA_SCENARIOS = {
    "no sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341=="],
        ["bla bla", "mytoken: asdkfjsde1341=="],
        "HIDEME",
    ),
    "sensitive data tagged": (
        ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
        ["bla bla"],
        "HIDEME",
    ),
    "sensitive data tagged with custom word": (
        ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
        ["bla bla", "third line"],
        "DELETETHISLINE",
    ),
}


@pytest.mark.parametrize(
    "input, expectation, tag",
    SENSITIVE_DATA_SCENARIOS.values(),
    ids=SENSITIVE_DATA_SCENARIOS.keys(),
)
def test_hide_sensitive_data(input, expectation, tag):
    yaml_data = yaml.safe_dump(input)
    yaml_result = hide_sensitive_data(yaml_data, tag)
    result = yaml.safe_load(yaml_result)

    assert result == expectation


COLOR_MANGLED_SCENARIOS = {
    "Mangled error message at target level": (
        create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
        "\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
    ),
    "Mangled pass message at target level": (
        create_lava_yaml_msg(
            msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
            lvl="target",
        ),
        "\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
    ),
    "Mangled error message with bold formatting at target level": (
        create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
        "\x1b[1;31mReview the image changes...",
    ),
    "Mangled error message with high intensity background at target level": (
        create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
        "\x1b[100mReview the image changes...",
    ),
    "Mangled error message with underline+bg color+fg color at target level": (
        create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
        "\x1b[4;41;97mReview the image changes...",
    ),
    "Bad input for color code.": (
        create_lava_yaml_msg(
            msg="[4;97 This message is missing the `m`.", lvl="target"
        ),
        "[4;97 This message is missing the `m`.",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    COLOR_MANGLED_SCENARIOS.values(),
    ids=COLOR_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_color_log(message, fixed_message):
    fix_lava_color_log(message)

    assert message["msg"] == fixed_message


GITLAB_SECTION_MANGLED_SCENARIOS = {
    "Mangled section_start at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
    ),
    "Mangled section_start at target level with header with spaces": (
        create_lava_yaml_msg(
            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
            lvl="target",
        ),
        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
    ),
    "Mangled section_end at target level": (
        create_lava_yaml_msg(
            msg="[0Ksection_end:1652658415:test_setup[0K",
            lvl="target",
        ),
        "\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
    ),
}


@pytest.mark.parametrize(
    "message, fixed_message",
    GITLAB_SECTION_MANGLED_SCENARIOS.values(),
    ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
)
def test_fix_lava_gitlab_section_log(message, fixed_message):
    fix_lava_gitlab_section_log(message)

    assert message["msg"] == fixed_message


WATCHDOG_SCENARIOS = {
    "1 second before timeout": ({"seconds": -1}, does_not_raise()),
    "1 second after timeout": ({"seconds": 1}, pytest.raises(MesaCITimeoutError)),
}


@pytest.mark.parametrize(
    "timedelta_kwargs, exception",
    WATCHDOG_SCENARIOS.values(),
    ids=WATCHDOG_SCENARIOS.keys(),
)
def test_log_follower_watchdog(frozen_time, timedelta_kwargs, exception):
    lines = [
        {
            "dt": datetime.now(),
            "lvl": "debug",
            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
        },
    ]
    td = {LogSectionType.TEST_CASE: timedelta(minutes=1)}
    lf = LogFollower(timeout_durations=td)
    lf.feed(lines)
    frozen_time.tick(
        lf.timeout_durations[LogSectionType.TEST_CASE] + timedelta(**timedelta_kwargs)
    )
    lines = [create_lava_yaml_msg()]
    with exception:
        lf.feed(lines)


GITLAB_SECTION_ID_SCENARIOS = [
    ("a-good_name", "a-good_name"),
    ("spaces are not welcome", "spaces-are-not-welcome"),
    ("abc:amd64 1/3", "abc-amd64-1-3"),
]


@pytest.mark.parametrize("case_name, expected_id", GITLAB_SECTION_ID_SCENARIOS)
def test_gitlab_section_id(case_name, expected_id):
    gl = GitlabSection(
        id=case_name, header=case_name, type=LogSectionType.LAVA_POST_PROCESSING
    )

    assert gl.id == expected_id


A618_NETWORK_ISSUE_LOGS = [
    create_lava_yaml_msg(
        msg="[ 1733.599402] r8152 2-1.3:1.0 eth0: Tx status -71", lvl="target"
    ),
    create_lava_yaml_msg(
        msg="[ 1733.604506] nfs: server 192.168.201.1 not responding, still trying",
        lvl="target",
    ),
]
TEST_PHASE_LAVA_SIGNAL = create_lava_yaml_msg(
    msg="Received signal: <STARTTC> mesa-ci_a618_vk", lvl="debug"
)


A618_NETWORK_ISSUE_SCENARIOS = {
    "Pass - R8152 kmsg during boot": (A618_NETWORK_ISSUE_LOGS, does_not_raise()),
    "Fail - R8152 kmsg during test phase": (
        [TEST_PHASE_LAVA_SIGNAL, *A618_NETWORK_ISSUE_LOGS],
        pytest.raises(MesaCIKnownIssueException),
    ),
    "Pass - Partial (1) R8152 kmsg during test phase": (
        [TEST_PHASE_LAVA_SIGNAL, A618_NETWORK_ISSUE_LOGS[0]],
        does_not_raise(),
    ),
    "Pass - Partial (2) R8152 kmsg during test phase": (
        [TEST_PHASE_LAVA_SIGNAL, A618_NETWORK_ISSUE_LOGS[1]],
        does_not_raise(),
    ),
    "Pass - Partial subsequent (3) R8152 kmsg during test phase": (
        [
            TEST_PHASE_LAVA_SIGNAL,
            A618_NETWORK_ISSUE_LOGS[0],
            A618_NETWORK_ISSUE_LOGS[0],
        ],
        does_not_raise(),
    ),
    "Pass - Partial subsequent (4) R8152 kmsg during test phase": (
        [
            TEST_PHASE_LAVA_SIGNAL,
            A618_NETWORK_ISSUE_LOGS[1],
            A618_NETWORK_ISSUE_LOGS[1],
        ],
        does_not_raise(),
    ),
}


@pytest.mark.parametrize(
    "messages, expectation",
    A618_NETWORK_ISSUE_SCENARIOS.values(),
    ids=A618_NETWORK_ISSUE_SCENARIOS.keys(),
)
def test_detect_failure(messages, expectation):
    lf = LogFollower()
    with expectation:
        lf.feed(messages)
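test_gitlab_section_id above expects the same normalization the slugify() shell function in the CI YAML performs: collapse every non-alphanumeric run into a dash and lowercase the result. A Python mirror of that rule, for illustration only (the shell version additionally strips [~^] characters and leading/trailing dashes):

import re

def slugify(name: str) -> str:
    # Collapse every non-alphanumeric run into '-', then lowercase;
    # mirrors `sed -r s/[^a-zA-Z0-9]+/-/g | tr A-Z a-z` from the CI YAML.
    return re.sub(r"[^a-zA-Z0-9]+", "-", name).lower()

assert slugify("abc:amd64 1/3") == "abc-amd64-1-3"
assert slugify("spaces are not welcome") == "spaces-are-not-welcome"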
@@ -1,87 +0,0 @@
#!/bin/sh

set -ex

if [ "x$VK_DRIVER" = "x" ]; then
    exit 1
fi

# Useful debug output; you rarely know what environment you'll be
# running in within container-land, so this can be a landmark.
ls -l

INSTALL=$(realpath -s "$PWD"/install)
RESULTS=$(realpath -s "$PWD"/results)

# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when
# using a command wrapper. Hence, we will just set it when running the
# command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"

# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(sed 's/\./\\./g' "$INSTALL/VERSION")

# Force the stdout and stderr streams to be unbuffered in python.
export PYTHONUNBUFFERED=1

# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
if [ "${VK_DRIVER}" = "radeon" ]; then
    # Disable vsync
    export MESA_VK_WSI_PRESENT_MODE=mailbox
    export vblank_mode=0
fi

# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/dxvk-wine64"
export WINEESYNC=1

# Wait for amdgpu to be fully loaded.
sleep 1

# Avoid having to perform nasty command pre-processing to insert the
# wine executable in front of the test executables. Instead, use the
# kernel's binfmt support to automatically use Wine as an interpreter
# when asked to load PE executables.
# TODO: Have boot2container mount this filesystem for all jobs?
mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
echo ':DOSWin:M::MZ::/usr/bin/wine:' > /proc/sys/fs/binfmt_misc/register

# Set environment for DXVK.
export DXVK_LOG_LEVEL="info"
export DXVK_LOG="$RESULTS/dxvk"
[ -d "$DXVK_LOG" ] || mkdir -pv "$DXVK_LOG"
export DXVK_STATE_CACHE=0

# Set environment for replaying traces.
export PATH="/apitrace-msvc-win64/bin:/gfxreconstruct/build/bin:$PATH"

SANITY_MESA_VERSION_CMD="vulkaninfo"

# Set up the Window System Interface (WSI)
# TODO: Can we get away with GBM?
if [ "${TEST_START_XORG:-0}" -eq 1 ]; then
    "$INSTALL"/common/start-x.sh "$INSTALL"
    export DISPLAY=:0
fi

wine --version

SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""

RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD"

set +e
eval $RUN_CMD

if [ $? -ne 0 ]; then
    printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi
set -e

# Just to be sure...
chmod +x ./valvetraces-run.sh
./valvetraces-run.sh
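The binfmt_misc registration is the densest line in the script above. The register format is :name:type:offset:magic:mask:interpreter:flags, so ':DOSWin:M::MZ::/usr/bin/wine:' tells the kernel to hand any file whose leading bytes are the 'MZ' PE/DOS magic to Wine. A small Python sketch that assembles the same string; it is illustrative only and does not write to /proc:

# Illustrative only: assemble the binfmt_misc registration line used above.
# Field order is :name:type:offset:magic:mask:interpreter:flags
fields = {
    "name": "DOSWin",
    "type": "M",               # match by magic bytes
    "offset": "",              # empty -> default offset 0
    "magic": "MZ",             # PE/DOS executable header bytes
    "mask": "",                # empty -> compare all magic bytes
    "interpreter": "/usr/bin/wine",
    "flags": "",
}
line = ":" + ":".join(fields.values())
assert line == ":DOSWin:M::MZ::/usr/bin/wine:"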
Some files were not shown because too many files have changed in this diff.