Compare commits: mesa-22.3 ... mesa-22.1
332 commits
Commits (by SHA1; the author and date columns were empty in the export):

01113c2eaa 1d60c87010 f1b085cd69 39636a083e 8894522f41 b8be90b8b4 e42164124f f743f0b451
84b62db7ed 3388defe64 cb2fed99ad 059a8627f3 f4201c1049 9ecbe45f37 f8ca2e102e 1c53c943f0
45219933d0 ea55add405 b7d48604c3 15c79f91c0 e3fb25dacb d3642a5dc6 6c790f145d 5bc14505a5
73c79f3cc1 4d27ca6402 fcfbcd0053 c3c6c221c7 fa325e1527 717d494d31 529c1c2b26 9671505fe5
4776f076ec 6319554a28 d6009c3fe9 3642934612 8ddded8051 83c680e980 16a9904e63 3b927d3e41
308af4a479 78feb8286f ca4352581b f77526a3b8 e8887c849d 28264dd5c9 2cca7dce1e d7ef4f99da
aa9b7d349c a21fe41535 cdc27459bf 87c98bed29 9c84a34361 bb91ddce0b 24626dca6b 96eb5bbbb7
caf526716b 9546233703 b03a2c10e4 c85d45c884 41967c9107 2649b2c631 1b28c52e44 8a560a87b4
60fa35a4df b2cc7c5b43 6fade22da9 d25f388853 b7366e6acb 03d5e496d9 0b90d3dc58 8e84d6fded
c6e7e2977f 0c50ee124a f9cc403ebe 659e41fe79 e2c69cd976 3830fb9769 5b82328bbd 7c24f4fbc9
7feaf6c548 cf289ff969 38ac16dbff 9adf6d9d7e 34261b1084 a4189af168 29b1c19e6c 540fa4ecd7
7b8c243ae5 287a776bed f5707aae3c a068671262 07a7306d81 9d05943813 32c596ef90 215b28f5df
aef2c5cb70 da84a8ffe7 4b6b9e1889 7533fc00df 00e217bf2e be08a23dc0 0be76e7ec1 2493985aae
7e0055e4a0 d26843de8b e40dd33d1a a0771d809e 7b0d367735 1ed9c24fb9 77127ce4ae 0c9f8d3d05
93d28694ce c425166bc9 3d82b71516 1e2b6e131e d3095f5cf0 2c529a9ab9 89d94335e4 907c1bab60
2920e0d843 d39eafe93d 5068d98157 6f0a7ae37b ca9c3ece53 efc5ecee9c f8fc9d4c09 0ffaf1002d
331bc00d77 76ca02ff7b 8b806828ad ef86acd25c b78272daa5 45cbab0442 718a8223ff b929e21fbe
b312721d87 cbc697d2d6 533117777f f96b0d640b f857eed827 aab9d6df50 ca8af9c529 5346b6aac4
1cd83f0311 0dcd1ac226 265351f03f 5eadf0d634 579a90e3c4 c4fac8fbbe 218b619ab2 ebb13496c4
96202a0fab e5225e485d aaba210881 b465069b74 ae44b3365f f936ff8631 fffad80496 9c0b009335
df09d2a0ba 6447169dee a5e133250f fdb6bb31af 2613cd4b8d 381ec467db 9b60b2ecdc d9219c0ee6
965bdf7db2 bedc452f8b a9d2d3b0b7 9febc11af9 d29fe64c7e d9083cff8e 52ab0ca9b0 ae7c0db4b3
b29e15e515 5949ead315 4046230c13 01345ad5e9 9fdb834853 2f286bfef5 d6accbe040 9b6b1445f7
85065859a2 5d41d82a6f 33c3034765 ebc9a11318 40867789ec 421e4d445a 238b018789 d45ccc05f1
21753f734e 2385ca720b fa4ad4f679 51ddb36a2d d630f852b9 0542f4ab03 1d33c5a2fc 69c861ad58
e45737c9eb 9f02b52ba2 f2d04b4fe9 e0bf9a7ea8 6993615047 8fb66e77e2 9b872cc3a5 721f054bae
e8d1650625 c9f29c22b8 0d4b79d111 2096db8aad 7decf4056e 8ac563d157 8ab5ac0870 5694c04cdd
8bae419dfe e1706c29c6 ccdebfb26a b39d53761c b674942ecc 2e84b75047 431f07fcd9 46c9e8b811
0783dd7f7e 2eba273314 7fb345d84a 07a0e16324 a5191667d3 dea703ff2b 53fe3ea095 276d23b4bc
8cd82ee256 939a0cf876 c074592f3d 63d75ac797 5e3b01c515 c7830bf873 e34284b7dc bad73e0959
888b4de9d4 5a1b1a0729 88b07e972b 38fd735a72 3ae788a937 68998c0c9a 8a53885999 c7fe963150
f6c77aa558 fe83982fa6 8939224350 3dd54edd12 7358faab52 100360a307 5634be6cb8 1e1b7661e2
f11afdbf9c ed6d83f01b 12eb1548bd 4a434c77fc c203721116 add1e87c4e 2379bc19a8 99ce005b93
d71e9fcb15 5d8a4ce3c4 e07d4a02fe ff7e8aec04 001a5c73cc a1c167da48 acc3752a10 1fddd2dbeb
e8ba43c8f8 f53d45cbb5 cfc3827890 ae85860679 0776767b15 5286bd22d8 de0c61c03b cbee0572ed
c5f6ff9d3e a9a6fb2cf2 2324083fe9 08e22fff13 af570d37a7 a427a88f40 f8dc7e0d37 55d9bd1b64
692d9bce64 503440f0dc 2af5c609be f335548405 9d020c5b52 936c5278de 1eda70fb31 3bd963d88b
7c75d83842 b7e08dbc06 f00600e1a3 68b25a57c1 6a69784335 3987237220 a6c2047ea0 8efeb7e3bf
9a28aea2a2 30a980fb94 4f031f35fe df6dc532d2 c849ae36e0 b7fe949ab9 445892367a 81b10bd0dd
24d6489d0d b27d409cfd df84664032 357e3130ad 247ecdcc05 72cc88d205 a27af70c1b 424c960492
db3e06f76c 353330c094 f1168c53ae 62b00f70fd ff76add805 f48a3cffe7 a80f8a5ed0 5f5c562f00
653b560413 9a1ca294a8 252a858bc5 c519c37784
@@ -16,14 +16,26 @@ max_line_length = 78
[{Makefile*,*.mk}]
indent_style = tab

[*.py]
[{*.py,SCons*}]
indent_style = space
indent_size = 4

[*.pl]
indent_style = space
indent_size = 4

[*.m4]
indent_style = space
indent_size = 2

[*.yml]
indent_style = space
indent_size = 2

[*.html]
indent_style = space
indent_size = 2

[*.rst]
indent_style = space
indent_size = 3
@@ -34,11 +46,3 @@ trim_trailing_whitespace = false
[{meson.build,meson_options.txt}]
indent_style = space
indent_size = 2

[*.ps1]
indent_style = space
indent_size = 2

[*.rs]
indent_style = space
indent_size = 4

.github/workflows/macos.yml (vendored): 59 lines changed
@@ -1,59 +0,0 @@
name: macOS-CI
on: push

permissions:
  contents: read

jobs:
  macOS-CI:
    strategy:
      matrix:
        glx_option: ['dri', 'xlib']
    runs-on: macos-11
    env:
      GALLIUM_DUMP_CPU: true
      MESON_EXEC: /Users/runner/Library/Python/3.11/bin/meson
    steps:
    - name: Checkout
      uses: actions/checkout@v3
    - name: Install Dependencies
      run: |
        cat > Brewfile <<EOL
        brew "bison"
        brew "expat"
        brew "gettext"
        brew "libx11"
        brew "libxcb"
        brew "libxdamage"
        brew "libxext"
        brew "ninja"
        brew "pkg-config"
        brew "python@3.10"
        EOL

        brew update
        brew bundle --verbose
    - name: Install Mako and meson
      run: pip3 install --user mako meson
    - name: Configure
      run: |
        cat > native_config <<EOL
        [binaries]
        llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
        EOL
        $MESON_EXEC . build --native-file=native_config -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast -Dglx=${{ matrix.glx_option }}
    - name: Build
      run: $MESON_EXEC compile -C build
    - name: Test
      run: $MESON_EXEC test -C build --print-errorlogs
    - name: Install
      run: $MESON_EXEC install -C build --destdir $PWD/install
    - name: 'Upload Artifact'
      if: always()
      uses: actions/upload-artifact@v3
      with:
        name: macos-${{ matrix.glx_option }}-result
        path: |
          build/meson-logs/
          install/
        retention-days: 5

.gitignore (vendored): 2 lines changed
@@ -1,4 +1,4 @@
*.pyc
*.pyo
*.out
/build
build

@@ -1,6 +1,6 @@
variables:
  FDO_UPSTREAM_REPO: mesa/mesa
  MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
  MESA_TEMPLATES_COMMIT: &ci-templates-commit 290b79e0e78eab67a83766f4e9691be554fc4afd
  CI_PRE_CLONE_SCRIPT: |-
    set -o xtrace
    wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
@@ -8,7 +8,7 @@ variables:
    rm download-git-cache.sh
    set +o xtrace
  CI_JOB_JWT_FILE: /minio_jwt
  MINIO_HOST: s3.freedesktop.org
  MINIO_HOST: minio-packet.freedesktop.org
  # per-pipeline artifact storage on MinIO
  PIPELINE_ARTIFACTS_BASE: ${MINIO_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
  # per-job artifact storage on MinIO
@@ -18,11 +18,8 @@ variables:
  # Individual CI farm status, set to "offline" to disable jobs
  # running on a particular CI farm (ie. for outages, etc):
  FD_FARM: "online"
  COLLABORA_FARM: "online"
  MICROSOFT_FARM: "online"
  LIMA_FARM: "online"
  IGALIA_FARM: "online"
  ANHOLT_FARM: "online"
  COLLABORA_FARM: "offline"
  MICROSOFT_FARM: "offline"

default:
  before_script:
@@ -39,15 +36,9 @@ default:
    export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
    rm "${CI_JOB_JWT_FILE}"

  # Retry build or test jobs up to twice when the gitlab-runner itself fails somehow.
  retry:
    max: 2
    when:
      - runner_system_failure

include:
  - project: 'freedesktop/ci-templates'
    ref: ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
    ref: 34f4ade99434043f88e164933f570301fd18b125
    file:
      - '/templates/ci-fairy.yml'
  - project: 'freedesktop/ci-templates'
@@ -68,9 +59,11 @@ include:
  - local: 'src/gallium/drivers/crocus/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/d3d12/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/i915/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/iris/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/lima/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/llvmpipe/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/nouveau/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/radeonsi/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/softpipe/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/virgl/ci/gitlab-ci.yml'
  - local: 'src/gallium/drivers/zink/ci/gitlab-ci.yml'
@@ -78,7 +71,6 @@ include:
  - local: 'src/intel/ci/gitlab-ci.yml'
  - local: 'src/microsoft/ci/gitlab-ci.yml'
  - local: 'src/panfrost/ci/gitlab-ci.yml'
  - local: 'src/virtio/ci/gitlab-ci.yml'

stages:
  - sanity
@@ -86,7 +78,6 @@ stages:
  - git-archive
  - build-x86_64
  - build-misc
  - lint
  - amd
  - intel
  - nouveau
@@ -98,6 +89,12 @@ stages:
  - layered-backends
  - deploy

# Generic rule to not run the job during scheduled pipelines
# ----------------------------------------------------------
.scheduled_pipelines-rules:
  rules: &ignore_scheduled_pipelines
    if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
    when: never

# YAML anchors for rule conditions
# --------------------------------
@@ -129,10 +126,10 @@ stages:
.docs-base:
  extends:
    - .fdo.ci-fairy
    - .build-rules
    - .ci-run-policy
  script:
    - apk --no-cache add graphviz doxygen
    - pip3 install sphinx===5.1.1 breathe===4.34.0 mako===1.2.3 sphinx_rtd_theme===1.0.0
    - pip3 install sphinx breathe mako sphinx_rtd_theme
    - docs/doxygen-wrapper.py --out-dir=docs/doxygen_xml
    - sphinx-build -W -b html docs public

@@ -144,7 +141,7 @@ pages:
    - public
  needs: []
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    - if: *is-mesa-main
      changes: &docs-or-ci
        - docs/**/*
@@ -159,7 +156,7 @@ test-docs:
  stage: deploy
  needs: []
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    - if: *is-forked-branch
      changes: *docs-or-ci
      when: manual
@@ -180,10 +177,10 @@ test-docs-mr:
      when: on_success
  # Other cases default to never

# When to automatically run the CI for build jobs
.build-rules:
# When to automatically run the CI
.ci-run-policy:
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    # If any files affecting the pipeline are changed, build/test jobs run
    # automatically once all dependency jobs have passed
    - changes: &all_paths
@@ -203,7 +200,12 @@ test-docs-mr:
      - include/**/*
      - src/**/*
      when: on_success
    # Otherwise, build/test jobs won't run because no rule matched.
    # Otherwise, build/test jobs won't run
    - when: never
  retry:
    max: 2
    when:
      - runner_system_failure


.ci-deqp-artifacts:
@@ -219,8 +221,10 @@ test-docs-mr:
      - _build/meson-logs/strace

.container-rules:
  extends:
    - .ci-run-policy
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    # Run pipeline by default in the main project if any CI pipeline
    # configuration files were changed, to ensure docker images are up to date
    - if: *is-post-merge
@@ -245,7 +249,8 @@ test-docs-mr:
    - changes:
        *all_paths
      when: manual
    # Otherwise, container jobs won't run because no rule matched.
    # Otherwise, container jobs won't run
    - when: never

# Git archive

@@ -254,7 +259,8 @@ make git archive:
    - .fdo.ci-fairy
  stage: git-archive
  rules:
    - !reference [.scheduled_pipeline-rules, rules]
    - if: *is-scheduled-pipeline
      when: on_success
  # ensure we are running on packet
  tags:
    - packet.net
@@ -264,7 +270,9 @@ make git archive:
    # compress the current folder
    - tar -cvzf ../$CI_PROJECT_NAME.tar.gz .

    - ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$MINIO_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
    # login with the JWT token file
    - ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
    - ci-fairy minio cp ../$CI_PROJECT_NAME.tar.gz minio://$MINIO_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz


# Sanity checks of MR settings and commit logs
@@ -286,17 +294,34 @@ sanity:
  reports:
    junit: check-*.xml

# Rules for tests that should not block merging, but should be available to
# optionally run with the "play" button in the UI in pre-merge non-marge
# pipelines. This should appear in "extends:" after any includes of
# test-source-dep.yml rules, so that these rules replace those.
# Rules for tests that should not be present in MRs or the main
# project's pipeline (don't block marge or report red on
# mesa/mesa main) but should be present on pipelines in personal
# branches (so you can opt in to running the flaky test when you want
# to).
.test-manual:
  rules:
    - *ignore_scheduled_pipelines
    - if: *is-forked-branch
      changes:
        *all_paths
      when: manual
    - when: never
  variables:
    GIT_STRATEGY: none
    JOB_TIMEOUT: 80

# The above .test-manual rules doesn't allow the jobs to be available for MRs
# but we should have an option to have manual jobs in MRs as well.
.test-manual-mr:
  rules:
    - !reference [.no_scheduled_pipelines-rules, rules]
    - *ignore_scheduled_pipelines
    - if: *is-forked-branch-or-pre-merge-not-for-marge
      changes:
        *all_paths
      when: manual
    - when: never
  variables:
    GIT_STRATEGY: none
    JOB_TIMEOUT: 80

@@ -90,8 +90,7 @@ echo "$BM_CMDLINE" > /tftp/cmdline

set +e
python3 $BM/cros_servo_run.py \
  --cpu $BM_SERIAL \
  --ec $BM_SERIAL_EC \
  --test-timeout ${TEST_PHASE_TIMEOUT:-20}
  --ec $BM_SERIAL_EC
ret=$?
set -e

@@ -31,18 +31,52 @@ import threading


class CrosServoRun:
    def __init__(self, cpu, ec, test_timeout):
    def __init__(self, cpu, ec):
        # Merged FIFO for the two serial buffers, fed by threads.
        self.serial_queue = queue.Queue()
        self.sentinel = object()
        self.threads_done = 0

        self.ec_ser = SerialBuffer(
            ec, "results/serial-ec.txt", "R SERIAL-EC> ")
        self.cpu_ser = SerialBuffer(
            cpu, "results/serial.txt", "R SERIAL-CPU> ")
        # Merge the EC serial into the cpu_ser's line stream so that we can
        # effectively poll on both at the same time and not have to worry about
        self.ec_ser = SerialBuffer(
            ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
        self.test_timeout = test_timeout

        self.iter_feed_ec = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
        self.iter_feed_ec.start()

        self.iter_feed_cpu = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
        self.iter_feed_cpu.start()

    def close(self):
        self.ec_ser.close()
        self.cpu_ser.close()
        self.iter_feed_ec.join()
        self.iter_feed_cpu.join()

    # Feed lines from our serial queues into the merged queue, marking when our
    # input is done.
    def iter_feed_queue(self, it):
        for i in it:
            self.serial_queue.put(i)
        self.serial_queue.put(self.sentinel)

    # Return the next line from the queue, counting how many threads have
    # terminated and joining when done
    def get_serial_queue_line(self):
        line = self.serial_queue.get()
        if line == self.sentinel:
            self.threads_done = self.threads_done + 1
            if self.threads_done == 2:
                self.iter_feed_cpu.join()
                self.iter_feed_ec.join()
        return line

    # Returns an iterator for getting the next line.
    def serial_queue_lines(self):
        return iter(self.get_serial_queue_line, self.sentinel)

    def ec_write(self, s):
        print("W SERIAL-EC> %s" % s)
@@ -62,36 +96,23 @@ class CrosServoRun:
        self.ec_write("\n")
        self.ec_write("reboot\n")

        bootloader_done = False
        # This is emitted right when the bootloader pauses to check for input.
        # Emit a ^N character to request network boot, because we don't have a
        # direct-to-netboot firmware on cheza.
        for line in self.cpu_ser.lines(timeout=120, phase="bootloader"):
        for line in self.serial_queue_lines():
            if re.search("load_archive: loading locale_en.bin", line):
                self.cpu_write("\016")
                bootloader_done = True
                break

            # If the board has a netboot firmware and we made it to booting the
            # kernel, proceed to processing of the test run.
            if re.search("Booting Linux", line):
                bootloader_done = True
                break

            # The Cheza boards have issues with failing to bring up power to
            # the system sometimes, possibly dependent on ambient temperature
            # in the farm.
            if re.search("POWER_GOOD not seen in time", line):
                self.print_error(
                    "Detected intermittent poweron failure, restarting run...")
                self.print_error("Detected intermittent poweron failure, restarting run...")
                return 2

        if not bootloader_done:
            print("Failed to make it through bootloader, restarting run...")
            return 2

        tftp_failures = 0
        for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.serial_queue_lines():
            if re.search("---. end Kernel panic", line):
                return 1

@@ -102,15 +123,13 @@ class CrosServoRun:
            if re.search("R8152: Bulk read error 0xffffffbf", line):
                tftp_failures += 1
                if tftp_failures >= 100:
                    self.print_error(
                        "Detected intermittent tftp failure, restarting run...")
                    self.print_error("Detected intermittent tftp failure, restarting run...")
                    return 2

            # There are very infrequent bus errors during power management transitions
            # on cheza, which we don't expect to be the case on future boards.
            if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
                self.print_error(
                    "Detected cheza power management bus error, restarting run...")
                self.print_error("Detected cheza power management bus error, restarting run...")
                return 2

            # If the network device dies, it's probably not graphics's fault, just try again.
@@ -129,8 +148,7 @@ class CrosServoRun:
            # Given that it seems to trigger randomly near a GPU fault and then
            # break many tests after that, just restart the whole run.
            if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
                self.print_error(
                    "Detected cheza power management bus error, restarting run...")
                self.print_error("Detected cheza power management bus error, restarting run...")
                return 2

            if re.search("coreboot.*bootblock starting", line):
@@ -149,9 +167,8 @@ class CrosServoRun:
                else:
                    return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result")
        return 2
        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 1


def main():
@@ -160,20 +177,21 @@ def main():
        help='CPU Serial device', required=True)
    parser.add_argument(
        '--ec', type=str, help='EC Serial device', required=True)
    parser.add_argument(
        '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
    args = parser.parse_args()

    servo = CrosServoRun(args.cpu, args.ec)

    while True:
        servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
        retval = servo.run()

        # power down the CPU on the device
        servo.ec_write("power off\n")
        servo.close()

        if retval != 2:
            sys.exit(retval)
        break

    # power down the CPU on the device
    servo.ec_write("power off\n")

    servo.close()

    sys.exit(retval)


if __name__ == '__main__':
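
The queue-plus-sentinel merge in the hunk above (one queue.Queue fed by a daemon thread per serial stream, a private sentinel object marking the end of each source, and iter(callable, sentinel) turning the drain into a plain iterator) is a reusable pattern worth seeing in isolation. A minimal, self-contained sketch; merge_line_streams and the sample lines are illustrative, not part of the harness:

import queue
import threading

def merge_line_streams(*iterables):
    # Drain each source iterator on its own daemon thread into one FIFO;
    # a private sentinel marks the end of each source, and iteration
    # stops once every source has finished.
    q = queue.Queue()
    sentinel = object()

    def feed(it):
        for item in it:
            q.put(item)
        q.put(sentinel)  # this source is exhausted

    for it in iterables:
        threading.Thread(target=feed, args=(it,), daemon=True).start()

    finished = 0
    while finished < len(iterables):
        item = q.get()
        if item is sentinel:
            finished += 1
        else:
            yield item

# Hypothetical usage with two fake serial streams:
cpu_lines = iter(["cpu: coreboot bootblock starting", "cpu: hwci: mesa: pass"])
ec_lines = iter(["ec: power on"])
for line in merge_line_streams(cpu_lines, ec_lines):
    print(line)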
@@ -106,25 +106,20 @@ if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
  wget $BM_DTB -O dtb

  cat kernel dtb > Image.gz-dtb
  rm kernel
  rm kernel dtb
else
  cat $BM_KERNEL $BM_DTB > Image.gz-dtb
  cp $BM_DTB dtb
fi

export PATH=$BM:$PATH

mkdir -p artifacts
mkbootimg.py \
  --kernel Image.gz-dtb \
  --ramdisk rootfs.cpio.gz \
  --dtb dtb \
  --cmdline "$BM_CMDLINE" \
  $BM_MKBOOT_PARAMS \
  --header_version 2 \
  -o artifacts/fastboot.img
abootimg \
  --create artifacts/fastboot.img \
  -k Image.gz-dtb \
  -r rootfs.cpio.gz \
  -c cmdline="$BM_CMDLINE"
rm Image.gz-dtb

rm Image.gz-dtb dtb
export PATH=$BM:$PATH

# Start background command for talking to serial if we have one.
if [ -n "$BM_SERIAL_SCRIPT" ]; then
@@ -138,7 +133,6 @@ fi
set +e
$BM/fastboot_run.py \
  --dev="$BM_SERIAL" \
  --test-timeout ${TEST_PHASE_TIMEOUT:-20} \
  --fbserial="$BM_FASTBOOT_SERIAL" \
  --powerup="$BM_POWERUP" \
  --powerdown="$BM_POWERDOWN"

@@ -22,21 +22,19 @@
# IN THE SOFTWARE.

import argparse
import subprocess
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading


class FastbootRun:
    def __init__(self, args, test_timeout):
    def __init__(self, args):
        self.powerup = args.powerup
        self.ser = SerialBuffer(
            args.dev, "results/serial-output.txt", "R SERIAL> ")
        self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(
            ser=args.fbserial)
        self.test_timeout = test_timeout
        # We would like something like a 1 minute timeout, but the piglit traces
        # jobs stall out for long periods of time.
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
        self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)

    def close(self):
        self.ser.close()
@@ -46,40 +44,34 @@ class FastbootRun:
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd, timeout=60):
    def logged_system(self, cmd):
        print("Running '{}'".format(cmd))
        try:
            return subprocess.call(cmd, shell=True, timeout=timeout)
        except subprocess.TimeoutExpired:
            self.print_error("timeout, restarting run...")
            return 2
        return os.system(cmd)

    def run(self):
        if ret := self.logged_system(self.powerup):
            return ret
        if self.logged_system(self.powerup) != 0:
            return 1

        fastboot_ready = False
        for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
        for line in self.ser.lines():
            if re.search("fastboot: processing commands", line) or \
                    re.search("Listening for fastboot command on", line):
                re.search("Listening for fastboot command on", line):
                fastboot_ready = True
                break

            if re.search("data abort", line):
                self.print_error(
                    "Detected crash during boot, restarting run...")
                self.print_error("Detected crash during boot, restarting run...")
                return 2

        if not fastboot_ready:
            self.print_error(
                "Failed to get to fastboot prompt, restarting run...")
            self.print_error("Failed to get to fastboot prompt, restarting run...")
            return 2

        if ret := self.logged_system(self.fastboot):
            return ret
        if self.logged_system(self.fastboot) != 0:
            return 1

        print_more_lines = -1
        for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.ser.lines():
            if print_more_lines == 0:
                return 2
            if print_more_lines > 0:
@@ -91,8 +83,7 @@ class FastbootRun:
            # The db820c boards intermittently reboot. Just restart the run
            # when if we see a reboot after we got past fastboot.
            if re.search("PON REASON", line):
                self.print_error(
                    "Detected spontaneous reboot, restarting run...")
                self.print_error("Detected spontaneous reboot, restarting run...")
                return 2

            # db820c sometimes wedges around iommu fault recovery
@@ -126,26 +117,18 @@ class FastbootRun:
            else:
                return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result, restarting run...")
        self.print_error("Reached the end of the CPU serial log without finding a result, restarting run...")
        return 2


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str,
                        help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str,
                        help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str,
                        help='fastboot serial number of the board', required=True)
    parser.add_argument('--test-timeout', type=int,
                        help='Test phase timeout (minutes)', required=True)
    parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
    args = parser.parse_args()

    fastboot = FastbootRun(args, args.test_timeout * 60)
    fastboot = FastbootRun(args)

    while True:
        retval = fastboot.run()
@@ -153,12 +136,11 @@ def main():
        if retval != 2:
            break

        fastboot = FastbootRun(args, args.test_timeout * 60)
        fastboot = FastbootRun(args)

    fastboot.logged_system(args.powerdown)

    sys.exit(retval)


if __name__ == '__main__':
    main()
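
The logged_system() hunk above trades a plain os.system() call for subprocess.call() with a timeout, so a board command that wedges becomes the harness's retry path instead of hanging the job. A standalone sketch of that guard, assuming the script's convention that a return of 2 means "restart the run" (run_with_timeout is an illustrative name):

import subprocess

def run_with_timeout(cmd, timeout=60):
    # subprocess.call() kills and reaps the child before TimeoutExpired
    # propagates, unlike os.system(), which can block indefinitely.
    print("Running '{}'".format(cmd))
    try:
        return subprocess.call(cmd, shell=True, timeout=timeout)
    except subprocess.TimeoutExpired:
        print("timeout, restarting run...")
        return 2

The walrus form used in run(), "if ret := self.logged_system(...)", then propagates any nonzero status, including that 2, straight to the retry loop.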
@@ -8,8 +8,8 @@ relay = sys.argv[2]

# our relays are "off" means "board is powered".
mode_swap = {
    "on": "off",
    "off": "on",
    "on" : "off",
    "off" : "on",
}
mode = mode_swap[mode]

@@ -1,569 +0,0 @@
#!/usr/bin/env python3
#
# Copyright 2015, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates the boot image."""
from argparse import (ArgumentParser, ArgumentTypeError,
                      FileType, RawDescriptionHelpFormatter)
from hashlib import sha1
from os import fstat
from struct import pack
import array
import collections
import os
import re
import subprocess
import tempfile

# Constant and structure definition is in
# system/tools/mkbootimg/include/bootimg/bootimg.h
BOOT_MAGIC = 'ANDROID!'
BOOT_MAGIC_SIZE = 8
BOOT_NAME_SIZE = 16
BOOT_ARGS_SIZE = 512
BOOT_EXTRA_ARGS_SIZE = 1024
BOOT_IMAGE_HEADER_V1_SIZE = 1648
BOOT_IMAGE_HEADER_V2_SIZE = 1660
BOOT_IMAGE_HEADER_V3_SIZE = 1580
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
BOOT_IMAGE_HEADER_V4_SIZE = 1584
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
VENDOR_BOOT_MAGIC_SIZE = 8
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
VENDOR_BOOT_ARGS_SIZE = 2048
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128
VENDOR_RAMDISK_TYPE_NONE = 0
VENDOR_RAMDISK_TYPE_PLATFORM = 1
VENDOR_RAMDISK_TYPE_RECOVERY = 2
VENDOR_RAMDISK_TYPE_DLKM = 3
VENDOR_RAMDISK_NAME_SIZE = 32
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108

# Names with special meaning, mustn't be specified in --ramdisk_name.
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}
PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'

def filesize(f):
    if f is None:
        return 0
    try:
        return fstat(f.fileno()).st_size
    except OSError:
        return 0

def update_sha(sha, f):
    if f:
        sha.update(f.read())
        f.seek(0)
        sha.update(pack('I', filesize(f)))
    else:
        sha.update(pack('I', 0))

def pad_file(f, padding):
    pad = (padding - (f.tell() & (padding - 1))) & (padding - 1)
    f.write(pack(str(pad) + 'x'))

def get_number_of_pages(image_size, page_size):
    """calculates the number of pages required for the image"""
    return (image_size + page_size - 1) // page_size

def get_recovery_dtbo_offset(args):
    """calculates the offset of recovery_dtbo image in the boot image"""
    num_header_pages = 1  # header occupies a page
    num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize)
    num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk),
                                            args.pagesize)
    num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize)
    dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages +
                                   num_ramdisk_pages + num_second_pages)
    return dtbo_offset

def write_header_v3_and_above(args):
    if args.header_version > 3:
        boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
    else:
        boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE
    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # os version and patch level
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    args.output.write(pack('I', boot_header_size))
    # reserved
    args.output.write(pack('4I', 0, 0, 0, 0))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
                           args.cmdline))
    if args.header_version >= 4:
        # The signature used to verify boot image v4.
        args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
    pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)

def write_vendor_boot_header(args):
    if filesize(args.dtb) == 0:
        raise ValueError('DTB image must not be empty.')
    if args.header_version > 3:
        vendor_ramdisk_size = args.vendor_ramdisk_total_size
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
    else:
        vendor_ramdisk_size = filesize(args.vendor_ramdisk)
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
                                VENDOR_BOOT_MAGIC.encode()))
    # version of boot image header
    args.vendor_boot.write(pack('I', args.header_version))
    # flash page size
    args.vendor_boot.write(pack('I', args.pagesize))
    # kernel physical load address
    args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
    # ramdisk physical load address
    args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
    # ramdisk size in bytes
    args.vendor_boot.write(pack('I', vendor_ramdisk_size))
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
                                args.vendor_cmdline))
    # kernel tags physical load address
    args.vendor_boot.write(pack('I', args.base + args.tags_offset))
    # asciiz product name
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
    # header size in bytes
    args.vendor_boot.write(pack('I', vendor_boot_header_size))
    # dtb size in bytes
    args.vendor_boot.write(pack('I', filesize(args.dtb)))
    # dtb physical load address
    args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))
    if args.header_version > 3:
        vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
                                     VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
        # vendor ramdisk table size in bytes
        args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
        # number of vendor ramdisk table entries
        args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
        # vendor ramdisk table entry size in bytes
        args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
        # bootconfig section size in bytes
        args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
    pad_file(args.vendor_boot, args.pagesize)

def write_header(args):
    if args.header_version > 4:
        raise ValueError(
            f'Boot header version {args.header_version} not supported')
    if args.header_version in {3, 4}:
        return write_header_v3_and_above(args)

    ramdisk_load_address = ((args.base + args.ramdisk_offset)
                            if filesize(args.ramdisk) > 0 else 0)
    second_load_address = ((args.base + args.second_offset)
                           if filesize(args.second) > 0 else 0)

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # kernel physical load address
    args.output.write(pack('I', args.base + args.kernel_offset))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # ramdisk physical load address
    args.output.write(pack('I', ramdisk_load_address))
    # second bootloader size in bytes
    args.output.write(pack('I', filesize(args.second)))
    # second bootloader physical load address
    args.output.write(pack('I', second_load_address))
    # kernel tags physical load address
    args.output.write(pack('I', args.base + args.tags_offset))
    # flash page size
    args.output.write(pack('I', args.pagesize))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    # os version and patch level
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    # asciiz product name
    args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
    args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))

    sha = sha1()
    update_sha(sha, args.kernel)
    update_sha(sha, args.ramdisk)
    update_sha(sha, args.second)
    if args.header_version > 0:
        update_sha(sha, args.recovery_dtbo)
    if args.header_version > 1:
        update_sha(sha, args.dtb)
    img_id = pack('32s', sha.digest())

    args.output.write(img_id)
    args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))

    if args.header_version > 0:
        if args.recovery_dtbo:
            # recovery dtbo size in bytes
            args.output.write(pack('I', filesize(args.recovery_dtbo)))
            # recovert dtbo offset in the boot image
            args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
        else:
            # Set to zero if no recovery dtbo
            args.output.write(pack('I', 0))
            args.output.write(pack('Q', 0))

    # Populate boot image header size for header versions 1 and 2.
    if args.header_version == 1:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
    elif args.header_version == 2:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))

    if args.header_version > 1:
        if filesize(args.dtb) == 0:
            raise ValueError('DTB image must not be empty.')
        # dtb size in bytes
        args.output.write(pack('I', filesize(args.dtb)))
        # dtb physical load address
        args.output.write(pack('Q', args.base + args.dtb_offset))

    pad_file(args.output, args.pagesize)
    return img_id

class AsciizBytes:
    """Parses a string and encodes it as an asciiz bytes object.

    >>> AsciizBytes(bufsize=4)('foo')
    b'foo\\x00'
    >>> AsciizBytes(bufsize=4)('foob')
    Traceback (most recent call last):
        ...
    argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
    """
    def __init__(self, bufsize):
        self.bufsize = bufsize

    def __call__(self, arg):
        arg_bytes = arg.encode() + b'\x00'
        if len(arg_bytes) > self.bufsize:
            raise ArgumentTypeError(
                'Encoded asciiz length exceeded: '
                f'max {self.bufsize}, got {len(arg_bytes)}')
        return arg_bytes

class VendorRamdiskTableBuilder:
    """Vendor ramdisk table builder.

    Attributes:
        entries: A list of VendorRamdiskTableEntry namedtuple.
        ramdisk_total_size: Total size in bytes of all ramdisks in the table.
    """
    VendorRamdiskTableEntry = collections.namedtuple(  # pylint: disable=invalid-name
        'VendorRamdiskTableEntry',
        ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
         'ramdisk_name', 'board_id'])

    def __init__(self):
        self.entries = []
        self.ramdisk_total_size = 0
        self.ramdisk_names = set()

    def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
        # Strip any trailing null for simple comparison.
        stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
        if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
            raise ValueError(
                f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
        if stripped_ramdisk_name in self.ramdisk_names:
            raise ValueError(
                f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
        self.ramdisk_names.add(stripped_ramdisk_name)
        if board_id is None:
            board_id = array.array(
                'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
        else:
            board_id = array.array('I', board_id)
        if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
            raise ValueError('board_id size must be '
                             f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')
        with open(ramdisk_path, 'rb') as f:
            ramdisk_size = filesize(f)
        self.entries.append(self.VendorRamdiskTableEntry(
            ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
            ramdisk_name, board_id))
        self.ramdisk_total_size += ramdisk_size

    def write_ramdisks_padded(self, fout, alignment):
        for entry in self.entries:
            with open(entry.ramdisk_path, 'rb') as f:
                fout.write(f.read())
        pad_file(fout, alignment)

    def write_entries_padded(self, fout, alignment):
        for entry in self.entries:
            fout.write(pack('I', entry.ramdisk_size))
            fout.write(pack('I', entry.ramdisk_offset))
            fout.write(pack('I', entry.ramdisk_type))
            fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
                            entry.ramdisk_name))
            fout.write(entry.board_id)
        pad_file(fout, alignment)

def write_padded_file(f_out, f_in, padding):
    if f_in is None:
        return
    f_out.write(f_in.read())
    pad_file(f_out, padding)

def parse_int(x):
    return int(x, 0)

def parse_os_version(x):
    match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
    if match:
        a = int(match.group(1))
        b = c = 0
        if match.lastindex >= 2:
            b = int(match.group(2))
        if match.lastindex == 3:
            c = int(match.group(3))
        # 7 bits allocated for each field
        assert a < 128
        assert b < 128
        assert c < 128
        return (a << 14) | (b << 7) | c
    return 0

def parse_os_patch_level(x):
    match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
    if match:
        y = int(match.group(1)) - 2000
        m = int(match.group(2))
        # 7 bits allocated for the year, 4 bits for the month
        assert 0 <= y < 128
        assert 0 < m <= 12
        return (y << 4) | m
    return 0

def parse_vendor_ramdisk_type(x):
    type_dict = {
        'none': VENDOR_RAMDISK_TYPE_NONE,
        'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
        'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
        'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
    }
    if x.lower() in type_dict:
        return type_dict[x.lower()]
    return parse_int(x)

def get_vendor_boot_v4_usage():
    return """vendor boot version 4 arguments:
  --ramdisk_type {none,platform,recovery,dlkm}
                        specify the type of the ramdisk
  --ramdisk_name NAME
                        specify the name of the ramdisk
  --board_id{0..15} NUMBER
                        specify the value of the board_id vector, defaults to 0
  --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
                        path to the vendor ramdisk file

These options can be specified multiple times, where each vendor ramdisk
option group ends with a --vendor_ramdisk_fragment option.
Each option group appends an additional ramdisk to the vendor boot image.
"""

def parse_vendor_ramdisk_args(args, args_list):
    """Parses vendor ramdisk specific arguments.

    Args:
        args: An argparse.Namespace object. Parsed results are stored into this
            object.
        args_list: A list of argument strings to be parsed.

    Returns:
        A list argument strings that are not parsed by this method.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
                        default=VENDOR_RAMDISK_TYPE_NONE)
    parser.add_argument('--ramdisk_name',
                        type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
                        required=True)
    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
        parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
    parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)

    unknown_args = []
    vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
    if args.vendor_ramdisk is not None:
        vendor_ramdisk_table_builder.add_entry(
            args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)
    while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
        idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
        vendor_ramdisk_args = args_list[:idx]
        args_list = args_list[idx:]
        ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
        ramdisk_args_dict = vars(ramdisk_args)
        unknown_args.extend(extra_args)
        ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
        ramdisk_type = ramdisk_args.ramdisk_type
        ramdisk_name = ramdisk_args.ramdisk_name
        board_id = [ramdisk_args_dict[f'board_id{i}']
                    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
        vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
                                               ramdisk_name, board_id)
    if len(args_list) > 0:
        unknown_args.extend(args_list)

    args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
                                      .ramdisk_total_size)
    args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
                                              .entries)
    args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
    return unknown_args

def parse_cmdline():
    version_parser = ArgumentParser(add_help=False)
    version_parser.add_argument('--header_version', type=parse_int, default=0)
    if version_parser.parse_known_args()[0].header_version < 3:
        # For boot header v0 to v2, the kernel commandline field is split into
        # two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
        # so we minus one here to ensure the encoded string plus the
        # null-terminator can fit in the buffer size.
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
    else:
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            epilog=get_vendor_boot_v4_usage())
    parser.add_argument('--kernel', type=FileType('rb'),
                        help='path to the kernel')
    parser.add_argument('--ramdisk', type=FileType('rb'),
                        help='path to the ramdisk')
    parser.add_argument('--second', type=FileType('rb'),
                        help='path to the second bootloader')
    parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
    dtbo_group = parser.add_mutually_exclusive_group()
    dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
                            help='path to the recovery DTBO')
    dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
                            metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
                            help='path to the recovery ACPIO')
    parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
                        default='', help='kernel command line arguments')
    parser.add_argument('--vendor_cmdline',
                        type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
                        default='',
                        help='vendor boot kernel command line arguments')
    parser.add_argument('--base', type=parse_int, default=0x10000000,
                        help='base address')
    parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
                        help='kernel offset')
    parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
                        help='ramdisk offset')
    parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
                        help='second bootloader offset')
    parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
                        help='dtb offset')
    parser.add_argument('--os_version', type=parse_os_version, default=0,
                        help='operating system version')
    parser.add_argument('--os_patch_level', type=parse_os_patch_level,
                        default=0, help='operating system patch level')
    parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
                        help='tags offset')
    parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
                        default='', help='board name')
    parser.add_argument('--pagesize', type=parse_int,
                        choices=[2**i for i in range(11, 15)], default=2048,
                        help='page size')
    parser.add_argument('--id', action='store_true',
                        help='print the image ID on standard output')
    parser.add_argument('--header_version', type=parse_int, default=0,
                        help='boot image header version')
    parser.add_argument('-o', '--output', type=FileType('wb'),
                        help='output file name')
    parser.add_argument('--gki_signing_algorithm',
                        help='GKI signing algorithm to use')
    parser.add_argument('--gki_signing_key',
                        help='path to RSA private key file')
    parser.add_argument('--gki_signing_signature_args',
                        help='other hash arguments passed to avbtool')
    parser.add_argument('--gki_signing_avbtool_path',
                        help='path to avbtool for boot signature generation')
    parser.add_argument('--vendor_boot', type=FileType('wb'),
                        help='vendor boot output file name')
    parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
                        help='path to the vendor ramdisk')
    parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
                        help='path to the vendor bootconfig file')

    args, extra_args = parser.parse_known_args()
    if args.vendor_boot is not None and args.header_version > 3:
        extra_args = parse_vendor_ramdisk_args(args, extra_args)
    if len(extra_args) > 0:
        raise ValueError(f'Unrecognized arguments: {extra_args}')

    if args.header_version < 3:
        args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
        args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
        assert len(args.cmdline) <= BOOT_ARGS_SIZE
        assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE

    return args

def add_boot_image_signature(args, pagesize):
    """Adds the boot image signature.

    Note that the signature will only be verified in VTS to ensure a
    generic boot.img is used. It will not be used by the device
    bootloader at boot time. The bootloader should only verify
    the boot vbmeta at the end of the boot partition (or in the top-level
    vbmeta partition) via the Android Verified Boot process, when the
    device boots.
    """
    args.output.flush()  # Flush the buffer for signature calculation.

    # Appends zeros if the signing key is not specified.
    if not args.gki_signing_key or not args.gki_signing_algorithm:
        zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
        args.output.write(zeros)
        pad_file(args.output, pagesize)
        return

    avbtool = 'avbtool'  # Used from otatools.zip or Android build env.

    # We need to specify the path of avbtool in build/core/Makefile.
    # Because avbtool is not guaranteed to be in $PATH there.
    if args.gki_signing_avbtool_path:
        avbtool = args.gki_signing_avbtool_path

    # Need to specify a value of --partition_size for avbtool to work.
    # We use 64 MB below, but avbtool will not resize the boot image to
    # this size because --do_not_append_vbmeta_image is also specified.
    avbtool_cmd = [
        avbtool, 'add_hash_footer',
        '--partition_name', 'boot',
        '--partition_size', str(64 * 1024 * 1024),
        '--image', args.output.name,
        '--algorithm', args.gki_signing_algorithm,
        '--key', args.gki_signing_key,
        '--salt', 'd00df00d']  # TODO: use a hash of kernel/ramdisk as the salt.

    # Additional arguments passed to avbtool.
    if args.gki_signing_signature_args:
        avbtool_cmd += args.gki_signing_signature_args.split()

    # Outputs the signed vbmeta to a separate file, then append to boot.img
    # as the boot signature.
    with tempfile.TemporaryDirectory() as temp_out_dir:
        boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
        avbtool_cmd += ['--do_not_append_vbmeta_image',
                        '--output_vbmeta_image', boot_signature_output]
        subprocess.check_call(avbtool_cmd)
        with open(boot_signature_output, 'rb') as boot_signature:
            if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
                raise ValueError(
                    f'boot sigature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
            write_padded_file(args.output, boot_signature, pagesize)

def write_data(args, pagesize):
    write_padded_file(args.output, args.kernel, pagesize)
    write_padded_file(args.output, args.ramdisk, pagesize)
    write_padded_file(args.output, args.second, pagesize)

    if args.header_version > 0 and args.header_version < 3:
        write_padded_file(args.output, args.recovery_dtbo, pagesize)
    if args.header_version == 2:
        write_padded_file(args.output, args.dtb, pagesize)
    if args.header_version >= 4:
        add_boot_image_signature(args, pagesize)

def write_vendor_boot_data(args):
    if args.header_version > 3:
        builder = args.vendor_ramdisk_table_builder
        builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
        builder.write_entries_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.vendor_bootconfig,
                          args.pagesize)
    else:
        write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)

def main():
    args = parse_cmdline()
    if args.vendor_boot is not None:
        if args.header_version not in {3, 4}:
            raise ValueError(
                '--vendor_boot not compatible with given header version')
        if args.header_version == 3 and args.vendor_ramdisk is None:
            raise ValueError('--vendor_ramdisk missing or invalid')
        write_vendor_boot_header(args)
        write_vendor_boot_data(args)
    if args.output is not None:
        if args.second is not None and args.header_version > 2:
            raise ValueError(
                '--second not compatible with given header version')
        img_id = write_header(args)
        if args.header_version > 2:
            write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
        else:
            write_data(args, args.pagesize)
        if args.id and img_id is not None:
            print('0x' + ''.join(f'{octet:02x}' for octet in img_id))

if __name__ == '__main__':
    main()
|
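For illustration, the image-ID line at the end of main() formats the raw digest bytes as a single hex string; a worked example with hypothetical digest bytes:

# img_id as returned by write_header() is assumed to be raw digest bytes.
img_id = bytes([0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33])
print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
# -> 0xdeadbeef00112233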
@@ -115,19 +115,6 @@ LABEL primary
        APPEND \${cbootargs} $BM_CMDLINE
EOF

# Set up the pxelinux config for the Jetson TK1
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1
PROMPT 0
TIMEOUT 30
DEFAULT primary
MENU TITLE jetson TK1 boot options
LABEL primary
        MENU LABEL CI kernel on TFTP
        LINUX zImage
        FDT tegra124-jetson-tk1.dtb
        APPEND \${cbootargs} $BM_CMDLINE
EOF

# Create the rootfs in the NFS directory
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs

@@ -144,7 +131,7 @@ while [ $((ATTEMPTS--)) -gt 0 ]; do
    --dev="$BM_SERIAL" \
    --powerup="$BM_POWERUP" \
    --powerdown="$BM_POWERDOWN" \
    --test-timeout ${TEST_PHASE_TIMEOUT:-20}
    --timeout="${BM_POE_TIMEOUT:-60}"
  ret=$?

  if [ $ret -eq 2 ]; then
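The retry loop above treats the harness's exit status as a three-way contract: 0 passes, 1 fails (both final), 2 asks for another attempt. A hedged Python sketch of that contract (the command and attempt count are illustrative):

import subprocess

def run_with_retries(cmd, attempts=3):
    for _ in range(attempts):
        ret = subprocess.call(cmd)
        if ret != 2:
            return ret    # 0 or 1: a final verdict
    return 1              # out of retries: count it as a failure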
@@ -28,14 +28,11 @@ from serial_buffer import SerialBuffer
import sys
import threading


class PoERun:
    def __init__(self, args, test_timeout):
    def __init__(self, args):
        self.powerup = args.powerup
        self.powerdown = args.powerdown
        self.ser = SerialBuffer(
            args.dev, "results/serial-output.txt", "")
        self.test_timeout = test_timeout
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)

    def print_error(self, message):
        RED = '\033[0;31m'
@@ -51,17 +48,16 @@ class PoERun:
            return 1

        boot_detected = False
        for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
        for line in self.ser.lines():
            if re.search("Booting Linux", line):
                boot_detected = True
                break

        if not boot_detected:
            self.print_error(
                "Something went wrong; couldn't detect the boot start-up sequence")
            self.print_error("Something went wrong; couldn't detect the boot start-up sequence")
            return 2

        for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
        for line in self.ser.lines():
            if re.search("---. end Kernel panic", line):
                return 1

@@ -74,11 +70,6 @@ class PoERun:
                self.print_error("nouveau jetson boot bug, retrying.")
                return 2

            # network failure on the tk1
            if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line):
                self.print_error("nouveau jetson tk1 network failure, retrying.")
                return 2

            result = re.search("hwci: mesa: (\S*)", line)
            if result:
                if result.group(1) == "pass":
@@ -86,30 +77,24 @@ class PoERun:
                else:
                    return 1

        self.print_error(
            "Reached the end of the CPU serial log without finding a result")
        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 2


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str,
                        help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str,
                        help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str,
                        help='shell command for powering off', required=True)
    parser.add_argument(
        '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
    parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--timeout', type=int, default=60,
                        help='time in seconds to wait for activity', required=False)
    args = parser.parse_args()

    poe = PoERun(args, args.test_timeout * 60)
    poe = PoERun(args)
    retval = poe.run()

    poe.logged_system(args.powerdown)

    sys.exit(retval)


if __name__ == '__main__':
    main()
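PoERun.run() is essentially a series of scan-until-match passes over the serial stream. Reduced to plain iterables, the pattern looks like this (a sketch, not the harness itself):

import re

def detect(lines, pattern):
    # True as soon as any line matches; the real code also acts on the match.
    return any(re.search(pattern, line) for line in lines)

log = ["U-Boot 2021.01", "Booting Linux on physical CPU 0x0"]
assert detect(log, "Booting Linux")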
@@ -28,9 +28,8 @@ import serial
import threading
import time


class SerialBuffer:
    def __init__(self, dev, filename, prefix, timeout=None, line_queue=None):
    def __init__(self, dev, filename, prefix, timeout=None):
        self.filename = filename
        self.dev = dev

@@ -42,13 +41,7 @@ class SerialBuffer:
            self.serial = None

        self.byte_queue = queue.Queue()
        # Allow multiple SerialBuffers to share a line queue, so that servo's
        # CPU and EC streams can be merged into a single stream for watching
        # the boot/test progress.
        if line_queue:
            self.line_queue = line_queue
        else:
            self.line_queue = queue.Queue()
        self.line_queue = queue.Queue()
        self.prefix = prefix
        self.timeout = timeout
        self.sentinel = object()
@@ -137,30 +130,14 @@ class SerialBuffer:
                self.line_queue.put(line)
                line = bytearray()

    def lines(self, timeout=None, phase=None):
        start_time = time.monotonic()
        while True:
            read_timeout = None
            if timeout:
                read_timeout = timeout - (time.monotonic() - start_time)
                if read_timeout <= 0:
                    print("read timeout waiting for serial during {}".format(phase))
                    self.close()
                    break
    def get_line(self):
        line = self.line_queue.get()
        if line == self.sentinel:
            self.lines_thread.join()
        return line

            try:
                line = self.line_queue.get(timeout=read_timeout)
            except queue.Empty:
                print("read timeout waiting for serial during {}".format(phase))
                self.close()
                break

            if line == self.sentinel:
                print("End of serial output")
                self.lines_thread.join()
                break

            yield line
    def lines(self):
        return iter(self.get_line, self.sentinel)


def main():
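The rewritten lines() relies on Python's two-argument iter(): keep calling the getter until it returns the sentinel object. A self-contained illustration:

import queue

q = queue.Queue()
SENTINEL = object()
for item in ("a", "b", SENTINEL):
    q.put(item)

for line in iter(q.get, SENTINEL):
    print(line)  # prints "a" then "b", then stops at the sentinel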
@@ -28,8 +28,8 @@
import sys
import telnetlib

host = sys.argv[1]
port = sys.argv[2]
host=sys.argv[1]
port=sys.argv[2]

tn = telnetlib.Telnet(host, port, 1000000)
2 .gitlab-ci/bin/.gitignore vendored
@@ -1,2 +0,0 @@
schema.graphql
gitlab_gql.py.cache.db
@@ -1,301 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
#   Tomeu Vizoso <tomeu.vizoso@collabora.com>
#   David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT

"""
Helper script to restrict CI to running only the required jobs
and to show the job(s) logs.
"""

import argparse
import re
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import chain
from typing import Optional

import gitlab
from colorama import Fore, Style
from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline
from gitlab_gql import GitlabGQL, create_job_needs_dag, filter_dag, print_dag

REFRESH_WAIT_LOG = 10
REFRESH_WAIT_JOBS = 6

URL_START = "\033]8;;"
URL_END = "\033]8;;\a"

STATUS_COLORS = {
    "created": "",
    "running": Fore.BLUE,
    "success": Fore.GREEN,
    "failed": Fore.RED,
    "canceled": Fore.MAGENTA,
    "manual": "",
    "pending": "",
    "skipped": "",
}

COMPLETED_STATUSES = ["success", "failed"]


def print_job_status(job) -> None:
    """Print a nice, colored job status with a link to the job."""
    if job.status == "canceled":
        return

    print(
        STATUS_COLORS[job.status]
        + "🞋 job "
        + URL_START
        + f"{job.web_url}\a{job.name}"
        + URL_END
        + f" :: {job.status}"
        + Style.RESET_ALL
    )


def print_job_status_change(job) -> None:
    """Report a job status change."""
    if job.status == "canceled":
        return

    print(
        STATUS_COLORS[job.status]
        + "🗘 job "
        + URL_START
        + f"{job.web_url}\a{job.name}"
        + URL_END
        + f" has new status: {job.status}"
        + Style.RESET_ALL
    )


def pretty_wait(sec: int) -> None:
    """Show a per-second countdown while waiting."""
    for val in range(sec, 0, -1):
        print(f"⏲ {val} seconds", end="\r")
        time.sleep(1)


def monitor_pipeline(
    project,
    pipeline,
    target_job: Optional[str],
    dependencies,
    force_manual: bool,
    stress: bool,
) -> tuple[Optional[int], Optional[int]]:
    """Monitor the pipeline and delegate canceling unwanted jobs."""
    statuses = {}
    target_statuses = {}
    stress_succ = 0
    stress_fail = 0

    if target_job:
        target_jobs_regex = re.compile(target_job.strip())

    while True:
        to_cancel = []
        for job in pipeline.jobs.list(all=True, sort="desc"):
            # target jobs
            if target_job and target_jobs_regex.match(job.name):
                if force_manual and job.status == "manual":
                    enable_job(project, job, True)

                if stress and job.status in ["success", "failed"]:
                    if job.status == "success":
                        stress_succ += 1
                    if job.status == "failed":
                        stress_fail += 1
                    retry_job(project, job)

                if (job.id not in target_statuses) or (
                    job.status not in target_statuses[job.id]
                ):
                    print_job_status_change(job)
                    target_statuses[job.id] = job.status
                else:
                    print_job_status(job)

                continue

            # all jobs
            if (job.id not in statuses) or (job.status not in statuses[job.id]):
                print_job_status_change(job)
                statuses[job.id] = job.status

            # dependencies and canceling the rest
            if job.name in dependencies:
                if job.status == "manual":
                    enable_job(project, job, False)

            elif target_job and job.status not in [
                "canceled",
                "success",
                "failed",
                "skipped",
            ]:
                to_cancel.append(job)

        if target_job:
            cancel_jobs(project, to_cancel)

        if stress:
            print(
                "∑ succ: " + str(stress_succ) + "; fail: " + str(stress_fail),
                flush=False,
            )
            pretty_wait(REFRESH_WAIT_JOBS)
            continue

        print("---------------------------------", flush=False)

        if len(target_statuses) == 1 and {"running"}.intersection(
            target_statuses.values()
        ):
            return next(iter(target_statuses)), None

        if {"failed", "canceled"}.intersection(target_statuses.values()):
            return None, 1

        if {"success", "manual"}.issuperset(target_statuses.values()):
            return None, 0

        pretty_wait(REFRESH_WAIT_JOBS)

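The three return paths at the bottom of monitor_pipeline() are easiest to see as a pure function over the collected {job_id: status} map; a hedged sketch (pipeline_verdict is an illustrative name, not part of the script):

def pipeline_verdict(target_statuses: dict) -> tuple:
    statuses = set(target_statuses.values())
    if len(target_statuses) == 1 and "running" in statuses:
        return next(iter(target_statuses)), None   # tail this one job's log
    if statuses & {"failed", "canceled"}:
        return None, 1                              # something went wrong
    if statuses <= {"success", "manual"}:
        return None, 0                              # everything finished
    return None, None                               # keep polling

assert pipeline_verdict({1: "success", 2: "manual"}) == (None, 0)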
def enable_job(project, job, target: bool) -> None:
    """Enable a manual job."""
    pjob = project.jobs.get(job.id, lazy=True)
    pjob.play()
    if target:
        jtype = "🞋 "
    else:
        jtype = "(dependency)"
    print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)


def retry_job(project, job) -> None:
    """Retry a job."""
    pjob = project.jobs.get(job.id, lazy=True)
    pjob.retry()
    jtype = "↻"
    print(Fore.MAGENTA + f"{jtype} job {job.name} retried" + Style.RESET_ALL)


def cancel_job(project, job) -> None:
    """Cancel a GitLab job."""
    pjob = project.jobs.get(job.id, lazy=True)
    pjob.cancel()
    print(f"♲ {job.name}")


def cancel_jobs(project, to_cancel) -> None:
    """Cancel unwanted GitLab jobs."""
    if not to_cancel:
        return

    with ThreadPoolExecutor(max_workers=6) as exe:
        part = partial(cancel_job, project)
        exe.map(part, to_cancel)


def print_log(project, job_id) -> None:
    """Print the job log to standard output."""
    printed_lines = 0
    while True:
        job = project.jobs.get(job_id)

        # GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
        lines = job.trace().decode("unicode_escape").splitlines()
        for line in lines[printed_lines:]:
            print(line)
        printed_lines = len(lines)

        if job.status in COMPLETED_STATUSES:
            print(Fore.GREEN + f"Job finished: {job.web_url}" + Style.RESET_ALL)
            return
        pretty_wait(REFRESH_WAIT_LOG)


def parse_args() -> argparse.Namespace:
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Tool to trigger a subset of container jobs "
        + "and monitor the progress of a test job",
        epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
        + '--target ".*traces" ',
    )
    parser.add_argument("--target", metavar="target-job", help="Target job")
    parser.add_argument(
        "--rev", metavar="revision", help="repository git revision", required=True
    )
    parser.add_argument(
        "--token",
        metavar="token",
        help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
    )
    parser.add_argument(
        "--force-manual", action="store_true", help="Force jobs marked as manual"
    )
    parser.add_argument("--stress", action="store_true", help="Stress-test job(s)")
    return parser.parse_args()


def find_dependencies(target_job: str, project_path: str, sha: str) -> set[str]:
    gql_instance = GitlabGQL()
    dag, _ = create_job_needs_dag(
        gql_instance, {"projectPath": project_path.path_with_namespace, "sha": sha}
    )

    target_dep_dag = filter_dag(dag, target_job)
    print(Fore.YELLOW)
    print("Detected job dependencies:")
    print()
    print_dag(target_dep_dag)
    print(Fore.RESET)
    return set(chain.from_iterable(target_dep_dag.values()))


if __name__ == "__main__":
    try:
        t_start = time.perf_counter()

        args = parse_args()

        token = read_token(args.token)

        gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)

        cur_project = get_gitlab_project(gl, "mesa")

        print(f"Revision: {args.rev}")
        pipe = wait_for_pipeline(cur_project, args.rev)
        print(f"Pipeline: {pipe.web_url}")
        deps = set()
        if args.target:
            print("🞋 job: " + Fore.BLUE + args.target + Style.RESET_ALL)
            deps = find_dependencies(
                target_job=args.target, sha=args.rev, project_path=cur_project
            )
        target_job_id, ret = monitor_pipeline(
            cur_project, pipe, args.target, deps, args.force_manual, args.stress
        )

        if target_job_id:
            print_log(cur_project, target_job_id)

        t_end = time.perf_counter()
        spend_minutes = (t_end - t_start) / 60
        print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes")

        sys.exit(ret)
    except KeyboardInterrupt:
        sys.exit(1)
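The log-tailing trick in print_log(), shown in isolation: since the REST API returns the whole trace on every call, remember how many lines were already printed and emit only the new suffix.

def print_new_lines(trace: str, printed_lines: int) -> int:
    lines = trace.splitlines()
    for line in lines[printed_lines:]:
        print(line)
    return len(lines)

n = print_new_lines("a\nb", 0)     # prints a, b
n = print_new_lines("a\nb\nc", n)  # prints only c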
@@ -1,11 +0,0 @@
#!/bin/sh

# Helper script to download the GraphQL schema from GitLab, so that IDEs can
# assist the developer when editing gql files

SOURCE_DIR=$(dirname "$(realpath "$0")")

(
    cd $SOURCE_DIR || exit 1
    gql-cli https://gitlab.freedesktop.org/api/graphql --print-schema > schema.graphql
)
@@ -1,42 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
#   Tomeu Vizoso <tomeu.vizoso@collabora.com>
#   David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT
'''Functions shared between the scripts.'''

import os
import time
from typing import Optional


def get_gitlab_project(glab, name: str):
    """Find the specified GitLab project for the authenticated user."""
    glab.auth()
    username = glab.user.username
    return glab.projects.get(f"{username}/mesa")


def read_token(token_arg: Optional[str]) -> str:
    """Pick the token from the argument, or else from the config file."""
    if token_arg:
        return token_arg
    return (
        open(os.path.expanduser("~/.config/gitlab-token"), encoding="utf-8")
        .readline()
        .rstrip()
    )


def wait_for_pipeline(project, sha: str):
    """Wait until a pipeline for the given SHA appears in GitLab."""
    print("⏲ waiting for the pipeline to appear..", end="")
    while True:
        pipelines = project.pipelines.list(sha=sha)
        if pipelines:
            print("", flush=True)
            return pipelines[0]
        print("", end=".", flush=True)
        time.sleep(1)
@@ -1,303 +0,0 @@
#!/usr/bin/env python3

import re
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from dataclasses import dataclass, field
from os import getenv
from pathlib import Path
from typing import Any, Iterable, Optional, Pattern, Union

import yaml
from filecache import DAY, filecache
from gql import Client, gql
from gql.transport.aiohttp import AIOHTTPTransport
from graphql import DocumentNode

Dag = dict[str, list[str]]
TOKEN_DIR = Path(getenv("XDG_CONFIG_HOME") or Path.home() / ".config")


def get_token_from_default_dir() -> str:
    try:
        token_file = TOKEN_DIR / "gitlab-token"
        return token_file.resolve()
    except FileNotFoundError as ex:
        print(
            f"Could not find {token_file}, please provide a token file as an argument"
        )
        raise ex


def get_project_root_dir():
    root_path = Path(__file__).parent.parent.parent.resolve()
    gitlab_file = root_path / ".gitlab-ci.yml"
    assert gitlab_file.exists()

    return root_path


@dataclass
class GitlabGQL:
    _transport: Any = field(init=False)
    client: Client = field(init=False)
    url: str = "https://gitlab.freedesktop.org/api/graphql"
    token: Optional[str] = None

    def __post_init__(self):
        self._setup_gitlab_gql_client()

    def _setup_gitlab_gql_client(self) -> Client:
        # Select the transport with a defined url endpoint
        headers = {}
        if self.token:
            headers["Authorization"] = f"Bearer {self.token}"
        self._transport = AIOHTTPTransport(url=self.url, headers=headers)

        # Create a GraphQL client using the defined transport
        self.client = Client(
            transport=self._transport, fetch_schema_from_transport=True
        )

    @filecache(DAY)
    def query(
        self, gql_file: Union[Path, str], params: dict[str, Any]
    ) -> dict[str, Any]:
        # Provide a GraphQL query
        source_path = Path(__file__).parent
        pipeline_query_file = source_path / gql_file

        query: DocumentNode
        with open(pipeline_query_file, "r") as f:
            pipeline_query = f.read()
        query = gql(pipeline_query)

        # Execute the query on the transport
        return self.client.execute(query, variable_values=params)

    def invalidate_query_cache(self):
        self.query._db.clear()


def create_job_needs_dag(
    gl_gql: GitlabGQL, params
) -> tuple[Dag, dict[str, dict[str, Any]]]:
    result = gl_gql.query("pipeline_details.gql", params)
    dag = {}
    jobs = {}
    pipeline = result["project"]["pipeline"]
    if not pipeline:
        raise RuntimeError(f"Could not find any pipelines for {params}")

    for stage in pipeline["stages"]["nodes"]:
        for stage_job in stage["groups"]["nodes"]:
            for job in stage_job["jobs"]["nodes"]:
                needs = job.pop("needs")["nodes"]
                jobs[job["name"]] = job
                dag[job["name"]] = {node["name"] for node in needs}

    for job, needs in dag.items():
        needs: set
        partial = True

        while partial:
            next_depth = {n for dn in needs for n in dag[dn]}
            partial = not needs.issuperset(next_depth)
            needs = needs.union(next_depth)

        dag[job] = needs

    return dag, jobs

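The inner while loop of create_job_needs_dag() computes a transitive closure: each job's needs-set grows until it already contains everything its dependencies need. Extracted as a standalone, runnable sketch:

def transitive_closure(dag: dict) -> dict:
    for job, needs in dag.items():
        needs = set(needs)
        while True:
            # Everything needed one level deeper than the current set.
            next_depth = {n for dn in needs for n in dag[dn]}
            if needs.issuperset(next_depth):
                break
            needs |= next_depth
        dag[job] = needs
    return dag

assert transitive_closure({"a": set(), "b": {"a"}, "c": {"b"}})["c"] == {"a", "b"}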
def filter_dag(dag: Dag, regex: Pattern) -> Dag:
    return {job: needs for job, needs in dag.items() if re.match(regex, job)}


def print_dag(dag: Dag) -> None:
    for job, needs in dag.items():
        print(f"{job}:")
        print(f"\t{' '.join(needs)}")
        print()


def fetch_merged_yaml(gl_gql: GitlabGQL, params) -> dict[Any]:
    gitlab_yml_file = get_project_root_dir() / ".gitlab-ci.yml"
    content = Path(gitlab_yml_file).read_text().strip()
    params["content"] = content
    raw_response = gl_gql.query("job_details.gql", params)
    if merged_yaml := raw_response["ciConfig"]["mergedYaml"]:
        return yaml.safe_load(merged_yaml)

    gl_gql.invalidate_query_cache()
    raise ValueError(
        """
Could not fetch any content for the merged YAML;
please verify that the git SHA exists in the remote.
Maybe you forgot to `git push`?"""
    )


def recursive_fill(job, relationship_field, target_data, acc_data: dict, merged_yaml):
    if relatives := job.get(relationship_field):
        if isinstance(relatives, str):
            relatives = [relatives]

        for relative in relatives:
            parent_job = merged_yaml[relative]
            acc_data = recursive_fill(parent_job, relationship_field,
                                      target_data, acc_data, merged_yaml)

    acc_data |= job.get(target_data, {})

    return acc_data


def get_variables(job, merged_yaml, project_path, sha) -> dict[str, str]:
    p = get_project_root_dir() / ".gitlab-ci" / "image-tags.yml"
    image_tags = yaml.safe_load(p.read_text())

    variables = image_tags["variables"]
    variables |= merged_yaml["variables"]
    variables |= job["variables"]
    variables["CI_PROJECT_PATH"] = project_path
    variables["CI_PROJECT_NAME"] = project_path.split("/")[1]
    variables["CI_REGISTRY_IMAGE"] = "registry.freedesktop.org/${CI_PROJECT_PATH}"
    variables["CI_COMMIT_SHA"] = sha

    while recurse_among_variables_space(variables):
        pass

    return variables

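The `while recurse_among_variables_space(variables)` loop above runs variable substitution to a fixed point. A simplified, runnable sketch of the same idea (expand_once is an illustrative stand-in, not the script's function):

import re

def expand_once(variables: dict) -> bool:
    changed = False
    for name, value in variables.items():
        # Replace $VAR / ${VAR} with the current value of VAR, if known.
        def sub(m):
            return variables.get(m.group(1), m.group(0))
        new = re.sub(r"\$\{?(\w+)\}?", sub, str(value))
        if new != value:
            variables[name] = new
            changed = True
    return changed

v = {"A": "x", "B": "${A}/y", "C": "$B/z"}
while expand_once(v):
    pass
assert v["C"] == "x/y/z"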
# Based on: https://stackoverflow.com/a/2158532/1079223
def flatten(xs):
    for x in xs:
        if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
            yield from flatten(x)
        else:
            yield x


def get_full_script(job) -> list[str]:
    script = []
    for script_part in ("before_script", "script", "after_script"):
        script.append(f"# {script_part}")
        lines = flatten(job.get(script_part, []))
        script.extend(lines)
        script.append("")

    return script


def recurse_among_variables_space(var_graph) -> bool:
    updated = False
    for var, value in var_graph.items():
        value = str(value)
        dep_vars = []
        if match := re.findall(r"(\$[{]?[\w\d_]*[}]?)", value):
            all_dep_vars = [v.lstrip("${").rstrip("}") for v in match]
            # print(value, match, all_dep_vars)
            dep_vars = [v for v in all_dep_vars if v in var_graph]

        for dep_var in dep_vars:
            dep_value = str(var_graph[dep_var])
            new_value = var_graph[var]
            new_value = new_value.replace(f"${{{dep_var}}}", dep_value)
            new_value = new_value.replace(f"${dep_var}", dep_value)
            var_graph[var] = new_value
            updated |= dep_value != new_value

    return updated


def get_job_final_definition(job_name, merged_yaml, project_path, sha):
    job = merged_yaml[job_name]
    variables = get_variables(job, merged_yaml, project_path, sha)

    print("# --------- variables ---------------")
    for var, value in sorted(variables.items()):
        print(f"export {var}={value!r}")

    # TODO: Recurse into needs to get the full script
    # TODO: maybe create an extra yaml file to avoid too much rework
    script = get_full_script(job)
    print()
    print()
    print("# --------- full script ---------------")
    print("\n".join(script))

    if image := variables.get("MESA_IMAGE"):
        print()
        print()
        print("# --------- container image ---------------")
        print(image)


def parse_args() -> Namespace:
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        description="CLI and library with utility functions to debug jobs via the GitLab GraphQL API",
        epilog=f"""Example:
{Path(__file__).name} --rev $(git rev-parse HEAD) --print-job-dag""",
    )
    parser.add_argument("-pp", "--project-path", type=str, default="mesa/mesa")
    parser.add_argument("--sha", "--rev", type=str, required=True)
    parser.add_argument(
        "--regex",
        type=str,
        required=False,
        help="Regex pattern for the job name to be considered",
    )
    parser.add_argument("--print-dag", action="store_true", help="Print the job needs DAG")
    parser.add_argument(
        "--print-merged-yaml",
        action="store_true",
        help="Print the resulting YAML for the given SHA",
    )
    parser.add_argument(
        "--print-job-manifest", type=str, help="Print the resulting job data"
    )
    parser.add_argument(
        "--gitlab-token-file",
        type=str,
        default=get_token_from_default_dir(),
        help="force GitLab token, otherwise it's read from $XDG_CONFIG_HOME/gitlab-token",
    )

    args = parser.parse_args()
    args.gitlab_token = Path(args.gitlab_token_file).read_text()
    return args


def main():
    args = parse_args()
    gl_gql = GitlabGQL(token=args.gitlab_token)

    if args.print_dag:
        dag, jobs = create_job_needs_dag(
            gl_gql, {"projectPath": args.project_path, "sha": args.sha}
        )

        if args.regex:
            dag = filter_dag(dag, re.compile(args.regex))
        print_dag(dag)

    if args.print_merged_yaml:
        print(
            fetch_merged_yaml(
                gl_gql, {"projectPath": args.project_path, "sha": args.sha}
            )
        )

    if args.print_job_manifest:
        merged_yaml = fetch_merged_yaml(
            gl_gql, {"projectPath": args.project_path, "sha": args.sha}
        )
        get_job_final_definition(
            args.print_job_manifest, merged_yaml, args.project_path, args.sha
        )


if __name__ == "__main__":
    main()
@@ -1,7 +0,0 @@
query getCiConfigData($projectPath: ID!, $sha: String, $content: String!) {
  ciConfig(projectPath: $projectPath, sha: $sha, content: $content) {
    errors
    mergedYaml
    __typename
  }
}
@@ -1,86 +0,0 @@
fragment LinkedPipelineData on Pipeline {
  id
  iid
  path
  cancelable
  retryable
  userPermissions {
    updatePipeline
  }
  status: detailedStatus {
    id
    group
    label
    icon
  }
  sourceJob {
    id
    name
  }
  project {
    id
    name
    fullPath
  }
}

query getPipelineDetails($projectPath: ID!, $sha: String!) {
  project(fullPath: $projectPath) {
    id
    pipeline(sha: $sha) {
      id
      iid
      complete
      downstream {
        nodes {
          ...LinkedPipelineData
        }
      }
      upstream {
        ...LinkedPipelineData
      }
      stages {
        nodes {
          id
          name
          status: detailedStatus {
            id
            action {
              id
              icon
              path
              title
            }
          }
          groups {
            nodes {
              id
              status: detailedStatus {
                id
                label
                group
                icon
              }
              name
              size
              jobs {
                nodes {
                  id
                  name
                  kind
                  scheduledAt
                  needs {
                    nodes {
                      id
                      name
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
@@ -1,8 +0,0 @@
aiohttp==3.8.1
colorama==0.4.5
filecache==0.81
gql==3.4.0
python-gitlab==3.5.0
PyYAML==6.0
ruamel.yaml.clib==0.2.6
ruamel.yaml==0.17.21
@@ -1,140 +0,0 @@
#!/usr/bin/env python3
# Copyright © 2022 Collabora Ltd.
# Authors:
#   David Heidelberg <david.heidelberg@collabora.com>
#
# SPDX-License-Identifier: MIT

"""
Helper script to update trace checksums
"""

import argparse
import bz2
import glob
import re
import json
import sys
from ruamel.yaml import YAML

import gitlab
from gitlab_common import get_gitlab_project, read_token, wait_for_pipeline


DESCRIPTION_FILE = "export PIGLIT_REPLAY_DESCRIPTION_FILE='.*/install/(.*)'$"
DEVICE_NAME = "export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$"


def gather_results(
    project,
    pipeline,
) -> None:
    """Gather results."""

    target_jobs_regex = re.compile(".*-traces([:].*)?$")

    for job in pipeline.jobs.list(all=True, sort="desc"):
        if target_jobs_regex.match(job.name) and job.status == "failed":
            cur_job = project.jobs.get(job.id)
            # get variables
            print(f"👁 Looking through logs for the device variable and traces.yml file in {job.name}...")
            log = cur_job.trace().decode("unicode_escape").splitlines()
            filename: str = ''
            dev_name: str = ''
            for logline in log:
                desc_file = re.search(DESCRIPTION_FILE, logline)
                device_name = re.search(DEVICE_NAME, logline)
                if desc_file:
                    filename = desc_file.group(1)
                if device_name:
                    dev_name = device_name.group(1)

            if not filename or not dev_name:
                print("! Couldn't find the device name or the YAML file in the logs!")
                return

            print(f"👁 Found {dev_name} and file {filename}")

            # find the filename in the Mesa source
            traces_file = glob.glob('./**/' + filename, recursive=True)
            # write into it
            with open(traces_file[0], 'r', encoding='utf-8') as target_file:
                yaml = YAML()
                yaml.compact(seq_seq=False, seq_map=False)
                yaml.version = (1, 2)
                yaml.width = 2048  # do not break the text fields
                yaml.default_flow_style = None
                target = yaml.load(target_file)

            # parse the artifact
            results_json_bz2 = cur_job.artifact(path="results/results.json.bz2", streamed=False)
            results_json = bz2.decompress(results_json_bz2).decode("utf-8")
            results = json.loads(results_json)

            for _, value in results["tests"].items():
                if (
                    not value['images'] or
                    not value['images'][0] or
                    "image_desc" not in value['images'][0]
                ):
                    continue

                trace: str = value['images'][0]['image_desc']
                checksum: str = value['images'][0]['checksum_render']

                if not checksum:
                    print(f"Trace {trace} checksum is missing! Skipping.")
                    continue

                if checksum == "error":
                    print(f"Trace {trace} crashed")
                    continue

                if target['traces'][trace][dev_name].get('checksum') == checksum:
                    continue

                if "label" in target['traces'][trace][dev_name]:
                    print(f'{trace}: {dev_name}: has label: {target["traces"][trace][dev_name]["label"]}, is it still right?')

                target['traces'][trace][dev_name]['checksum'] = checksum

            with open(traces_file[0], 'w', encoding='utf-8') as target_file:
                yaml.dump(target, target_file)

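The two regexes at the top of the script recover the traces YAML path and the device name from 'export ...' lines in the job log; a mini-example (the device name shown is hypothetical):

import re

DEVICE_NAME = r"export PIGLIT_REPLAY_DEVICE_NAME='(.*)'$"

logline = "export PIGLIT_REPLAY_DEVICE_NAME='gl-vmware-llvmpipe'"
m = re.search(DEVICE_NAME, logline)
assert m and m.group(1) == "gl-vmware-llvmpipe"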
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(
        description="Tool to generate a patch from checksums",
        epilog="Example: update_traces_checksum.py --rev $(git rev-parse HEAD)",
    )
    parser.add_argument(
        "--rev", metavar="revision", help="repository git revision", required=True
    )
    parser.add_argument(
        "--token",
        metavar="token",
        help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
    )
    return parser.parse_args()


if __name__ == "__main__":
    try:
        args = parse_args()

        token = read_token(args.token)

        gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)

        cur_project = get_gitlab_project(gl, "mesa")

        print(f"Revision: {args.rev}")
        pipe = wait_for_pipeline(cur_project, args.rev)
        print(f"Pipeline: {pipe.web_url}")
        gather_results(cur_project, pipe)

        sys.exit()
    except KeyboardInterrupt:
        sys.exit(1)
@@ -1,6 +1,6 @@
# Shared between windows and Linux
.build-common:
  extends: .build-rules
  extends: .ci-run-policy
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  artifacts:
@@ -37,7 +37,7 @@
  tags:
    - windows
    - docker
    - "2022"
    - "1809"
    - mesa
  cache:
    key: ${CI_JOB_NAME}
@@ -54,15 +54,6 @@
  script:
    - .gitlab-ci/meson/build.sh

.meson-build_mingw:
  extends:
    - .build-linux
    - .use-debian/x86_build_mingw
    - .use-wine
  stage: build-x86_64
  script:
    - .gitlab-ci/meson/build.sh

debian-testing:
  extends:
    - .meson-build
@@ -78,14 +69,13 @@ debian-testing:
      -D dri3=enabled
      -D gallium-va=enabled
    GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915"
    VULKAN_DRIVERS: "swrast,amd,intel,virtio-experimental"
    VULKAN_DRIVERS: "swrast,amd,intel"
    BUILDTYPE: "debugoptimized"
    EXTRA_OPTION: >
      -D spirv-to-dxil=true
      -D valgrind=false
    MINIO_ARTIFACT_NAME: mesa-amd64
    LLVM_VERSION: "13"
  script:
    - .gitlab-ci/lava/lava-pytest.sh
    - .gitlab-ci/meson/build.sh
    - .gitlab-ci/prepare-artifacts.sh
  artifacts:
@@ -123,17 +113,19 @@ debian-testing-msan:
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
    VULKAN_DRIVERS: intel,amd,broadcom,virtio-experimental

.debian-cl-testing:
debian-clover-testing:
  extends:
    - .meson-build
    - .ci-deqp-artifacts
  variables:
    LLVM_VERSION: "13"
    UNWIND: "enabled"
    DRI_LOADERS: >
      -D glx=disabled
      -D egl=disabled
      -D gbm=disabled
    GALLIUM_ST: >
      -D gallium-opencl=icd
      -D opencl-spirv=true
    GALLIUM_DRIVERS: "swrast"
    BUILDTYPE: "debugoptimized"
    EXTRA_OPTION: >
@@ -142,23 +134,7 @@ debian-testing-msan:
    - .gitlab-ci/meson/build.sh
    - .gitlab-ci/prepare-artifacts.sh

debian-clover-testing:
  extends:
    - .debian-cl-testing
  variables:
    GALLIUM_ST: >
      -D gallium-opencl=icd
      -D opencl-spirv=true

debian-rusticl-testing:
  extends:
    - .debian-cl-testing
  variables:
    GALLIUM_ST: >
      -D gallium-rusticl=true
      -D opencl-spirv=true

debian-build-testing:
debian-gallium:
  extends: .meson-build
  variables:
    UNWIND: "enabled"
@@ -171,22 +147,18 @@ debian-build-testing:
      -D dri3=enabled
      -D gallium-extra-hud=true
      -D gallium-vdpau=enabled
      -D gallium-xvmc=enabled
      -D gallium-omx=bellagio
      -D gallium-va=enabled
      -D gallium-xa=enabled
      -D gallium-nine=true
      -D gallium-opencl=disabled
      -D gallium-rusticl=false
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
    VULKAN_DRIVERS: swrast
    EXTRA_OPTION: >
      -D spirv-to-dxil=true
      -D osmesa=true
      -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi
      -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,xvmc,lima,panfrost,asahi
  script:
    - .gitlab-ci/lava/lava-pytest.sh
    - .gitlab-ci/run-shellcheck.sh
    - .gitlab-ci/run-yamllint.sh
    - .gitlab-ci/meson/build.sh
    - .gitlab-ci/run-shader-db.sh

@@ -194,7 +166,6 @@ debian-build-testing:
debian-release:
  extends: .meson-build
  variables:
    LLVM_VERSION: "13"
    UNWIND: "enabled"
    DRI_LOADERS: >
      -D glx=dri
@@ -205,18 +176,17 @@ debian-release:
      -D dri3=enabled
      -D gallium-extra-hud=true
      -D gallium-vdpau=enabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=enabled
      -D gallium-xa=enabled
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D gallium-rusticl=false
      -D llvm=enabled
    GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
    VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental"
    VULKAN_DRIVERS: "amd,imagination-experimental"
    BUILDTYPE: "release"
    EXTRA_OPTION: >
      -D spirv-to-dxil=true
      -D osmesa=true
      -D tools=all
      -D intel-clc=enabled
@@ -232,6 +202,7 @@ fedora-release:
    BUILDTYPE: "release"
    C_ARGS: >
      -Wno-error=array-bounds
      -Wno-error=maybe-uninitialized
      -Wno-error=stringop-overread
      -Wno-error=uninitialized
    CPP_ARGS: >
@@ -242,30 +213,29 @@ fedora-release:
      -D egl=enabled
      -D glvnd=true
      -D platforms=x11,wayland
    # intel-clc disabled, we need llvm-spirv-translator 13.0+, Fedora 34 only packages 12.0.
    EXTRA_OPTION: >
      -D osmesa=true
      -D selinux=true
      -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
      -D vulkan-layers=device-select,overlay
      -D intel-clc=disabled
      -D intel-clc=enabled
      -D imagination-srv=true
    GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-extra-hud=true
      -D gallium-vdpau=enabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=enabled
      -D gallium-xa=enabled
      -D gallium-nine=false
      -D gallium-opencl=icd
      -D gallium-rusticl=false
      -D gles1=disabled
      -D gles2=enabled
      -D llvm=enabled
      -D microsoft-clc=disabled
      -D shared-llvm=enabled
      -D vulkan-device-select-layer=true
    LLVM_VERSION: ""
    UNWIND: "disabled"
    VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
@@ -301,12 +271,12 @@ debian-android:
    GALLIUM_ST: >
      -D dri3=disabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D gallium-rusticl=false
    LLVM_VERSION: ""
    PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files"
  script:
@@ -333,6 +303,7 @@ debian-android:
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
@@ -346,7 +317,7 @@ debian-android:
    - debian/arm_build
  variables:
    VULKAN_DRIVERS: freedreno,broadcom
    GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink"
    GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4"
    BUILDTYPE: "debugoptimized"
  tags:
    - aarch64
@@ -384,6 +355,8 @@ debian-arm64-asan:
  extends:
    - debian-arm64
  variables:
    C_ARGS: >
      -Wno-error=stringop-truncation
    EXTRA_OPTION: >
      -D llvm=disabled
      -D b_sanitize=address
@@ -407,65 +380,32 @@ debian-arm64-build-test:
debian-clang:
  extends: .meson-build
  variables:
    LLVM_VERSION: "13"
    UNWIND: "enabled"
    GALLIUM_DUMP_CPU: "true"
    C_ARGS: >
      -Wno-error=constant-conversion
      -Wno-error=enum-conversion
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=initializer-overrides
      -Wno-error=sometimes-uninitialized
      -Wno-error=unused-function
    CPP_ARGS: >
      -Wno-error=c99-designator
      -Wno-error=deprecated-declarations
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=missing-braces
      -Wno-error=overloaded-virtual
      -Wno-error=tautological-constant-out-of-range-compare
      -Wno-error=unused-const-variable
      -Wno-error=unused-private-field
    DRI_LOADERS: >
      -D glx=dri
      -D gbm=enabled
      -D egl=enabled
      -D glvnd=true
      -D platforms=x11,wayland
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-extra-hud=true
      -D gallium-vdpau=enabled
      -D gallium-omx=bellagio
      -D gallium-va=enabled
      -D gallium-xa=enabled
      -D gallium-nine=true
      -D gallium-opencl=icd
      -D gles1=enabled
      -D gles2=enabled
      -D llvm=enabled
      -D microsoft-clc=enabled
      -D shared-llvm=enabled
      -D opencl-spirv=true
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,swrast,panfrost,imagination-experimental,microsoft-experimental
    EXTRA_OPTION:
      -D spirv-to-dxil=true
      -D osmesa=true
      -D imagination-srv=true
      -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi,imagination
      -D vulkan-layers=device-select,overlay
      -D build-aco-tests=true
      -D intel-clc=enabled
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,swrast,panfrost,imagination-experimental
    EXTRA_OPTIONS:
      -D imagination-srv=true
    CC: clang
    CXX: clang++

debian-clang-release:
  extends: debian-clang
  variables:
    BUILDTYPE: "release"
    DRI_LOADERS: >
      -D glx=xlib
      -D platforms=x11,wayland

windows-vs2019:
  extends:
    - .build-windows
@@ -473,56 +413,39 @@ windows-vs2019:
    - .windows-build-rules
  stage: build-misc
  script:
    - pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1
    - . .\.gitlab-ci\windows\mesa_build.ps1
  artifacts:
    paths:
      - _build/meson-logs/*.txt
      - _install/

.debian-cl:
debian-clover:
  extends: .meson-build
  variables:
    LLVM_VERSION: "13"
    UNWIND: "enabled"
    DRI_LOADERS: >
      -D glx=disabled
      -D egl=disabled
      -D gbm=disabled
    EXTRA_OPTION: >
      -D valgrind=false

debian-clover:
  extends: .debian-cl
  variables:
    GALLIUM_DRIVERS: "r600,radeonsi,swrast"
    GALLIUM_DRIVERS: "r600,radeonsi"
    GALLIUM_ST: >
      -D dri3=disabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=icd
      -D gallium-rusticl=false

debian-rusticl:
  extends: .debian-cl
  variables:
    GALLIUM_DRIVERS: "iris,swrast"
    GALLIUM_ST: >
      -D dri3=disabled
      -D gallium-vdpau=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D gallium-rusticl=true
    EXTRA_OPTION: >
      -D valgrind=false
  script:
    - LLVM_VERSION=9 GALLIUM_DRIVERS=r600,swrast .gitlab-ci/meson/build.sh
    - .gitlab-ci/meson/build.sh

debian-vulkan:
  extends: .meson-build
  variables:
    LLVM_VERSION: "13"
    UNWIND: "disabled"
    DRI_LOADERS: >
      -D glx=disabled
@@ -533,21 +456,21 @@ debian-vulkan:
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
      -D gallium-opencl=disabled
      -D gallium-rusticl=false
      -D b_sanitize=undefined
      -D c_args=-fno-sanitize-recover=all
      -D cpp_args=-fno-sanitize-recover=all
    UBSAN_OPTIONS: "print_stacktrace=1"
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,imagination-experimental,microsoft-experimental
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,imagination-experimental
    EXTRA_OPTION: >
      -D vulkan-layers=device-select,overlay
      -D build-aco-tests=true
      -D intel-clc=disabled
      -D intel-clc=enabled
      -D imagination-srv=true

debian-i386:
@@ -558,7 +481,6 @@ debian-i386:
    CROSS: i386
    VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
    GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
    LLVM_VERSION: 13
    EXTRA_OPTION: >
      -D vulkan-layers=device-select,overlay

@@ -572,7 +494,8 @@ debian-s390x:
  variables:
    CROSS: s390x
    GALLIUM_DRIVERS: "swrast,zink"
    LLVM_VERSION: 13
    # The lp_test_blend test times out with LLVM 11
    LLVM_VERSION: 9
    VULKAN_DRIVERS: "swrast"

debian-ppc64el:
@@ -586,43 +509,17 @@ debian-ppc64el:
    VULKAN_DRIVERS: "amd,swrast"

debian-mingw32-x86_64:
  extends: .meson-build_mingw
  extends: .meson-build
  stage: build-misc
  variables:
    UNWIND: "disabled"
    C_ARGS: >
      -Wno-error=format
      -Wno-error=format-extra-args
      -Wno-error=deprecated-declarations
      -Wno-error=unused-function
      -Wno-error=unused-variable
      -Wno-error=unused-but-set-variable
      -Wno-error=unused-value
      -Wno-error=switch
      -Wno-error=parentheses
      -Wno-error=missing-prototypes
      -Wno-error=sign-compare
      -Wno-error=narrowing
      -Wno-error=overflow
    CPP_ARGS: $C_ARGS
    GALLIUM_DRIVERS: "swrast,d3d12,zink"
    VULKAN_DRIVERS: "swrast,amd,microsoft-experimental"
    GALLIUM_ST: >
      -D gallium-opencl=icd
      -D gallium-rusticl=false
      -D opencl-spirv=true
      -D microsoft-clc=enabled
      -D static-libclc=all
      -D llvm=enabled
      -D gallium-va=true
      -D video-codecs=h264dec,h264enc,h265dec,h265enc,vc1dec
    GALLIUM_DRIVERS: "swrast"
    EXTRA_OPTION: >
      -D min-windows-version=7
      -D spirv-to-dxil=true
      -D gles1=enabled
      -D gles2=enabled
      -D osmesa=true
      -D cpp_rtti=true
      -D shared-glapi=enabled
      -D zlib=enabled
      -Dllvm=disabled
      -Dzlib=disabled
      -Dosmesa=true
      --cross-file=.gitlab-ci/x86_64-w64-mingw32
@@ -1,7 +1,6 @@
#!/bin/bash

for var in \
    ACO_DEBUG \
    ASAN_OPTIONS \
    BASE_SYSTEM_FORK_HOST_PREFIX \
    BASE_SYSTEM_MAINLINE_HOST_PREFIX \
@@ -81,7 +80,6 @@ for var in \
    MESA_IMAGE \
    MESA_IMAGE_PATH \
    MESA_IMAGE_TAG \
    MESA_LOADER_DRIVER_OVERRIDE \
    MESA_TEMPLATES_COMMIT \
    MESA_VK_IGNORE_CONFORMANCE_WARNING \
    MESA_SPIRV_LOG_LEVEL \
@@ -105,17 +103,13 @@ for var in \
    PIGLIT_RESULTS \
    PIGLIT_TESTS \
    PIPELINE_ARTIFACTS_BASE \
    RADV_DEBUG \
    RADV_PERFTEST \
    SKQP_ASSETS_DIR \
    SKQP_BACKENDS \
    TU_DEBUG \
    VIRGL_HOST_API \
    WAFFLE_PLATFORM \
    VK_CPU \
    VK_DRIVER \
    VK_ICD_FILENAMES \
    VKD3D_PROTON_RESULTS \
    ; do
    if [ -n "${!var+x}" ]; then
        echo "export $var=${!var@Q}"
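For reference, a Python analogue of this loop: bash's ${!var+x} tests whether the indirectly named variable is set, and ${!var@Q} quotes its value for safe re-export. The variable subset below is illustrative.

import os
import shlex

for var in ("MESA_IMAGE", "PIGLIT_TESTS", "VK_DRIVER"):
    if var in os.environ:
        print(f"export {var}={shlex.quote(os.environ[var])}")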
@@ -1,38 +1,5 @@
#!/bin/sh

# Make sure to kill this script and all of its child processes on exit, since
# any console output may interfere with LAVA signal handling, which is based
# on the log console.
cleanup() {
    if [ "$BACKGROUND_PIDS" = "" ]; then
        return 0
    fi

    set +x
    echo "Killing all child processes"
    for pid in $BACKGROUND_PIDS
    do
        kill "$pid" 2>/dev/null || true
    done

    # Sleep just a little to give enough time for subprocesses to be gracefully
    # killed. Then apply a SIGKILL if necessary.
    sleep 5
    for pid in $BACKGROUND_PIDS
    do
        kill -9 "$pid" 2>/dev/null || true
    done

    BACKGROUND_PIDS=
    set -x
}
trap cleanup INT TERM EXIT

# Space-separated list of the PIDs of the processes started in the background
# by this script
BACKGROUND_PIDS=


# Second-stage init, used to set up devices and our job environment before
# running tests.

@@ -93,11 +60,10 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then
    # Disable GPU runtime power management
    GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
    test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true

    # Lock the Intel GPU frequency to 70% of the maximum allowed by hardware
    # and enable throttling detection & reporting.
    # Additionally, set the upper limit for CPU scaling frequency to 65% of the
    # maximum permitted, as an additional measure to mitigate thermal throttling.
    ./intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
    ./intel-gpu-freq.sh -s 70% -g all -d
fi

# Increase the freedreno hangcheck timer because it's right at the edge of the
@@ -108,8 +74,7 @@ fi

# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them.)
/capture-devcoredump.sh &
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
./capture-devcoredump.sh &

# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
@@ -119,7 +84,6 @@ if [ -n "$HWCI_START_XORG" ]; then
    echo "touch /xorg-started; sleep 100000" > /xorg-script
    env \
        xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

    # Wait for xorg to be ready for connections.
    for i in 1 2 3 4 5; do
@@ -131,34 +95,18 @@ if [ -n "$HWCI_START_XORG" ]; then
    export DISPLAY=:0
fi

RESULT=fail
set +e
sh -c "$HWCI_TEST_SCRIPT"
EXIT_CODE=$?
set -e
sh -c "$HWCI_TEST_SCRIPT" && RESULT=pass || RESULT=fail

# Let's make sure the results are always stored in the current working directory
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true

[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"

# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup
[ "${RESULT}" = "fail" ] || rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME

# upload artifacts
if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
    tar --zstd -cf results.tar.zst results/;
    ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.zst https://"$MINIO_RESULTS_UPLOAD"/results.tar.zst;
    tar -czf results.tar.gz results/;
    ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}";
    ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz;
fi

# We still need to echo the hwci: mesa message, as some scripts rely on it,
# such as the Python ones inside the bare-metal folder
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass

set +x
echo "hwci: mesa: $RESULT"
# Sleep a bit to avoid kernel dump messages interleaving with the LAVA ENDTC signal
sleep 1
exit $EXIT_CODE
@@ -1,15 +1,8 @@
#!/bin/sh
#
# This is an utility script to manage Intel GPU frequencies.
# It can be used for debugging performance problems or trying to obtain a stable
# frequency while benchmarking.
#
# Note the Intel i915 GPU driver allows to change the minimum, maximum and boost
# frequencies in steps of 50 MHz via:
#
# /sys/class/drm/card<n>/<freq_info>
#
# Where <n> is the DRM card index and <freq_info> one of the following:
# The Intel i915 GPU driver allows to change the minimum, maximum and boost
# frequencies in steps of 50 MHz via /sys/class/drm/card<n>/<freq_info>,
# where <n> is the DRM card index and <freq_info> one of the following:
#
# - gt_max_freq_mhz (enforced maximum freq)
# - gt_min_freq_mhz (enforced minimum freq)
@@ -25,11 +18,6 @@
# - gt_act_freq_mhz (the actual GPU freq)
# - gt_cur_freq_mhz (the last requested freq)
#
# Also note that in addition to GPU management, the script offers the
# possibility to adjust CPU operating frequencies. However, this is currently
# limited to just setting the maximum scaling frequency as percentage of the
# maximum frequency allowed by the hardware.
#
# Copyright (C) 2022 Collabora Ltd.
# Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
#
@@ -39,8 +27,6 @@
#
# Constants
#

# GPU
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
ENF_FREQ_INFO="max min boost"
CAP_FREQ_INFO="RP0 RPn RP1"
@@ -48,14 +34,6 @@ ACT_FREQ_INFO="act cur"
THROTT_DETECT_SLEEP_SEC=2
THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid

# CPU
CPU_SYSFS_PREFIX=/sys/devices/system/cpu
CPU_PSTATE_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/intel_pstate/%s"
CPU_FREQ_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/cpu%s/cpufreq/%s_freq"
CAP_CPU_FREQ_INFO="cpuinfo_max cpuinfo_min"
ENF_CPU_FREQ_INFO="scaling_max scaling_min"
ACT_CPU_FREQ_INFO="scaling_cur"

#
# Global variables.
#
@@ -63,7 +41,6 @@ unset INTEL_DRM_CARD_INDEX
unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ
unset SET_MIN_FREQ SET_MAX_FREQ
unset MONITOR_FREQ
unset CPU_SET_MAX_FREQ
unset DETECT_THROTT
unset DRY_RUN

@@ -121,16 +98,14 @@ identify_intel_gpu() {
# return: Global variable(s) FREQ_${arg} containing the requested information
#
read_freq_info() {
local var val info path print=0 ret=0
local var val path print=0 ret=0

[ "$1" = "y" ] && print=1
shift

while [ $# -gt 0 ]; do
info=$1
shift
var=FREQ_${info}
path=$(print_freq_sysfs_path "${info}")
var=FREQ_$1
path=$(print_freq_sysfs_path "$1")

[ -r ${path} ] && read ${var} < ${path} || {
log ERROR "Failed to read freq info from: %s" "${path}"
@@ -146,8 +121,10 @@ read_freq_info() {

[ ${print} -eq 1 ] && {
eval val=\$${var}
printf "%6s: %4s MHz\n" "${info}" "${val}"
printf "%6s: %4s MHz\n" "$1" "${val}"
}

shift
done

return ${ret}
@@ -415,156 +392,6 @@ detect_throttling() {
esac
}

#
# Retrieve the list of online CPUs.
#
get_online_cpus() {
local path cpu_index

printf "0"
for path in $(grep 1 ${CPU_SYSFS_PREFIX}/cpu*/online); do
cpu_index=${path##*/cpu}
printf " %s" ${cpu_index%%/*}
done
}

#
# Helper to print sysfs path for the given CPU index and freq info.
#
# arg1: Frequency info sysfs name, one of *_CPU_FREQ_INFO constants above
# arg2: CPU index
#
print_cpu_freq_sysfs_path() {
printf ${CPU_FREQ_SYSFS_PATTERN} "$2" "$1"
}

#
# Read the specified CPU freq info from sysfs.
#
# arg1: CPU index
# arg2: Flag (y/n) to also enable printing the freq info.
# arg3...: Frequency info sysfs name(s), see *_CPU_FREQ_INFO constants above
# return: Global variable(s) CPU_FREQ_${arg} containing the requested information
#
read_cpu_freq_info() {
local var val info path cpu_index print=0 ret=0

cpu_index=$1
[ "$2" = "y" ] && print=1
shift 2

while [ $# -gt 0 ]; do
info=$1
shift
var=CPU_FREQ_${info}
path=$(print_cpu_freq_sysfs_path "${info}" ${cpu_index})

[ -r ${path} ] && read ${var} < ${path} || {
log ERROR "Failed to read CPU freq info from: %s" "${path}"
ret=1
continue
}

[ -n "${var}" ] || {
log ERROR "Got empty CPU freq info from: %s" "${path}"
ret=1
continue
}

[ ${print} -eq 1 ] && {
eval val=\$${var}
printf "%6s: %4s Hz\n" "${info}" "${val}"
}
done

return ${ret}
}

#
# Helper to print freq. value as requested by user via '--cpu-set-max' option.
# arg1: user requested freq value
#
compute_cpu_freq_set() {
local val

case "$1" in
+)
val=${CPU_FREQ_cpuinfo_max}
;;
-)
val=${CPU_FREQ_cpuinfo_min}
;;
*%)
val=$((${1%?} * ${CPU_FREQ_cpuinfo_max} / 100))
;;
*[!0-9]*)
log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
return 1
;;
"")
log ERROR "Cannot set CPU freq to unspecified value"
return 1
;;
*)
log ERROR "Cannot set CPU freq to custom value; use +, -, or % instead"
return 1
;;
esac

printf "%s" "${val}"
}

#
# Adjust CPU max scaling frequency.
#
set_cpu_freq_max() {
local target_freq res=0
case "${CPU_SET_MAX_FREQ}" in
+)
target_freq=100
;;
-)
target_freq=1
;;
*%)
target_freq=${CPU_SET_MAX_FREQ%?}
;;
*)
log ERROR "Invalid CPU freq"
return 1
;;
esac

local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
[ -e "${pstate_info}" ] && {
log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
printf "%s" "${target_freq}" > "${pstate_info}"
[ $? -eq 0 ] || {
log ERROR "Failed to set intel_pstate max perf"
res=1
}
}

local cpu_index
for cpu_index in $(get_online_cpus); do
read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }

target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
[ -z "${target_freq}" ] && { res=$?; continue; }

log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
[ -n "${DRY_RUN}" ] && continue

printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index})
[ $? -eq 0 ] || {
res=1
log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
}
done

return ${res}
}

#
# Show help message.
#
@@ -599,12 +426,6 @@ Options:
as a background process. Use 'stop' or 'status' to
terminate the detector process or verify its status.

--cpu-set-max [FREQUENCY%|+|-}
Set CPU max scaling frequency as % of hw max.
Use '+' or '-' to set frequency to hardware max or min.

-r, --reset Reset frequencies to hardware defaults.

--dry-run See what the script will do without applying any
frequency changes.

@@ -640,8 +461,6 @@ parse_option_get() {

#
# Validate user input for '-s, --set' option.
# arg1: input value to be validated
# arg2: optional flag indicating input is restricted to %
#
validate_option_set() {
case "$1" in
@@ -653,8 +472,6 @@ validate_option_set() {
exit 1
;;
esac

[ -z "$2" ] || { print_usage; exit 1; }
}

#
@@ -708,12 +525,6 @@ while [ $# -gt 0 ]; do
esac
;;

--cpu-set-max)
shift
CPU_SET_MAX_FREQ=$1
validate_option_set "${CPU_SET_MAX_FREQ}" restricted
;;

--dry-run)
DRY_RUN=1
;;
@@ -747,8 +558,6 @@ print_freq_info

[ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT}

[ -n "${CPU_SET_MAX_FREQ}" ] && { set_cpu_freq_max || RET=$?; }

[ -n "${MONITOR_FREQ}" ] && {
log INFO "Entering frequency monitoring mode"
sleep 2
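A minimal sketch of the sysfs interface the script wraps, not part of the diff; it assumes card0 is the Intel GPU and uses only the freq_info names documented above:

# Read the actual and last-requested GPU frequency:
cat /sys/class/drm/card0/gt_act_freq_mhz
cat /sys/class/drm/card0/gt_cur_freq_mhz
# Pin the GPU to a stable 350 MHz (values must be multiples of 50 MHz):
echo 350 > /sys/class/drm/card0/gt_min_freq_mhz
echo 350 > /sys/class/drm/card0/gt_max_freq_mhz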
@@ -55,9 +55,3 @@ CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y

# TK1
CONFIG_ARM_TEGRA_DEVFREQ=y

# 32-bit build failure
CONFIG_DRM_MSM=n
@@ -16,7 +16,6 @@ CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_MSM=y
CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
@@ -6,34 +6,32 @@ set -o xtrace
# Fetch the arm-built rootfs image and unpack it in our x86 container (saves
# network transfer, disk usage, and runtime on test jobs)

# shellcheck disable=SC2154 # arch is assigned in previous scripts
if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
else
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
fi

wget "${ARTIFACTS_URL}"/lava-rootfs.tar.zst -O rootfs.tar.zst
mkdir -p /rootfs-"$arch"
tar -C /rootfs-"$arch" '--exclude=./dev/*' --zstd -xf rootfs.tar.zst
rm rootfs.tar.zst
wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz
mkdir -p /rootfs-$arch
tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz
rm rootfs.tgz

if [[ $arch == "arm64" ]]; then
mkdir -p /baremetal-files
pushd /baremetal-files

wget "${ARTIFACTS_URL}"/Image
wget "${ARTIFACTS_URL}"/Image.gz
wget "${ARTIFACTS_URL}"/cheza-kernel
wget ${ARTIFACTS_URL}/Image
wget ${ARTIFACTS_URL}/Image.gz
wget ${ARTIFACTS_URL}/cheza-kernel

DEVICE_TREES=""
DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb"

for DTB in $DEVICE_TREES; do
wget "${ARTIFACTS_URL}/$DTB"
wget ${ARTIFACTS_URL}/$DTB
done

popd
@@ -41,14 +39,12 @@ elif [[ $arch == "armhf" ]]; then
mkdir -p /baremetal-files
pushd /baremetal-files

wget "${ARTIFACTS_URL}"/zImage
wget ${ARTIFACTS_URL}/zImage

DEVICE_TREES=""
DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb"
DEVICE_TREES="imx6q-cubox-i.dtb"

for DTB in $DEVICE_TREES; do
wget "${ARTIFACTS_URL}/$DTB"
wget ${ARTIFACTS_URL}/$DTB
done

popd
@@ -1,16 +1,15 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

APITRACE_VERSION="790380e05854d5c9d315555444ffcc7acb8f4037"
APITRACE_VERSION="170424754bb46002ba706e16ee5404b61988d74a"

git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
pushd /apitrace
git checkout "$APITRACE_VERSION"
git submodule update --init --depth 1 --recursive
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
cmake --build _build --parallel --target apitrace eglretrace
ninja -C _build
mkdir build
cp _build/apitrace build
cp _build/eglretrace build
@@ -1,23 +1,24 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
SCRIPT_DIR="$(pwd)"

CROSVM_VERSION=acd262cb42111c53b580a67355e795775545cced
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
CROSVM_VERSION=c7cd0e0114c8363b884ba56d8e12adee718dcc93
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
git submodule update --init
# Apply all crosvm patches for Mesa CI
cat "$SCRIPT_DIR"/.gitlab-ci/container/build-crosvm_*.patch |
patch -p1

VIRGLRENDERER_VERSION=3c5a9bbb7464e0e91e446991055300f4f989f6a9
VIRGLRENDERER_VERSION=0564c9a0c2f584e004a7d4864aee3b8ec9692105
rm -rf third_party/virglrenderer
git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
pushd third_party/virglrenderer
git checkout "$VIRGLRENDERER_VERSION"
meson build/ -Drender-server=true -Drender-server-worker=process -Dvenus-experimental=true $EXTRA_MESON_ARGS
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd

@@ -25,7 +26,6 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \
bindgen \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local \
--version 0.60.1 \
$EXTRA_CARGO_ARGS

RUSTFLAGS='-L native=/usr/local/lib' cargo install \
.gitlab-ci/container/build-crosvm_no-syslog.patch (new file, 43 lines)
@@ -0,0 +1,43 @@
From 3c57ec558bccc67fd53363c23deea20646be5c47 Mon Sep 17 00:00:00 2001
From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Date: Wed, 17 Nov 2021 10:18:04 +0100
Subject: [PATCH] Hack syslog out

It's causing stability problems when running several Crosvm instances in
parallel.

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
---
base/src/unix/linux/syslog.rs | 2 +-
common/sys_util/src/linux/syslog.rs | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/base/src/unix/linux/syslog.rs b/base/src/unix/linux/syslog.rs
index 05972a3a..f0db3781 100644
--- a/base/src/unix/linux/syslog.rs
+++ b/base/src/unix/linux/syslog.rs
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
impl Syslog for PlatformSyslog {
fn new() -> Result<Self, Error> {
Ok(Self {
- socket: Some(openlog_and_get_socket()?),
+ socket: None,
})
}

diff --git a/common/sys_util/src/linux/syslog.rs b/common/sys_util/src/linux/syslog.rs
index 05972a3a..f0db3781 100644
--- a/common/sys_util/src/linux/syslog.rs
+++ b/common/sys_util/src/linux/syslog.rs
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
impl Syslog for PlatformSyslog {
fn new() -> Result<Self, Error> {
Ok(Self {
- socket: Some(openlog_and_get_socket()?),
+ socket: None,
})
}

--
2.25.1
@@ -1,5 +1,4 @@
#!/bin/sh
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -16,16 +15,10 @@ if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
else
# Install from package registry
DEQP_RUNNER_CARGO_ARGS="--version 0.15.0 ${EXTRA_CARGO_ARGS} -- deqp-runner"
DEQP_RUNNER_CARGO_ARGS="--version 0.13.1 ${EXTRA_CARGO_ARGS} -- deqp-runner"
fi

cargo install --locked \
-j ${FDO_CI_CONCURRENT:-4} \
--root /usr/local \
${DEQP_RUNNER_CARGO_ARGS}

# remove unused test runners to shrink images for the Mesa CI build (not kernel,
# which chooses its own deqp branch)
if [ -z "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
rm -f /usr/local/bin/igt-runner
fi
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -7,17 +6,14 @@ git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/KhronosGroup/VK-GL-CTS.git \
-b vulkan-cts-1.3.3.0 \
-b vulkan-cts-1.3.1.1 \
--depth 1 \
/VK-GL-CTS
pushd /VK-GL-CTS

# Apply a patch to update zlib link to an available version.
# vulkan-cts-1.3.3.0 uses zlib 1.2.12 which was removed from zlib server due to
# a CVE. See https://zlib.net/
# FIXME: Remove this patch when uprev to 1.3.4.0+
wget -O- https://github.com/KhronosGroup/VK-GL-CTS/commit/6bb2e7d64261bedb503947b1b251b1eeeb49be73.patch |
git am -
# Cherry-pick fix for zlib dependency
git fetch origin main
git cherry-pick -x ec1804831b654ac55bd2a7a5dd27a556afe05030

# --insecure is due to SSL cert failures hitting sourceforge for zlib and
# libpng (sigh). The archives get their checksums checked anyway, and git
@@ -51,8 +47,8 @@ mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl

# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt) ; do
cat /VK-GL-CTS/external/vulkancts/mustpass/master/$mustpass \
>> /deqp/mustpass/vk-master.txt
done

@@ -68,9 +64,6 @@ cp \
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
/deqp/mustpass/.
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass_single/4.6.1.x/*-single.txt \
/deqp/mustpass/.

# Save *some* executor utils, but otherwise strip things down
# to reduct deqp build size:
@@ -88,11 +81,10 @@ rm -rf /deqp/external/openglcts/modules/cts-runner
rm -rf /deqp/modules/internal
rm -rf /deqp/execserver
rm -rf /deqp/framework
# shellcheck disable=SC2038,SC2185 # TODO: rewrite find
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
${STRIP_CMD:-strip} external/openglcts/modules/glcts
${STRIP_CMD:-strip} modules/*/deqp-*
du -sh ./*
du -sh *
rm -rf /VK-GL-CTS
popd
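The mustpass loop above is equivalent to the following sketch (an assumption for illustration, not part of the diff; it presumes vk-default.txt lists one mustpass file per line):

# Concatenate every listed mustpass file into one flat vk-master.txt:
while read -r mustpass; do
cat "/VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass" >> /deqp/mustpass/vk-master.txt
done < /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt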
@@ -2,18 +2,18 @@

set -ex

GFXRECONSTRUCT_VERSION=5ed3caeecc46e976c4df31e263df8451ae176c26
GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1

git clone https://github.com/LunarG/gfxreconstruct.git \
--single-branch \
-b master \
--no-checkout \
/gfxreconstruct
git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct
pushd /gfxreconstruct
git checkout "$GFXRECONSTRUCT_VERSION"
git submodule update --init
git submodule update
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=/gfxreconstruct/build -DBUILD_WERROR=OFF
cmake --build _build --parallel --target tools/{replay,info}/install/strip
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C _build gfxrecon-replay gfxrecon-info
mkdir -p build/bin
install _build/tools/replay/gfxrecon-replay build/bin
install _build/tools/info/gfxrecon-info build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd
@@ -2,7 +2,7 @@

set -ex

PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
PARALLEL_DEQP_RUNNER_VERSION=6596b71cf37a7efb4d54acd48c770ed2d4ad6b7e

git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
pushd /parallel-deqp-runner
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -12,15 +11,12 @@ pushd kernel
# debian (they'll get blown away by the rm of the kernel dir at the end).
mkdir -p ld-links
for i in /usr/bin/*-ld /usr/bin/ld; do
i=$(basename $i)
i=`basename $i`
ln -sf /usr/bin/$i.bfd ld-links/$i
done
export PATH=`pwd`/ld-links:$PATH

NEWPATH=$(pwd)/ld-links
export PATH=$NEWPATH:$PATH

KERNEL_FILENAME=$(basename $KERNEL_URL)
export LOCALVERSION="$KERNEL_FILENAME"
export LOCALVERSION="`basename $KERNEL_URL`"
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make ${KERNEL_IMAGE_NAME}
for image in ${KERNEL_IMAGE_NAME}; do
@@ -32,8 +28,10 @@ if [[ -n ${DEVICE_TREES} ]]; then
cp ${DEVICE_TREES} /lava-files/.
fi

make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
if [[ ${DEBIAN_ARCH} = "amd64" || ${DEBIAN_ARCH} = "arm64" ]]; then
make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi

if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
make Image.lzma
@@ -26,5 +26,5 @@ mkdir -p /usr/lib/clc
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/

du -sh ./*
du -sh *
rm -rf /libclc /llvm-project
@@ -1,14 +1,14 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

export LIBDRM_VERSION=libdrm-2.4.110

wget https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz
tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz
cd "$LIBDRM_VERSION"
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
cd $LIBDRM_VERSION
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
ninja -C build install
cd ..
rm -rf "$LIBDRM_VERSION"
rm -rf $LIBDRM_VERSION
@@ -1,19 +0,0 @@
#!/bin/bash

set -ex

wget https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v13.0.0.tar.gz
tar -xvf v13.0.0.tar.gz && rm v13.0.0.tar.gz

mkdir SPIRV-LLVM-Translator-13.0.0/build
pushd SPIRV-LLVM-Translator-13.0.0/build
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
ninja
ninja install
# For some reason llvm-spirv is not installed by default
ninja llvm-spirv
cp tools/llvm-spirv/llvm-spirv /usr/bin/
popd

du -sh SPIRV-LLVM-Translator-13.0.0
rm -rf SPIRV-LLVM-Translator-13.0.0
@@ -1,12 +0,0 @@
#!/bin/bash

set -ex

MOLD_VERSION="1.6.0"

git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
cd mold
make
make install
cd ..
rm -rf mold
@@ -1,24 +1,16 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout 591c91865012de4224bea551eac5d2274acf06ad

# TODO: Remove the following patch when piglit commit got past
# 1cd716180cfb6ef0c1fc54702460ef49e5115791
git apply $OLDPWD/.gitlab-ci/piglit/build-piglit_backport-s3-migration.diff

git checkout 445711587d461539a4d8f9d35a7fe996a86d3c8d
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
# shellcheck disable=SC2038,SC2185 # TODO: rewrite find
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
rm -rf target_api
if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then
# shellcheck disable=SC2038,SC2185 # TODO: rewrite find
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
find ! -regex "^\.$" \
! -regex "^\.\/piglit.*" \
! -regex "^\.\/framework.*" \
@@ -8,24 +8,17 @@ set -ex
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
# are just available to all build jobs.
mkdir -p "$HOME"/.cargo
ln -s /usr/local/bin "$HOME"/.cargo/bin

# Rusticl requires at least Rust 1.59.0
#
# Also, pick a specific snapshot from rustup so the compiler doesn't drift on
# us.
RUST_VERSION=1.59.0-2022-02-24
mkdir -p $HOME/.cargo
ln -s /usr/local/bin $HOME/.cargo/bin

# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes
# with.
wget https://sh.rustup.rs -O - | sh -s -- \
--default-toolchain $RUST_VERSION \
--profile minimal \
-y

rustup component add rustfmt
#
# Pick the rust compiler (1.48) available in Debian stable, and pick a specific
# snapshot from rustup so the compiler doesn't drift on us.
wget https://sh.rustup.rs -O - | \
sh -s -- -y --default-toolchain 1.49.0-2020-12-31

# Set up a config script for cross compiling -- cargo needs your system cc for
# linking in cross builds, but doesn't know what you want to use for system cc.
@@ -41,7 +41,7 @@ download_skia_source() {
# Skia cloned from https://android.googlesource.com/platform/external/skqp
# has all needed assets tracked on git-fs
SKQP_REPO=https://android.googlesource.com/platform/external/skqp
SKQP_BRANCH=android-cts-11.0_r7
SKQP_BRANCH=android-cts-10.0_r11

git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
}
@@ -55,9 +55,9 @@ BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
SKQP_ARCH=${SKQP_ARCH:-x64}
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp}
SKQP_INSTALL_DIR=/skqp
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms)
SKQP_BINARIES=(skqp)

download_skia_source

@@ -1,41 +0,0 @@
diff --git a/tools/skqp/src/skqp.cpp b/tools/skqp/src/skqp.cpp
index 50ed9db01d..938217000d 100644
--- a/tools/skqp/src/skqp.cpp
+++ b/tools/skqp/src/skqp.cpp
@@ -448,7 +448,7 @@ inline void write(SkWStream* wStream, const T& text) {

void SkQP::makeReport() {
SkASSERT_RELEASE(fAssetManager);
- int glesErrorCount = 0, vkErrorCount = 0, gles = 0, vk = 0;
+ int glErrorCount = 0, glesErrorCount = 0, vkErrorCount = 0, gl = 0, gles = 0, vk = 0;

if (!sk_isdir(fReportDirectory.c_str())) {
SkDebugf("Report destination does not exist: '%s'\n", fReportDirectory.c_str());
@@ -460,6 +460,7 @@ void SkQP::makeReport() {
htmOut.writeText(kDocHead);
for (const SkQP::RenderResult& run : fRenderResults) {
switch (run.fBackend) {
+ case SkQP::SkiaBackend::kGL: ++gl; break;
case SkQP::SkiaBackend::kGLES: ++gles; break;
case SkQP::SkiaBackend::kVulkan: ++vk; break;
default: break;
@@ -477,15 +478,17 @@ void SkQP::makeReport() {
}
write(&htmOut, SkStringPrintf(" f(%s);\n", str.c_str()));
switch (run.fBackend) {
+ case SkQP::SkiaBackend::kGL: ++glErrorCount; break;
case SkQP::SkiaBackend::kGLES: ++glesErrorCount; break;
case SkQP::SkiaBackend::kVulkan: ++vkErrorCount; break;
default: break;
}
}
htmOut.writeText(kDocMiddle);
- write(&htmOut, SkStringPrintf("<p>gles errors: %d (of %d)</br>\n"
+ write(&htmOut, SkStringPrintf("<p>gl errors: %d (of %d)</br>\n"
+ "gles errors: %d (of %d)</br>\n"
"vk errors: %d (of %d)</p>\n",
- glesErrorCount, gles, vkErrorCount, vk));
+ glErrorCount, gl, glesErrorCount, gles, vkErrorCount, vk));
htmOut.writeText(kDocTail);
SkFILEWStream unitOut(SkOSPath::Join(fReportDirectory.c_str(), kUnitTestReportPath).c_str());
SkASSERT_RELEASE(unitOut.isValid());
@@ -1,18 +0,0 @@
Nima-Cpp is not available anymore inside googlesource, revert to github one
Simulates `git revert 49233d2521054037ded7d760427c4a0dc1e11356`

diff --git a/DEPS b/DEPS
index 7e0b941..c88b064 100644
--- a/DEPS
+++ b/DEPS
@@ -33,8 +33,8 @@ deps = {
#"third_party/externals/v8" : "https://chromium.googlesource.com/v8/v8.git@5f1ae66d5634e43563b2d25ea652dfb94c31a3b4",
"third_party/externals/wuffs" : "https://skia.googlesource.com/external/github.com/google/wuffs.git@fda3c4c9863d9f9fcec58ae66508c4621fc71ea5",
"third_party/externals/zlib" : "https://chromium.googlesource.com/chromium/src/third_party/zlib@47af7c547f8551bd25424e56354a2ae1e9062859",
- "third_party/externals/Nima-Cpp" : "https://skia.googlesource.com/external/github.com/2d-inc/Nima-Cpp.git@4bd02269d7d1d2e650950411325eafa15defb084",
- "third_party/externals/Nima-Math-Cpp" : "https://skia.googlesource.com/external/github.com/2d-inc/Nima-Math-Cpp.git@e0c12772093fa8860f55358274515b86885f0108",
+ "third_party/externals/Nima-Cpp" : "https://github.com/2d-inc/Nima-Cpp.git@4bd02269d7d1d2e650950411325eafa15defb084",
+ "third_party/externals/Nima-Math-Cpp" : "https://github.com/2d-inc/Nima-Math-Cpp.git@e0c12772093fa8860f55358274515b86885f0108",

"../src": {
"url": "https://chromium.googlesource.com/chromium/src.git@ccf3465732e5d5363f0e44a8fac54550f62dd1d0",
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -2,7 +2,8 @@

set -ex

VKD3D_PROTON_COMMIT="5b73139f182d86cd58a757e4b5f0d4cfad96d319"
VKD3D_PROTON_VERSION="2.6"
VKD3D_PROTON_COMMIT="3e5aab6fb3e18f81a71b339be4cb5cdf55140980"

VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
@@ -15,7 +16,7 @@ function build_arch {
meson "$@" \
-Denable_tests=true \
--buildtype release \
--prefix "$VKD3D_PROTON_DST_DIR" \
--prefix "$VKD3D_PROTON_BUILD_DIR" \
--strip \
--bindir "x${arch}" \
--libdir "x${arch}" \
@@ -23,17 +24,20 @@

ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install

install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/d3d12"
install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/"*.exe
}

git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR"
git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b "v$VKD3D_PROTON_VERSION" --no-checkout "$VKD3D_PROTON_SRC_DIR"
pushd "$VKD3D_PROTON_SRC_DIR"
git checkout "$VKD3D_PROTON_COMMIT"
git submodule update --init --recursive
git submodule update --recursive
build_arch 64
build_arch 86
build_arch 64 --cross-file build-win64.txt
build_arch 86 --cross-file build-win32.txt
cp "setup_vkd3d_proton.sh" "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
chmod +x "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
popd

"$VKD3D_PROTON_BUILD_DIR"/setup_vkd3d_proton.sh install
rm -rf "$VKD3D_PROTON_BUILD_DIR"
rm -rf "$VKD3D_PROTON_SRC_DIR"
@@ -17,8 +17,7 @@ export PATH=$CCACHE_PATH:$PATH
export CC="${CCACHE_PATH}/gcc"
export CXX="${CCACHE_PATH}/g++"

# When not using the mold linker (e.g. unsupported architecture), force
# linkers to gold, since it's so much faster for building. We can't use
# Force linkers to gold, since it's so much faster for building. We can't use
# lld because we're on old debian and it's buggy. mingw fails meson builds
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
find /usr/bin -name \*-ld -o -name ld | \
@@ -28,19 +27,10 @@ find /usr/bin -name \*-ld -o -name ld | \
ccache --show-stats

# Make a wrapper script for ninja to always include the -j flags
{
echo '#!/bin/sh -x'
# shellcheck disable=SC2016
echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"'
} > /usr/local/bin/ninja
echo '#!/bin/sh -x' > /usr/local/bin/ninja
echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' >> /usr/local/bin/ninja
chmod +x /usr/local/bin/ninja

# Set MAKEFLAGS so that all make invocations in container builds include the
# flags (doesn't apply to non-container builds, but we don't run make there)
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"

# make wget to try more than once, when download fails or timeout
echo -e "retry_connrefused = on\n" \
"read_timeout = 300\n" \
"tries = 4\n" \
"wait_retry = 32" >> /etc/wgetrc
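For reference, both variants of the wrapper hunk above produce the same two-line script at /usr/local/bin/ninja; only the shell style used to write it differs:

#!/bin/sh -x
/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"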
@@ -13,7 +13,7 @@ arch2=${5:-$2}
# and allowing it in code generation means we get unwind symbols that break
# the libEGL and driver symbol tests.

cat > "$cross_file" <<EOF
cat >$cross_file <<EOF
[binaries]
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-ar'
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
@@ -1,5 +1,4 @@
#!/bin/sh
# shellcheck disable=SC2086 # we want word splitting

# Makes a .pc file in the Android NDK for meson to find its libraries.

@@ -2,7 +2,7 @@

arch=$1
cross_file="/cross_file-$arch.txt"
/usr/share/meson/debcrossgen --arch "$arch" -o "$cross_file"
/usr/share/meson/debcrossgen --arch $arch -o "$cross_file"
# Explicitly set ccache path for cross compilers
sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"
if [ "$arch" = "i386" ]; then
@@ -10,11 +10,10 @@ if [ "$arch" = "i386" ]; then
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file"
fi
# Rely on qemu-user being configured in binfmt_misc on the host
# shellcheck disable=SC1003 # how this sed doesn't seems to work for me locally
sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"

# Add a line for rustc, which debcrossgen is missing.
cc=$(sed -n 's|c = .\(.*\).|\1|p' < "$cross_file")
cc=`sed -n 's|c = .\(.*\).|\1|p' < $cross_file`
if [[ "$arch" = "arm64" ]]; then
rust_target=aarch64-unknown-linux-gnu
elif [[ "$arch" = "armhf" ]]; then
@@ -28,7 +27,6 @@ elif [[ "$arch" = "s390x" ]]; then
else
echo "Needs rustc target mapping"
fi
# shellcheck disable=SC1003 # how this sed doesn't seems to work for me locally
sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"

# Set up cmake cross compile toolchain file for dEQP builds
@@ -36,18 +34,18 @@ toolchain_file="/toolchain-$arch.cmake"
if [[ "$arch" = "arm64" ]]; then
GCC_ARCH="aarch64-linux-gnu"
DE_CPU="DE_CPU_ARM_64"
CMAKE_ARCH=arm
elif [[ "$arch" = "armhf" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DE_CPU="DE_CPU_ARM"
CMAKE_ARCH=arm
fi

if [[ -n "$GCC_ARCH" ]]; then
{
echo "set(CMAKE_SYSTEM_NAME Linux)";
echo "set(CMAKE_SYSTEM_PROCESSOR arm)";
echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)";
echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)";
echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")";
echo "set(DE_CPU $DE_CPU)";
} > "$toolchain_file"
echo "set(CMAKE_SYSTEM_NAME Linux)" > "$toolchain_file"
echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> "$toolchain_file"
echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)" >> "$toolchain_file"
echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)" >> "$toolchain_file"
echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")" >> "$toolchain_file"
echo "set(DE_CPU $DE_CPU)" >> "$toolchain_file"
fi
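For illustration (derived from the echo lines above, not part of the diff): with arch=arm64, either variant generates a /toolchain-arm64.cmake that reads

set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER /usr/lib/ccache/aarch64-linux-gnu-gcc)
set(CMAKE_CXX_COMPILER /usr/lib/ccache/aarch64-linux-gnu-g++)
set(ENV{PKG_CONFIG} "/usr/bin/aarch64-linux-gnu-pkg-config")
set(DE_CPU DE_CPU_ARM_64)

so the new brace-group form only changes how the file is written, not its contents.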
@@ -1,7 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2140 # ugly array, remove later
# shellcheck disable=SC2288 # ugly array, remove later
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -14,37 +11,19 @@ if [ $DEBIAN_ARCH = arm64 ]; then
libvulkan-dev
"
elif [ $DEBIAN_ARCH = amd64 ]; then
# Add llvm 13 to the build image
apt-get -y install --no-install-recommends wget gnupg2 software-properties-common
apt-key add /llvm-snapshot.gpg.key
add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"
# Debian bullseye has older wine 5.0, we want >= 7.0 for traces.
apt-key add /winehq.gpg.key
apt-add-repository https://dl.winehq.org/wine-builds/debian/


ARCH_PACKAGES="firmware-amd-graphics
inetutils-syslogd
iptables
libcap2
libfontconfig1
libelf1
libfdt1
libgl1
libglu1-mesa
libllvm13
libllvm11
libva2
libva-drm2
libvulkan-dev
socat
spirv-tools
sysvinit-core
"

elif [ $DEBIAN_ARCH = armhf ]; then
ARCH_PACKAGES="firmware-misc-nonfree
"
fi

INSTALL_CI_FAIRY_PACKAGES="git
@@ -54,7 +33,6 @@ INSTALL_CI_FAIRY_PACKAGES="git
python3-wheel
"

apt-get update
apt-get -y install --no-install-recommends \
$ARCH_PACKAGES \
$INSTALL_CI_FAIRY_PACKAGES \
@@ -63,7 +41,6 @@ apt-get -y install --no-install-recommends \
ca-certificates \
firmware-realtek \
initramfs-tools \
jq \
libasan6 \
libexpat1 \
libpng16-16 \
@@ -104,29 +81,11 @@ apt-get -y install --no-install-recommends \
waffle-utils \
wget \
xinit \
xserver-xorg-core \
zstd


if [ "$DEBIAN_ARCH" = "amd64" ]; then
# workaround wine needing 32-bit
# https://bugs.winehq.org/show_bug.cgi?id=53393
apt-get install -y --no-remove wine-stable-amd64 # a requirement for wine-stable
WINE_PKG="wine-stable"
WINE_PKG_DROP="wine-stable-i386"
apt download "${WINE_PKG}"
dpkg --ignore-depends="${WINE_PKG_DROP}" -i "${WINE_PKG}"*.deb
rm "${WINE_PKG}"*.deb
sed -i "/${WINE_PKG_DROP}/d" /var/lib/dpkg/status
apt-get install -y --no-remove winehq-stable # symlinks-only, depends on wine-stable
fi
xserver-xorg-core

# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2

# Needed for manipulation with traces yaml files.
pip3 install yq
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125

apt-get purge -y \
$INSTALL_CI_FAIRY_PACKAGES
@@ -229,8 +188,6 @@ UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"libgles2-mesa-dev "\
"libglx-mesa0 "\
"mesa-common-dev "\
"gnupg2 "\
"software-properties-common " \

# Removing unneeded packages
for PACKAGE in ${UNNEEDED_PACKAGES}
@@ -255,7 +212,7 @@ rm -rf etc/dpkg
# Drop directories not part of ostree
# Note that /var needs to exist as ostree bind mounts the deployment /var over
# it
rm -rf var/* srv share
rm -rf var/* opt srv share

# ca-certificates are in /etc drop the source
rm -rf usr/share/ca-certificates
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -38,9 +37,8 @@ apt-get install -y --no-remove \
wget

if [[ $arch != "armhf" ]]; then
# See the list of available architectures in https://apt.llvm.org/bullseye/dists/llvm-toolchain-bullseye-13/main/
if [[ $arch == "s390x" ]] || [[ $arch == "i386" ]] || [[ $arch == "arm64" ]]; then
LLVM=13
if [[ $arch == "s390x" ]]; then
LLVM=9
else
LLVM=11
fi
@@ -48,7 +46,7 @@ if [[ $arch != "armhf" ]]; then
# llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only
# with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get
# around this.
apt-get install -y --no-remove --no-install-recommends \
apt-get install -y --no-remove \
libclang-cpp${LLVM}:$arch \
libffi-dev:$arch \
libgcc-s1:$arch \
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -ex

@@ -20,7 +19,7 @@ rm $ndk.zip
# duplicate files. Turn them into hardlinks to save on container space.
rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/
# Drop some large tools we won't use in this build.
find /android-ndk-r21d/ -type f | grep -E -i "clang-check|clang-tidy|lldb" | xargs rm -f
find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f

sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3"

@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -9,15 +8,9 @@ sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/buster.list
apt-get update

# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
libssl-dev \
"

apt-get -y install \
${EXTRA_LOCAL_PACKAGES} \
${STABLE_EPHEMERAL} \
abootimg \
autoconf \
automake \
bc \
@@ -29,13 +22,11 @@ apt-get -y install \
flex \
g++ \
git \
glslang-tools \
kmod \
libasan6 \
libdrm-dev \
libelf-dev \
libexpat1-dev \
libvulkan-dev \
libx11-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
@@ -61,26 +52,21 @@ apt-get -y install \
u-boot-tools \
wget \
xz-utils \
zlib1g-dev \
zstd
zlib1g-dev

# Not available anymore in bullseye
apt-get install -y --no-remove -t buster \
android-sdk-ext4-utils

pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125

arch=armhf
. .gitlab-ci/container/cross_build.sh

. .gitlab-ci/container/container_pre_build.sh

. .gitlab-ci/container/build-mold.sh

# dependencies where we want a specific version
EXTRA_MESON_ARGS=
. .gitlab-ci/container/build-libdrm.sh

apt-get purge -y $STABLE_EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
@@ -9,6 +9,7 @@ sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
apt-get update

apt-get install -y --no-remove \
abootimg \
cpio \
fastboot \
netcat \
@@ -18,8 +19,7 @@ apt-get install -y --no-remove \
python3-serial \
rsync \
snmp \
wget \
zstd
wget

# setup SNMPv2 SMI MIB
wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
@@ -37,9 +37,3 @@ ln -s \
/baremetal-files/Image \
/baremetal-files/tegra210-p3450-0000.dtb \
/baremetal-files/jetson-nano/boot/

mkdir -p /baremetal-files/jetson-tk1/boot/
ln -s \
/baremetal-files/zImage \
/baremetal-files/tegra124-jetson-tk1.dtb \
/baremetal-files/jetson-tk1/boot/
@@ -1,52 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)

mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM
EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM
R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2
B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY
Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT
DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1
G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/
ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU
cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq
7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc
Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB
tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz
dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE
FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC
9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR
udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX
wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn
l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv
gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W
R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg
hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx
K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya
KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B
MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7
BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g
zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc
bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC
DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw
F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta
RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/
21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV
ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+
M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa
xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ
d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/
fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X
OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB
pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML
PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL
wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd
oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l
tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG
5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP
LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov
1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3
krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN
bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw==
=j+4q
-----END PGP PUBLIC KEY BLOCK-----
@@ -1,16 +1,5 @@
#!/bin/bash

set -e

arch=s390x

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL="libssl-dev"

apt-get -y install "$STABLE_EPHEMERAL"

. .gitlab-ci/container/build-mold.sh

apt-get purge -y "$STABLE_EPHEMERAL"

. .gitlab-ci/container/cross_build.sh
@@ -1,53 +0,0 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQGNBFwOmrgBDAC9FZW3dFpew1hwDaqRfdQQ1ABcmOYu1NKZHwYjd+bGvcR2LRGe
R5dfRqG1Uc/5r6CPCMvnWxFprymkqKEADn8eFn+aCnPx03HrhA+lNEbciPfTHylt
NTTuRua7YpJIgEOjhXUbxXxnvF8fhUf5NJpJg6H6fPQARUW+5M//BlVgwn2jhzlW
U+uwgeJthhiuTXkls9Yo3EoJzmkUih+ABZgvaiBpr7GZRw9GO1aucITct0YDNTVX
KA6el78/udi5GZSCKT94yY9ArN4W6NiOFCLV7MU5d6qMjwGFhfg46NBv9nqpGinK
3NDjqCevKouhtKl2J+nr3Ju3Spzuv6Iex7tsOqt+XdZCoY+8+dy3G5zbJwBYsMiS
rTNF55PHtBH1S0QK5OoN2UR1ie/aURAyAFEMhTzvFB2B2v7C0IKIOmYMEG+DPMs9
FQs/vZ1UnAQgWk02ZiPryoHfjFO80+XYMrdWN+RSo5q9ODClloaKXjqI/aWLGirm
KXw2R8tz31go3NMAEQEAAbQnV2luZUhRIHBhY2thZ2VzIDx3aW5lLWRldmVsQHdp
bmVocS5vcmc+iQHOBBMBCgA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEE
1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmyUACgkQdvGiD/mHZy/zkwv7B+nKFlDY
Bzz/7j0gqIODbs5FRZRtuf/IuPP3vZdWlNfAW/VyaLtVLJCM/mmaf/O6/gJ+D+E9
BBoSmHdHzBBOQHIj5IbRedynNcHT5qXsdBeU2ZPR50sdE+jmukvw3Wa5JijoDgUu
LGLGtU48Z3JsBXQ54OlnTZXQ2SMFhRUa10JANXSJQ+QY2Wo2Pi2+MEAHcrd71A2S
0mT2DQSSBQ92c6WPfUpOSBawd8P0ipT7rVFNLJh8HVQGyEWxPl8ecDEHoVfG2rdV
D0ADbNLx9031UUwpUicO6vW/2Ec7c3VNG1cpOtyNTw/lEgvsXOh3GQs/DvFvMy/h
QzaeF3Qq6cAPlKuxieJe4lLYFBTmCAT4iB1J8oeFs4G7ScfZH4+4NBe3VGoeCD/M
Wl+qxntAroblxiFuqtPJg+NKZYWBzkptJNhnrBxcBnRinGZLw2k/GR/qPMgsR2L4
cP+OUuka+R2gp9oDVTZTyMowz+ROIxnEijF50pkj2VBFRB02rfiMp7q6iQIzBBAB
CgAdFiEE2iNXmnTUrZr50/lFzvrI6q8XUZ0FAlwOm3AACgkQzvrI6q8XUZ3KKg/+
MD8CgvLiHEX90fXQ23RZQRm2J21w3gxdIen/N8yJVIbK7NIgYhgWfGWsGQedtM7D
hMwUlDSRb4rWy9vrXBaiZoF3+nK9AcLvPChkZz28U59Jft6/l0gVrykey/ERU7EV
w1Ie1eRu0tRSXsKvMZyQH8897iHZ7uqoJgyk8U8CvSW+V80yqLB2M8Tk8ECZq34f
HqUIGs4Wo0UZh0vV4+dEQHBh1BYpmmWl+UPf7nzNwFWXu/EpjVhkExRqTnkEJ+Ai
OxbtrRn6ETKzpV4DjyifqQF639bMIem7DRRf+mkcrAXetvWkUkE76e3E9KLvETCZ
l4SBfgqSZs2vNngmpX6Qnoh883aFo5ZgVN3v6uTS+LgTwMt/XlnDQ7+Zw+ehCZ2R
CO21Y9Kbw6ZEWls/8srZdCQ2LxnyeyQeIzsLnqT/waGjQj35i4exzYeWpojVDb3r
tvvOALYGVlSYqZXIALTx2/tHXKLHyrn1C0VgHRnl+hwv7U49f7RvfQXpx47YQN/C
PWrpbG69wlKuJptr+olbyoKAWfl+UzoO8vLMo5njWQNAoAwh1H8aFUVNyhtbkRuq
l0kpy1Cmcq8uo6taK9lvYp8jak7eV8lHSSiGUKTAovNTwfZG2JboGV4/qLDUKvpa
lPp2xVpF9MzA8VlXTOzLpSyIVxZnPTpL+xR5P9WQjMS5AY0EXA6auAEMAMReKL89
0z0SL+/i/geB/agfG/k6AXiG2a9kVWeIjAqFwHKl9W/DTNvOqCDgAt51oiHGRRjt
1Xm3XZD4p+GM1uZWn9qIFL49Gt5x94TqdrsKTVCJr0Kazn2mKQc7aja0zac+WtZG
OFn7KbniuAcwtC780cyikfmmExLI1/Vjg+NiMlMtZfpK6FIW+ulPiDQPdzIhVppx
w9/KlR2Fvh4TbzDsUqkFQSSAFdQ65BWgvzLpZHdKO/ILpDkThLbipjtvbBv/pHKM
O/NFTNoYkJ3cNW/kfcynwV+4AcKwdRz2A3Mez+g5TKFYPZROIbayOo01yTMLfz2p
jcqki/t4PACtwFOhkAs+MYPPyZDUkTFcEJQCPDstkAgmJWI3K2qELtDOLQyps3WY
Mfp+mntOdc8bKjFTMcCEk1zcm14K4Oms+w6dw2UnYsX1FAYYhPm8HUYwE4kP8M+D
9HGLMjLqqF/kanlCFZs5Avx3mDSAx6zS8vtNdGh+64oDNk4x4A2j8GTUuQARAQAB
iQG8BBgBCgAmFiEE1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmrgCGwwFCQPCZwAA
CgkQdvGiD/mHZy9FnAwAgfUkxsO53Pm2iaHhtF4+BUc8MNJj64Jvm1tghr6PBRtM
hpbvvN8SSOFwYIsS+2BMsJ2ldox4zMYhuvBcgNUlix0G0Z7h1MjftDdsLFi1DNv2
J9dJ9LdpWdiZbyg4Sy7WakIZ/VvH1Znd89Imo7kCScRdXTjIw2yCkotE5lK7A6Ns
NbVuoYEN+dbGioF4csYehnjTdojwF/19mHFxrXkdDZ/V6ZYFIFxEsxL8FEuyI4+o
LC3DFSA4+QAFdkjGFXqFPlaEJxWt5d7wk0y+tt68v+ulkJ900BvR+OOMqQURwrAi
iP3I28aRrMjZYwyqHl8i/qyIv+WRakoDKV+wWteR5DmRAPHmX2vnlPlCmY8ysR6J
2jUAfuDFVu4/qzJe6vw5tmPJMdfvy0W5oogX6sEdin5M5w2b3WrN8nXZcjbWymqP
6jCdl6eoCCkKNOIbr/MMSkd2KqAqDVM5cnnlQ7q+AXzwNpj3RGJVoBxbS0nn9JWY
QNQrWh9rAcMIGT+b1le0
=4lsa
-----END PGP PUBLIC KEY BLOCK-----
@@ -1,16 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

# Installing wine, need this for testing mingw or nine

apt-get update
apt-get install -y --no-remove \
wine \
wine64 \
xvfb

# Used to initialize the Wine environment to reduce build time
wine64 whoami.exe
@@ -1,16 +1,11 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates gnupg2 software-properties-common

# Add llvm 13 to the build image
apt-key add .gitlab-ci/container/debian/llvm-snapshot.gpg.key
add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"
apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

@@ -28,22 +23,22 @@ apt-get install -y --no-remove \
bison \
ccache \
dpkg-cross \
findutils \
flex \
g++ \
cmake \
g++-mingw-w64-x86-64 \
gcc \
git \
glslang-tools \
kmod \
libclang-13-dev \
libclang-11-dev \
libclang-9-dev \
libclc-dev \
libelf-dev \
libepoxy-dev \
libexpat1-dev \
libgtk-3-dev \
libllvm13 \
libllvm11 \
libllvm9 \
libomxil-bellagio-dev \
libpciaccess-dev \
libunwind-dev \
@@ -57,33 +52,27 @@ apt-get install -y --no-remove \
libxrandr-dev \
libxrender-dev \
libxshmfence-dev \
libxvmc-dev \
libxxf86vm-dev \
libz-mingw-w64-dev \
make \
meson \
pkg-config \
python3-mako \
python3-pil \
python3-ply \
python3-requests \
qemu-user \
valgrind \
wget \
wine64 \
x11proto-dri2-dev \
x11proto-gl-dev \
x11proto-randr-dev \
xz-utils \
zlib1g-dev \
zstd
zlib1g-dev

# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2

# We need at least 0.61.4 for proper Rust
pip3 install meson==0.61.5

. .gitlab-ci/container/build-rust.sh

. .gitlab-ci/container/debian/x86_build-base-wine.sh
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125

############### Uninstall ephemeral packages

@@ -1,77 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e

# Pull packages from msys2 repository that can be directly used.
# We can use https://packages.msys2.org/ to retrieve the newest package
mkdir ~/tmp
pushd ~/tmp
MINGW_PACKET_LIST="
mingw-w64-x86_64-headers-git-10.0.0.r14.ga08c638f8-1-any.pkg.tar.zst
mingw-w64-x86_64-vulkan-loader-1.3.211-1-any.pkg.tar.zst
mingw-w64-x86_64-libelf-0.8.13-6-any.pkg.tar.zst
mingw-w64-x86_64-zlib-1.2.12-1-any.pkg.tar.zst
mingw-w64-x86_64-zstd-1.5.2-2-any.pkg.tar.zst
"

for i in $MINGW_PACKET_LIST
do
wget -q https://mirror.msys2.org/mingw/mingw64/$i
tar xf $i --strip-components=1 -C /usr/x86_64-w64-mingw32/
done
popd
rm -rf ~/tmp

mkdir -p /usr/x86_64-w64-mingw32/bin

# The output of `wine64 llvm-config --system-libs --cxxflags mcdisassembler`
# containes absolute path like '-IZ:'
# The sed is used to replace `-IZ:/usr/x86_64-w64-mingw32/include`
# to `-I/usr/x86_64-w64-mingw32/include`

# Debian's pkg-config wrapers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix it ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/x86_64-w64-mingw32/bin/pkg-config <<EOF
#!/bin/sh

PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig:/usr/x86_64-w64-mingw32/share/pkgconfig pkg-config \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/pkg-config

cat >/usr/x86_64-w64-mingw32/bin/llvm-config <<EOF
#!/bin/sh
wine64 llvm-config \$@ | sed -e "s,Z:/,/,gi"
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-config

cat >/usr/x86_64-w64-mingw32/bin/clang <<EOF
#!/bin/sh
wine64 clang \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/clang

cat >/usr/x86_64-w64-mingw32/bin/llvm-as <<EOF
#!/bin/sh
wine64 llvm-as \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-as

cat >/usr/x86_64-w64-mingw32/bin/llvm-link <<EOF
#!/bin/sh
wine64 llvm-link \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-link

cat >/usr/x86_64-w64-mingw32/bin/opt <<EOF
#!/bin/sh
wine64 opt \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/opt

cat >/usr/x86_64-w64-mingw32/bin/llvm-spirv <<EOF
#!/bin/sh
wine64 llvm-spirv \$@
EOF
chmod +x /usr/x86_64-w64-mingw32/bin/llvm-spirv
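A hedged reading of the wrappers above, not part of the diff: they make the mingw cross prefix expose native-looking tools that transparently run the Windows builds under Wine. For example, a build system invoking

/usr/x86_64-w64-mingw32/bin/llvm-config --cxxflags

actually executes `wine64 llvm-config --cxxflags`, and the sed rewrites Wine drive paths such as `-IZ:/usr/x86_64-w64-mingw32/include` back to `-I/usr/x86_64-w64-mingw32/include` so the flags are usable on the Linux side.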
@@ -1,126 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e

# Building libdrm (libva dependency)
. .gitlab-ci/container/build-libdrm.sh

wd=$PWD
CMAKE_TOOLCHAIN_MINGW_PATH=$wd/.gitlab-ci/container/debian/x86_mingw-toolchain.cmake
mkdir -p ~/tmp
pushd ~/tmp

# Building DirectX-Headers
git clone https://github.com/microsoft/DirectX-Headers -b v1.606.4 --depth 1
mkdir -p DirectX-Headers/build
pushd DirectX-Headers/build
meson .. \
  --backend=ninja \
  --buildtype=release -Dbuild-test=false \
  -Dprefix=/usr/x86_64-w64-mingw32/ \
  --cross-file=$wd/.gitlab-ci/x86_64-w64-mingw32

ninja install
popd

# Building libva
git clone https://github.com/intel/libva
pushd libva/
# Check out a commit hash with libva-win32 support;
# this feature will be released with libva version 2.17.
git checkout 2579eb0f77897dc01a02c1e43defc63c40fd2988
popd
# libva already has a "build" dir in its repo, so use "builddir" instead
mkdir -p libva/builddir
pushd libva/builddir
meson .. \
  --backend=ninja \
  --buildtype=release \
  -Dprefix=/usr/x86_64-w64-mingw32/ \
  --cross-file=$wd/.gitlab-ci/x86_64-w64-mingw32

ninja install
popd

export VULKAN_SDK_VERSION=1.3.211.0

# Building SPIRV-Tools
git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
  https://github.com/KhronosGroup/SPIRV-Tools SPIRV-Tools

git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
  https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers

mkdir -p SPIRV-Tools/build
pushd SPIRV-Tools/build
cmake .. \
  -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
  -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
  -GNinja -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_CROSSCOMPILING=1 \
  -DCMAKE_POLICY_DEFAULT_CMP0091=NEW

ninja install
popd

# Building LLVM
git clone -b release/14.x --depth=1 \
  https://github.com/llvm/llvm-project llvm-project

git clone -b v14.0.0 --depth=1 \
  https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator

mkdir llvm-project/build
pushd llvm-project/build
cmake ../llvm \
  -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
  -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
  -GNinja -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_CROSSCOMPILING=1 \
  -DLLVM_ENABLE_RTTI=ON \
  -DCROSS_TOOLCHAIN_FLAGS_NATIVE=-DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
  -DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
  -DLLVM_ENABLE_PROJECTS="clang" \
  -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" \
  -DLLVM_OPTIMIZED_TABLEGEN=TRUE \
  -DLLVM_ENABLE_ASSERTIONS=TRUE \
  -DLLVM_INCLUDE_UTILS=OFF \
  -DLLVM_INCLUDE_RUNTIMES=OFF \
  -DLLVM_INCLUDE_TESTS=OFF \
  -DLLVM_INCLUDE_EXAMPLES=OFF \
  -DLLVM_INCLUDE_GO_TESTS=OFF \
  -DLLVM_INCLUDE_BENCHMARKS=OFF \
  -DLLVM_BUILD_LLVM_C_DYLIB=OFF \
  -DLLVM_ENABLE_DIA_SDK=OFF \
  -DCLANG_BUILD_TOOLS=ON \
  -DLLVM_SPIRV_INCLUDE_TESTS=OFF

ninja install
popd

# Building libclc
mkdir llvm-project/build-libclc
pushd llvm-project/build-libclc
cmake ../libclc \
  -DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
  -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
  -GNinja -DCMAKE_BUILD_TYPE=Release \
  -DCMAKE_CROSSCOMPILING=1 \
  -DCMAKE_POLICY_DEFAULT_CMP0091=NEW \
  -DCMAKE_CXX_FLAGS="-m64" \
  -DLLVM_CONFIG="/usr/x86_64-w64-mingw32/bin/llvm-config" \
  -DLLVM_CLANG="/usr/x86_64-w64-mingw32/bin/clang" \
  -DLLVM_AS="/usr/x86_64-w64-mingw32/bin/llvm-as" \
  -DLLVM_LINK="/usr/x86_64-w64-mingw32/bin/llvm-link" \
  -DLLVM_OPT="/usr/x86_64-w64-mingw32/bin/opt" \
  -DLLVM_SPIRV="/usr/x86_64-w64-mingw32/bin/llvm-spirv" \
  -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-"

ninja install
popd

popd # ~/tmp

# Clean up ~/tmp
rm -rf ~/tmp
@@ -1,13 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

apt-get update
apt-get install -y --no-remove \
  zstd \
  g++-mingw-w64-i686 \
  g++-mingw-w64-x86-64

. .gitlab-ci/container/debian/x86_build-mingw-patch.sh
. .gitlab-ci/container/debian/x86_build-mingw-source-deps.sh
@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -13,22 +12,24 @@ STABLE_EPHEMERAL=" \
  autotools-dev \
  bzip2 \
  libtool \
  libssl-dev \
  python3-pip \
  "

# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update

apt-get install -y --no-remove \
  $STABLE_EPHEMERAL \
  check \
  clang \
  cmake \
  libasan6 \
  libarchive-dev \
  libclang-cpp13-dev \
  libclang-cpp11-dev \
  libgbm-dev \
  libglvnd-dev \
  libllvmspirvlib-dev \
  liblua5.3-dev \
  libxcb-dri2-0-dev \
  libxcb-dri3-dev \
@@ -40,38 +41,43 @@ apt-get install -y --no-remove \
  libxcb-xfixes0-dev \
  libxcb1-dev \
  libxml2-dev \
  llvm-13-dev \
  llvm-11-dev \
  llvm-9-dev \
  ocl-icd-opencl-dev \
  python3-freezegun \
  python3-pytest \
  procps \
  spirv-tools \
  shellcheck \
  strace \
  time \
  yamllint \
  zstd
  wine \
  wine32

. .gitlab-ci/container/container_pre_build.sh

# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix it ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF
#!/bin/sh

PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@
EOF
chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config

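Once the wrapper is installed, cross builds can query mingw packages the usual way; a minimal sketch, assuming a .pc file (zlib here is only an example) has been installed into the mingw sysroot:

# Resolves against /usr/x86_64-w64-mingw32/lib/pkgconfig instead of the
# host's default pkg-config search path.
x86_64-w64-mingw32-pkg-config --exists zlib && \
    x86_64-w64-mingw32-pkg-config --cflags --libs zlib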
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual

export XORGMACROS_VERSION=util-macros-1.19.0

. .gitlab-ci/container/build-mold.sh

wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION

. .gitlab-ci/container/build-llvm-spirv.sh

. .gitlab-ci/container/build-libclc.sh

. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/build-wayland.sh
@@ -83,9 +89,10 @@ cd shader-db
make
popd

git clone https://github.com/microsoft/DirectX-Headers -b v1.606.4 --depth 1
mkdir -p DirectX-Headers/build
pushd DirectX-Headers/build
git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1
pushd DirectX-Headers
mkdir build
cd build
meson .. --backend=ninja --buildtype=release -Dbuild-test=false
ninja
ninja install
@@ -94,12 +101,6 @@ rm -rf DirectX-Headers

pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7

# install bindgen
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
  bindgen --version 0.59.2 \
  -j ${FDO_CI_CONCURRENT:-4} \
  --root /usr/local

############### Uninstall the build software

apt-get purge -y \

@@ -1,8 +0,0 @@
set(CMAKE_SYSTEM_NAME Windows)
set(CMAKE_SYSTEM_PROCESSOR x86_64)

set(CMAKE_SYSROOT /usr/x86_64-w64-mingw32/)
set(ENV{PKG_CONFIG} /usr/x86_64-w64-mingw32/bin/pkg-config)

set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc-posix)
set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++-posix)
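For reference, this toolchain file is consumed via -DCMAKE_TOOLCHAIN_FILE, as the mingw source-deps script above does; a minimal sketch (source and build directory names are illustrative):

# Cross-configure an arbitrary CMake project against the mingw sysroot.
cmake -S . -B build-mingw \
    -DCMAKE_TOOLCHAIN_FILE=.gitlab-ci/container/debian/x86_mingw-toolchain.cmake \
    -DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
    -GNinja -DCMAKE_BUILD_TYPE=Release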
@@ -1,77 +1,34 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates gnupg2 software-properties-common
apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
  autoconf \
  automake \
  bc \
  bison \
  bzip2 \
  ccache \
  cmake \
  clang-11 \
  flex \
  glslang-tools \
  g++ \
  libasound2-dev \
  libcap-dev \
  libclang-cpp11-dev \
  libegl-dev \
  libelf-dev \
  libepoxy-dev \
  libgbm-dev \
  libpciaccess-dev \
  libvulkan-dev \
  libwayland-dev \
  libx11-xcb-dev \
  libxext-dev \
  llvm-13-dev \
  llvm-11-dev \
  make \
  meson \
  patch \
  pkg-config \
  protobuf-compiler \
  cargo \
  python3-dev \
  python3-pip \
  python3-setuptools \
  python3-wheel \
  spirv-tools \
  wayland-protocols \
  xz-utils \
  "

# Add LLVM 13 to the build image
apt-key add .gitlab-ci/container/debian/llvm-snapshot.gpg.key
add-apt-repository "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-13 main"

apt-get update
apt-get dist-upgrade -y

apt-get install -y \
  sysvinit-core

apt-get install -y --no-remove \
  git \
  git-lfs \
  inetutils-syslogd \
  iptables \
  jq \
  libasan6 \
  libexpat1 \
  libllvm13 \
  libllvm11 \
  libllvm9 \
  liblz4-1 \
  libpng16-16 \
  libpython3.9 \
@@ -91,69 +48,22 @@ apt-get install -y --no-remove \
  python3-requests \
  python3-six \
  python3-yaml \
  socat \
  vulkan-tools \
  waffle-utils \
  wget \
  xauth \
  xvfb \
  zlib1g \
  zstd
  zlib1g

apt-get install -y --no-install-recommends \
  $STABLE_EPHEMERAL

. .gitlab-ci/container/container_pre_build.sh

############### Build kernel

export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
export KERNEL_IMAGE_NAME=bzImage
export KERNEL_ARCH=x86_64
export DEBIAN_ARCH=amd64

mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh

# Needed for ci-fairy; this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2

# Needed for manipulating trace YAML files.
pip3 install yq

# Needed for crosvm compilation.
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-11 100

############### Build LLVM-SPIRV translator

. .gitlab-ci/container/build-llvm-spirv.sh

############### Build libclc

. .gitlab-ci/container/build-libclc.sh

############### Build libdrm

. .gitlab-ci/container/build-libdrm.sh

############### Build Wayland

. .gitlab-ci/container/build-wayland.sh

############### Build Crosvm

. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/build-crosvm.sh
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125

############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh

rm -rf /root/.cargo
rm -rf /root/.rustup

ccache --show-stats
rm -rf ~/.cargo

apt-get purge -y $STABLE_EPHEMERAL

@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -8,18 +7,26 @@ export DEBIAN_FRONTEND=noninteractive

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
  autoconf \
  automake \
  bc \
  bison \
  bzip2 \
  ccache \
  clang-13 \
  clang-11 \
  cmake \
  flex \
  g++ \
  glslang-tools \
  libasound2-dev \
  libcap-dev \
  libclang-cpp13-dev \
  libclang-cpp11-dev \
  libelf-dev \
  libexpat1-dev \
  libfdt-dev \
  libgbm-dev \
  libgles2-mesa-dev \
  libllvmspirvlib-dev \
  libpciaccess-dev \
  libpng-dev \
  libudev-dev \
@@ -27,10 +34,11 @@ STABLE_EPHEMERAL=" \
  libwaffle-dev \
  libx11-xcb-dev \
  libxcb-dri2-0-dev \
  libxext-dev \
  libxkbcommon-dev \
  libxrender-dev \
  llvm-13-dev \
  llvm-11-dev \
  llvm-spirv \
  make \
  meson \
  ocl-icd-opencl-dev \
@@ -40,30 +48,59 @@ STABLE_EPHEMERAL=" \
  xz-utils \
  "

apt-get update

apt-get install -y --no-remove \
  $STABLE_EPHEMERAL \
  clinfo \
  iptables \
  libclang-common-13-dev \
  libclang-common-11-dev \
  libclang-cpp13 \
  libclang-cpp11 \
  libcap2 \
  libegl1 \
  libepoxy0 \
  libepoxy-dev \
  libfdt1 \
  libllvmspirvlib11 \
  libxcb-shm0 \
  ocl-icd-libopencl1 \
  python3-lxml \
  python3-renderdoc \
  python3-simplejson \
  spirv-tools
  socat \
  spirv-tools \
  sysvinit-core \
  wget

. .gitlab-ci/container/container_pre_build.sh

############### Build libdrm

. .gitlab-ci/container/build-libdrm.sh

############### Build Wayland

. .gitlab-ci/container/build-wayland.sh

############### Build Crosvm

. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/build-crosvm.sh
rm -rf /root/.cargo
rm -rf /root/.rustup

############### Build kernel

export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
export KERNEL_IMAGE_NAME=bzImage
export KERNEL_ARCH=x86_64
export DEBIAN_ARCH=amd64

mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh

############### Build libclc

. .gitlab-ci/container/build-libclc.sh

############### Build piglit

PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh

@@ -1,7 +1,4 @@
#!/bin/bash
# The relative paths in this file only become valid at runtime.
# shellcheck disable=SC1091
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -17,7 +14,6 @@ STABLE_EPHEMERAL=" \
  g++-mingw-w64-x86-64-posix \
  glslang-tools \
  libexpat1-dev \
  gnupg2 \
  libgbm-dev \
  libgles2-mesa-dev \
  liblz4-dev \
@@ -39,58 +35,81 @@ STABLE_EPHEMERAL=" \
  p7zip \
  patch \
  pkg-config \
  python3-dev \
  python3-distutils \
  python3-pip \
  python3-setuptools \
  python3-wheel \
  software-properties-common \
  wget \
  wine64-tools \
  xz-utils \
  "

apt-get install -y --no-remove --no-install-recommends \
apt-get install -y --no-remove \
  $STABLE_EPHEMERAL \
  libepoxy0 \
  libxcb-shm0 \
  pciutils \
  python3-lxml \
  python3-simplejson \
  xinit \
  xserver-xorg-video-amdgpu \
  xserver-xorg-video-ati

# Install a more recent version of Wine than exists in Debian.
apt-key add .gitlab-ci/container/debian/winehq.gpg.key
apt-add-repository https://dl.winehq.org/wine-builds/debian/
apt-get update -q
# We need multiarch for Wine
dpkg --add-architecture i386

# Needed for Valve's tracing jobs to collect information about the graphics
# hardware on the test devices.
pip3 install gfxinfo-mupuf==0.0.9
apt-get update

# Work around Wine needing 32-bit
# https://bugs.winehq.org/show_bug.cgi?id=53393
apt-get install -y --no-remove wine-stable-amd64 # a requirement for wine-stable
WINE_PKG="wine-stable"
WINE_PKG_DROP="wine-stable-i386"
apt-get download "${WINE_PKG}"
dpkg --ignore-depends="${WINE_PKG_DROP}" -i "${WINE_PKG}"*.deb
rm "${WINE_PKG}"*.deb
sed -i "/${WINE_PKG_DROP}/d" /var/lib/dpkg/status
apt-get install -y --no-remove winehq-stable # symlinks-only, depends on wine-stable
apt-get install -y --no-remove \
  wine \
  wine32 \
  wine64

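A quick, optional sanity check after this package surgery; this is not part of the original script, just a sketch using standard dpkg/apt tooling:

# wine-stable should be listed as installed (ii) even though its i386
# dependency entry was stripped from /var/lib/dpkg/status above.
dpkg -l wine-stable wine-stable-amd64 2>/dev/null | grep '^ii'
# apt-get check reports any broken dependencies left behind by dpkg -i.
apt-get check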
function setup_wine() {
  export WINEDEBUG="-all"
  export WINEPREFIX="$1"

  # We don't want crash dialogs
  cat >crashdialog.reg <<EOF
Windows Registry Editor Version 5.00

[HKEY_CURRENT_USER\Software\Wine\WineDbg]
"ShowCrashDialog"=dword:00000000

EOF

  # Set the wine prefix and disable the crash dialog
  wine regedit crashdialog.reg
  rm crashdialog.reg

  # An immediate wine command may fail with: "${WINEPREFIX}: Not a
  # valid wine prefix." That message is emitted because the check for
  # the system.reg file runs before the file has been created.
  # Giving it a bit more time to be created solves the problem.
  while ! test -f "${WINEPREFIX}/system.reg"; do sleep 1; done
}

############### Install DXVK

. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
. .gitlab-ci/container/install-wine-dxvk.sh
DXVK_VERSION="1.8.1"

############### Install apitrace binaries for wine
setup_wine "/dxvk-wine64"

wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install
rm -rf dxvk-"${DXVK_VERSION}"
rm dxvk-"${DXVK_VERSION}".tar.gz

############### Install Windows' apitrace binaries

APITRACE_VERSION="10.0"
APITRACE_VERSION_DATE=""

wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
  "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
  "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"

. .gitlab-ci/container/install-wine-apitrace.sh
# Add the apitrace path to the registry
wine64 \
wine \
  reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
  /v Path \
  /t REG_EXPAND_SZ \
@@ -101,6 +120,14 @@ wine64 \

. .gitlab-ci/container/container_pre_build.sh

############### Build libdrm

. .gitlab-ci/container/build-libdrm.sh

############### Build Wayland

. .gitlab-ci/container/build-wayland.sh

############### Build parallel-deqp-runner's hang-detection tool

. .gitlab-ci/container/build-hang-detection.sh
@@ -117,17 +144,13 @@ PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh

. .gitlab-ci/container/build-deqp.sh

############### Build apitrace

. .gitlab-ci/container/build-apitrace.sh

############### Build gfxreconstruct

. .gitlab-ci/container/build-gfxreconstruct.sh

############### Build VKD3D-Proton

. .gitlab-ci/container/setup-wine.sh "/vkd3d-proton-wine64"
setup_wine "/vkd3d-proton-wine64"

. .gitlab-ci/container/build-vkd3d-proton.sh

@@ -1,5 +1,4 @@
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -9,12 +8,10 @@ EPHEMERAL="
  autoconf
  automake
  bzip2
  cmake
  git
  libtool
  pkgconfig(epoxy)
  pkgconfig(gbm)
  pkgconfig(openssl)
  unzip
  wget
  xz
@@ -28,7 +25,6 @@ dnf install -y --setopt=install_weak_deps=False \
  gcc \
  gcc-c++ \
  gettext \
  glslang \
  kernel-headers \
  llvm-devel \
  clang-devel \
@@ -67,7 +63,6 @@ dnf install -y --setopt=install_weak_deps=False \
  python3-mako \
  python3-devel \
  python3-mako \
  python3-ply \
  vulkan-headers \
  spirv-tools-devel \
  spirv-llvm-translator-devel \
@@ -87,8 +82,6 @@ tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION

. .gitlab-ci/container/build-mold.sh

. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/build-wayland.sh

@@ -23,9 +23,6 @@
  variables:
    MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"

.use-wine:
  variables:
    WINEPATH: "/usr/x86_64-w64-mingw32/bin;/usr/x86_64-w64-mingw32/lib;/usr/lib/gcc/x86_64-w64-mingw32/10-posix;c:/windows;c:/windows/system32"

# Build the CI docker images.
#
@@ -49,11 +46,10 @@
  extends:
    - .container-rules
    - .incorporate-templates-commit
    - .use-wine
  variables:
    FDO_DISTRIBUTION_VERSION: bullseye-slim
    FDO_REPO_SUFFIX: $CI_JOB_NAME
    FDO_DISTRIBUTION_EXEC: 'bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    FDO_DISTRIBUTION_EXEC: 'env FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    # no need to pull the whole repo to build the container image
    GIT_STRATEGY: none

@@ -62,7 +58,7 @@
    - .container
    - .incorporate-base-tag+templates-commit
    # Don't want the .container rules
    - .build-rules
    - .ci-run-policy

# Debian 11 based x86 build image base
debian/x86_build-base:
@@ -117,23 +113,6 @@ debian/i386_build:
  needs:
    - debian/i386_build

# Debian 11 based x86-mingw cross main build image
debian/x86_build-mingw:
  extends:
    - .use-debian/x86_build-base
  variables:
    MESA_IMAGE_TAG: &debian-x86_build_mingw ${DEBIAN_BUILD_MINGW_TAG}

.use-debian/x86_build_mingw:
  extends:
    - .set-image-base-tag
  variables:
    MESA_BASE_TAG: *debian-x86_build-base
    MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_MINGW_IMAGE_PATH}
    MESA_IMAGE_TAG: *debian-x86_build_mingw
  needs:
    - debian/x86_build-mingw

# Debian 11 based ppc64el cross-build image
debian/ppc64el_build:
  extends:
@@ -189,7 +168,6 @@ debian/android_build:

debian/x86_test-base:
  extends: debian/x86_build-base
  variables:
    KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.19-for-mesa-ci-d4efddaec194/linux-v5.17-for-mesa-ci-b78f7870d97b.tar.bz2"
    MESA_IMAGE_TAG: &debian-x86_test-base ${DEBIAN_BASE_TAG}

.use-debian/x86_test-base:
@@ -206,6 +184,8 @@ debian/x86_test-base:
debian/x86_test-gl:
  extends: .use-debian/x86_test-base
  variables:
    FDO_DISTRIBUTION_EXEC: 'env KERNEL_URL=${KERNEL_URL} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.16-for-mesa-ci-991fec6622591/linux-v5.16-for-mesa-ci-991fec6622591.tar.bz2"
    MESA_IMAGE_TAG: &debian-x86_test-gl ${DEBIAN_X86_TEST_GL_TAG}

.use-debian/x86_test-gl:
@@ -276,7 +256,7 @@ fedora/x86_build:

.kernel+rootfs:
  extends:
    - .build-rules
    - .ci-run-policy
  stage: container
  variables:
    GIT_STRATEGY: fetch
@@ -327,14 +307,13 @@ debian/arm_test:
    - .fdo.container-build@debian
    - .container
    # Don't want the .container rules
    - .build-rules
    - .ci-run-policy
  needs:
    - kernel+rootfs_arm64
    - kernel+rootfs_armhf
  variables:
    FDO_DISTRIBUTION_EXEC: 'env ARTIFACTS_PREFIX=https://${MINIO_HOST}/mesa-lava ARTIFACTS_SUFFIX=${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT} CI_PROJECT_PATH=${CI_PROJECT_PATH} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} FDO_UPSTREAM_REPO=${FDO_UPSTREAM_REPO} bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
    FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
    ARTIFACTS_PREFIX: "https://${MINIO_HOST}/mesa-lava"
    ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}"
    MESA_ARM_BUILD_TAG: *debian-arm_build
    MESA_IMAGE_TAG: &debian-arm_test ${DEBIAN_BASE_TAG}
    MESA_ROOTFS_TAG: *kernel-rootfs
@@ -368,7 +347,7 @@ debian/arm_test:

.windows_container_build:
  inherit:
    default: [retry]
    default: false
  extends:
    - .container
    - .windows-docker-vs2019
@@ -382,79 +361,60 @@ debian/arm_test:
  tags:
    - windows
    - shell
    - "2022"
    - "1809"
    - mesa
  script:
    - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE}

windows_vs2019:
  inherit:
    default: [retry]
  extends:
    - .windows_container_build
  variables:
    MESA_IMAGE_PATH: &windows_vs_image_path ${WINDOWS_X64_VS_PATH}
    MESA_IMAGE_TAG: &windows_vs_image_tag ${WINDOWS_X64_VS_TAG}
    DOCKERFILE: Dockerfile_vs
    MESA_BASE_IMAGE: "mcr.microsoft.com/windows/server:ltsc2022"

windows_build_vs2019:
  inherit:
    default: [retry]
    default: false
  extends:
    - .windows_container_build
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.build-rules, rules]
  variables:
    MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH}
    MESA_IMAGE_TAG: &windows_build_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_BUILD_TAG}
    MESA_IMAGE_TAG: &windows_build_image_tag ${WINDOWS_X64_BUILD_TAG}
    DOCKERFILE: Dockerfile_build
    MESA_BASE_IMAGE_PATH: *windows_vs_image_path
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
    MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
  timeout: 2h 30m # LLVM takes ages
  needs:
    - windows_vs2019

windows_test_vs2019:
  inherit:
    default: [retry]
    default: false
  extends:
    - .windows_container_build
  rules:
    - if: '$MICROSOFT_FARM == "offline"'
      when: never
    - !reference [.build-rules, rules]
    - !reference [.ci-run-policy, rules]
  variables:
    MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH}
    MESA_IMAGE_TAG: &windows_test_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_TEST_TAG}
    MESA_IMAGE_TAG: &windows_test_image_tag ${WINDOWS_X64_BUILD_TAG}--${WINDOWS_X64_TEST_TAG}
    DOCKERFILE: Dockerfile_test
    MESA_BASE_IMAGE_PATH: *windows_vs_image_path
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
    # Right now this only needs the VS install to get DXIL.dll. Maybe see about decoupling this at some point
    MESA_BASE_IMAGE_PATH: *windows_build_image_path
    MESA_BASE_IMAGE_TAG: *windows_build_image_tag
    MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
  script:
    - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE Dockerfile_test ${MESA_BASE_IMAGE}
  needs:
    - windows_vs2019
    - windows_build_vs2019

.use-windows_build_vs2019:
  inherit:
    default: [retry]
    default: false
  extends: .windows-docker-vs2019
  image: "$MESA_IMAGE"
  variables:
    MESA_IMAGE_PATH: *windows_build_image_path
    MESA_IMAGE_TAG: *windows_build_image_tag
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
  needs:
    - windows_build_vs2019

.use-windows_test_vs2019:
  inherit:
    default: [retry]
    default: false
  extends: .windows-docker-vs2019
  image: "$MESA_IMAGE"
  variables:
    MESA_IMAGE_PATH: *windows_test_image_path
    MESA_IMAGE_TAG: *windows_test_image_tag
    MESA_BASE_IMAGE_TAG: *windows_vs_image_tag

@@ -1,13 +0,0 @@
#!/bin/bash

APITRACE_VERSION="11.1"
APITRACE_VERSION_DATE=""

wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
  "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
  "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
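For reference, elsewhere in this series the script above was consumed by sourcing it from the test-image and rootfs builds:

. .gitlab-ci/container/install-wine-apitrace.sh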
@@ -1,39 +0,0 @@
#!/bin/bash

set -e

dxvk_install_release() {
    local DXVK_VERSION=${1:-"1.10.3"}

    wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
    tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
    # https://github.com/doitsujin/dxvk/issues/2921
    sed -i 's/wine="wine"/wine="wine32"/' "dxvk-${DXVK_VERSION}"/setup_dxvk.sh
    "dxvk-${DXVK_VERSION}"/setup_dxvk.sh install
    rm -rf "dxvk-${DXVK_VERSION}"
    rm dxvk-"${DXVK_VERSION}".tar.gz
}

# Install from a GitHub PR number
dxvk_install_pr() {
    local __prnum=$1

    # NOTE: Clone the entire history of the repo so we don't have to think
    # harder about cloning just enough for 'git describe' to work. 'git
    # describe' is used by the dxvk build system to generate a
    # dxvk_version Meson variable, which is nice-to-have.
    git clone https://github.com/doitsujin/dxvk
    pushd dxvk
    git fetch origin pull/"$__prnum"/head:pr
    git checkout pr
    ./package-release.sh pr ../dxvk-build --no-package
    popd
    pushd ./dxvk-build/dxvk-pr
    ./setup_dxvk.sh install
    popd
    rm -rf ./dxvk-build ./dxvk
}

dxvk_install_release "1.10.1"
#dxvk_install_pr 2359
@@ -1,7 +1,4 @@
#!/bin/bash
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
# shellcheck disable=SC2034 # Variables are used in scripts called from here
# shellcheck disable=SC2086 # we want word splitting

set -e
set -o xtrace
@@ -28,7 +25,6 @@ check_minio "${CI_PROJECT_PATH}"
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    GCC_ARCH="aarch64-linux-gnu"
    KERNEL_ARCH="arm64"
    SKQP_ARCH="arm64"
    DEFCONFIG="arch/arm64/configs/defconfig"
    DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
@@ -39,29 +35,24 @@ if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dtb"
    KERNEL_IMAGE_NAME="Image"

elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
    GCC_ARCH="arm-linux-gnueabihf"
    KERNEL_ARCH="arm"
    SKQP_ARCH="arm"
    DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
    DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/imx6q-cubox-i.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/tegra124-jetson-tk1.dtb"
    KERNEL_IMAGE_NAME="zImage"
    . .gitlab-ci/container/create-cross-file.sh armhf
else
    GCC_ARCH="x86_64-linux-gnu"
    KERNEL_ARCH="x86_64"
    SKQP_ARCH="x64"
    DEFCONFIG="arch/x86/configs/x86_64_defconfig"
    DEVICE_TREES=""
    KERNEL_IMAGE_NAME="bzImage"
    ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols p7zip"
    ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols"
fi

# Determine if we're in a cross build.
@@ -111,15 +102,13 @@ apt-get install -y --no-remove \
    libxkbcommon-dev \
    ninja-build \
    patch \
    protobuf-compiler \
    python-is-python3 \
    python3-distutils \
    python3-mako \
    python3-numpy \
    python3-serial \
    unzip \
    wget \
    zstd
    wget

if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
@@ -137,20 +126,6 @@ if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
    libxkbcommon-dev:armhf
fi

mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}"

############### Setting up
if [ "$DEBIAN_ARCH" = "amd64" ]; then
    . .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
    . .gitlab-ci/container/install-wine-dxvk.sh
    mv /dxvk-wine64 "/lava-files/rootfs-${DEBIAN_ARCH}/"
fi

############### Installing
. .gitlab-ci/container/install-wine-apitrace.sh
mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}/apitrace-msvc-win64"
mv /apitrace-msvc-win64/bin "/lava-files/rootfs-${DEBIAN_ARCH}/apitrace-msvc-win64"
rm -rf /apitrace-msvc-win64

############### Building
STRIP_CMD="${GCC_ARCH}-strip"
@@ -177,12 +152,12 @@ mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.

############### Build SKQP
if [[ "$DEBIAN_ARCH" = "arm64" ]] \
     || [[ "$DEBIAN_ARCH" = "amd64" ]]; then
    . .gitlab-ci/container/build-skqp.sh
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    SKQP_ARCH="arm64" . .gitlab-ci/container/build-skqp.sh
    mv /skqp /lava-files/rootfs-${DEBIAN_ARCH}/.
fi

############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.
@@ -235,10 +210,7 @@ fi
set -e

cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/.
cp .gitlab-ci/container/debian/llvm-snapshot.gpg.key /lava-files/rootfs-${DEBIAN_ARCH}/.
cp .gitlab-ci/container/debian/winehq.gpg.key /lava-files/rootfs-${DEBIAN_ARCH}/.
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/{llvm-snapshot,winehq}.gpg.key
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh

@@ -246,8 +218,7 @@ rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has already
# been created.
find /libdrm/ -name lib\*\.so\* \
    -exec cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/. {} \;
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/
cp -Rp /libdrm/share /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/share
rm -rf /libdrm
@@ -261,13 +232,14 @@ fi

du -ah /lava-files/rootfs-${DEBIAN_ARCH} | sort -h | tail -100
pushd /lava-files/rootfs-${DEBIAN_ARCH}
tar --zstd -cf /lava-files/lava-rootfs.tar.zst .
tar czf /lava-files/lava-rootfs.tgz .
popd

. .gitlab-ci/container/container_post_build.sh

############### Upload the files!
FILES_TO_UPLOAD="lava-rootfs.tar.zst \
ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
FILES_TO_UPLOAD="lava-rootfs.tgz \
    $KERNEL_IMAGE_NAME"

if [[ -n $DEVICE_TREES ]]; then
@@ -275,9 +247,9 @@ if [[ -n $DEVICE_TREES ]]; then
fi

for f in $FILES_TO_UPLOAD; do
    ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/$f \
        https://${MINIO_PATH}/$f
    ci-fairy minio cp /lava-files/$f \
        minio://${MINIO_PATH}/$f
done

touch /lava-files/done
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/done https://${MINIO_PATH}/done
ci-fairy minio cp /lava-files/done minio://${MINIO_PATH}/done
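For a single file, the new upload loop above expands to one ci-fairy call per artifact; an illustrative expansion (the host and path come from the CI variables):

ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" \
    /lava-files/lava-rootfs.tar.zst \
    "https://${MINIO_PATH}/lava-rootfs.tar.zst"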
@@ -1,24 +0,0 @@
#!/bin/bash

export WINEPREFIX="$1"
export WINEDEBUG="-all"

# We don't want crash dialogs
cat >crashdialog.reg <<EOF
Windows Registry Editor Version 5.00

[HKEY_CURRENT_USER\Software\Wine\WineDbg]
"ShowCrashDialog"=dword:00000000

EOF

# Set the wine prefix and disable the crash dialog
wine64 regedit crashdialog.reg
rm crashdialog.reg

# An immediate wine command may fail with: "${WINEPREFIX}: Not a
# valid wine prefix." That message is emitted because the check for
# the system.reg file runs before the file has been created.
# Giving it a bit more time to be created solves the problem.
while ! test -f "${WINEPREFIX}/system.reg"; do sleep 1; done
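For reference, this helper was sourced with the target prefix as its only argument, as the test-image script earlier in this series does:

. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"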
@@ -4,7 +4,7 @@ set -e

VSOCK_STDOUT=$1
VSOCK_STDERR=$2
VM_TEMP_DIR=$3
VSOCK_TEMP_DIR=$3

mount -t proc none /proc
mount -t sysfs none /sys
@@ -12,7 +12,7 @@ mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp

. ${VM_TEMP_DIR}/crosvm-env.sh
. ${VSOCK_TEMP_DIR}/crosvm-env.sh

# The .gitlab-ci.yml script variable uses paths relative to the install
# directory, so change to that dir before running `crosvm-script`
@@ -31,7 +31,7 @@ DMESG_PID=$!
# Transfer the errors and crosvm-script output via a pair of virtio-vsocks
socat -d -u pipe:${STDERR_FIFO} vsock-listen:${VSOCK_STDERR} &
socat -d -U vsock-listen:${VSOCK_STDOUT} \
    system:"stdbuf -eL sh ${VM_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VM_TEMP_DIR}/exit_code",nofork
    system:"stdbuf -eL sh ${VSOCK_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VSOCK_TEMP_DIR}/exit_code",nofork

kill ${DMESG_PID}
wait

@@ -2,10 +2,6 @@

set -e

# If run outside of a deqp-runner invocation (e.g. piglit trace replay), act
# the same as the first thread in its threadpool.
THREAD=${DEQP_RUNNER_THREAD:-0}

#
# Helper to generate CIDs for virtio-vsock based communication with processes
# running inside crosvm guests.
@@ -21,7 +17,7 @@ THREAD=${DEQP_RUNNER_THREAD:-0}
# - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept
#   vsock connections on in order to transfer output messages
#
# - VM_TEMP_DIR: the temporary directory path used to pass additional
# - VSOCK_TEMP_DIR: the temporary directory path used to pass additional
#   context data towards the guest
#
set_vsock_context() {
@@ -30,12 +26,19 @@ set_vsock_context() {
        exit 1
    }

    VM_TEMP_DIR="/tmp-vm.${THREAD}"
    # Clear out any leftover files from a previous run.
    rm -rf $VM_TEMP_DIR
    mkdir $VM_TEMP_DIR || return 1
    local dir_prefix="/tmp-vsock."
    local cid_prefix=0
    unset VSOCK_TEMP_DIR

    VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((${THREAD} & 0x7f) << 25)))
    while [ ${cid_prefix} -lt 128 ]; do
        VSOCK_TEMP_DIR=${dir_prefix}${cid_prefix}
        mkdir "${VSOCK_TEMP_DIR}" >/dev/null 2>&1 && break || unset VSOCK_TEMP_DIR
        cid_prefix=$((cid_prefix + 1))
    done

    [ -n "${VSOCK_TEMP_DIR}" ] || return 1

    VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((cid_prefix & 0x7f) << 25)))
    VSOCK_STDOUT=5001
    VSOCK_STDERR=5002

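The CID arithmetic packs the low 25 bits of the job ID together with a 7-bit prefix in the top bits, so concurrent runners on one host get distinct CIDs; a worked example with illustrative values:

# CI_JOB_ID=1000 keeps its low 25 bits unchanged (1000 < 2^25);
# cid_prefix=2 contributes 2 << 25 = 67108864; OR-ing the two gives 67109864.
CI_JOB_ID=1000 cid_prefix=2
echo $(((CI_JOB_ID & 0x1ffffff) | ((cid_prefix & 0x7f) << 25)))
# prints: 67109864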
@@ -48,27 +51,19 @@ if [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then
    export DEQP_BIN_DIR
fi

VM_SOCKET=crosvm-${THREAD}.sock

# Terminate any existing crosvm, if a previous invocation of this shell script
# was terminated due to timeouts. This "vm stop" may fail if the crosvm died
# without cleaning itself up.
if [ -e $VM_SOCKET ]; then
    crosvm stop $VM_SOCKET || true
    # Wait for socats from that invocation to drain
    sleep 5
    rm -rf $VM_SOCKET || true
fi

set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }

# Ensure cleanup on script exit
trap 'exit ${exit_code}' INT TERM
trap 'exit_code=$?; [ -z "${CROSVM_PID}${SOCAT_PIDS}" ] || kill ${CROSVM_PID} ${SOCAT_PIDS} >/dev/null 2>&1 || true; rm -rf ${VSOCK_TEMP_DIR}' EXIT

# Securely pass the current variables to the crosvm environment
echo "Variables passed through:"
SCRIPT_DIR=$(readlink -en "${0%/*}")
${SCRIPT_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
${SCRIPT_DIR}/common/generate-env.sh | tee ${VSOCK_TEMP_DIR}/crosvm-env.sh

# Set the crosvm-script as the arguments of the current script
echo "$@" > ${VM_TEMP_DIR}/crosvm-script.sh
echo "$@" > ${VSOCK_TEMP_DIR}/crosvm-script.sh

# Set up networking
/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
@@ -76,14 +71,16 @@ echo 1 > /proc/sys/net/ipv4/ip_forward

# Start background processes to receive output from the guest
socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr &
SOCAT_PIDS=$!
socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout &
SOCAT_PIDS="${SOCAT_PIDS} $!"

# Prepare to start crosvm
unset DISPLAY
unset XDG_RUNTIME_DIR

CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"
CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VSOCK_TEMP_DIR}"

[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \
    CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false
@@ -94,20 +91,25 @@ set +e -x
NIR_DEBUG="novalidate" \
    LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
    GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
    VK_ICD_FILENAMES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER}_icd.x86_64.json \
    crosvm --no-syslog run \
    --gpu "${CROSVM_GPU_ARGS}" -m "${CROSVM_MEMORY:-4096}" -c 2 --disable-sandbox \
    crosvm run \
    --gpu "${CROSVM_GPU_ARGS}" -m 4096 -c 2 --disable-sandbox \
    --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
    --host-ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \
    -s $VM_SOCKET \
    --host_ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \
    --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \
    /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VM_TEMP_DIR}/crosvm 2>&1
    /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VSOCK_TEMP_DIR}/crosvm 2>&1 &

# Wait for the crosvm process to terminate
CROSVM_PID=$!
wait ${CROSVM_PID}
CROSVM_RET=$?
unset CROSVM_PID

[ ${CROSVM_RET} -eq 0 ] && {
    # socat background processes terminate gracefully when remote peers exit
    wait
    unset SOCAT_PIDS
    # The actual return code is the crosvm guest script's exit code
    CROSVM_RET=$(cat ${VM_TEMP_DIR}/exit_code 2>/dev/null)
    CROSVM_RET=$(cat ${VSOCK_TEMP_DIR}/exit_code 2>/dev/null)
    # Force an error when the guest script's exit code is not available
    CROSVM_RET=${CROSVM_RET:-1}
}
@@ -116,7 +118,7 @@ CROSVM_RET=$?
[ ${CROSVM_RET} -eq 0 ] || {
    set +x
    echo "Dumping crosvm output..." >&2
    cat ${VM_TEMP_DIR}/crosvm >&2
    cat ${VSOCK_TEMP_DIR}/crosvm >&2
    set -x
}

@@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh

echo -e "\e[0Ksection_start:$(date +%s):test_setup[collapsed=true]\r\e[0Kpreparing test setup"

@@ -1,27 +1,21 @@
variables:
  DEBIAN_X86_BUILD_BASE_IMAGE: "debian/x86_build-base"
  DEBIAN_BASE_TAG: "2022-11-15-ci-fairy"
  DEBIAN_BASE_TAG: "2022-02-21-libdrm"

  DEBIAN_X86_BUILD_IMAGE_PATH: "debian/x86_build"
  DEBIAN_BUILD_TAG: "2022-11-15-ci-fairy"

  DEBIAN_X86_BUILD_MINGW_IMAGE_PATH: "debian/x86_build-mingw"
  DEBIAN_BUILD_MINGW_TAG: "2022-10-18-dx-headers-va"
  DEBIAN_BUILD_TAG: "2022-02-21-libdrm"

  DEBIAN_X86_TEST_BASE_IMAGE: "debian/x86_test-base"

  DEBIAN_X86_TEST_IMAGE_PATH: "debian/x86_test-gl"
  DEBIAN_X86_TEST_GL_TAG: "2022-11-15-ci-fairy"
  DEBIAN_X86_TEST_VK_TAG: "2022-11-15-ci-fairy"
  DEBIAN_X86_TEST_GL_TAG: "2022-04-07-virgl-crosvm"
  DEBIAN_X86_TEST_VK_TAG: "2022-04-05-deqp-runner"

  FEDORA_X86_BUILD_TAG: "2022-09-22-python3-ply-2"
  KERNEL_ROOTFS_TAG: "2022-11-03-piglit_mesa-22.3"

  WINDOWS_X64_VS_PATH: "windows/x64_vs"
  WINDOWS_X64_VS_TAG: "2022-10-20-upgrade-zlib"
  FEDORA_X86_BUILD_TAG: "2022-03-18-spirv-tools-5"
  KERNEL_ROOTFS_TAG: "2022-04-07-prefix-skqp"

  WINDOWS_X64_BUILD_PATH: "windows/x64_build"
  WINDOWS_X64_BUILD_TAG: "2022-10-18-wrap-nodownload-va"
  WINDOWS_X64_BUILD_TAG: "2022-20-02-base_split"

  WINDOWS_X64_TEST_PATH: "windows/x64_test"
  WINDOWS_X64_TEST_TAG: "2022-08-17-bump"
  WINDOWS_X64_TEST_TAG: "2022-04-13-dozen_ci"

@@ -1,28 +0,0 @@
from datetime import timedelta


class MesaCIException(Exception):
    pass


class MesaCITimeoutError(MesaCIException):
    def __init__(self, *args, timeout_duration: timedelta) -> None:
        super().__init__(*args)
        self.timeout_duration = timeout_duration


class MesaCIRetryError(MesaCIException):
    def __init__(self, *args, retry_count: int) -> None:
        super().__init__(*args)
        self.retry_count = retry_count


class MesaCIParseException(MesaCIException):
    pass


class MesaCIKnownIssueException(MesaCIException):
    """Exception raised when the Mesa CI script finds something in the logs
    that is known to cause the LAVA job to eventually fail"""

    pass
@@ -1,4 +1,6 @@
.lava-test:
  extends:
    - .ci-run-policy
  # Cancel the job if a newer commit is pushed to the same branch
  interruptible: true
  variables:
@@ -12,9 +14,9 @@
    BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}"
    BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}"
    # per-job build artifacts
    BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/${CI_PROJECT_NAME}-${ARCH}.tar.zst"
    BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/${CI_PROJECT_NAME}-${ARCH}.tar.gz"
    JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
    JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst"
    JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz"
    MINIO_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
    PIGLIT_NO_WINDOW: 1
    VISIBILITY_GROUP: "Collabora+fdo"
@@ -27,12 +29,10 @@
      - results/
    exclude:
      - results/*.shader_cache
    reports:
      junit: results/junit.xml
  tags:
    - $RUNNER_TAG
  after_script:
    - wget -q "https://${JOB_RESULTS_PATH}" -O- | tar --zstd -x
    - wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz

.lava-test:armhf:
  variables:

@@ -31,5 +31,4 @@ TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests
PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \
    pytest "${TEST_DIR}" \
    -W ignore::DeprecationWarning \
    --junitxml=artifacts/ci_scripts_report.xml \
    -m 'not slow'
    --junitxml=artifacts/ci_scripts_report.xml

@@ -21,16 +21,14 @@ cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
# Prepare env vars for upload.
KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \
    artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
echo -e "\e[0Ksection_start:$(date +%s):variables[collapsed=true]\r\e[0KVariables passed through:"
cat results/job-rootfs-overlay/set-job-env-vars.sh
echo -e "\e[0Ksection_end:$(date +%s):variables\r\e[0K"

tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}"

touch results/lava.log
tail -f results/lava.log &
PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
artifacts/lava/lava_job_submitter.py \
    --dump-yaml \
    --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
    --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
@@ -47,6 +45,4 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
    --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
    --boot-method ${BOOT_METHOD} \
    --visibility-group ${VISIBILITY_GROUP} \
    --lava-tags "${LAVA_TAGS}" \
    --mesa-job-name "$CI_JOB_NAME" \
    >> results/lava.log
    --lava-tags "${LAVA_TAGS}" >> results/lava.log

@@ -1,47 +1,42 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2020 - 2022 Collabora Limited
|
||||
# Authors:
|
||||
# Gustavo Padovan <gustavo.padovan@collabora.com>
|
||||
# Guilherme Gallo <guilherme.gallo@collabora.com>
|
||||
# Copyright (C) 2020, 2021 Collabora Limited
|
||||
# Author: Gustavo Padovan <gustavo.padovan@collabora.com>
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice (including the next
|
||||
# paragraph) shall be included in all copies or substantial portions of the
|
||||
# Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
"""Send a job to LAVA, track it and collect log back"""
|
||||
|
||||
|
||||
import argparse
|
||||
import contextlib
|
||||
import pathlib
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import urllib.parse
|
||||
import xmlrpc.client
|
||||
import xmlrpc
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from os import getenv
|
||||
from typing import Any, Optional
|
||||
|
||||
import lavacli
|
||||
import yaml
|
||||
from lava.exceptions import (
|
||||
MesaCIException,
|
||||
MesaCIKnownIssueException,
|
||||
MesaCIParseException,
|
||||
MesaCIRetryError,
|
||||
MesaCITimeoutError,
|
||||
)
|
||||
from lava.utils import CONSOLE_LOG
|
||||
from lava.utils import DEFAULT_GITLAB_SECTION_TIMEOUTS as GL_SECTION_TIMEOUTS
|
||||
from lava.utils import (
|
||||
GitlabSection,
|
||||
LogFollower,
|
||||
LogSectionType,
|
||||
fatal_err,
|
||||
hide_sensitive_data,
|
||||
print_log,
|
||||
)
|
||||
from lavacli.utils import loader
|
||||
|
||||
# Timeout in seconds to decide if the device from the dispatched LAVA job has
|
||||
@@ -58,8 +53,17 @@ LOG_POLLING_TIME_SEC = int(getenv("LAVA_LOG_POLLING_TIME_SEC", 5))
|
||||
# How many retries should be made when a timeout happen.
|
||||
NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT_DETECTION", 2))
|
||||
|
||||
# How many attempts should be made when a timeout happen during LAVA device boot.
|
||||
NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3))
|
||||
|
||||
def print_log(msg):
|
||||
print("{}: {}".format(datetime.now(), msg))
|
||||
|
||||
def fatal_err(msg):
|
||||
print_log(msg)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
|
||||
return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)


def generate_lava_yaml(args):
@@ -72,13 +76,9 @@ def generate_lava_yaml(args):
        'context': {
            'extra_nfsroot_args': ' init=/init rootwait usbcore.quirks=0bda:8153:k'
        },
        "timeouts": {
            "job": {"minutes": args.job_timeout},
            "action": {"minutes": 3},
            "actions": {
                "depthcharge-action": {
                    "minutes": 3 * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
                }
        'timeouts': {
            'job': {
                'minutes': args.job_timeout
            }
        },
    }
@@ -96,8 +96,8 @@ def generate_lava_yaml(args):
            'url': '{}/{}'.format(args.kernel_url_prefix, args.kernel_image_name),
        },
        'nfsrootfs': {
            'url': '{}/lava-rootfs.tar.zst'.format(args.rootfs_url_prefix),
            'compression': 'zstd',
            'url': '{}/lava-rootfs.tgz'.format(args.rootfs_url_prefix),
            'compression': 'gz',
        }
    }
    if args.kernel_image_type:
@@ -109,22 +109,20 @@ def generate_lava_yaml(args):

    # always boot over NFS
    boot = {
        "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
        "method": args.boot_method,
        "commands": "nfs",
        "prompts": ["lava-shell:"],
        'timeout': { 'minutes': 25 },
        'method': args.boot_method,
        'commands': 'nfs',
        'prompts': ['lava-shell:'],
    }

    # skeleton test definition: only declaring each job as a single 'test'
    # since LAVA's test parsing is not useful to us
    run_steps = []
    test = {
        'timeout': { 'minutes': args.job_timeout },
        'failure_retry': 1,
        'definitions': [ {
            'name': 'mesa',
            'from': 'inline',
            'lava-signal': 'kmsg',
            'path': 'inline/mesa.yaml',
            'repository': {
                'metadata': {
@@ -134,8 +132,10 @@ def generate_lava_yaml(args):
                    'scope': [ 'functional' ],
                    'format': 'Lava-Test Test Definition 1.0',
                },
                'parse': {
                    'pattern': r'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
                },
                'run': {
                    "steps": run_steps
                },
            },
        } ],
@@ -145,39 +145,27 @@ def generate_lava_yaml(args):
    # - inline .gitlab-ci/common/init-stage1.sh
    # - fetch and unpack per-pipeline build artifacts from build job
    # - fetch and unpack per-job environment from lava-submit.sh
    # - exec .gitlab-ci/common/init-stage2.sh
    # - exec .gitlab-ci/common/init-stage2.sh
    init_lines = []

    with open(args.first_stage_init, 'r') as init_sh:
        run_steps += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]
        init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]

    if args.jwt_file:
        with open(args.jwt_file) as jwt_file:
            run_steps += [
                "set +x",
                f'echo -n "{jwt_file.read()}" > "{args.jwt_file}" # HIDEME',
                "set -x",
                f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
            ]
    else:
        run_steps += [
            "echo Could not find jwt file, disabling MINIO requests...",
            "sed -i '/MINIO_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
    with open(args.jwt_file) as jwt_file:
        init_lines += [
            "set +x",
            f'echo -n "{jwt_file.read()}" > "{args.jwt_file}" # HIDEME',
            "set -x",
        ]

    run_steps += [
    init_lines += [
        'mkdir -p {}'.format(args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar --zstd -x -C {}'.format(args.build_url, args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.build_url, args.ci_project_dir),
        'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),

        # Sleep a bit to give time for bash to dump shell xtrace messages into
        # console which may cause interleaving with LAVA_SIGNAL_STARTTC in some
        # devices like a618.
        'sleep 1',

        # Use the CI_JOB name as the testcase name; it may help LAVA farm
        # maintainers with monitoring
f"lava-test-case 'mesa-ci_{args.mesa_job_name}' --shell /init-stage2.sh",
|
||||
f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
|
||||
'exec /init-stage2.sh',
|
||||
]
|
||||
test['definitions'][0]['repository']['run']['steps'] = init_lines
|
||||
|
||||
values['actions'] = [
|
||||
{ 'deploy': deploy },
|
||||
@@ -223,310 +211,135 @@ def _call_proxy(fn, *args):
|
||||
fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode))
|
||||
|
||||
|
||||
class LAVAJob:
|
||||
COLOR_STATUS_MAP = {
|
||||
"pass": CONSOLE_LOG["FG_GREEN"],
|
||||
"hung": CONSOLE_LOG["FG_YELLOW"],
|
||||
"fail": CONSOLE_LOG["FG_RED"],
|
||||
"canceled": CONSOLE_LOG["FG_MAGENTA"],
|
||||
}
|
||||
|
||||
def __init__(self, proxy, definition):
|
||||
self.job_id = None
|
||||
self.proxy = proxy
|
||||
self.definition = definition
|
||||
self.last_log_line = 0
|
||||
self.last_log_time = None
|
||||
self.is_finished = False
|
||||
self.status = "created"
|
||||
|
||||
def heartbeat(self):
|
||||
self.last_log_time = datetime.now()
|
||||
self.status = "running"
|
||||
|
||||
def validate(self) -> Optional[dict]:
|
||||
"""Returns a dict with errors, if the validation fails.
|
||||
|
||||
Returns:
|
||||
Optional[dict]: a dict with the validation errors, if any
|
||||
"""
|
||||
return _call_proxy(self.proxy.scheduler.jobs.validate, self.definition, True)
|
||||
|
||||
def submit(self):
|
||||
try:
|
||||
self.job_id = _call_proxy(self.proxy.scheduler.jobs.submit, self.definition)
|
||||
except MesaCIException:
|
||||
return False
|
||||
return True
|
||||
|
||||
def cancel(self):
|
||||
if self.job_id:
|
||||
self.proxy.scheduler.jobs.cancel(self.job_id)
|
||||
|
||||
def is_started(self) -> bool:
|
||||
waiting_states = ["Submitted", "Scheduling", "Scheduled"]
|
||||
job_state: dict[str, str] = _call_proxy(
|
||||
self.proxy.scheduler.job_state, self.job_id
|
||||
)
|
||||
return job_state["job_state"] not in waiting_states
|
||||
|
||||
def _load_log_from_data(self, data) -> list[str]:
|
||||
lines = []
|
||||
# When there is no new log data, the YAML is empty
|
||||
if loaded_lines := yaml.load(str(data), Loader=loader(False)):
|
||||
lines = loaded_lines
|
||||
self.last_log_line += len(lines)
|
||||
return lines
|
||||
|
||||
def get_logs(self) -> list[str]:
|
||||
try:
|
||||
(finished, data) = _call_proxy(
|
||||
self.proxy.scheduler.jobs.logs, self.job_id, self.last_log_line
|
||||
)
|
||||
self.is_finished = finished
|
||||
return self._load_log_from_data(data)
|
||||
|
||||
except Exception as mesa_ci_err:
|
||||
raise MesaCIParseException(
|
||||
f"Could not get LAVA job logs. Reason: {mesa_ci_err}"
|
||||
) from mesa_ci_err
|
||||
|
||||
def parse_job_result_from_log(
|
||||
self, lava_lines: list[dict[str, str]]
|
||||
) -> list[dict[str, str]]:
|
||||
"""Use the console log to catch if the job has completed successfully or
|
||||
not. Returns the list of log lines until the result line."""
|
||||
|
||||
last_line = None # Print all lines. lines[:None] == lines[:]
|
||||
|
||||
for idx, line in enumerate(lava_lines):
|
||||
if result := re.search(r"hwci: mesa: (pass|fail)", line):
|
||||
self.is_finished = True
|
||||
self.status = result.group(1)
|
||||
|
||||
last_line = idx + 1
|
||||
# We reached the log end here. hwci script has finished.
|
||||
break
|
||||
return lava_lines[:last_line]
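# [Editor's note] A hedged sketch of the result parsing above, not part of the
# original change; the log lines are invented. A "hwci: mesa: pass" line marks
# the job finished and truncates the returned log at that point.
_job = LAVAJob(proxy=None, definition=None)
_lines = ["deqp-runner suite", "hwci: mesa: pass", "lava post-processing"]
assert _job.parse_job_result_from_log(_lines) == ["deqp-runner suite", "hwci: mesa: pass"]
assert _job.status == "pass" and _job.is_finished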


def find_exception_from_metadata(metadata, job_id):
    if "result" not in metadata or metadata["result"] != "fail":
        return
    if "error_type" in metadata:
        error_type = metadata["error_type"]
        if error_type == "Infrastructure":
            raise MesaCIException(
                f"LAVA job {job_id} failed with Infrastructure Error. Retry."
            )
        if error_type == "Job":
            # This happens when LAVA assumes that the job cannot terminate or
            # with malformed job definitions. As we are always validating the
            # jobs, only the former is likely to happen, e.g. when some LAVA
            # action times out more often than the job definition expects.
            raise MesaCIException(
                f"LAVA job {job_id} failed with JobError "
                "(possible LAVA timeout misconfiguration/bug). Retry."
            )
    if "case" in metadata and metadata["case"] == "validate":
        raise MesaCIException(
            f"LAVA job {job_id} failed validation (possible download error). Retry."
        )
    return metadata
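# [Editor's note] An illustrative sketch, not part of the original change: a
# metadata dict of this (invented) shape triggers the Infrastructure retry path.
try:
    find_exception_from_metadata({"result": "fail", "error_type": "Infrastructure"}, 1234)
except MesaCIException as err:
    print_log(err)  # LAVA job 1234 failed with Infrastructure Error. Retry.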


def find_lava_error(job) -> None:
def get_job_results(proxy, job_id, test_suite, test_case):
    # Look for infrastructure errors and retry if we see them.
    results_yaml = _call_proxy(job.proxy.results.get_testjob_results_yaml, job.job_id)
    results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
    results = yaml.load(results_yaml, Loader=loader(False))
    for res in results:
        metadata = res["metadata"]
        find_exception_from_metadata(metadata, job.job_id)
        if "result" not in metadata or metadata["result"] != "fail":
            continue
        if 'error_type' in metadata and metadata['error_type'] == "Infrastructure":
            print_log("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id))
            return False
        if 'case' in metadata and metadata['case'] == "validate":
            print_log("LAVA job {} failed validation (possible download error). Retry.".format(job_id))
            return False

    # If we reach this far, it means that the job ended without hwci script
    # result and no LAVA infrastructure problem was found
    job.status = "fail"
    results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case)
    results = yaml.load(results_yaml, Loader=loader(False))
    if not results:
        fatal_err("LAVA: no result for test_suite '{}', test_case '{}'".format(test_suite, test_case))

    print_log("LAVA: result for test_suite '{}', test_case '{}': {}".format(test_suite, test_case, results[0]['result']))
    if results[0]['result'] != 'pass':
        fatal_err("FAIL")

def show_job_data(job):
    with GitlabSection(
        "job_data",
        "LAVA job info",
        type=LogSectionType.LAVA_POST_PROCESSING,
        start_collapsed=True,
    ):
        show = _call_proxy(job.proxy.scheduler.jobs.show, job.job_id)
        for field, value in show.items():
            print("{}\t: {}".format(field, value))
    return True

def wait_until_job_is_started(proxy, job_id):
    print_log(f"Waiting for job {job_id} to start.")
    current_state = "Submitted"
    waiting_states = ["Submitted", "Scheduling", "Scheduled"]
    while current_state in waiting_states:
        job_state = _call_proxy(proxy.scheduler.job_state, job_id)
        current_state = job_state["job_state"]

def fetch_logs(job, max_idle_time, log_follower) -> None:
    # Poll to check for new logs, assuming that a prolonged period of
    # silence means that the device has died and we should try it again
    if datetime.now() - job.last_log_time > max_idle_time:
        max_idle_time_min = max_idle_time.total_seconds() / 60

        raise MesaCITimeoutError(
            f"{CONSOLE_LOG['BOLD']}"
            f"{CONSOLE_LOG['FG_YELLOW']}"
            f"LAVA job {job.job_id} does not respond for {max_idle_time_min} "
            "minutes. Retry."
            f"{CONSOLE_LOG['RESET']}",
            timeout_duration=max_idle_time,
        )

    time.sleep(LOG_POLLING_TIME_SEC)

    # The XMLRPC binary packet may be corrupted, causing a YAML scanner error.
    # Retry the log fetching several times before exposing the error.
    for _ in range(5):
        with contextlib.suppress(MesaCIParseException):
            new_log_lines = job.get_logs()
            break
    else:
        raise MesaCIParseException

    if log_follower.feed(new_log_lines):
        # If we had non-empty log data, we can assure that the device is alive.
        job.heartbeat()
    parsed_lines = log_follower.flush()

    # Only parse job results when the script reaches the end of the logs.
    # Depending on how much payload the RPC scheduler.jobs.logs get, it may
    # reach the LAVA_POST_PROCESSING phase.
    if log_follower.current_section.type in (
        LogSectionType.TEST_CASE,
        LogSectionType.LAVA_POST_PROCESSING,
    ):
        parsed_lines = job.parse_job_result_from_log(parsed_lines)

    for line in parsed_lines:
        print_log(line)


def follow_job_execution(job):
    try:
        job.submit()
    except Exception as mesa_ci_err:
        raise MesaCIException(
            f"Could not submit LAVA job. Reason: {mesa_ci_err}"
        ) from mesa_ci_err

    print_log(f"Waiting for job {job.job_id} to start.")
    while not job.is_started():
        time.sleep(WAIT_FOR_DEVICE_POLLING_TIME_SEC)
    print_log(f"Job {job.job_id} started.")
    print_log(f"Job {job_id} started.")

    gl = GitlabSection(
        id="lava_boot",
        header="LAVA boot",
        type=LogSectionType.LAVA_BOOT,
        start_collapsed=True,
    )
    print(gl.start())
    max_idle_time = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
    with LogFollower(current_section=gl) as lf:
def follow_job_execution(proxy, job_id):
    line_count = 0
    finished = False
    last_time_logs = datetime.now()
    while not finished:
        (finished, data) = _call_proxy(proxy.scheduler.jobs.logs, job_id, line_count)
        if logs := yaml.load(str(data), Loader=loader(False)):
            # Reset the timeout
            last_time_logs = datetime.now()
            for line in logs:
                print("{} {}".format(line["dt"], line["msg"]))

        max_idle_time = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
        # Start to check job's health
        job.heartbeat()
        while not job.is_finished:
            fetch_logs(job, max_idle_time, lf)
            line_count += len(logs)

    show_job_data(job)
        else:
            time_limit = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
            if datetime.now() - last_time_logs > time_limit:
                print_log("LAVA job {} doesn't advance (machine got hung?). Retry.".format(job_id))
                return False

    # Mesa Developers expect to have a simple pass/fail job result.
    # If this does not happen, it probably means a LAVA infrastructure error
    # happened.
    if job.status not in ["pass", "fail"]:
        find_lava_error(job)
        # `proxy.scheduler.jobs.logs` does not block, even when there is no
        # new log to be fetched. To avoid DoSing the LAVA dispatcher
        # machine, let's add a sleep to save it some stamina.
        time.sleep(LOG_POLLING_TIME_SEC)

    return True

def show_job_data(proxy, job_id):
    show = _call_proxy(proxy.scheduler.jobs.show, job_id)
    for field, value in show.items():
        print("{}\t: {}".format(field, value))


def print_job_final_status(job):
    if job.status == "running":
        job.status = "hung"
def validate_job(proxy, job_file):
    try:
        return _call_proxy(proxy.scheduler.jobs.validate, job_file, True)
    except:
        return False

    color = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"])
    print_log(
        f"{color}"
        f"LAVA Job finished with status: {job.status}"
        f"{CONSOLE_LOG['RESET']}"
    )
def submit_job(proxy, job_file):
    return _call_proxy(proxy.scheduler.jobs.submit, job_file)


def retriable_follow_job(proxy, job_definition) -> LAVAJob:
def retriable_follow_job(proxy, yaml_file):
    retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION

    for attempt_no in range(1, retry_count + 2):
        job = LAVAJob(proxy, job_definition)
        try:
            follow_job_execution(job)
            return job
        except MesaCIKnownIssueException as found_issue:
            print_log(found_issue)
            job.status = "canceled"
        except MesaCIException as mesa_exception:
            print_log(mesa_exception)
            job.cancel()
        except KeyboardInterrupt as e:
            print_log("LAVA job submitter was interrupted. Cancelling the job.")
            job.cancel()
            raise e
        finally:
            print_log(
                f"{CONSOLE_LOG['BOLD']}"
                f"Finished executing LAVA job in the attempt #{attempt_no}"
                f"{CONSOLE_LOG['RESET']}"
            )
            print_job_final_status(job)
    while retry_count >= 0:
        job_id = submit_job(proxy, yaml_file)

    raise MesaCIRetryError(
        f"{CONSOLE_LOG['BOLD']}"
        f"{CONSOLE_LOG['FG_RED']}"
        "Job failed after it exceeded the number of "
        f"{retry_count} retries."
        f"{CONSOLE_LOG['RESET']}",
        retry_count=retry_count,
    )
        print_log("LAVA job id: {}".format(job_id))

        wait_until_job_is_started(proxy, job_id)

def treat_mesa_job_name(args):
    # Remove mesa job names with spaces, which breaks the lava-test-case command
    args.mesa_job_name = args.mesa_job_name.split(" ")[0]
        if not follow_job_execution(proxy, job_id):
            print_log(f"Job {job_id} has timed out. Cancelling it.")
            # Cancel the job as it is considered unreachable by Mesa CI.
            proxy.scheduler.jobs.cancel(job_id)

            retry_count -= 1
            continue

        show_job_data(proxy, job_id)

        if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
            break
    else:
        # The script attempted all the retries. The job seemed to fail.
        return False

    return True


def main(args):
    proxy = setup_lava_proxy()

    # Overwrite the timeout for the testcases with the value offered by the
    # user. The testcase running time should be at least 4 times greater than
    # the other sections (boot and setup), so we can safely ignore them.
    # If LAVA fails to stop the job at this stage, it will fall back to the
    # script section timeout with a reasonable delay.
    GL_SECTION_TIMEOUTS[LogSectionType.TEST_CASE] = timedelta(minutes=args.job_timeout)

    job_definition = generate_lava_yaml(args)
    yaml_file = generate_lava_yaml(args)

    if args.dump_yaml:
        with GitlabSection(
            "yaml_dump",
            "LAVA job definition (YAML)",
            type=LogSectionType.LAVA_BOOT,
            start_collapsed=True,
        ):
            print(hide_sensitive_data(job_definition))
    job = LAVAJob(proxy, job_definition)

    if errors := job.validate():
        fatal_err(f"Error in LAVA job definition: {errors}")
    print_log("LAVA job definition validated successfully")
        print(hide_sensitive_data(generate_lava_yaml(args)))

    if args.validate_only:
        ret = validate_job(proxy, yaml_file)
        if not ret:
            fatal_err("Error in LAVA job definition")
        print("LAVA job definition validated successfully")
        return

    finished_job = retriable_follow_job(proxy, job_definition)
    exit_code = 0 if finished_job.status == "pass" else 1
    sys.exit(exit_code)
    if not retriable_follow_job(proxy, yaml_file):
        fatal_err(
"Job failed after it exceeded the number of"
|
||||
f"{NUMBER_OF_RETRIES_TIMEOUT_DETECTION} retries."
|
||||
)
|
||||
|
||||
|
||||
def create_parser():
|
||||
@@ -550,11 +363,9 @@ def create_parser():
|
||||
parser.add_argument("--validate-only", action='store_true')
|
||||
parser.add_argument("--dump-yaml", action='store_true')
|
||||
parser.add_argument("--visibility-group")
|
||||
parser.add_argument("--mesa-job-name")
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
|
||||
# GitLab runner -> GitLab primary -> user, safe to say we don't need any
|
||||
@@ -566,5 +377,4 @@ if __name__ == "__main__":
|
||||
|
||||
parser.set_defaults(func=main)
|
||||
args = parser.parse_args()
|
||||
treat_mesa_job_name(args)
|
||||
args.func(args)
|
||||
|
@@ -1,16 +0,0 @@
|
||||
from .console_format import CONSOLE_LOG
|
||||
from .gitlab_section import GitlabSection
|
||||
from .log_follower import (
|
||||
LogFollower,
|
||||
fatal_err,
|
||||
fix_lava_color_log,
|
||||
fix_lava_gitlab_section_log,
|
||||
hide_sensitive_data,
|
||||
print_log,
|
||||
)
|
||||
from .log_section import (
|
||||
DEFAULT_GITLAB_SECTION_TIMEOUTS,
|
||||
FALLBACK_GITLAB_SECTION_TIMEOUT,
|
||||
LogSection,
|
||||
LogSectionType,
|
||||
)
|
@@ -1,10 +0,0 @@
|
||||
CONSOLE_LOG = {
|
||||
"FG_GREEN": "\x1b[1;32;5;197m",
|
||||
"FG_RED": "\x1b[1;38;5;197m",
|
||||
"FG_YELLOW": "\x1b[1;33;5;197m",
|
||||
"FG_MAGENTA": "\x1b[1;35;5;197m",
|
||||
"RESET": "\x1b[0m",
|
||||
"UNDERLINED": "\x1b[3m",
|
||||
"BOLD": "\x1b[1m",
|
||||
"DIM": "\x1b[2m",
|
||||
}
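# [Editor's note] Usage sketch, not part of the original change: wrap a message
# in an escape code and always end with RESET so later output is not tinted.
print(f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}pass{CONSOLE_LOG['RESET']}")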
@@ -1,84 +0,0 @@
from __future__ import annotations

import re
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Optional

from lava.utils.console_format import CONSOLE_LOG

if TYPE_CHECKING:
    from lava.utils.log_section import LogSectionType


@dataclass
class GitlabSection:
    id: str
    header: str
    type: LogSectionType
    start_collapsed: bool = False
    escape: str = "\x1b[0K"
    colour: str = f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"
    __start_time: Optional[datetime] = field(default=None, init=False)
    __end_time: Optional[datetime] = field(default=None, init=False)

    @classmethod
    def section_id_filter(cls, value) -> str:
        return str(re.sub(r"[^\w_-]+", "-", value))

    def __post_init__(self):
        self.id = self.section_id_filter(self.id)

    @property
    def has_started(self) -> bool:
        return self.__start_time is not None

    @property
    def has_finished(self) -> bool:
        return self.__end_time is not None

    def get_timestamp(self, time: datetime) -> str:
        unix_ts = datetime.timestamp(time)
        return str(int(unix_ts))

    def section(self, marker: str, header: str, time: datetime) -> str:
        preamble = f"{self.escape}section_{marker}"
        collapse = marker == "start" and self.start_collapsed
        collapsed = "[collapsed=true]" if collapse else ""
        section_id = f"{self.id}{collapsed}"

        timestamp = self.get_timestamp(time)
        before_header = ":".join([preamble, timestamp, section_id])
        colored_header = f"{self.colour}{header}\x1b[0m" if header else ""
        header_wrapper = "\r" + f"{self.escape}{colored_header}"

        return f"{before_header}{header_wrapper}"

    def __enter__(self):
        print(self.start())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print(self.end())

    def start(self) -> str:
        assert not self.has_finished, "Starting an already finished section"
        self.__start_time = datetime.now()
        return self.section(marker="start", header=self.header, time=self.__start_time)

    def end(self) -> str:
        assert self.has_started, "Ending an uninitialized section"
        self.__end_time = datetime.now()
        assert (
            self.__end_time >= self.__start_time
        ), "Section execution time will be negative"
        return self.section(marker="end", header="", time=self.__end_time)

    def delta_time(self) -> Optional[timedelta]:
        if self.__start_time and self.__end_time:
            return self.__end_time - self.__start_time

        if self.has_started:
            return datetime.now() - self.__start_time

        return None
@@ -1,43 +0,0 @@
from __future__ import annotations

import re
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from lava.utils import LogFollower

from lava.exceptions import MesaCIKnownIssueException
from lava.utils.console_format import CONSOLE_LOG
from lava.utils.log_section import LogSectionType


@dataclass
class LAVALogHints:
    log_follower: LogFollower
    has_r8152_issue_history: bool = field(default=False, init=False)

    def detect_failure(self, new_lines: list[dict[str, Any]]):
        for line in new_lines:
            self.detect_r8152_issue(line)

    def detect_r8152_issue(self, line):
        if (
            self.log_follower.phase == LogSectionType.TEST_CASE
            and line["lvl"] == "target"
        ):
            if re.search(r"r8152 \S+ eth0: Tx status -71", line["msg"]):
                self.has_r8152_issue_history = True
                return

            if self.has_r8152_issue_history and re.search(
                r"nfs: server \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} not responding, still trying",
                line["msg"],
            ):
                raise MesaCIKnownIssueException(
                    f"{CONSOLE_LOG['FG_MAGENTA']}"
                    "Probable network issue failure encountered, retrying the job"
                    f"{CONSOLE_LOG['RESET']}"
                )

        self.has_r8152_issue_history = False
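    # [Editor's note] Two invented log lines of the shape this heuristic
    # targets, assuming the follower is in the TEST_CASE phase; the first arms
    # the history flag, the second raises MesaCIKnownIssueException so the job
    # is retried:
    #   {"lvl": "target", "msg": "r8152 2-1.3:1.0 eth0: Tx status -71"}
    #   {"lvl": "target", "msg": "nfs: server 10.0.0.1 not responding, still trying"}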
@@ -1,218 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2022 Collabora Limited
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
#
# SPDX-License-Identifier: MIT

"""
Some utilities to analyse logs, create gitlab sections and other quality of life
improvements
"""

import logging
import re
import sys
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Optional, Union

from lava.exceptions import MesaCITimeoutError
from lava.utils.console_format import CONSOLE_LOG
from lava.utils.gitlab_section import GitlabSection
from lava.utils.lava_log_hints import LAVALogHints
from lava.utils.log_section import (
    DEFAULT_GITLAB_SECTION_TIMEOUTS,
    FALLBACK_GITLAB_SECTION_TIMEOUT,
    LOG_SECTIONS,
    LogSectionType,
)


@dataclass
class LogFollower:
    current_section: Optional[GitlabSection] = None
    timeout_durations: dict[LogSectionType, timedelta] = field(
        default_factory=lambda: DEFAULT_GITLAB_SECTION_TIMEOUTS,
    )
    fallback_timeout: timedelta = FALLBACK_GITLAB_SECTION_TIMEOUT
    _buffer: list[str] = field(default_factory=list, init=False)
    log_hints: LAVALogHints = field(init=False)

    def __post_init__(self):
        section_is_created = bool(self.current_section)
        section_has_started = bool(
            self.current_section and self.current_section.has_started
        )
        self.log_hints = LAVALogHints(self)
        assert (
            section_is_created == section_has_started
        ), "Can't follow logs beginning from uninitialized GitLab sections."

    @property
    def phase(self) -> LogSectionType:
        return (
            self.current_section.type
            if self.current_section
            else LogSectionType.UNKNOWN
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Cleanup existing buffer if this object gets out from the context"""
        self.clear_current_section()
        last_lines = self.flush()
        for line in last_lines:
            print(line)

    def watchdog(self):
        if not self.current_section:
            return

        timeout_duration = self.timeout_durations.get(
            self.current_section.type, self.fallback_timeout
        )

        if self.current_section.delta_time() > timeout_duration:
            raise MesaCITimeoutError(
                f"Gitlab Section {self.current_section} has timed out",
                timeout_duration=timeout_duration,
            )

    def clear_current_section(self):
        if self.current_section and not self.current_section.has_finished:
            self._buffer.append(self.current_section.end())
            self.current_section = None

    def update_section(self, new_section: GitlabSection):
        # Sections may have redundant regexes to find them, to mitigate LAVA's
        # issue of interleaving kmsg with stderr/stdout.
        if self.current_section and self.current_section.id == new_section.id:
            return
        self.clear_current_section()
        self.current_section = new_section
        self._buffer.append(new_section.start())

    def manage_gl_sections(self, line):
        if isinstance(line["msg"], list):
            logging.debug("Ignoring messages as list. Kernel dumps.")
            return

        for log_section in LOG_SECTIONS:
            if new_section := log_section.from_log_line_to_section(line):
                self.update_section(new_section)

    def detect_kernel_dump_line(self, line: dict[str, Union[str, list]]) -> bool:
        # line["msg"] can be a list[str] when there is a kernel dump
        if isinstance(line["msg"], list):
            return line["lvl"] == "debug"

        # result level has dict line["msg"]
        if not isinstance(line["msg"], str):
            return False

        # we have a line, check if it is a kernel message
        if re.search(r"\[[\d\s]{5}\.[\d\s]{6}\] +\S{2,}", line["msg"]):
            print_log(f"{CONSOLE_LOG['BOLD']}{line['msg']}{CONSOLE_LOG['RESET']}")
            return True

        return False

    def feed(self, new_lines: list[dict[str, str]]) -> bool:
        """Input data to be processed by LogFollower instance
        Returns true if the DUT (device under test) seems to be alive.
        """

        self.watchdog()

        # No signal of job health in the log
        is_job_healthy = False

        for line in new_lines:
            if self.detect_kernel_dump_line(line):
                continue

            # At least we are fed with a non-kernel dump log, it seems that the
            # job is progressing
            is_job_healthy = True
            self.manage_gl_sections(line)
            if parsed_line := parse_lava_line(line):
                self._buffer.append(parsed_line)

        self.log_hints.detect_failure(new_lines)

        return is_job_healthy

    def flush(self) -> list[str]:
        buffer = self._buffer
        self._buffer = []
        return buffer


def fix_lava_color_log(line):
    """This function is a temporary solution for the color escape codes mangling
    problem. There is some problem in message passing between the LAVA
    dispatcher and the device under test (DUT). Here \x1b character is missing
    before `[:digit::digit:?:digit:?m` ANSI TTY color codes, or the more
    complicated ones with number values for text format before background and
    foreground colors.
    When this problem is fixed on the LAVA side, one should remove this function.
    """
    line["msg"] = re.sub(r"(\[(\d+;){0,2}\d{1,3}m)", "\x1b" + r"\1", line["msg"])


def fix_lava_gitlab_section_log(line):
    """This function is a temporary solution for the Gitlab section markers
    mangling problem. Gitlab parses the following lines to define a collapsible
    gitlab section in their log:
    - \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
    - \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
    There is some problem in message passing between the LAVA dispatcher and the
    device under test (DUT), that digests \x1b and \r control characters
    incorrectly. When this problem is fixed on the LAVA side, one should remove
    this function.
    """
    if match := re.match(r"\[0K(section_\w+):(\d+):(\S+)\[0K([\S ]+)?", line["msg"]):
        marker, timestamp, id_collapsible, header = match.groups()
        # The above regex serves for both section start and end lines.
        # When the header is None, it means we are dealing with `section_end` line
        header = header or ""
        line["msg"] = f"\x1b[0K{marker}:{timestamp}:{id_collapsible}\r\x1b[0K{header}"


def parse_lava_line(line) -> Optional[str]:
    prefix = ""
    suffix = ""

    if line["lvl"] in ["results", "feedback", "debug"]:
        return
    elif line["lvl"] in ["warning", "error"]:
        prefix = CONSOLE_LOG["FG_RED"]
        suffix = CONSOLE_LOG["RESET"]
    elif line["lvl"] == "input":
        prefix = "$ "
        suffix = ""
    elif line["lvl"] == "target":
        fix_lava_color_log(line)
        fix_lava_gitlab_section_log(line)

    return f'{prefix}{line["msg"]}{suffix}'


def print_log(msg):
    # Reset color from timestamp, since `msg` can tint the terminal color
    print(f"{CONSOLE_LOG['RESET']}{datetime.now()}: {msg}")

def fatal_err(msg):
    colored_msg = (
        f"{CONSOLE_LOG['FG_RED']}"
        f"{msg}"
        f"{CONSOLE_LOG['RESET']}"
    )
    print_log(colored_msg)
    sys.exit(1)


def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
    return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)
@@ -1,100 +0,0 @@
import re
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum, auto
from os import getenv
from typing import Optional, Pattern, Union

from lava.utils.gitlab_section import GitlabSection


class LogSectionType(Enum):
    UNKNOWN = auto()
    LAVA_BOOT = auto()
    TEST_SUITE = auto()
    TEST_CASE = auto()
    LAVA_POST_PROCESSING = auto()


# Empirically, successful device boot in LAVA time takes less than 3
# minutes.
# LAVA itself is configured to attempt thrice to boot the device,
# summing up to 9 minutes.
# It is better to retry the boot than cancel the job and re-submit to avoid
# the enqueue delay.
LAVA_BOOT_TIMEOUT = int(getenv("LAVA_BOOT_TIMEOUT", 9))

# Test suite phase is where the initialization happens.
LAVA_TEST_SUITE_TIMEOUT = int(getenv("LAVA_TEST_SUITE_TIMEOUT", 5))

# Test cases may take a long time; this script has no right to interrupt
# them. But if the test case takes almost 1h, it will never succeed due to
# the GitLab job timeout.
LAVA_TEST_CASE_TIMEOUT = int(getenv("JOB_TIMEOUT", 60))

# LAVA post processing may refer to a test suite teardown, or the
# adjustments to start the next test_case
LAVA_POST_PROCESSING_TIMEOUT = int(getenv("LAVA_POST_PROCESSING_TIMEOUT", 5))

FALLBACK_GITLAB_SECTION_TIMEOUT = timedelta(minutes=10)
DEFAULT_GITLAB_SECTION_TIMEOUTS = {
    LogSectionType.LAVA_BOOT: timedelta(minutes=LAVA_BOOT_TIMEOUT),
    LogSectionType.TEST_SUITE: timedelta(minutes=LAVA_TEST_SUITE_TIMEOUT),
    LogSectionType.TEST_CASE: timedelta(minutes=LAVA_TEST_CASE_TIMEOUT),
    LogSectionType.LAVA_POST_PROCESSING: timedelta(
        minutes=LAVA_POST_PROCESSING_TIMEOUT
    ),
}


@dataclass(frozen=True)
class LogSection:
    regex: Union[Pattern, str]
    levels: tuple[str]
    section_id: str
    section_header: str
    section_type: LogSectionType
    collapsed: bool = False

    def from_log_line_to_section(
        self, lava_log_line: dict[str, str]
    ) -> Optional[GitlabSection]:
        if lava_log_line["lvl"] not in self.levels:
            return

        if match := re.search(self.regex, lava_log_line["msg"]):
            section_id = self.section_id.format(*match.groups())
            section_header = self.section_header.format(*match.groups())
            timeout = DEFAULT_GITLAB_SECTION_TIMEOUTS[self.section_type]
            return GitlabSection(
                id=section_id,
                header=f"{section_header} - Timeout: {timeout}",
                type=self.section_type,
                start_collapsed=self.collapsed,
            )


LOG_SECTIONS = (
    LogSection(
        regex=re.compile(r"<?STARTTC>? ([^>]*)"),
        levels=("target", "debug"),
        section_id="{}",
        section_header="test_case {}",
        section_type=LogSectionType.TEST_CASE,
    ),
    LogSection(
        regex=re.compile(r"<?STARTRUN>? ([^>]*)"),
        levels=("target", "debug"),
        section_id="{}",
        section_header="test_suite {}",
        section_type=LogSectionType.TEST_SUITE,
    ),
    LogSection(
        regex=re.compile(r"ENDTC>? ([^>]+)"),
        levels=("target", "debug"),
        section_id="post-{}",
        section_header="Post test_case {}",
        collapsed=True,
        section_type=LogSectionType.LAVA_POST_PROCESSING,
    ),
)
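# [Editor's note] An illustrative mapping, not part of the original change; the
# test name is invented. The first LogSection above turns a STARTTC log line
# into a TEST_CASE GitlabSection.
_line = {"lvl": "target", "msg": "<STARTTC> piglit_replay"}
_section = LOG_SECTIONS[0].from_log_line_to_section(_line)
assert _section is not None
assert _section.type == LogSectionType.TEST_CASE and _section.id == "piglit_replay"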
@@ -48,8 +48,6 @@ case $CI_JOB_NAME in
    if test -f /usr/bin/time; then
        MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
    fi
    Xvfb :0 -screen 0 1024x768x16 &
    export DISPLAY=:0.0
    ;;
  *)
    if test -f /usr/bin/time -a -f /usr/bin/strace; then
@@ -68,26 +66,16 @@ meson _build --native-file=native.file \
      -D build-tests=true \
      -D c_args="$(echo -n $C_ARGS)" \
      -D cpp_args="$(echo -n $CPP_ARGS)" \
      -D enable-glcpp-tests=false \
      -D libunwind=${UNWIND} \
      ${DRI_LOADERS} \
      ${GALLIUM_ST} \
      -D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
      -D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
      -D video-codecs=h264dec,h264enc,h265dec,h265enc,vc1dec \
      -D werror=true \
      ${EXTRA_OPTION}
cd _build
meson configure
if command -V mold &> /dev/null ; then
    mold --run ninja
else
    ninja
fi
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} --print-errorlogs ${MESON_TEST_ARGS}
if command -V mold &> /dev/null ; then
    mold --run ninja install
else
    ninja install
fi
ninja
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} ${MESON_TEST_ARGS}
ninja install
cd ..

@@ -1,8 +1,6 @@
#!/bin/sh

if [ "x$STRACEDIR" = "x" ]; then
    STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)
fi
STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)

mkdir -p $STRACEDIR

@@ -1,89 +0,0 @@
diff --git a/framework/replay/download_utils.py b/framework/replay/download_utils.py
index 36322b000..5c3fe140d 100644
--- a/framework/replay/download_utils.py
+++ b/framework/replay/download_utils.py
@@ -27,20 +27,20 @@ import base64
 import hashlib
 import hmac
 import xml.etree.ElementTree as ET
-
-from typing import Dict
 from email.utils import formatdate
 from os import path
 from time import time
+from typing import Dict
+from urllib.parse import urlparse
+
 import requests
 from requests.adapters import HTTPAdapter, Retry
-from framework.replay.local_file_adapter import LocalFileAdapter
 from requests.utils import requote_uri

 from framework import core, exceptions
+from framework.replay.local_file_adapter import LocalFileAdapter
 from framework.replay.options import OPTIONS

-
 __all__ = ['ensure_file']

 minio_credentials = None
@@ -90,7 +90,7 @@ def get_minio_credentials(url):
                             minio_credentials['SessionToken'])


-def get_authorization_headers(url, resource):
+def get_minio_authorization_headers(url, resource):
     minio_key, minio_secret, minio_token = get_minio_credentials(url)

     date = formatdate(timeval=None, localtime=False, usegmt=True)
@@ -107,6 +107,17 @@ def get_authorization_headers(url, resource):
     return headers


+def get_jwt_authorization_headers(url, resource):
+    date = formatdate(timeval=None, localtime=False, usegmt=True)
+    jwt = OPTIONS.download['jwt']
+    host = urlparse(url).netloc
+
+    headers = {'Host': host,
+               'Date': date,
+               'Authorization': 'Bearer %s' % (jwt)}
+    return headers
+
+
 def download(url: str, file_path: str, headers: Dict[str, str], attempts: int = 2) -> None:
     """Downloads a URL content into a file

@@ -178,7 +189,9 @@ def ensure_file(file_path):
         assert OPTIONS.download['minio_bucket']
         assert OPTIONS.download['role_session_name']
         assert OPTIONS.download['jwt']
-        headers = get_authorization_headers(url, file_path)
+        headers = get_minio_authorization_headers(url, file_path)
+    elif OPTIONS.download['jwt']:
+        headers = get_jwt_authorization_headers(url, file_path)
     else:
         headers = None

diff --git a/unittests/framework/replay/test_download_utils.py b/unittests/framework/replay/test_download_utils.py
index 1e78b26e7..749c5d835 100644
--- a/unittests/framework/replay/test_download_utils.py
+++ b/unittests/framework/replay/test_download_utils.py
@@ -195,3 +195,17 @@ class TestDownloadUtils(object):
         get_request = requests_mock.request_history[1]
         assert(get_request.method == 'GET')
         assert(requests_mock.request_history[1].headers['Authorization'].startswith('AWS Key'))
+
+    def test_jwt_authorization(self, requests_mock):
+        """download_utils.ensure_file: Check we send the authentication headers to the server"""
+        # reset minio_host from previous tests
+        OPTIONS.download['minio_host'] = ''
+        OPTIONS.download['jwt'] = 'jwt'
+
+        assert not self.trace_file.check()
+        download_utils.ensure_file(self.trace_path)
+        TestDownloadUtils.check_same_file(self.trace_file, "remote")
+
+        get_request = requests_mock.request_history[0]
+        assert(get_request.method == 'GET')
+        assert(requests_mock.request_history[0].headers['Authorization'].startswith('Bearer'))
@@ -3,47 +3,16 @@
set -ex

INSTALL=$(realpath -s "$PWD"/install)
MINIO_ARGS="--token-file ${CI_JOB_JWT_FILE}"
MINIO_ARGS="--credentials=/tmp/.minio_credentials"

RESULTS=$(realpath -s "$PWD"/results)
mkdir -p "$RESULTS"

if [ "$PIGLIT_REPLAY_SUBCOMMAND" = "profile" ]; then
    # workaround for older Debian Bullseye libyaml 0.2.2
    sed -i "/^%YAML 1\.2$/d" "$PIGLIT_REPLAY_DESCRIPTION_FILE"

    yq -i -Y '. | del(.traces[][] | select(.label[0,1,2,3,4,5,6,7,8,9] == "no-perf"))' \
"$PIGLIT_REPLAY_DESCRIPTION_FILE" # label positions are a bit hack
|
||||
fi

# WINE
case "$PIGLIT_REPLAY_DEVICE_NAME" in
    vk-*)
        export WINEPREFIX="/dxvk-wine64"
        ;;
    *)
        export WINEPREFIX="/generic-wine64"
        ;;
esac

PATH="/opt/wine-stable/bin/:$PATH" # WineHQ path

# Avoid asking about Gecko or Mono installation
export WINEDLLOVERRIDES="mscoree=d;mshtml=d"

# Set environment for DXVK.
export DXVK_LOG_LEVEL="info"
export DXVK_LOG="$RESULTS/dxvk"
[ -d "$DXVK_LOG" ] || mkdir -pv "$DXVK_LOG"
export DXVK_STATE_CACHE=0


# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when
# using a command wrapper. Hence, we will just set it when running the
# command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.${VK_CPU:-`uname -m`}.json"

# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
@@ -64,52 +33,96 @@ quiet() {
    set -x
}

# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"
if [ "$VK_DRIVER" ]; then

    export PIGLIT_REPLAY_WINE_BINARY=wine64
    export PIGLIT_REPLAY_WINE_APITRACE_BINARY="/apitrace-msvc-win64/bin/apitrace.exe"
    export PIGLIT_REPLAY_WINE_D3DRETRACE_BINARY="/apitrace-msvc-win64/bin/d3dretrace.exe"
    ### VULKAN ###

    # Our rootfs may not have "less", which apitrace uses during
    # apitrace dump
    export PAGER=cat
    # Set the Vulkan driver to use.
    export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"

    SANITY_MESA_VERSION_CMD="wflinfo"
    # Set environment for Wine.
    export WINEDEBUG="-all"
    export WINEPREFIX="/dxvk-wine64"
    export WINEESYNC=1

    HANG_DETECTION_CMD=""
    # Set environment for DXVK.
    export DXVK_LOG_LEVEL="none"
    export DXVK_STATE_CACHE=0

    # Set environment for gfxreconstruct executables.
    export PATH="/gfxreconstruct/build/bin:$PATH"

    SANITY_MESA_VERSION_CMD="vulkaninfo"

    HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"


    # Set up the platform windowing system.
    # Set up the Window System Interface (WSI)

    if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
        # Use the surfaceless EGL platform.
        export DISPLAY=
        export WAFFLE_PLATFORM="surfaceless_egl"

        SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"

        if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
            # piglit is to use virpipe, and virgl_test_server llvmpipe
            export GALLIUM_DRIVER="$GALLIUM_DRIVER"

            LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
            GALLIUM_DRIVER=llvmpipe \
            VTEST_USE_EGL_SURFACELESS=1 \
            VTEST_USE_GLES=1 \
            virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &

            sleep 1
    if [ ${TEST_START_XORG:-0} -eq 1 ]; then
        "$INSTALL"/common/start-x.sh "$INSTALL"
        export DISPLAY=:0
    else
        # Run vulkan against the host's running X server (xvfb doesn't
        # have DRI3 support).
        # Set the DISPLAY env variable in each gitlab-runner's
        # configuration file:
        # https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
        quiet printf "%s%s\n" "Running against the host's X server. " \
"DISPLAY is \"$DISPLAY\"."
|
||||
fi
|
||||
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
|
||||
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
|
||||
# It is assumed that you have already brought up your X server before
|
||||
# calling this script.
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
|
||||
else
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
|
||||
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
|
||||
|
||||
### GL/ES ###
|
||||
|
||||
# Set environment for apitrace executable.
|
||||
export PATH="/apitrace/build:$PATH"
|
||||
|
||||
# Our rootfs may not have "less", which apitrace uses during
|
||||
# apitrace dump
|
||||
export PAGER=cat
|
||||
|
||||
SANITY_MESA_VERSION_CMD="wflinfo"
|
||||
|
||||
HANG_DETECTION_CMD=""
|
||||
|
||||
|
||||
# Set up the platform windowing system.
|
||||
|
||||
if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
|
||||
|
||||
# Use the surfaceless EGL platform.
|
||||
export DISPLAY=
|
||||
export WAFFLE_PLATFORM="surfaceless_egl"
|
||||
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"
|
||||
|
||||
if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
|
||||
# piglit is to use virpipe, and virgl_test_server llvmpipe
|
||||
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
|
||||
|
||||
LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
|
||||
GALLIUM_DRIVER=llvmpipe \
|
||||
VTEST_USE_EGL_SURFACELESS=1 \
|
||||
VTEST_USE_GLES=1 \
|
||||
virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
|
||||
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
|
||||
# It is assumed that you have already brought up your X server before
|
||||
# calling this script.
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
|
||||
else
|
||||
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
|
||||
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$ZINK_USE_LAVAPIPE" ]; then
|
||||
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
|
||||
fi
|
||||
|
||||
# If the job is parallel at the gitlab job level, will take the corresponding
|
||||
@@ -137,8 +150,8 @@ replay_minio_upload_images() {
|
||||
__DESTINATION_FILE_PATH="$__MINIO_TRACES_PREFIX/${line##*-}"
|
||||
fi
|
||||
|
||||
ci-fairy s3cp $MINIO_ARGS "$RESULTS/$__PREFIX/$line" \
|
||||
"https://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
|
||||
ci-fairy minio cp $MINIO_ARGS "$RESULTS/$__PREFIX/$line" \
|
||||
"minio://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
|
||||
done
|
||||
}
|
||||
|
||||
@@ -165,7 +178,7 @@ PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
|
||||
|
||||
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
|
||||
|
||||
PIGLIT_CMD="./piglit run -l verbose --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")
|
||||
PIGLIT_CMD="./piglit run --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")
|
||||
|
||||
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"
|
||||
|
||||
@@ -173,6 +186,8 @@ if [ "$RUN_CMD_WRAPPER" ]; then
|
||||
RUN_CMD="set +e; $RUN_CMD_WRAPPER "$(/usr/bin/printf "%q" "$RUN_CMD")"; set -e"
|
||||
fi
|
||||
|
||||
ci-fairy minio login $MINIO_ARGS --token-file "${CI_JOB_JWT_FILE}"
|
||||
|
||||
# The replayer doesn't do any size or checksum verification for the traces in
|
||||
# the replayer db, so if we had to restart the system due to intermittent device
|
||||
# errors (or tried to cache replayer-db between runs, which would be nice to
|
||||
@@ -203,7 +218,7 @@ __PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
|
||||
__MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
|
||||
__MINIO_TRACES_PREFIX="traces"
|
||||
|
||||
if [ "$PIGLIT_REPLAY_SUBCOMMAND" != "profile" ]; then
|
||||
if [ "x$PIGLIT_REPLAY_SUBCOMMAND" != "xprofile" ]; then
|
||||
quiet replay_minio_upload_images
|
||||
fi
|
||||
|
||||
|
@@ -34,7 +34,6 @@ cp -Rp .gitlab-ci/fossilize-runner.sh install/
|
||||
cp -Rp .gitlab-ci/crosvm-init.sh install/
|
||||
cp -Rp .gitlab-ci/*.txt install/
|
||||
cp -Rp .gitlab-ci/report-flakes.py install/
|
||||
cp -Rp .gitlab-ci/valve install/
|
||||
cp -Rp .gitlab-ci/vkd3d-proton install/
|
||||
cp -Rp .gitlab-ci/*-runner.sh install/
|
||||
find . -path \*/ci/\*.txt \
|
||||
@@ -48,11 +47,12 @@ mkdir -p artifacts/
|
||||
tar -cf artifacts/install.tar install
|
||||
cp -Rp .gitlab-ci/common artifacts/ci-common
|
||||
cp -Rp .gitlab-ci/lava artifacts/
|
||||
cp -Rp .gitlab-ci/b2c artifacts/
|
||||
cp -Rp .gitlab-ci/valve artifacts/
|
||||
|
||||
if [ -n "$MINIO_ARTIFACT_NAME" ]; then
|
||||
# Pass needed files to the test stage
|
||||
MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.zst"
|
||||
zstd artifacts/install.tar -o ${MINIO_ARTIFACT_NAME}
|
||||
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${MINIO_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}
|
||||
MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.gz"
|
||||
gzip -c artifacts/install.tar > ${MINIO_ARTIFACT_NAME}
|
||||
ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
|
||||
ci-fairy minio cp ${MINIO_ARTIFACT_NAME} minio://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}
|
||||
fi
|
||||
|
@@ -16,12 +16,3 @@ for driver in freedreno intel v3d; do
|
||||
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
|
||||
> $ARTIFACTSDIR/${driver}-shader-db.txt
|
||||
done
|
||||
|
||||
# Run shader-db over a number of supported chipsets for nouveau
|
||||
for chipset in 40 a3 c0 e4 f0 134 162; do
|
||||
echo "Running drm-shim for nouveau - $chipset"
|
||||
env LD_PRELOAD=$LIBDIR/libnouveau_noop_drm_shim.so \
|
||||
NOUVEAU_CHIPSET=${chipset} \
|
||||
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
|
||||
> $ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt
|
||||
done
|
||||
|
@@ -1,23 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CHECKPATH=".gitlab-ci/container" # TODO: expand to cover whole .gitlab-ci/
|
||||
|
||||
is_bash() {
|
||||
[[ $1 == *.sh ]] && return 0
|
||||
[[ $1 == */bash-completion/* ]] && return 0
|
||||
[[ $(file -b --mime-type "$1") == text/x-shellscript ]] && return 0
|
||||
return 1
|
||||
}
|
||||
|
||||
while IFS= read -r -d $'' file; do
|
||||
if is_bash "$file" ; then
|
||||
shellcheck -x -W0 -s bash "$file"
|
||||
rc=$?
|
||||
if [ "${rc}" -eq 0 ]
|
||||
then
|
||||
continue
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
done < <(find $CHECKPATH -type f \! -path "./.git/*" -print0)
|
@@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Run yamllint against all traces files.
|
||||
find . -name '*traces*yml' -print0 | xargs -0 yamllint -d "{rules: {line-length: {max: 150}}}"
|
Some files were not shown because too many files have changed in this diff