Compare commits
258 Commits
mesa-25.0.
...
mesa-22.2.
Author | SHA1 | Date | |
---|---|---|---|
|
4ef98ce435 | ||
|
5c4d90f1aa | ||
|
b47e856216 | ||
|
ab6ce72fd9 | ||
|
2adacf5594 | ||
|
242b498115 | ||
|
20886cf572 | ||
|
91ccbb017e | ||
|
8c36939dd6 | ||
|
a021a38f5e | ||
|
5b6aa0c7ee | ||
|
da97b8a0e1 | ||
|
fbc7e23afd | ||
|
6e574e2c81 | ||
|
4698093aff | ||
|
6ce21ce910 | ||
|
2c56768a2f | ||
|
4de1373cc4 | ||
|
d30ddb0406 | ||
|
f0dc4ee1cd | ||
|
ac210811e8 | ||
|
a861f9a0b2 | ||
|
4813bc413e | ||
|
fc8ba10f73 | ||
|
9478d1be89 | ||
|
842011a4bd | ||
|
26b95ed403 | ||
|
36ac6a48b9 | ||
|
314d56e9b8 | ||
|
4b3daadae0 | ||
|
cbcf1bb4d8 | ||
|
c916ec76ee | ||
|
3ded967693 | ||
|
609949fbcf | ||
|
f0193e09e1 | ||
|
1959ee0183 | ||
|
7d81b290d4 | ||
|
4022487538 | ||
|
9146b229fc | ||
|
dd6e95ad82 | ||
|
ca7c6dffd9 | ||
|
47aff2759f | ||
|
49fdd260a2 | ||
|
0b081167b2 | ||
|
c3cccad441 | ||
|
b5917e15f7 | ||
|
de8ec9b17a | ||
|
9b94b96829 | ||
|
9d0016594b | ||
|
7cd4905eaf | ||
|
7f79b1a010 | ||
|
d4c4412629 | ||
|
46d66bcb6f | ||
|
c559159b71 | ||
|
c9d05409ae | ||
|
726e9bbf60 | ||
|
ee37cceabd | ||
|
086b44101c | ||
|
a00e1b3a63 | ||
|
51cd3f89a1 | ||
|
84ef8f31f6 | ||
|
3632392123 | ||
|
b4c8e01393 | ||
|
02a7e1b33c | ||
|
80c165d155 | ||
|
3b11e41489 | ||
|
6a936dc090 | ||
|
0e9cbac188 | ||
|
064abf28b6 | ||
|
3f3ecdccb1 | ||
|
70dcad3214 | ||
|
8abdd60d1f | ||
|
88a188cc39 | ||
|
ccac10eb5a | ||
|
1ddca52324 | ||
|
b406a3c6ee | ||
|
f08ddee3a5 | ||
|
fc014e713a | ||
|
ad7ec70ca6 | ||
|
29415adbaa | ||
|
b84c8968c7 | ||
|
e201b751fc | ||
|
36a66a3124 | ||
|
7bcd2e6767 | ||
|
ab35b97b78 | ||
|
1a79d5e861 | ||
|
0e5bf558ef | ||
|
f5ae4c35fe | ||
|
c8ba293450 | ||
|
27fb80d64d | ||
|
2e3afb6765 | ||
|
46cf9fdb80 | ||
|
16757df4cb | ||
|
f7a3632d65 | ||
|
2f9757373e | ||
|
78e12c983e | ||
|
a3e250c789 | ||
|
d424ca0ae9 | ||
|
a606746bba | ||
|
066c682647 | ||
|
f3c5c6230f | ||
|
bb48242766 | ||
|
ec1d48938c | ||
|
123ece43a4 | ||
|
f342f7602a | ||
|
4bbf83a4a8 | ||
|
72681ac88e | ||
|
8da364d8fa | ||
|
e8112df5fb | ||
|
90ab3994de | ||
|
8c64dc4873 | ||
|
68f0becf2b | ||
|
3dfae4eec1 | ||
|
38254ea6cd | ||
|
a427499f8c | ||
|
03346d62e7 | ||
|
dd2c6e59ba | ||
|
28902b4746 | ||
|
caaab9f5e3 | ||
|
8dd736aa04 | ||
|
5c6b687e04 | ||
|
537c0efff9 | ||
|
1a57c75620 | ||
|
7e7a085317 | ||
|
08b7aa08e5 | ||
|
96073f7f98 | ||
|
5f562b1e00 | ||
|
6001a465c0 | ||
|
7b68155fa0 | ||
|
f918de9b42 | ||
|
5a267b1cf9 | ||
|
613e43c87b | ||
|
c1e5cfb01f | ||
|
7b2dc8e779 | ||
|
f73dd21d45 | ||
|
c2d094d0c7 | ||
|
9634e7cdf6 | ||
|
e7ed89f1bc | ||
|
ea6aafefd1 | ||
|
41705b8754 | ||
|
1faced9ec7 | ||
|
185b583ae1 | ||
|
16dc6872c3 | ||
|
0766a4ca0d | ||
|
2bb0d73816 | ||
|
705b30837c | ||
|
9a80d2f73b | ||
|
af2892677b | ||
|
cc504c9887 | ||
|
6561217214 | ||
|
7b1412130a | ||
|
40da2cee3d | ||
|
30ef443d23 | ||
|
499a65e88d | ||
|
35025cbb77 | ||
|
5c4028ac36 | ||
|
53cd211cb9 | ||
|
bdcffd60db | ||
|
b712253b53 | ||
|
ecc41f91ad | ||
|
f1a407de47 | ||
|
9998f8e1db | ||
|
26c1926a4a | ||
|
9a43a1f1d1 | ||
|
b3fc8cb419 | ||
|
9f305dd4e6 | ||
|
dbc956920f | ||
|
fa4c949150 | ||
|
3490712ad7 | ||
|
260b7902fe | ||
|
12f1cabeba | ||
|
4e0637a182 | ||
|
8b0343601c | ||
|
bac7da0264 | ||
|
b01498700c | ||
|
1c6c94424b | ||
|
87e006ca01 | ||
|
f88ce98ee6 | ||
|
ffc5316a7c | ||
|
5433fb705b | ||
|
773964fb8b | ||
|
75af03a653 | ||
|
61790c60dd | ||
|
6094318c4d | ||
|
6225807c85 | ||
|
df69376e68 | ||
|
7c6e24f329 | ||
|
c69f749bd8 | ||
|
42cd6b0fa0 | ||
|
0d27e5fd63 | ||
|
a4a7aa5d1a | ||
|
a77322c414 | ||
|
3b82f4eae2 | ||
|
d214aa3889 | ||
|
9b266113fe | ||
|
96df57ad5d | ||
|
9d3c4ea4ec | ||
|
853962d850 | ||
|
c10a10b3ac | ||
|
fe4bc64b9f | ||
|
1a1ded7d78 | ||
|
e2ff62782d | ||
|
df035d2894 | ||
|
23daa993df | ||
|
bc9e9c39ef | ||
|
dacab91f27 | ||
|
32ac1133d0 | ||
|
e99965a073 | ||
|
f330229d98 | ||
|
266fc5f6cc | ||
|
ec9691dbf1 | ||
|
b6973234ad | ||
|
b70516a37a | ||
|
3f18f014e4 | ||
|
17faf33ab7 | ||
|
e35dd22c6d | ||
|
797a781ffe | ||
|
517d22b3f7 | ||
|
f02522adce | ||
|
679049bf4c | ||
|
c9d2f45bf2 | ||
|
2c7c5cc016 | ||
|
fe2f7c06ae | ||
|
515faea62b | ||
|
25f9046ccd | ||
|
661d8de303 | ||
|
de6ee5b782 | ||
|
9b943044ac | ||
|
b1dbdecb27 | ||
|
f8bdbbdd90 | ||
|
38d6ae933d | ||
|
2ce1c12477 | ||
|
2f18e16512 | ||
|
eadc134dd8 | ||
|
e16a613de0 | ||
|
8cd9d2fcc0 | ||
|
167af40dae | ||
|
b525edfce6 | ||
|
aa90b5cd12 | ||
|
9234bdebed | ||
|
fda5f3f630 | ||
|
349576d92f | ||
|
0d7d35c84a | ||
|
1889d87783 | ||
|
50e133465c | ||
|
fdbabb07cf | ||
|
71b113251d | ||
|
0ee8821b83 | ||
|
3eda2a96a8 | ||
|
5c8aaa70e8 | ||
|
081fd3a4f4 | ||
|
08adb7bb9d | ||
|
c702465d56 | ||
|
5e00b2d8a7 | ||
|
46fc1b37b5 | ||
|
5814485a10 | ||
|
16d299e40b | ||
|
f8367fc41e |
@@ -1,2 +0,0 @@
|
|||||||
# Vendored code
|
|
||||||
src/amd/vulkan/radix_sort/*
|
|
@@ -1,10 +0,0 @@
|
|||||||
# The following files are opted into `ninja clang-format` and
|
|
||||||
# enforcement in the CI.
|
|
||||||
|
|
||||||
src/gallium/drivers/i915
|
|
||||||
src/gallium/drivers/r300/compiler/*
|
|
||||||
src/gallium/targets/teflon/**/*
|
|
||||||
src/amd/vulkan/**/*
|
|
||||||
src/amd/compiler/**/*
|
|
||||||
src/egl/**/*
|
|
||||||
src/etnaviv/isa/**/*
|
|
@@ -8,7 +8,7 @@ charset = utf-8
|
|||||||
insert_final_newline = true
|
insert_final_newline = true
|
||||||
tab_width = 8
|
tab_width = 8
|
||||||
|
|
||||||
[*.{c,h,cpp,hpp,cc,hh,y,yy}]
|
[*.{c,h,cpp,hpp,cc,hh}]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 3
|
indent_size = 3
|
||||||
max_line_length = 78
|
max_line_length = 78
|
||||||
@@ -35,10 +35,7 @@ trim_trailing_whitespace = false
|
|||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 2
|
indent_size = 2
|
||||||
|
|
||||||
|
|
||||||
[*.ps1]
|
[*.ps1]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 2
|
indent_size = 2
|
||||||
|
|
||||||
[*.rs]
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 4
|
|
||||||
|
@@ -1,70 +0,0 @@
|
|||||||
# List of commits to ignore when using `git blame`.
|
|
||||||
# Enable with:
|
|
||||||
# git config blame.ignoreRevsFile .git-blame-ignore-revs
|
|
||||||
#
|
|
||||||
# Per git-blame(1):
|
|
||||||
# Ignore revisions listed in the file, one unabbreviated object name
|
|
||||||
# per line, in git-blame. Whitespace and comments beginning with # are
|
|
||||||
# ignored.
|
|
||||||
#
|
|
||||||
# Please keep these in chronological order :)
|
|
||||||
#
|
|
||||||
# You can add a new commit with the following command:
|
|
||||||
# git log -1 --pretty=format:'%n# %s%n%H%n' >> .git-blame-ignore-revs $COMMIT
|
|
||||||
|
|
||||||
# pvr: Fix clang-format error.
|
|
||||||
0ad5b0a74ef73f5fcbe1406ad9d57fe5dc00a5b1
|
|
||||||
|
|
||||||
# panfrost: Fix up some formatting for clang-format
|
|
||||||
a4705afe63412498d13ded73cba969c66be67907
|
|
||||||
|
|
||||||
# asahi: clang-format the world again
|
|
||||||
26c51bb8d8a33098b1990425a391f56ffba5728c
|
|
||||||
|
|
||||||
# perfetto: Add a .clang-format for the directory.
|
|
||||||
da78d5d729b1800136dd713b68492cb339993f4a
|
|
||||||
|
|
||||||
# panfrost/winsys: Clang-format
|
|
||||||
c90f036516a5376002be6550a917e8bad6a8a3b8
|
|
||||||
|
|
||||||
# panfrost: Re-run clang-format
|
|
||||||
4ccf174009af6732cbffa5d8ebb4687da7517505
|
|
||||||
|
|
||||||
# panvk: Clang-format
|
|
||||||
c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402
|
|
||||||
|
|
||||||
# pan/mdg: Fix icky formatting
|
|
||||||
133af0d6c945d3aaca8989edd15283a2b7dcc6c7
|
|
||||||
|
|
||||||
# mapi: clang-format _glapi_add_dispatch()
|
|
||||||
30332529663268a6406e910848e906e725e6fda7
|
|
||||||
|
|
||||||
# radv: reformat according to its .clang-format
|
|
||||||
8b319c6db8bd93603b18bd783eb75225fcfd51b7
|
|
||||||
|
|
||||||
# aco: reformat according to its .clang-format
|
|
||||||
6b21653ab4d3a67e711fe10e3d403128b6d26eb2
|
|
||||||
|
|
||||||
# egl: re-format using clang-format
|
|
||||||
2f670d89db038d5a29f6b72732fd7ad63dfaf4c6
|
|
||||||
|
|
||||||
# panfrost: clang-format the tree
|
|
||||||
0afd691f29683f6e9dde60f79eca094373521806
|
|
||||||
|
|
||||||
# aco: Format.
|
|
||||||
1e2639026fec7069806449f9ba2a124ce4eb5569
|
|
||||||
|
|
||||||
# radv: Format.
|
|
||||||
59c501ca353f8ec9d2717c98af2bfa1a1dbf4d75
|
|
||||||
|
|
||||||
# pvr: clang-format fixes
|
|
||||||
953c04ebd39c52d457301bdd8ac803949001da2d
|
|
||||||
|
|
||||||
# freedreno: Re-indent
|
|
||||||
2d439343ea1aee146d4ce32800992cd389bd505d
|
|
||||||
|
|
||||||
# ir3: Reformat source with clang-format
|
|
||||||
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54
|
|
||||||
|
|
||||||
# ir3: reformat after refactoring in previous commit
|
|
||||||
8ae5b27ee0331a739d14b42e67586784d6840388
|
|
1
.gitattributes
vendored
1
.gitattributes
vendored
@@ -4,4 +4,3 @@
|
|||||||
*.png binary
|
*.png binary
|
||||||
*.gif binary
|
*.gif binary
|
||||||
*.ico binary
|
*.ico binary
|
||||||
*.cl gitlab-language=c
|
|
||||||
|
39
.github/workflows/ci.yml
vendored
Normal file
39
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
name: CI
|
||||||
|
on: push
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
CI:
|
||||||
|
runs-on: macos-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
- name: Install Dependencies
|
||||||
|
run: |
|
||||||
|
cat > Brewfile <<EOL
|
||||||
|
brew "bison"
|
||||||
|
brew "expat"
|
||||||
|
brew "gettext"
|
||||||
|
brew "libx11"
|
||||||
|
brew "libxcb"
|
||||||
|
brew "libxdamage"
|
||||||
|
brew "libxext"
|
||||||
|
brew "meson"
|
||||||
|
brew "pkg-config"
|
||||||
|
brew "python@3.10"
|
||||||
|
EOL
|
||||||
|
|
||||||
|
brew update
|
||||||
|
brew bundle --verbose
|
||||||
|
- name: Install Mako
|
||||||
|
run: pip3 install --user mako
|
||||||
|
- name: Configure
|
||||||
|
run: meson . build -Dbuild-tests=true -Dosmesa=true
|
||||||
|
- name: Build
|
||||||
|
run: meson compile -C build
|
||||||
|
- name: Test
|
||||||
|
run: meson test -C build --print-errorlogs
|
||||||
|
- name: Install
|
||||||
|
run: meson install -C build
|
60
.github/workflows/macos.yml
vendored
60
.github/workflows/macos.yml
vendored
@@ -1,60 +0,0 @@
|
|||||||
name: macOS-CI
|
|
||||||
on: push
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
macOS-CI:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
glx_option: ['dri', 'xlib']
|
|
||||||
runs-on: macos-11
|
|
||||||
env:
|
|
||||||
GALLIUM_DUMP_CPU: true
|
|
||||||
MESON_EXEC: /Users/runner/Library/Python/3.11/bin/meson
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: Install Dependencies
|
|
||||||
run: |
|
|
||||||
cat > Brewfile <<EOL
|
|
||||||
brew "bison"
|
|
||||||
brew "expat"
|
|
||||||
brew "gettext"
|
|
||||||
brew "libx11"
|
|
||||||
brew "libxcb"
|
|
||||||
brew "libxdamage"
|
|
||||||
brew "libxext"
|
|
||||||
brew "molten-vk"
|
|
||||||
brew "ninja"
|
|
||||||
brew "pkg-config"
|
|
||||||
brew "python@3.10"
|
|
||||||
EOL
|
|
||||||
|
|
||||||
brew update
|
|
||||||
brew bundle --verbose
|
|
||||||
- name: Install Mako and meson
|
|
||||||
run: pip3 install --user mako meson
|
|
||||||
- name: Configure
|
|
||||||
run: |
|
|
||||||
cat > native_config <<EOL
|
|
||||||
[binaries]
|
|
||||||
llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
|
|
||||||
EOL
|
|
||||||
$MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
|
|
||||||
- name: Build
|
|
||||||
run: $MESON_EXEC compile -C build
|
|
||||||
- name: Test
|
|
||||||
run: $MESON_EXEC test -C build --print-errorlogs
|
|
||||||
- name: Install
|
|
||||||
run: $MESON_EXEC install -C build --destdir $PWD/install
|
|
||||||
- name: 'Upload Artifact'
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: macos-${{ matrix.glx_option }}-result
|
|
||||||
path: |
|
|
||||||
build/meson-logs/
|
|
||||||
install/
|
|
||||||
retention-days: 5
|
|
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,7 +1,4 @@
|
|||||||
.cache
|
|
||||||
.vscode*
|
|
||||||
*.pyc
|
*.pyc
|
||||||
*.pyo
|
*.pyo
|
||||||
*.out
|
*.out
|
||||||
/build
|
/build
|
||||||
.venv/
|
|
||||||
|
483
.gitlab-ci.yml
483
.gitlab-ci.yml
@@ -1,190 +1,57 @@
|
|||||||
# Types of CI pipelines:
|
|
||||||
# | pipeline name | context | description |
|
|
||||||
# |----------------------|-----------|-------------------------------------------------------------|
|
|
||||||
# | merge pipeline | mesa/mesa | pipeline running for an MR; if it passes the MR gets merged |
|
|
||||||
# | pre-merge pipeline | mesa/mesa | same as above, except its status doesn't affect the MR |
|
|
||||||
# | post-merge pipeline | mesa/mesa | pipeline immediately after merging |
|
|
||||||
# | fork pipeline | fork | pipeline running in a user fork |
|
|
||||||
# | scheduled pipeline | mesa/mesa | nightly pipelines, running every morning at 4am UTC |
|
|
||||||
# | direct-push pipeline | mesa/mesa | when commits are pushed directly to mesa/mesa, bypassing Marge and its gating pipeline |
|
|
||||||
#
|
|
||||||
# Note that the release branches maintained by the release manager fall under
|
|
||||||
# the "direct push" category.
|
|
||||||
#
|
|
||||||
# "context" indicates the permissions that the jobs get; notably, any
|
|
||||||
# container created in mesa/mesa gets pushed immediately for everyone to use
|
|
||||||
# as soon as the image tag change is merged.
|
|
||||||
#
|
|
||||||
# Merge pipelines contain all jobs that must pass before the MR can be merged.
|
|
||||||
# Pre-merge pipelines contain the exact same jobs as merge pipelines.
|
|
||||||
# Post-merge pipelines contain *only* the `pages` job that deploys the new
|
|
||||||
# version of the website.
|
|
||||||
# Fork pipelines contain everything.
|
|
||||||
# Scheduled pipelines only contain the container+build jobs, and some extra
|
|
||||||
# test jobs (typically "full" variants of pre-merge jobs that only run 1/X
|
|
||||||
# test cases), but not a repeat of the merge pipeline jobs.
|
|
||||||
# Direct-push pipelines contain the same jobs as merge pipelines.
|
|
||||||
|
|
||||||
workflow:
|
|
||||||
rules:
|
|
||||||
# do not duplicate pipelines on merge pipelines
|
|
||||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
|
|
||||||
when: never
|
|
||||||
# tag pipelines are disabled as it's too late to run all the tests by
|
|
||||||
# then, the release has been made based on the staging pipelines results
|
|
||||||
- if: $CI_COMMIT_TAG
|
|
||||||
when: never
|
|
||||||
# merge pipeline
|
|
||||||
- if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
|
|
||||||
variables:
|
|
||||||
MESA_CI_PERFORMANCE_ENABLED: 1
|
|
||||||
VALVE_INFRA_VANGOGH_JOB_PRIORITY: "" # Empty tags are ignored by gitlab
|
|
||||||
JOB_PRIORITY: 75
|
|
||||||
# fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes
|
|
||||||
DEQP_RUNNER_MAX_FAILS: 40
|
|
||||||
# post-merge pipeline
|
|
||||||
- if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
|
|
||||||
# Pre-merge pipeline
|
|
||||||
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
|
|
||||||
# Push to a branch on a fork
|
|
||||||
- if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
|
|
||||||
# nightly pipeline
|
|
||||||
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
|
|
||||||
variables:
|
|
||||||
JOB_PRIORITY: 45
|
|
||||||
# (some) nightly builds perform LTO, so they take much longer than the
|
|
||||||
# short timeout allowed in other pipelines.
|
|
||||||
# Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h
|
|
||||||
BUILD_JOB_TIMEOUT_OVERRIDE: 0
|
|
||||||
# pipeline for direct pushes that bypassed the CI
|
|
||||||
- if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
|
|
||||||
variables:
|
|
||||||
JOB_PRIORITY: 70
|
|
||||||
# pipeline for direct pushes from release maintainer
|
|
||||||
- if: &is-staging-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME =~ /^staging\//
|
|
||||||
variables:
|
|
||||||
JOB_PRIORITY: 70
|
|
||||||
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
FDO_UPSTREAM_REPO: mesa/mesa
|
FDO_UPSTREAM_REPO: mesa/mesa
|
||||||
MESA_TEMPLATES_COMMIT: &ci-templates-commit e195d80f35b45cc73668be3767b923fd76c70ed5
|
MESA_TEMPLATES_COMMIT: &ci-templates-commit 290b79e0e78eab67a83766f4e9691be554fc4afd
|
||||||
CI_PRE_CLONE_SCRIPT: |-
|
CI_PRE_CLONE_SCRIPT: |-
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
|
wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
|
||||||
bash download-git-cache.sh
|
bash download-git-cache.sh
|
||||||
rm download-git-cache.sh
|
rm download-git-cache.sh
|
||||||
set +o xtrace
|
set +o xtrace
|
||||||
S3_JWT_FILE: /s3_jwt
|
CI_JOB_JWT_FILE: /minio_jwt
|
||||||
S3_JWT_FILE_SCRIPT: |-
|
MINIO_HOST: minio-packet.freedesktop.org
|
||||||
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
|
|
||||||
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
|
|
||||||
S3_HOST: s3.freedesktop.org
|
|
||||||
# This bucket is used to fetch ANDROID prebuilts and images
|
|
||||||
S3_ANDROID_BUCKET: mesa-rootfs
|
|
||||||
# This bucket is used to fetch the kernel image
|
|
||||||
S3_KERNEL_BUCKET: mesa-rootfs
|
|
||||||
# Bucket for git cache
|
|
||||||
S3_GITCACHE_BUCKET: git-cache
|
|
||||||
# Bucket for the pipeline artifacts pushed to S3
|
|
||||||
S3_ARTIFACTS_BUCKET: artifacts
|
|
||||||
# Buckets for traces
|
|
||||||
S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
|
|
||||||
S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
|
|
||||||
S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
|
|
||||||
# per-pipeline artifact storage on MinIO
|
# per-pipeline artifact storage on MinIO
|
||||||
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
|
PIPELINE_ARTIFACTS_BASE: ${MINIO_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
|
||||||
# per-job artifact storage on MinIO
|
# per-job artifact storage on MinIO
|
||||||
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
|
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
|
||||||
# reference images stored for traces
|
# reference images stored for traces
|
||||||
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/${S3_TRACIE_RESULTS_BUCKET}/$FDO_UPSTREAM_REPO"
|
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${MINIO_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO"
|
||||||
# For individual CI farm status see .ci-farms folder
|
# Individual CI farm status, set to "offline" to disable jobs
|
||||||
# Disable farm with `git mv .ci-farms{,-disabled}/$farm_name`
|
# running on a particular CI farm (ie. for outages, etc):
|
||||||
# Re-enable farm with `git mv .ci-farms{-disabled,}/$farm_name`
|
FD_FARM: "online"
|
||||||
# NEVER MIX FARM MAINTENANCE WITH ANY OTHER CHANGE IN THE SAME MERGE REQUEST!
|
COLLABORA_FARM: "online"
|
||||||
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
|
MICROSOFT_FARM: "online"
|
||||||
# Python scripts for structured logger
|
LIMA_FARM: "online"
|
||||||
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
|
IGALIA_FARM: "online"
|
||||||
# No point in continuing once the device is lost
|
|
||||||
MESA_VK_ABORT_ON_DEVICE_LOSS: 1
|
|
||||||
# Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
|
|
||||||
MESA_SPIRV_LOG_LEVEL: error
|
|
||||||
# Default priority for non-merge pipelines
|
|
||||||
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
|
|
||||||
JOB_PRIORITY: 50
|
|
||||||
DATA_STORAGE_PATH: data_storage
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
id_tokens:
|
|
||||||
S3_JWT:
|
|
||||||
aud: https://s3.freedesktop.org
|
|
||||||
before_script:
|
before_script:
|
||||||
- |
|
- echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables"
|
||||||
if [ -z "${KERNEL_IMAGE_BASE:-}" ]; then
|
- echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}"
|
||||||
export KERNEL_IMAGE_BASE="https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${EXTERNAL_KERNEL_TAG:-$KERNEL_TAG}"
|
- unset CI_JOB_JWT
|
||||||
fi
|
- echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K"
|
||||||
- >
|
|
||||||
export SCRIPTS_DIR=$(mktemp -d) &&
|
|
||||||
curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
|
|
||||||
. ${SCRIPTS_DIR}/setup-test-env.sh
|
|
||||||
- eval "$S3_JWT_FILE_SCRIPT"
|
|
||||||
|
|
||||||
after_script:
|
after_script:
|
||||||
# Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
|
- >
|
||||||
- find -name '*.log' -exec mv {} {}.txt \;
|
set +x
|
||||||
|
|
||||||
# Retry when job fails. Failed jobs can be found in the Mesa CI Daily Reports:
|
test -e "${CI_JOB_JWT_FILE}" &&
|
||||||
# https://gitlab.freedesktop.org/mesa/mesa/-/issues/?sort=created_date&state=opened&label_name%5B%5D=CI%20daily
|
export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" &&
|
||||||
|
rm "${CI_JOB_JWT_FILE}"
|
||||||
|
|
||||||
|
# Retry build or test jobs up to twice when the gitlab-runner itself fails somehow.
|
||||||
retry:
|
retry:
|
||||||
max: 1
|
max: 2
|
||||||
# Ignore runner_unsupported, stale_schedule, archived_failure, or
|
|
||||||
# unmet_prerequisites
|
|
||||||
when:
|
when:
|
||||||
- api_failure
|
|
||||||
- runner_system_failure
|
- runner_system_failure
|
||||||
- script_failure
|
|
||||||
- job_execution_timeout
|
|
||||||
- scheduler_failure
|
|
||||||
- data_integrity_failure
|
|
||||||
- unknown_failure
|
|
||||||
|
|
||||||
stages:
|
|
||||||
- sanity
|
|
||||||
- container
|
|
||||||
- git-archive
|
|
||||||
- build-for-tests
|
|
||||||
- build-only
|
|
||||||
- code-validation
|
|
||||||
- amd
|
|
||||||
- amd-postmerge
|
|
||||||
- intel
|
|
||||||
- intel-postmerge
|
|
||||||
- nouveau
|
|
||||||
- nouveau-postmerge
|
|
||||||
- arm
|
|
||||||
- arm-postmerge
|
|
||||||
- broadcom
|
|
||||||
- broadcom-postmerge
|
|
||||||
- freedreno
|
|
||||||
- freedreno-postmerge
|
|
||||||
- etnaviv
|
|
||||||
- etnaviv-postmerge
|
|
||||||
- software-renderer
|
|
||||||
- software-renderer-postmerge
|
|
||||||
- layered-backends
|
|
||||||
- layered-backends-postmerge
|
|
||||||
- performance
|
|
||||||
- deploy
|
|
||||||
|
|
||||||
include:
|
include:
|
||||||
- project: 'freedesktop/ci-templates'
|
- project: 'freedesktop/ci-templates'
|
||||||
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
|
ref: 34f4ade99434043f88e164933f570301fd18b125
|
||||||
file:
|
file:
|
||||||
- '/templates/ci-fairy.yml'
|
- '/templates/ci-fairy.yml'
|
||||||
- project: 'freedesktop/ci-templates'
|
- project: 'freedesktop/ci-templates'
|
||||||
ref: *ci-templates-commit
|
ref: *ci-templates-commit
|
||||||
file:
|
file:
|
||||||
- '/templates/alpine.yml'
|
|
||||||
- '/templates/debian.yml'
|
- '/templates/debian.yml'
|
||||||
- '/templates/fedora.yml'
|
- '/templates/fedora.yml'
|
||||||
- local: '.gitlab-ci/image-tags.yml'
|
- local: '.gitlab-ci/image-tags.yml'
|
||||||
@@ -192,84 +59,154 @@ include:
|
|||||||
- local: '.gitlab-ci/container/gitlab-ci.yml'
|
- local: '.gitlab-ci/container/gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/build/gitlab-ci.yml'
|
- local: '.gitlab-ci/build/gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/test/gitlab-ci.yml'
|
- local: '.gitlab-ci/test/gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/farm-rules.yml'
|
|
||||||
- local: '.gitlab-ci/test-source-dep.yml'
|
- local: '.gitlab-ci/test-source-dep.yml'
|
||||||
- local: 'docs/gitlab-ci.yml'
|
- local: 'src/amd/ci/gitlab-ci.yml'
|
||||||
- local: 'src/**/ci/gitlab-ci.yml'
|
- local: 'src/broadcom/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/etnaviv/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/freedreno/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/crocus/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/d3d12/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/i915/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/lima/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/llvmpipe/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/nouveau/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/radeonsi/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/softpipe/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/virgl/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/drivers/zink/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/gallium/frontends/lavapipe/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/intel/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/microsoft/ci/gitlab-ci.yml'
|
||||||
|
- local: 'src/panfrost/ci/gitlab-ci.yml'
|
||||||
|
|
||||||
|
stages:
|
||||||
|
- sanity
|
||||||
|
- container
|
||||||
|
- git-archive
|
||||||
|
- build-x86_64
|
||||||
|
- build-misc
|
||||||
|
- amd
|
||||||
|
- intel
|
||||||
|
- nouveau
|
||||||
|
- arm
|
||||||
|
- broadcom
|
||||||
|
- freedreno
|
||||||
|
- etnaviv
|
||||||
|
- software-renderer
|
||||||
|
- layered-backends
|
||||||
|
- deploy
|
||||||
|
|
||||||
|
|
||||||
# Rules applied to every job in the pipeline
|
# YAML anchors for rule conditions
|
||||||
.common-rules:
|
# --------------------------------
|
||||||
|
.rules-anchors:
|
||||||
rules:
|
rules:
|
||||||
- if: *is-fork-push
|
# Pipeline for forked project branch
|
||||||
|
- if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"'
|
||||||
when: manual
|
when: manual
|
||||||
|
# Forked project branch / pre-merge pipeline not for Marge bot
|
||||||
.never-post-merge-rules:
|
- if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")'
|
||||||
rules:
|
|
||||||
- if: *is-post-merge
|
|
||||||
when: never
|
|
||||||
|
|
||||||
|
|
||||||
.container+build-rules:
|
|
||||||
rules:
|
|
||||||
- !reference [.common-rules, rules]
|
|
||||||
# Run when re-enabling a disabled farm, but not when disabling it
|
|
||||||
- !reference [.disable-farm-mr-rules, rules]
|
|
||||||
# Never run immediately after merging, as we just ran everything
|
|
||||||
- !reference [.never-post-merge-rules, rules]
|
|
||||||
# Build everything in merge pipelines, if any files affecting the pipeline
|
|
||||||
# were changed
|
|
||||||
- if: *is-merge-attempt
|
|
||||||
changes: &all_paths
|
|
||||||
- VERSION
|
|
||||||
- bin/git_sha1_gen.py
|
|
||||||
- bin/install_megadrivers.py
|
|
||||||
- bin/symbols-check.py
|
|
||||||
- bin/ci/**/*
|
|
||||||
# GitLab CI
|
|
||||||
- .gitlab-ci.yml
|
|
||||||
- .gitlab-ci/**/*
|
|
||||||
- .ci-farms/*
|
|
||||||
# Meson
|
|
||||||
- meson*
|
|
||||||
- build-support/**/*
|
|
||||||
- subprojects/**/*
|
|
||||||
# clang format
|
|
||||||
- .clang-format
|
|
||||||
- .clang-format-include
|
|
||||||
- .clang-format-ignore
|
|
||||||
# Source code
|
|
||||||
- include/**/*
|
|
||||||
- src/**/*
|
|
||||||
when: on_success
|
|
||||||
# Same as above, but for pre-merge pipelines
|
|
||||||
- if: *is-pre-merge
|
|
||||||
changes:
|
|
||||||
*all_paths
|
|
||||||
when: manual
|
when: manual
|
||||||
# Skip everything for pre-merge and merge pipelines which don't change
|
# Pipeline runs for the main branch of the upstream Mesa project
|
||||||
# anything in the build
|
- if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH'
|
||||||
- if: *is-merge-attempt
|
when: always
|
||||||
when: never
|
# Post-merge pipeline
|
||||||
|
- if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
|
||||||
|
when: on_success
|
||||||
|
# Post-merge pipeline, not for Marge Bot
|
||||||
|
- if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
|
||||||
|
when: on_success
|
||||||
|
# Pre-merge pipeline
|
||||||
|
- if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"'
|
||||||
|
when: on_success
|
||||||
|
# Pre-merge pipeline for Marge Bot
|
||||||
|
- if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
|
||||||
|
when: on_success
|
||||||
|
|
||||||
|
|
||||||
|
.docs-base:
|
||||||
|
extends:
|
||||||
|
- .fdo.ci-fairy
|
||||||
|
- .build-rules
|
||||||
|
script:
|
||||||
|
- apk --no-cache add graphviz doxygen
|
||||||
|
- pip3 install sphinx breathe mako sphinx_rtd_theme
|
||||||
|
- docs/doxygen-wrapper.py --out-dir=docs/doxygen_xml
|
||||||
|
- sphinx-build -W -b html docs public
|
||||||
|
|
||||||
|
pages:
|
||||||
|
extends: .docs-base
|
||||||
|
stage: deploy
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- public
|
||||||
|
needs: []
|
||||||
|
rules:
|
||||||
|
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||||
|
- if: *is-mesa-main
|
||||||
|
changes: &docs-or-ci
|
||||||
|
- docs/**/*
|
||||||
|
- .gitlab-ci.yml
|
||||||
|
when: always
|
||||||
|
# Other cases default to never
|
||||||
|
|
||||||
|
test-docs:
|
||||||
|
extends: .docs-base
|
||||||
|
# Cancel job if a newer commit is pushed to the same branch
|
||||||
|
interruptible: true
|
||||||
|
stage: deploy
|
||||||
|
needs: []
|
||||||
|
rules:
|
||||||
|
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||||
|
- if: *is-forked-branch
|
||||||
|
changes: *docs-or-ci
|
||||||
|
when: manual
|
||||||
|
# Other cases default to never
|
||||||
|
|
||||||
|
test-docs-mr:
|
||||||
|
extends:
|
||||||
|
- test-docs
|
||||||
|
needs:
|
||||||
|
- sanity
|
||||||
|
artifacts:
|
||||||
|
expose_as: 'Documentation preview'
|
||||||
|
paths:
|
||||||
|
- public/
|
||||||
|
rules:
|
||||||
- if: *is-pre-merge
|
- if: *is-pre-merge
|
||||||
when: never
|
changes: *docs-or-ci
|
||||||
# Build everything after someone bypassed the CI
|
|
||||||
- if: *is-direct-push
|
|
||||||
when: on_success
|
when: on_success
|
||||||
# Build everything when pushing to staging branches
|
# Other cases default to never
|
||||||
- if: *is-staging-push
|
|
||||||
|
# When to automatically run the CI for build jobs
|
||||||
|
.build-rules:
|
||||||
|
rules:
|
||||||
|
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||||
|
# If any files affecting the pipeline are changed, build/test jobs run
|
||||||
|
# automatically once all dependency jobs have passed
|
||||||
|
- changes: &all_paths
|
||||||
|
- VERSION
|
||||||
|
- bin/git_sha1_gen.py
|
||||||
|
- bin/install_megadrivers.py
|
||||||
|
- bin/meson_get_version.py
|
||||||
|
- bin/symbols-check.py
|
||||||
|
# GitLab CI
|
||||||
|
- .gitlab-ci.yml
|
||||||
|
- .gitlab-ci/**/*
|
||||||
|
# Meson
|
||||||
|
- meson*
|
||||||
|
- build-support/**/*
|
||||||
|
- subprojects/**/*
|
||||||
|
# Source code
|
||||||
|
- include/**/*
|
||||||
|
- src/**/*
|
||||||
when: on_success
|
when: on_success
|
||||||
# Build everything in scheduled pipelines
|
# Otherwise, build/test jobs won't run because no rule matched.
|
||||||
- if: *is-scheduled-pipeline
|
|
||||||
when: on_success
|
|
||||||
# Allow building everything in fork pipelines, but build nothing unless
|
|
||||||
# manually triggered
|
|
||||||
- when: manual
|
|
||||||
|
|
||||||
|
|
||||||
.ci-deqp-artifacts:
|
.ci-deqp-artifacts:
|
||||||
artifacts:
|
artifacts:
|
||||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
|
name: "mesa_${CI_JOB_NAME}"
|
||||||
when: always
|
when: always
|
||||||
untracked: false
|
untracked: false
|
||||||
paths:
|
paths:
|
||||||
@@ -278,7 +215,35 @@ include:
|
|||||||
- artifacts
|
- artifacts
|
||||||
- _build/meson-logs/*.txt
|
- _build/meson-logs/*.txt
|
||||||
- _build/meson-logs/strace
|
- _build/meson-logs/strace
|
||||||
- _build/.ninja_log
|
|
||||||
|
.container-rules:
|
||||||
|
rules:
|
||||||
|
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||||
|
# Run pipeline by default in the main project if any CI pipeline
|
||||||
|
# configuration files were changed, to ensure docker images are up to date
|
||||||
|
- if: *is-post-merge
|
||||||
|
changes:
|
||||||
|
- .gitlab-ci.yml
|
||||||
|
- .gitlab-ci/**/*
|
||||||
|
when: on_success
|
||||||
|
# Run pipeline by default if it was triggered by Marge Bot, is for a
|
||||||
|
# merge request, and any files affecting the pipeline were changed
|
||||||
|
- if: *is-pre-merge-for-marge
|
||||||
|
changes:
|
||||||
|
*all_paths
|
||||||
|
when: on_success
|
||||||
|
# Run pipeline by default in the main project if it was not triggered by
|
||||||
|
# Marge Bot, and any files affecting the pipeline were changed
|
||||||
|
- if: *is-post-merge-not-for-marge
|
||||||
|
changes:
|
||||||
|
*all_paths
|
||||||
|
when: on_success
|
||||||
|
# Allow triggering jobs manually in other cases if any files affecting the
|
||||||
|
# pipeline were changed
|
||||||
|
- changes:
|
||||||
|
*all_paths
|
||||||
|
when: manual
|
||||||
|
# Otherwise, container jobs won't run because no rule matched.
|
||||||
|
|
||||||
# Git archive
|
# Git archive
|
||||||
|
|
||||||
@@ -294,12 +259,13 @@ make git archive:
|
|||||||
script:
|
script:
|
||||||
# Compactify the .git directory
|
# Compactify the .git directory
|
||||||
- git gc --aggressive
|
- git gc --aggressive
|
||||||
# Download & cache the perfetto subproject as well.
|
|
||||||
- rm -rf subprojects/perfetto ; mkdir -p subprojects/perfetto && curl https://android.googlesource.com/platform/external/perfetto/+archive/$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3).tar.gz | tar zxf - -C subprojects/perfetto
|
|
||||||
# compress the current folder
|
# compress the current folder
|
||||||
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
|
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
|
||||||
|
|
||||||
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
|
# login with the JWT token file
|
||||||
|
- ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"
|
||||||
|
- ci-fairy minio cp ../$CI_PROJECT_NAME.tar.gz minio://$MINIO_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
|
||||||
|
|
||||||
|
|
||||||
# Sanity checks of MR settings and commit logs
|
# Sanity checks of MR settings and commit logs
|
||||||
sanity:
|
sanity:
|
||||||
@@ -309,67 +275,28 @@ sanity:
|
|||||||
rules:
|
rules:
|
||||||
- if: *is-pre-merge
|
- if: *is-pre-merge
|
||||||
when: on_success
|
when: on_success
|
||||||
- when: never
|
# Other cases default to never
|
||||||
variables:
|
variables:
|
||||||
GIT_STRATEGY: none
|
GIT_STRATEGY: none
|
||||||
script:
|
script:
|
||||||
# ci-fairy check-commits --junit-xml=check-commits.xml
|
# ci-fairy check-commits --junit-xml=check-commits.xml
|
||||||
- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
|
- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
|
||||||
- |
|
|
||||||
set -eu
|
|
||||||
image_tags=(
|
|
||||||
ALPINE_X86_64_BUILD_TAG
|
|
||||||
ALPINE_X86_64_LAVA_SSH_TAG
|
|
||||||
DEBIAN_BASE_TAG
|
|
||||||
DEBIAN_BUILD_TAG
|
|
||||||
DEBIAN_PYUTILS_TAG
|
|
||||||
DEBIAN_TEST_ANDROID_TAG
|
|
||||||
DEBIAN_TEST_GL_TAG
|
|
||||||
DEBIAN_TEST_VK_TAG
|
|
||||||
FEDORA_X86_64_BUILD_TAG
|
|
||||||
KERNEL_ROOTFS_TAG
|
|
||||||
KERNEL_TAG
|
|
||||||
PKG_REPO_REV
|
|
||||||
WINDOWS_X64_BUILD_TAG
|
|
||||||
WINDOWS_X64_MSVC_TAG
|
|
||||||
WINDOWS_X64_TEST_TAG
|
|
||||||
)
|
|
||||||
for var in "${image_tags[@]}"
|
|
||||||
do
|
|
||||||
if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
|
|
||||||
then
|
|
||||||
echo "$var is too long; please make sure it is at most 20 chars."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
artifacts:
|
artifacts:
|
||||||
when: on_failure
|
when: on_failure
|
||||||
reports:
|
reports:
|
||||||
junit: check-*.xml
|
junit: check-*.xml
|
||||||
tags:
|
|
||||||
- placeholder-job
|
|
||||||
|
|
||||||
|
# Rules for tests that should not block merging, but should be available to
|
||||||
mr-label-maker-test:
|
# optionally run with the "play" button in the UI in pre-merge non-marge
|
||||||
extends:
|
# pipelines. This should appear in "extends:" after any includes of
|
||||||
- .fdo.ci-fairy
|
# test-source-dep.yml rules, so that these rules replace those.
|
||||||
stage: sanity
|
.test-manual-mr:
|
||||||
rules:
|
rules:
|
||||||
- !reference [.mr-label-maker-rules, rules]
|
- !reference [.no_scheduled_pipelines-rules, rules]
|
||||||
|
- if: *is-forked-branch-or-pre-merge-not-for-marge
|
||||||
|
changes:
|
||||||
|
*all_paths
|
||||||
|
when: manual
|
||||||
variables:
|
variables:
|
||||||
GIT_STRATEGY: fetch
|
JOB_TIMEOUT: 80
|
||||||
timeout: 10m
|
|
||||||
script:
|
|
||||||
- set -eu
|
|
||||||
- python3 -m venv .venv
|
|
||||||
- source .venv/bin/activate
|
|
||||||
- pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
|
|
||||||
- mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
|
|
||||||
|
|
||||||
# Jobs that need to pass before spending hardware resources on further testing
|
|
||||||
.required-for-hardware-jobs:
|
|
||||||
needs:
|
|
||||||
- job: rustfmt
|
|
||||||
optional: true
|
|
||||||
- job: yaml-toml-shell-py-test
|
|
||||||
optional: true
|
|
||||||
|
@@ -1,33 +0,0 @@
|
|||||||
[flake8]
|
|
||||||
exclude = .venv*,
|
|
||||||
|
|
||||||
# PEP 8 Style Guide limits line length to 79 characters
|
|
||||||
max-line-length = 159
|
|
||||||
|
|
||||||
ignore =
|
|
||||||
# continuation line under-indented for hanging indent
|
|
||||||
E121
|
|
||||||
# continuation line over-indented for hanging indent
|
|
||||||
E126,
|
|
||||||
# continuation line under-indented for visual indent
|
|
||||||
E128,
|
|
||||||
# whitespace before ':'
|
|
||||||
E203,
|
|
||||||
# missing whitespace around arithmetic operator
|
|
||||||
E226,
|
|
||||||
# missing whitespace after ','
|
|
||||||
E231,
|
|
||||||
# expected 2 blank lines, found 1
|
|
||||||
E302,
|
|
||||||
# too many blank lines
|
|
||||||
E303,
|
|
||||||
# imported but unused
|
|
||||||
F401,
|
|
||||||
# f-string is missing placeholders
|
|
||||||
F541,
|
|
||||||
# local variable assigned to but never used
|
|
||||||
F841,
|
|
||||||
# line break before binary operator
|
|
||||||
W503,
|
|
||||||
# line break after binary operator
|
|
||||||
W504,
|
|
@@ -2,16 +2,6 @@
|
|||||||
# non-zero-length and not starting with '#', will regex match to
|
# non-zero-length and not starting with '#', will regex match to
|
||||||
# delete lines from the test list. Be careful.
|
# delete lines from the test list. Be careful.
|
||||||
|
|
||||||
# This test checks the driver's reported conformance version against the
|
|
||||||
# version of the CTS we're running. This check fails every few months
|
|
||||||
# and everyone has to go and bump the number in every driver.
|
|
||||||
# Running this check only makes sense while preparing a conformance
|
|
||||||
# submission, so skip it in the regular CI.
|
|
||||||
dEQP-VK.api.driver_properties.conformance_version
|
|
||||||
|
|
||||||
# Exclude this test which might fail when a new extension is implemented.
|
|
||||||
dEQP-VK.info.device_extensions
|
|
||||||
|
|
||||||
# These are tremendously slow (pushing toward a minute), and aren't
|
# These are tremendously slow (pushing toward a minute), and aren't
|
||||||
# reliable to be run in parallel with other tests due to CPU-side timing.
|
# reliable to be run in parallel with other tests due to CPU-side timing.
|
||||||
dEQP-GLES[0-9]*.functional.flush_finish.*
|
dEQP-GLES[0-9]*.functional.flush_finish.*
|
||||||
@@ -25,63 +15,3 @@ glx@glx_arb_sync_control@timing.*
|
|||||||
|
|
||||||
# This test is not built with waffle, while we do build tests with waffle
|
# This test is not built with waffle, while we do build tests with waffle
|
||||||
spec@!opengl 1.1@windowoverlap
|
spec@!opengl 1.1@windowoverlap
|
||||||
|
|
||||||
# These tests all read from the front buffer after a swap. Given that we
|
|
||||||
# run piglit tests in parallel in Mesa CI, and don't have a compositor
|
|
||||||
# running, the frontbuffer reads may end up with undefined results from
|
|
||||||
# windows overlapping us.
|
|
||||||
#
|
|
||||||
# Piglit does mark these tests as not to be run in parallel, but deqp-runner
|
|
||||||
# doesn't respect that. We need to extend deqp-runner to allow some tests to be
|
|
||||||
# marked as single-threaded and run after the rayon loop if we want to support
|
|
||||||
# them.
|
|
||||||
#
|
|
||||||
# Note that "glx-" tests don't appear in x11-skips.txt because they can be
|
|
||||||
# run even if PIGLIT_PLATFORM=gbm (for example)
|
|
||||||
glx@glx-copy-sub-buffer.*
|
|
||||||
|
|
||||||
# A majority of the tests introduced in CTS 1.3.7.0 are experiencing failures and flakes.
|
|
||||||
# Disable these tests until someone with a more deeper understanding of EGL examines them.
|
|
||||||
#
|
|
||||||
# Note: on sc8280xp/a690 I get identical results (same passes and fails)
|
|
||||||
# between freedreno, zink, and llvmpipe, so I believe this is either a
|
|
||||||
# deqp bug or egl/wayland bug, rather than driver issue.
|
|
||||||
#
|
|
||||||
# With llvmpipe, the failing tests have the error message:
|
|
||||||
#
|
|
||||||
# "Illegal sampler view creation without bind flag"
|
|
||||||
#
|
|
||||||
# which might be a hint. (But some passing tests also have the same
|
|
||||||
# error message.)
|
|
||||||
#
|
|
||||||
# more context from David Heidelberg on IRC: the deqp commit where these
|
|
||||||
# started failing is: https://github.com/KhronosGroup/VK-GL-CTS/commit/79b25659bcbced0cfc2c3fe318951c585f682abe
|
|
||||||
# prior to that they were skipping.
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2_gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2_gles3.other
|
|
||||||
|
|
||||||
# Seems to be the same is as wayland-dEQP-EGL.functional.color_clears.*
|
|
||||||
wayland-dEQP-EGL.functional.render.single_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.single_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles2_gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other
|
|
||||||
|
|
||||||
# These test the loader more than the implementation and are broken because the
|
|
||||||
# Vulkan loader in Debian is too old
|
|
||||||
dEQP-VK.api.get_device_proc_addr.non_enabled
|
|
||||||
dEQP-VK.api.version_check.unavailable_entry_points
|
|
||||||
|
@@ -1,7 +0,0 @@
|
|||||||
# Unlike zink which does support it, ANGLE relies on a waiver to not implement
|
|
||||||
# capturing individual array elements (see waivers.xml and gles3-waivers.txt in the CTS)
|
|
||||||
dEQP-GLES3.functional.transform_feedback.array_element.*
|
|
||||||
dEQP-GLES3.functional.transform_feedback.random.*
|
|
||||||
dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.*_array_element
|
|
||||||
dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.type.*.array.*
|
|
||||||
KHR-GLES31.core.program_interface_query.transform-feedback-types
|
|
@@ -2,118 +2,66 @@ version: 1
|
|||||||
|
|
||||||
# Rules to match for a machine to qualify
|
# Rules to match for a machine to qualify
|
||||||
target:
|
target:
|
||||||
id: '{{ ci_runner_description }}'
|
{% if tags %}
|
||||||
|
{% set b2ctags = tags.split(',') %}
|
||||||
|
tags:
|
||||||
|
{% for tag in b2ctags %}
|
||||||
|
- '{{ tag | trim }}'
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
timeouts:
|
timeouts:
|
||||||
|
|
||||||
first_console_activity: # This limits the time it can take to receive the first console log
|
first_console_activity: # This limits the time it can take to receive the first console log
|
||||||
minutes: {{ timeout_first_console_activity_minutes | default(0, true) }}
|
minutes: {{ timeout_first_minutes }}
|
||||||
seconds: {{ timeout_first_console_activity_seconds | default(0, true) }}
|
retries: {{ timeout_first_retries }}
|
||||||
retries: {{ timeout_first_console_activity_retries }}
|
|
||||||
|
|
||||||
console_activity: # Reset every time we receive a message from the logs
|
console_activity: # Reset every time we receive a message from the logs
|
||||||
minutes: {{ timeout_console_activity_minutes | default(0, true) }}
|
minutes: {{ timeout_minutes }}
|
||||||
seconds: {{ timeout_console_activity_seconds | default(0, true) }}
|
retries: {{ timeout_retries }}
|
||||||
retries: {{ timeout_console_activity_retries }}
|
|
||||||
|
|
||||||
boot_cycle:
|
boot_cycle:
|
||||||
minutes: {{ timeout_boot_minutes | default(0, true) }}
|
minutes: {{ timeout_boot_minutes }}
|
||||||
seconds: {{ timeout_boot_seconds | default(0, true) }}
|
|
||||||
retries: {{ timeout_boot_retries }}
|
retries: {{ timeout_boot_retries }}
|
||||||
|
|
||||||
overall: # Maximum time the job can take, not overrideable by the "continue" deployment
|
overall: # Maximum time the job can take, not overrideable by the "continue" deployment
|
||||||
minutes: {{ timeout_overall_minutes | default(0, true) }}
|
minutes: {{ timeout_overall_minutes }}
|
||||||
seconds: {{ timeout_overall_seconds | default(0, true) }}
|
|
||||||
retries: 0
|
retries: 0
|
||||||
# no retries possible here
|
# no retries possible here
|
||||||
|
|
||||||
watchdogs:
|
|
||||||
boot:
|
|
||||||
minutes: {{ timeout_boot_wd_minutes | default(0, true) }}
|
|
||||||
seconds: {{ timeout_boot_wd_seconds | default(0, true) }}
|
|
||||||
retries: {{ timeout_boot_wd_retries | default(0, true) }}
|
|
||||||
|
|
||||||
console_patterns:
|
console_patterns:
|
||||||
session_end:
|
session_end:
|
||||||
regex: >-
|
regex: >-
|
||||||
{{ session_end_regex }}
|
{{ session_end_regex }}
|
||||||
{% if session_reboot_regex %}
|
|
||||||
session_reboot:
|
session_reboot:
|
||||||
regex: >-
|
regex: >-
|
||||||
{{ session_reboot_regex }}
|
{{ session_reboot_regex }}
|
||||||
{% endif %}
|
|
||||||
job_success:
|
job_success:
|
||||||
regex: >-
|
regex: >-
|
||||||
{{ job_success_regex }}
|
{{ job_success_regex }}
|
||||||
{% if job_warn_regex %}
|
|
||||||
job_warn:
|
job_warn:
|
||||||
regex: >-
|
regex: >-
|
||||||
{{ job_warn_regex }}
|
{{ job_warn_regex }}
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if boot_wd_start_regex and boot_wd_stop_regex %}
|
|
||||||
watchdogs:
|
|
||||||
boot:
|
|
||||||
start:
|
|
||||||
regex: >-
|
|
||||||
{{ boot_wd_start_regex }}
|
|
||||||
reset:
|
|
||||||
regex: >-
|
|
||||||
{{ boot_wd_reset_regex | default(boot_wd_start_regex, true) }}
|
|
||||||
stop:
|
|
||||||
regex: >-
|
|
||||||
{{ boot_wd_stop_regex }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Environment to deploy
|
# Environment to deploy
|
||||||
deployment:
|
deployment:
|
||||||
# Initial boot
|
# Initial boot
|
||||||
start:
|
start:
|
||||||
storage:
|
|
||||||
http:
|
|
||||||
- path: "/b2c-extra-args"
|
|
||||||
data: >
|
|
||||||
b2c.pipefail b2c.poweroff_delay={{ poweroff_delay }}
|
|
||||||
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
|
|
||||||
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
|
|
||||||
{% for volume in volumes %}
|
|
||||||
b2c.volume={{ volume }}
|
|
||||||
{% endfor %}
|
|
||||||
b2c.run_service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
|
|
||||||
b2c.run="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
|
|
||||||
b2c.run="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd | replace('"', '\\\"') }}"
|
|
||||||
kernel:
|
kernel:
|
||||||
{% if kernel_url %}
|
|
||||||
url: '{{ kernel_url }}'
|
url: '{{ kernel_url }}'
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# NOTE: b2c.cache_device should not be here, but this works around
|
|
||||||
# a limitation of b2c which will be removed in the next release
|
|
||||||
cmdline: >
|
cmdline: >
|
||||||
SALAD.machine_id={{ '{{' }} machine_id }}
|
SALAD.machine_id={{ '{{' }} machine_id }}
|
||||||
console={{ '{{' }} local_tty_device }},115200
|
console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
|
||||||
b2c.cache_device=auto b2c.ntp_peer=10.42.0.1
|
loglevel={{ log_level }} no_hash_pointers
|
||||||
b2c.extra_args_url={{ '{{' }} job.http.url }}/b2c-extra-args
|
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/telegraf-container:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
|
||||||
{% if kernel_cmdline_extras is defined %}
|
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/mupuf/valve-infra/machine_registration:latest check"
|
||||||
{{ kernel_cmdline_extras }}
|
b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
|
||||||
|
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
|
||||||
|
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},expiration=pipeline_end,preserve"
|
||||||
|
{% for volume in volumes %}
|
||||||
|
b2c.volume={{ volume }}
|
||||||
|
{% endfor %}
|
||||||
|
b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
|
||||||
|
{% if cmdline_extras is defined %}
|
||||||
|
{{ cmdline_extras }}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if initramfs_url or firmware_url %}
|
|
||||||
initramfs:
|
initramfs:
|
||||||
{% if firmware_url %}
|
url: '{{ initramfs_url }}'
|
||||||
- url: '{{ firmware_url }}'
|
|
||||||
{% endif %}
|
|
||||||
{% if initramfs_url %}
|
|
||||||
- url: '{{ initramfs_url }}'
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if dtb_url %}
|
|
||||||
dtb:
|
|
||||||
url: '{{ dtb_url }}'
|
|
||||||
{% if dtb_match %}
|
|
||||||
format:
|
|
||||||
archive:
|
|
||||||
match: "{{ dtb_match }}"
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
|
@@ -22,34 +22,80 @@
|
|||||||
# IN THE SOFTWARE.
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
from jinja2 import Environment, FileSystemLoader
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from argparse import ArgumentParser
|
||||||
from os import environ, path
|
from os import environ, path
|
||||||
|
|
||||||
|
|
||||||
# Pass all the environment variables prefixed by B2C_
|
parser = ArgumentParser()
|
||||||
values = {
|
parser.add_argument('--ci-job-id')
|
||||||
key.removeprefix("B2C_").lower(): environ[key]
|
parser.add_argument('--container-cmd')
|
||||||
for key in environ if key.startswith("B2C_")
|
parser.add_argument('--initramfs-url')
|
||||||
}
|
parser.add_argument('--job-success-regex')
|
||||||
|
parser.add_argument('--job-warn-regex')
|
||||||
|
parser.add_argument('--kernel-url')
|
||||||
|
parser.add_argument('--log-level', type=int)
|
||||||
|
parser.add_argument('--poweroff-delay', type=int)
|
||||||
|
parser.add_argument('--session-end-regex')
|
||||||
|
parser.add_argument('--session-reboot-regex')
|
||||||
|
parser.add_argument('--tags', nargs='?', default='')
|
||||||
|
parser.add_argument('--template', default='b2c.yml.jinja2.jinja2')
|
||||||
|
parser.add_argument('--timeout-boot-minutes', type=int)
|
||||||
|
parser.add_argument('--timeout-boot-retries', type=int)
|
||||||
|
parser.add_argument('--timeout-first-minutes', type=int)
|
||||||
|
parser.add_argument('--timeout-first-retries', type=int)
|
||||||
|
parser.add_argument('--timeout-minutes', type=int)
|
||||||
|
parser.add_argument('--timeout-overall-minutes', type=int)
|
||||||
|
parser.add_argument('--timeout-retries', type=int)
|
||||||
|
parser.add_argument('--job-volume-exclusions', nargs='?', default='')
|
||||||
|
parser.add_argument('--volume', action='append')
|
||||||
|
parser.add_argument('--mount-volume', action='append')
|
||||||
|
parser.add_argument('--local-container', default=environ.get('B2C_LOCAL_CONTAINER', 'alpine:latest'))
|
||||||
|
parser.add_argument('--working-dir')
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
env = Environment(loader=FileSystemLoader(path.dirname(values['job_template'])),
|
env = Environment(loader=FileSystemLoader(path.dirname(args.template)),
|
||||||
trim_blocks=True, lstrip_blocks=True)
|
trim_blocks=True, lstrip_blocks=True)
|
||||||
|
|
||||||
template = env.get_template(path.basename(values['job_template']))
|
template = env.get_template(path.basename(args.template))
|
||||||
|
|
||||||
values['ci_job_id'] = environ['CI_JOB_ID']
|
values = {}
|
||||||
values['ci_runner_description'] = environ['CI_RUNNER_DESCRIPTION']
|
values['ci_job_id'] = args.ci_job_id
|
||||||
values['job_volume_exclusions'] = [excl for excl in values['job_volume_exclusions'].split(",") if excl]
|
values['container_cmd'] = args.container_cmd
|
||||||
values['working_dir'] = environ['CI_PROJECT_DIR']
|
values['initramfs_url'] = args.initramfs_url
|
||||||
|
values['job_success_regex'] = args.job_success_regex
|
||||||
|
values['job_warn_regex'] = args.job_warn_regex
|
||||||
|
values['kernel_url'] = args.kernel_url
|
||||||
|
values['log_level'] = args.log_level
|
||||||
|
values['poweroff_delay'] = args.poweroff_delay
|
||||||
|
values['session_end_regex'] = args.session_end_regex
|
||||||
|
values['session_reboot_regex'] = args.session_reboot_regex
|
||||||
|
values['tags'] = args.tags
|
||||||
|
values['template'] = args.template
|
||||||
|
values['timeout_boot_minutes'] = args.timeout_boot_minutes
|
||||||
|
values['timeout_boot_retries'] = args.timeout_boot_retries
|
||||||
|
values['timeout_first_minutes'] = args.timeout_first_minutes
|
||||||
|
values['timeout_first_retries'] = args.timeout_first_retries
|
||||||
|
values['timeout_minutes'] = args.timeout_minutes
|
||||||
|
values['timeout_overall_minutes'] = args.timeout_overall_minutes
|
||||||
|
values['timeout_retries'] = args.timeout_retries
|
||||||
|
if len(args.job_volume_exclusions) > 0:
|
||||||
|
exclusions = args.job_volume_exclusions.split(",")
|
||||||
|
values['job_volume_exclusions'] = [excl for excl in exclusions if len(excl) > 0]
|
||||||
|
if args.volume is not None:
|
||||||
|
values['volumes'] = args.volume
|
||||||
|
if args.mount_volume is not None:
|
||||||
|
values['mount_volumes'] = args.mount_volume
|
||||||
|
values['working_dir'] = args.working_dir
|
||||||
|
|
||||||
# Use the gateway's pull-through registry caches to reduce load on fd.o.
|
assert(len(args.local_container) > 0)
|
||||||
values['local_container'] = environ['IMAGE_UNDER_TEST']
|
values['local_container'] = args.local_container.replace(
|
||||||
values['local_container'] = values['local_container'].replace(
|
# Use the gateway's pull-through registry cache to reduce load on fd.o.
|
||||||
'registry.freedesktop.org',
|
'registry.freedesktop.org', '{{ fdo_proxy_registry }}'
|
||||||
'{{ fdo_proxy_registry }}'
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if 'kernel_cmdline_extras' not in values:
|
if 'B2C_KERNEL_CMDLINE_EXTRAS' in environ:
|
||||||
values['kernel_cmdline_extras'] = ''
|
values['cmdline_extras'] = environ['B2C_KERNEL_CMDLINE_EXTRAS']
|
||||||
|
|
||||||
with open(path.splitext(path.basename(values['job_template']))[0], "w") as f:
|
f = open(path.splitext(path.basename(args.template))[0], "w")
|
||||||
f.write(template.render(values))
|
f.write(template.render(values))
|
||||||
|
f.close()
|
||||||
|
26
.gitlab-ci/bare-metal/arm64_a630_egl.sh
Executable file
26
.gitlab-ci/bare-metal/arm64_a630_egl.sh
Executable file
@@ -0,0 +1,26 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This test script groups together a bunch of fast dEQP variant runs
|
||||||
|
# to amortize the cost of rebooting the board.
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
EXIT=0
|
||||||
|
|
||||||
|
# Run reset tests without parallelism:
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/reset \
|
||||||
|
FDO_CI_CONCURRENT=1 \
|
||||||
|
DEQP_CASELIST_FILTER='.*reset.*' \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Then run everything else with parallelism:
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/nonrobustness \
|
||||||
|
DEQP_CASELIST_INV_FILTER='.*reset.*' \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
@@ -5,8 +5,6 @@
|
|||||||
# First stage: very basic setup to bring up network and /dev etc
|
# First stage: very basic setup to bring up network and /dev etc
|
||||||
/init-stage1.sh
|
/init-stage1.sh
|
||||||
|
|
||||||
export CURRENT_SECTION=dut_boot
|
|
||||||
|
|
||||||
# Second stage: run jobs
|
# Second stage: run jobs
|
||||||
test $? -eq 0 && /init-stage2.sh
|
test $? -eq 0 && /init-stage2.sh
|
||||||
|
|
||||||
|
@@ -1,5 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
echo "Must supply the PoE Interface to power down"
|
echo "Must supply the PoE Interface to power down"
|
||||||
@@ -12,6 +11,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
||||||
|
SNMP_ON="i 1"
|
||||||
SNMP_OFF="i 4"
|
SNMP_OFF="i 4"
|
||||||
|
|
||||||
snmpset -v2c -r 3 -t 30 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
|
||||||
|
@@ -1,5 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
echo "Must supply the PoE Interface to power up"
|
echo "Must supply the PoE Interface to power up"
|
||||||
@@ -17,6 +16,6 @@ SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
|||||||
SNMP_ON="i 1"
|
SNMP_ON="i 1"
|
||||||
SNMP_OFF="i 4"
|
SNMP_OFF="i 4"
|
||||||
|
|
||||||
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF
|
||||||
sleep 3s
|
sleep 3s
|
||||||
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_ON
|
snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON
|
||||||
|
@@ -1,7 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# Boot script for Chrome OS devices attached to a servo debug connector, using
|
# Boot script for Chrome OS devices attached to a servo debug connector, using
|
||||||
# NFS and TFTP to boot.
|
# NFS and TFTP to boot.
|
||||||
@@ -9,7 +6,6 @@
|
|||||||
# We're run from the root of the repo, make a helper var for our paths
|
# We're run from the root of the repo, make a helper var for our paths
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
CI_COMMON=$CI_PROJECT_DIR/install/common
|
||||||
CI_INSTALL=$CI_PROJECT_DIR/install
|
|
||||||
|
|
||||||
# Runner config checks
|
# Runner config checks
|
||||||
if [ -z "$BM_SERIAL" ]; then
|
if [ -z "$BM_SERIAL" ]; then
|
||||||
@@ -50,10 +46,6 @@ if [ -z "$BM_CMDLINE" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
. "${SCRIPTS_DIR}/setup-test-env.sh"
|
|
||||||
|
|
||||||
section_start prepare_rootfs "Preparing rootfs components"
|
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
# Clear out any previous run's artifacts.
|
# Clear out any previous run's artifacts.
|
||||||
@@ -88,42 +80,23 @@ mkdir -p /nfs/results
|
|||||||
|
|
||||||
rm -rf /tftp/*
|
rm -rf /tftp/*
|
||||||
if echo "$BM_KERNEL" | grep -q http; then
|
if echo "$BM_KERNEL" | grep -q http; then
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt install -y wget
|
||||||
$BM_KERNEL -o /tftp/vmlinuz
|
wget $BM_KERNEL -O /tftp/vmlinuz
|
||||||
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/nfs/"
|
|
||||||
rm modules.tar.zst &
|
|
||||||
else
|
else
|
||||||
cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
|
cp $BM_KERNEL /tftp/vmlinuz
|
||||||
fi
|
fi
|
||||||
echo "$BM_CMDLINE" > /tftp/cmdline
|
echo "$BM_CMDLINE" > /tftp/cmdline
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
STRUCTURED_LOG_FILE=results/job_detail.json
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
|
|
||||||
section_end prepare_rootfs
|
|
||||||
|
|
||||||
python3 $BM/cros_servo_run.py \
|
python3 $BM/cros_servo_run.py \
|
||||||
--cpu $BM_SERIAL \
|
--cpu $BM_SERIAL \
|
||||||
--ec $BM_SERIAL_EC \
|
--ec $BM_SERIAL_EC \
|
||||||
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20}
|
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
|
||||||
ret=$?
|
ret=$?
|
||||||
|
|
||||||
section_start dut_cleanup "Cleaning up after job"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
# will look for them.
|
# will look for them.
|
||||||
cp -Rp /nfs/results/. results/
|
cp -Rp /nfs/results/. results/
|
||||||
section_end dut_cleanup
|
|
||||||
|
|
||||||
exit $ret
|
exit $ret
|
||||||
|
@@ -1,77 +1,61 @@
|
|||||||
|
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
#
|
#
|
||||||
# Copyright © 2020 Google LLC
|
# Copyright © 2020 Google LLC
|
||||||
# SPDX-License-Identifier: MIT
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import datetime
|
import queue
|
||||||
import math
|
|
||||||
import os
|
|
||||||
import re
|
import re
|
||||||
import sys
|
|
||||||
|
|
||||||
from custom_logger import CustomLogger
|
|
||||||
from serial_buffer import SerialBuffer
|
from serial_buffer import SerialBuffer
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
|
||||||
ANSI_ESCAPE="\x1b[0K"
|
|
||||||
ANSI_COLOUR="\x1b[0;36m"
|
|
||||||
ANSI_RESET="\x1b[0m"
|
|
||||||
SECTION_START="start"
|
|
||||||
SECTION_END="end"
|
|
||||||
|
|
||||||
class CrosServoRun:
|
class CrosServoRun:
|
||||||
def __init__(self, cpu, ec, test_timeout, logger):
|
def __init__(self, cpu, ec, test_timeout):
|
||||||
self.cpu_ser = SerialBuffer(
|
self.cpu_ser = SerialBuffer(
|
||||||
cpu, "results/serial.txt", ": ")
|
cpu, "results/serial.txt", "R SERIAL-CPU> ")
|
||||||
# Merge the EC serial into the cpu_ser's line stream so that we can
|
# Merge the EC serial into the cpu_ser's line stream so that we can
|
||||||
# effectively poll on both at the same time and not have to worry about
|
# effectively poll on both at the same time and not have to worry about
|
||||||
self.ec_ser = SerialBuffer(
|
self.ec_ser = SerialBuffer(
|
||||||
ec, "results/serial-ec.txt", " EC: ", line_queue=self.cpu_ser.line_queue)
|
ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
|
||||||
self.test_timeout = test_timeout
|
self.test_timeout = test_timeout
|
||||||
self.logger = logger
|
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
self.ec_ser.close()
|
self.ec_ser.close()
|
||||||
self.cpu_ser.close()
|
self.cpu_ser.close()
|
||||||
|
|
||||||
def ec_write(self, s):
|
def ec_write(self, s):
|
||||||
print("EC> %s" % s)
|
print("W SERIAL-EC> %s" % s)
|
||||||
self.ec_ser.serial.write(s.encode())
|
self.ec_ser.serial.write(s.encode())
|
||||||
|
|
||||||
def cpu_write(self, s):
|
def cpu_write(self, s):
|
||||||
print("> %s" % s)
|
print("W SERIAL-CPU> %s" % s)
|
||||||
self.cpu_ser.serial.write(s.encode())
|
self.cpu_ser.serial.write(s.encode())
|
||||||
|
|
||||||
def print_error(self, message):
|
def print_error(self, message):
|
||||||
RED = '\033[0;31m'
|
RED = '\033[0;31m'
|
||||||
NO_COLOR = '\033[0m'
|
NO_COLOR = '\033[0m'
|
||||||
print(RED + message + NO_COLOR)
|
print(RED + message + NO_COLOR)
|
||||||
self.logger.update_status_fail(message)
|
|
||||||
|
|
||||||
def get_rel_timestamp(self):
|
|
||||||
now = datetime.datetime.now(tz=datetime.UTC)
|
|
||||||
then_env = os.getenv("CI_JOB_STARTED_AT")
|
|
||||||
if not then_env:
|
|
||||||
return ""
|
|
||||||
delta = now - datetime.datetime.fromisoformat(then_env)
|
|
||||||
return f"[{math.floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}]"
|
|
||||||
|
|
||||||
def get_cur_timestamp(self):
|
|
||||||
return str(int(datetime.datetime.timestamp(datetime.datetime.now())))
|
|
||||||
|
|
||||||
def print_gitlab_section(self, action, name, description, collapse=True):
|
|
||||||
assert action in [SECTION_START, SECTION_END]
|
|
||||||
out = ANSI_ESCAPE + "section_" + action + ":"
|
|
||||||
out += self.get_cur_timestamp() + ":"
|
|
||||||
out += name
|
|
||||||
if action == "start" and collapse:
|
|
||||||
out += "[collapsed=true]"
|
|
||||||
out += "\r" + ANSI_ESCAPE + ANSI_COLOUR
|
|
||||||
out += self.get_rel_timestamp() + " " + description + ANSI_RESET
|
|
||||||
print(out)
|
|
||||||
|
|
||||||
def boot_section(self, action):
|
|
||||||
self.print_gitlab_section(action, "dut_boot", "Booting hardware device", True)
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
# Flush any partial commands in the EC's prompt, then ask for a reboot.
|
# Flush any partial commands in the EC's prompt, then ask for a reboot.
|
||||||
@@ -79,9 +63,6 @@ class CrosServoRun:
|
|||||||
self.ec_write("reboot\n")
|
self.ec_write("reboot\n")
|
||||||
|
|
||||||
bootloader_done = False
|
bootloader_done = False
|
||||||
self.logger.create_job_phase("boot")
|
|
||||||
self.boot_section(SECTION_START)
|
|
||||||
tftp_failures = 0
|
|
||||||
# This is emitted right when the bootloader pauses to check for input.
|
# This is emitted right when the bootloader pauses to check for input.
|
||||||
# Emit a ^N character to request network boot, because we don't have a
|
# Emit a ^N character to request network boot, because we don't have a
|
||||||
# direct-to-netboot firmware on cheza.
|
# direct-to-netboot firmware on cheza.
|
||||||
@@ -91,17 +72,6 @@ class CrosServoRun:
|
|||||||
bootloader_done = True
|
bootloader_done = True
|
||||||
break
|
break
|
||||||
|
|
||||||
# The Cheza firmware seems to occasionally get stuck looping in
|
|
||||||
# this error state during TFTP booting, possibly based on amount of
|
|
||||||
# network traffic around it, but it'll usually recover after a
|
|
||||||
# reboot. Currently mostly visible on google-freedreno-cheza-14.
|
|
||||||
if re.search("R8152: Bulk read error 0xffffffbf", line):
|
|
||||||
tftp_failures += 1
|
|
||||||
if tftp_failures >= 10:
|
|
||||||
self.print_error(
|
|
||||||
"Detected intermittent tftp failure, restarting run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# If the board has a netboot firmware and we made it to booting the
|
# If the board has a netboot firmware and we made it to booting the
|
||||||
# kernel, proceed to processing of the test run.
|
# kernel, proceed to processing of the test run.
|
||||||
if re.search("Booting Linux", line):
|
if re.search("Booting Linux", line):
|
||||||
@@ -113,30 +83,41 @@ class CrosServoRun:
|
|||||||
# in the farm.
|
# in the farm.
|
||||||
if re.search("POWER_GOOD not seen in time", line):
|
if re.search("POWER_GOOD not seen in time", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected intermittent poweron failure, abandoning run.")
|
"Detected intermittent poweron failure, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
if not bootloader_done:
|
if not bootloader_done:
|
||||||
self.print_error("Failed to make it through bootloader, abandoning run.")
|
print("Failed to make it through bootloader, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
self.logger.create_job_phase("test")
|
tftp_failures = 0
|
||||||
for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
|
for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
|
||||||
if re.search("---. end Kernel panic", line):
|
if re.search("---. end Kernel panic", line):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
# The Cheza firmware seems to occasionally get stuck looping in
|
||||||
|
# this error state during TFTP booting, possibly based on amount of
|
||||||
|
# network traffic around it, but it'll usually recover after a
|
||||||
|
# reboot.
|
||||||
|
if re.search("R8152: Bulk read error 0xffffffbf", line):
|
||||||
|
tftp_failures += 1
|
||||||
|
if tftp_failures >= 100:
|
||||||
|
self.print_error(
|
||||||
|
"Detected intermittent tftp failure, restarting run...")
|
||||||
|
return 2
|
||||||
|
|
||||||
# There are very infrequent bus errors during power management transitions
|
# There are very infrequent bus errors during power management transitions
|
||||||
# on cheza, which we don't expect to be the case on future boards.
|
# on cheza, which we don't expect to be the case on future boards.
|
||||||
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
|
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected cheza power management bus error, abandoning run.")
|
"Detected cheza power management bus error, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# If the network device dies, it's probably not graphics's fault, just try again.
|
# If the network device dies, it's probably not graphics's fault, just try again.
|
||||||
if re.search("NETDEV WATCHDOG", line):
|
if re.search("NETDEV WATCHDOG", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected network device failure, abandoning run.")
|
"Detected network device failure, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# These HFI response errors started appearing with the introduction
|
# These HFI response errors started appearing with the introduction
|
||||||
# of piglit runs. CosmicPenguin says:
|
# of piglit runs. CosmicPenguin says:
|
||||||
@@ -149,34 +130,28 @@ class CrosServoRun:
|
|||||||
# break many tests after that, just restart the whole run.
|
# break many tests after that, just restart the whole run.
|
||||||
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
|
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected cheza power management bus error, abandoning run.")
|
"Detected cheza power management bus error, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
if re.search("coreboot.*bootblock starting", line):
|
if re.search("coreboot.*bootblock starting", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected spontaneous reboot, abandoning run.")
|
"Detected spontaneous reboot, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
|
if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
|
||||||
self.print_error("Detected cheza MMU fail, abandoning run.")
|
self.print_error("Detected cheza MMU fail, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
result = re.search("hwci: mesa: (\S*)", line)
|
||||||
if result:
|
if result:
|
||||||
status = result.group(1)
|
if result.group(1) == "pass":
|
||||||
exit_code = int(result.group(2))
|
return 0
|
||||||
|
|
||||||
if status == "pass":
|
|
||||||
self.logger.update_dut_job("status", "pass")
|
|
||||||
else:
|
else:
|
||||||
self.logger.update_status_fail("test fail")
|
return 1
|
||||||
|
|
||||||
self.logger.update_dut_job("exit_code", exit_code)
|
|
||||||
return exit_code
|
|
||||||
|
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Reached the end of the CPU serial log without finding a result")
|
"Reached the end of the CPU serial log without finding a result")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
@@ -189,14 +164,16 @@ def main():
|
|||||||
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
logger = CustomLogger("results/job_detail.json")
|
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
|
||||||
logger.update_dut_time("start", None)
|
|
||||||
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger)
|
while True:
|
||||||
retval = servo.run()
|
retval = servo.run()
|
||||||
|
if retval != 2:
|
||||||
|
break
|
||||||
|
|
||||||
# power down the CPU on the device
|
# power down the CPU on the device
|
||||||
servo.ec_write("power off\n")
|
servo.ec_write("power off\n")
|
||||||
logger.update_dut_time("end", None)
|
|
||||||
servo.close()
|
servo.close()
|
||||||
|
|
||||||
sys.exit(retval)
|
sys.exit(retval)
|
||||||
|
@@ -7,4 +7,4 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
|
||||||
|
@@ -7,6 +7,6 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
|
||||||
sleep 5
|
sleep 5
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" on "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT on $relay
|
||||||
|
@@ -5,27 +5,26 @@ set -e
|
|||||||
STRINGS=$(mktemp)
|
STRINGS=$(mktemp)
|
||||||
ERRORS=$(mktemp)
|
ERRORS=$(mktemp)
|
||||||
|
|
||||||
trap 'rm $STRINGS; rm $ERRORS;' EXIT
|
trap "rm $STRINGS; rm $ERRORS;" EXIT
|
||||||
|
|
||||||
FILE=$1
|
FILE=$1
|
||||||
shift 1
|
shift 1
|
||||||
|
|
||||||
while getopts "f:e:" opt; do
|
while getopts "f:e:" opt; do
|
||||||
case $opt in
|
case $opt in
|
||||||
f) echo "$OPTARG" >> "$STRINGS";;
|
f) echo "$OPTARG" >> $STRINGS;;
|
||||||
e) echo "$OPTARG" >> "$STRINGS" ; echo "$OPTARG" >> "$ERRORS";;
|
e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
|
||||||
*) exit
|
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
shift $((OPTIND -1))
|
shift $((OPTIND -1))
|
||||||
|
|
||||||
echo "Waiting for $FILE to say one of following strings"
|
echo "Waiting for $FILE to say one of following strings"
|
||||||
cat "$STRINGS"
|
cat $STRINGS
|
||||||
|
|
||||||
while ! grep -E -wf "$STRINGS" "$FILE"; do
|
while ! egrep -wf $STRINGS $FILE; do
|
||||||
sleep 2
|
sleep 2
|
||||||
done
|
done
|
||||||
|
|
||||||
if grep -E -wf "$ERRORS" "$FILE"; then
|
if egrep -wf $ERRORS $FILE; then
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
@@ -1,14 +1,9 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
CI_COMMON=$CI_PROJECT_DIR/install/common
|
||||||
|
|
||||||
if [ -z "$BM_SERIAL" ] && [ -z "$BM_SERIAL_SCRIPT" ]; then
|
if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
|
||||||
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
|
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
|
||||||
echo "BM_SERIAL:"
|
echo "BM_SERIAL:"
|
||||||
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
|
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
|
||||||
@@ -55,8 +50,6 @@ if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
|
|||||||
BM_FASTBOOT_NFSROOT=1
|
BM_FASTBOOT_NFSROOT=1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
section_start prepare_rootfs "Preparing rootfs components"
|
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
# Clear out any previous run's artifacts.
|
# Clear out any previous run's artifacts.
|
||||||
@@ -89,57 +82,44 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
pushd rootfs
|
pushd rootfs
|
||||||
find -H . | \
|
find -H | \
|
||||||
grep -E -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
|
egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
|
||||||
grep -E -v "traces-db|apitrace|renderdoc" | \
|
egrep -v "traces-db|apitrace|renderdoc" | \
|
||||||
grep -E -v $EXCLUDE_FILTER | \
|
egrep -v $EXCLUDE_FILTER | \
|
||||||
cpio -H newc -o | \
|
cpio -H newc -o | \
|
||||||
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
|
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
|
||||||
popd
|
popd
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Make the combined kernel image and dtb for passing to fastboot. For normal
|
||||||
|
# Mesa development, we build the kernel and store it in the docker container
|
||||||
|
# that this script is running in.
|
||||||
|
#
|
||||||
|
# However, container builds are expensive, so when you're hacking on the
|
||||||
|
# kernel, it's nice to be able to skip the half hour container build and plus
|
||||||
|
# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
|
||||||
|
# fetch them instead of looking in the container.
|
||||||
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
|
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt install -y wget
|
||||||
"$BM_KERNEL" -o kernel
|
|
||||||
# FIXME: modules should be supplied too
|
wget $BM_KERNEL -O kernel
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget $BM_DTB -O dtb
|
||||||
"$BM_DTB" -o dtb
|
|
||||||
|
|
||||||
cat kernel dtb > Image.gz-dtb
|
cat kernel dtb > Image.gz-dtb
|
||||||
|
rm kernel dtb
|
||||||
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
|
|
||||||
if [ -n "$BM_DTB" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o dtb
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat kernel dtb > Image.gz-dtb || echo "No DTB available, using pure kernel."
|
|
||||||
rm kernel
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "$BM_ROOTFS/"
|
|
||||||
rm modules.tar.zst &
|
|
||||||
else
|
else
|
||||||
cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
|
cat $BM_KERNEL $BM_DTB > Image.gz-dtb
|
||||||
cp /baremetal-files/"$BM_DTB".dtb dtb
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
export PATH=$BM:$PATH
|
|
||||||
|
|
||||||
mkdir -p artifacts
|
mkdir -p artifacts
|
||||||
mkbootimg.py \
|
abootimg \
|
||||||
--kernel Image.gz-dtb \
|
--create artifacts/fastboot.img \
|
||||||
--ramdisk rootfs.cpio.gz \
|
-k Image.gz-dtb \
|
||||||
--dtb dtb \
|
-r rootfs.cpio.gz \
|
||||||
--cmdline "$BM_CMDLINE" \
|
-c cmdline="$BM_CMDLINE"
|
||||||
$BM_MKBOOT_PARAMS \
|
rm Image.gz-dtb
|
||||||
--header_version 2 \
|
|
||||||
-o artifacts/fastboot.img
|
|
||||||
|
|
||||||
rm Image.gz-dtb dtb
|
export PATH=$BM:$PATH
|
||||||
|
|
||||||
# Start background command for talking to serial if we have one.
|
# Start background command for talking to serial if we have one.
|
||||||
if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
||||||
@@ -150,12 +130,10 @@ if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
|||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
section_end prepare_rootfs
|
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
$BM/fastboot_run.py \
|
$BM/fastboot_run.py \
|
||||||
--dev="$BM_SERIAL" \
|
--dev="$BM_SERIAL" \
|
||||||
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} \
|
--test-timeout ${TEST_PHASE_TIMEOUT:-20} \
|
||||||
--fbserial="$BM_FASTBOOT_SERIAL" \
|
--fbserial="$BM_FASTBOOT_SERIAL" \
|
||||||
--powerup="$BM_POWERUP" \
|
--powerup="$BM_POWERUP" \
|
||||||
--powerdown="$BM_POWERDOWN"
|
--powerdown="$BM_POWERDOWN"
|
||||||
|
@@ -51,8 +51,8 @@ class FastbootRun:
|
|||||||
try:
|
try:
|
||||||
return subprocess.call(cmd, shell=True, timeout=timeout)
|
return subprocess.call(cmd, shell=True, timeout=timeout)
|
||||||
except subprocess.TimeoutExpired:
|
except subprocess.TimeoutExpired:
|
||||||
self.print_error("timeout, abandoning run.")
|
self.print_error("timeout, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
if ret := self.logged_system(self.powerup):
|
if ret := self.logged_system(self.powerup):
|
||||||
@@ -60,20 +60,20 @@ class FastbootRun:
|
|||||||
|
|
||||||
fastboot_ready = False
|
fastboot_ready = False
|
||||||
for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
|
for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
|
||||||
if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \
|
if re.search("fastboot: processing commands", line) or \
|
||||||
re.search("Listening for fastboot command on", line):
|
re.search("Listening for fastboot command on", line):
|
||||||
fastboot_ready = True
|
fastboot_ready = True
|
||||||
break
|
break
|
||||||
|
|
||||||
if re.search("data abort", line):
|
if re.search("data abort", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected crash during boot, abandoning run.")
|
"Detected crash during boot, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
if not fastboot_ready:
|
if not fastboot_ready:
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Failed to get to fastboot prompt, abandoning run.")
|
"Failed to get to fastboot prompt, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
if ret := self.logged_system(self.fastboot):
|
if ret := self.logged_system(self.fastboot):
|
||||||
return ret
|
return ret
|
||||||
@@ -81,7 +81,7 @@ class FastbootRun:
|
|||||||
print_more_lines = -1
|
print_more_lines = -1
|
||||||
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
||||||
if print_more_lines == 0:
|
if print_more_lines == 0:
|
||||||
return 1
|
return 2
|
||||||
if print_more_lines > 0:
|
if print_more_lines > 0:
|
||||||
print_more_lines -= 1
|
print_more_lines -= 1
|
||||||
|
|
||||||
@@ -92,20 +92,20 @@ class FastbootRun:
|
|||||||
# when if we see a reboot after we got past fastboot.
|
# when if we see a reboot after we got past fastboot.
|
||||||
if re.search("PON REASON", line):
|
if re.search("PON REASON", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected spontaneous reboot, abandoning run.")
|
"Detected spontaneous reboot, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# db820c sometimes wedges around iommu fault recovery
|
# db820c sometimes wedges around iommu fault recovery
|
||||||
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
|
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected kernel soft lockup, abandoning run.")
|
"Detected kernel soft lockup, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# If the network device dies, it's probably not graphics's fault, just try again.
|
# If the network device dies, it's probably not graphics's fault, just try again.
|
||||||
if re.search("NETDEV WATCHDOG", line):
|
if re.search("NETDEV WATCHDOG", line):
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected network device failure, abandoning run.")
|
"Detected network device failure, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# A3xx recovery doesn't quite work. Sometimes the GPU will get
|
# A3xx recovery doesn't quite work. Sometimes the GPU will get
|
||||||
# wedged and recovery will fail (because power can't be reset?)
|
# wedged and recovery will fail (because power can't be reset?)
|
||||||
@@ -115,20 +115,20 @@ class FastbootRun:
|
|||||||
# of the hang. Once a hang happens, it's pretty chatty.
|
# of the hang. Once a hang happens, it's pretty chatty.
|
||||||
if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
|
if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Detected GPU hang, abandoning run.")
|
"Detected GPU hang, restarting run...")
|
||||||
if print_more_lines == -1:
|
if print_more_lines == -1:
|
||||||
print_more_lines = 30
|
print_more_lines = 30
|
||||||
|
|
||||||
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
result = re.search("hwci: mesa: (\S*)", line)
|
||||||
if result:
|
if result:
|
||||||
status = result.group(1)
|
if result.group(1) == "pass":
|
||||||
exit_code = int(result.group(2))
|
return 0
|
||||||
|
else:
|
||||||
return exit_code
|
return 1
|
||||||
|
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Reached the end of the CPU serial log without finding a result, abandoning run.")
|
"Reached the end of the CPU serial log without finding a result, restarting run...")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
@@ -147,8 +147,13 @@ def main():
|
|||||||
|
|
||||||
fastboot = FastbootRun(args, args.test_timeout * 60)
|
fastboot = FastbootRun(args, args.test_timeout * 60)
|
||||||
|
|
||||||
retval = fastboot.run()
|
while True:
|
||||||
fastboot.close()
|
retval = fastboot.run()
|
||||||
|
fastboot.close()
|
||||||
|
if retval != 2:
|
||||||
|
break
|
||||||
|
|
||||||
|
fastboot = FastbootRun(args, args.test_timeout * 60)
|
||||||
|
|
||||||
fastboot.logged_system(args.powerdown)
|
fastboot.logged_system(args.powerdown)
|
||||||
|
|
||||||
|
@@ -7,4 +7,4 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
|
||||||
|
@@ -7,6 +7,6 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
|
||||||
sleep 5
|
sleep 5
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py on "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay
|
||||||
|
@@ -1,569 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
#
|
|
||||||
# Copyright 2015, The Android Open Source Project
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
"""Creates the boot image."""
|
|
||||||
from argparse import (ArgumentParser, ArgumentTypeError,
|
|
||||||
FileType, RawDescriptionHelpFormatter)
|
|
||||||
from hashlib import sha1
|
|
||||||
from os import fstat
|
|
||||||
from struct import pack
|
|
||||||
import array
|
|
||||||
import collections
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import tempfile
|
|
||||||
# Constant and structure definition is in
|
|
||||||
# system/tools/mkbootimg/include/bootimg/bootimg.h
|
|
||||||
BOOT_MAGIC = 'ANDROID!'
|
|
||||||
BOOT_MAGIC_SIZE = 8
|
|
||||||
BOOT_NAME_SIZE = 16
|
|
||||||
BOOT_ARGS_SIZE = 512
|
|
||||||
BOOT_EXTRA_ARGS_SIZE = 1024
|
|
||||||
BOOT_IMAGE_HEADER_V1_SIZE = 1648
|
|
||||||
BOOT_IMAGE_HEADER_V2_SIZE = 1660
|
|
||||||
BOOT_IMAGE_HEADER_V3_SIZE = 1580
|
|
||||||
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
|
|
||||||
BOOT_IMAGE_HEADER_V4_SIZE = 1584
|
|
||||||
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096
|
|
||||||
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
|
|
||||||
VENDOR_BOOT_MAGIC_SIZE = 8
|
|
||||||
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
|
|
||||||
VENDOR_BOOT_ARGS_SIZE = 2048
|
|
||||||
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
|
|
||||||
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128
|
|
||||||
VENDOR_RAMDISK_TYPE_NONE = 0
|
|
||||||
VENDOR_RAMDISK_TYPE_PLATFORM = 1
|
|
||||||
VENDOR_RAMDISK_TYPE_RECOVERY = 2
|
|
||||||
VENDOR_RAMDISK_TYPE_DLKM = 3
|
|
||||||
VENDOR_RAMDISK_NAME_SIZE = 32
|
|
||||||
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
|
|
||||||
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108
|
|
||||||
# Names with special meaning, mustn't be specified in --ramdisk_name.
|
|
||||||
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}
|
|
||||||
PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
|
|
||||||
def filesize(f):
|
|
||||||
if f is None:
|
|
||||||
return 0
|
|
||||||
try:
|
|
||||||
return fstat(f.fileno()).st_size
|
|
||||||
except OSError:
|
|
||||||
return 0
|
|
||||||
def update_sha(sha, f):
|
|
||||||
if f:
|
|
||||||
sha.update(f.read())
|
|
||||||
f.seek(0)
|
|
||||||
sha.update(pack('I', filesize(f)))
|
|
||||||
else:
|
|
||||||
sha.update(pack('I', 0))
|
|
||||||
def pad_file(f, padding):
|
|
||||||
pad = (padding - (f.tell() & (padding - 1))) & (padding - 1)
|
|
||||||
f.write(pack(str(pad) + 'x'))
|
|
||||||
def get_number_of_pages(image_size, page_size):
|
|
||||||
"""calculates the number of pages required for the image"""
|
|
||||||
return (image_size + page_size - 1) // page_size
|
|
||||||
def get_recovery_dtbo_offset(args):
|
|
||||||
"""calculates the offset of recovery_dtbo image in the boot image"""
|
|
||||||
num_header_pages = 1 # header occupies a page
|
|
||||||
num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize)
|
|
||||||
num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk),
|
|
||||||
args.pagesize)
|
|
||||||
num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize)
|
|
||||||
dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages +
|
|
||||||
num_ramdisk_pages + num_second_pages)
|
|
||||||
return dtbo_offset
|
|
||||||
def write_header_v3_and_above(args):
|
|
||||||
if args.header_version > 3:
|
|
||||||
boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
|
|
||||||
else:
|
|
||||||
boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE
|
|
||||||
args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
|
|
||||||
# kernel size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.kernel)))
|
|
||||||
# ramdisk size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.ramdisk)))
|
|
||||||
# os version and patch level
|
|
||||||
args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
|
|
||||||
args.output.write(pack('I', boot_header_size))
|
|
||||||
# reserved
|
|
||||||
args.output.write(pack('4I', 0, 0, 0, 0))
|
|
||||||
# version of boot image header
|
|
||||||
args.output.write(pack('I', args.header_version))
|
|
||||||
args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
|
|
||||||
args.cmdline))
|
|
||||||
if args.header_version >= 4:
|
|
||||||
# The signature used to verify boot image v4.
|
|
||||||
args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
|
|
||||||
pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
|
|
||||||
def write_vendor_boot_header(args):
    """Writes a v3/v4 vendor boot image header to args.vendor_boot.

    Raises:
        ValueError: If the DTB image is missing or empty.
    """
    if filesize(args.dtb) == 0:
        raise ValueError('DTB image must not be empty.')

    if args.header_version > 3:
        # v4 carries multiple ramdisk fragments; the total size comes from
        # the vendor ramdisk table built in parse_vendor_ramdisk_args().
        vendor_ramdisk_size = args.vendor_ramdisk_total_size
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
    else:
        vendor_ramdisk_size = filesize(args.vendor_ramdisk)
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE

    args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
                                VENDOR_BOOT_MAGIC.encode()))
    # version of boot image header
    args.vendor_boot.write(pack('I', args.header_version))
    # flash page size
    args.vendor_boot.write(pack('I', args.pagesize))
    # kernel physical load address
    args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
    # ramdisk physical load address
    args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
    # ramdisk size in bytes
    args.vendor_boot.write(pack('I', vendor_ramdisk_size))
    # vendor kernel command line
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
                                args.vendor_cmdline))
    # kernel tags physical load address
    args.vendor_boot.write(pack('I', args.base + args.tags_offset))
    # asciiz product name
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
    # header size in bytes
    args.vendor_boot.write(pack('I', vendor_boot_header_size))
    # dtb size in bytes
    args.vendor_boot.write(pack('I', filesize(args.dtb)))
    # dtb physical load address
    args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))

    if args.header_version > 3:
        vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
                                     VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
        # vendor ramdisk table size in bytes
        args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
        # number of vendor ramdisk table entries
        args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
        # vendor ramdisk table entry size in bytes
        args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
        # bootconfig section size in bytes
        args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
    pad_file(args.vendor_boot, args.pagesize)
def write_header(args):
    """Writes the boot image header for header versions 0 through 4.

    Versions 3 and 4 are delegated to write_header_v3_and_above() and
    produce no image ID.

    Returns:
        The packed SHA-1 image ID for header versions 0-2, None otherwise.

    Raises:
        ValueError: If the header version is unsupported, or a required
            DTB image is empty.
    """
    if args.header_version > 4:
        raise ValueError(
            f'Boot header version {args.header_version} not supported')
    if args.header_version in {3, 4}:
        return write_header_v3_and_above(args)

    # Load addresses are zeroed when the corresponding image is absent.
    ramdisk_load_address = ((args.base + args.ramdisk_offset)
                            if filesize(args.ramdisk) > 0 else 0)
    second_load_address = ((args.base + args.second_offset)
                           if filesize(args.second) > 0 else 0)

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # kernel physical load address
    args.output.write(pack('I', args.base + args.kernel_offset))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # ramdisk physical load address
    args.output.write(pack('I', ramdisk_load_address))
    # second bootloader size in bytes
    args.output.write(pack('I', filesize(args.second)))
    # second bootloader physical load address
    args.output.write(pack('I', second_load_address))
    # kernel tags physical load address
    args.output.write(pack('I', args.base + args.tags_offset))
    # flash page size
    args.output.write(pack('I', args.pagesize))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    # os version and patch level
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    # asciiz product name
    args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
    # kernel command line (first chunk; the rest goes in extra_cmdline)
    args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))

    # The image ID is a SHA-1 over the payload images; which images
    # participate depends on the header version.
    sha = sha1()
    update_sha(sha, args.kernel)
    update_sha(sha, args.ramdisk)
    update_sha(sha, args.second)
    if args.header_version > 0:
        update_sha(sha, args.recovery_dtbo)
    if args.header_version > 1:
        update_sha(sha, args.dtb)
    img_id = pack('32s', sha.digest())

    args.output.write(img_id)
    args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))

    if args.header_version > 0:
        if args.recovery_dtbo:
            # recovery dtbo size in bytes
            args.output.write(pack('I', filesize(args.recovery_dtbo)))
            # recovery dtbo offset in the boot image
            args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
        else:
            # Set to zero if no recovery dtbo
            args.output.write(pack('I', 0))
            args.output.write(pack('Q', 0))

    # Populate boot image header size for header versions 1 and 2.
    if args.header_version == 1:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
    elif args.header_version == 2:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))

    if args.header_version > 1:
        if filesize(args.dtb) == 0:
            raise ValueError('DTB image must not be empty.')
        # dtb size in bytes
        args.output.write(pack('I', filesize(args.dtb)))
        # dtb physical load address
        args.output.write(pack('Q', args.base + args.dtb_offset))
    pad_file(args.output, args.pagesize)
    return img_id
class AsciizBytes:
    """Parses a string and encodes it as an asciiz bytes object.

    >>> AsciizBytes(bufsize=4)('foo')
    b'foo\\x00'
    >>> AsciizBytes(bufsize=4)('foob')
    Traceback (most recent call last):
        ...
    argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
    """

    def __init__(self, bufsize):
        # Maximum number of bytes, including the trailing NUL, that may
        # be produced by a call.
        self.bufsize = bufsize

    def __call__(self, arg):
        encoded = arg.encode() + b'\x00'
        if len(encoded) > self.bufsize:
            raise ArgumentTypeError(
                'Encoded asciiz length exceeded: '
                f'max {self.bufsize}, got {len(encoded)}')
        return encoded
class VendorRamdiskTableBuilder:
    """Vendor ramdisk table builder.

    Attributes:
        entries: A list of VendorRamdiskTableEntry namedtuple.
        ramdisk_total_size: Total size in bytes of all ramdisks in the table.
    """

    VendorRamdiskTableEntry = collections.namedtuple(  # pylint: disable=invalid-name
        'VendorRamdiskTableEntry',
        ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
         'ramdisk_name', 'board_id'])

    def __init__(self):
        self.entries = []
        self.ramdisk_total_size = 0
        # Names already added; used to reject duplicates in add_entry().
        self.ramdisk_names = set()

    def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
        """Appends one ramdisk fragment to the table.

        Raises:
            ValueError: On a blocklisted or duplicate ramdisk name, or a
                board_id vector of the wrong length.
        """
        # Strip any trailing null for simple comparison.
        stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
        if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
            raise ValueError(
                f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
        if stripped_ramdisk_name in self.ramdisk_names:
            raise ValueError(
                f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
        self.ramdisk_names.add(stripped_ramdisk_name)

        # A missing board_id defaults to an all-zero vector.
        if board_id is None:
            board_id = array.array(
                'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
        else:
            board_id = array.array('I', board_id)
        if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
            raise ValueError('board_id size must be '
                             f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')

        with open(ramdisk_path, 'rb') as f:
            ramdisk_size = filesize(f)
        # The new fragment starts where the previous fragments end, so the
        # running total doubles as this entry's offset.
        self.entries.append(self.VendorRamdiskTableEntry(
            ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
            ramdisk_name, board_id))
        self.ramdisk_total_size += ramdisk_size

    def write_ramdisks_padded(self, fout, alignment):
        """Concatenates all ramdisk payloads into fout, then pads to alignment."""
        for entry in self.entries:
            with open(entry.ramdisk_path, 'rb') as f:
                fout.write(f.read())
        pad_file(fout, alignment)

    def write_entries_padded(self, fout, alignment):
        """Writes the packed table entries into fout, then pads to alignment."""
        for entry in self.entries:
            fout.write(pack('I', entry.ramdisk_size))
            fout.write(pack('I', entry.ramdisk_offset))
            fout.write(pack('I', entry.ramdisk_type))
            fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
                            entry.ramdisk_name))
            fout.write(entry.board_id)
        pad_file(fout, alignment)
def write_padded_file(f_out, f_in, padding):
    """Copies f_in into f_out, then pads f_out to a multiple of padding bytes.

    A None input is a no-op so optional images can be passed straight through.
    """
    if f_in is not None:
        f_out.write(f_in.read())
        pad_file(f_out, padding)
def parse_int(x):
    """Parses a decimal, hex (0x), octal (0o) or binary (0b) integer string."""
    return int(x, base=0)
def parse_os_version(x):
    """Encodes an 'A[.B[.C]]' version string into the packed os_version field.

    Each component is allotted 7 bits: (A << 14) | (B << 7) | C.  Returns 0
    when the string does not begin with a version number.
    """
    match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
    if not match:
        return 0
    parts = [0, 0, 0]
    for i in range(match.lastindex):
        parts[i] = int(match.group(i + 1))
    a, b, c = parts
    # 7 bits allocated for each field
    assert a < 128 and b < 128 and c < 128
    return (a << 14) | (b << 7) | c
def parse_os_patch_level(x):
    """Encodes a 'YYYY-MM[-DD]' security patch level into the packed field.

    7 bits hold the year offset from 2000 and 4 bits the month; the day, if
    present, is ignored.  Returns 0 when the string is not a patch level.
    """
    match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
    if not match:
        return 0
    year = int(match.group(1)) - 2000
    month = int(match.group(2))
    # 7 bits allocated for the year, 4 bits for the month
    assert 0 <= year < 128
    assert 0 < month <= 12
    return (year << 4) | month
def parse_vendor_ramdisk_type(x):
    """Maps a symbolic ramdisk type name to its numeric value.

    Unrecognized names fall through to integer parsing, so raw numeric
    values are still accepted.
    """
    type_map = {
        'none': VENDOR_RAMDISK_TYPE_NONE,
        'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
        'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
        'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
    }
    try:
        return type_map[x.lower()]
    except KeyError:
        return parse_int(x)
def get_vendor_boot_v4_usage():
    """Returns the extra usage text describing vendor boot v4 arguments."""
    return """vendor boot version 4 arguments:
  --ramdisk_type {none,platform,recovery,dlkm}
                        specify the type of the ramdisk
  --ramdisk_name NAME
                        specify the name of the ramdisk
  --board_id{0..15} NUMBER
                        specify the value of the board_id vector, defaults to 0
  --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
                        path to the vendor ramdisk file

These options can be specified multiple times, where each vendor ramdisk
option group ends with a --vendor_ramdisk_fragment option.
Each option group appends an additional ramdisk to the vendor boot image.
"""
def parse_vendor_ramdisk_args(args, args_list):
    """Parses vendor ramdisk specific arguments.

    Args:
        args: An argparse.Namespace object. Parsed results are stored into this
            object.
        args_list: A list of argument strings to be parsed.

    Returns:
        A list argument strings that are not parsed by this method.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
                        default=VENDOR_RAMDISK_TYPE_NONE)
    parser.add_argument('--ramdisk_name',
                        type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
                        required=True)
    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
        parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
    parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)

    unknown_args = []

    vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
    if args.vendor_ramdisk is not None:
        # The plain --vendor_ramdisk, if given, becomes the first
        # (unnamed, platform-type) fragment in the table.
        vendor_ramdisk_table_builder.add_entry(
            args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)

    while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
        # Each option group ends with '--vendor_ramdisk_fragment FILE';
        # consume everything up to and including that pair of tokens.
        idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
        vendor_ramdisk_args = args_list[:idx]
        args_list = args_list[idx:]

        ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
        ramdisk_args_dict = vars(ramdisk_args)
        unknown_args.extend(extra_args)

        ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
        ramdisk_type = ramdisk_args.ramdisk_type
        ramdisk_name = ramdisk_args.ramdisk_name
        board_id = [ramdisk_args_dict[f'board_id{i}']
                    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
        vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
                                               ramdisk_name, board_id)

    # Anything left after the last fragment is unparsed.
    if len(args_list) > 0:
        unknown_args.extend(args_list)

    # Publish the aggregate results on the shared namespace for the
    # header/data writers.
    args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
                                      .ramdisk_total_size)
    args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
                                              .entries)
    args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
    return unknown_args
def parse_cmdline():
    """Parses the command line and returns an argparse.Namespace.

    Also performs the v0-v2 cmdline/extra_cmdline split and delegates
    vendor-ramdisk option groups to parse_vendor_ramdisk_args() for v4.

    Raises:
        ValueError: If unrecognized arguments remain after parsing.
    """
    # Pre-parse only --header_version: the allowed cmdline size depends on it.
    version_parser = ArgumentParser(add_help=False)
    version_parser.add_argument('--header_version', type=parse_int, default=0)
    if version_parser.parse_known_args()[0].header_version < 3:
        # For boot header v0 to v2, the kernel commandline field is split into
        # two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
        # so we minus one here to ensure the encoded string plus the
        # null-terminator can fit in the buffer size.
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
    else:
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            epilog=get_vendor_boot_v4_usage())
    parser.add_argument('--kernel', type=FileType('rb'),
                        help='path to the kernel')
    parser.add_argument('--ramdisk', type=FileType('rb'),
                        help='path to the ramdisk')
    parser.add_argument('--second', type=FileType('rb'),
                        help='path to the second bootloader')
    parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
    dtbo_group = parser.add_mutually_exclusive_group()
    dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
                            help='path to the recovery DTBO')
    dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
                            metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
                            help='path to the recovery ACPIO')
    parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
                        default='', help='kernel command line arguments')
    parser.add_argument('--vendor_cmdline',
                        type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
                        default='',
                        help='vendor boot kernel command line arguments')
    parser.add_argument('--base', type=parse_int, default=0x10000000,
                        help='base address')
    parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
                        help='kernel offset')
    parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
                        help='ramdisk offset')
    parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
                        help='second bootloader offset')
    parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
                        help='dtb offset')
    parser.add_argument('--os_version', type=parse_os_version, default=0,
                        help='operating system version')
    parser.add_argument('--os_patch_level', type=parse_os_patch_level,
                        default=0, help='operating system patch level')
    parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
                        help='tags offset')
    parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
                        default='', help='board name')
    parser.add_argument('--pagesize', type=parse_int,
                        choices=[2**i for i in range(11, 15)], default=2048,
                        help='page size')
    parser.add_argument('--id', action='store_true',
                        help='print the image ID on standard output')
    parser.add_argument('--header_version', type=parse_int, default=0,
                        help='boot image header version')
    parser.add_argument('-o', '--output', type=FileType('wb'),
                        help='output file name')
    parser.add_argument('--gki_signing_algorithm',
                        help='GKI signing algorithm to use')
    parser.add_argument('--gki_signing_key',
                        help='path to RSA private key file')
    parser.add_argument('--gki_signing_signature_args',
                        help='other hash arguments passed to avbtool')
    parser.add_argument('--gki_signing_avbtool_path',
                        help='path to avbtool for boot signature generation')
    parser.add_argument('--vendor_boot', type=FileType('wb'),
                        help='vendor boot output file name')
    parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
                        help='path to the vendor ramdisk')
    parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
                        help='path to the vendor bootconfig file')

    args, extra_args = parser.parse_known_args()
    if args.vendor_boot is not None and args.header_version > 3:
        extra_args = parse_vendor_ramdisk_args(args, extra_args)
    if len(extra_args) > 0:
        raise ValueError(f'Unrecognized arguments: {extra_args}')

    if args.header_version < 3:
        # Split the combined cmdline back into the two header fields; each
        # chunk is NUL-terminated within its own buffer.
        args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
        args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
        assert len(args.cmdline) <= BOOT_ARGS_SIZE
        assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE

    return args
def add_boot_image_signature(args, pagesize):
    """Adds the boot image signature.

    Note that the signature will only be verified in VTS to ensure a
    generic boot.img is used. It will not be used by the device
    bootloader at boot time. The bootloader should only verify
    the boot vbmeta at the end of the boot partition (or in the top-level
    vbmeta partition) via the Android Verified Boot process, when the
    device boots.

    Raises:
        ValueError: If the generated signature exceeds the reserved size.
    """
    args.output.flush()  # Flush the buffer for signature calculation.

    # Appends zeros if the signing key is not specified.
    if not args.gki_signing_key or not args.gki_signing_algorithm:
        zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
        args.output.write(zeros)
        pad_file(args.output, pagesize)
        return

    avbtool = 'avbtool'  # Used from otatools.zip or Android build env.

    # We need to specify the path of avbtool in build/core/Makefile.
    # Because avbtool is not guaranteed to be in $PATH there.
    if args.gki_signing_avbtool_path:
        avbtool = args.gki_signing_avbtool_path

    # Need to specify a value of --partition_size for avbtool to work.
    # We use 64 MB below, but avbtool will not resize the boot image to
    # this size because --do_not_append_vbmeta_image is also specified.
    avbtool_cmd = [
        avbtool, 'add_hash_footer',
        '--partition_name', 'boot',
        '--partition_size', str(64 * 1024 * 1024),
        '--image', args.output.name,
        '--algorithm', args.gki_signing_algorithm,
        '--key', args.gki_signing_key,
        '--salt', 'd00df00d']  # TODO: use a hash of kernel/ramdisk as the salt.

    # Additional arguments passed to avbtool.
    if args.gki_signing_signature_args:
        avbtool_cmd += args.gki_signing_signature_args.split()

    # Outputs the signed vbmeta to a separate file, then append to boot.img
    # as the boot signature.
    with tempfile.TemporaryDirectory() as temp_out_dir:
        boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
        avbtool_cmd += ['--do_not_append_vbmeta_image',
                        '--output_vbmeta_image', boot_signature_output]
        subprocess.check_call(avbtool_cmd)
        with open(boot_signature_output, 'rb') as boot_signature:
            if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
                # Fixed typo: message previously read 'boot sigature size'.
                raise ValueError(
                    f'boot signature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
            write_padded_file(args.output, boot_signature, pagesize)
def write_data(args, pagesize):
    """Writes the boot image payload sections, each padded to pagesize."""
    for image in (args.kernel, args.ramdisk, args.second):
        write_padded_file(args.output, image, pagesize)

    if 0 < args.header_version < 3:
        write_padded_file(args.output, args.recovery_dtbo, pagesize)
    if args.header_version == 2:
        write_padded_file(args.output, args.dtb, pagesize)
    if args.header_version >= 4:
        add_boot_image_signature(args, pagesize)
def write_vendor_boot_data(args):
    """Writes the vendor boot payload: ramdisk(s), dtb and, for v4, the
    ramdisk table and bootconfig sections."""
    if args.header_version <= 3:
        write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
    else:
        table = args.vendor_ramdisk_table_builder
        table.write_ramdisks_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
        table.write_entries_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.vendor_bootconfig,
                          args.pagesize)
def main():
    """Parses arguments and writes the requested boot / vendor boot images.

    Raises:
        ValueError: On option combinations incompatible with the chosen
            header version.
    """
    args = parse_cmdline()
    if args.vendor_boot is not None:
        if args.header_version not in {3, 4}:
            raise ValueError(
                '--vendor_boot not compatible with given header version')
        if args.header_version == 3 and args.vendor_ramdisk is None:
            raise ValueError('--vendor_ramdisk missing or invalid')
        write_vendor_boot_header(args)
        write_vendor_boot_data(args)
    if args.output is not None:
        if args.second is not None and args.header_version > 2:
            raise ValueError(
                '--second not compatible with given header version')
        img_id = write_header(args)
        if args.header_version > 2:
            # v3+ boot images use the fixed header page size, not --pagesize.
            write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
        else:
            write_data(args, args.pagesize)
        if args.id and img_id is not None:
            # Print the SHA-1 image ID as a hex string on stdout.
            print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
# Allow the script to be run directly as well as imported as a module.
if __name__ == '__main__':
    main()
@@ -10,7 +10,8 @@ if [ -z "$BM_POE_ADDRESS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))"
|
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
|
||||||
|
SNMP_ON="i 1"
|
||||||
SNMP_OFF="i 2"
|
SNMP_OFF="i 2"
|
||||||
|
|
||||||
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
|
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
|
||||||
|
@@ -10,7 +10,7 @@ if [ -z "$BM_POE_ADDRESS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))"
|
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
|
||||||
SNMP_ON="i 1"
|
SNMP_ON="i 1"
|
||||||
SNMP_OFF="i 2"
|
SNMP_OFF="i 2"
|
||||||
|
|
||||||
|
@@ -1,10 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2059
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
# Boot script for devices attached to a PoE switch, using NFS for the root
|
# Boot script for devices attached to a PoE switch, using NFS for the root
|
||||||
# filesystem.
|
# filesystem.
|
||||||
@@ -12,7 +6,6 @@
|
|||||||
# We're run from the root of the repo, make a helper var for our paths
|
# We're run from the root of the repo, make a helper var for our paths
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
CI_COMMON=$CI_PROJECT_DIR/install/common
|
||||||
CI_INSTALL=$CI_PROJECT_DIR/install
|
|
||||||
|
|
||||||
# Runner config checks
|
# Runner config checks
|
||||||
if [ -z "$BM_SERIAL" ]; then
|
if [ -z "$BM_SERIAL" ]; then
|
||||||
@@ -61,8 +54,8 @@ if [ -z "$BM_ROOTFS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$BM_BOOTFS" ] && { [ -z "$BM_KERNEL" ] || [ -z "$BM_DTB" ]; } ; then
|
if [ -z "$BM_BOOTFS" ]; then
|
||||||
echo "Must set /boot files for the TFTP boot in the job's variables or set kernel and dtb"
|
echo "Must set /boot files for the TFTP boot in the job's variables"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -71,12 +64,13 @@ if [ -z "$BM_CMDLINE" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
section_start prepare_rootfs "Preparing rootfs components"
|
if [ -z "$BM_BOOTCONFIG" ]; then
|
||||||
|
echo "Must set BM_BOOTCONFIG to your board's required boot configuration arguments"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Clear out any previous run's artifacts.
|
# Clear out any previous run's artifacts.
|
||||||
rm -rf results/
|
rm -rf results/
|
||||||
mkdir -p results
|
mkdir -p results
|
||||||
@@ -85,64 +79,27 @@ mkdir -p results
|
|||||||
# state, since it's volume-mounted on the host.
|
# state, since it's volume-mounted on the host.
|
||||||
rsync -a --delete $BM_ROOTFS/ /nfs/
|
rsync -a --delete $BM_ROOTFS/ /nfs/
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# If BM_BOOTFS is an URL, download it
|
# If BM_BOOTFS is an URL, download it
|
||||||
if echo $BM_BOOTFS | grep -q http; then
|
if echo $BM_BOOTFS | grep -q http; then
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt install -y wget
|
||||||
"${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS" -o /tmp/bootfs.tar
|
wget ${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS -O /tmp/bootfs.tar
|
||||||
BM_BOOTFS=/tmp/bootfs.tar
|
BM_BOOTFS=/tmp/bootfs.tar
|
||||||
fi
|
fi
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
|
# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
|
||||||
if [ -f "${BM_BOOTFS}" ]; then
|
if [ -f $BM_BOOTFS ]; then
|
||||||
mkdir -p /tmp/bootfs
|
mkdir -p /tmp/bootfs
|
||||||
tar xf $BM_BOOTFS -C /tmp/bootfs
|
tar xf $BM_BOOTFS -C /tmp/bootfs
|
||||||
BM_BOOTFS=/tmp/bootfs
|
BM_BOOTFS=/tmp/bootfs
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# If BM_KERNEL and BM_DTS is present
|
|
||||||
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
|
||||||
if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then
|
|
||||||
echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o "${BM_KERNEL}"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o "${BM_DTB}.dtb"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Install kernel modules (it could be either in /lib/modules or
|
# Install kernel modules (it could be either in /lib/modules or
|
||||||
# /usr/lib/modules, but we want to install in the latter)
|
# /usr/lib/modules, but we want to install in the latter)
|
||||||
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/
|
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
|
||||||
rm modules.tar.zst &
|
|
||||||
elif [ -n "${BM_BOOTFS}" ]; then
|
|
||||||
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
|
|
||||||
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
|
|
||||||
else
|
|
||||||
echo "No modules!"
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Install kernel image + bootloader files
|
# Install kernel image + bootloader files
|
||||||
if [ -n "${EXTERNAL_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
|
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
|
||||||
mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
|
|
||||||
else # BM_BOOTFS
|
|
||||||
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Set up the pxelinux config for Jetson Nano
|
# Set up the pxelinux config for Jetson Nano
|
||||||
mkdir -p /tftp/pxelinux.cfg
|
mkdir -p /tftp/pxelinux.cfg
|
||||||
@@ -158,79 +115,35 @@ LABEL primary
|
|||||||
APPEND \${cbootargs} $BM_CMDLINE
|
APPEND \${cbootargs} $BM_CMDLINE
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Set up the pxelinux config for Jetson TK1
|
|
||||||
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1
|
|
||||||
PROMPT 0
|
|
||||||
TIMEOUT 30
|
|
||||||
DEFAULT primary
|
|
||||||
MENU TITLE jetson TK1 boot options
|
|
||||||
LABEL primary
|
|
||||||
MENU LABEL CI kernel on TFTP
|
|
||||||
LINUX zImage
|
|
||||||
FDT tegra124-jetson-tk1.dtb
|
|
||||||
APPEND \${cbootargs} $BM_CMDLINE
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create the rootfs in the NFS directory
|
# Create the rootfs in the NFS directory
|
||||||
|
mkdir -p /nfs/results
|
||||||
. $BM/rootfs-setup.sh /nfs
|
. $BM/rootfs-setup.sh /nfs
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
echo "$BM_CMDLINE" > /tftp/cmdline.txt
|
echo "$BM_CMDLINE" > /tftp/cmdline.txt
|
||||||
|
|
||||||
# Add some options in config.txt, if defined
|
# Add some required options in config.txt
|
||||||
if [ -n "$BM_BOOTCONFIG" ]; then
|
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
|
||||||
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
section_end prepare_rootfs
|
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
STRUCTURED_LOG_FILE=results/job_detail.json
|
ATTEMPTS=10
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
|
|
||||||
ATTEMPTS=3
|
|
||||||
first_attempt=True
|
|
||||||
while [ $((ATTEMPTS--)) -gt 0 ]; do
|
while [ $((ATTEMPTS--)) -gt 0 ]; do
|
||||||
section_start dut_boot "Booting hardware device ..."
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
|
|
||||||
# Update subtime time to CI_JOB_STARTED_AT only for the first run
|
|
||||||
if [ "$first_attempt" = "True" ]; then
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
|
|
||||||
else
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit
|
|
||||||
fi
|
|
||||||
python3 $BM/poe_run.py \
|
python3 $BM/poe_run.py \
|
||||||
--dev="$BM_SERIAL" \
|
--dev="$BM_SERIAL" \
|
||||||
--powerup="$BM_POWERUP" \
|
--powerup="$BM_POWERUP" \
|
||||||
--powerdown="$BM_POWERDOWN" \
|
--powerdown="$BM_POWERDOWN" \
|
||||||
--boot-timeout-seconds ${BOOT_PHASE_TIMEOUT_SECONDS:-300} \
|
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
|
||||||
--test-timeout-minutes ${TEST_PHASE_TIMEOUT_MINUTES:-$((CI_JOB_TIMEOUT/60 - ${TEST_SETUP_AND_UPLOAD_MARGIN_MINUTES:-5}))}
|
|
||||||
ret=$?
|
ret=$?
|
||||||
|
|
||||||
if [ $ret -eq 2 ]; then
|
if [ $ret -eq 2 ]; then
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
echo "Did not detect boot sequence, retrying..."
|
||||||
first_attempt=False
|
|
||||||
error "Device failed to boot; will retry"
|
|
||||||
else
|
else
|
||||||
# We're no longer in dut_boot by this point
|
|
||||||
unset CURRENT_SECTION
|
|
||||||
ATTEMPTS=0
|
ATTEMPTS=0
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
section_start dut_cleanup "Cleaning up after job"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
# will look for them.
|
# will look for them.
|
||||||
cp -Rp /nfs/results/. results/
|
cp -Rp /nfs/results/. results/
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
section_end dut_cleanup
|
|
||||||
|
|
||||||
exit $ret
|
exit $ret
|
||||||
|
@@ -24,27 +24,23 @@
|
|||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
from serial_buffer import SerialBuffer
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
from custom_logger import CustomLogger
|
|
||||||
from serial_buffer import SerialBuffer
|
|
||||||
|
|
||||||
class PoERun:
|
class PoERun:
|
||||||
def __init__(self, args, boot_timeout, test_timeout, logger):
|
def __init__(self, args, test_timeout):
|
||||||
self.powerup = args.powerup
|
self.powerup = args.powerup
|
||||||
self.powerdown = args.powerdown
|
self.powerdown = args.powerdown
|
||||||
self.ser = SerialBuffer(
|
self.ser = SerialBuffer(
|
||||||
args.dev, "results/serial-output.txt", ": ")
|
args.dev, "results/serial-output.txt", "")
|
||||||
self.boot_timeout = boot_timeout
|
|
||||||
self.test_timeout = test_timeout
|
self.test_timeout = test_timeout
|
||||||
self.logger = logger
|
|
||||||
|
|
||||||
def print_error(self, message):
|
def print_error(self, message):
|
||||||
RED = '\033[0;31m'
|
RED = '\033[0;31m'
|
||||||
NO_COLOR = '\033[0m'
|
NO_COLOR = '\033[0m'
|
||||||
print(RED + message + NO_COLOR)
|
print(RED + message + NO_COLOR)
|
||||||
self.logger.update_status_fail(message)
|
|
||||||
|
|
||||||
def logged_system(self, cmd):
|
def logged_system(self, cmd):
|
||||||
print("Running '{}'".format(cmd))
|
print("Running '{}'".format(cmd))
|
||||||
@@ -52,12 +48,10 @@ class PoERun:
|
|||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
if self.logged_system(self.powerup) != 0:
|
if self.logged_system(self.powerup) != 0:
|
||||||
self.logger.update_status_fail("powerup failed")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
boot_detected = False
|
boot_detected = False
|
||||||
self.logger.create_job_phase("boot")
|
for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
|
||||||
for line in self.ser.lines(timeout=self.boot_timeout, phase="bootloader"):
|
|
||||||
if re.search("Booting Linux", line):
|
if re.search("Booting Linux", line):
|
||||||
boot_detected = True
|
boot_detected = True
|
||||||
break
|
break
|
||||||
@@ -67,10 +61,8 @@ class PoERun:
|
|||||||
"Something wrong; couldn't detect the boot start up sequence")
|
"Something wrong; couldn't detect the boot start up sequence")
|
||||||
return 2
|
return 2
|
||||||
|
|
||||||
self.logger.create_job_phase("test")
|
|
||||||
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
||||||
if re.search("---. end Kernel panic", line):
|
if re.search("---. end Kernel panic", line):
|
||||||
self.logger.update_status_fail("kernel panic")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
# Binning memory problems
|
# Binning memory problems
|
||||||
@@ -79,30 +71,19 @@ class PoERun:
|
|||||||
return 1
|
return 1
|
||||||
|
|
||||||
if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
|
if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
|
||||||
self.print_error("nouveau jetson boot bug, abandoning run.")
|
self.print_error("nouveau jetson boot bug, retrying.")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
# network fail on tk1
|
result = re.search("hwci: mesa: (\S*)", line)
|
||||||
if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line):
|
|
||||||
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
|
||||||
if result:
|
if result:
|
||||||
status = result.group(1)
|
if result.group(1) == "pass":
|
||||||
exit_code = int(result.group(2))
|
return 0
|
||||||
|
|
||||||
if status == "pass":
|
|
||||||
self.logger.update_dut_job("status", "pass")
|
|
||||||
else:
|
else:
|
||||||
self.logger.update_status_fail("test fail")
|
return 1
|
||||||
|
|
||||||
self.logger.update_dut_job("exit_code", exit_code)
|
|
||||||
return exit_code
|
|
||||||
|
|
||||||
self.print_error(
|
self.print_error(
|
||||||
"Reached the end of the CPU serial log without finding a result")
|
"Reached the end of the CPU serial log without finding a result")
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
@@ -114,18 +95,13 @@ def main():
|
|||||||
parser.add_argument('--powerdown', type=str,
|
parser.add_argument('--powerdown', type=str,
|
||||||
help='shell command for powering off', required=True)
|
help='shell command for powering off', required=True)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--boot-timeout-seconds', type=int, help='Boot phase timeout (seconds)', required=True)
|
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
||||||
parser.add_argument(
|
|
||||||
'--test-timeout-minutes', type=int, help='Test phase timeout (minutes)', required=True)
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
logger = CustomLogger("results/job_detail.json")
|
poe = PoERun(args, args.test_timeout * 60)
|
||||||
logger.update_dut_time("start", None)
|
|
||||||
poe = PoERun(args, args.boot_timeout_seconds, args.test_timeout_minutes * 60, logger)
|
|
||||||
retval = poe.run()
|
retval = poe.run()
|
||||||
|
|
||||||
poe.logged_system(args.powerdown)
|
poe.logged_system(args.powerdown)
|
||||||
logger.update_dut_time("end", None)
|
|
||||||
|
|
||||||
sys.exit(retval)
|
sys.exit(retval)
|
||||||
|
|
||||||
|
@@ -1,5 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
rootfs_dst=$1
|
rootfs_dst=$1
|
||||||
|
|
||||||
@@ -9,26 +8,23 @@ mkdir -p $rootfs_dst/results
|
|||||||
cp $BM/bm-init.sh $rootfs_dst/init
|
cp $BM/bm-init.sh $rootfs_dst/init
|
||||||
cp $CI_COMMON/init*.sh $rootfs_dst/
|
cp $CI_COMMON/init*.sh $rootfs_dst/
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Make JWT token available as file in the bare-metal storage to enable access
|
# Make JWT token available as file in the bare-metal storage to enable access
|
||||||
# to MinIO
|
# to MinIO
|
||||||
cp "${S3_JWT_FILE}" "${rootfs_dst}${S3_JWT_FILE}"
|
cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"
|
||||||
|
|
||||||
date +'%F %T'
|
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
|
||||||
|
cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
|
||||||
cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
|
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
|
|
||||||
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
||||||
|
"$CI_COMMON"/generate-env.sh > $rootfs_dst/set-job-env-vars.sh
|
||||||
|
chmod +x $rootfs_dst/set-job-env-vars.sh
|
||||||
echo "Variables passed through:"
|
echo "Variables passed through:"
|
||||||
"$CI_COMMON"/generate-env.sh | tee $rootfs_dst/set-job-env-vars.sh
|
cat $rootfs_dst/set-job-env-vars.sh
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
# Add the Mesa drivers we built, and make a consistent symlink to them.
|
# Add the Mesa drivers we built, and make a consistent symlink to them.
|
||||||
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
|
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
|
||||||
rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
|
rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
@@ -22,7 +22,7 @@
|
|||||||
# IN THE SOFTWARE.
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
from datetime import datetime, UTC
|
from datetime import datetime, timezone
|
||||||
import queue
|
import queue
|
||||||
import serial
|
import serial
|
||||||
import threading
|
import threading
|
||||||
@@ -130,10 +130,9 @@ class SerialBuffer:
|
|||||||
if b == b'\n'[0]:
|
if b == b'\n'[0]:
|
||||||
line = line.decode(errors="replace")
|
line = line.decode(errors="replace")
|
||||||
|
|
||||||
ts = datetime.now(tz=UTC)
|
time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
|
||||||
ts_str = f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}"
|
print("{endc}{time} {prefix}{line}".format(
|
||||||
print("{endc}{time}{prefix}{line}".format(
|
time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
|
||||||
time=ts_str, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
|
|
||||||
|
|
||||||
self.line_queue.put(line)
|
self.line_queue.put(line)
|
||||||
line = bytearray()
|
line = bytearray()
|
||||||
|
@@ -1 +0,0 @@
|
|||||||
../bin/ci
|
|
303
.gitlab-ci/bin/ci_run_n_monitor.py
Executable file
303
.gitlab-ci/bin/ci_run_n_monitor.py
Executable file
@@ -0,0 +1,303 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright © 2020 - 2022 Collabora Ltd.
|
||||||
|
# Authors:
|
||||||
|
# Tomeu Vizoso <tomeu.vizoso@collabora.com>
|
||||||
|
# David Heidelberg <david.heidelberg@collabora.com>
|
||||||
|
#
|
||||||
|
# TODO GraphQL for dependencies
|
||||||
|
# SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
"""
|
||||||
|
Helper script to restrict running only required CI jobs
|
||||||
|
and show the job(s) logs.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from typing import Optional
|
||||||
|
from functools import partial
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import gitlab
|
||||||
|
|
||||||
|
from colorama import Fore, Style
|
||||||
|
|
||||||
|
REFRESH_WAIT_LOG = 10
|
||||||
|
REFRESH_WAIT_JOBS = 6
|
||||||
|
|
||||||
|
URL_START = "\033]8;;"
|
||||||
|
URL_END = "\033]8;;\a"
|
||||||
|
|
||||||
|
STATUS_COLORS = {
|
||||||
|
"created": "",
|
||||||
|
"running": Fore.BLUE,
|
||||||
|
"success": Fore.GREEN,
|
||||||
|
"failed": Fore.RED,
|
||||||
|
"canceled": Fore.MAGENTA,
|
||||||
|
"manual": "",
|
||||||
|
"pending": "",
|
||||||
|
"skipped": "",
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: This hardcoded list should be replaced by querying the pipeline's
|
||||||
|
# dependency graph to see which jobs the target jobs need
|
||||||
|
DEPENDENCIES = [
|
||||||
|
"debian/x86_build-base",
|
||||||
|
"debian/x86_build",
|
||||||
|
"debian/x86_test-base",
|
||||||
|
"debian/x86_test-gl",
|
||||||
|
"debian/arm_build",
|
||||||
|
"debian/arm_test",
|
||||||
|
"kernel+rootfs_amd64",
|
||||||
|
"kernel+rootfs_arm64",
|
||||||
|
"kernel+rootfs_armhf",
|
||||||
|
"debian-testing",
|
||||||
|
"debian-arm64",
|
||||||
|
]
|
||||||
|
|
||||||
|
COMPLETED_STATUSES = ["success", "failed"]
|
||||||
|
|
||||||
|
|
||||||
|
def get_gitlab_project(glab, name: str):
|
||||||
|
"""Finds a specified gitlab project for given user"""
|
||||||
|
glab.auth()
|
||||||
|
username = glab.user.username
|
||||||
|
return glab.projects.get(f"{username}/mesa")
|
||||||
|
|
||||||
|
|
||||||
|
def wait_for_pipeline(project, sha: str):
|
||||||
|
"""await until pipeline appears in Gitlab"""
|
||||||
|
print("⏲ for the pipeline to appear..", end="")
|
||||||
|
while True:
|
||||||
|
pipelines = project.pipelines.list(sha=sha)
|
||||||
|
if pipelines:
|
||||||
|
print("", flush=True)
|
||||||
|
return pipelines[0]
|
||||||
|
print("", end=".", flush=True)
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
|
||||||
|
def print_job_status(job) -> None:
|
||||||
|
"""It prints a nice, colored job status with a link to the job."""
|
||||||
|
if job.status == "canceled":
|
||||||
|
return
|
||||||
|
|
||||||
|
print(
|
||||||
|
STATUS_COLORS[job.status]
|
||||||
|
+ "🞋 job "
|
||||||
|
+ URL_START
|
||||||
|
+ f"{job.web_url}\a{job.name}"
|
||||||
|
+ URL_END
|
||||||
|
+ f" :: {job.status}"
|
||||||
|
+ Style.RESET_ALL
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def print_job_status_change(job) -> None:
|
||||||
|
"""It reports job status changes."""
|
||||||
|
if job.status == "canceled":
|
||||||
|
return
|
||||||
|
|
||||||
|
print(
|
||||||
|
STATUS_COLORS[job.status]
|
||||||
|
+ "🗘 job "
|
||||||
|
+ URL_START
|
||||||
|
+ f"{job.web_url}\a{job.name}"
|
||||||
|
+ URL_END
|
||||||
|
+ f" has new status: {job.status}"
|
||||||
|
+ Style.RESET_ALL
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def pretty_wait(sec: int) -> None:
|
||||||
|
"""shows progressbar in dots"""
|
||||||
|
for val in range(sec, 0, -1):
|
||||||
|
print(f"⏲ {val} seconds", end="\r")
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
|
||||||
|
def monitor_pipeline(
|
||||||
|
project, pipeline, target_job: Optional[str], dependencies, force_manual: bool
|
||||||
|
) -> tuple[Optional[int], Optional[int]]:
|
||||||
|
"""Monitors pipeline and delegate canceling jobs"""
|
||||||
|
statuses = {}
|
||||||
|
target_statuses = {}
|
||||||
|
|
||||||
|
if not dependencies:
|
||||||
|
dependencies = []
|
||||||
|
dependencies.extend(DEPENDENCIES)
|
||||||
|
|
||||||
|
if target_job:
|
||||||
|
target_jobs_regex = re.compile(target_job.strip())
|
||||||
|
|
||||||
|
while True:
|
||||||
|
to_cancel = []
|
||||||
|
for job in pipeline.jobs.list(all=True, sort="desc"):
|
||||||
|
# target jobs
|
||||||
|
if target_job and target_jobs_regex.match(job.name):
|
||||||
|
if force_manual and job.status == "manual":
|
||||||
|
enable_job(project, job, True)
|
||||||
|
|
||||||
|
if (job.id not in target_statuses) or (
|
||||||
|
job.status not in target_statuses[job.id]
|
||||||
|
):
|
||||||
|
print_job_status_change(job)
|
||||||
|
target_statuses[job.id] = job.status
|
||||||
|
else:
|
||||||
|
print_job_status(job)
|
||||||
|
|
||||||
|
continue
|
||||||
|
|
||||||
|
# all jobs
|
||||||
|
if (job.id not in statuses) or (job.status not in statuses[job.id]):
|
||||||
|
print_job_status_change(job)
|
||||||
|
statuses[job.id] = job.status
|
||||||
|
|
||||||
|
# dependencies and cancelling the rest
|
||||||
|
if job.name in dependencies:
|
||||||
|
if job.status == "manual":
|
||||||
|
enable_job(project, job, False)
|
||||||
|
|
||||||
|
elif target_job and job.status not in [
|
||||||
|
"canceled",
|
||||||
|
"success",
|
||||||
|
"failed",
|
||||||
|
"skipped",
|
||||||
|
]:
|
||||||
|
to_cancel.append(job)
|
||||||
|
|
||||||
|
if target_job:
|
||||||
|
cancel_jobs(project, to_cancel)
|
||||||
|
|
||||||
|
print("---------------------------------", flush=False)
|
||||||
|
|
||||||
|
if len(target_statuses) == 1 and {"running"}.intersection(
|
||||||
|
target_statuses.values()
|
||||||
|
):
|
||||||
|
return next(iter(target_statuses)), None
|
||||||
|
|
||||||
|
if {"failed", "canceled"}.intersection(target_statuses.values()):
|
||||||
|
return None, 1
|
||||||
|
|
||||||
|
if {"success", "manual"}.issuperset(target_statuses.values()):
|
||||||
|
return None, 0
|
||||||
|
|
||||||
|
pretty_wait(REFRESH_WAIT_JOBS)
|
||||||
|
|
||||||
|
|
||||||
|
def enable_job(project, job, target: bool) -> None:
|
||||||
|
"""enable manual job"""
|
||||||
|
pjob = project.jobs.get(job.id, lazy=True)
|
||||||
|
pjob.play()
|
||||||
|
if target:
|
||||||
|
jtype = "🞋 "
|
||||||
|
else:
|
||||||
|
jtype = "(dependency)"
|
||||||
|
print(Fore.MAGENTA + f"{jtype} job {job.name} manually enabled" + Style.RESET_ALL)
|
||||||
|
|
||||||
|
|
||||||
|
def cancel_job(project, job) -> None:
|
||||||
|
"""Cancel GitLab job"""
|
||||||
|
pjob = project.jobs.get(job.id, lazy=True)
|
||||||
|
pjob.cancel()
|
||||||
|
print(f"♲ {job.name}")
|
||||||
|
|
||||||
|
|
||||||
|
def cancel_jobs(project, to_cancel) -> None:
|
||||||
|
"""Cancel unwanted GitLab jobs"""
|
||||||
|
if not to_cancel:
|
||||||
|
return
|
||||||
|
|
||||||
|
with ThreadPoolExecutor(max_workers=6) as exe:
|
||||||
|
part = partial(cancel_job, project)
|
||||||
|
exe.map(part, to_cancel)
|
||||||
|
|
||||||
|
|
||||||
|
def print_log(project, job_id) -> None:
|
||||||
|
"""Print job log into output"""
|
||||||
|
printed_lines = 0
|
||||||
|
while True:
|
||||||
|
job = project.jobs.get(job_id)
|
||||||
|
|
||||||
|
# GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
|
||||||
|
lines = job.trace().decode("unicode_escape").splitlines()
|
||||||
|
for line in lines[printed_lines:]:
|
||||||
|
print(line)
|
||||||
|
printed_lines = len(lines)
|
||||||
|
|
||||||
|
if job.status in COMPLETED_STATUSES:
|
||||||
|
print(Fore.GREEN + f"Job finished: {job.web_url}" + Style.RESET_ALL)
|
||||||
|
return
|
||||||
|
pretty_wait(REFRESH_WAIT_LOG)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> None:
|
||||||
|
"""Parse args"""
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Tool to trigger a subset of container jobs "
|
||||||
|
+ "and monitor the progress of a test job",
|
||||||
|
epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
|
||||||
|
+ '--target ".*traces" ',
|
||||||
|
)
|
||||||
|
parser.add_argument("--target", metavar="target-job", help="Target job")
|
||||||
|
parser.add_argument("--deps", nargs="+", help="Job dependencies")
|
||||||
|
parser.add_argument(
|
||||||
|
"--rev", metavar="revision", help="repository git revision", required=True
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--token",
|
||||||
|
metavar="token",
|
||||||
|
help="force GitLab token, otherwise it's read from ~/.config/gitlab-token",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--force-manual", action="store_true", help="Force jobs marked as manual"
|
||||||
|
)
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def read_token(token_arg: Optional[str]) -> str:
|
||||||
|
"""pick token from args or file"""
|
||||||
|
if token_arg:
|
||||||
|
return token_arg
|
||||||
|
return (
|
||||||
|
open(os.path.expanduser("~/.config/gitlab-token"), encoding="utf-8")
|
||||||
|
.readline()
|
||||||
|
.rstrip()
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
try:
|
||||||
|
t_start = time.perf_counter()
|
||||||
|
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
token = read_token(args.token)
|
||||||
|
|
||||||
|
gl = gitlab.Gitlab(url="https://gitlab.freedesktop.org", private_token=token)
|
||||||
|
|
||||||
|
cur_project = get_gitlab_project(gl, "mesa")
|
||||||
|
|
||||||
|
print(f"Revision: {args.rev}")
|
||||||
|
pipe = wait_for_pipeline(cur_project, args.rev)
|
||||||
|
print(f"Pipeline: {pipe.web_url}")
|
||||||
|
if args.target:
|
||||||
|
print("🞋 job: " + Fore.BLUE + args.target + Style.RESET_ALL)
|
||||||
|
print(f"Extra dependencies: {args.deps}")
|
||||||
|
target_job_id, ret = monitor_pipeline(
|
||||||
|
cur_project, pipe, args.target, args.deps, args.force_manual
|
||||||
|
)
|
||||||
|
|
||||||
|
if target_job_id:
|
||||||
|
print_log(cur_project, target_job_id)
|
||||||
|
|
||||||
|
t_end = time.perf_counter()
|
||||||
|
spend_minutes = (t_end - t_start) / 60
|
||||||
|
print(f"⏲ Duration of script execution: {spend_minutes:0.1f} minutes")
|
||||||
|
|
||||||
|
sys.exit(ret)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
sys.exit(1)
|
2
.gitlab-ci/bin/requirements.txt
Normal file
2
.gitlab-ci/bin/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
colorama==0.4.5
|
||||||
|
python-gitlab==3.5.0
|
File diff suppressed because it is too large
Load Diff
@@ -1,35 +1,14 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/sh
|
||||||
# shellcheck disable=SC2035
|
|
||||||
# shellcheck disable=SC2061
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
while true; do
|
while true; do
|
||||||
devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null)
|
devcds=`find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null`
|
||||||
for i in $devcds; do
|
for i in $devcds; do
|
||||||
echo "Found a devcoredump at $i."
|
echo "Found a devcoredump at $i."
|
||||||
if cp $i $RESULTS_DIR/first.devcore; then
|
if cp $i /results/first.devcore; then
|
||||||
echo 1 > $i
|
echo 1 > $i
|
||||||
echo "Saved to the job artifacts at /first.devcore"
|
echo "Saved to the job artifacts at /first.devcore"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
i915_error_states=$(find /sys/devices/ -path */drm/card*/error)
|
|
||||||
for i in $i915_error_states; do
|
|
||||||
tmpfile=$(mktemp)
|
|
||||||
cp "$i" "$tmpfile"
|
|
||||||
filesize=$(stat --printf="%s" "$tmpfile")
|
|
||||||
# Does the file contain "No error state collected" ?
|
|
||||||
if [ "$filesize" = 25 ]; then
|
|
||||||
rm "$tmpfile"
|
|
||||||
else
|
|
||||||
echo "Found an i915 error state at $i size=$filesize."
|
|
||||||
if cp "$tmpfile" $RESULTS_DIR/first.i915_error_state; then
|
|
||||||
rm "$tmpfile"
|
|
||||||
echo 1 > "$i"
|
|
||||||
echo "Saved to the job artifacts at /first.i915_error_state"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
sleep 10
|
sleep 10
|
||||||
done
|
done
|
||||||
|
@@ -1,138 +1,121 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
VARS=(
|
for var in \
|
||||||
ACO_DEBUG
|
ACO_DEBUG \
|
||||||
ARTIFACTS_BASE_URL
|
ASAN_OPTIONS \
|
||||||
ASAN_OPTIONS
|
BASE_SYSTEM_FORK_HOST_PREFIX \
|
||||||
BASE_SYSTEM_FORK_HOST_PREFIX
|
BASE_SYSTEM_MAINLINE_HOST_PREFIX \
|
||||||
BASE_SYSTEM_MAINLINE_HOST_PREFIX
|
CI_COMMIT_BRANCH \
|
||||||
CI_COMMIT_BRANCH
|
CI_COMMIT_REF_NAME \
|
||||||
CI_COMMIT_REF_NAME
|
CI_COMMIT_TITLE \
|
||||||
CI_COMMIT_TITLE
|
CI_JOB_ID \
|
||||||
CI_JOB_ID
|
CI_JOB_JWT_FILE \
|
||||||
S3_JWT_FILE
|
CI_JOB_NAME \
|
||||||
CI_JOB_STARTED_AT
|
CI_JOB_URL \
|
||||||
CI_JOB_NAME
|
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
|
||||||
CI_JOB_URL
|
CI_MERGE_REQUEST_TITLE \
|
||||||
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
|
CI_NODE_INDEX \
|
||||||
CI_MERGE_REQUEST_TITLE
|
CI_NODE_TOTAL \
|
||||||
CI_NODE_INDEX
|
CI_PAGES_DOMAIN \
|
||||||
CI_NODE_TOTAL
|
CI_PIPELINE_ID \
|
||||||
CI_PAGES_DOMAIN
|
CI_PIPELINE_URL \
|
||||||
CI_PIPELINE_ID
|
CI_PROJECT_DIR \
|
||||||
CI_PIPELINE_URL
|
CI_PROJECT_NAME \
|
||||||
CI_PROJECT_DIR
|
CI_PROJECT_PATH \
|
||||||
CI_PROJECT_NAME
|
CI_PROJECT_ROOT_NAMESPACE \
|
||||||
CI_PROJECT_PATH
|
CI_RUNNER_DESCRIPTION \
|
||||||
CI_PROJECT_ROOT_NAMESPACE
|
CI_SERVER_URL \
|
||||||
CI_RUNNER_DESCRIPTION
|
CROSVM_GALLIUM_DRIVER \
|
||||||
CI_SERVER_URL
|
CROSVM_GPU_ARGS \
|
||||||
CROSVM_GALLIUM_DRIVER
|
DEQP_BIN_DIR \
|
||||||
CROSVM_GPU_ARGS
|
DEQP_CASELIST_FILTER \
|
||||||
CURRENT_SECTION
|
DEQP_CASELIST_INV_FILTER \
|
||||||
DEQP_BIN_DIR
|
DEQP_CONFIG \
|
||||||
DEQP_FORCE_ASAN
|
DEQP_EXPECTED_RENDERER \
|
||||||
DEQP_FRACTION
|
DEQP_FRACTION \
|
||||||
DEQP_RUNNER_MAX_FAILS
|
DEQP_HEIGHT \
|
||||||
DEQP_SUITE
|
DEQP_RESULTS_DIR \
|
||||||
DEQP_TEMP_DIR
|
DEQP_RUNNER_OPTIONS \
|
||||||
DEVICE_NAME
|
DEQP_SUITE \
|
||||||
DRIVER_NAME
|
DEQP_TEMP_DIR \
|
||||||
EGL_PLATFORM
|
DEQP_VARIANT \
|
||||||
ETNA_MESA_DEBUG
|
DEQP_VER \
|
||||||
FDO_CI_CONCURRENT
|
DEQP_WIDTH \
|
||||||
FDO_HTTP_CACHE_URI
|
DEVICE_NAME \
|
||||||
FDO_UPSTREAM_REPO
|
DRIVER_NAME \
|
||||||
FD_MESA_DEBUG
|
EGL_PLATFORM \
|
||||||
FLAKES_CHANNEL
|
ETNA_MESA_DEBUG \
|
||||||
FLUSTER_CODECS
|
FDO_CI_CONCURRENT \
|
||||||
FLUSTER_FRACTION
|
FDO_UPSTREAM_REPO \
|
||||||
FLUSTER_VECTORS_VERSION
|
FD_MESA_DEBUG \
|
||||||
FREEDRENO_HANGCHECK_MS
|
FLAKES_CHANNEL \
|
||||||
GALLIUM_DRIVER
|
FREEDRENO_HANGCHECK_MS \
|
||||||
GALLIVM_PERF
|
GALLIUM_DRIVER \
|
||||||
GPU_VERSION
|
GALLIVM_PERF \
|
||||||
GTEST
|
GPU_VERSION \
|
||||||
GTEST_FAILS
|
GTEST \
|
||||||
GTEST_FRACTION
|
GTEST_FAILS \
|
||||||
GTEST_RUNNER_OPTIONS
|
GTEST_FRACTION \
|
||||||
GTEST_SKIPS
|
GTEST_RESULTS_DIR \
|
||||||
HWCI_FREQ_MAX
|
GTEST_RUNNER_OPTIONS \
|
||||||
HWCI_KERNEL_MODULES
|
GTEST_SKIPS \
|
||||||
HWCI_KVM
|
HWCI_FREQ_MAX \
|
||||||
HWCI_START_WESTON
|
HWCI_KERNEL_MODULES \
|
||||||
HWCI_START_XORG
|
HWCI_KVM \
|
||||||
HWCI_TEST_ARGS
|
HWCI_START_XORG \
|
||||||
HWCI_TEST_SCRIPT
|
HWCI_TEST_SCRIPT \
|
||||||
IR3_SHADER_DEBUG
|
IR3_SHADER_DEBUG \
|
||||||
JOB_ARTIFACTS_BASE
|
JOB_ARTIFACTS_BASE \
|
||||||
JOB_RESULTS_PATH
|
JOB_RESULTS_PATH \
|
||||||
JOB_ROOTFS_OVERLAY_PATH
|
JOB_ROOTFS_OVERLAY_PATH \
|
||||||
KERNEL_IMAGE_BASE
|
KERNEL_IMAGE_BASE_URL \
|
||||||
KERNEL_IMAGE_NAME
|
KERNEL_IMAGE_NAME \
|
||||||
LD_LIBRARY_PATH
|
LD_LIBRARY_PATH \
|
||||||
LIBGL_ALWAYS_SOFTWARE
|
LP_NUM_THREADS \
|
||||||
LP_NUM_THREADS
|
MESA_BASE_TAG \
|
||||||
MESA_BASE_TAG
|
MESA_BUILD_PATH \
|
||||||
MESA_BUILD_PATH
|
MESA_DEBUG \
|
||||||
MESA_DEBUG
|
MESA_GLES_VERSION_OVERRIDE \
|
||||||
MESA_GLES_VERSION_OVERRIDE
|
MESA_GLSL_VERSION_OVERRIDE \
|
||||||
MESA_GLSL_VERSION_OVERRIDE
|
MESA_GL_VERSION_OVERRIDE \
|
||||||
MESA_GL_VERSION_OVERRIDE
|
MESA_IMAGE \
|
||||||
MESA_IMAGE
|
MESA_IMAGE_PATH \
|
||||||
MESA_IMAGE_PATH
|
MESA_IMAGE_TAG \
|
||||||
MESA_IMAGE_TAG
|
MESA_LOADER_DRIVER_OVERRIDE \
|
||||||
MESA_LOADER_DRIVER_OVERRIDE
|
MESA_TEMPLATES_COMMIT \
|
||||||
MESA_SPIRV_LOG_LEVEL
|
MESA_VK_IGNORE_CONFORMANCE_WARNING \
|
||||||
MESA_TEMPLATES_COMMIT
|
MESA_SPIRV_LOG_LEVEL \
|
||||||
MESA_VK_ABORT_ON_DEVICE_LOSS
|
MINIO_HOST \
|
||||||
MESA_VK_IGNORE_CONFORMANCE_WARNING
|
MINIO_RESULTS_UPLOAD \
|
||||||
S3_HOST
|
NIR_DEBUG \
|
||||||
S3_RESULTS_UPLOAD
|
PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
|
||||||
NIR_DEBUG
|
PAN_MESA_DEBUG \
|
||||||
PAN_I_WANT_A_BROKEN_VULKAN_DRIVER
|
PIGLIT_FRACTION \
|
||||||
PAN_MESA_DEBUG
|
PIGLIT_NO_WINDOW \
|
||||||
PANVK_DEBUG
|
PIGLIT_OPTIONS \
|
||||||
PIGLIT_FRACTION
|
PIGLIT_PLATFORM \
|
||||||
PIGLIT_NO_WINDOW
|
PIGLIT_PROFILES \
|
||||||
PIGLIT_OPTIONS
|
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
|
||||||
PIGLIT_PLATFORM
|
PIGLIT_REPLAY_DESCRIPTION_FILE \
|
||||||
PIGLIT_PROFILES
|
PIGLIT_REPLAY_DEVICE_NAME \
|
||||||
PIGLIT_REPLAY_ANGLE_TAG
|
PIGLIT_REPLAY_EXTRA_ARGS \
|
||||||
PIGLIT_REPLAY_ARTIFACTS_BASE_URL
|
PIGLIT_REPLAY_LOOP_TIMES \
|
||||||
PIGLIT_REPLAY_DEVICE_NAME
|
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
|
||||||
PIGLIT_REPLAY_EXTRA_ARGS
|
PIGLIT_REPLAY_SUBCOMMAND \
|
||||||
PIGLIT_REPLAY_LOOP_TIMES
|
PIGLIT_RESULTS \
|
||||||
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE
|
PIGLIT_TESTS \
|
||||||
PIGLIT_REPLAY_SUBCOMMAND
|
PIPELINE_ARTIFACTS_BASE \
|
||||||
PIGLIT_RESULTS
|
RADV_DEBUG \
|
||||||
PIGLIT_RUNNER_OPTIONS
|
RADV_PERFTEST \
|
||||||
PIGLIT_TESTS
|
SKQP_ASSETS_DIR \
|
||||||
PIGLIT_TRACES_FILE
|
SKQP_BACKENDS \
|
||||||
PIPELINE_ARTIFACTS_BASE
|
TU_DEBUG \
|
||||||
RADEON_DEBUG
|
VIRGL_HOST_API \
|
||||||
RADV_DEBUG
|
VK_CPU \
|
||||||
RADV_PERFTEST
|
VK_DRIVER \
|
||||||
SKQP_ASSETS_DIR
|
VK_ICD_FILENAMES \
|
||||||
SKQP_BACKENDS
|
VKD3D_PROTON_RESULTS \
|
||||||
STORAGE_FORK_HOST_PATH
|
; do
|
||||||
STORAGE_MAINLINE_HOST_PATH
|
|
||||||
TU_DEBUG
|
|
||||||
USE_ANGLE
|
|
||||||
VIRGL_HOST_API
|
|
||||||
VIRGL_RENDER_SERVER
|
|
||||||
WAFFLE_PLATFORM
|
|
||||||
VK_DRIVER
|
|
||||||
ZINK_DESCRIPTORS
|
|
||||||
ZINK_DEBUG
|
|
||||||
LVP_POISON_MEMORY
|
|
||||||
|
|
||||||
# Dead code within Mesa CI, but required by virglrender CI
|
|
||||||
# (because they include our files in their CI)
|
|
||||||
VK_DRIVER_FILES
|
|
||||||
)
|
|
||||||
|
|
||||||
for var in "${VARS[@]}"; do
|
|
||||||
if [ -n "${!var+x}" ]; then
|
if [ -n "${!var+x}" ]; then
|
||||||
echo "export $var=${!var@Q}"
|
echo "export $var=${!var@Q}"
|
||||||
fi
|
fi
|
||||||
|
@@ -7,14 +7,12 @@ set -ex
|
|||||||
|
|
||||||
cd /
|
cd /
|
||||||
|
|
||||||
findmnt --mountpoint /proc || mount -t proc none /proc
|
mount -t proc none /proc
|
||||||
findmnt --mountpoint /sys || mount -t sysfs none /sys
|
mount -t sysfs none /sys
|
||||||
mount -t debugfs none /sys/kernel/debug
|
mount -t debugfs none /sys/kernel/debug
|
||||||
findmnt --mountpoint /dev || mount -t devtmpfs none /dev
|
mount -t devtmpfs none /dev || echo possibly already mounted
|
||||||
mkdir -p /dev/pts
|
mkdir -p /dev/pts
|
||||||
mount -t devpts devpts /dev/pts
|
mount -t devpts devpts /dev/pts
|
||||||
mkdir /dev/shm
|
|
||||||
mount -t tmpfs -o noexec,nodev,nosuid tmpfs /dev/shm
|
|
||||||
mount -t tmpfs tmpfs /tmp
|
mount -t tmpfs tmpfs /tmp
|
||||||
|
|
||||||
echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
||||||
@@ -22,4 +20,4 @@ echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
|||||||
|
|
||||||
# Set the time so we can validate certificates before we fetch anything;
|
# Set the time so we can validate certificates before we fetch anything;
|
||||||
# however as not all DUTs have network, make this non-fatal.
|
# however as not all DUTs have network, make this non-fatal.
|
||||||
for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
|
for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
|
||||||
|
@@ -1,13 +1,4 @@
|
|||||||
#!/bin/bash
|
#!/bin/sh
|
||||||
# shellcheck disable=SC1090
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC2155
|
|
||||||
|
|
||||||
# Second-stage init, used to set up devices and our job environment before
|
|
||||||
# running tests.
|
|
||||||
|
|
||||||
shopt -s extglob
|
|
||||||
|
|
||||||
# Make sure to kill itself and all the children process from this script on
|
# Make sure to kill itself and all the children process from this script on
|
||||||
# exiting, since any console output may interfere with LAVA signals handling,
|
# exiting, since any console output may interfere with LAVA signals handling,
|
||||||
@@ -42,17 +33,10 @@ trap cleanup INT TERM EXIT
|
|||||||
BACKGROUND_PIDS=
|
BACKGROUND_PIDS=
|
||||||
|
|
||||||
|
|
||||||
for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do
|
# Second-stage init, used to set up devices and our job environment before
|
||||||
[ -f "$path" ] && source "$path"
|
# running tests.
|
||||||
done
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
# Flush out anything which might be stuck in a serial buffer
|
. /set-job-env-vars.sh
|
||||||
echo
|
|
||||||
echo
|
|
||||||
echo
|
|
||||||
|
|
||||||
section_switch init_stage2 "Pre-testing hardware setup"
|
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
@@ -61,16 +45,6 @@ set -ex
|
|||||||
echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
|
echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
|
||||||
}
|
}
|
||||||
|
|
||||||
# Set up ZRAM
|
|
||||||
HWCI_ZRAM_SIZE=2G
|
|
||||||
if /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
|
|
||||||
mkswap /dev/zram0
|
|
||||||
swapon /dev/zram0
|
|
||||||
echo "zram: $HWCI_ZRAM_SIZE activated"
|
|
||||||
else
|
|
||||||
echo "zram: skipping, not supported"
|
|
||||||
fi
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Load the KVM module specific to the detected CPU virtualization extensions:
|
# Load the KVM module specific to the detected CPU virtualization extensions:
|
||||||
# - vmx for Intel VT
|
# - vmx for Intel VT
|
||||||
@@ -80,22 +54,17 @@ fi
|
|||||||
#
|
#
|
||||||
if [ "$HWCI_KVM" = "true" ]; then
|
if [ "$HWCI_KVM" = "true" ]; then
|
||||||
unset KVM_KERNEL_MODULE
|
unset KVM_KERNEL_MODULE
|
||||||
{
|
grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || {
|
||||||
grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
|
grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
|
||||||
} || {
|
|
||||||
grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
|
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
[ -z "${KVM_KERNEL_MODULE}" ] && \
|
||||||
[ -z "${KVM_KERNEL_MODULE}" ] && \
|
echo "WARNING: Failed to detect CPU virtualization extensions" || \
|
||||||
echo "WARNING: Failed to detect CPU virtualization extensions"
|
|
||||||
} || \
|
|
||||||
modprobe ${KVM_KERNEL_MODULE}
|
modprobe ${KVM_KERNEL_MODULE}
|
||||||
|
|
||||||
mkdir -p /lava-files
|
mkdir -p /lava-files
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \
|
||||||
-o "/lava-files/${KERNEL_IMAGE_NAME}" \
|
"${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}"
|
||||||
"${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
|
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
|
||||||
@@ -104,53 +73,31 @@ ln -sf $CI_PROJECT_DIR/install /install
|
|||||||
export LD_LIBRARY_PATH=/install/lib
|
export LD_LIBRARY_PATH=/install/lib
|
||||||
export LIBGL_DRIVERS_PATH=/install/lib/dri
|
export LIBGL_DRIVERS_PATH=/install/lib/dri
|
||||||
|
|
||||||
# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691
|
|
||||||
# The navi21 boards seem to have trouble with ld.so.cache, so try explicitly
|
|
||||||
# telling it to look in /usr/local/lib.
|
|
||||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
|
|
||||||
|
|
||||||
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
||||||
export XDG_CACHE_HOME=/tmp
|
export XDG_CACHE_HOME=/tmp
|
||||||
|
|
||||||
# Make sure Python can find all our imports
|
# Make sure Python can find all our imports
|
||||||
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
|
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
|
||||||
|
|
||||||
# If we need to specify a driver, it means several drivers could pick up this gpu;
|
|
||||||
# ensure that the other driver can't accidentally be used
|
|
||||||
if [ -n "$MESA_LOADER_DRIVER_OVERRIDE" ]; then
|
|
||||||
rm /install/lib/dri/!($MESA_LOADER_DRIVER_OVERRIDE)_dri.so
|
|
||||||
fi
|
|
||||||
ls -1 /install/lib/dri/*_dri.so || true
|
|
||||||
|
|
||||||
if [ "$HWCI_FREQ_MAX" = "true" ]; then
|
if [ "$HWCI_FREQ_MAX" = "true" ]; then
|
||||||
# Ensure initialization of the DRM device (needed by MSM)
|
# Ensure initialization of the DRM device (needed by MSM)
|
||||||
head -0 /dev/dri/renderD128
|
head -0 /dev/dri/renderD128
|
||||||
|
|
||||||
# Disable GPU frequency scaling
|
# Disable GPU frequency scaling
|
||||||
DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
|
DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
|
||||||
test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true
|
test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true
|
||||||
|
|
||||||
# Disable CPU frequency scaling
|
# Disable CPU frequency scaling
|
||||||
echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
|
echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
|
||||||
|
|
||||||
# Disable GPU runtime power management
|
# Disable GPU runtime power management
|
||||||
GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
|
GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
|
||||||
test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
|
test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
|
||||||
# Lock Intel GPU frequency to 70% of the maximum allowed by hardware
|
# Lock Intel GPU frequency to 70% of the maximum allowed by hardware
|
||||||
# and enable throttling detection & reporting.
|
# and enable throttling detection & reporting.
|
||||||
# Additionally, set the upper limit for CPU scaling frequency to 65% of the
|
# Additionally, set the upper limit for CPU scaling frequency to 65% of the
|
||||||
# maximum permitted, as an additional measure to mitigate thermal throttling.
|
# maximum permitted, as an additional measure to mitigate thermal throttling.
|
||||||
/install/common/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
|
./intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
|
||||||
fi
|
|
||||||
|
|
||||||
# Start a little daemon to capture sysfs records and produce a JSON file
|
|
||||||
KDL_PATH=/install/common/kdl.sh
|
|
||||||
if [ -x "$KDL_PATH" ]; then
|
|
||||||
echo "launch kdl.sh!"
|
|
||||||
$KDL_PATH &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
else
|
|
||||||
echo "kdl.sh not found!"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Increase freedreno hangcheck timer because it's right at the edge of the
|
# Increase freedreno hangcheck timer because it's right at the edge of the
|
||||||
@@ -161,14 +108,8 @@ fi
|
|||||||
|
|
||||||
# Start a little daemon to capture the first devcoredump we encounter. (They
|
# Start a little daemon to capture the first devcoredump we encounter. (They
|
||||||
# expire after 5 minutes, so we poll for them).
|
# expire after 5 minutes, so we poll for them).
|
||||||
CAPTURE_DEVCOREDUMP=/install/common/capture-devcoredump.sh
|
/capture-devcoredump.sh &
|
||||||
if [ -x "$CAPTURE_DEVCOREDUMP" ]; then
|
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
||||||
$CAPTURE_DEVCOREDUMP &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
fi
|
|
||||||
|
|
||||||
ARCH=$(uname -m)
|
|
||||||
export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json"
|
|
||||||
|
|
||||||
# If we want Xorg to be running for the test, then we start it up before the
|
# If we want Xorg to be running for the test, then we start it up before the
|
||||||
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
|
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
|
||||||
@@ -177,11 +118,11 @@ export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json"
|
|||||||
if [ -n "$HWCI_START_XORG" ]; then
|
if [ -n "$HWCI_START_XORG" ]; then
|
||||||
echo "touch /xorg-started; sleep 100000" > /xorg-script
|
echo "touch /xorg-started; sleep 100000" > /xorg-script
|
||||||
env \
|
env \
|
||||||
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile "$RESULTS_DIR/Xorg.0.log" &
|
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
||||||
|
|
||||||
# Wait for xorg to be ready for connections.
|
# Wait for xorg to be ready for connections.
|
||||||
for _ in 1 2 3 4 5; do
|
for i in 1 2 3 4 5; do
|
||||||
if [ -e /xorg-started ]; then
|
if [ -e /xorg-started ]; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
@@ -190,37 +131,16 @@ if [ -n "$HWCI_START_XORG" ]; then
|
|||||||
export DISPLAY=:0
|
export DISPLAY=:0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -n "$HWCI_START_WESTON" ]; then
|
RESULT=fail
|
||||||
WESTON_X11_SOCK="/tmp/.X11-unix/X0"
|
|
||||||
if [ -n "$HWCI_START_XORG" ]; then
|
|
||||||
echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing."
|
|
||||||
WESTON_X11_SOCK="/tmp/.X11-unix/X1"
|
|
||||||
fi
|
|
||||||
export WAYLAND_DISPLAY=wayland-0
|
|
||||||
|
|
||||||
# Display server is Weston Xwayland when HWCI_START_XORG is not set or Xorg when it's
|
|
||||||
export DISPLAY=:0
|
|
||||||
mkdir -p /tmp/.X11-unix
|
|
||||||
|
|
||||||
env \
|
|
||||||
weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
|
|
||||||
while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
|
|
||||||
fi
|
|
||||||
|
|
||||||
set +x
|
|
||||||
|
|
||||||
section_end init_stage2
|
|
||||||
|
|
||||||
echo "Running ${HWCI_TEST_SCRIPT} ${HWCI_TEST_ARGS} ..."
|
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
$HWCI_TEST_SCRIPT ${HWCI_TEST_ARGS:-}; EXIT_CODE=$?
|
sh -c "$HWCI_TEST_SCRIPT"
|
||||||
|
EXIT_CODE=$?
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
section_start post_test_cleanup "Cleaning up after testing, uploading results"
|
# Let's make sure the results are always stored in current working directory
|
||||||
set -x
|
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
|
||||||
|
|
||||||
|
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"
|
||||||
|
|
||||||
# Make sure that capture-devcoredump is done before we start trying to tar up
|
# Make sure that capture-devcoredump is done before we start trying to tar up
|
||||||
# artifacts -- if it's writing while tar is reading, tar will throw an error and
|
# artifacts -- if it's writing while tar is reading, tar will throw an error and
|
||||||
@@ -228,22 +148,18 @@ set -x
|
|||||||
cleanup
|
cleanup
|
||||||
|
|
||||||
# upload artifacts
|
# upload artifacts
|
||||||
if [ -n "$S3_RESULTS_UPLOAD" ]; then
|
if [ -n "$MINIO_RESULTS_UPLOAD" ]; then
|
||||||
tar --zstd -cf results.tar.zst results/;
|
tar -czf results.tar.gz results/;
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
|
ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}";
|
||||||
|
ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
|
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
|
||||||
# as the python ones inside the bare-metal folder
|
# as the python ones inside the bare-metal folder
|
||||||
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
|
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
section_end post_test_cleanup
|
echo "hwci: mesa: $RESULT"
|
||||||
|
# Sleep a bit to avoid kernel dump message interleave from LAVA ENDTC signal
|
||||||
# Print the final result; both bare-metal and LAVA look for this string to get
|
sleep 1
|
||||||
# the result of our run, so try really hard to get it out rather than losing
|
|
||||||
# the run. The device gets shut down right at this point, and a630 seems to
|
|
||||||
# enjoy corrupting the last line of serial output before shutdown.
|
|
||||||
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done
|
|
||||||
|
|
||||||
exit $EXIT_CODE
|
exit $EXIT_CODE
|
||||||
|
@@ -1,14 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/sh
|
||||||
# shellcheck disable=SC2013
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2046
|
|
||||||
# shellcheck disable=SC2059
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC2154
|
|
||||||
# shellcheck disable=SC2155
|
|
||||||
# shellcheck disable=SC2162
|
|
||||||
# shellcheck disable=SC2229
|
|
||||||
#
|
#
|
||||||
# This is an utility script to manage Intel GPU frequencies.
|
# This is an utility script to manage Intel GPU frequencies.
|
||||||
# It can be used for debugging performance problems or trying to obtain a stable
|
# It can be used for debugging performance problems or trying to obtain a stable
|
||||||
@@ -203,7 +193,7 @@ compute_freq_set() {
|
|||||||
val=${FREQ_RPn}
|
val=${FREQ_RPn}
|
||||||
;;
|
;;
|
||||||
*%)
|
*%)
|
||||||
val=$((${1%?} * FREQ_RP0 / 100))
|
val=$((${1%?} * ${FREQ_RP0} / 100))
|
||||||
# Adjust freq to comply with 50 MHz increments
|
# Adjust freq to comply with 50 MHz increments
|
||||||
val=$((val / 50 * 50))
|
val=$((val / 50 * 50))
|
||||||
;;
|
;;
|
||||||
@@ -252,12 +242,12 @@ set_freq_max() {
|
|||||||
|
|
||||||
[ -z "${DRY_RUN}" ] || return 0
|
[ -z "${DRY_RUN}" ] || return 0
|
||||||
|
|
||||||
if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
|
printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
|
||||||
$(print_freq_sysfs_path boost) > /dev/null;
|
$(print_freq_sysfs_path boost) > /dev/null
|
||||||
then
|
[ $? -eq 0 ] || {
|
||||||
log ERROR "Failed to set GPU max frequency"
|
log ERROR "Failed to set GPU max frequency"
|
||||||
return 1
|
return 1
|
||||||
fi
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -282,11 +272,11 @@ set_freq_min() {
|
|||||||
|
|
||||||
[ -z "${DRY_RUN}" ] || return 0
|
[ -z "${DRY_RUN}" ] || return 0
|
||||||
|
|
||||||
if ! printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min);
|
printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min)
|
||||||
then
|
[ $? -eq 0 ] || {
|
||||||
log ERROR "Failed to set GPU min frequency"
|
log ERROR "Failed to set GPU min frequency"
|
||||||
return 1
|
return 1
|
||||||
fi
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -505,7 +495,7 @@ compute_cpu_freq_set() {
|
|||||||
val=${CPU_FREQ_cpuinfo_min}
|
val=${CPU_FREQ_cpuinfo_min}
|
||||||
;;
|
;;
|
||||||
*%)
|
*%)
|
||||||
val=$((${1%?} * CPU_FREQ_cpuinfo_max / 100))
|
val=$((${1%?} * ${CPU_FREQ_cpuinfo_max} / 100))
|
||||||
;;
|
;;
|
||||||
*[!0-9]*)
|
*[!0-9]*)
|
||||||
log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
|
log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
|
||||||
@@ -548,11 +538,11 @@ set_cpu_freq_max() {
|
|||||||
local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
|
local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
|
||||||
[ -e "${pstate_info}" ] && {
|
[ -e "${pstate_info}" ] && {
|
||||||
log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
|
log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
|
||||||
if ! printf "%s" "${target_freq}" > "${pstate_info}";
|
printf "%s" "${target_freq}" > "${pstate_info}"
|
||||||
then
|
[ $? -eq 0 ] || {
|
||||||
log ERROR "Failed to set intel_pstate max perf"
|
log ERROR "Failed to set intel_pstate max perf"
|
||||||
res=1
|
res=1
|
||||||
fi
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
local cpu_index
|
local cpu_index
|
||||||
@@ -560,17 +550,16 @@ set_cpu_freq_max() {
|
|||||||
read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }
|
read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }
|
||||||
|
|
||||||
target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
|
target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
|
||||||
tf_res=$?
|
[ -z "${target_freq}" ] && { res=$?; continue; }
|
||||||
[ -z "${target_freq}" ] && { res=$tf_res; continue; }
|
|
||||||
|
|
||||||
log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
|
log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
|
||||||
[ -n "${DRY_RUN}" ] && continue
|
[ -n "${DRY_RUN}" ] && continue
|
||||||
|
|
||||||
if ! printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index});
|
printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index})
|
||||||
then
|
[ $? -eq 0 ] || {
|
||||||
res=1
|
res=1
|
||||||
log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
|
log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
|
||||||
fi
|
}
|
||||||
done
|
done
|
||||||
|
|
||||||
return ${res}
|
return ${res}
|
||||||
|
@@ -1,18 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091 # the path is created in build-kdl and
|
|
||||||
# here is check if exist
|
|
||||||
# shellcheck disable=SC2086 # we want the arguments to be expanded
|
|
||||||
|
|
||||||
if ! [ -f /ci-kdl/bin/activate ]; then
|
|
||||||
echo -e "ci-kdl not installed; not monitoring temperature"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
KDL_ARGS="
|
|
||||||
--output-file=${RESULTS_DIR}/kdl.json
|
|
||||||
--log-level=WARNING
|
|
||||||
--num-samples=-1
|
|
||||||
"
|
|
||||||
|
|
||||||
source /ci-kdl/bin/activate
|
|
||||||
exec /ci-kdl/bin/ci-kdl ${KDL_ARGS}
|
|
21
.gitlab-ci/common/start-x.sh
Executable file
21
.gitlab-ci/common/start-x.sh
Executable file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
_XORG_SCRIPT="/xorg-script"
|
||||||
|
_FLAG_FILE="/xorg-started"
|
||||||
|
|
||||||
|
echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
|
||||||
|
if [ "x$1" != "x" ]; then
|
||||||
|
export LD_LIBRARY_PATH="${1}/lib"
|
||||||
|
export LIBGL_DRIVERS_PATH="${1}/lib/dri"
|
||||||
|
fi
|
||||||
|
xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &
|
||||||
|
|
||||||
|
# Wait for xorg to be ready for connections.
|
||||||
|
for i in 1 2 3 4 5; do
|
||||||
|
if [ -e "${_FLAG_FILE}" ]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 5
|
||||||
|
done
|
@@ -1,81 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# ALPINE_X86_64_BUILD_TAG
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
EPHEMERAL=(
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
DEPS=(
|
|
||||||
bash
|
|
||||||
bison
|
|
||||||
ccache
|
|
||||||
"clang${LLVM_VERSION}-dev"
|
|
||||||
cmake
|
|
||||||
clang-dev
|
|
||||||
coreutils
|
|
||||||
curl
|
|
||||||
flex
|
|
||||||
gcc
|
|
||||||
g++
|
|
||||||
git
|
|
||||||
gettext
|
|
||||||
glslang
|
|
||||||
graphviz
|
|
||||||
linux-headers
|
|
||||||
"llvm${LLVM_VERSION}-static"
|
|
||||||
"llvm${LLVM_VERSION}-dev"
|
|
||||||
meson
|
|
||||||
mold
|
|
||||||
musl-dev
|
|
||||||
expat-dev
|
|
||||||
elfutils-dev
|
|
||||||
libclc-dev
|
|
||||||
libdrm-dev
|
|
||||||
libva-dev
|
|
||||||
libpciaccess-dev
|
|
||||||
zlib-dev
|
|
||||||
python3-dev
|
|
||||||
py3-clang
|
|
||||||
py3-cparser
|
|
||||||
py3-mako
|
|
||||||
py3-packaging
|
|
||||||
py3-pip
|
|
||||||
py3-ply
|
|
||||||
py3-yaml
|
|
||||||
vulkan-headers
|
|
||||||
spirv-tools-dev
|
|
||||||
spirv-llvm-translator-dev
|
|
||||||
util-macros
|
|
||||||
wayland-dev
|
|
||||||
wayland-protocols
|
|
||||||
)
|
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
pip3 install --break-system-packages sphinx===5.1.1 hawkmoth===0.16.0
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
|
||||||
|
|
||||||
# too many vendor binarise, just keep the ones we need
|
|
||||||
find /usr/share/clc \
|
|
||||||
\( -type f -o -type l \) \
|
|
||||||
! -name 'spirv-mesa3d-.spv' \
|
|
||||||
! -name 'spirv64-mesa3d-.spv' \
|
|
||||||
-delete
|
|
||||||
|
|
||||||
apk del "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
@@ -1,32 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# This is a ci-templates build script to generate a container for LAVA SSH client.
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
set -e
|
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
EPHEMERAL=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# We only need these very basic packages to run the tests.
|
|
||||||
DEPS=(
|
|
||||||
openssh-client # for ssh
|
|
||||||
iputils # for ping
|
|
||||||
bash
|
|
||||||
curl
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
|
||||||
|
|
||||||
apk del "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
57
.gitlab-ci/container/arm.config
Normal file
57
.gitlab-ci/container/arm.config
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
CONFIG_LOCALVERSION_AUTO=y
|
||||||
|
CONFIG_DEBUG_KERNEL=y
|
||||||
|
|
||||||
|
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
|
||||||
|
CONFIG_BLK_DEV_INITRD=n
|
||||||
|
|
||||||
|
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_POWERSAVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_USERSPACE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_PASSIVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
|
||||||
|
|
||||||
|
CONFIG_DRM=y
|
||||||
|
CONFIG_DRM_ETNAVIV=y
|
||||||
|
CONFIG_DRM_ROCKCHIP=y
|
||||||
|
CONFIG_DRM_PANFROST=y
|
||||||
|
CONFIG_DRM_LIMA=y
|
||||||
|
CONFIG_DRM_PANEL_SIMPLE=y
|
||||||
|
CONFIG_PWM_CROS_EC=y
|
||||||
|
CONFIG_BACKLIGHT_PWM=y
|
||||||
|
|
||||||
|
CONFIG_ROCKCHIP_CDN_DP=n
|
||||||
|
|
||||||
|
CONFIG_SPI_ROCKCHIP=y
|
||||||
|
CONFIG_PWM_ROCKCHIP=y
|
||||||
|
CONFIG_PHY_ROCKCHIP_DP=y
|
||||||
|
CONFIG_DWMAC_ROCKCHIP=y
|
||||||
|
|
||||||
|
CONFIG_MFD_RK808=y
|
||||||
|
CONFIG_REGULATOR_RK808=y
|
||||||
|
CONFIG_RTC_DRV_RK808=y
|
||||||
|
CONFIG_COMMON_CLK_RK808=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_FAN53555=y
|
||||||
|
CONFIG_REGULATOR=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_VCTRL=y
|
||||||
|
|
||||||
|
CONFIG_KASAN=n
|
||||||
|
CONFIG_KASAN_INLINE=n
|
||||||
|
CONFIG_STACKTRACE=n
|
||||||
|
|
||||||
|
CONFIG_TMPFS=y
|
||||||
|
|
||||||
|
CONFIG_PROVE_LOCKING=n
|
||||||
|
CONFIG_DEBUG_LOCKDEP=n
|
||||||
|
CONFIG_SOFTLOCKUP_DETECTOR=n
|
||||||
|
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
|
||||||
|
|
||||||
|
CONFIG_FW_LOADER_COMPRESS=y
|
||||||
|
|
||||||
|
CONFIG_USB_USBNET=y
|
||||||
|
CONFIG_NETDEVICES=y
|
||||||
|
CONFIG_USB_NET_DRIVERS=y
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
CONFIG_USB_NET_AX8817X=y
|
||||||
|
CONFIG_USB_NET_SMSC95XX=y
|
172
.gitlab-ci/container/arm64.config
Normal file
172
.gitlab-ci/container/arm64.config
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
CONFIG_LOCALVERSION_AUTO=y
|
||||||
|
CONFIG_DEBUG_KERNEL=y
|
||||||
|
|
||||||
|
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
|
||||||
|
CONFIG_BLK_DEV_INITRD=n
|
||||||
|
|
||||||
|
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_POWERSAVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_USERSPACE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_PASSIVE=y
|
||||||
|
|
||||||
|
CONFIG_DRM=y
|
||||||
|
CONFIG_DRM_ROCKCHIP=y
|
||||||
|
CONFIG_DRM_PANFROST=y
|
||||||
|
CONFIG_DRM_LIMA=y
|
||||||
|
CONFIG_DRM_PANEL_SIMPLE=y
|
||||||
|
CONFIG_DRM_PANEL_EDP=y
|
||||||
|
CONFIG_DRM_MSM=y
|
||||||
|
CONFIG_DRM_I2C_ADV7511=y
|
||||||
|
CONFIG_PWM_CROS_EC=y
|
||||||
|
CONFIG_BACKLIGHT_PWM=y
|
||||||
|
|
||||||
|
CONFIG_ROCKCHIP_CDN_DP=n
|
||||||
|
|
||||||
|
CONFIG_SPI_ROCKCHIP=y
|
||||||
|
CONFIG_PWM_ROCKCHIP=y
|
||||||
|
CONFIG_PHY_ROCKCHIP_DP=y
|
||||||
|
CONFIG_DWMAC_ROCKCHIP=y
|
||||||
|
CONFIG_STMMAC_ETH=y
|
||||||
|
CONFIG_TYPEC_FUSB302=y
|
||||||
|
CONFIG_TYPEC=y
|
||||||
|
CONFIG_TYPEC_TCPM=y
|
||||||
|
|
||||||
|
# MSM platform bits
|
||||||
|
|
||||||
|
# For CONFIG_QCOM_LMH
|
||||||
|
CONFIG_OF=y
|
||||||
|
|
||||||
|
CONFIG_QCOM_COMMAND_DB=y
|
||||||
|
CONFIG_QCOM_RPMHPD=y
|
||||||
|
CONFIG_QCOM_RPMPD=y
|
||||||
|
CONFIG_SDM_GPUCC_845=y
|
||||||
|
CONFIG_SDM_VIDEOCC_845=y
|
||||||
|
CONFIG_SDM_DISPCC_845=y
|
||||||
|
CONFIG_SDM_LPASSCC_845=y
|
||||||
|
CONFIG_SDM_CAMCC_845=y
|
||||||
|
CONFIG_RESET_QCOM_PDC=y
|
||||||
|
CONFIG_DRM_TI_SN65DSI86=y
|
||||||
|
CONFIG_I2C_QCOM_GENI=y
|
||||||
|
CONFIG_SPI_QCOM_GENI=y
|
||||||
|
CONFIG_PHY_QCOM_QUSB2=y
|
||||||
|
CONFIG_PHY_QCOM_QMP=y
|
||||||
|
CONFIG_QCOM_CLK_APCC_MSM8996=y
|
||||||
|
CONFIG_QCOM_LLCC=y
|
||||||
|
CONFIG_QCOM_LMH=y
|
||||||
|
CONFIG_QCOM_SPMI_TEMP_ALARM=y
|
||||||
|
CONFIG_QCOM_WDT=y
|
||||||
|
CONFIG_POWER_RESET_QCOM_PON=y
|
||||||
|
CONFIG_RTC_DRV_PM8XXX=y
|
||||||
|
CONFIG_INTERCONNECT=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_SDM845=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_MSM8916=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_SC7180=y
|
||||||
|
CONFIG_CRYPTO_DEV_QCOM_RNG=y
|
||||||
|
CONFIG_SC_DISPCC_7180=y
|
||||||
|
CONFIG_SC_GPUCC_7180=y
|
||||||
|
|
||||||
|
# db410c ethernet
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
# db820c ethernet
|
||||||
|
CONFIG_ATL1C=y
|
||||||
|
|
||||||
|
CONFIG_ARCH_ALPINE=n
|
||||||
|
CONFIG_ARCH_BCM2835=n
|
||||||
|
CONFIG_ARCH_BCM_IPROC=n
|
||||||
|
CONFIG_ARCH_BERLIN=n
|
||||||
|
CONFIG_ARCH_BRCMSTB=n
|
||||||
|
CONFIG_ARCH_EXYNOS=n
|
||||||
|
CONFIG_ARCH_K3=n
|
||||||
|
CONFIG_ARCH_LAYERSCAPE=n
|
||||||
|
CONFIG_ARCH_LG1K=n
|
||||||
|
CONFIG_ARCH_HISI=n
|
||||||
|
CONFIG_ARCH_MVEBU=n
|
||||||
|
CONFIG_ARCH_SEATTLE=n
|
||||||
|
CONFIG_ARCH_SYNQUACER=n
|
||||||
|
CONFIG_ARCH_RENESAS=n
|
||||||
|
CONFIG_ARCH_R8A774A1=n
|
||||||
|
CONFIG_ARCH_R8A774C0=n
|
||||||
|
CONFIG_ARCH_R8A7795=n
|
||||||
|
CONFIG_ARCH_R8A7796=n
|
||||||
|
CONFIG_ARCH_R8A77965=n
|
||||||
|
CONFIG_ARCH_R8A77970=n
|
||||||
|
CONFIG_ARCH_R8A77980=n
|
||||||
|
CONFIG_ARCH_R8A77990=n
|
||||||
|
CONFIG_ARCH_R8A77995=n
|
||||||
|
CONFIG_ARCH_STRATIX10=n
|
||||||
|
CONFIG_ARCH_TEGRA=n
|
||||||
|
CONFIG_ARCH_SPRD=n
|
||||||
|
CONFIG_ARCH_THUNDER=n
|
||||||
|
CONFIG_ARCH_THUNDER2=n
|
||||||
|
CONFIG_ARCH_UNIPHIER=n
|
||||||
|
CONFIG_ARCH_VEXPRESS=n
|
||||||
|
CONFIG_ARCH_XGENE=n
|
||||||
|
CONFIG_ARCH_ZX=n
|
||||||
|
CONFIG_ARCH_ZYNQMP=n
|
||||||
|
|
||||||
|
# Strip out some stuff we don't need for graphics testing, to reduce
|
||||||
|
# the build.
|
||||||
|
CONFIG_CAN=n
|
||||||
|
CONFIG_WIRELESS=n
|
||||||
|
CONFIG_RFKILL=n
|
||||||
|
CONFIG_WLAN=n
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_FAN53555=y
|
||||||
|
CONFIG_REGULATOR=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_VCTRL=y
|
||||||
|
|
||||||
|
CONFIG_KASAN=n
|
||||||
|
CONFIG_KASAN_INLINE=n
|
||||||
|
CONFIG_STACKTRACE=n
|
||||||
|
|
||||||
|
CONFIG_TMPFS=y
|
||||||
|
|
||||||
|
CONFIG_PROVE_LOCKING=n
|
||||||
|
CONFIG_DEBUG_LOCKDEP=n
|
||||||
|
CONFIG_SOFTLOCKUP_DETECTOR=y
|
||||||
|
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
|
||||||
|
|
||||||
|
CONFIG_DETECT_HUNG_TASK=y
|
||||||
|
|
||||||
|
CONFIG_FW_LOADER_COMPRESS=y
|
||||||
|
CONFIG_FW_LOADER_USER_HELPER=n
|
||||||
|
|
||||||
|
CONFIG_USB_USBNET=y
|
||||||
|
CONFIG_NETDEVICES=y
|
||||||
|
CONFIG_USB_NET_DRIVERS=y
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
CONFIG_USB_NET_AX8817X=y
|
||||||
|
CONFIG_USB_NET_SMSC95XX=y
|
||||||
|
|
||||||
|
# For amlogic
|
||||||
|
CONFIG_MESON_GXL_PHY=y
|
||||||
|
CONFIG_MDIO_BUS_MUX_MESON_G12A=y
|
||||||
|
CONFIG_DRM_MESON=y
|
||||||
|
|
||||||
|
# For Mediatek
|
||||||
|
CONFIG_DRM_MEDIATEK=y
|
||||||
|
CONFIG_PWM_MEDIATEK=y
|
||||||
|
CONFIG_DRM_MEDIATEK_HDMI=y
|
||||||
|
CONFIG_GNSS=y
|
||||||
|
CONFIG_GNSS_MTK_SERIAL=y
|
||||||
|
CONFIG_HW_RANDOM=y
|
||||||
|
CONFIG_HW_RANDOM_MTK=y
|
||||||
|
CONFIG_MTK_DEVAPC=y
|
||||||
|
CONFIG_PWM_MTK_DISP=y
|
||||||
|
CONFIG_MTK_CMDQ=y
|
||||||
|
|
||||||
|
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
|
||||||
|
CONFIG_ARCH_TEGRA=y
|
||||||
|
CONFIG_DRM_NOUVEAU=m
|
||||||
|
CONFIG_DRM_TEGRA=m
|
||||||
|
CONFIG_R8169=y
|
||||||
|
CONFIG_STAGING=y
|
||||||
|
CONFIG_DRM_TEGRA_STAGING=y
|
||||||
|
CONFIG_TEGRA_HOST1X=y
|
||||||
|
CONFIG_ARM_TEGRA_DEVFREQ=y
|
||||||
|
CONFIG_TEGRA_SOCTHERM=y
|
||||||
|
CONFIG_DRM_TEGRA_DEBUG=y
|
||||||
|
CONFIG_PWM_TEGRA=y
|
@@ -1,44 +1,37 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
# Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves
|
# Fetch the arm-built rootfs image and unpack it in our x86 container (saves
|
||||||
# network transfer, disk usage, and runtime on test jobs)
|
# network transfer, disk usage, and runtime on test jobs)
|
||||||
|
|
||||||
# shellcheck disable=SC2154 # arch is assigned in previous scripts
|
if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
|
||||||
if curl -X HEAD -s "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
|
|
||||||
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
|
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
|
||||||
else
|
else
|
||||||
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
|
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz
|
||||||
"${ARTIFACTS_URL}"/lava-rootfs.tar.zst -o rootfs.tar.zst
|
mkdir -p /rootfs-$arch
|
||||||
mkdir -p /rootfs-"$arch"
|
tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz
|
||||||
tar -C /rootfs-"$arch" '--exclude=./dev/*' --zstd -xf rootfs.tar.zst
|
rm rootfs.tgz
|
||||||
rm rootfs.tar.zst
|
|
||||||
|
|
||||||
if [[ $arch == "arm64" ]]; then
|
if [[ $arch == "arm64" ]]; then
|
||||||
mkdir -p /baremetal-files
|
mkdir -p /baremetal-files
|
||||||
pushd /baremetal-files
|
pushd /baremetal-files
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/Image
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image
|
wget ${ARTIFACTS_URL}/Image.gz
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/cheza-kernel
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel
|
|
||||||
|
|
||||||
DEVICE_TREES=""
|
DEVICE_TREES=""
|
||||||
DEVICE_TREES="$DEVICE_TREES apq8016-sbc-usb-host.dtb"
|
DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
|
||||||
DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
|
DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
|
||||||
DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
|
DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
|
||||||
DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb"
|
|
||||||
|
|
||||||
for DTB in $DEVICE_TREES; do
|
for DTB in $DEVICE_TREES; do
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/$DTB
|
||||||
-O "${KERNEL_IMAGE_BASE}/arm64/$DTB"
|
|
||||||
done
|
done
|
||||||
|
|
||||||
popd
|
popd
|
||||||
@@ -46,16 +39,12 @@ elif [[ $arch == "armhf" ]]; then
|
|||||||
mkdir -p /baremetal-files
|
mkdir -p /baremetal-files
|
||||||
pushd /baremetal-files
|
pushd /baremetal-files
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/zImage
|
||||||
-O "${KERNEL_IMAGE_BASE}"/armhf/zImage
|
|
||||||
|
|
||||||
DEVICE_TREES=""
|
DEVICE_TREES="imx6q-cubox-i.dtb"
|
||||||
DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
|
|
||||||
DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb"
|
|
||||||
|
|
||||||
for DTB in $DEVICE_TREES; do
|
for DTB in $DEVICE_TREES; do
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget ${ARTIFACTS_URL}/$DTB
|
||||||
-O "${KERNEL_IMAGE_BASE}/armhf/$DTB"
|
|
||||||
done
|
done
|
||||||
|
|
||||||
popd
|
popd
|
||||||
|
@@ -1,119 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -exu
|
|
||||||
|
|
||||||
# If CI vars are not set, assign an empty value, this prevents -u to fail
|
|
||||||
: "${CI:=}"
|
|
||||||
: "${CI_PROJECT_PATH:=}"
|
|
||||||
|
|
||||||
# Early check for required env variables, relies on `set -u`
|
|
||||||
: "$ANDROID_NDK_VERSION"
|
|
||||||
: "$ANDROID_SDK_VERSION"
|
|
||||||
: "$ANDROID_LLVM_VERSION"
|
|
||||||
: "$ANDROID_LLVM_ARTIFACT_NAME"
|
|
||||||
: "$S3_JWT_FILE"
|
|
||||||
: "$S3_HOST"
|
|
||||||
: "$S3_ANDROID_BUCKET"
|
|
||||||
|
|
||||||
# Check for CI if the auth file used later on is non-empty
|
|
||||||
if [ -n "$CI" ] && [ ! -s "${S3_JWT_FILE}" ]; then
|
|
||||||
echo "Error: ${S3_JWT_FILE} is empty." 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"; then
|
|
||||||
echo "Artifact ${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst already exists, skip re-building."
|
|
||||||
|
|
||||||
# Download prebuilt LLVM libraries for Android when they have not changed,
|
|
||||||
# to save some time
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
tar -C / --zstd -xf "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
rm "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Install some dependencies needed to build LLVM
|
|
||||||
EPHEMERAL=(
|
|
||||||
ninja-build
|
|
||||||
unzip
|
|
||||||
)
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends --no-remove "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
ANDROID_NDK="android-ndk-${ANDROID_NDK_VERSION}"
|
|
||||||
ANDROID_NDK_ROOT="/${ANDROID_NDK}"
|
|
||||||
if [ ! -d "$ANDROID_NDK_ROOT" ];
|
|
||||||
then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "${ANDROID_NDK}.zip" \
|
|
||||||
"https://dl.google.com/android/repository/${ANDROID_NDK}-linux.zip"
|
|
||||||
unzip -d / "${ANDROID_NDK}.zip" "$ANDROID_NDK/source.properties" "$ANDROID_NDK/build/cmake/*" "$ANDROID_NDK/toolchains/llvm/*"
|
|
||||||
rm "${ANDROID_NDK}.zip"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -d "/llvm-project" ];
|
|
||||||
then
|
|
||||||
mkdir "/llvm-project"
|
|
||||||
pushd "/llvm-project"
|
|
||||||
git init
|
|
||||||
git remote add origin https://github.com/llvm/llvm-project.git
|
|
||||||
git fetch --depth 1 origin "$ANDROID_LLVM_VERSION"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
popd
|
|
||||||
fi
|
|
||||||
|
|
||||||
pushd "/llvm-project"
|
|
||||||
|
|
||||||
# Checkout again the intended version, just in case of a pre-existing full clone
|
|
||||||
git checkout "$ANDROID_LLVM_VERSION" || true
|
|
||||||
|
|
||||||
LLVM_INSTALL_PREFIX="/${ANDROID_LLVM_ARTIFACT_NAME}"
|
|
||||||
|
|
||||||
rm -rf build/
|
|
||||||
cmake -GNinja -S llvm -B build/ \
|
|
||||||
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake" \
|
|
||||||
-DANDROID_ABI=x86_64 \
|
|
||||||
-DANDROID_PLATFORM="android-${ANDROID_SDK_VERSION}" \
|
|
||||||
-DANDROID_NDK="${ANDROID_NDK_ROOT}" \
|
|
||||||
-DCMAKE_ANDROID_ARCH_ABI=x86_64 \
|
|
||||||
-DCMAKE_ANDROID_NDK="${ANDROID_NDK_ROOT}" \
|
|
||||||
-DCMAKE_BUILD_TYPE=MinSizeRel \
|
|
||||||
-DCMAKE_SYSTEM_NAME=Android \
|
|
||||||
-DCMAKE_SYSTEM_VERSION="${ANDROID_SDK_VERSION}" \
|
|
||||||
-DCMAKE_INSTALL_PREFIX="${LLVM_INSTALL_PREFIX}" \
|
|
||||||
-DCMAKE_CXX_FLAGS="-march=x86-64 --target=x86_64-linux-android${ANDROID_SDK_VERSION} -fno-rtti" \
|
|
||||||
-DLLVM_HOST_TRIPLE="x86_64-linux-android${ANDROID_SDK_VERSION}" \
|
|
||||||
-DLLVM_TARGETS_TO_BUILD=X86 \
|
|
||||||
-DLLVM_BUILD_LLVM_DYLIB=OFF \
|
|
||||||
-DLLVM_BUILD_TESTS=OFF \
|
|
||||||
-DLLVM_BUILD_EXAMPLES=OFF \
|
|
||||||
-DLLVM_BUILD_DOCS=OFF \
|
|
||||||
-DLLVM_BUILD_TOOLS=OFF \
|
|
||||||
-DLLVM_ENABLE_RTTI=OFF \
|
|
||||||
-DLLVM_BUILD_INSTRUMENTED_COVERAGE=OFF \
|
|
||||||
-DLLVM_NATIVE_TOOL_DIR="${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin" \
|
|
||||||
-DLLVM_ENABLE_PIC=False \
|
|
||||||
-DLLVM_OPTIMIZED_TABLEGEN=ON
|
|
||||||
|
|
||||||
ninja "-j${FDO_CI_CONCURRENT:-4}" -C build/ install
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
rm -rf /llvm-project
|
|
||||||
|
|
||||||
tar --zstd -cf "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "$LLVM_INSTALL_PREFIX"
|
|
||||||
|
|
||||||
# If run in CI upload the tar.zst archive to S3 to avoid rebuilding it if the
|
|
||||||
# version does not change, and delete it.
|
|
||||||
# The file is not deleted for non-CI because it can be useful in local runs.
|
|
||||||
if [ -n "$CI" ]; then
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
rm "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rf "$LLVM_INSTALL_PREFIX"
|
|
||||||
|
|
||||||
apt-get purge -y "${EPHEMERAL[@]}"
|
|
@@ -1,103 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
uncollapsed_section_start angle "Building angle"
|
|
||||||
|
|
||||||
ANGLE_REV="76025caa1a059f464a2b0e8f879dbd4746f092b9"
|
|
||||||
SCRIPTS_DIR="$(pwd)/.gitlab-ci"
|
|
||||||
ANGLE_PATCH_DIR="${SCRIPTS_DIR}/container/patches"
|
|
||||||
|
|
||||||
# DEPOT tools
|
|
||||||
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git /depot-tools
|
|
||||||
export PATH=/depot-tools:$PATH
|
|
||||||
export DEPOT_TOOLS_UPDATE=0
|
|
||||||
|
|
||||||
mkdir /angle-build
|
|
||||||
mkdir /angle
|
|
||||||
pushd /angle-build
|
|
||||||
git init
|
|
||||||
git remote add origin https://chromium.googlesource.com/angle/angle.git
|
|
||||||
git fetch --depth 1 origin "$ANGLE_REV"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
|
|
||||||
angle_patch_files=(
|
|
||||||
build-angle_deps_Make-more-sources-conditional.patch
|
|
||||||
)
|
|
||||||
for patch in "${angle_patch_files[@]}"; do
|
|
||||||
echo "Apply patch to ANGLE from ${patch}"
|
|
||||||
GIT_COMMITTER_DATE="$(LC_TIME=C date -d@0)" git am < "${ANGLE_PATCH_DIR}/${patch}"
|
|
||||||
done
|
|
||||||
|
|
||||||
{
|
|
||||||
echo "ANGLE base version $ANGLE_REV"
|
|
||||||
echo "The following local patches are applied on top:"
|
|
||||||
git log --reverse --oneline $ANGLE_REV.. --format='- %s'
|
|
||||||
} > /angle/version
|
|
||||||
|
|
||||||
# source preparation
|
|
||||||
gclient config --name REPLACE-WITH-A-DOT --unmanaged \
|
|
||||||
--custom-var='angle_enable_cl=False' \
|
|
||||||
--custom-var='angle_enable_cl_testing=False' \
|
|
||||||
--custom-var='angle_enable_vulkan_validation_layers=False' \
|
|
||||||
--custom-var='angle_enable_wgpu=False' \
|
|
||||||
--custom-var='build_allow_regenerate=False' \
|
|
||||||
--custom-var='build_angle_deqp_tests=False' \
|
|
||||||
--custom-var='build_angle_perftests=False' \
|
|
||||||
--custom-var='build_with_catapult=False' \
|
|
||||||
--custom-var='build_with_swiftshader=False' \
|
|
||||||
https://chromium.googlesource.com/angle/angle.git
|
|
||||||
sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient
|
|
||||||
gclient sync -j"${FDO_CI_CONCURRENT:-4}"
|
|
||||||
|
|
||||||
mkdir -p out/Release
|
|
||||||
echo '
|
|
||||||
angle_build_all=false
|
|
||||||
angle_build_tests=false
|
|
||||||
angle_enable_cl=false
|
|
||||||
angle_enable_cl_testing=false
|
|
||||||
angle_enable_gl=false
|
|
||||||
angle_enable_gl_desktop_backend=false
|
|
||||||
angle_enable_null=false
|
|
||||||
angle_enable_swiftshader=false
|
|
||||||
angle_enable_trace=false
|
|
||||||
angle_enable_wgpu=false
|
|
||||||
angle_enable_vulkan=true
|
|
||||||
angle_enable_vulkan_api_dump_layer=false
|
|
||||||
angle_enable_vulkan_validation_layers=false
|
|
||||||
angle_has_frame_capture=false
|
|
||||||
angle_has_histograms=false
|
|
||||||
angle_use_custom_libvulkan=false
|
|
||||||
angle_egl_extension="so.1"
|
|
||||||
angle_glesv2_extension="so.2"
|
|
||||||
build_angle_deqp_tests=false
|
|
||||||
dcheck_always_on=true
|
|
||||||
enable_expensive_dchecks=false
|
|
||||||
is_debug=false
|
|
||||||
' > out/Release/args.gn
|
|
||||||
|
|
||||||
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
|
|
||||||
build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
|
|
||||||
fi
|
|
||||||
|
|
||||||
gn gen out/Release
|
|
||||||
# depot_tools overrides ninja with a version that doesn't work. We want
|
|
||||||
# ninja with FDO_CI_CONCURRENT anyway.
|
|
||||||
/usr/local/bin/ninja -C out/Release/ libEGL libGLESv2
|
|
||||||
|
|
||||||
rm -f out/Release/libvulkan.so* out/Release/*.so.TOC
|
|
||||||
cp out/Release/lib*.so* /angle/
|
|
||||||
ln -s libEGL.so.1 /angle/libEGL.so
|
|
||||||
ln -s libGLESv2.so.2 /angle/libGLESv2.so
|
|
||||||
|
|
||||||
rm -rf out
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf /depot-tools
|
|
||||||
rm -rf /angle-build
|
|
||||||
|
|
||||||
section_end angle
|
|
@@ -1,23 +1,14 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
set -ex
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
APITRACE_VERSION="790380e05854d5c9d315555444ffcc7acb8f4037"
|
||||||
|
|
||||||
uncollapsed_section_start apitrace "Building apitrace"
|
|
||||||
|
|
||||||
APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a"
|
|
||||||
|
|
||||||
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
||||||
pushd /apitrace
|
pushd /apitrace
|
||||||
git checkout "$APITRACE_VERSION"
|
git checkout "$APITRACE_VERSION"
|
||||||
git submodule update --init --depth 1 --recursive
|
git submodule update --init --depth 1 --recursive
|
||||||
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on ${EXTRA_CMAKE_ARGS:-}
|
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
|
||||||
cmake --build _build --parallel --target apitrace eglretrace
|
cmake --build _build --parallel --target apitrace eglretrace
|
||||||
mkdir build
|
mkdir build
|
||||||
cp _build/apitrace build
|
cp _build/apitrace build
|
||||||
@@ -25,5 +16,3 @@ cp _build/eglretrace build
|
|||||||
${STRIP_CMD:-strip} build/*
|
${STRIP_CMD:-strip} build/*
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end apitrace
|
|
||||||
|
@@ -1,23 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
uncollapsed_section_start bindgen "Building bindgen"
|
|
||||||
|
|
||||||
BINDGEN_VER=0.65.1
|
|
||||||
CBINDGEN_VER=0.26.0
|
|
||||||
|
|
||||||
# bindgen
|
|
||||||
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|
||||||
bindgen-cli --version ${BINDGEN_VER} \
|
|
||||||
--locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local
|
|
||||||
|
|
||||||
# cbindgen
|
|
||||||
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|
||||||
cbindgen --version ${CBINDGEN_VER} \
|
|
||||||
--locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local
|
|
||||||
|
|
||||||
section_end bindgen
|
|
@@ -1,55 +1,41 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
set -ex
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BASE_TAG
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
SCRIPT_DIR="$(pwd)"
|
||||||
|
|
||||||
uncollapsed_section_start crosvm "Building crosvm"
|
CROSVM_VERSION=c7cd0e0114c8363b884ba56d8e12adee718dcc93
|
||||||
|
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
|
||||||
git config --global user.email "mesa@example.com"
|
|
||||||
git config --global user.name "Mesa CI"
|
|
||||||
|
|
||||||
CROSVM_VERSION=2118fbb57ca26b495a9aa407845c7729d697a24b
|
|
||||||
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
|
|
||||||
pushd /platform/crosvm
|
pushd /platform/crosvm
|
||||||
git checkout "$CROSVM_VERSION"
|
git checkout "$CROSVM_VERSION"
|
||||||
git submodule update --init
|
git submodule update --init
|
||||||
|
# Apply all crosvm patches for Mesa CI
|
||||||
|
cat "$SCRIPT_DIR"/.gitlab-ci/container/build-crosvm_*.patch |
|
||||||
|
patch -p1
|
||||||
|
|
||||||
VIRGLRENDERER_VERSION=57a2b82e0958f08d02ade8400786e1ca0935c9b1
|
VIRGLRENDERER_VERSION=dd301caf7e05ec9c09634fb7872067542aad89b7
|
||||||
rm -rf third_party/virglrenderer
|
rm -rf third_party/virglrenderer
|
||||||
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
|
git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
|
||||||
pushd third_party/virglrenderer
|
pushd third_party/virglrenderer
|
||||||
git checkout "$VIRGLRENDERER_VERSION"
|
git checkout "$VIRGLRENDERER_VERSION"
|
||||||
meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true ${EXTRA_MESON_ARGS:-}
|
meson build/ $EXTRA_MESON_ARGS
|
||||||
meson install -C build
|
ninja -C build install
|
||||||
popd
|
popd
|
||||||
|
|
||||||
cargo update -p pkg-config@0.3.26 --precise 0.3.27
|
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
||||||
|
bindgen \
|
||||||
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
|
--root /usr/local \
|
||||||
|
$EXTRA_CARGO_ARGS
|
||||||
|
|
||||||
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
||||||
bindgen-cli \
|
|
||||||
--locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local \
|
|
||||||
--version 0.65.1 \
|
|
||||||
${EXTRA_CARGO_ARGS:-}
|
|
||||||
|
|
||||||
CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
--locked \
|
--locked \
|
||||||
--features 'default-no-sandbox gpu x virgl_renderer' \
|
--features 'default-no-sandbox gpu x virgl_renderer virgl_renderer_next' \
|
||||||
--path . \
|
--path . \
|
||||||
--root /usr/local \
|
--root /usr/local \
|
||||||
${EXTRA_CARGO_ARGS:-}
|
$EXTRA_CARGO_ARGS
|
||||||
|
|
||||||
popd
|
popd
|
||||||
|
|
||||||
rm -rf /platform/crosvm
|
rm -rf /platform/crosvm
|
||||||
|
|
||||||
section_end crosvm
|
|
||||||
|
43
.gitlab-ci/container/build-crosvm_no-syslog.patch
Normal file
43
.gitlab-ci/container/build-crosvm_no-syslog.patch
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
From 3c57ec558bccc67fd53363c23deea20646be5c47 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
|
||||||
|
Date: Wed, 17 Nov 2021 10:18:04 +0100
|
||||||
|
Subject: [PATCH] Hack syslog out
|
||||||
|
|
||||||
|
It's causing stability problems when running several Crosvm instances in
|
||||||
|
parallel.
|
||||||
|
|
||||||
|
Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
|
||||||
|
---
|
||||||
|
base/src/unix/linux/syslog.rs | 2 +-
|
||||||
|
common/sys_util/src/linux/syslog.rs | 2 +-
|
||||||
|
2 files changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/base/src/unix/linux/syslog.rs b/base/src/unix/linux/syslog.rs
|
||||||
|
index 05972a3a..f0db3781 100644
|
||||||
|
--- a/base/src/unix/linux/syslog.rs
|
||||||
|
+++ b/base/src/unix/linux/syslog.rs
|
||||||
|
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
|
||||||
|
impl Syslog for PlatformSyslog {
|
||||||
|
fn new() -> Result<Self, Error> {
|
||||||
|
Ok(Self {
|
||||||
|
- socket: Some(openlog_and_get_socket()?),
|
||||||
|
+ socket: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
diff --git a/common/sys_util/src/linux/syslog.rs b/common/sys_util/src/linux/syslog.rs
|
||||||
|
index 05972a3a..f0db3781 100644
|
||||||
|
--- a/common/sys_util/src/linux/syslog.rs
|
||||||
|
+++ b/common/sys_util/src/linux/syslog.rs
|
||||||
|
@@ -35,7 +35,7 @@ pub struct PlatformSyslog {
|
||||||
|
impl Syslog for PlatformSyslog {
|
||||||
|
fn new() -> Result<Self, Error> {
|
||||||
|
Ok(Self {
|
||||||
|
- socket: Some(openlog_and_get_socket()?),
|
||||||
|
+ socket: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
--
|
||||||
|
2.25.1
|
||||||
|
|
@@ -1,100 +1,24 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/sh
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
set -ex
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
|
||||||
# DEBIAN_BASE_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
|
||||||
|
# Build and install from source
|
||||||
|
DEQP_RUNNER_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git}"
|
||||||
|
|
||||||
uncollapsed_section_start deqp-runner "Building deqp-runner"
|
if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
|
||||||
|
DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
|
||||||
DEQP_RUNNER_VERSION=0.20.3
|
else
|
||||||
|
DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
|
||||||
commits_to_backport=(
|
|
||||||
)
|
|
||||||
|
|
||||||
patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}"
|
|
||||||
|
|
||||||
if [ -n "${DEQP_RUNNER_GIT_TAG:-}" ]; then
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG"
|
|
||||||
elif [ -n "${DEQP_RUNNER_GIT_REV:-}" ]; then
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV"
|
|
||||||
else
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="v$DEQP_RUNNER_VERSION"
|
|
||||||
fi
|
|
||||||
|
|
||||||
BASE_PWD=$PWD
|
|
||||||
|
|
||||||
mkdir -p /deqp-runner
|
|
||||||
pushd /deqp-runner
|
|
||||||
mkdir deqp-runner-git
|
|
||||||
pushd deqp-runner-git
|
|
||||||
git init
|
|
||||||
git remote add origin "$DEQP_RUNNER_GIT_URL"
|
|
||||||
git fetch --depth 1 origin "$DEQP_RUNNER_GIT_CHECKOUT"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
|
|
||||||
for commit in "${commits_to_backport[@]}"
|
|
||||||
do
|
|
||||||
PATCH_URL="https://gitlab.freedesktop.org/mesa/deqp-runner/-/commit/$commit.patch"
|
|
||||||
echo "Backport deqp-runner commit $commit from $PATCH_URL"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | git am
|
|
||||||
done
|
|
||||||
|
|
||||||
for patch in "${patch_files[@]}"
|
|
||||||
do
|
|
||||||
echo "Apply patch to deqp-runner from $patch"
|
|
||||||
git am "$BASE_PWD/.gitlab-ci/container/patches/$patch"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -z "${RUST_TARGET:-}" ]; then
|
|
||||||
RUST_TARGET=""
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "$RUST_TARGET" != *-android ]]; then
|
|
||||||
# When CC (/usr/lib/ccache/gcc) variable is set, the rust compiler uses
|
|
||||||
# this variable when cross-compiling arm32 and build fails for zsys-sys.
|
|
||||||
# So unset the CC variable when cross-compiling for arm32.
|
|
||||||
SAVEDCC=${CC:-}
|
|
||||||
if [ "$RUST_TARGET" = "armv7-unknown-linux-gnueabihf" ]; then
|
|
||||||
unset CC
|
|
||||||
fi
|
fi
|
||||||
cargo install --locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
|
||||||
--root /usr/local \
|
|
||||||
${EXTRA_CARGO_ARGS:-} \
|
|
||||||
--path .
|
|
||||||
CC=$SAVEDCC
|
|
||||||
else
|
else
|
||||||
cargo install --locked \
|
# Install from package registry
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
DEQP_RUNNER_CARGO_ARGS="--version 0.13.1 ${EXTRA_CARGO_ARGS} -- deqp-runner"
|
||||||
--root /usr/local --version 2.10.0 \
|
|
||||||
cargo-ndk
|
|
||||||
|
|
||||||
rustup target add $RUST_TARGET
|
|
||||||
RUSTFLAGS='-C target-feature=+crt-static' cargo ndk --target $RUST_TARGET build --release
|
|
||||||
|
|
||||||
mv target/$RUST_TARGET/release/deqp-runner /deqp-runner
|
|
||||||
|
|
||||||
cargo uninstall --locked \
|
|
||||||
--root /usr/local \
|
|
||||||
cargo-ndk
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
popd
|
cargo install --locked \
|
||||||
rm -rf deqp-runner-git
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
popd
|
--root /usr/local \
|
||||||
|
${DEQP_RUNNER_CARGO_ARGS}
|
||||||
# remove unused test runners to shrink images for the Mesa CI build (not kernel,
|
|
||||||
# which chooses its own deqp branch)
|
|
||||||
if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then
|
|
||||||
rm -f /usr/local/bin/igt-runner
|
|
||||||
fi
|
|
||||||
|
|
||||||
section_end deqp-runner
|
|
||||||
|
355
.gitlab-ci/container/build-deqp.sh
Executable file → Normal file
355
.gitlab-ci/container/build-deqp.sh
Executable file → Normal file
@@ -1,321 +1,86 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ue -o pipefail
|
|
||||||
|
|
||||||
# shellcheck disable=SC2153
|
|
||||||
deqp_api=${DEQP_API,,}
|
|
||||||
|
|
||||||
uncollapsed_section_start deqp-$deqp_api "Building dEQP $DEQP_API"
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
# See `deqp_build_targets` below for which release is used to produce which
|
|
||||||
# binary. Unless this comment has bitrotten:
|
|
||||||
# - the commit from the main branch produces the deqp tools and `deqp-vk`,
|
|
||||||
# - the VK release produces `deqp-vk`,
|
|
||||||
# - the GL release produces `glcts`, and
|
|
||||||
# - the GLES release produces `deqp-gles*` and `deqp-egl`
|
|
||||||
|
|
||||||
DEQP_MAIN_COMMIT=a9988483c0864d7190e5e6264ccead95423dfd00
|
|
||||||
DEQP_VK_VERSION=1.4.1.1
|
|
||||||
DEQP_GL_VERSION=4.6.5.0
|
|
||||||
DEQP_GLES_VERSION=3.2.11.0
|
|
||||||
|
|
||||||
# Patches to VulkanCTS may come from commits in their repo (listed in
|
|
||||||
# cts_commits_to_backport) or patch files stored in our repo (in the patch
|
|
||||||
# directory `$OLDPWD/.gitlab-ci/container/patches/` listed in cts_patch_files).
|
|
||||||
# Both list variables would have comments explaining the reasons behind the
|
|
||||||
# patches.
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
main_cts_commits_to_backport=(
|
|
||||||
# If you find yourself wanting to add something in here, consider whether
|
|
||||||
# bumping DEQP_MAIN_COMMIT is not a better solution :)
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
main_cts_patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
vk_cts_commits_to_backport=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
vk_cts_patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gl_cts_commits_to_backport=(
|
|
||||||
# Add #include <cmath> in deMath.h when being compiled by C++
|
|
||||||
71808fe7d0a640dfd703e845d93ba1c5ab751055
|
|
||||||
# Revert "Add #include <cmath> in deMath.h when being compiled by C++ compiler"
|
|
||||||
# This also adds an alternative fix along with the revert.
|
|
||||||
6164879a0acce258637d261592a9c395e564b361
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gl_cts_patch_files=(
|
|
||||||
build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
gl_cts_patch_files+=(
|
|
||||||
build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch
|
|
||||||
build-deqp-gl_Android-prints-to-stdout-instead-of-logcat.patch
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# GLES builds also EGL
|
|
||||||
gles_cts_commits_to_backport=(
|
|
||||||
# Add #include <cmath> in deMath.h when being compiled by C++
|
|
||||||
71808fe7d0a640dfd703e845d93ba1c5ab751055
|
|
||||||
# Revert "Add #include <cmath> in deMath.h when being compiled by C++ compiler"
|
|
||||||
# This also adds an alternative fix along with the revert.
|
|
||||||
6164879a0acce258637d261592a9c395e564b361
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gles_cts_patch_files=(
|
|
||||||
build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
gles_cts_patch_files+=(
|
|
||||||
build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch
|
|
||||||
build-deqp-gles_Android-prints-to-stdout-instead-of-logcat.patch
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
### Careful editing anything below this line
|
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
git config --global user.email "mesa@example.com"
|
||||||
git config --global user.name "Mesa CI"
|
git config --global user.name "Mesa CI"
|
||||||
|
git clone \
|
||||||
# shellcheck disable=SC2153
|
https://github.com/KhronosGroup/VK-GL-CTS.git \
|
||||||
case "${DEQP_API}" in
|
-b vulkan-cts-1.3.3.0 \
|
||||||
tools) DEQP_VERSION="$DEQP_MAIN_COMMIT";;
|
--depth 1 \
|
||||||
*-main) DEQP_VERSION="$DEQP_MAIN_COMMIT";;
|
/VK-GL-CTS
|
||||||
VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";;
|
|
||||||
GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";;
|
|
||||||
GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";;
|
|
||||||
*) echo "Unexpected DEQP_API value: $DEQP_API"; exit 1;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
mkdir -p /VK-GL-CTS
|
|
||||||
pushd /VK-GL-CTS
|
pushd /VK-GL-CTS
|
||||||
[ -e .git ] || {
|
|
||||||
git init
|
|
||||||
git remote add origin https://github.com/KhronosGroup/VK-GL-CTS.git
|
|
||||||
}
|
|
||||||
git fetch --depth 1 origin "$DEQP_VERSION"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
DEQP_COMMIT=$(git rev-parse FETCH_HEAD)
|
|
||||||
|
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
|
||||||
git fetch origin main
|
|
||||||
if ! git merge-base --is-ancestor "$DEQP_MAIN_COMMIT" origin/main; then
|
|
||||||
echo "VK-GL-CTS commit $DEQP_MAIN_COMMIT is not a commit from the main branch."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
mkdir -p /deqp-$deqp_api
|
|
||||||
|
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
|
||||||
prefix="main"
|
|
||||||
else
|
|
||||||
prefix="$deqp_api"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cts_commits_to_backport="${prefix}_cts_commits_to_backport[@]"
|
|
||||||
for commit in "${!cts_commits_to_backport}"
|
|
||||||
do
|
|
||||||
PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
|
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \
|
|
||||||
GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am -
|
|
||||||
done
|
|
||||||
|
|
||||||
cts_patch_files="${prefix}_cts_patch_files[@]"
|
|
||||||
for patch in "${!cts_patch_files}"
|
|
||||||
do
|
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $patch"
|
|
||||||
GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch
|
|
||||||
done
|
|
||||||
|
|
||||||
{
|
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
|
||||||
commit_desc=$(git show --no-patch --format='commit %h on %ci' --abbrev=10 "$DEQP_COMMIT")
|
|
||||||
echo "dEQP $DEQP_API at $commit_desc"
|
|
||||||
else
|
|
||||||
echo "dEQP $DEQP_API version $DEQP_VERSION"
|
|
||||||
fi
|
|
||||||
if [ "$(git rev-parse HEAD)" != "$DEQP_COMMIT" ]; then
|
|
||||||
echo "The following local patches are applied on top:"
|
|
||||||
git log --reverse --oneline "$DEQP_COMMIT".. --format='- %s'
|
|
||||||
fi
|
|
||||||
} > /deqp-$deqp_api/deqp-$deqp_api-version
|
|
||||||
|
|
||||||
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
||||||
# libpng (sigh). The archives get their checksums checked anyway, and git
|
# libpng (sigh). The archives get their checksums checked anyway, and git
|
||||||
# always goes through ssh or https.
|
# always goes through ssh or https.
|
||||||
python3 external/fetch_sources.py --insecure
|
python3 external/fetch_sources.py --insecure
|
||||||
|
|
||||||
if [[ "$DEQP_API" = tools ]]; then
|
mkdir -p /deqp
|
||||||
# Save the testlog stylesheets:
|
|
||||||
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp-$deqp_api
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
# Save the testlog stylesheets:
|
||||||
|
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
|
||||||
popd
|
popd
|
||||||
|
|
||||||
deqp_build_targets=()
|
pushd /deqp
|
||||||
case "${DEQP_API}" in
|
# When including EGL/X11 testing, do that build first and save off its
|
||||||
VK|VK-main)
|
# deqp-egl binary.
|
||||||
deqp_build_targets+=(deqp-vk)
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
;;
|
-DDEQP_TARGET=x11_egl_glx \
|
||||||
GL)
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
deqp_build_targets+=(glcts)
|
$EXTRA_CMAKE_ARGS
|
||||||
;;
|
ninja modules/egl/deqp-egl
|
||||||
GLES)
|
cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
|
||||||
deqp_build_targets+=(deqp-gles{2,3,31})
|
|
||||||
deqp_build_targets+=(glcts) # needed for gles*-khr tests
|
|
||||||
# deqp-egl also comes from this build, but it is handled separately below.
|
|
||||||
;;
|
|
||||||
tools)
|
|
||||||
deqp_build_targets+=(testlog-to-xml)
|
|
||||||
deqp_build_targets+=(testlog-to-csv)
|
|
||||||
deqp_build_targets+=(testlog-to-junit)
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
OLD_IFS="$IFS"
|
|
||||||
IFS=";"
|
|
||||||
CMAKE_SBT="${deqp_build_targets[*]}"
|
|
||||||
IFS="$OLD_IFS"
|
|
||||||
|
|
||||||
pushd /deqp-$deqp_api
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=android \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
|
||||||
ninja modules/egl/deqp-egl
|
|
||||||
mv modules/egl/deqp-egl{,-android}
|
|
||||||
else
|
|
||||||
# When including EGL/X11 testing, do that build first and save off its
|
|
||||||
# deqp-egl binary.
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=x11_egl_glx \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
|
||||||
ninja modules/egl/deqp-egl
|
|
||||||
mv modules/egl/deqp-egl{,-x11}
|
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=wayland \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
|
||||||
ninja modules/egl/deqp-egl
|
|
||||||
mv modules/egl/deqp-egl{,-wayland}
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
-DDEQP_TARGET=${DEQP_TARGET} \
|
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
-DSELECTED_BUILD_TARGETS="${CMAKE_SBT}" \
|
$EXTRA_CMAKE_ARGS
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
ninja
|
||||||
|
|
||||||
# Make sure `default` doesn't silently stop detecting one of the platforms we care about
|
mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl
|
||||||
if [ "${DEQP_TARGET}" = 'default' ]; then
|
|
||||||
grep -q DEQP_SUPPORT_WAYLAND=1 build.ninja
|
|
||||||
grep -q DEQP_SUPPORT_X11=1 build.ninja
|
|
||||||
grep -q DEQP_SUPPORT_XCB=1 build.ninja
|
|
||||||
fi
|
|
||||||
|
|
||||||
ninja "${deqp_build_targets[@]}"
|
# Copy out the mustpass lists we want.
|
||||||
|
mkdir /deqp/mustpass
|
||||||
|
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
|
||||||
|
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
|
||||||
|
>> /deqp/mustpass/vk-master.txt
|
||||||
|
done
|
||||||
|
|
||||||
if [ "$DEQP_API" != tools ]; then
|
cp \
|
||||||
# Copy out the mustpass lists we want.
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
|
||||||
mkdir -p mustpass
|
/deqp/mustpass/.
|
||||||
|
cp \
|
||||||
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt \
|
||||||
|
/deqp/mustpass/.
|
||||||
|
cp \
|
||||||
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-master.txt \
|
||||||
|
/deqp/mustpass/.
|
||||||
|
cp \
|
||||||
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
|
||||||
|
/deqp/mustpass/.
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then
|
# Save *some* executor utils, but otherwise strip things down
|
||||||
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
|
# to reduct deqp build size:
|
||||||
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
|
mkdir /deqp/executor.save
|
||||||
>> mustpass/vk-main.txt
|
cp /deqp/executor/testlog-to-* /deqp/executor.save
|
||||||
done
|
rm -rf /deqp/executor
|
||||||
fi
|
mv /deqp/executor.save /deqp/executor
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GL' ]; then
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \
|
|
||||||
mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \
|
|
||||||
mustpass/
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \
|
|
||||||
mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \
|
|
||||||
mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \
|
|
||||||
mustpass/
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Compress the caselists, since Vulkan's in particular are gigantic; higher
|
|
||||||
# compression levels provide no real measurable benefit.
|
|
||||||
zstd -1 --rm mustpass/*.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$DEQP_API" = tools ]; then
|
|
||||||
# Save *some* executor utils, but otherwise strip things down
|
|
||||||
# to reduct deqp build size:
|
|
||||||
mv executor/testlog-to-* .
|
|
||||||
rm -rf executor
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
|
# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
|
||||||
rm -rf external/**/mustpass/
|
rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
|
||||||
rm -rf external/vulkancts/modules/vulkan/vk-main*
|
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-master*
|
||||||
rm -rf external/vulkancts/modules/vulkan/vk-default
|
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default
|
||||||
|
|
||||||
rm -rf external/openglcts/modules/cts-runner
|
rm -rf /deqp/external/openglcts/modules/cts-runner
|
||||||
rm -rf modules/internal
|
rm -rf /deqp/modules/internal
|
||||||
rm -rf execserver
|
rm -rf /deqp/execserver
|
||||||
rm -rf framework
|
rm -rf /deqp/framework
|
||||||
find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \;
|
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
|
||||||
if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then
|
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
||||||
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
||||||
fi
|
${STRIP_CMD:-strip} modules/*/deqp-*
|
||||||
if [ "${DEQP_API}" = 'GL' ] || [ "${DEQP_API}" = 'GLES' ]; then
|
du -sh *
|
||||||
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
rm -rf /VK-GL-CTS
|
||||||
fi
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
${STRIP_CMD:-strip} modules/*/deqp-*
|
|
||||||
fi
|
|
||||||
du -sh ./*
|
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end deqp-$deqp_api
|
|
||||||
|
@@ -1,19 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
uncollapsed_section_start directx-headers "Building directx-headers"
|
|
||||||
|
|
||||||
git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1
|
|
||||||
pushd DirectX-Headers
|
|
||||||
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-}
|
|
||||||
meson install -C build
|
|
||||||
popd
|
|
||||||
rm -rf DirectX-Headers
|
|
||||||
|
|
||||||
section_end directx-headers
|
|
@@ -1,40 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034 # Variables are used in scripts called from here
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# Install fluster in /usr/local.
|
|
||||||
|
|
||||||
FLUSTER_REVISION="e997402978f62428fffc8e5a4a709690d9ca9bc5"
|
|
||||||
|
|
||||||
git clone https://github.com/fluendo/fluster.git --single-branch --no-checkout
|
|
||||||
|
|
||||||
pushd fluster || exit
|
|
||||||
git checkout ${FLUSTER_REVISION}
|
|
||||||
popd || exit
|
|
||||||
|
|
||||||
if [ "${SKIP_UPDATE_FLUSTER_VECTORS}" != 1 ]; then
|
|
||||||
# Download the necessary vectors: H264, H265 and VP9
|
|
||||||
# When updating FLUSTER_REVISION, make sure to update the vectors if necessary or
|
|
||||||
# fluster-runner will report Missing results.
|
|
||||||
fluster/fluster.py download \
|
|
||||||
JVT-AVC_V1 JVT-FR-EXT JVT-MVC JVT-SVC_V1 \
|
|
||||||
JCT-VC-3D-HEVC JCT-VC-HEVC_V1 JCT-VC-MV-HEVC JCT-VC-RExt JCT-VC-SCC JCT-VC-SHVC \
|
|
||||||
VP9-TEST-VECTORS-HIGH VP9-TEST-VECTORS
|
|
||||||
|
|
||||||
# Build fluster vectors archive and upload it
|
|
||||||
tar --zstd -cf "vectors.tar.zst" fluster/resources/
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "vectors.tar.zst" \
|
|
||||||
"https://${S3_PATH_FLUSTER}/vectors.tar.zst"
|
|
||||||
|
|
||||||
touch /lava-files/done
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/done "https://${S3_PATH_FLUSTER}/done"
|
|
||||||
|
|
||||||
# Don't include the vectors in the rootfs
|
|
||||||
rm -fr fluster/resources/*
|
|
||||||
fi
|
|
||||||
|
|
||||||
mkdir -p "${ROOTFS}/usr/local/"
|
|
||||||
mv fluster "${ROOTFS}/usr/local/"
|
|
||||||
|
|
@@ -1,17 +1,10 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start fossilize "Building fossilize"
|
|
||||||
|
|
||||||
git clone https://github.com/ValveSoftware/Fossilize.git
|
git clone https://github.com/ValveSoftware/Fossilize.git
|
||||||
cd Fossilize
|
cd Fossilize
|
||||||
git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
|
git checkout 16fba1b8b5d9310126bb02323d7bae3227338461
|
||||||
git submodule update --init
|
git submodule update --init
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
@@ -19,5 +12,3 @@ cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
|
|||||||
ninja -C . install
|
ninja -C . install
|
||||||
cd ../..
|
cd ../..
|
||||||
rm -rf Fossilize
|
rm -rf Fossilize
|
||||||
|
|
||||||
section_end fossilize
|
|
||||||
|
@@ -1,10 +1,8 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start gfxreconstruct "Building gfxreconstruct"
|
GFXRECONSTRUCT_VERSION=5ed3caeecc46e976c4df31e263df8451ae176c26
|
||||||
|
|
||||||
GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae
|
|
||||||
|
|
||||||
git clone https://github.com/LunarG/gfxreconstruct.git \
|
git clone https://github.com/LunarG/gfxreconstruct.git \
|
||||||
--single-branch \
|
--single-branch \
|
||||||
@@ -19,5 +17,3 @@ cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:
|
|||||||
cmake --build _build --parallel --target tools/{replay,info}/install/strip
|
cmake --build _build --parallel --target tools/{replay,info}/install/strip
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end gfxreconstruct
|
|
||||||
|
16
.gitlab-ci/container/build-hang-detection.sh
Normal file
16
.gitlab-ci/container/build-hang-detection.sh
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
|
||||||
|
|
||||||
|
git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
|
||||||
|
pushd /parallel-deqp-runner
|
||||||
|
git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
|
||||||
|
meson . _build
|
||||||
|
ninja -C _build hang-detection
|
||||||
|
mkdir -p build/bin
|
||||||
|
install _build/hang-detection build/bin
|
||||||
|
strip build/bin/*
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
@@ -1,32 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091 # the path is created by the script
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
uncollapsed_section_start kdl "Building kdl"
|
|
||||||
|
|
||||||
KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f"
|
|
||||||
KDL_CHECKOUT_DIR="/tmp/ci-kdl.git"
|
|
||||||
|
|
||||||
mkdir -p ${KDL_CHECKOUT_DIR}
|
|
||||||
pushd ${KDL_CHECKOUT_DIR}
|
|
||||||
git init
|
|
||||||
git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git
|
|
||||||
git fetch --depth 1 origin ${KDL_REVISION}
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
popd
|
|
||||||
|
|
||||||
# Run venv in a subshell, so we don't accidentally leak the venv state into
|
|
||||||
# calling scripts
|
|
||||||
(
|
|
||||||
python3 -m venv /ci-kdl
|
|
||||||
source /ci-kdl/bin/activate &&
|
|
||||||
pushd ${KDL_CHECKOUT_DIR} &&
|
|
||||||
pip install -r requirements.txt &&
|
|
||||||
pip install . &&
|
|
||||||
popd
|
|
||||||
)
|
|
||||||
|
|
||||||
rm -rf ${KDL_CHECKOUT_DIR}
|
|
||||||
|
|
||||||
section_end kdl
|
|
51
.gitlab-ci/container/build-kernel.sh
Normal file
51
.gitlab-ci/container/build-kernel.sh
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
mkdir -p kernel
|
||||||
|
wget -qO- ${KERNEL_URL} | tar -xj --strip-components=1 -C kernel
|
||||||
|
pushd kernel
|
||||||
|
|
||||||
|
# The kernel doesn't like the gold linker (or the old lld in our debians).
|
||||||
|
# Sneak in some override symlinks during kernel build until we can update
|
||||||
|
# debian (they'll get blown away by the rm of the kernel dir at the end).
|
||||||
|
mkdir -p ld-links
|
||||||
|
for i in /usr/bin/*-ld /usr/bin/ld; do
|
||||||
|
i=`basename $i`
|
||||||
|
ln -sf /usr/bin/$i.bfd ld-links/$i
|
||||||
|
done
|
||||||
|
export PATH=`pwd`/ld-links:$PATH
|
||||||
|
|
||||||
|
export LOCALVERSION="`basename $KERNEL_URL`"
|
||||||
|
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
|
||||||
|
make ${KERNEL_IMAGE_NAME}
|
||||||
|
for image in ${KERNEL_IMAGE_NAME}; do
|
||||||
|
cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ -n ${DEVICE_TREES} ]]; then
|
||||||
|
make dtbs
|
||||||
|
cp ${DEVICE_TREES} /lava-files/.
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ${DEBIAN_ARCH} = "amd64" || ${DEBIAN_ARCH} = "arm64" ]]; then
|
||||||
|
make modules
|
||||||
|
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
|
||||||
|
make Image.lzma
|
||||||
|
mkimage \
|
||||||
|
-f auto \
|
||||||
|
-A arm \
|
||||||
|
-O linux \
|
||||||
|
-d arch/arm64/boot/Image.lzma \
|
||||||
|
-C lzma\
|
||||||
|
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
|
||||||
|
/lava-files/cheza-kernel
|
||||||
|
KERNEL_IMAGE_NAME+=" cheza-kernel"
|
||||||
|
fi
|
||||||
|
|
||||||
|
popd
|
||||||
|
rm -rf kernel
|
||||||
|
|
@@ -1,11 +1,8 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -uex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start libclc "Building libclc"
|
export LLVM_CONFIG="llvm-config-11"
|
||||||
|
|
||||||
export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
|
|
||||||
LLVM_TAG="llvmorg-15.0.7"
|
|
||||||
|
|
||||||
$LLVM_CONFIG --version
|
$LLVM_CONFIG --version
|
||||||
|
|
||||||
@@ -14,12 +11,12 @@ git config --global user.name "Mesa CI"
|
|||||||
git clone \
|
git clone \
|
||||||
https://github.com/llvm/llvm-project \
|
https://github.com/llvm/llvm-project \
|
||||||
--depth 1 \
|
--depth 1 \
|
||||||
-b "${LLVM_TAG}" \
|
-b llvmorg-12.0.0-rc3 \
|
||||||
/llvm-project
|
/llvm-project
|
||||||
|
|
||||||
mkdir /libclc
|
mkdir /libclc
|
||||||
pushd /libclc
|
pushd /libclc
|
||||||
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG="$LLVM_CONFIG" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
|
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
|
||||||
ninja
|
ninja
|
||||||
ninja install
|
ninja install
|
||||||
popd
|
popd
|
||||||
@@ -29,7 +26,5 @@ mkdir -p /usr/lib/clc
|
|||||||
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
|
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
|
||||||
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
|
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
|
||||||
|
|
||||||
du -sh ./*
|
du -sh *
|
||||||
rm -rf /libclc /llvm-project
|
rm -rf /libclc /llvm-project
|
||||||
|
|
||||||
section_end libclc
|
|
||||||
|
@@ -1,21 +1,14 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# Script used for Android and Fedora builds (Debian builds get their libdrm version
|
|
||||||
# from https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo - see PKG_REPO_REV)
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
set -uex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start libdrm "Building libdrm"
|
export LIBDRM_VERSION=libdrm-2.4.110
|
||||||
|
|
||||||
export LIBDRM_VERSION=libdrm-2.4.122
|
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
|
||||||
|
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
|
||||||
curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
|
cd $LIBDRM_VERSION
|
||||||
https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz
|
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
|
||||||
tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz
|
ninja -C build install
|
||||||
cd "$LIBDRM_VERSION"
|
|
||||||
meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled ${EXTRA_MESON_ARGS:-}
|
|
||||||
meson install -C build
|
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf "$LIBDRM_VERSION"
|
rm -rf $LIBDRM_VERSION
|
||||||
|
|
||||||
section_end libdrm
|
|
||||||
|
@@ -1,30 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
uncollapsed_section_start llvm-spirv "Building LLVM-SPIRV-Translator"
|
|
||||||
|
|
||||||
if [ "${LLVM_VERSION:?llvm version not set}" -ge 18 ]; then
|
|
||||||
VER="${LLVM_VERSION}.1.0"
|
|
||||||
else
|
|
||||||
VER="${LLVM_VERSION}.0.0"
|
|
||||||
fi
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${VER}.tar.gz"
|
|
||||||
tar -xvf "v${VER}.tar.gz" && rm "v${VER}.tar.gz"
|
|
||||||
|
|
||||||
mkdir "SPIRV-LLVM-Translator-${VER}/build"
|
|
||||||
pushd "SPIRV-LLVM-Translator-${VER}/build"
|
|
||||||
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
|
|
||||||
ninja
|
|
||||||
ninja install
|
|
||||||
# For some reason llvm-spirv is not installed by default
|
|
||||||
ninja llvm-spirv
|
|
||||||
cp tools/llvm-spirv/llvm-spirv /usr/bin/
|
|
||||||
popd
|
|
||||||
|
|
||||||
du -sh "SPIRV-LLVM-Translator-${VER}"
|
|
||||||
rm -rf "SPIRV-LLVM-Translator-${VER}"
|
|
||||||
|
|
||||||
section_end llvm-spirv
|
|
@@ -1,32 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# ALPINE_X86_64_BUILD_TAG
|
|
||||||
# DEBIAN_BASE_TAG
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
# FEDORA_X86_64_BUILD_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
uncollapsed_section_start mold "Building mold"
|
|
||||||
|
|
||||||
MOLD_VERSION="2.32.0"
|
|
||||||
|
|
||||||
git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
|
|
||||||
pushd mold
|
|
||||||
|
|
||||||
cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
|
|
||||||
cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}"
|
|
||||||
cmake --install . --strip
|
|
||||||
|
|
||||||
# Always use mold from now on
|
|
||||||
find /usr/bin \( -name '*-ld' -o -name 'ld' \) \
|
|
||||||
-exec ln -sf /usr/local/bin/ld.mold {} \; \
|
|
||||||
-exec ls -l {} +
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf mold
|
|
||||||
|
|
||||||
section_end mold
|
|
@@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
|
|
||||||
set -ex -o pipefail
|
|
||||||
|
|
||||||
uncollapsed_section_start ninetests "Building Nine tests"
|
|
||||||
|
|
||||||
### Careful editing anything below this line
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
|
||||||
git config --global user.name "Mesa CI"
|
|
||||||
git clone https://github.com/axeldavy/Xnine.git /Xnine
|
|
||||||
mkdir /Xnine/build
|
|
||||||
pushd /Xnine/build
|
|
||||||
git checkout c64753d224c08006bcdcfa7880ada826f27164b1
|
|
||||||
|
|
||||||
cmake .. -DBUILD_TESTS=1 -DWITH_DRI3=1 -DD3DADAPTER9_LOCATION=/install/lib/d3d/d3dadapter9.so
|
|
||||||
make
|
|
||||||
|
|
||||||
mkdir -p /NineTests/
|
|
||||||
mv NineTests/NineTests /NineTests/
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf /Xnine
|
|
||||||
|
|
||||||
section_end ninetests
|
|
@@ -1,38 +1,23 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
uncollapsed_section_start piglit "Building piglit"
|
set -ex
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
REV="631b72944f56e688f56a08d26c8a9f3988801a08"
|
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
|
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
|
||||||
pushd /piglit
|
pushd /piglit
|
||||||
git checkout "$REV"
|
git checkout b2c9d8f56b45d79f804f4cb5ac62520f0edd8988
|
||||||
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
|
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
|
||||||
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS ${EXTRA_CMAKE_ARGS:-}
|
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
|
||||||
ninja ${PIGLIT_BUILD_TARGETS:-}
|
ninja $PIGLIT_BUILD_TARGETS
|
||||||
find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) \
|
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
|
||||||
! -name 'include_test.h' -exec rm -rf {} \;
|
|
||||||
rm -rf target_api
|
rm -rf target_api
|
||||||
if [ "${PIGLIT_BUILD_TARGETS:-}" = "piglit_replayer" ]; then
|
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
|
||||||
find . -depth \
|
find ! -regex "^\.$" \
|
||||||
! -regex "^\.$" \
|
|
||||||
! -regex "^\.\/piglit.*" \
|
! -regex "^\.\/piglit.*" \
|
||||||
! -regex "^\.\/framework.*" \
|
! -regex "^\.\/framework.*" \
|
||||||
! -regex "^\.\/bin$" \
|
! -regex "^\.\/bin$" \
|
||||||
! -regex "^\.\/bin\/replayer\.py" \
|
! -regex "^\.\/bin\/replayer\.py" \
|
||||||
! -regex "^\.\/templates.*" \
|
! -regex "^\.\/templates.*" \
|
||||||
! -regex "^\.\/tests$" \
|
! -regex "^\.\/tests$" \
|
||||||
! -regex "^\.\/tests\/replay\.py" \
|
! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
|
||||||
-exec rm -rf {} \; 2>/dev/null
|
|
||||||
fi
|
fi
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end piglit
|
|
||||||
|
@@ -5,27 +5,20 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start rust "Building Rust toolchain"
|
|
||||||
|
|
||||||
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
|
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
|
||||||
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
|
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
|
||||||
# are just available to all build jobs.
|
# are just available to all build jobs.
|
||||||
mkdir -p "$HOME"/.cargo
|
mkdir -p $HOME/.cargo
|
||||||
ln -s /usr/local/bin "$HOME"/.cargo/bin
|
ln -s /usr/local/bin $HOME/.cargo/bin
|
||||||
|
|
||||||
# Pick a specific snapshot from rustup so the compiler doesn't drift on us.
|
|
||||||
RUST_VERSION=1.78.0-2024-05-02
|
|
||||||
|
|
||||||
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
|
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
|
||||||
# version of the compiler, rather than whatever the container's Debian comes
|
# version of the compiler, rather than whatever the container's Debian comes
|
||||||
# with.
|
# with.
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
#
|
||||||
--proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \
|
# Pick the rust compiler (1.48) available in Debian stable, and pick a specific
|
||||||
--default-toolchain $RUST_VERSION \
|
# snapshot from rustup so the compiler doesn't drift on us.
|
||||||
--profile minimal \
|
wget https://sh.rustup.rs -O - | \
|
||||||
-y
|
sh -s -- -y --default-toolchain 1.49.0-2020-12-31
|
||||||
|
|
||||||
rustup component add clippy rustfmt
|
|
||||||
|
|
||||||
# Set up a config script for cross compiling -- cargo needs your system cc for
|
# Set up a config script for cross compiling -- cargo needs your system cc for
|
||||||
# linking in cross builds, but doesn't know what you want to use for system cc.
|
# linking in cross builds, but doesn't know what you want to use for system cc.
|
||||||
@@ -36,5 +29,3 @@ linker = "arm-linux-gnueabihf-gcc"
|
|||||||
[target.aarch64-unknown-linux-gnu]
|
[target.aarch64-unknown-linux-gnu]
|
||||||
linker = "aarch64-linux-gnu-gcc"
|
linker = "aarch64-linux-gnu-gcc"
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
section_end rust
|
|
||||||
|
@@ -1,18 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
uncollapsed_section_start shader-db "Building shader-db"
|
|
||||||
|
|
||||||
pushd /usr/local
|
|
||||||
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
|
|
||||||
rm -rf shader-db/.git
|
|
||||||
cd shader-db
|
|
||||||
make
|
|
||||||
popd
|
|
||||||
|
|
||||||
section_end shader-db
|
|
@@ -1,40 +1,27 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# SPDX-License-Identifier: MIT
|
|
||||||
#
|
#
|
||||||
# Copyright © 2022 Collabora Limited
|
# Copyright (C) 2022 Collabora Limited
|
||||||
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
|
# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
|
||||||
#
|
#
|
||||||
# When changing this file, you need to bump the following
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
# KERNEL_ROOTFS_TAG
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
# SOFTWARE.
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
uncollapsed_section_start skqp "Building skqp"
|
|
||||||
|
|
||||||
SKQP_BRANCH=android-cts-12.1_r5
|
|
||||||
|
|
||||||
SCRIPT_DIR="$(pwd)/.gitlab-ci/container"
|
|
||||||
SKQP_PATCH_DIR="${SCRIPT_DIR}/patches"
|
|
||||||
BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
|
|
||||||
|
|
||||||
case "$DEBIAN_ARCH" in
|
|
||||||
amd64)
|
|
||||||
SKQP_ARCH=x64
|
|
||||||
;;
|
|
||||||
armhf)
|
|
||||||
SKQP_ARCH=arm
|
|
||||||
;;
|
|
||||||
arm64)
|
|
||||||
SKQP_ARCH=arm64
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
|
|
||||||
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
|
|
||||||
SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp}
|
|
||||||
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
|
|
||||||
SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms)
|
|
||||||
|
|
||||||
create_gn_args() {
|
create_gn_args() {
|
||||||
# gn can be configured to cross-compile skia and its tools
|
# gn can be configured to cross-compile skia and its tools
|
||||||
@@ -54,10 +41,24 @@ download_skia_source() {
|
|||||||
# Skia cloned from https://android.googlesource.com/platform/external/skqp
|
# Skia cloned from https://android.googlesource.com/platform/external/skqp
|
||||||
# has all needed assets tracked on git-fs
|
# has all needed assets tracked on git-fs
|
||||||
SKQP_REPO=https://android.googlesource.com/platform/external/skqp
|
SKQP_REPO=https://android.googlesource.com/platform/external/skqp
|
||||||
|
SKQP_BRANCH=android-cts-11.0_r7
|
||||||
|
|
||||||
git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
|
git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
SCRIPT_DIR=$(realpath "$(dirname "$0")")
|
||||||
|
SKQP_PATCH_DIR="${SCRIPT_DIR}"
|
||||||
|
BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
|
||||||
|
|
||||||
|
SKQP_ARCH=${SKQP_ARCH:-x64}
|
||||||
|
SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
|
||||||
|
SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
|
||||||
|
SKQP_INSTALL_DIR=/skqp
|
||||||
|
SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
|
||||||
|
SKQP_BINARIES=(skqp)
|
||||||
|
|
||||||
download_skia_source
|
download_skia_source
|
||||||
|
|
||||||
pushd "${SKIA_DIR}"
|
pushd "${SKIA_DIR}"
|
||||||
@@ -66,12 +67,6 @@ pushd "${SKIA_DIR}"
|
|||||||
cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
|
cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
|
||||||
patch -p1
|
patch -p1
|
||||||
|
|
||||||
# hack for skqp see the clang
|
|
||||||
pushd /usr/bin/
|
|
||||||
ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang" clang
|
|
||||||
ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang++" clang++
|
|
||||||
popd
|
|
||||||
|
|
||||||
# Fetch some needed build tools needed to build skia/skqp.
|
# Fetch some needed build tools needed to build skia/skqp.
|
||||||
# Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS
|
# Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS
|
||||||
# directory.
|
# directory.
|
||||||
@@ -100,5 +95,3 @@ popd
|
|||||||
rm -Rf "${SKIA_DIR}"
|
rm -Rf "${SKIA_DIR}"
|
||||||
|
|
||||||
set +ex
|
set +ex
|
||||||
|
|
||||||
section_end skqp
|
|
||||||
|
@@ -1,12 +1,7 @@
|
|||||||
cc = "clang"
|
cc = "clang"
|
||||||
cxx = "clang++"
|
cxx = "clang++"
|
||||||
|
|
||||||
extra_cflags = [
|
extra_cflags = [ "-DSK_ENABLE_DUMP_GPU", "-DSK_BUILD_FOR_SKQP" ]
|
||||||
"-Wno-error",
|
|
||||||
|
|
||||||
"-DSK_ENABLE_DUMP_GPU",
|
|
||||||
"-DSK_BUILD_FOR_SKQP"
|
|
||||||
]
|
|
||||||
extra_cflags_cc = [
|
extra_cflags_cc = [
|
||||||
"-Wno-error",
|
"-Wno-error",
|
||||||
|
|
||||||
@@ -27,18 +22,6 @@ extra_cflags_cc = [
|
|||||||
"-Wno-suggest-destructor-override",
|
"-Wno-suggest-destructor-override",
|
||||||
"-Wno-return-std-move-in-c++11",
|
"-Wno-return-std-move-in-c++11",
|
||||||
"-Wno-extra-semi-stmt",
|
"-Wno-extra-semi-stmt",
|
||||||
"-Wno-reserved-identifier",
|
|
||||||
"-Wno-bitwise-instead-of-logical",
|
|
||||||
"-Wno-reserved-identifier",
|
|
||||||
"-Wno-psabi",
|
|
||||||
"-Wno-unused-but-set-variable",
|
|
||||||
"-Wno-sizeof-array-div",
|
|
||||||
"-Wno-string-concatenation",
|
|
||||||
"-Wno-unsafe-buffer-usage",
|
|
||||||
"-Wno-switch-default",
|
|
||||||
"-Wno-cast-function-type-strict",
|
|
||||||
"-Wno-format",
|
|
||||||
"-Wno-enum-constexpr-conversion",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
cc_wrapper = "ccache"
|
cc_wrapper = "ccache"
|
||||||
|
@@ -1,29 +1,17 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start va-tools "Building va-tools"
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
git config --global user.email "mesa@example.com"
|
||||||
git config --global user.name "Mesa CI"
|
git config --global user.name "Mesa CI"
|
||||||
|
|
||||||
git clone \
|
git clone \
|
||||||
https://github.com/intel/libva-utils.git \
|
https://github.com/intel/libva-utils.git \
|
||||||
-b 2.18.1 \
|
-b 2.13.0 \
|
||||||
--depth 1 \
|
--depth 1 \
|
||||||
/va-utils
|
/va-utils
|
||||||
|
|
||||||
pushd /va-utils
|
pushd /va-utils
|
||||||
# Too old libva in Debian 11. TODO: when this PR gets in, refer to the patch.
|
meson build -D tests=true -Dprefix=/va $EXTRA_MESON_ARGS
|
||||||
curl -L https://github.com/intel/libva-utils/pull/329.patch | git am
|
ninja -C build install
|
||||||
|
|
||||||
meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-}
|
|
||||||
meson install -C build
|
|
||||||
popd
|
popd
|
||||||
rm -rf /va-utils
|
rm -rf /va-utils
|
||||||
|
|
||||||
section_end va-tools
|
|
||||||
|
@@ -1,23 +1,18 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start vkd3d-proton "Building vkd3d-proton"
|
VKD3D_PROTON_COMMIT="5b73139f182d86cd58a757e4b5f0d4cfad96d319"
|
||||||
|
|
||||||
VKD3D_PROTON_COMMIT="b121e6d746341e0aaba7663e3d85f3194e8e20e1"
|
|
||||||
|
|
||||||
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
|
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
|
||||||
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
|
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
|
||||||
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build"
|
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION"
|
||||||
|
|
||||||
function build_arch {
|
function build_arch {
|
||||||
local arch="$1"
|
local arch="$1"
|
||||||
|
shift
|
||||||
|
|
||||||
meson setup \
|
meson "$@" \
|
||||||
-Denable_tests=true \
|
-Denable_tests=true \
|
||||||
--buildtype release \
|
--buildtype release \
|
||||||
--prefix "$VKD3D_PROTON_DST_DIR" \
|
--prefix "$VKD3D_PROTON_DST_DIR" \
|
||||||
@@ -38,14 +33,7 @@ git submodule update --init --recursive
|
|||||||
git submodule update --recursive
|
git submodule update --recursive
|
||||||
build_arch 64
|
build_arch 64
|
||||||
build_arch 86
|
build_arch 86
|
||||||
mkdir "$VKD3D_PROTON_DST_DIR/tests"
|
|
||||||
cp \
|
|
||||||
"tests/test-runner.sh" \
|
|
||||||
"tests/d3d12_tests.h" \
|
|
||||||
"$VKD3D_PROTON_DST_DIR/tests/"
|
|
||||||
popd
|
popd
|
||||||
|
|
||||||
rm -rf "$VKD3D_PROTON_BUILD_DIR"
|
rm -rf "$VKD3D_PROTON_BUILD_DIR"
|
||||||
rm -rf "$VKD3D_PROTON_SRC_DIR"
|
rm -rf "$VKD3D_PROTON_SRC_DIR"
|
||||||
|
|
||||||
section_end vkd3d-proton
|
|
||||||
|
@@ -1,25 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
uncollapsed_section_start vulkan-validation "Building Vulkan validation layers"
|
|
||||||
|
|
||||||
VALIDATION_TAG="snapshot-2024wk39"
|
|
||||||
|
|
||||||
git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
|
|
||||||
pushd Vulkan-ValidationLayers
|
|
||||||
# we don't need to build SPIRV-Tools tools
|
|
||||||
sed -i scripts/known_good.json -e 's/SPIRV_SKIP_EXECUTABLES=OFF/SPIRV_SKIP_EXECUTABLES=ON/'
|
|
||||||
python3 scripts/update_deps.py --dir external --config release --generator Ninja --optional tests
|
|
||||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build
|
|
||||||
ninja -C build
|
|
||||||
cmake --install build --strip
|
|
||||||
popd
|
|
||||||
rm -rf Vulkan-ValidationLayers
|
|
||||||
|
|
||||||
section_end vulkan-validation
|
|
@@ -1,36 +1,22 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
set -uex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start wayland "Building Wayland"
|
export LIBWAYLAND_VERSION="1.18.0"
|
||||||
|
export WAYLAND_PROTOCOLS_VERSION="1.24"
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
# DEBIAN_TEST_VK_TAG
|
|
||||||
# FEDORA_X86_64_BUILD_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
export LIBWAYLAND_VERSION="1.21.0"
|
|
||||||
export WAYLAND_PROTOCOLS_VERSION="1.38"
|
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/wayland/wayland
|
git clone https://gitlab.freedesktop.org/wayland/wayland
|
||||||
cd wayland
|
cd wayland
|
||||||
git checkout "$LIBWAYLAND_VERSION"
|
git checkout "$LIBWAYLAND_VERSION"
|
||||||
meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build ${EXTRA_MESON_ARGS:-}
|
meson -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build
|
||||||
meson install -C _build
|
ninja -C _build install
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf wayland
|
rm -rf wayland
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
|
git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
|
||||||
cd wayland-protocols
|
cd wayland-protocols
|
||||||
git checkout "$WAYLAND_PROTOCOLS_VERSION"
|
git checkout "$WAYLAND_PROTOCOLS_VERSION"
|
||||||
meson setup -Dtests=false _build ${EXTRA_MESON_ARGS:-}
|
meson _build
|
||||||
meson install -C _build
|
ninja -C _build install
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf wayland-protocols
|
rm -rf wayland-protocols
|
||||||
|
|
||||||
section_end wayland
|
|
||||||
|
@@ -1,24 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, check if the *_BUIL_TAG tags in
|
|
||||||
# .gitlab-ci/image-tags.yml need updating.
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
# Early check for required env variables, relies on `set -u`
|
|
||||||
: "$S3_JWT_FILE_SCRIPT"
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
echo "usage: $(basename "$0") <CONTAINER_CI_JOB_NAME>" 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
CONTAINER_CI_JOB_NAME="$1"
|
|
||||||
|
|
||||||
# Tasks to perform before executing the script of a container job
|
|
||||||
eval "$S3_JWT_FILE_SCRIPT"
|
|
||||||
unset S3_JWT_FILE_SCRIPT
|
|
||||||
|
|
||||||
trap 'rm -f ${S3_JWT_FILE}' EXIT INT TERM
|
|
||||||
|
|
||||||
bash ".gitlab-ci/container/${CONTAINER_CI_JOB_NAME}.sh"
|
|
@@ -1,14 +1,10 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/sh
|
||||||
|
|
||||||
if test -f /etc/debian_version; then
|
if test -f /etc/debian_version; then
|
||||||
apt-get autoremove -y --purge
|
apt-get autoremove -y --purge
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Clean up any build cache
|
# Clean up any build cache for rust.
|
||||||
rm -rf /root/.cache
|
|
||||||
rm -rf /root/.cargo
|
|
||||||
rm -rf /.cargo
|
rm -rf /.cargo
|
||||||
|
|
||||||
if test -x /usr/bin/ccache; then
|
ccache --show-stats
|
||||||
ccache --show-stats
|
|
||||||
fi
|
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user