Compare commits
341 Commits
submit/mes
...
mesa-24.3.
Author | SHA1 | Date | |
---|---|---|---|
|
769e51468b | ||
|
82728bb500 | ||
|
9999791582 | ||
|
01df20446e | ||
|
be24b60662 | ||
|
a4d35d8263 | ||
|
e791098f2e | ||
|
acc1e9efab | ||
|
be4ab526b2 | ||
|
993dbad958 | ||
|
26b10842a9 | ||
|
75a0bde863 | ||
|
7166b5adc9 | ||
|
4724adae9c | ||
|
9401fd9e7e | ||
|
ec927daa99 | ||
|
267e70ecd1 | ||
|
d774957ff6 | ||
|
75539eac3e | ||
|
95a2cc6a7a | ||
|
9e4fd4793a | ||
|
d9a841d799 | ||
|
ded07d7161 | ||
|
7f3b2c48cf | ||
|
b1d574c8f3 | ||
|
1eb956e7f9 | ||
|
6ea2997555 | ||
|
0e7cf6c7b5 | ||
|
d1c63709cf | ||
|
ea2f4877ab | ||
|
b86b4b85e4 | ||
|
db73e1121d | ||
|
1022c4589c | ||
|
15b82ea898 | ||
|
18a6195e48 | ||
|
cdca99fe84 | ||
|
e54b547d4b | ||
|
3eb1489bb1 | ||
|
f569f7bf4b | ||
|
7c3ecb9c69 | ||
|
49cab3dfb0 | ||
|
199ac8104b | ||
|
1c29ad0cef | ||
|
b68fd3acef | ||
|
ffdf10d308 | ||
|
b7e7e9a904 | ||
|
72fc2fb2f6 | ||
|
9a8f411ec4 | ||
|
6314df61f8 | ||
|
651beca687 | ||
|
7f9cf7fcdd | ||
|
e7770765f7 | ||
|
798cb57680 | ||
|
f65e49341b | ||
|
6cb52a8838 | ||
|
e275442e8f | ||
|
949faebea4 | ||
|
bca4ba9866 | ||
|
0d5f54b5ca | ||
|
e8fdb9fe5c | ||
|
17381f3190 | ||
|
b76840419d | ||
|
6f752d3d01 | ||
|
eb83b614c9 | ||
|
32a93c197e | ||
|
ded1ec58f7 | ||
|
2f364fa240 | ||
|
fa57ff1528 | ||
|
0a00c8471f | ||
|
e90c776fa0 | ||
|
6384ba4209 | ||
|
4c49b19a8c | ||
|
9be97d8dec | ||
|
ac434a4ff1 | ||
|
a2e52a3669 | ||
|
467783980a | ||
|
40cb39104d | ||
|
c4aa2be477 | ||
|
4ea0b2fd5c | ||
|
cd01162e81 | ||
|
d72bf5b122 | ||
|
e4276eca9b | ||
|
68143c7b65 | ||
|
1b706b4c90 | ||
|
e3a186a3ce | ||
|
361316b1ea | ||
|
6e0fce2a6e | ||
|
f63534c991 | ||
|
789abcc23e | ||
|
f688dfe665 | ||
|
2cbe3468a3 | ||
|
7bea0dd536 | ||
|
cc58288510 | ||
|
29c400ca56 | ||
|
c1ecf08e0d | ||
|
6d33b742b0 | ||
|
e716ff323b | ||
|
33337b54b0 | ||
|
5426f640f1 | ||
|
0427d36334 | ||
|
7a66881837 | ||
|
0683bbbf0d | ||
|
e3b1a93aaa | ||
|
aaf540f49e | ||
|
bb5927c31f | ||
|
8e15c99523 | ||
|
9a32249031 | ||
|
5457b5af74 | ||
|
0d97c42d7d | ||
|
18f5161fe9 | ||
|
cfff580b02 | ||
|
7ca563d522 | ||
|
39dab0efa3 | ||
|
99a3dbef34 | ||
|
9d2ec701d9 | ||
|
32a267518c | ||
|
e806d032e1 | ||
|
d96ad7736d | ||
|
9ba56c1592 | ||
|
5018676dfd | ||
|
9f193474c5 | ||
|
fdc4e04ce2 | ||
|
ab7d0a695c | ||
|
c7e4ee2e8e | ||
|
0cfd615388 | ||
|
ec31d9c236 | ||
|
2fffa079ac | ||
|
8fdb8974c7 | ||
|
a9a3ab4d4e | ||
|
607de2d472 | ||
|
fdc238cff0 | ||
|
e850b2c74a | ||
|
ee4c664d4d | ||
|
bbd060b35c | ||
|
9014c7e74f | ||
|
8135a7614d | ||
|
fa6e9e6587 | ||
|
f05bcf460a | ||
|
21e9a5e373 | ||
|
f5eb332773 | ||
|
6a946ade04 | ||
|
72c1e65d44 | ||
|
05e5d31b8b | ||
|
85b5cd234b | ||
|
4b47a5491c | ||
|
720b64c8d9 | ||
|
e3fc6715d7 | ||
|
7c16dfba50 | ||
|
cab3f06713 | ||
|
f565dcdf54 | ||
|
34c6edb029 | ||
|
9a73b89f28 | ||
|
8f7b85e8cd | ||
|
0f01543ac1 | ||
|
ee78db8c14 | ||
|
8f7216d53b | ||
|
d62312f836 | ||
|
e92e02a71f | ||
|
1a98685055 | ||
|
2ba6a1f300 | ||
|
851f519db6 | ||
|
00c60bd69e | ||
|
7bce0c8259 | ||
|
5feac905d0 | ||
|
17c7e04e8a | ||
|
37d72c978f | ||
|
017bc0cc2b | ||
|
17da2666c7 | ||
|
0f1abde4cc | ||
|
0d6238b6ab | ||
|
d8ba1e4143 | ||
|
03c9e6c71d | ||
|
757f3fb51b | ||
|
8b0a86d639 | ||
|
ee0d63011c | ||
|
7c44f03d7f | ||
|
046418948d | ||
|
532157abbd | ||
|
3cffcc3da7 | ||
|
f750108aa9 | ||
|
10f599a8b3 | ||
|
9319df3b07 | ||
|
dc04f5ca28 | ||
|
a72ba2fc47 | ||
|
f7f217f093 | ||
|
6e98298124 | ||
|
77248fa11f | ||
|
54adfc351f | ||
|
c815d651b8 | ||
|
d0586e16e6 | ||
|
4dea7e6bc8 | ||
|
69e950d853 | ||
|
1b79b681df | ||
|
dade5eab3f | ||
|
dc89d68118 | ||
|
06de469e7c | ||
|
bd30ab0682 | ||
|
be48136924 | ||
|
5019b21f0e | ||
|
9078a8becf | ||
|
4c06515892 | ||
|
12dc5d182e | ||
|
df49f29a50 | ||
|
95dd4c1992 | ||
|
6865204891 | ||
|
8b1bd4ed76 | ||
|
30a6487353 | ||
|
236b71542e | ||
|
935fd7e12c | ||
|
5771727e70 | ||
|
36b6f3ade4 | ||
|
dcb37073e9 | ||
|
108ab09453 | ||
|
bd411c4c5f | ||
|
7c57de1282 | ||
|
3d96958812 | ||
|
ff59f1f58e | ||
|
58dcff008b | ||
|
f2d10dc6bc | ||
|
c2f0bb8196 | ||
|
984f4c7a32 | ||
|
f20caba05f | ||
|
26429556c5 | ||
|
b5b7b0957b | ||
|
d68c6558bd | ||
|
e27ac0d686 | ||
|
e497a5ef40 | ||
|
48e5c246c4 | ||
|
afcbe85940 | ||
|
f563ce8c7e | ||
|
617591e708 | ||
|
428f2bea18 | ||
|
cd8c4e92cf | ||
|
c489be4522 | ||
|
2cc0be2044 | ||
|
58ee8b5576 | ||
|
ae93a2c95e | ||
|
58cf949137 | ||
|
199b3d07d2 | ||
|
3aae04545c | ||
|
5fb44f6df0 | ||
|
2938a1bf14 | ||
|
c2701a90c6 | ||
|
5770bac174 | ||
|
aac7787a79 | ||
|
49de8e26a2 | ||
|
2a4deafd01 | ||
|
1301c75216 | ||
|
a527f3ff23 | ||
|
5a60f9965d | ||
|
09a3504485 | ||
|
3ef5b47152 | ||
|
e92d808c35 | ||
|
9448cd6071 | ||
|
b1f8e15781 | ||
|
09ad1fbdf2 | ||
|
2b742dd9c8 | ||
|
9f2e62e2d7 | ||
|
29cf77957b | ||
|
bbf9d3fe5a | ||
|
3887c8643a | ||
|
f1f246cfda | ||
|
ae339fb113 | ||
|
0bf0f66c9e | ||
|
faacd80403 | ||
|
8790de10c9 | ||
|
4af6a47426 | ||
|
b3cb911eff | ||
|
ca6cccfe24 | ||
|
5ad7548183 | ||
|
5cb4c5bd47 | ||
|
185ae19141 | ||
|
49e5090f79 | ||
|
bc1c6a15c0 | ||
|
5a8be9c62b | ||
|
e3f886ac15 | ||
|
a3543ebc8d | ||
|
f35c690b12 | ||
|
7dc34f1147 | ||
|
8e45bd6365 | ||
|
9728a9075c | ||
|
f4d83eb508 | ||
|
3567dac750 | ||
|
d857c4a418 | ||
|
a3a064b92c | ||
|
de9faec619 | ||
|
1c6b2f701c | ||
|
436e5c06b9 | ||
|
922a339d91 | ||
|
b4b12c6708 | ||
|
232c6b2d8e | ||
|
1e9229fd09 | ||
|
3c450c640a | ||
|
14f9d6456a | ||
|
72271ed3fc | ||
|
bd8fb8a930 | ||
|
743b2fdf8e | ||
|
9c55d78353 | ||
|
c1517edde6 | ||
|
bab3391381 | ||
|
42822bbca2 | ||
|
ecc3f03d83 | ||
|
a725b1373e | ||
|
9116861d3c | ||
|
cbb58f2623 | ||
|
f6653b1f59 | ||
|
41af3ea120 | ||
|
439879abd3 | ||
|
c11a931703 | ||
|
4c41bb9bef | ||
|
1bc37bb465 | ||
|
e86386df89 | ||
|
e839ff344e | ||
|
3e45c3eec2 | ||
|
e7ebb97fdf | ||
|
97d974a3ad | ||
|
6c9587db99 | ||
|
dc8e19aede | ||
|
d185a4658e | ||
|
e3f3e315af | ||
|
27b2c2b869 | ||
|
a9f1c10a10 | ||
|
739c3615ce | ||
|
4a71355172 | ||
|
dd14b60b49 | ||
|
349687c73a | ||
|
67bd351553 | ||
|
eb34c059be | ||
|
6965aff4d1 | ||
|
ea9b3f928d | ||
|
7994534fe9 | ||
|
1e792b0933 | ||
|
08955d2ee8 | ||
|
8f53de4a5d | ||
|
baba2805ca | ||
|
7cef55b993 | ||
|
b856d0d3cc | ||
|
1ab129ba70 | ||
|
7dc84d1c96 | ||
|
93d5d587f5 | ||
|
85ba713d76 |
@@ -31,7 +31,7 @@ indent_size = 3
|
|||||||
[*.patch]
|
[*.patch]
|
||||||
trim_trailing_whitespace = false
|
trim_trailing_whitespace = false
|
||||||
|
|
||||||
[{meson.build,meson.options}]
|
[{meson.build,meson_options.txt}]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 2
|
indent_size = 2
|
||||||
|
|
||||||
|
@@ -65,12 +65,3 @@ c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402
|
|||||||
|
|
||||||
# ir3: Reformat source with clang-format
|
# ir3: Reformat source with clang-format
|
||||||
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54
|
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54
|
||||||
|
|
||||||
# ir3: reformat after refactoring in previous commit
|
|
||||||
8ae5b27ee0331a739d14b42e67586784d6840388
|
|
||||||
|
|
||||||
# ir3: don't use deprecated NIR_PASS_V anymore
|
|
||||||
2fedc82c0cc9d3fb2e54707b57941b79553b640c
|
|
||||||
|
|
||||||
# ir3: reformat after previous commit
|
|
||||||
7210054db8cfb445a8ccdeacfdcfecccf44fa266
|
|
||||||
|
2
.github/workflows/macos.yml
vendored
2
.github/workflows/macos.yml
vendored
@@ -42,7 +42,7 @@ jobs:
|
|||||||
[binaries]
|
[binaries]
|
||||||
llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
|
llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
|
||||||
EOL
|
EOL
|
||||||
$MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
|
$MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
|
||||||
- name: Build
|
- name: Build
|
||||||
run: $MESON_EXEC compile -C build
|
run: $MESON_EXEC compile -C build
|
||||||
- name: Test
|
- name: Test
|
||||||
|
291
.gitlab-ci.yml
291
.gitlab-ci.yml
@@ -30,73 +30,44 @@ workflow:
|
|||||||
# do not duplicate pipelines on merge pipelines
|
# do not duplicate pipelines on merge pipelines
|
||||||
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
|
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
|
||||||
when: never
|
when: never
|
||||||
# Tag pipelines are disabled as it's too late to run all the tests by
|
# merge pipeline
|
||||||
# then, the release has been made based on the staging pipelines results
|
|
||||||
- if: $CI_COMMIT_TAG
|
|
||||||
when: never
|
|
||||||
# Merge pipeline
|
|
||||||
- if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
|
- if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
variables:
|
variables:
|
||||||
MESA_CI_PERFORMANCE_ENABLED: 1
|
MESA_CI_PERFORMANCE_ENABLED: 1
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: priority:high
|
VALVE_INFRA_VANGOGH_JOB_PRIORITY: "" # Empty tags are ignored by gitlab
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: priority:high-kvm
|
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: priority:high-aarch64
|
|
||||||
CI_TRON_JOB_PRIORITY_TAG: "" # Empty tags are ignored by gitlab
|
|
||||||
JOB_PRIORITY: 75
|
JOB_PRIORITY: 75
|
||||||
# fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes
|
# fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes
|
||||||
DEQP_RUNNER_MAX_FAILS: 40
|
DEQP_RUNNER_MAX_FAILS: 40
|
||||||
# Post-merge pipeline
|
# post-merge pipeline
|
||||||
- if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
|
- if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
|
||||||
variables:
|
# Pre-merge pipeline
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: priority:high
|
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: priority:high-kvm
|
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: priority:high-aarch64
|
|
||||||
# Pre-merge pipeline (because merge pipelines are already caught above)
|
|
||||||
- if: &is-merge-request $CI_PIPELINE_SOURCE == "merge_request_event"
|
|
||||||
# Push to a branch on a fork
|
# Push to a branch on a fork
|
||||||
- if: &is-push-to-fork $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
|
- if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
|
||||||
# a pipeline running within the upstream project
|
# nightly pipeline
|
||||||
- if: &is-upstream-pipeline $CI_PROJECT_PATH == $FDO_UPSTREAM_REPO
|
|
||||||
# an MR pipeline running within the upstream project, usually true for
|
|
||||||
# those with the Developer role or above
|
|
||||||
- if: &is-upstream-mr-pipeline $CI_PROJECT_PATH == $FDO_UPSTREAM_REPO && $CI_PIPELINE_SOURCE == "merge_request_event"
|
|
||||||
# Nightly pipeline
|
|
||||||
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
|
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
|
||||||
variables:
|
variables:
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: priority:low
|
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: priority:low-kvm
|
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: priority:low-aarch64
|
|
||||||
JOB_PRIORITY: 45
|
|
||||||
# (some) nightly builds perform LTO, so they take much longer than the
|
# (some) nightly builds perform LTO, so they take much longer than the
|
||||||
# short timeout allowed in other pipelines.
|
# short timeout allowed in other pipelines.
|
||||||
# Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h
|
# Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h
|
||||||
BUILD_JOB_TIMEOUT_OVERRIDE: 0
|
BUILD_JOB_TIMEOUT_OVERRIDE: 0
|
||||||
# Pipeline for direct pushes to the default branch that bypassed the CI
|
# pipeline for direct pushes that bypassed the CI
|
||||||
- if: &is-push-to-upstream-default-branch $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
|
- if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
|
||||||
variables:
|
|
||||||
JOB_PRIORITY: 70
|
|
||||||
# Pipeline for direct pushes from release maintainer
|
|
||||||
- if: &is-push-to-upstream-staging-branch $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME =~ /^staging\//
|
|
||||||
variables:
|
variables:
|
||||||
JOB_PRIORITY: 70
|
JOB_PRIORITY: 70
|
||||||
|
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
FDO_UPSTREAM_REPO: mesa/mesa
|
FDO_UPSTREAM_REPO: mesa/mesa
|
||||||
MESA_TEMPLATES_COMMIT: &ci-templates-commit c6aeb16f86e32525fa630fb99c66c4f3e62fc3cb
|
MESA_TEMPLATES_COMMIT: &ci-templates-commit e195d80f35b45cc73668be3767b923fd76c70ed5
|
||||||
CI_PRE_CLONE_SCRIPT: |-
|
CI_PRE_CLONE_SCRIPT: |-
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
curl --silent --location --fail --retry-connrefused --retry 3 --retry-delay 10 \
|
wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
|
||||||
${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh | bash
|
bash download-git-cache.sh
|
||||||
|
rm download-git-cache.sh
|
||||||
set +o xtrace
|
set +o xtrace
|
||||||
S3_JWT_FILE: /s3_jwt
|
S3_JWT_FILE: /s3_jwt
|
||||||
S3_JWT_FILE_SCRIPT: |-
|
|
||||||
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
|
|
||||||
S3_JWT_FILE_SCRIPT= &&
|
|
||||||
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
|
|
||||||
S3_HOST: s3.freedesktop.org
|
S3_HOST: s3.freedesktop.org
|
||||||
# This bucket is used to fetch ANDROID prebuilts and images
|
|
||||||
S3_ANDROID_BUCKET: mesa-rootfs
|
|
||||||
# This bucket is used to fetch the kernel image
|
# This bucket is used to fetch the kernel image
|
||||||
S3_KERNEL_BUCKET: mesa-rootfs
|
S3_KERNEL_BUCKET: mesa-rootfs
|
||||||
# Bucket for git cache
|
# Bucket for git cache
|
||||||
@@ -107,8 +78,6 @@ variables:
|
|||||||
S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
|
S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
|
||||||
S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
|
S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
|
||||||
S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
|
S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
|
||||||
# Base path used for various artifacts
|
|
||||||
S3_BASE_PATH: "${S3_HOST}/${S3_KERNEL_BUCKET}"
|
|
||||||
# per-pipeline artifact storage on MinIO
|
# per-pipeline artifact storage on MinIO
|
||||||
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
|
PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
|
||||||
# per-job artifact storage on MinIO
|
# per-job artifact storage on MinIO
|
||||||
@@ -122,37 +91,29 @@ variables:
|
|||||||
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
|
ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
|
||||||
# Python scripts for structured logger
|
# Python scripts for structured logger
|
||||||
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
|
PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
|
||||||
# No point in continuing once the device is lost
|
# Drop once deqp-runner is upgraded to > 0.18.0
|
||||||
MESA_VK_ABORT_ON_DEVICE_LOSS: 1
|
MESA_VK_ABORT_ON_DEVICE_LOSS: 1
|
||||||
# Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
|
# Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
|
||||||
MESA_SPIRV_LOG_LEVEL: error
|
MESA_SPIRV_LOG_LEVEL: error
|
||||||
# Default priority for non-merge pipelines
|
# Default priority for non-merge pipelines
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: "" # Empty tags are ignored by gitlab
|
VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
|
|
||||||
FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
|
|
||||||
CI_TRON_JOB_PRIORITY_TAG: ci-tron:priority:low
|
|
||||||
JOB_PRIORITY: 50
|
JOB_PRIORITY: 50
|
||||||
DATA_STORAGE_PATH: data_storage
|
|
||||||
KERNEL_IMAGE_BASE: "https://$S3_HOST/$S3_KERNEL_BUCKET/$KERNEL_REPO/$KERNEL_TAG"
|
|
||||||
# Mesa-specific variables that shouldn't be forwarded to DUTs and crosvm
|
|
||||||
CI_EXCLUDE_ENV_VAR_REGEX: 'SCRIPTS_DIR|RESULTS_DIR'
|
|
||||||
|
|
||||||
CI_TRON_JOB_TEMPLATE_PROJECT: &ci-tron-template-project gfx-ci/ci-tron
|
|
||||||
CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit ddadab0006e43f1365cd30779f565b444a6538ee
|
|
||||||
CI_TRON_JOB_TEMPLATE_PROJECT_URL: "https://gitlab.freedesktop.org/$CI_TRON_JOB_TEMPLATE_PROJECT"
|
|
||||||
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
timeout: 1m # catch any jobs which don't specify a timeout
|
|
||||||
id_tokens:
|
id_tokens:
|
||||||
S3_JWT:
|
S3_JWT:
|
||||||
aud: https://s3.freedesktop.org
|
aud: https://s3.freedesktop.org
|
||||||
before_script:
|
before_script:
|
||||||
|
- |
|
||||||
|
if [ -z "${KERNEL_IMAGE_BASE:-}" ]; then
|
||||||
|
export KERNEL_IMAGE_BASE="https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${EXTERNAL_KERNEL_TAG:-$KERNEL_TAG}"
|
||||||
|
fi
|
||||||
- >
|
- >
|
||||||
export SCRIPTS_DIR=$(mktemp -d) &&
|
export SCRIPTS_DIR=$(mktemp -d) &&
|
||||||
curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
|
curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
|
||||||
. ${SCRIPTS_DIR}/setup-test-env.sh
|
. ${SCRIPTS_DIR}/setup-test-env.sh &&
|
||||||
- eval "$S3_JWT_FILE_SCRIPT"
|
echo -n "${S3_JWT}" > "${S3_JWT_FILE}" &&
|
||||||
|
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
|
||||||
|
|
||||||
after_script:
|
after_script:
|
||||||
# Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
|
# Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
|
||||||
@@ -181,41 +142,39 @@ stages:
|
|||||||
- build-only
|
- build-only
|
||||||
- code-validation
|
- code-validation
|
||||||
- amd
|
- amd
|
||||||
- amd-nightly
|
- amd-postmerge
|
||||||
- intel
|
- intel
|
||||||
- intel-nightly
|
- intel-postmerge
|
||||||
- nouveau
|
- nouveau
|
||||||
- nouveau-nightly
|
- nouveau-postmerge
|
||||||
- arm
|
- arm
|
||||||
- arm-nightly
|
- arm-postmerge
|
||||||
- broadcom
|
- broadcom
|
||||||
- broadcom-nightly
|
- broadcom-postmerge
|
||||||
- freedreno
|
- freedreno
|
||||||
- freedreno-nightly
|
- freedreno-postmerge
|
||||||
- etnaviv
|
- etnaviv
|
||||||
- etnaviv-nightly
|
- etnaviv-postmerge
|
||||||
- software-renderer
|
- software-renderer
|
||||||
- software-renderer-nightly
|
- software-renderer-postmerge
|
||||||
- layered-backends
|
- layered-backends
|
||||||
- layered-backends-nightly
|
- layered-backends-postmerge
|
||||||
- performance
|
- performance
|
||||||
- deploy
|
- deploy
|
||||||
|
|
||||||
include:
|
include:
|
||||||
|
- project: 'freedesktop/ci-templates'
|
||||||
|
ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
|
||||||
|
file:
|
||||||
|
- '/templates/ci-fairy.yml'
|
||||||
- project: 'freedesktop/ci-templates'
|
- project: 'freedesktop/ci-templates'
|
||||||
ref: *ci-templates-commit
|
ref: *ci-templates-commit
|
||||||
file:
|
file:
|
||||||
- '/templates/alpine.yml'
|
- '/templates/alpine.yml'
|
||||||
- '/templates/debian.yml'
|
- '/templates/debian.yml'
|
||||||
- '/templates/fedora.yml'
|
- '/templates/fedora.yml'
|
||||||
- '/templates/ci-fairy.yml'
|
|
||||||
- project: *ci-tron-template-project
|
|
||||||
ref: *ci-tron-template-commit
|
|
||||||
file: '/.gitlab-ci/dut.yml'
|
|
||||||
- local: '.gitlab-ci/image-tags.yml'
|
- local: '.gitlab-ci/image-tags.yml'
|
||||||
- local: '.gitlab-ci/bare-metal/gitlab-ci.yml'
|
- local: '.gitlab-ci/lava/lava-gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/ci-tron/gitlab-ci.yml'
|
|
||||||
- local: '.gitlab-ci/lava/gitlab-ci.yml'
|
|
||||||
- local: '.gitlab-ci/container/gitlab-ci.yml'
|
- local: '.gitlab-ci/container/gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/build/gitlab-ci.yml'
|
- local: '.gitlab-ci/build/gitlab-ci.yml'
|
||||||
- local: '.gitlab-ci/test/gitlab-ci.yml'
|
- local: '.gitlab-ci/test/gitlab-ci.yml'
|
||||||
@@ -228,7 +187,7 @@ include:
|
|||||||
# Rules applied to every job in the pipeline
|
# Rules applied to every job in the pipeline
|
||||||
.common-rules:
|
.common-rules:
|
||||||
rules:
|
rules:
|
||||||
- if: *is-push-to-fork
|
- if: *is-fork-push
|
||||||
when: manual
|
when: manual
|
||||||
|
|
||||||
.never-post-merge-rules:
|
.never-post-merge-rules:
|
||||||
@@ -237,59 +196,7 @@ include:
|
|||||||
when: never
|
when: never
|
||||||
|
|
||||||
|
|
||||||
# Note: make sure the branches in this list are the same as in
|
.container+build-rules:
|
||||||
# `.build-only-delayed-rules` below.
|
|
||||||
.container-rules:
|
|
||||||
rules:
|
|
||||||
- !reference [.common-rules, rules]
|
|
||||||
# Run when re-enabling a disabled farm, but not when disabling it
|
|
||||||
- !reference [.disable-farm-mr-rules, rules]
|
|
||||||
# Never run immediately after merging, as we just ran everything
|
|
||||||
- !reference [.never-post-merge-rules, rules]
|
|
||||||
# Only rebuild containers in merge pipelines if any tags have been
|
|
||||||
# changed, else we'll just use the already-built containers
|
|
||||||
- if: *is-merge-attempt
|
|
||||||
changes: &image_tags_path
|
|
||||||
- .gitlab-ci/image-tags.yml
|
|
||||||
when: on_success
|
|
||||||
# Skip everything for pre-merge and merge pipelines which don't change
|
|
||||||
# anything in the build; we only do this for marge-bot and not user
|
|
||||||
# pipelines in a MR, because we might still need to run it to copy the
|
|
||||||
# container into the user's namespace.
|
|
||||||
- if: *is-merge-attempt
|
|
||||||
when: never
|
|
||||||
# Any MR pipeline which changes image-tags.yml needs to be able to
|
|
||||||
# rebuild the containers
|
|
||||||
- if: *is-merge-request
|
|
||||||
changes: *image_tags_path
|
|
||||||
when: manual
|
|
||||||
# ... if the MR pipeline runs as mesa/mesa and does not need a container
|
|
||||||
# rebuild, we can skip it
|
|
||||||
- if: *is-upstream-mr-pipeline
|
|
||||||
when: never
|
|
||||||
# ... however for MRs running inside the user namespace, we may need to
|
|
||||||
# run these jobs to copy the container images from upstream
|
|
||||||
- if: *is-merge-request
|
|
||||||
when: manual
|
|
||||||
# Build everything after someone bypassed the CI
|
|
||||||
- if: *is-push-to-upstream-default-branch
|
|
||||||
when: on_success
|
|
||||||
# Build everything when pushing to staging branches
|
|
||||||
- if: *is-push-to-upstream-staging-branch
|
|
||||||
when: on_success
|
|
||||||
# Scheduled pipelines reuse already-built containers
|
|
||||||
- if: *is-scheduled-pipeline
|
|
||||||
when: never
|
|
||||||
# Any other pipeline in the upstream should reuse already-built containers
|
|
||||||
- if: *is-upstream-pipeline
|
|
||||||
when: never
|
|
||||||
# Allow building everything in fork pipelines, but build nothing unless
|
|
||||||
# manually triggered
|
|
||||||
- when: manual
|
|
||||||
|
|
||||||
# Note: make sure the branches in this list are the same as in
|
|
||||||
# `.build-only-delayed-rules` below.
|
|
||||||
.build-rules:
|
|
||||||
rules:
|
rules:
|
||||||
- !reference [.common-rules, rules]
|
- !reference [.common-rules, rules]
|
||||||
# Run when re-enabling a disabled farm, but not when disabling it
|
# Run when re-enabling a disabled farm, but not when disabling it
|
||||||
@@ -304,7 +211,6 @@ include:
|
|||||||
- bin/git_sha1_gen.py
|
- bin/git_sha1_gen.py
|
||||||
- bin/install_megadrivers.py
|
- bin/install_megadrivers.py
|
||||||
- bin/symbols-check.py
|
- bin/symbols-check.py
|
||||||
- bin/ci/**/*
|
|
||||||
# GitLab CI
|
# GitLab CI
|
||||||
- .gitlab-ci.yml
|
- .gitlab-ci.yml
|
||||||
- .gitlab-ci/**/*
|
- .gitlab-ci/**/*
|
||||||
@@ -322,20 +228,18 @@ include:
|
|||||||
- src/**/*
|
- src/**/*
|
||||||
when: on_success
|
when: on_success
|
||||||
# Same as above, but for pre-merge pipelines
|
# Same as above, but for pre-merge pipelines
|
||||||
- if: *is-merge-request
|
- if: *is-pre-merge
|
||||||
changes: *all_paths
|
changes:
|
||||||
|
*all_paths
|
||||||
when: manual
|
when: manual
|
||||||
# Skip everything for pre-merge and merge pipelines which don't change
|
# Skip everything for pre-merge and merge pipelines which don't change
|
||||||
# anything in the build
|
# anything in the build
|
||||||
- if: *is-merge-attempt
|
- if: *is-merge-attempt
|
||||||
when: never
|
when: never
|
||||||
- if: *is-merge-request
|
- if: *is-pre-merge
|
||||||
when: never
|
when: never
|
||||||
# Build everything after someone bypassed the CI
|
# Build everything after someone bypassed the CI
|
||||||
- if: *is-push-to-upstream-default-branch
|
- if: *is-direct-push
|
||||||
when: on_success
|
|
||||||
# Build everything when pushing to staging branches
|
|
||||||
- if: *is-push-to-upstream-staging-branch
|
|
||||||
when: on_success
|
when: on_success
|
||||||
# Build everything in scheduled pipelines
|
# Build everything in scheduled pipelines
|
||||||
- if: *is-scheduled-pipeline
|
- if: *is-scheduled-pipeline
|
||||||
@@ -344,58 +248,47 @@ include:
|
|||||||
# manually triggered
|
# manually triggered
|
||||||
- when: manual
|
- when: manual
|
||||||
|
|
||||||
# Repeat of the above but with `when: on_success` replaced with
|
|
||||||
# `when: delayed` + `start_in:`, for build-only jobs.
|
.ci-deqp-artifacts:
|
||||||
# Note: make sure the branches in this list are the same as in
|
artifacts:
|
||||||
# `.container+build-rules` above.
|
name: "mesa_${CI_JOB_NAME}"
|
||||||
.build-only-delayed-rules:
|
when: always
|
||||||
|
untracked: false
|
||||||
|
paths:
|
||||||
|
# Watch out! Artifacts are relative to the build dir.
|
||||||
|
# https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
|
||||||
|
- artifacts
|
||||||
|
- _build/meson-logs/*.txt
|
||||||
|
- _build/meson-logs/strace
|
||||||
|
|
||||||
|
# Git archive
|
||||||
|
|
||||||
|
make git archive:
|
||||||
|
extends:
|
||||||
|
- .fdo.ci-fairy
|
||||||
|
stage: git-archive
|
||||||
rules:
|
rules:
|
||||||
- !reference [.common-rules, rules]
|
- !reference [.scheduled_pipeline-rules, rules]
|
||||||
# Run when re-enabling a disabled farm, but not when disabling it
|
# ensure we are running on packet
|
||||||
- !reference [.disable-farm-mr-rules, rules]
|
tags:
|
||||||
# Never run immediately after merging, as we just ran everything
|
- packet.net
|
||||||
- !reference [.never-post-merge-rules, rules]
|
script:
|
||||||
# Build everything in merge pipelines, if any files affecting the pipeline
|
# Compactify the .git directory
|
||||||
# were changed
|
- git gc --aggressive
|
||||||
- if: *is-merge-attempt
|
# Download & cache the perfetto subproject as well.
|
||||||
changes: *all_paths
|
- rm -rf subprojects/perfetto ; mkdir -p subprojects/perfetto && curl https://android.googlesource.com/platform/external/perfetto/+archive/$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3).tar.gz | tar zxf - -C subprojects/perfetto
|
||||||
when: delayed
|
# compress the current folder
|
||||||
start_in: &build-delay 5 minutes
|
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
|
||||||
# Same as above, but for pre-merge pipelines
|
|
||||||
- if: *is-merge-request
|
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
|
||||||
changes: *all_paths
|
|
||||||
when: manual
|
|
||||||
# Skip everything for pre-merge and merge pipelines which don't change
|
|
||||||
# anything in the build
|
|
||||||
- if: *is-merge-attempt
|
|
||||||
when: never
|
|
||||||
- if: *is-merge-request
|
|
||||||
when: never
|
|
||||||
# Build everything after someone bypassed the CI
|
|
||||||
- if: *is-push-to-upstream-default-branch
|
|
||||||
when: delayed
|
|
||||||
start_in: *build-delay
|
|
||||||
# Build everything when pushing to staging branches
|
|
||||||
- if: *is-push-to-upstream-staging-branch
|
|
||||||
when: delayed
|
|
||||||
start_in: *build-delay
|
|
||||||
# Build everything in scheduled pipelines
|
|
||||||
- if: *is-scheduled-pipeline
|
|
||||||
when: delayed
|
|
||||||
start_in: *build-delay
|
|
||||||
# Allow building everything in fork pipelines, but build nothing unless
|
|
||||||
# manually triggered
|
|
||||||
- when: manual
|
|
||||||
|
|
||||||
# Sanity checks of MR settings and commit logs
|
# Sanity checks of MR settings and commit logs
|
||||||
sanity:
|
sanity:
|
||||||
extends:
|
extends:
|
||||||
- .fdo.ci-fairy
|
- .fdo.ci-fairy
|
||||||
stage: sanity
|
stage: sanity
|
||||||
tags:
|
|
||||||
- placeholder-job
|
|
||||||
rules:
|
rules:
|
||||||
- if: *is-merge-request
|
- if: *is-pre-merge
|
||||||
when: on_success
|
when: on_success
|
||||||
- when: never
|
- when: never
|
||||||
variables:
|
variables:
|
||||||
@@ -408,14 +301,14 @@ sanity:
|
|||||||
image_tags=(
|
image_tags=(
|
||||||
ALPINE_X86_64_BUILD_TAG
|
ALPINE_X86_64_BUILD_TAG
|
||||||
ALPINE_X86_64_LAVA_SSH_TAG
|
ALPINE_X86_64_LAVA_SSH_TAG
|
||||||
ALPINE_X86_64_LAVA_TRIGGER_TAG
|
|
||||||
DEBIAN_BASE_TAG
|
DEBIAN_BASE_TAG
|
||||||
DEBIAN_BUILD_TAG
|
DEBIAN_BUILD_TAG
|
||||||
|
DEBIAN_PYUTILS_TAG
|
||||||
DEBIAN_TEST_ANDROID_TAG
|
DEBIAN_TEST_ANDROID_TAG
|
||||||
DEBIAN_TEST_GL_TAG
|
DEBIAN_TEST_GL_TAG
|
||||||
DEBIAN_TEST_VK_TAG
|
DEBIAN_TEST_VK_TAG
|
||||||
FEDORA_X86_64_BUILD_TAG
|
FEDORA_X86_64_BUILD_TAG
|
||||||
FIRMWARE_TAG
|
KERNEL_ROOTFS_TAG
|
||||||
KERNEL_TAG
|
KERNEL_TAG
|
||||||
PKG_REPO_REV
|
PKG_REPO_REV
|
||||||
WINDOWS_X64_BUILD_TAG
|
WINDOWS_X64_BUILD_TAG
|
||||||
@@ -434,4 +327,32 @@ sanity:
|
|||||||
when: on_failure
|
when: on_failure
|
||||||
reports:
|
reports:
|
||||||
junit: check-*.xml
|
junit: check-*.xml
|
||||||
|
tags:
|
||||||
|
- placeholder-job
|
||||||
|
|
||||||
|
|
||||||
|
mr-label-maker-test:
|
||||||
|
extends:
|
||||||
|
- .fdo.ci-fairy
|
||||||
|
stage: sanity
|
||||||
|
rules:
|
||||||
|
- !reference [.mr-label-maker-rules, rules]
|
||||||
|
variables:
|
||||||
|
GIT_STRATEGY: fetch
|
||||||
|
timeout: 10m
|
||||||
|
script:
|
||||||
|
- set -eu
|
||||||
|
- python3 -m venv .venv
|
||||||
|
- source .venv/bin/activate
|
||||||
|
- pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
|
||||||
|
- mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID
|
||||||
|
|
||||||
|
# Jobs that need to pass before spending hardware resources on further testing
|
||||||
|
.required-for-hardware-jobs:
|
||||||
|
needs:
|
||||||
|
- job: clang-format
|
||||||
|
optional: true
|
||||||
|
- job: rustfmt
|
||||||
|
optional: true
|
||||||
|
- job: toml-lint
|
||||||
|
optional: true
|
||||||
|
@@ -1,33 +0,0 @@
|
|||||||
[flake8]
|
|
||||||
exclude = .venv*,
|
|
||||||
|
|
||||||
# PEP 8 Style Guide limits line length to 79 characters
|
|
||||||
max-line-length = 159
|
|
||||||
|
|
||||||
ignore =
|
|
||||||
# continuation line under-indented for hanging indent
|
|
||||||
E121
|
|
||||||
# continuation line over-indented for hanging indent
|
|
||||||
E126,
|
|
||||||
# continuation line under-indented for visual indent
|
|
||||||
E128,
|
|
||||||
# whitespace before ':'
|
|
||||||
E203,
|
|
||||||
# missing whitespace around arithmetic operator
|
|
||||||
E226,
|
|
||||||
# missing whitespace after ','
|
|
||||||
E231,
|
|
||||||
# expected 2 blank lines, found 1
|
|
||||||
E302,
|
|
||||||
# too many blank lines
|
|
||||||
E303,
|
|
||||||
# imported but unused
|
|
||||||
F401,
|
|
||||||
# f-string is missing placeholders
|
|
||||||
F541,
|
|
||||||
# local variable assigned to but never used
|
|
||||||
F841,
|
|
||||||
# line break before binary operator
|
|
||||||
W503,
|
|
||||||
# line break after binary operator
|
|
||||||
W504,
|
|
@@ -85,31 +85,3 @@ wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other
|
|||||||
# Vulkan loader in Debian is too old
|
# Vulkan loader in Debian is too old
|
||||||
dEQP-VK.api.get_device_proc_addr.non_enabled
|
dEQP-VK.api.get_device_proc_addr.non_enabled
|
||||||
dEQP-VK.api.version_check.unavailable_entry_points
|
dEQP-VK.api.version_check.unavailable_entry_points
|
||||||
|
|
||||||
# These tests are flaking too much recently on almost all drivers, so better skip them until the cause is identified
|
|
||||||
spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex
|
|
||||||
spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT
|
|
||||||
|
|
||||||
# These tests attempt to read from the front buffer after a swap. They are skipped
|
|
||||||
# on both X11 and gbm, but for different reasons:
|
|
||||||
#
|
|
||||||
# On X11: Given that we run piglit tests in parallel in Mesa CI, and don't have a
|
|
||||||
# compositor running, the frontbuffer reads may end up with undefined results from
|
|
||||||
# windows overlapping us.
|
|
||||||
# Piglit does mark these tests as not to be run in parallel, but deqp-runner
|
|
||||||
# doesn't respect that. We need to extend deqp-runner to allow some tests to be
|
|
||||||
# marked as single-threaded and run after the rayon loop if we want to support
|
|
||||||
# them.
|
|
||||||
# Other front-buffer access tests like fbo-sys-blit, fbo-sys-sub-blit, or
|
|
||||||
# fcc-front-buffer-distraction don't appear here, because the DRI3 fake-front
|
|
||||||
# handling should be holding the pixels drawn by the test even if we happen to fail
|
|
||||||
# GL's window system pixel occlusion test.
|
|
||||||
# Note that glx skips don't appear here, they're in all-skips.txt (in case someone
|
|
||||||
# sets PIGLIT_PLATFORM=gbm to mostly use gbm, but still has an X server running).
|
|
||||||
#
|
|
||||||
# On gbm: gbm does not support reading the front buffer after a swapbuffers, and
|
|
||||||
# that's intentional. Don't bother running these tests when PIGLIT_PLATFORM=gbm.
|
|
||||||
# Note that this doesn't include tests like fbo-sys-blit, which draw/read front
|
|
||||||
# but don't swap.
|
|
||||||
spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
|
|
||||||
spec@!opengl 1.1@read-front
|
|
||||||
|
@@ -1,71 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC1091 # paths only become valid at runtime
|
|
||||||
|
|
||||||
. "${SCRIPTS_DIR}/setup-test-env.sh"
|
|
||||||
|
|
||||||
ci_tag_test_time_check "ANDROID_CTS_TAG"
|
|
||||||
|
|
||||||
export PATH=/android-tools/build-tools:/android-cts/jdk/bin/:$PATH
|
|
||||||
export JAVA_HOME=/android-cts/jdk
|
|
||||||
|
|
||||||
# Wait for the appops service to show up
|
|
||||||
while [ "$($ADB shell dumpsys -l | grep appops)" = "" ] ; do sleep 1; done
|
|
||||||
|
|
||||||
SKIP_FILE="$INSTALL/${GPU_VERSION}-android-cts-skips.txt"
|
|
||||||
|
|
||||||
EXCLUDE_FILTERS=""
|
|
||||||
if [ -e "$SKIP_FILE" ]; then
|
|
||||||
EXCLUDE_FILTERS="$(grep -v -E "(^#|^[[:space:]]*$)" "$SKIP_FILE" | sed -e 's/\s*$//g' -e 's/.*/--exclude-filter "\0" /g')"
|
|
||||||
fi
|
|
||||||
|
|
||||||
INCLUDE_FILE="$INSTALL/${GPU_VERSION}-android-cts-include.txt"
|
|
||||||
|
|
||||||
if [ ! -e "$INCLUDE_FILE" ]; then
|
|
||||||
set +x
|
|
||||||
echo "ERROR: No include file (${GPU_VERSION}-android-cts-include.txt) found."
|
|
||||||
echo "This means that we are running the all available CTS modules."
|
|
||||||
echo "But the time to run it might be too long, please provide an include file instead."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
INCLUDE_FILTERS="$(grep -v -E "(^#|^[[:space:]]*$)" "$INCLUDE_FILE" | sed -e 's/\s*$//g' -e 's/.*/--include-filter "\0" /g')"
|
|
||||||
|
|
||||||
if [ -n "${ANDROID_CTS_PREPARE_COMMAND:-}" ]; then
|
|
||||||
eval "$ANDROID_CTS_PREPARE_COMMAND"
|
|
||||||
fi
|
|
||||||
|
|
||||||
uncollapsed_section_switch android_cts_test "Android CTS: testing"
|
|
||||||
|
|
||||||
set +e
|
|
||||||
eval "/android-cts/tools/cts-tradefed" run commandAndExit cts-dev \
|
|
||||||
$INCLUDE_FILTERS \
|
|
||||||
$EXCLUDE_FILTERS
|
|
||||||
|
|
||||||
SUMMARY_FILE=/android-cts/results/latest/invocation_summary.txt
|
|
||||||
|
|
||||||
# Parse a line like `x/y modules completed` to check that all modules completed
|
|
||||||
COMPLETED_MODULES=$(sed -n -e '/modules completed/s/^\([0-9]\+\)\/\([0-9]\+\) .*$/\1/p' "$SUMMARY_FILE")
|
|
||||||
AVAILABLE_MODULES=$(sed -n -e '/modules completed/s/^\([0-9]\+\)\/\([0-9]\+\) .*$/\2/p' "$SUMMARY_FILE")
|
|
||||||
[ "$COMPLETED_MODULES" = "$AVAILABLE_MODULES" ]
|
|
||||||
# shellcheck disable=SC2319 # False-positive see https://github.com/koalaman/shellcheck/issues/2937#issuecomment-2660891195
|
|
||||||
MODULES_FAILED=$?
|
|
||||||
|
|
||||||
# Parse a line like `FAILED : x` to check that no tests failed
|
|
||||||
[ "$(grep "^FAILED" "$SUMMARY_FILE" | tr -d ' ' | cut -d ':' -f 2)" = "0" ]
|
|
||||||
# shellcheck disable=SC2319 # False-positive see https://github.com/koalaman/shellcheck/issues/2937#issuecomment-2660891195
|
|
||||||
TESTS_FAILED=$?
|
|
||||||
|
|
||||||
[ "$MODULES_FAILED" = "0" ] && [ "$TESTS_FAILED" = "0" ]
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034 # EXIT_CODE is used by the script that sources this one
|
|
||||||
EXIT_CODE=$?
|
|
||||||
set -e
|
|
||||||
|
|
||||||
cp -r "/android-cts/results/latest"/* $RESULTS_DIR
|
|
||||||
cp -r "/android-cts/logs/latest"/* $RESULTS_DIR
|
|
||||||
|
|
||||||
echo "============================================"
|
|
||||||
echo "Review the Android CTS test results at: ${ARTIFACTS_BASE_URL}/results/test_result.html"
|
|
||||||
|
|
||||||
section_end android_cts_test
|
|
@@ -1,110 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC1091 # paths only become valid at runtime
|
|
||||||
|
|
||||||
. "${SCRIPTS_DIR}/setup-test-env.sh"
|
|
||||||
|
|
||||||
# deqp
|
|
||||||
|
|
||||||
$ADB shell mkdir -p /data/deqp
|
|
||||||
$ADB push /deqp-gles/modules/egl/deqp-egl-android /data/deqp
|
|
||||||
$ADB push /deqp-gles/mustpass/egl-main.txt.zst /data/deqp
|
|
||||||
$ADB push /deqp-gles/modules/gles2/deqp-gles2 /data/deqp
|
|
||||||
$ADB push /deqp-gles/mustpass/gles2-main.txt.zst /data/deqp
|
|
||||||
$ADB push /deqp-vk/external/vulkancts/modules/vulkan/* /data/deqp
|
|
||||||
$ADB push /deqp-vk/mustpass/vk-main.txt.zst /data/deqp
|
|
||||||
$ADB push /deqp-tools/* /data/deqp
|
|
||||||
$ADB push /deqp-runner/deqp-runner /data/deqp
|
|
||||||
|
|
||||||
$ADB push "$INSTALL/all-skips.txt" /data/deqp
|
|
||||||
$ADB push "$INSTALL/android-skips.txt" /data/deqp
|
|
||||||
$ADB push "$INSTALL/angle-skips.txt" /data/deqp
|
|
||||||
if [ -e "$INSTALL/$GPU_VERSION-flakes.txt" ]; then
|
|
||||||
$ADB push "$INSTALL/$GPU_VERSION-flakes.txt" /data/deqp
|
|
||||||
fi
|
|
||||||
if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
|
|
||||||
$ADB push "$INSTALL/$GPU_VERSION-fails.txt" /data/deqp
|
|
||||||
fi
|
|
||||||
if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
|
|
||||||
$ADB push "$INSTALL/$GPU_VERSION-skips.txt" /data/deqp
|
|
||||||
fi
|
|
||||||
$ADB push "$INSTALL/deqp-$DEQP_SUITE.toml" /data/deqp
|
|
||||||
|
|
||||||
BASELINE=""
|
|
||||||
if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
|
|
||||||
BASELINE="--baseline /data/deqp/$GPU_VERSION-fails.txt"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Default to an empty known flakes file if it doesn't exist.
|
|
||||||
$ADB shell "touch /data/deqp/$GPU_VERSION-flakes.txt"
|
|
||||||
|
|
||||||
DEQP_SKIPS=""
|
|
||||||
if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
|
|
||||||
DEQP_SKIPS="$DEQP_SKIPS /data/deqp/$GPU_VERSION-skips.txt"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${ANGLE_TAG:-}" ]; then
|
|
||||||
DEQP_SKIPS="$DEQP_SKIPS /data/deqp/angle-skips.txt"
|
|
||||||
fi
|
|
||||||
|
|
||||||
AOSP_RESULTS=/data/deqp/results
|
|
||||||
uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"
|
|
||||||
|
|
||||||
# Print the detailed version with the list of backports and local patches
|
|
||||||
{ set +x; } 2>/dev/null
|
|
||||||
for api in vk-main vk gl gles; do
|
|
||||||
deqp_version_log=/deqp-$api/deqp-$api-version
|
|
||||||
if [ -r "$deqp_version_log" ]; then
|
|
||||||
cat "$deqp_version_log"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
set -x
|
|
||||||
|
|
||||||
set +e
|
|
||||||
$ADB shell "mkdir ${AOSP_RESULTS}; cd ${AOSP_RESULTS}/..; \
|
|
||||||
XDG_CACHE_HOME=/data/local/tmp \
|
|
||||||
./deqp-runner \
|
|
||||||
suite \
|
|
||||||
--suite /data/deqp/deqp-$DEQP_SUITE.toml \
|
|
||||||
--output $AOSP_RESULTS \
|
|
||||||
--skips /data/deqp/all-skips.txt $DEQP_SKIPS \
|
|
||||||
--flakes /data/deqp/$GPU_VERSION-flakes.txt \
|
|
||||||
--testlog-to-xml /data/deqp/testlog-to-xml \
|
|
||||||
--shader-cache-dir /data/local/tmp \
|
|
||||||
--fraction-start ${CI_NODE_INDEX:-1} \
|
|
||||||
--fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
|
|
||||||
--jobs ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
$BASELINE \
|
|
||||||
${DEQP_RUNNER_MAX_FAILS:+--max-fails \"$DEQP_RUNNER_MAX_FAILS\"} \
|
|
||||||
"
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034 # EXIT_CODE is used by the script that sources this one
|
|
||||||
EXIT_CODE=$?
|
|
||||||
set -e
|
|
||||||
section_switch cuttlefish_results "cuttlefish: gathering the results"
|
|
||||||
|
|
||||||
$ADB pull "$AOSP_RESULTS/." "$RESULTS_DIR"
|
|
||||||
|
|
||||||
# Remove all but the first 50 individual XML files uploaded as artifacts, to
|
|
||||||
# save fd.o space when you break everything.
|
|
||||||
find $RESULTS_DIR -name \*.xml | \
|
|
||||||
sort -n |
|
|
||||||
sed -n '1,+49!p' | \
|
|
||||||
xargs rm -f
|
|
||||||
|
|
||||||
# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
|
|
||||||
find $RESULTS_DIR -name \*.xml \
|
|
||||||
-exec cp /deqp-tools/testlog.css /deqp-tools/testlog.xsl "$RESULTS_DIR/" ";" \
|
|
||||||
-quit
|
|
||||||
|
|
||||||
$ADB shell "cd ${AOSP_RESULTS}/..; \
|
|
||||||
./deqp-runner junit \
|
|
||||||
--testsuite dEQP \
|
|
||||||
--results $AOSP_RESULTS/failures.csv \
|
|
||||||
--output $AOSP_RESULTS/junit.xml \
|
|
||||||
--limit 50 \
|
|
||||||
--template \"See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml\""
|
|
||||||
|
|
||||||
$ADB pull "$AOSP_RESULTS/junit.xml" "$RESULTS_DIR"
|
|
||||||
|
|
||||||
section_end cuttlefish_results
|
|
@@ -1,150 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC1091 # paths only become valid at runtime
|
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
# Set default ADB command if not set already
|
|
||||||
|
|
||||||
: "${ADB:=adb}"
|
|
||||||
|
|
||||||
$ADB wait-for-device root
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
# overlay
|
|
||||||
|
|
||||||
REMOUNT_PATHS="/vendor"
|
|
||||||
if [ "$ANDROID_VERSION" -ge 15 ]; then
|
|
||||||
REMOUNT_PATHS="$REMOUNT_PATHS /system"
|
|
||||||
fi
|
|
||||||
|
|
||||||
OV_TMPFS="/data/overlay-remount"
|
|
||||||
$ADB shell mkdir -p "$OV_TMPFS"
|
|
||||||
$ADB shell mount -t tmpfs none "$OV_TMPFS"
|
|
||||||
|
|
||||||
for path in $REMOUNT_PATHS; do
|
|
||||||
$ADB shell mkdir -p "${OV_TMPFS}${path}-upper"
|
|
||||||
$ADB shell mkdir -p "${OV_TMPFS}${path}-work"
|
|
||||||
|
|
||||||
opts="lowerdir=${path},upperdir=${OV_TMPFS}${path}-upper,workdir=${OV_TMPFS}${path}-work"
|
|
||||||
$ADB shell mount -t overlay -o "$opts" none ${path}
|
|
||||||
done
|
|
||||||
|
|
||||||
$ADB shell setenforce 0
|
|
||||||
|
|
||||||
$ADB push /android-tools/eglinfo /data
|
|
||||||
$ADB push /android-tools/vulkaninfo /data
|
|
||||||
|
|
||||||
get_gles_runtime_renderer() {
|
|
||||||
while [ "$($ADB shell XDG_CACHE_HOME=/data/local/tmp /data/eglinfo | grep 'OpenGL ES profile renderer':)" = "" ] ; do sleep 1; done
|
|
||||||
$ADB shell XDG_CACHE_HOME=/data/local/tmp /data/eglinfo | grep 'OpenGL ES profile renderer' | head -1
|
|
||||||
}
|
|
||||||
|
|
||||||
get_gles_runtime_version() {
|
|
||||||
while [ "$($ADB shell XDG_CACHE_HOME=/data/local/tmp /data/eglinfo | grep 'OpenGL ES profile version:')" = "" ] ; do sleep 1; done
|
|
||||||
$ADB shell XDG_CACHE_HOME=/data/local/tmp /data/eglinfo | grep 'OpenGL ES profile version:' | head -1
|
|
||||||
}
|
|
||||||
|
|
||||||
get_vk_runtime_device_name() {
|
|
||||||
$ADB shell XDG_CACHE_HOME=/data/local/tmp /data/vulkaninfo | grep deviceName | head -1
|
|
||||||
}
|
|
||||||
|
|
||||||
get_vk_runtime_version() {
|
|
||||||
$ADB shell XDG_CACHE_HOME=/data/local/tmp /data/vulkaninfo | grep driverInfo | head -1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check what GLES & VK implementation is used before uploading the new libraries
|
|
||||||
get_gles_runtime_renderer
|
|
||||||
get_gles_runtime_version
|
|
||||||
get_vk_runtime_device_name
|
|
||||||
get_vk_runtime_version
|
|
||||||
|
|
||||||
# replace libraries
|
|
||||||
|
|
||||||
$ADB shell rm -f /vendor/lib64/libgallium_dri.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libEGL_mesa.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libGLESv1_CM_mesa.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libGLESv2_mesa.so*
|
|
||||||
|
|
||||||
$ADB push "$INSTALL/lib/libgallium_dri.so" /vendor/lib64/libgallium_dri.so
|
|
||||||
$ADB push "$INSTALL/lib/libEGL.so" /vendor/lib64/egl/libEGL_mesa.so
|
|
||||||
$ADB push "$INSTALL/lib/libGLESv1_CM.so" /vendor/lib64/egl/libGLESv1_CM_mesa.so
|
|
||||||
$ADB push "$INSTALL/lib/libGLESv2.so" /vendor/lib64/egl/libGLESv2_mesa.so
|
|
||||||
|
|
||||||
$ADB shell rm -f /vendor/lib64/hw/vulkan.lvp.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/hw/vulkan.virtio.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/hw/vulkan.intel.so*
|
|
||||||
|
|
||||||
$ADB push "$INSTALL/lib/libvulkan_lvp.so" /vendor/lib64/hw/vulkan.lvp.so
|
|
||||||
$ADB push "$INSTALL/lib/libvulkan_virtio.so" /vendor/lib64/hw/vulkan.virtio.so
|
|
||||||
$ADB push "$INSTALL/lib/libvulkan_intel.so" /vendor/lib64/hw/vulkan.intel.so
|
|
||||||
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libEGL_emulation.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libGLESv1_CM_emulation.so*
|
|
||||||
$ADB shell rm -f /vendor/lib64/egl/libGLESv2_emulation.so*
|
|
||||||
|
|
||||||
if [ -n "${ANGLE_TAG:-}" ]; then
|
|
||||||
ANGLE_DEST_PATH=/vendor/lib64/egl
|
|
||||||
if [ "$ANDROID_VERSION" -ge 15 ]; then
|
|
||||||
ANGLE_DEST_PATH=/system/lib64
|
|
||||||
fi
|
|
||||||
|
|
||||||
$ADB shell rm -f "$ANGLE_DEST_PATH/libEGL_angle.so"*
|
|
||||||
$ADB shell rm -f "$ANGLE_DEST_PATH/libGLESv1_CM_angle.so"*
|
|
||||||
$ADB shell rm -f "$ANGLE_DEST_PATH/libGLESv2_angle.so"*
|
|
||||||
|
|
||||||
$ADB push /angle/libEGL_angle.so "$ANGLE_DEST_PATH/libEGL_angle.so"
|
|
||||||
$ADB push /angle/libGLESv1_CM_angle.so "$ANGLE_DEST_PATH/libGLESv1_CM_angle.so"
|
|
||||||
$ADB push /angle/libGLESv2_angle.so "$ANGLE_DEST_PATH/libGLESv2_angle.so"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Check what GLES & VK implementation is used after uploading the new libraries
|
|
||||||
MESA_BUILD_VERSION=$(cat "$INSTALL/VERSION")
|
|
||||||
get_gles_runtime_renderer
|
|
||||||
GLES_RUNTIME_VERSION="$(get_gles_runtime_version)"
|
|
||||||
get_vk_runtime_device_name
|
|
||||||
VK_RUNTIME_VERSION="$(get_vk_runtime_version)"
|
|
||||||
|
|
||||||
if [ -n "${ANGLE_TAG:-}" ]; then
|
|
||||||
# Note: we are injecting the ANGLE libs too, so we need to check if the
|
|
||||||
# new ANGLE libs are being used.
|
|
||||||
ANGLE_HASH=$(head -c 12 /angle/version)
|
|
||||||
if ! printf "%s" "$GLES_RUNTIME_VERSION" | grep --quiet "${ANGLE_HASH}"; then
|
|
||||||
echo "Fatal: Android is loading a wrong version of the ANGLE libs: ${ANGLE_HASH}" 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! printf "%s" "$VK_RUNTIME_VERSION" | grep -Fq -- "${MESA_BUILD_VERSION}"; then
|
|
||||||
echo "Fatal: Android is loading a wrong version of the Mesa3D Vulkan libs: ${VK_RUNTIME_VERSION}" 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
get_surfaceflinger_pid() {
|
|
||||||
while [ "$($ADB shell dumpsys -l | grep 'SurfaceFlinger$')" = "" ] ; do sleep 1; done
|
|
||||||
$ADB shell ps -A | grep -i surfaceflinger | tr -s ' ' | cut -d ' ' -f 2
|
|
||||||
}
|
|
||||||
|
|
||||||
OLD_SF_PID=$(get_surfaceflinger_pid)
|
|
||||||
|
|
||||||
# restart Android shell, so that services use the new libraries
|
|
||||||
$ADB shell stop
|
|
||||||
$ADB shell start
|
|
||||||
|
|
||||||
# Check that SurfaceFlinger restarted, to ensure that new libraries have been picked up
|
|
||||||
NEW_SF_PID=$(get_surfaceflinger_pid)
|
|
||||||
|
|
||||||
if [ "$OLD_SF_PID" == "$NEW_SF_PID" ]; then
|
|
||||||
echo "Fatal: check that SurfaceFlinger restarted" 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${ANDROID_CTS_TAG:-}" ]; then
|
|
||||||
# The script sets EXIT_CODE
|
|
||||||
. "$(dirname "$0")/android-cts-runner.sh"
|
|
||||||
else
|
|
||||||
# The script sets EXIT_CODE
|
|
||||||
. "$(dirname "$0")/android-deqp-runner.sh"
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit $EXIT_CODE
|
|
@@ -1,11 +0,0 @@
|
|||||||
# Skip these tests when running fractional dEQP batches, as the AHB tests are expected
|
|
||||||
# to be handled separately in a non-fractional run within the deqp-runner suite.
|
|
||||||
dEQP-VK.api.external.memory.android_hardware_buffer.*
|
|
||||||
|
|
||||||
# Skip all WSI tests: the DEQP_ANDROID_EXE build used can't create native windows, as
|
|
||||||
# only APKs support window creation on Android.
|
|
||||||
dEQP-VK.image.swapchain_mutable.*
|
|
||||||
dEQP-VK.wsi.*
|
|
||||||
|
|
||||||
# These tests cause hangs and need to be skipped for now.
|
|
||||||
dEQP-VK.synchronization*
|
|
89
.gitlab-ci/b2c/b2c.yml.jinja2.jinja2
Normal file
89
.gitlab-ci/b2c/b2c.yml.jinja2.jinja2
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
version: 1
|
||||||
|
|
||||||
|
# Rules to match for a machine to qualify
|
||||||
|
target:
|
||||||
|
id: '{{ ci_runner_id }}'
|
||||||
|
|
||||||
|
timeouts:
|
||||||
|
|
||||||
|
first_console_activity: # This limits the time it can take to receive the first console log
|
||||||
|
minutes: {{ timeout_first_console_activity_minutes | default(0, true) }}
|
||||||
|
seconds: {{ timeout_first_console_activity_seconds | default(0, true) }}
|
||||||
|
retries: {{ timeout_first_console_activity_retries }}
|
||||||
|
|
||||||
|
console_activity: # Reset every time we receive a message from the logs
|
||||||
|
minutes: {{ timeout_console_activity_minutes | default(0, true) }}
|
||||||
|
seconds: {{ timeout_console_activity_seconds | default(0, true) }}
|
||||||
|
retries: {{ timeout_console_activity_retries }}
|
||||||
|
|
||||||
|
boot_cycle:
|
||||||
|
minutes: {{ timeout_boot_minutes | default(0, true) }}
|
||||||
|
seconds: {{ timeout_boot_seconds | default(0, true) }}
|
||||||
|
retries: {{ timeout_boot_retries }}
|
||||||
|
|
||||||
|
overall: # Maximum time the job can take, not overrideable by the "continue" deployment
|
||||||
|
minutes: {{ timeout_overall_minutes | default(0, true) }}
|
||||||
|
seconds: {{ timeout_overall_seconds | default(0, true) }}
|
||||||
|
retries: 0
|
||||||
|
# no retries possible here
|
||||||
|
|
||||||
|
console_patterns:
|
||||||
|
session_end:
|
||||||
|
regex: >-
|
||||||
|
{{ session_end_regex }}
|
||||||
|
{% if session_reboot_regex %}
|
||||||
|
session_reboot:
|
||||||
|
regex: >-
|
||||||
|
{{ session_reboot_regex }}
|
||||||
|
{% endif %}
|
||||||
|
job_success:
|
||||||
|
regex: >-
|
||||||
|
{{ job_success_regex }}
|
||||||
|
{% if job_warn_regex %}
|
||||||
|
job_warn:
|
||||||
|
regex: >-
|
||||||
|
{{ job_warn_regex }}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Environment to deploy
|
||||||
|
deployment:
|
||||||
|
# Initial boot
|
||||||
|
start:
|
||||||
|
storage:
|
||||||
|
http:
|
||||||
|
- path: "/b2c-extra-args"
|
||||||
|
data: >
|
||||||
|
b2c.pipefail b2c.poweroff_delay={{ poweroff_delay }}
|
||||||
|
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
|
||||||
|
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
|
||||||
|
{% for volume in volumes %}
|
||||||
|
b2c.volume={{ volume }}
|
||||||
|
{% endfor %}
|
||||||
|
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
|
||||||
|
b2c.run="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
|
||||||
|
b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd | replace('"', '\\\"') }}"
|
||||||
|
kernel:
|
||||||
|
{% if kernel_url %}
|
||||||
|
url: '{{ kernel_url }}'
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# NOTE: b2c.cache_device should not be here, but this works around
|
||||||
|
# a limitation of b2c which will be removed in the next release
|
||||||
|
cmdline: >
|
||||||
|
SALAD.machine_id={{ '{{' }} machine_id }}
|
||||||
|
console={{ '{{' }} local_tty_device }},115200
|
||||||
|
b2c.cache_device=auto b2c.ntp_peer=10.42.0.1
|
||||||
|
b2c.extra_args_url={{ '{{' }} job.http.url }}/b2c-extra-args
|
||||||
|
{% if kernel_cmdline_extras is defined %}
|
||||||
|
{{ kernel_cmdline_extras }}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if initramfs_url %}
|
||||||
|
initramfs:
|
||||||
|
url: '{{ initramfs_url }}'
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if dtb_url %}
|
||||||
|
dtb:
|
||||||
|
url: '{{ dtb_url }}'
|
||||||
|
{% endif %}
|
55
.gitlab-ci/b2c/generate_b2c.py
Executable file
55
.gitlab-ci/b2c/generate_b2c.py
Executable file
@@ -0,0 +1,55 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Copyright © 2022 Valve Corporation
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
|
from jinja2 import Environment, FileSystemLoader
|
||||||
|
from os import environ, path
|
||||||
|
|
||||||
|
|
||||||
|
# Pass all the environment variables prefixed by B2C_
|
||||||
|
values = {
|
||||||
|
key.removeprefix("B2C_").lower(): environ[key]
|
||||||
|
for key in environ if key.startswith("B2C_")
|
||||||
|
}
|
||||||
|
|
||||||
|
env = Environment(loader=FileSystemLoader(path.dirname(values['job_template'])),
|
||||||
|
trim_blocks=True, lstrip_blocks=True)
|
||||||
|
|
||||||
|
template = env.get_template(path.basename(values['job_template']))
|
||||||
|
|
||||||
|
values['ci_job_id'] = environ['CI_JOB_ID']
|
||||||
|
values['ci_runner_id'] = environ['CI_RUNNER_ID']
|
||||||
|
values['job_volume_exclusions'] = [excl for excl in values['job_volume_exclusions'].split(",") if excl]
|
||||||
|
values['working_dir'] = environ['CI_PROJECT_DIR']
|
||||||
|
|
||||||
|
# Use the gateway's pull-through registry caches to reduce load on fd.o.
|
||||||
|
values['local_container'] = environ['IMAGE_UNDER_TEST']
|
||||||
|
values['local_container'] = values['local_container'].replace(
|
||||||
|
'registry.freedesktop.org',
|
||||||
|
'{{ fdo_proxy_registry }}'
|
||||||
|
)
|
||||||
|
|
||||||
|
if 'kernel_cmdline_extras' not in values:
|
||||||
|
values['kernel_cmdline_extras'] = ''
|
||||||
|
|
||||||
|
with open(path.splitext(path.basename(values['job_template']))[0], "w") as f:
|
||||||
|
f.write(template.render(values))
|
17
.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
Executable file
17
.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
Executable file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
|
echo "Must supply the PoE Interface to power down"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_ADDRESS" ]; then
|
||||||
|
echo "Must supply the PoE Switch host"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
||||||
|
SNMP_OFF="i 4"
|
||||||
|
|
||||||
|
snmpset -v2c -r 3 -t 30 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
22
.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
Executable file
22
.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
Executable file
@@ -0,0 +1,22 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
|
echo "Must supply the PoE Interface to power up"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_ADDRESS" ]; then
|
||||||
|
echo "Must supply the PoE Switch host"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
||||||
|
SNMP_ON="i 1"
|
||||||
|
SNMP_OFF="i 4"
|
||||||
|
|
||||||
|
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
||||||
|
sleep 3s
|
||||||
|
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_ON
|
129
.gitlab-ci/bare-metal/cros-servo.sh
Executable file
129
.gitlab-ci/bare-metal/cros-servo.sh
Executable file
@@ -0,0 +1,129 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
||||||
|
# shellcheck disable=SC2034
|
||||||
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
|
# Boot script for Chrome OS devices attached to a servo debug connector, using
|
||||||
|
# NFS and TFTP to boot.
|
||||||
|
|
||||||
|
# We're run from the root of the repo, make a helper var for our paths
|
||||||
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
|
CI_COMMON=$CI_PROJECT_DIR/install/common
|
||||||
|
CI_INSTALL=$CI_PROJECT_DIR/install
|
||||||
|
|
||||||
|
# Runner config checks
|
||||||
|
if [ -z "$BM_SERIAL" ]; then
|
||||||
|
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is the CPU serial device."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_SERIAL_EC" ]; then
|
||||||
|
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is the EC serial device for controlling board power"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -d /nfs ]; then
|
||||||
|
echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -d /tftp ]; then
|
||||||
|
echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# job config checks
|
||||||
|
if [ -z "$BM_KERNEL" ]; then
|
||||||
|
echo "Must set BM_KERNEL to your board's kernel FIT image"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_ROOTFS" ]; then
|
||||||
|
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_CMDLINE" ]; then
|
||||||
|
echo "Must set BM_CMDLINE to your board's kernel command line arguments"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
. "${SCRIPTS_DIR}/setup-test-env.sh"
|
||||||
|
|
||||||
|
section_start prepare_rootfs "Preparing rootfs components"
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
# Clear out any previous run's artifacts.
|
||||||
|
rm -rf results/
|
||||||
|
mkdir -p results
|
||||||
|
|
||||||
|
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
|
||||||
|
# state, since it's volume-mounted on the host.
|
||||||
|
rsync -a --delete $BM_ROOTFS/ /nfs/
|
||||||
|
mkdir -p /nfs/results
|
||||||
|
. $BM/rootfs-setup.sh /nfs
|
||||||
|
|
||||||
|
# Put the kernel/dtb image and the boot command line in the tftp directory for
|
||||||
|
# the board to find. For normal Mesa development, we build the kernel and
|
||||||
|
# store it in the docker container that this script is running in.
|
||||||
|
#
|
||||||
|
# However, container builds are expensive, so when you're hacking on the
|
||||||
|
# kernel, it's nice to be able to skip the half hour container build and plus
|
||||||
|
# moving that container to the runner. So, if BM_KERNEL is a URL, fetch it
|
||||||
|
# instead of looking in the container. Note that the kernel build should be
|
||||||
|
# the output of:
|
||||||
|
#
|
||||||
|
# make Image.lzma
|
||||||
|
#
|
||||||
|
# mkimage \
|
||||||
|
# -A arm64 \
|
||||||
|
# -f auto \
|
||||||
|
# -C lzma \
|
||||||
|
# -d arch/arm64/boot/Image.lzma \
|
||||||
|
# -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
|
||||||
|
# cheza-image.img
|
||||||
|
|
||||||
|
rm -rf /tftp/*
|
||||||
|
if echo "$BM_KERNEL" | grep -q http; then
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
$BM_KERNEL -o /tftp/vmlinuz
|
||||||
|
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
||||||
|
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/nfs/"
|
||||||
|
rm modules.tar.zst &
|
||||||
|
else
|
||||||
|
cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
|
||||||
|
fi
|
||||||
|
echo "$BM_CMDLINE" > /tftp/cmdline
|
||||||
|
|
||||||
|
set +e
|
||||||
|
STRUCTURED_LOG_FILE=results/job_detail.json
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
|
||||||
|
section_end prepare_rootfs
|
||||||
|
|
||||||
|
python3 $BM/cros_servo_run.py \
|
||||||
|
--cpu $BM_SERIAL \
|
||||||
|
--ec $BM_SERIAL_EC \
|
||||||
|
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20}
|
||||||
|
ret=$?
|
||||||
|
|
||||||
|
section_start dut_cleanup "Cleaning up after job"
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
||||||
|
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
|
# will look for them.
|
||||||
|
cp -Rp /nfs/results/. results/
|
||||||
|
section_end dut_cleanup
|
||||||
|
|
||||||
|
exit $ret
|
206
.gitlab-ci/bare-metal/cros_servo_run.py
Executable file
206
.gitlab-ci/bare-metal/cros_servo_run.py
Executable file
@@ -0,0 +1,206 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright © 2020 Google LLC
|
||||||
|
# SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime
|
||||||
|
import math
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from custom_logger import CustomLogger
|
||||||
|
from serial_buffer import SerialBuffer
|
||||||
|
|
||||||
|
ANSI_ESCAPE="\x1b[0K"
|
||||||
|
ANSI_COLOUR="\x1b[0;36m"
|
||||||
|
ANSI_RESET="\x1b[0m"
|
||||||
|
SECTION_START="start"
|
||||||
|
SECTION_END="end"
|
||||||
|
|
||||||
|
class CrosServoRun:
|
||||||
|
def __init__(self, cpu, ec, test_timeout, logger):
|
||||||
|
self.cpu_ser = SerialBuffer(
|
||||||
|
cpu, "results/serial.txt", ": ")
|
||||||
|
# Merge the EC serial into the cpu_ser's line stream so that we can
|
||||||
|
# effectively poll on both at the same time and not have to worry about
|
||||||
|
self.ec_ser = SerialBuffer(
|
||||||
|
ec, "results/serial-ec.txt", " EC: ", line_queue=self.cpu_ser.line_queue)
|
||||||
|
self.test_timeout = test_timeout
|
||||||
|
self.logger = logger
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.ec_ser.close()
|
||||||
|
self.cpu_ser.close()
|
||||||
|
|
||||||
|
def ec_write(self, s):
|
||||||
|
print("EC> %s" % s)
|
||||||
|
self.ec_ser.serial.write(s.encode())
|
||||||
|
|
||||||
|
def cpu_write(self, s):
|
||||||
|
print("> %s" % s)
|
||||||
|
self.cpu_ser.serial.write(s.encode())
|
||||||
|
|
||||||
|
def print_error(self, message):
|
||||||
|
RED = '\033[0;31m'
|
||||||
|
NO_COLOR = '\033[0m'
|
||||||
|
print(RED + message + NO_COLOR)
|
||||||
|
self.logger.update_status_fail(message)
|
||||||
|
|
||||||
|
def get_rel_timestamp(self):
|
||||||
|
now = datetime.datetime.now(tz=datetime.UTC)
|
||||||
|
then_env = os.getenv("CI_JOB_STARTED_AT")
|
||||||
|
if not then_env:
|
||||||
|
return ""
|
||||||
|
delta = now - datetime.datetime.fromisoformat(then_env)
|
||||||
|
return f"[{math.floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}]"
|
||||||
|
|
||||||
|
def get_cur_timestamp(self):
|
||||||
|
return str(int(datetime.datetime.timestamp(datetime.datetime.now())))
|
||||||
|
|
||||||
|
def print_gitlab_section(self, action, name, description, collapse=True):
|
||||||
|
assert action in [SECTION_START, SECTION_END]
|
||||||
|
out = ANSI_ESCAPE + "section_" + action + ":"
|
||||||
|
out += self.get_cur_timestamp() + ":"
|
||||||
|
out += name
|
||||||
|
if action == "start" and collapse:
|
||||||
|
out += "[collapsed=true]"
|
||||||
|
out += "\r" + ANSI_ESCAPE + ANSI_COLOUR
|
||||||
|
out += self.get_rel_timestamp() + " " + description + ANSI_RESET
|
||||||
|
print(out)
|
||||||
|
|
||||||
|
def boot_section(self, action):
|
||||||
|
self.print_gitlab_section(action, "dut_boot", "Booting hardware device", True)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
# Flush any partial commands in the EC's prompt, then ask for a reboot.
|
||||||
|
self.ec_write("\n")
|
||||||
|
self.ec_write("reboot\n")
|
||||||
|
|
||||||
|
bootloader_done = False
|
||||||
|
self.logger.create_job_phase("boot")
|
||||||
|
self.boot_section(SECTION_START)
|
||||||
|
tftp_failures = 0
|
||||||
|
# This is emitted right when the bootloader pauses to check for input.
|
||||||
|
# Emit a ^N character to request network boot, because we don't have a
|
||||||
|
# direct-to-netboot firmware on cheza.
|
||||||
|
for line in self.cpu_ser.lines(timeout=120, phase="bootloader"):
|
||||||
|
if re.search("load_archive: loading locale_en.bin", line):
|
||||||
|
self.cpu_write("\016")
|
||||||
|
bootloader_done = True
|
||||||
|
break
|
||||||
|
|
||||||
|
# The Cheza firmware seems to occasionally get stuck looping in
|
||||||
|
# this error state during TFTP booting, possibly based on amount of
|
||||||
|
# network traffic around it, but it'll usually recover after a
|
||||||
|
# reboot. Currently mostly visible on google-freedreno-cheza-14.
|
||||||
|
if re.search("R8152: Bulk read error 0xffffffbf", line):
|
||||||
|
tftp_failures += 1
|
||||||
|
if tftp_failures >= 10:
|
||||||
|
self.print_error(
|
||||||
|
"Detected intermittent tftp failure, restarting run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# If the board has a netboot firmware and we made it to booting the
|
||||||
|
# kernel, proceed to processing of the test run.
|
||||||
|
if re.search("Booting Linux", line):
|
||||||
|
bootloader_done = True
|
||||||
|
break
|
||||||
|
|
||||||
|
# The Cheza boards have issues with failing to bring up power to
|
||||||
|
# the system sometimes, possibly dependent on ambient temperature
|
||||||
|
# in the farm.
|
||||||
|
if re.search("POWER_GOOD not seen in time", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected intermittent poweron failure, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if not bootloader_done:
|
||||||
|
self.print_error("Failed to make it through bootloader, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
self.logger.create_job_phase("test")
|
||||||
|
for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
|
||||||
|
if re.search("---. end Kernel panic", line):
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# There are very infrequent bus errors during power management transitions
|
||||||
|
# on cheza, which we don't expect to be the case on future boards.
|
||||||
|
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected cheza power management bus error, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# If the network device dies, it's probably not graphics's fault, just try again.
|
||||||
|
if re.search("NETDEV WATCHDOG", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected network device failure, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# These HFI response errors started appearing with the introduction
|
||||||
|
# of piglit runs. CosmicPenguin says:
|
||||||
|
#
|
||||||
|
# "message ID 106 isn't a thing, so likely what happened is that we
|
||||||
|
# got confused when parsing the HFI queue. If it happened on only
|
||||||
|
# one run, then memory corruption could be a possible clue"
|
||||||
|
#
|
||||||
|
# Given that it seems to trigger randomly near a GPU fault and then
|
||||||
|
# break many tests after that, just restart the whole run.
|
||||||
|
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected cheza power management bus error, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if re.search("coreboot.*bootblock starting", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected spontaneous reboot, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
|
||||||
|
self.print_error("Detected cheza MMU fail, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
||||||
|
if result:
|
||||||
|
status = result.group(1)
|
||||||
|
exit_code = int(result.group(2))
|
||||||
|
|
||||||
|
if status == "pass":
|
||||||
|
self.logger.update_dut_job("status", "pass")
|
||||||
|
else:
|
||||||
|
self.logger.update_status_fail("test fail")
|
||||||
|
|
||||||
|
self.logger.update_dut_job("exit_code", exit_code)
|
||||||
|
return exit_code
|
||||||
|
|
||||||
|
self.print_error(
|
||||||
|
"Reached the end of the CPU serial log without finding a result")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument('--cpu', type=str,
|
||||||
|
help='CPU Serial device', required=True)
|
||||||
|
parser.add_argument(
|
||||||
|
'--ec', type=str, help='EC Serial device', required=True)
|
||||||
|
parser.add_argument(
|
||||||
|
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
logger = CustomLogger("results/job_detail.json")
|
||||||
|
logger.update_dut_time("start", None)
|
||||||
|
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger)
|
||||||
|
retval = servo.run()
|
||||||
|
|
||||||
|
# power down the CPU on the device
|
||||||
|
servo.ec_write("power off\n")
|
||||||
|
logger.update_dut_time("end", None)
|
||||||
|
servo.close()
|
||||||
|
|
||||||
|
sys.exit(retval)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
10
.gitlab-ci/bare-metal/eth008-power-down.sh
Executable file
10
.gitlab-ci/bare-metal/eth008-power-down.sh
Executable file
@@ -0,0 +1,10 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
relay=$1
|
||||||
|
|
||||||
|
if [ -z "$relay" ]; then
|
||||||
|
echo "Must supply a relay arg"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
28
.gitlab-ci/bare-metal/eth008-power-relay.py
Executable file
28
.gitlab-ci/bare-metal/eth008-power-relay.py
Executable file
@@ -0,0 +1,28 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import socket
|
||||||
|
|
||||||
|
host = sys.argv[1]
|
||||||
|
port = sys.argv[2]
|
||||||
|
mode = sys.argv[3]
|
||||||
|
relay = sys.argv[4]
|
||||||
|
msg = None
|
||||||
|
|
||||||
|
if mode == "on":
|
||||||
|
msg = b'\x20'
|
||||||
|
else:
|
||||||
|
msg = b'\x21'
|
||||||
|
|
||||||
|
msg += int(relay).to_bytes(1, 'big')
|
||||||
|
msg += b'\x00'
|
||||||
|
|
||||||
|
c = socket.create_connection((host, int(port)))
|
||||||
|
c.sendall(msg)
|
||||||
|
|
||||||
|
data = c.recv(1)
|
||||||
|
c.close()
|
||||||
|
|
||||||
|
if data[0] == b'\x01':
|
||||||
|
print('Command failed')
|
||||||
|
sys.exit(1)
|
12
.gitlab-ci/bare-metal/eth008-power-up.sh
Executable file
12
.gitlab-ci/bare-metal/eth008-power-up.sh
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
relay=$1
|
||||||
|
|
||||||
|
if [ -z "$relay" ]; then
|
||||||
|
echo "Must supply a relay arg"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
||||||
|
sleep 5
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" on "$relay"
|
31
.gitlab-ci/bare-metal/expect-output.sh
Executable file
31
.gitlab-ci/bare-metal/expect-output.sh
Executable file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
STRINGS=$(mktemp)
|
||||||
|
ERRORS=$(mktemp)
|
||||||
|
|
||||||
|
trap 'rm $STRINGS; rm $ERRORS;' EXIT
|
||||||
|
|
||||||
|
FILE=$1
|
||||||
|
shift 1
|
||||||
|
|
||||||
|
while getopts "f:e:" opt; do
|
||||||
|
case $opt in
|
||||||
|
f) echo "$OPTARG" >> "$STRINGS";;
|
||||||
|
e) echo "$OPTARG" >> "$STRINGS" ; echo "$OPTARG" >> "$ERRORS";;
|
||||||
|
*) exit
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
shift $((OPTIND -1))
|
||||||
|
|
||||||
|
echo "Waiting for $FILE to say one of following strings"
|
||||||
|
cat "$STRINGS"
|
||||||
|
|
||||||
|
while ! grep -E -wf "$STRINGS" "$FILE"; do
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
|
||||||
|
if grep -E -wf "$ERRORS" "$FILE"; then
|
||||||
|
exit 1
|
||||||
|
fi
|
171
.gitlab-ci/bare-metal/fastboot.sh
Executable file
171
.gitlab-ci/bare-metal/fastboot.sh
Executable file
@@ -0,0 +1,171 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
||||||
|
# shellcheck disable=SC2034
|
||||||
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
|
. "$SCRIPTS_DIR"/setup-test-env.sh
|
||||||
|
|
||||||
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
|
CI_COMMON=$CI_PROJECT_DIR/install/common
|
||||||
|
|
||||||
|
if [ -z "$BM_SERIAL" ] && [ -z "$BM_SERIAL_SCRIPT" ]; then
|
||||||
|
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "BM_SERIAL:"
|
||||||
|
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
|
||||||
|
echo "BM_SERIAL_SCRIPT:"
|
||||||
|
echo " This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POWERUP" ]; then
|
||||||
|
echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is a shell script that should reset the device and begin its boot sequence"
|
||||||
|
echo "such that it pauses at fastboot."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POWERDOWN" ]; then
|
||||||
|
echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is a shell script that should power off the device."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_FASTBOOT_SERIAL" ]; then
|
||||||
|
echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This must be the a stable-across-resets fastboot serial number."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_KERNEL" ]; then
|
||||||
|
echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_DTB" ]; then
|
||||||
|
echo "Must set BM_DTB to your board's DTB file in the job's variables:"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_ROOTFS" ]; then
|
||||||
|
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
|
||||||
|
BM_FASTBOOT_NFSROOT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
section_start prepare_rootfs "Preparing rootfs components"
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
# Clear out any previous run's artifacts.
|
||||||
|
rm -rf results/
|
||||||
|
mkdir -p results/
|
||||||
|
|
||||||
|
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
|
||||||
|
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
|
||||||
|
# state, since it's volume-mounted on the host.
|
||||||
|
rsync -a --delete $BM_ROOTFS/ /nfs/
|
||||||
|
mkdir -p /nfs/results
|
||||||
|
. $BM/rootfs-setup.sh /nfs
|
||||||
|
|
||||||
|
# Root on NFS, no need for an inintramfs.
|
||||||
|
rm -f rootfs.cpio.gz
|
||||||
|
touch rootfs.cpio
|
||||||
|
gzip rootfs.cpio
|
||||||
|
else
|
||||||
|
# Create the rootfs in a temp dir
|
||||||
|
rsync -a --delete $BM_ROOTFS/ rootfs/
|
||||||
|
. $BM/rootfs-setup.sh rootfs
|
||||||
|
|
||||||
|
# Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
|
||||||
|
# these devices use it and it would take up space in the initrd.
|
||||||
|
|
||||||
|
if [ -n "$PIGLIT_PROFILES" ]; then
|
||||||
|
EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
|
||||||
|
else
|
||||||
|
EXCLUDE_FILTER="piglit|python"
|
||||||
|
fi
|
||||||
|
|
||||||
|
pushd rootfs
|
||||||
|
find -H . | \
|
||||||
|
grep -E -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
|
||||||
|
grep -E -v "traces-db|apitrace|renderdoc" | \
|
||||||
|
grep -E -v $EXCLUDE_FILTER | \
|
||||||
|
cpio -H newc -o | \
|
||||||
|
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
|
||||||
|
popd
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"$BM_KERNEL" -o kernel
|
||||||
|
# FIXME: modules should be supplied too
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"$BM_DTB" -o dtb
|
||||||
|
|
||||||
|
cat kernel dtb > Image.gz-dtb
|
||||||
|
|
||||||
|
elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
||||||
|
|
||||||
|
if [ -n "$BM_DTB" ]; then
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o dtb
|
||||||
|
fi
|
||||||
|
|
||||||
|
cat kernel dtb > Image.gz-dtb || echo "No DTB available, using pure kernel."
|
||||||
|
rm kernel
|
||||||
|
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "$BM_ROOTFS/"
|
||||||
|
rm modules.tar.zst &
|
||||||
|
else
|
||||||
|
cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
|
||||||
|
cp /baremetal-files/"$BM_DTB".dtb dtb
|
||||||
|
fi
|
||||||
|
|
||||||
|
export PATH=$BM:$PATH
|
||||||
|
|
||||||
|
mkdir -p artifacts
|
||||||
|
mkbootimg.py \
|
||||||
|
--kernel Image.gz-dtb \
|
||||||
|
--ramdisk rootfs.cpio.gz \
|
||||||
|
--dtb dtb \
|
||||||
|
--cmdline "$BM_CMDLINE" \
|
||||||
|
$BM_MKBOOT_PARAMS \
|
||||||
|
--header_version 2 \
|
||||||
|
-o artifacts/fastboot.img
|
||||||
|
|
||||||
|
rm Image.gz-dtb dtb
|
||||||
|
|
||||||
|
# Start background command for talking to serial if we have one.
|
||||||
|
if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
||||||
|
$BM_SERIAL_SCRIPT > results/serial-output.txt &
|
||||||
|
|
||||||
|
while [ ! -e results/serial-output.txt ]; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
section_end prepare_rootfs
|
||||||
|
|
||||||
|
set +e
|
||||||
|
$BM/fastboot_run.py \
|
||||||
|
--dev="$BM_SERIAL" \
|
||||||
|
--test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} \
|
||||||
|
--fbserial="$BM_FASTBOOT_SERIAL" \
|
||||||
|
--powerup="$BM_POWERUP" \
|
||||||
|
--powerdown="$BM_POWERDOWN"
|
||||||
|
ret=$?
|
||||||
|
set -e
|
||||||
|
|
||||||
|
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
|
||||||
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
|
# will look for them.
|
||||||
|
cp -Rp /nfs/results/. results/
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit $ret
|
159
.gitlab-ci/bare-metal/fastboot_run.py
Executable file
159
.gitlab-ci/bare-metal/fastboot_run.py
Executable file
@@ -0,0 +1,159 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright © 2020 Google LLC
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
from serial_buffer import SerialBuffer
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
|
||||||
|
|
||||||
|
class FastbootRun:
|
||||||
|
def __init__(self, args, test_timeout):
|
||||||
|
self.powerup = args.powerup
|
||||||
|
self.ser = SerialBuffer(
|
||||||
|
args.dev, "results/serial-output.txt", "R SERIAL> ")
|
||||||
|
self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(
|
||||||
|
ser=args.fbserial)
|
||||||
|
self.test_timeout = test_timeout
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.ser.close()
|
||||||
|
|
||||||
|
def print_error(self, message):
|
||||||
|
RED = '\033[0;31m'
|
||||||
|
NO_COLOR = '\033[0m'
|
||||||
|
print(RED + message + NO_COLOR)
|
||||||
|
|
||||||
|
def logged_system(self, cmd, timeout=60):
|
||||||
|
print("Running '{}'".format(cmd))
|
||||||
|
try:
|
||||||
|
return subprocess.call(cmd, shell=True, timeout=timeout)
|
||||||
|
except subprocess.TimeoutExpired:
|
||||||
|
self.print_error("timeout, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
if ret := self.logged_system(self.powerup):
|
||||||
|
return ret
|
||||||
|
|
||||||
|
fastboot_ready = False
|
||||||
|
for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
|
||||||
|
if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \
|
||||||
|
re.search("Listening for fastboot command on", line):
|
||||||
|
fastboot_ready = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if re.search("data abort", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected crash during boot, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if not fastboot_ready:
|
||||||
|
self.print_error(
|
||||||
|
"Failed to get to fastboot prompt, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
if ret := self.logged_system(self.fastboot):
|
||||||
|
return ret
|
||||||
|
|
||||||
|
print_more_lines = -1
|
||||||
|
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
||||||
|
if print_more_lines == 0:
|
||||||
|
return 1
|
||||||
|
if print_more_lines > 0:
|
||||||
|
print_more_lines -= 1
|
||||||
|
|
||||||
|
if re.search("---. end Kernel panic", line):
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# The db820c boards intermittently reboot. Just restart the run
|
||||||
|
# when if we see a reboot after we got past fastboot.
|
||||||
|
if re.search("PON REASON", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected spontaneous reboot, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# db820c sometimes wedges around iommu fault recovery
|
||||||
|
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected kernel soft lockup, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# If the network device dies, it's probably not graphics's fault, just try again.
|
||||||
|
if re.search("NETDEV WATCHDOG", line):
|
||||||
|
self.print_error(
|
||||||
|
"Detected network device failure, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
# A3xx recovery doesn't quite work. Sometimes the GPU will get
|
||||||
|
# wedged and recovery will fail (because power can't be reset?)
|
||||||
|
# This assumes that the jobs are sufficiently well-tested that GPU
|
||||||
|
# hangs aren't always triggered, so just try again. But print some
|
||||||
|
# more lines first so that we get better information on the cause
|
||||||
|
# of the hang. Once a hang happens, it's pretty chatty.
|
||||||
|
if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
|
||||||
|
self.print_error(
|
||||||
|
"Detected GPU hang, abandoning run.")
|
||||||
|
if print_more_lines == -1:
|
||||||
|
print_more_lines = 30
|
||||||
|
|
||||||
|
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
||||||
|
if result:
|
||||||
|
status = result.group(1)
|
||||||
|
exit_code = int(result.group(2))
|
||||||
|
|
||||||
|
return exit_code
|
||||||
|
|
||||||
|
self.print_error(
|
||||||
|
"Reached the end of the CPU serial log without finding a result, abandoning run.")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument(
|
||||||
|
'--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
|
||||||
|
parser.add_argument('--powerup', type=str,
|
||||||
|
help='shell command for rebooting', required=True)
|
||||||
|
parser.add_argument('--powerdown', type=str,
|
||||||
|
help='shell command for powering off', required=True)
|
||||||
|
parser.add_argument('--fbserial', type=str,
|
||||||
|
help='fastboot serial number of the board', required=True)
|
||||||
|
parser.add_argument('--test-timeout', type=int,
|
||||||
|
help='Test phase timeout (minutes)', required=True)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
fastboot = FastbootRun(args, args.test_timeout * 60)
|
||||||
|
|
||||||
|
retval = fastboot.run()
|
||||||
|
fastboot.close()
|
||||||
|
|
||||||
|
fastboot.logged_system(args.powerdown)
|
||||||
|
|
||||||
|
sys.exit(retval)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
@@ -1,129 +0,0 @@
|
|||||||
.baremetal-test:
|
|
||||||
extends:
|
|
||||||
- .test
|
|
||||||
# Cancel job if a newer commit is pushed to the same branch
|
|
||||||
interruptible: true
|
|
||||||
before_script:
|
|
||||||
- !reference [.download_s3, before_script]
|
|
||||||
variables:
|
|
||||||
BM_ROOTFS: /rootfs-${DEBIAN_ARCH}
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME_SLUG}"
|
|
||||||
paths:
|
|
||||||
- results/
|
|
||||||
- serial*.txt
|
|
||||||
exclude:
|
|
||||||
- results/*.shader_cache
|
|
||||||
reports:
|
|
||||||
junit: results/junit.xml
|
|
||||||
|
|
||||||
# ARM testing of bare-metal boards attached to an x86 gitlab-runner system
|
|
||||||
.baremetal-test-arm32-gl:
|
|
||||||
extends:
|
|
||||||
- .baremetal-test
|
|
||||||
- .use-debian/baremetal_arm32_test-gl
|
|
||||||
variables:
|
|
||||||
DEBIAN_ARCH: armhf
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm32-default-debugoptimized
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm32_test-gl
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm32
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
# ARM64 testing of bare-metal boards attached to an x86 gitlab-runner system
|
|
||||||
.baremetal-test-arm64-gl:
|
|
||||||
extends:
|
|
||||||
- .baremetal-test
|
|
||||||
- .use-debian/baremetal_arm64_test-gl
|
|
||||||
variables:
|
|
||||||
DEBIAN_ARCH: arm64
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-default-debugoptimized
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
# ARM64 testing of bare-metal boards attached to an x86 gitlab-runner system
|
|
||||||
.baremetal-test-arm64-vk:
|
|
||||||
extends:
|
|
||||||
- .baremetal-test
|
|
||||||
- .use-debian/baremetal_arm64_test-vk
|
|
||||||
variables:
|
|
||||||
DEBIAN_ARCH: arm64
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-default-debugoptimized
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-vk
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
# ARM32/64 testing of bare-metal boards attached to an x86 gitlab-runner system, using an asan mesa build
|
|
||||||
.baremetal-arm32-asan-test-gl:
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm32-asan-debugoptimized
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm32_test-gl
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm32-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
.baremetal-arm64-asan-test-gl:
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-asan-debugoptimized
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
.baremetal-arm64-asan-test-vk:
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-asan-debugoptimized
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-vk
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
.baremetal-arm64-ubsan-test-gl:
|
|
||||||
extends:
|
|
||||||
- .baremetal-test
|
|
||||||
- .use-debian/baremetal_arm64_test-gl
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-ubsan-debugoptimized
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-ubsan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
.baremetal-arm64-ubsan-test-vk:
|
|
||||||
extends:
|
|
||||||
- .baremetal-test
|
|
||||||
- .use-debian/baremetal_arm64_test-vk
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-ubsan-debugoptimized
|
|
||||||
needs:
|
|
||||||
- job: debian/baremetal_arm64_test-vk
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-ubsan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
|
|
||||||
.baremetal-deqp-test:
|
|
||||||
variables:
|
|
||||||
HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
|
|
||||||
FDO_CI_CONCURRENT: 0 # Default to number of CPUs
|
|
10
.gitlab-ci/bare-metal/google-power-down.sh
Executable file
10
.gitlab-ci/bare-metal/google-power-down.sh
Executable file
@@ -0,0 +1,10 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
relay=$1
|
||||||
|
|
||||||
|
if [ -z "$relay" ]; then
|
||||||
|
echo "Must supply a relay arg"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
19
.gitlab-ci/bare-metal/google-power-relay.py
Executable file
19
.gitlab-ci/bare-metal/google-power-relay.py
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import serial
|
||||||
|
|
||||||
|
mode = sys.argv[1]
|
||||||
|
relay = sys.argv[2]
|
||||||
|
|
||||||
|
# our relays are "off" means "board is powered".
|
||||||
|
mode_swap = {
|
||||||
|
"on": "off",
|
||||||
|
"off": "on",
|
||||||
|
}
|
||||||
|
mode = mode_swap[mode]
|
||||||
|
|
||||||
|
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
|
||||||
|
command = "relay {} {}\n\r".format(mode, relay)
|
||||||
|
ser.write(command.encode())
|
||||||
|
ser.close()
|
12
.gitlab-ci/bare-metal/google-power-up.sh
Executable file
12
.gitlab-ci/bare-metal/google-power-up.sh
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
relay=$1
|
||||||
|
|
||||||
|
if [ -z "$relay" ]; then
|
||||||
|
echo "Must supply a relay arg"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
||||||
|
sleep 5
|
||||||
|
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py on "$relay"
|
569
.gitlab-ci/bare-metal/mkbootimg.py
Executable file
569
.gitlab-ci/bare-metal/mkbootimg.py
Executable file
@@ -0,0 +1,569 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2015, The Android Open Source Project
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""Creates the boot image."""
|
||||||
|
from argparse import (ArgumentParser, ArgumentTypeError,
|
||||||
|
FileType, RawDescriptionHelpFormatter)
|
||||||
|
from hashlib import sha1
|
||||||
|
from os import fstat
|
||||||
|
from struct import pack
|
||||||
|
import array
|
||||||
|
import collections
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
# Constant and structure definition is in
|
||||||
|
# system/tools/mkbootimg/include/bootimg/bootimg.h
|
||||||
|
# Boot image magic and fixed field sizes (bytes).
BOOT_MAGIC = 'ANDROID!'
BOOT_MAGIC_SIZE = 8
BOOT_NAME_SIZE = 16
BOOT_ARGS_SIZE = 512
BOOT_EXTRA_ARGS_SIZE = 1024

# Header sizes per boot image header version.
BOOT_IMAGE_HEADER_V1_SIZE = 1648
BOOT_IMAGE_HEADER_V2_SIZE = 1660
BOOT_IMAGE_HEADER_V3_SIZE = 1580
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
BOOT_IMAGE_HEADER_V4_SIZE = 1584
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096

# Vendor boot image magic and field sizes.
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
VENDOR_BOOT_MAGIC_SIZE = 8
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
VENDOR_BOOT_ARGS_SIZE = 2048
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128

# Vendor ramdisk fragment types and table layout.
VENDOR_RAMDISK_TYPE_NONE = 0
VENDOR_RAMDISK_TYPE_PLATFORM = 1
VENDOR_RAMDISK_TYPE_RECOVERY = 2
VENDOR_RAMDISK_TYPE_DLKM = 3
VENDOR_RAMDISK_NAME_SIZE = 32
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108

# Names with special meaning, mustn't be specified in --ramdisk_name.
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}

PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
|
||||||
|
def filesize(f):
    """Return the size in bytes of an open file, or 0 when unavailable.

    Args:
        f: An open file object, or None.

    Returns:
        The st_size reported by fstat, or 0 if f is None or fstat fails.
    """
    if f is None:
        return 0
    try:
        return fstat(f.fileno()).st_size
    except OSError:
        return 0
|
||||||
|
def update_sha(sha, f):
    """Fold one image section into the legacy boot image ID hash.

    Each section contributes its raw bytes followed by its size packed as
    a 32-bit unsigned int; a missing section contributes a zero size only.
    """
    if not f:
        sha.update(pack('I', 0))
        return
    sha.update(f.read())
    f.seek(0)  # rewind so the section can be written out afterwards
    sha.update(pack('I', filesize(f)))
|
||||||
|
def pad_file(f, padding):
    """Write zero bytes so f's position is aligned to 'padding'.

    'padding' must be a power of two: the bitmask arithmetic computes the
    distance from the current offset to the next aligned offset.
    """
    remainder = f.tell() & (padding - 1)
    pad = (padding - remainder) & (padding - 1)
    f.write(pack(str(pad) + 'x'))
|
||||||
|
def get_number_of_pages(image_size, page_size):
    """calculates the number of pages required for the image"""
    # Ceiling division via negated floor division.
    return -(-image_size // page_size)
|
||||||
|
def get_recovery_dtbo_offset(args):
    """calculates the offset of recovery_dtbo image in the boot image"""
    # The recovery DTBO follows the header, kernel, ramdisk and second
    # sections, each rounded up to a whole number of flash pages.
    pages_before = 1  # header occupies a page
    for section in (args.kernel, args.ramdisk, args.second):
        pages_before += get_number_of_pages(filesize(section), args.pagesize)
    return args.pagesize * pages_before
|
||||||
|
def write_header_v3_and_above(args):
    """Write a v3/v4 boot image header to args.output, page padded."""
    boot_header_size = (BOOT_IMAGE_HEADER_V4_SIZE if args.header_version > 3
                        else BOOT_IMAGE_HEADER_V3_SIZE)

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # os version and patch level
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    args.output.write(pack('I', boot_header_size))
    # reserved
    args.output.write(pack('4I', 0, 0, 0, 0))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
                           args.cmdline))
    if args.header_version >= 4:
        # The signature used to verify boot image v4.
        args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
    pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
|
||||||
|
def write_vendor_boot_header(args):
    """Write the v3/v4 vendor boot header to args.vendor_boot, page padded.

    Raises:
        ValueError: if the DTB image is missing or empty.
    """
    if filesize(args.dtb) == 0:
        raise ValueError('DTB image must not be empty.')

    if args.header_version > 3:
        vendor_ramdisk_size = args.vendor_ramdisk_total_size
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
    else:
        vendor_ramdisk_size = filesize(args.vendor_ramdisk)
        vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE

    args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
                                VENDOR_BOOT_MAGIC.encode()))
    # version of boot image header
    args.vendor_boot.write(pack('I', args.header_version))
    # flash page size
    args.vendor_boot.write(pack('I', args.pagesize))
    # kernel physical load address
    args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
    # ramdisk physical load address
    args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
    # ramdisk size in bytes
    args.vendor_boot.write(pack('I', vendor_ramdisk_size))
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
                                args.vendor_cmdline))
    # kernel tags physical load address
    args.vendor_boot.write(pack('I', args.base + args.tags_offset))
    # asciiz product name
    args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
    # header size in bytes
    args.vendor_boot.write(pack('I', vendor_boot_header_size))
    # dtb size in bytes
    args.vendor_boot.write(pack('I', filesize(args.dtb)))
    # dtb physical load address
    args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))

    if args.header_version > 3:
        vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
                                     VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
        # vendor ramdisk table size in bytes
        args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
        # number of vendor ramdisk table entries
        args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
        # vendor ramdisk table entry size in bytes
        args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
        # bootconfig section size in bytes
        args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
    pad_file(args.vendor_boot, args.pagesize)
|
||||||
|
def write_header(args):
    """Write the boot image header for any supported header version.

    Versions 3/4 are delegated to write_header_v3_and_above (no image ID);
    versions 0-2 write the legacy header and return the SHA-1 image ID.

    Raises:
        ValueError: on unsupported header versions or an empty DTB (v2).
    """
    if args.header_version > 4:
        raise ValueError(
            f'Boot header version {args.header_version} not supported')
    if args.header_version in {3, 4}:
        return write_header_v3_and_above(args)

    # Absent sections load at address 0 by convention.
    ramdisk_load_address = ((args.base + args.ramdisk_offset)
                            if filesize(args.ramdisk) > 0 else 0)
    second_load_address = ((args.base + args.second_offset)
                           if filesize(args.second) > 0 else 0)

    args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
    # kernel size in bytes
    args.output.write(pack('I', filesize(args.kernel)))
    # kernel physical load address
    args.output.write(pack('I', args.base + args.kernel_offset))
    # ramdisk size in bytes
    args.output.write(pack('I', filesize(args.ramdisk)))
    # ramdisk physical load address
    args.output.write(pack('I', ramdisk_load_address))
    # second bootloader size in bytes
    args.output.write(pack('I', filesize(args.second)))
    # second bootloader physical load address
    args.output.write(pack('I', second_load_address))
    # kernel tags physical load address
    args.output.write(pack('I', args.base + args.tags_offset))
    # flash page size
    args.output.write(pack('I', args.pagesize))
    # version of boot image header
    args.output.write(pack('I', args.header_version))
    # os version and patch level
    args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
    # asciiz product name
    args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
    args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))

    # Image ID: SHA-1 over each section's bytes and size, in layout order.
    sha = sha1()
    update_sha(sha, args.kernel)
    update_sha(sha, args.ramdisk)
    update_sha(sha, args.second)
    if args.header_version > 0:
        update_sha(sha, args.recovery_dtbo)
    if args.header_version > 1:
        update_sha(sha, args.dtb)
    img_id = pack('32s', sha.digest())

    args.output.write(img_id)
    args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))

    if args.header_version > 0:
        if args.recovery_dtbo:
            # recovery dtbo size in bytes
            args.output.write(pack('I', filesize(args.recovery_dtbo)))
            # recovery dtbo offset in the boot image
            args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
        else:
            # Set to zero if no recovery dtbo
            args.output.write(pack('I', 0))
            args.output.write(pack('Q', 0))

    # Populate boot image header size for header versions 1 and 2.
    if args.header_version == 1:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
    elif args.header_version == 2:
        args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))

    if args.header_version > 1:
        if filesize(args.dtb) == 0:
            raise ValueError('DTB image must not be empty.')
        # dtb size in bytes
        args.output.write(pack('I', filesize(args.dtb)))
        # dtb physical load address
        args.output.write(pack('Q', args.base + args.dtb_offset))

    pad_file(args.output, args.pagesize)
    return img_id
|
||||||
|
class AsciizBytes:
    """Parses a string and encodes it as an asciiz bytes object.

    >>> AsciizBytes(bufsize=4)('foo')
    b'foo\\x00'
    >>> AsciizBytes(bufsize=4)('foob')
    Traceback (most recent call last):
        ...
    argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
    """

    def __init__(self, bufsize):
        # Maximum buffer size including the NUL terminator.
        self.bufsize = bufsize

    def __call__(self, arg):
        encoded = arg.encode() + b'\x00'
        if len(encoded) > self.bufsize:
            raise ArgumentTypeError(
                'Encoded asciiz length exceeded: '
                f'max {self.bufsize}, got {len(encoded)}')
        return encoded
|
||||||
|
class VendorRamdiskTableBuilder:
    """Vendor ramdisk table builder.

    Attributes:
        entries: A list of VendorRamdiskTableEntry namedtuple.
        ramdisk_total_size: Total size in bytes of all ramdisks in the table.
    """

    VendorRamdiskTableEntry = collections.namedtuple(  # pylint: disable=invalid-name
        'VendorRamdiskTableEntry',
        ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
         'ramdisk_name', 'board_id'])

    def __init__(self):
        self.entries = []
        self.ramdisk_total_size = 0
        # Names already seen; used to reject duplicates.
        self.ramdisk_names = set()

    def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
        """Validate and append one ramdisk fragment to the table."""
        # Strip any trailing null for simple comparison.
        stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
        if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
            raise ValueError(
                f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
        if stripped_ramdisk_name in self.ramdisk_names:
            raise ValueError(
                f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
        self.ramdisk_names.add(stripped_ramdisk_name)

        # A missing board_id becomes an all-zero vector of the fixed size.
        if board_id is None:
            board_id = array.array(
                'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
        else:
            board_id = array.array('I', board_id)
        if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
            raise ValueError('board_id size must be '
                             f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')

        with open(ramdisk_path, 'rb') as f:
            ramdisk_size = filesize(f)
        # The new fragment starts at the current running total offset.
        self.entries.append(self.VendorRamdiskTableEntry(
            ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
            ramdisk_name, board_id))
        self.ramdisk_total_size += ramdisk_size

    def write_ramdisks_padded(self, fout, alignment):
        """Concatenate all fragment payloads, padding only the final end."""
        for entry in self.entries:
            with open(entry.ramdisk_path, 'rb') as f:
                fout.write(f.read())
        pad_file(fout, alignment)

    def write_entries_padded(self, fout, alignment):
        """Serialize the v4 table entries, then pad to 'alignment'."""
        for entry in self.entries:
            fout.write(pack('I', entry.ramdisk_size))
            fout.write(pack('I', entry.ramdisk_offset))
            fout.write(pack('I', entry.ramdisk_type))
            fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
                            entry.ramdisk_name))
            fout.write(entry.board_id)
        pad_file(fout, alignment)
|
||||||
|
def write_padded_file(f_out, f_in, padding):
    """Copy f_in into f_out and pad to 'padding'; no-op when f_in is None."""
    if f_in is None:
        return
    f_out.write(f_in.read())
    pad_file(f_out, padding)
|
||||||
|
def parse_int(x):
    """Parse a decimal/hex/octal/binary literal per Python's base-0 rules."""
    return int(x, base=0)
|
||||||
|
def parse_os_version(x):
    """Encode an 'A[.B[.C]]' OS version as the packed header field.

    Layout: major << 14 | minor << 7 | patch (7 bits each).
    Unparseable input yields 0.
    """
    match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
    if not match:
        return 0
    major = int(match.group(1))
    minor = patch = 0
    if match.lastindex >= 2:
        minor = int(match.group(2))
    if match.lastindex == 3:
        patch = int(match.group(3))
    # 7 bits allocated for each field
    assert major < 128
    assert minor < 128
    assert patch < 128
    return (major << 14) | (minor << 7) | patch
|
||||||
|
def parse_os_patch_level(x):
    """Encode a 'YYYY-MM[-DD]' patch level as (year-2000) << 4 | month.

    The day, if present, is ignored. Unparseable input yields 0.
    """
    match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
    if not match:
        return 0
    year = int(match.group(1)) - 2000
    month = int(match.group(2))
    # 7 bits allocated for the year, 4 bits for the month
    assert 0 <= year < 128
    assert 0 < month <= 12
    return (year << 4) | month
|
||||||
|
def parse_vendor_ramdisk_type(x):
    """Map a symbolic ramdisk type name to its numeric value.

    Unknown names fall back to integer parsing (base-0).
    """
    type_dict = {
        'none': VENDOR_RAMDISK_TYPE_NONE,
        'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
        'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
        'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
    }
    try:
        return type_dict[x.lower()]
    except KeyError:
        return parse_int(x)
|
||||||
|
def get_vendor_boot_v4_usage():
    """Return the epilog help text for the v4 vendor ramdisk options."""
    return """vendor boot version 4 arguments:
  --ramdisk_type {none,platform,recovery,dlkm}
                        specify the type of the ramdisk
  --ramdisk_name NAME
                        specify the name of the ramdisk
  --board_id{0..15} NUMBER
                        specify the value of the board_id vector, defaults to 0
  --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
                        path to the vendor ramdisk file

These options can be specified multiple times, where each vendor ramdisk
option group ends with a --vendor_ramdisk_fragment option.
Each option group appends an additional ramdisk to the vendor boot image.
"""
|
||||||
|
def parse_vendor_ramdisk_args(args, args_list):
    """Parses vendor ramdisk specific arguments.

    Args:
        args: An argparse.Namespace object. Parsed results are stored into this
            object.
        args_list: A list of argument strings to be parsed.

    Returns:
        A list argument strings that are not parsed by this method.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
                        default=VENDOR_RAMDISK_TYPE_NONE)
    parser.add_argument('--ramdisk_name',
                        type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
                        required=True)
    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
        parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
    parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)

    unknown_args = []

    vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
    # A plain --vendor_ramdisk becomes the first (unnamed) platform fragment.
    if args.vendor_ramdisk is not None:
        vendor_ramdisk_table_builder.add_entry(
            args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)

    # Consume one option group per --vendor_ramdisk_fragment terminator.
    while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
        idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
        vendor_ramdisk_args = args_list[:idx]
        args_list = args_list[idx:]

        ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
        ramdisk_args_dict = vars(ramdisk_args)
        unknown_args.extend(extra_args)

        ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
        ramdisk_type = ramdisk_args.ramdisk_type
        ramdisk_name = ramdisk_args.ramdisk_name
        board_id = [ramdisk_args_dict[f'board_id{i}']
                    for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
        vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
                                               ramdisk_name, board_id)

    if len(args_list) > 0:
        unknown_args.extend(args_list)

    args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
                                      .ramdisk_total_size)
    args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
                                              .entries)
    args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
    return unknown_args
|
||||||
|
def parse_cmdline():
    """Parse sys.argv into an argparse.Namespace for image generation.

    Also splits the legacy (v0-v2) cmdline into cmdline/extra_cmdline and,
    for v4 vendor boot, folds the per-fragment options into the namespace.

    Raises:
        ValueError: on unrecognized command line arguments.
    """
    # Peek at --header_version first: it decides the cmdline buffer size.
    version_parser = ArgumentParser(add_help=False)
    version_parser.add_argument('--header_version', type=parse_int, default=0)
    if version_parser.parse_known_args()[0].header_version < 3:
        # For boot header v0 to v2, the kernel commandline field is split into
        # two fields, cmdline and extra_cmdline. Both fields are asciiz
        # strings, so we minus one here to ensure the encoded string plus the
        # null-terminator can fit in the buffer size.
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
    else:
        cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            epilog=get_vendor_boot_v4_usage())
    parser.add_argument('--kernel', type=FileType('rb'),
                        help='path to the kernel')
    parser.add_argument('--ramdisk', type=FileType('rb'),
                        help='path to the ramdisk')
    parser.add_argument('--second', type=FileType('rb'),
                        help='path to the second bootloader')
    parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
    dtbo_group = parser.add_mutually_exclusive_group()
    dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
                            help='path to the recovery DTBO')
    dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
                            metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
                            help='path to the recovery ACPIO')
    parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
                        default='', help='kernel command line arguments')
    parser.add_argument('--vendor_cmdline',
                        type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
                        default='',
                        help='vendor boot kernel command line arguments')
    parser.add_argument('--base', type=parse_int, default=0x10000000,
                        help='base address')
    parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
                        help='kernel offset')
    parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
                        help='ramdisk offset')
    parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
                        help='second bootloader offset')
    parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
                        help='dtb offset')
    parser.add_argument('--os_version', type=parse_os_version, default=0,
                        help='operating system version')
    parser.add_argument('--os_patch_level', type=parse_os_patch_level,
                        default=0, help='operating system patch level')
    parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
                        help='tags offset')
    parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
                        default='', help='board name')
    parser.add_argument('--pagesize', type=parse_int,
                        choices=[2**i for i in range(11, 15)], default=2048,
                        help='page size')
    parser.add_argument('--id', action='store_true',
                        help='print the image ID on standard output')
    parser.add_argument('--header_version', type=parse_int, default=0,
                        help='boot image header version')
    parser.add_argument('-o', '--output', type=FileType('wb'),
                        help='output file name')
    parser.add_argument('--gki_signing_algorithm',
                        help='GKI signing algorithm to use')
    parser.add_argument('--gki_signing_key',
                        help='path to RSA private key file')
    parser.add_argument('--gki_signing_signature_args',
                        help='other hash arguments passed to avbtool')
    parser.add_argument('--gki_signing_avbtool_path',
                        help='path to avbtool for boot signature generation')
    parser.add_argument('--vendor_boot', type=FileType('wb'),
                        help='vendor boot output file name')
    parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
                        help='path to the vendor ramdisk')
    parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
                        help='path to the vendor bootconfig file')

    args, extra_args = parser.parse_known_args()
    if args.vendor_boot is not None and args.header_version > 3:
        extra_args = parse_vendor_ramdisk_args(args, extra_args)
    if len(extra_args) > 0:
        raise ValueError(f'Unrecognized arguments: {extra_args}')

    if args.header_version < 3:
        args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
        args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
        assert len(args.cmdline) <= BOOT_ARGS_SIZE
        assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE

    return args
|
||||||
|
def add_boot_image_signature(args, pagesize):
    """Adds the boot image signature.

    Note that the signature will only be verified in VTS to ensure a
    generic boot.img is used. It will not be used by the device
    bootloader at boot time. The bootloader should only verify
    the boot vbmeta at the end of the boot partition (or in the top-level
    vbmeta partition) via the Android Verified Boot process, when the
    device boots.
    """
    args.output.flush()  # Flush the buffer for signature calculation.

    # Appends zeros if the signing key is not specified.
    if not args.gki_signing_key or not args.gki_signing_algorithm:
        zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
        args.output.write(zeros)
        pad_file(args.output, pagesize)
        return

    avbtool = 'avbtool'  # Used from otatools.zip or Android build env.

    # We need to specify the path of avbtool in build/core/Makefile.
    # Because avbtool is not guaranteed to be in $PATH there.
    if args.gki_signing_avbtool_path:
        avbtool = args.gki_signing_avbtool_path

    # Need to specify a value of --partition_size for avbtool to work.
    # We use 64 MB below, but avbtool will not resize the boot image to
    # this size because --do_not_append_vbmeta_image is also specified.
    avbtool_cmd = [
        avbtool, 'add_hash_footer',
        '--partition_name', 'boot',
        '--partition_size', str(64 * 1024 * 1024),
        '--image', args.output.name,
        '--algorithm', args.gki_signing_algorithm,
        '--key', args.gki_signing_key,
        '--salt', 'd00df00d']  # TODO: use a hash of kernel/ramdisk as the salt.

    # Additional arguments passed to avbtool.
    if args.gki_signing_signature_args:
        avbtool_cmd += args.gki_signing_signature_args.split()

    # Outputs the signed vbmeta to a separate file, then append to boot.img
    # as the boot signature.
    with tempfile.TemporaryDirectory() as temp_out_dir:
        boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
        avbtool_cmd += ['--do_not_append_vbmeta_image',
                        '--output_vbmeta_image', boot_signature_output]
        subprocess.check_call(avbtool_cmd)
        with open(boot_signature_output, 'rb') as boot_signature:
            if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
                raise ValueError(
                    f'boot sigature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
            write_padded_file(args.output, boot_signature, pagesize)
|
||||||
|
def write_data(args, pagesize):
    """Write the boot image payload sections after the header, page padded."""
    write_padded_file(args.output, args.kernel, pagesize)
    write_padded_file(args.output, args.ramdisk, pagesize)
    write_padded_file(args.output, args.second, pagesize)

    # recovery DTBO exists only for header versions 1 and 2.
    if args.header_version > 0 and args.header_version < 3:
        write_padded_file(args.output, args.recovery_dtbo, pagesize)
    if args.header_version == 2:
        write_padded_file(args.output, args.dtb, pagesize)
    if args.header_version >= 4:
        add_boot_image_signature(args, pagesize)
|
||||||
|
def write_vendor_boot_data(args):
    """Write the vendor boot payload: ramdisk(s), dtb and, for v4, the table."""
    if args.header_version > 3:
        builder = args.vendor_ramdisk_table_builder
        builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
        builder.write_entries_padded(args.vendor_boot, args.pagesize)
        write_padded_file(args.vendor_boot, args.vendor_bootconfig,
                          args.pagesize)
    else:
        write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
        write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
|
||||||
|
def main():
    """Entry point: parse arguments, then emit the requested image(s).

    Raises:
        ValueError: on incompatible option / header-version combinations.
    """
    args = parse_cmdline()
    if args.vendor_boot is not None:
        if args.header_version not in {3, 4}:
            raise ValueError(
                '--vendor_boot not compatible with given header version')
        if args.header_version == 3 and args.vendor_ramdisk is None:
            raise ValueError('--vendor_ramdisk missing or invalid')
        write_vendor_boot_header(args)
        write_vendor_boot_data(args)
    if args.output is not None:
        if args.second is not None and args.header_version > 2:
            raise ValueError(
                '--second not compatible with given header version')
        img_id = write_header(args)
        # v3+ images always use the fixed 4 KiB page size for padding.
        if args.header_version > 2:
            write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
        else:
            write_data(args, args.pagesize)
        if args.id and img_id is not None:
            print('0x' + ''.join(f'{octet:02x}' for octet in img_id))


if __name__ == '__main__':
    main()
|
@@ -103,11 +103,29 @@ if [ -f "${BM_BOOTFS}" ]; then
|
|||||||
BM_BOOTFS=/tmp/bootfs
|
BM_BOOTFS=/tmp/bootfs
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# If BM_KERNEL and BM_DTS is present
|
||||||
|
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
||||||
|
if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then
|
||||||
|
echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o "${BM_KERNEL}"
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o "${BM_DTB}.dtb"
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
||||||
|
fi
|
||||||
|
|
||||||
date +'%F %T'
|
date +'%F %T'
|
||||||
|
|
||||||
# Install kernel modules (it could be either in /lib/modules or
|
# Install kernel modules (it could be either in /lib/modules or
|
||||||
# /usr/lib/modules, but we want to install in the latter)
|
# /usr/lib/modules, but we want to install in the latter)
|
||||||
if [ -n "${BM_BOOTFS}" ]; then
|
if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then
|
||||||
|
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/
|
||||||
|
rm modules.tar.zst &
|
||||||
|
elif [ -n "${BM_BOOTFS}" ]; then
|
||||||
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
|
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
|
||||||
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
|
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
|
||||||
else
|
else
|
||||||
@@ -118,7 +136,7 @@ fi
|
|||||||
date +'%F %T'
|
date +'%F %T'
|
||||||
|
|
||||||
# Install kernel image + bootloader files
|
# Install kernel image + bootloader files
|
||||||
if [ -z "$BM_BOOTFS" ]; then
|
if [ -n "${EXTERNAL_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
|
||||||
mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
|
mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
|
||||||
else # BM_BOOTFS
|
else # BM_BOOTFS
|
||||||
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
|
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
|
||||||
@@ -126,6 +144,33 @@ fi
|
|||||||
|
|
||||||
date +'%F %T'
|
date +'%F %T'
|
||||||
|
|
||||||
|
# Set up the pxelinux config for Jetson Nano
|
||||||
|
mkdir -p /tftp/pxelinux.cfg
|
||||||
|
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
|
||||||
|
PROMPT 0
|
||||||
|
TIMEOUT 30
|
||||||
|
DEFAULT primary
|
||||||
|
MENU TITLE jetson nano boot options
|
||||||
|
LABEL primary
|
||||||
|
MENU LABEL CI kernel on TFTP
|
||||||
|
LINUX Image
|
||||||
|
FDT tegra210-p3450-0000.dtb
|
||||||
|
APPEND \${cbootargs} $BM_CMDLINE
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Set up the pxelinux config for Jetson TK1
|
||||||
|
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1
|
||||||
|
PROMPT 0
|
||||||
|
TIMEOUT 30
|
||||||
|
DEFAULT primary
|
||||||
|
MENU TITLE jetson TK1 boot options
|
||||||
|
LABEL primary
|
||||||
|
MENU LABEL CI kernel on TFTP
|
||||||
|
LINUX zImage
|
||||||
|
FDT tegra124-jetson-tk1.dtb
|
||||||
|
APPEND \${cbootargs} $BM_CMDLINE
|
||||||
|
EOF
|
||||||
|
|
||||||
# Create the rootfs in the NFS directory
|
# Create the rootfs in the NFS directory
|
||||||
. $BM/rootfs-setup.sh /nfs
|
. $BM/rootfs-setup.sh /nfs
|
||||||
|
|
||||||
|
@@ -87,11 +87,12 @@ class PoERun:
|
|||||||
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
|
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
result = re.search(r"hwci: mesa: exit_code: (\d+)", line)
|
result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line)
|
||||||
if result:
|
if result:
|
||||||
exit_code = int(result.group(1))
|
status = result.group(1)
|
||||||
|
exit_code = int(result.group(2))
|
||||||
|
|
||||||
if exit_code == 0:
|
if status == "pass":
|
||||||
self.logger.update_dut_job("status", "pass")
|
self.logger.update_dut_job("status", "pass")
|
||||||
else:
|
else:
|
||||||
self.logger.update_status_fail("test fail")
|
self.logger.update_status_fail("test fail")
|
||||||
|
@@ -17,13 +17,16 @@ cp "${S3_JWT_FILE}" "${rootfs_dst}${S3_JWT_FILE}"
|
|||||||
|
|
||||||
date +'%F %T'
|
date +'%F %T'
|
||||||
|
|
||||||
|
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
|
||||||
|
cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
|
||||||
|
cp $CI_COMMON/kdl.sh $rootfs_dst/
|
||||||
cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
|
cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
|
|
||||||
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
||||||
echo "Variables passed through:"
|
echo "Variables passed through:"
|
||||||
filter_env_vars | tee $rootfs_dst/set-job-env-vars.sh
|
"$CI_COMMON"/generate-env.sh | tee $rootfs_dst/set-job-env-vars.sh
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
|
41
.gitlab-ci/bare-metal/telnet-buffer.py
Executable file
41
.gitlab-ci/bare-metal/telnet-buffer.py
Executable file
@@ -0,0 +1,41 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright © 2020 Christian Gmeiner
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
# IN THE SOFTWARE.
|
||||||
|
#
|
||||||
|
# Tiny script to read bytes from telnet, and write the output to stdout, with a
|
||||||
|
# buffer in between so we don't lose serial output from its buffer.
|
||||||
|
#
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import telnetlib
|
||||||
|
|
||||||
|
host = sys.argv[1]
|
||||||
|
port = sys.argv[2]
|
||||||
|
|
||||||
|
tn = telnetlib.Telnet(host, port, 1000000)
|
||||||
|
|
||||||
|
while True:
|
||||||
|
bytes = tn.read_some()
|
||||||
|
sys.stdout.buffer.write(bytes)
|
||||||
|
sys.stdout.flush()
|
||||||
|
|
||||||
|
tn.close()
|
7
.gitlab-ci/build/compiler-wrapper-clang++-15.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-clang++-15.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=clang++-15
|
||||||
|
. compiler-wrapper.sh
|
7
.gitlab-ci/build/compiler-wrapper-clang++.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-clang++.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=clang++
|
||||||
|
. compiler-wrapper.sh
|
7
.gitlab-ci/build/compiler-wrapper-clang-15.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-clang-15.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=clang-15
|
||||||
|
. compiler-wrapper.sh
|
7
.gitlab-ci/build/compiler-wrapper-clang.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-clang.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=clang
|
||||||
|
. compiler-wrapper.sh
|
7
.gitlab-ci/build/compiler-wrapper-g++.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-g++.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=g++
|
||||||
|
. compiler-wrapper.sh
|
7
.gitlab-ci/build/compiler-wrapper-gcc.sh
Executable file
7
.gitlab-ci/build/compiler-wrapper-gcc.sh
Executable file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# shellcheck disable=SC1091
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
_COMPILER=gcc
|
||||||
|
. compiler-wrapper.sh
|
21
.gitlab-ci/build/compiler-wrapper.sh
Normal file
21
.gitlab-ci/build/compiler-wrapper.sh
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# shellcheck disable=SC1091
|
||||||
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
if command -V ccache >/dev/null 2>/dev/null; then
|
||||||
|
CCACHE=ccache
|
||||||
|
else
|
||||||
|
CCACHE=
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$@" | grep -E 'meson-private/tmp[^ /]*/testfile.c' >/dev/null; then
|
||||||
|
# Invoked for meson feature check
|
||||||
|
exec $CCACHE $_COMPILER "$@"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$(eval printf "'%s'" "\"\${$(($#-1))}\"")" = "-c" ]; then
|
||||||
|
# Not invoked for linking
|
||||||
|
exec $CCACHE $_COMPILER "$@"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Compiler invoked by ninja for linking. Add -Werror to turn compiler warnings into errors
|
||||||
|
# with LTO. (meson's werror should arguably do this, but meanwhile we need to)
|
||||||
|
exec $CCACHE $_COMPILER "$@" -Werror
|
@@ -1,95 +0,0 @@
|
|||||||
.meson-build-for-tests:
|
|
||||||
extends:
|
|
||||||
- .build-linux
|
|
||||||
stage: build-for-tests
|
|
||||||
script:
|
|
||||||
- &meson-build timeout --verbose ${BUILD_JOB_TIMEOUT_OVERRIDE:-$BUILD_JOB_TIMEOUT} bash --login .gitlab-ci/meson/build.sh
|
|
||||||
- .gitlab-ci/prepare-artifacts.sh
|
|
||||||
|
|
||||||
.meson-build-only:
|
|
||||||
extends:
|
|
||||||
- .meson-build-for-tests
|
|
||||||
- .build-only-delayed-rules
|
|
||||||
stage: build-only
|
|
||||||
script:
|
|
||||||
- *meson-build
|
|
||||||
|
|
||||||
# Shared between windows and Linux
|
|
||||||
.build-common:
|
|
||||||
extends: .build-rules
|
|
||||||
# Cancel job if a newer commit is pushed to the same branch
|
|
||||||
interruptible: true
|
|
||||||
variables:
|
|
||||||
# Build jobs are typically taking between 5-12 minutes, depending on how
|
|
||||||
# much they build and how many new Rust compilers we have to build twice.
|
|
||||||
# Allow 25 minutes as a reasonable margin: beyond this point, something
|
|
||||||
# has gone badly wrong, and we should try again to see if we can get
|
|
||||||
# something from it.
|
|
||||||
#
|
|
||||||
# Some jobs not in the critical path use a higher timeout, particularly
|
|
||||||
# when building with ASan or UBSan.
|
|
||||||
BUILD_JOB_TIMEOUT: 12m
|
|
||||||
RUN_MESON_TESTS: "true"
|
|
||||||
timeout: 16m
|
|
||||||
# We don't want to download any previous job's artifacts
|
|
||||||
dependencies: []
|
|
||||||
artifacts:
|
|
||||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME_SLUG}"
|
|
||||||
when: always
|
|
||||||
paths:
|
|
||||||
- _build/meson-logs/*.txt
|
|
||||||
- _build/meson-logs/strace
|
|
||||||
- _build/.ninja_log
|
|
||||||
- artifacts
|
|
||||||
|
|
||||||
.build-run-long:
|
|
||||||
variables:
|
|
||||||
BUILD_JOB_TIMEOUT: 18m
|
|
||||||
timeout: 25m
|
|
||||||
|
|
||||||
|
|
||||||
# Just Linux
|
|
||||||
.build-linux:
|
|
||||||
extends: .build-common
|
|
||||||
variables:
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=deprecated-declarations
|
|
||||||
CCACHE_COMPILERCHECK: "content"
|
|
||||||
CCACHE_COMPRESS: "true"
|
|
||||||
CCACHE_DIR: /cache/mesa/ccache
|
|
||||||
# Use ccache transparently, and print stats before/after
|
|
||||||
before_script:
|
|
||||||
- !reference [default, before_script]
|
|
||||||
- |
|
|
||||||
export PATH="/usr/lib/ccache:$PATH"
|
|
||||||
export CCACHE_BASEDIR="$PWD"
|
|
||||||
if test -x /usr/bin/ccache; then
|
|
||||||
section_start ccache_before "ccache stats before build"
|
|
||||||
ccache --show-stats
|
|
||||||
section_end ccache_before
|
|
||||||
fi
|
|
||||||
after_script:
|
|
||||||
- if test -x /usr/bin/ccache; then ccache --show-stats | grep "Hits:"; fi
|
|
||||||
- !reference [default, after_script]
|
|
||||||
|
|
||||||
.build-windows:
|
|
||||||
extends:
|
|
||||||
- .build-common
|
|
||||||
- .windows-docker-tags
|
|
||||||
cache:
|
|
||||||
key: ${CI_JOB_NAME}
|
|
||||||
paths:
|
|
||||||
- subprojects/packagecache
|
|
||||||
|
|
||||||
.ci-deqp-artifacts:
|
|
||||||
artifacts:
|
|
||||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME_SLUG}"
|
|
||||||
when: always
|
|
||||||
untracked: false
|
|
||||||
paths:
|
|
||||||
# Watch out! Artifacts are relative to the build dir.
|
|
||||||
# https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
|
|
||||||
- artifacts
|
|
||||||
- _build/meson-logs/*.txt
|
|
||||||
- _build/meson-logs/strace
|
|
||||||
- _build/.ninja_log
|
|
File diff suppressed because it is too large
Load Diff
@@ -1,268 +0,0 @@
|
|||||||
# For CI-tron based testing farm jobs.
|
|
||||||
.ci-tron-test:
|
|
||||||
extends:
|
|
||||||
- .ci-tron-b2c-job-v1
|
|
||||||
variables:
|
|
||||||
GIT_STRATEGY: none
|
|
||||||
B2C_VERSION: v0.9.15.1 # Linux 6.13.7
|
|
||||||
|
|
||||||
SCRIPTS_DIR: install
|
|
||||||
|
|
||||||
CI_TRON_PATTERN__JOB_SUCCESS__REGEX: 'hwci: mesa: exit_code: 0\r$'
|
|
||||||
CI_TRON_PATTERN__SESSION_END__REGEX: '^.*It''s now safe to turn off your computer\r$'
|
|
||||||
|
|
||||||
CI_TRON_TIMEOUT__FIRST_CONSOLE_ACTIVITY__MINUTES: 2
|
|
||||||
CI_TRON_TIMEOUT__FIRST_CONSOLE_ACTIVITY__RETRIES: 3
|
|
||||||
CI_TRON_TIMEOUT__CONSOLE_ACTIVITY__MINUTES: 5
|
|
||||||
|
|
||||||
CI_TRON__B2C_ARTIFACT_EXCLUSION: "*.shader_cache,install/*,*/install/*,*/vkd3d-proton.cache*,vkd3d-proton.cache*,*.qpa"
|
|
||||||
CI_TRON_HTTP_ARTIFACT__INSTALL__PATH: "/install.tar.zst"
|
|
||||||
CI_TRON_HTTP_ARTIFACT__INSTALL__URL: "https://$PIPELINE_ARTIFACTS_BASE/$S3_ARTIFACT_NAME.tar.zst"
|
|
||||||
|
|
||||||
CI_TRON__B2C_MACHINE_REGISTRATION_CMD: "setup --tags $CI_TRON_DUT_SETUP_TAGS"
|
|
||||||
CI_TRON__B2C_IMAGE_UNDER_TEST: $MESA_IMAGE
|
|
||||||
CI_TRON__B2C_EXEC_CMD: "curl --silent --fail-with-body {{ job.http.url }}$CI_TRON_HTTP_ARTIFACT__INSTALL__PATH | tar --zstd --extract && $SCRIPTS_DIR/common/init-stage2.sh"
|
|
||||||
|
|
||||||
# Assume by default this is running deqp, as that's almost always true
|
|
||||||
HWCI_TEST_SCRIPT: install/deqp-runner.sh
|
|
||||||
|
|
||||||
# Keep the job script in the artifacts
|
|
||||||
CI_TRON_JOB_SCRIPT_PATH: results/job_script.sh
|
|
||||||
needs:
|
|
||||||
- !reference [.required-for-hardware-jobs, needs]
|
|
||||||
tags:
|
|
||||||
- farm:$RUNNER_FARM_LOCATION
|
|
||||||
- $CI_TRON_DUT_SETUP_TAGS
|
|
||||||
|
|
||||||
# Override the default before_script, as it is not compatible with the CI-tron environment. We just keep the clearing
|
|
||||||
# of the JWT token for security reasons
|
|
||||||
before_script:
|
|
||||||
- |
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
eval "$S3_JWT_FILE_SCRIPT"
|
|
||||||
|
|
||||||
for var in CI_TRON_DUT_SETUP_TAGS; do
|
|
||||||
if [[ -z "$(eval echo \${$var:-})" ]]; then
|
|
||||||
echo "The required variable '$var' is missing"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Open a section that will be closed by b2c
|
|
||||||
echo -e "\n\e[0Ksection_start:`date +%s`:b2c_kernel_boot[collapsed=true]\r\e[0K\e[0;36m[$(cut -d ' ' -f1 /proc/uptime)]: Submitting the CI-tron job and booting the DUT\e[0m\n"
|
|
||||||
|
|
||||||
# Anything our job places in results/ will be collected by the
|
|
||||||
# Gitlab coordinator for status presentation. results/junit.xml
|
|
||||||
# will be parsed by the UI for more detailed explanations of
|
|
||||||
# test execution.
|
|
||||||
artifacts:
|
|
||||||
when: always
|
|
||||||
name: "${CI_PROJECT_NAME}_${CI_JOB_NAME_SLUG}"
|
|
||||||
paths:
|
|
||||||
- results
|
|
||||||
reports:
|
|
||||||
junit: results/**/junit.xml
|
|
||||||
|
|
||||||
.ci-tron-x86_64-test:
|
|
||||||
extends:
|
|
||||||
- .ci-tron-test
|
|
||||||
variables:
|
|
||||||
CI_TRON_INITRAMFS__B2C__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/initramfs.linux_amd64.cpio.xz'
|
|
||||||
CI_TRON_KERNEL__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/linux-x86_64'
|
|
||||||
|
|
||||||
# Set the following variables if you need AMD, Intel, or NVIDIA support
|
|
||||||
# CI_TRON_INITRAMFS__DEPMOD__URL: "https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/linux-x86_64.depmod.cpio.xz"
|
|
||||||
# CI_TRON_INITRAMFS__GPU__URL: "https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/linux-x86_64.gpu.cpio"
|
|
||||||
# CI_TRON_INITRAMFS__GPU__FORMAT__0__ARCHIVE__KEEP__0__PATH: "(lib/(modules|firmware/amdgpu)/.*)"
|
|
||||||
|
|
||||||
S3_ARTIFACT_NAME: "mesa-x86_64-default-debugoptimized"
|
|
||||||
|
|
||||||
.ci-tron-x86_64-test-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-vk
|
|
||||||
- .ci-tron-x86_64-test
|
|
||||||
needs:
|
|
||||||
- job: debian/x86_64_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-x86_64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-x86_64-test-vk-manual:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-vk
|
|
||||||
- .ci-tron-x86_64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "debian-build-x86_64"
|
|
||||||
needs:
|
|
||||||
- job: debian/x86_64_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-build-x86_64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-x86_64-test-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-gl
|
|
||||||
- .ci-tron-x86_64-test
|
|
||||||
needs:
|
|
||||||
- job: debian/x86_64_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-x86_64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-x86_64-test-gl-manual:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-gl
|
|
||||||
- .ci-tron-x86_64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "debian-build-x86_64"
|
|
||||||
needs:
|
|
||||||
- job: debian/x86_64_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-build-x86_64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test:
|
|
||||||
extends:
|
|
||||||
- .ci-tron-test
|
|
||||||
variables:
|
|
||||||
CI_TRON_INITRAMFS__B2C__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/initramfs.linux_arm64.cpio.xz'
|
|
||||||
CI_TRON_KERNEL__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/linux-arm64'
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm64-default-debugoptimized"
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-vk
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-asan-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-vk
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm64-asan-debugoptimized"
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-ubsan-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-vk
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm64-ubsan-debugoptimized"
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-ubsan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-gl
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-asan-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-gl
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm64-asan-debugoptimized"
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm64-test-ubsan-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-gl
|
|
||||||
- .ci-tron-arm64-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm64-ubsan-debugoptimized"
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm64-ubsan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm32-test:
|
|
||||||
extends:
|
|
||||||
- .ci-tron-test
|
|
||||||
variables:
|
|
||||||
CI_TRON_INITRAMFS__B2C__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/initramfs.linux_arm.cpio.xz'
|
|
||||||
CI_TRON_KERNEL__URL: 'https://gitlab.freedesktop.org/gfx-ci/boot2container/-/releases/$B2C_VERSION/downloads/linux-arm'
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm32-default-debugoptimized"
|
|
||||||
|
|
||||||
.ci-tron-arm32-test-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm32_test-vk
|
|
||||||
- .ci-tron-arm32-test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm32_test-vk
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm32
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm32-test-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm32_test-gl
|
|
||||||
- .ci-tron-arm32-test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm32_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm32
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
||||||
|
|
||||||
.ci-tron-arm32-test-asan-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm32_test-gl
|
|
||||||
- .ci-tron-arm32-test
|
|
||||||
variables:
|
|
||||||
S3_ARTIFACT_NAME: "mesa-arm32-asan-debugoptimized"
|
|
||||||
DEQP_FORCE_ASAN: 1
|
|
||||||
needs:
|
|
||||||
- job: debian/arm32_test-gl
|
|
||||||
artifacts: false
|
|
||||||
optional: true
|
|
||||||
- job: debian-arm32-asan
|
|
||||||
artifacts: false
|
|
||||||
- !reference [.ci-tron-test, needs]
|
|
138
.gitlab-ci/common/generate-env.sh
Executable file
138
.gitlab-ci/common/generate-env.sh
Executable file
@@ -0,0 +1,138 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
VARS=(
|
||||||
|
ACO_DEBUG
|
||||||
|
ARTIFACTS_BASE_URL
|
||||||
|
ASAN_OPTIONS
|
||||||
|
BASE_SYSTEM_FORK_HOST_PREFIX
|
||||||
|
BASE_SYSTEM_MAINLINE_HOST_PREFIX
|
||||||
|
CI_COMMIT_BRANCH
|
||||||
|
CI_COMMIT_REF_NAME
|
||||||
|
CI_COMMIT_TITLE
|
||||||
|
CI_JOB_ID
|
||||||
|
S3_JWT_FILE
|
||||||
|
CI_JOB_STARTED_AT
|
||||||
|
CI_JOB_NAME
|
||||||
|
CI_JOB_URL
|
||||||
|
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
|
||||||
|
CI_MERGE_REQUEST_TITLE
|
||||||
|
CI_NODE_INDEX
|
||||||
|
CI_NODE_TOTAL
|
||||||
|
CI_PAGES_DOMAIN
|
||||||
|
CI_PIPELINE_ID
|
||||||
|
CI_PIPELINE_URL
|
||||||
|
CI_PROJECT_DIR
|
||||||
|
CI_PROJECT_NAME
|
||||||
|
CI_PROJECT_PATH
|
||||||
|
CI_PROJECT_ROOT_NAMESPACE
|
||||||
|
CI_RUNNER_DESCRIPTION
|
||||||
|
CI_SERVER_URL
|
||||||
|
CROSVM_GALLIUM_DRIVER
|
||||||
|
CROSVM_GPU_ARGS
|
||||||
|
CURRENT_SECTION
|
||||||
|
DEQP_BIN_DIR
|
||||||
|
DEQP_CONFIG
|
||||||
|
DEQP_EXPECTED_RENDERER
|
||||||
|
DEQP_FRACTION
|
||||||
|
DEQP_HEIGHT
|
||||||
|
DEQP_RUNNER_MAX_FAILS
|
||||||
|
DEQP_RUNNER_OPTIONS
|
||||||
|
DEQP_SUITE
|
||||||
|
DEQP_TEMP_DIR
|
||||||
|
DEQP_VER
|
||||||
|
DEQP_WIDTH
|
||||||
|
DEVICE_NAME
|
||||||
|
DRIVER_NAME
|
||||||
|
EGL_PLATFORM
|
||||||
|
ETNA_MESA_DEBUG
|
||||||
|
FDO_CI_CONCURRENT
|
||||||
|
FDO_UPSTREAM_REPO
|
||||||
|
FD_MESA_DEBUG
|
||||||
|
FLAKES_CHANNEL
|
||||||
|
FREEDRENO_HANGCHECK_MS
|
||||||
|
GALLIUM_DRIVER
|
||||||
|
GALLIVM_PERF
|
||||||
|
GPU_VERSION
|
||||||
|
GTEST
|
||||||
|
GTEST_FAILS
|
||||||
|
GTEST_FRACTION
|
||||||
|
GTEST_RUNNER_OPTIONS
|
||||||
|
GTEST_SKIPS
|
||||||
|
HWCI_FREQ_MAX
|
||||||
|
HWCI_KERNEL_MODULES
|
||||||
|
HWCI_KVM
|
||||||
|
HWCI_START_WESTON
|
||||||
|
HWCI_START_XORG
|
||||||
|
HWCI_TEST_ARGS
|
||||||
|
HWCI_TEST_SCRIPT
|
||||||
|
IR3_SHADER_DEBUG
|
||||||
|
JOB_ARTIFACTS_BASE
|
||||||
|
JOB_RESULTS_PATH
|
||||||
|
JOB_ROOTFS_OVERLAY_PATH
|
||||||
|
KERNEL_IMAGE_BASE
|
||||||
|
KERNEL_IMAGE_NAME
|
||||||
|
LD_LIBRARY_PATH
|
||||||
|
LIBGL_ALWAYS_SOFTWARE
|
||||||
|
LP_NUM_THREADS
|
||||||
|
MESA_BASE_TAG
|
||||||
|
MESA_BUILD_PATH
|
||||||
|
MESA_DEBUG
|
||||||
|
MESA_GLES_VERSION_OVERRIDE
|
||||||
|
MESA_GLSL_VERSION_OVERRIDE
|
||||||
|
MESA_GL_VERSION_OVERRIDE
|
||||||
|
MESA_IMAGE
|
||||||
|
MESA_IMAGE_PATH
|
||||||
|
MESA_IMAGE_TAG
|
||||||
|
MESA_LOADER_DRIVER_OVERRIDE
|
||||||
|
MESA_SPIRV_LOG_LEVEL
|
||||||
|
MESA_TEMPLATES_COMMIT
|
||||||
|
MESA_VK_ABORT_ON_DEVICE_LOSS
|
||||||
|
MESA_VK_IGNORE_CONFORMANCE_WARNING
|
||||||
|
S3_HOST
|
||||||
|
S3_RESULTS_UPLOAD
|
||||||
|
NIR_DEBUG
|
||||||
|
PAN_I_WANT_A_BROKEN_VULKAN_DRIVER
|
||||||
|
PAN_MESA_DEBUG
|
||||||
|
PANVK_DEBUG
|
||||||
|
PIGLIT_FRACTION
|
||||||
|
PIGLIT_NO_WINDOW
|
||||||
|
PIGLIT_OPTIONS
|
||||||
|
PIGLIT_PLATFORM
|
||||||
|
PIGLIT_PROFILES
|
||||||
|
PIGLIT_REPLAY_ANGLE_TAG
|
||||||
|
PIGLIT_REPLAY_ARTIFACTS_BASE_URL
|
||||||
|
PIGLIT_REPLAY_DEVICE_NAME
|
||||||
|
PIGLIT_REPLAY_EXTRA_ARGS
|
||||||
|
PIGLIT_REPLAY_LOOP_TIMES
|
||||||
|
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE
|
||||||
|
PIGLIT_REPLAY_SUBCOMMAND
|
||||||
|
PIGLIT_RESULTS
|
||||||
|
PIGLIT_RUNNER_OPTIONS
|
||||||
|
PIGLIT_TESTS
|
||||||
|
PIGLIT_TRACES_FILE
|
||||||
|
PIPELINE_ARTIFACTS_BASE
|
||||||
|
RADEON_DEBUG
|
||||||
|
RADV_DEBUG
|
||||||
|
RADV_PERFTEST
|
||||||
|
SKQP_ASSETS_DIR
|
||||||
|
SKQP_BACKENDS
|
||||||
|
TU_DEBUG
|
||||||
|
USE_ANGLE
|
||||||
|
VIRGL_HOST_API
|
||||||
|
VIRGL_RENDER_SERVER
|
||||||
|
WAFFLE_PLATFORM
|
||||||
|
VK_DRIVER
|
||||||
|
ZINK_DESCRIPTORS
|
||||||
|
ZINK_DEBUG
|
||||||
|
LVP_POISON_MEMORY
|
||||||
|
|
||||||
|
# Dead code within Mesa CI, but required by virglrender CI
|
||||||
|
# (because they include our files in their CI)
|
||||||
|
VK_DRIVER_FILES
|
||||||
|
)
|
||||||
|
|
||||||
|
for var in "${VARS[@]}"; do
|
||||||
|
if [ -n "${!var+x}" ]; then
|
||||||
|
echo "export $var=${!var@Q}"
|
||||||
|
fi
|
||||||
|
done
|
@@ -3,10 +3,6 @@
|
|||||||
# Very early init, used to make sure devices and network are set up and
|
# Very early init, used to make sure devices and network are set up and
|
||||||
# reachable.
|
# reachable.
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# ALPINE_X86_64_LAVA_TRIGGER_TAG
|
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
cd /
|
cd /
|
||||||
@@ -27,8 +23,3 @@ echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
|||||||
# Set the time so we can validate certificates before we fetch anything;
|
# Set the time so we can validate certificates before we fetch anything;
|
||||||
# however as not all DUTs have network, make this non-fatal.
|
# however as not all DUTs have network, make this non-fatal.
|
||||||
for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
|
for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
|
||||||
|
|
||||||
# Create a symlink from /dev/fd to /proc/self/fd if /dev/fd is missing.
|
|
||||||
if [ ! -e /dev/fd ]; then
|
|
||||||
ln -s /proc/self/fd /dev/fd
|
|
||||||
fi
|
|
||||||
|
@@ -76,7 +76,9 @@ fi
|
|||||||
# - vmx for Intel VT
|
# - vmx for Intel VT
|
||||||
# - svm for AMD-V
|
# - svm for AMD-V
|
||||||
#
|
#
|
||||||
if [ -n "$HWCI_ENABLE_X86_KVM" ]; then
|
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
|
||||||
|
#
|
||||||
|
if [ "$HWCI_KVM" = "true" ]; then
|
||||||
unset KVM_KERNEL_MODULE
|
unset KVM_KERNEL_MODULE
|
||||||
{
|
{
|
||||||
grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
|
grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
|
||||||
@@ -89,6 +91,11 @@ if [ -n "$HWCI_ENABLE_X86_KVM" ]; then
|
|||||||
echo "WARNING: Failed to detect CPU virtualization extensions"
|
echo "WARNING: Failed to detect CPU virtualization extensions"
|
||||||
} || \
|
} || \
|
||||||
modprobe ${KVM_KERNEL_MODULE}
|
modprobe ${KVM_KERNEL_MODULE}
|
||||||
|
|
||||||
|
mkdir -p /lava-files
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
-o "/lava-files/${KERNEL_IMAGE_NAME}" \
|
||||||
|
"${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
|
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
|
||||||
@@ -102,9 +109,6 @@ export LIBGL_DRIVERS_PATH=/install/lib/dri
|
|||||||
# telling it to look in /usr/local/lib.
|
# telling it to look in /usr/local/lib.
|
||||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
|
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
|
||||||
|
|
||||||
# The Broadcom devices need /usr/local/bin unconditionally added to the path
|
|
||||||
export PATH=/usr/local/bin:$PATH
|
|
||||||
|
|
||||||
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
||||||
export XDG_CACHE_HOME=/tmp
|
export XDG_CACHE_HOME=/tmp
|
||||||
|
|
||||||
@@ -136,14 +140,13 @@ if [ "$HWCI_FREQ_MAX" = "true" ]; then
|
|||||||
# and enable throttling detection & reporting.
|
# and enable throttling detection & reporting.
|
||||||
# Additionally, set the upper limit for CPU scaling frequency to 65% of the
|
# Additionally, set the upper limit for CPU scaling frequency to 65% of the
|
||||||
# maximum permitted, as an additional measure to mitigate thermal throttling.
|
# maximum permitted, as an additional measure to mitigate thermal throttling.
|
||||||
/install/common/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
|
/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Start a little daemon to capture sysfs records and produce a JSON file
|
# Start a little daemon to capture sysfs records and produce a JSON file
|
||||||
KDL_PATH=/install/common/kdl.sh
|
if [ -x /kdl.sh ]; then
|
||||||
if [ -x "$KDL_PATH" ]; then
|
|
||||||
echo "launch kdl.sh!"
|
echo "launch kdl.sh!"
|
||||||
$KDL_PATH &
|
/kdl.sh &
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
||||||
else
|
else
|
||||||
echo "kdl.sh not found!"
|
echo "kdl.sh not found!"
|
||||||
@@ -157,9 +160,8 @@ fi
|
|||||||
|
|
||||||
# Start a little daemon to capture the first devcoredump we encounter. (They
|
# Start a little daemon to capture the first devcoredump we encounter. (They
|
||||||
# expire after 5 minutes, so we poll for them).
|
# expire after 5 minutes, so we poll for them).
|
||||||
CAPTURE_DEVCOREDUMP=/install/common/capture-devcoredump.sh
|
if [ -x /capture-devcoredump.sh ]; then
|
||||||
if [ -x "$CAPTURE_DEVCOREDUMP" ]; then
|
/capture-devcoredump.sh &
|
||||||
$CAPTURE_DEVCOREDUMP &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -223,12 +225,16 @@ set -x
|
|||||||
# kill the job.
|
# kill the job.
|
||||||
cleanup
|
cleanup
|
||||||
|
|
||||||
# upload artifacts (lava jobs)
|
# upload artifacts
|
||||||
if [ -n "$S3_RESULTS_UPLOAD" ]; then
|
if [ -n "$S3_RESULTS_UPLOAD" ]; then
|
||||||
tar --zstd -cf results.tar.zst results/;
|
tar --zstd -cf results.tar.zst results/;
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst
|
ci-fairy s3cp --token-file "${S3_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
|
||||||
|
# as the python ones inside the bare-metal folder
|
||||||
|
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
section_end post_test_cleanup
|
section_end post_test_cleanup
|
||||||
|
|
||||||
@@ -236,6 +242,6 @@ section_end post_test_cleanup
|
|||||||
# the result of our run, so try really hard to get it out rather than losing
|
# the result of our run, so try really hard to get it out rather than losing
|
||||||
# the run. The device gets shut down right at this point, and a630 seems to
|
# the run. The device gets shut down right at this point, and a630 seems to
|
||||||
# enjoy corrupting the last line of serial output before shutdown.
|
# enjoy corrupting the last line of serial output before shutdown.
|
||||||
for _ in $(seq 0 3); do echo "hwci: mesa: exit_code: $EXIT_CODE"; sleep 1; echo; done
|
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done
|
||||||
|
|
||||||
exit $EXIT_CODE
|
exit $EXIT_CODE
|
||||||
|
@@ -35,27 +35,6 @@
|
|||||||
# - gt_act_freq_mhz (the actual GPU freq)
|
# - gt_act_freq_mhz (the actual GPU freq)
|
||||||
# - gt_cur_freq_mhz (the last requested freq)
|
# - gt_cur_freq_mhz (the last requested freq)
|
||||||
#
|
#
|
||||||
# Intel later switched to per-tile sysfs interfaces, which is what the Xe DRM
|
|
||||||
# driver exlusively uses, and the capabilites are now located under the
|
|
||||||
# following directory for the first tile:
|
|
||||||
#
|
|
||||||
# /sys/class/drm/card<n>/device/tile0/gt0/freq0/<freq_info>
|
|
||||||
#
|
|
||||||
# Where <n> is the DRM card index and <freq_info> one of the following:
|
|
||||||
#
|
|
||||||
# - max_freq (enforced maximum freq)
|
|
||||||
# - min_freq (enforced minimum freq)
|
|
||||||
#
|
|
||||||
# The hardware capabilities can be accessed via:
|
|
||||||
#
|
|
||||||
# - rp0_freq (supported maximum freq)
|
|
||||||
# - rpn_freq (supported minimum freq)
|
|
||||||
# - rpe_freq (most efficient freq)
|
|
||||||
#
|
|
||||||
# The current frequency can be read from:
|
|
||||||
# - act_freq (the actual GPU freq)
|
|
||||||
# - cur_freq (the last requested freq)
|
|
||||||
#
|
|
||||||
# Also note that in addition to GPU management, the script offers the
|
# Also note that in addition to GPU management, the script offers the
|
||||||
# possibility to adjust CPU operating frequencies. However, this is currently
|
# possibility to adjust CPU operating frequencies. However, this is currently
|
||||||
# limited to just setting the maximum scaling frequency as percentage of the
|
# limited to just setting the maximum scaling frequency as percentage of the
|
||||||
@@ -71,25 +50,10 @@
|
|||||||
# Constants
|
# Constants
|
||||||
#
|
#
|
||||||
|
|
||||||
# Check if any /sys/class/drm/cardX/device/tile0 directory exists to detect Xe
|
|
||||||
USE_XE=0
|
|
||||||
for i in $(seq 0 15); do
|
|
||||||
if [ -d "/sys/class/drm/card$i/device/tile0" ]; then
|
|
||||||
USE_XE=1
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# GPU
|
# GPU
|
||||||
if [ "$USE_XE" -eq 1 ]; then
|
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
|
||||||
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/device/tile0/gt0/freq0/%s_freq"
|
ENF_FREQ_INFO="max min boost"
|
||||||
ENF_FREQ_INFO="max min"
|
CAP_FREQ_INFO="RP0 RPn RP1"
|
||||||
CAP_FREQ_INFO="rp0 rpn rpe"
|
|
||||||
else
|
|
||||||
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
|
|
||||||
ENF_FREQ_INFO="max min boost"
|
|
||||||
CAP_FREQ_INFO="RP0 RPn RP1"
|
|
||||||
fi
|
|
||||||
ACT_FREQ_INFO="act cur"
|
ACT_FREQ_INFO="act cur"
|
||||||
THROTT_DETECT_SLEEP_SEC=2
|
THROTT_DETECT_SLEEP_SEC=2
|
||||||
THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid
|
THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid
|
||||||
@@ -148,11 +112,7 @@ identify_intel_gpu() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
path=$(print_freq_sysfs_path "" ${i})
|
path=$(print_freq_sysfs_path "" ${i})
|
||||||
if [ "$USE_XE" -eq 1 ]; then
|
path=${path%/*}/device/vendor
|
||||||
path=${path%/*/*/*/*/*}/device/vendor
|
|
||||||
else
|
|
||||||
path=${path%/*}/device/vendor
|
|
||||||
fi
|
|
||||||
|
|
||||||
[ -r "${path}" ] && read vendor < "${path}" && \
|
[ -r "${path}" ] && read vendor < "${path}" && \
|
||||||
[ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0
|
[ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0
|
||||||
@@ -237,13 +197,13 @@ compute_freq_set() {
|
|||||||
|
|
||||||
case "$1" in
|
case "$1" in
|
||||||
+)
|
+)
|
||||||
val=$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}") # FREQ_rp0 or FREQ_RP0
|
val=${FREQ_RP0}
|
||||||
;;
|
;;
|
||||||
-)
|
-)
|
||||||
val=$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") # FREQ_rpn or FREQ_RPn
|
val=${FREQ_RPn}
|
||||||
;;
|
;;
|
||||||
*%)
|
*%)
|
||||||
val=$((${1%?} * $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}") / 100))
|
val=$((${1%?} * FREQ_RP0 / 100))
|
||||||
# Adjust freq to comply with 50 MHz increments
|
# Adjust freq to comply with 50 MHz increments
|
||||||
val=$((val / 50 * 50))
|
val=$((val / 50 * 50))
|
||||||
;;
|
;;
|
||||||
@@ -272,17 +232,15 @@ set_freq_max() {
|
|||||||
|
|
||||||
read_freq_info n min || return $?
|
read_freq_info n min || return $?
|
||||||
|
|
||||||
# FREQ_rp0 or FREQ_RP0
|
[ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && {
|
||||||
[ ${SET_MAX_FREQ} -gt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}") ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
|
log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
|
||||||
"${SET_MAX_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}")"
|
"${SET_MAX_FREQ}" "${FREQ_RP0}"
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
# FREQ_rpn or FREQ_RPn
|
[ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && {
|
||||||
[ ${SET_MAX_FREQ} -lt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
|
log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
|
||||||
"${SET_MIN_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")"
|
"${SET_MIN_FREQ}" "${FREQ_RPn}"
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -294,21 +252,12 @@ set_freq_max() {
|
|||||||
|
|
||||||
[ -z "${DRY_RUN}" ] || return 0
|
[ -z "${DRY_RUN}" ] || return 0
|
||||||
|
|
||||||
# Write to max freq path
|
if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
|
||||||
if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) > /dev/null;
|
$(print_freq_sysfs_path boost) > /dev/null;
|
||||||
then
|
then
|
||||||
log ERROR "Failed to set GPU max frequency"
|
log ERROR "Failed to set GPU max frequency"
|
||||||
return 1
|
return 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Only write to boost if the sysfs file exists, as it's removed in Xe
|
|
||||||
if [ -e "$(print_freq_sysfs_path boost)" ]; then
|
|
||||||
if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path boost) > /dev/null;
|
|
||||||
then
|
|
||||||
log ERROR "Failed to set GPU boost frequency"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
@@ -325,9 +274,9 @@ set_freq_min() {
|
|||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
[ ${SET_MIN_FREQ} -lt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && {
|
[ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && {
|
||||||
log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
|
log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
|
||||||
"${SET_MIN_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")"
|
"${SET_MIN_FREQ}" "${FREQ_RPn}"
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,7 +294,7 @@ set_freq_min() {
|
|||||||
#
|
#
|
||||||
set_freq() {
|
set_freq() {
|
||||||
# Get hw max & min frequencies
|
# Get hw max & min frequencies
|
||||||
read_freq_info n $(echo $CAP_FREQ_INFO | cut -d' ' -f1,2) || return $? # RP0 RPn
|
read_freq_info n RP0 RPn || return $?
|
||||||
|
|
||||||
[ -z "${SET_MAX_FREQ}" ] || {
|
[ -z "${SET_MAX_FREQ}" ] || {
|
||||||
SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
|
SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
|
||||||
@@ -448,7 +397,7 @@ detect_throttling() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
(
|
(
|
||||||
read_freq_info n $(echo $CAP_FREQ_INFO | cut -d' ' -f2) || return $? # RPn
|
read_freq_info n RPn || exit $?
|
||||||
|
|
||||||
while true; do
|
while true; do
|
||||||
sleep ${THROTT_DETECT_SLEEP_SEC}
|
sleep ${THROTT_DETECT_SLEEP_SEC}
|
||||||
@@ -457,13 +406,13 @@ detect_throttling() {
|
|||||||
#
|
#
|
||||||
# The throttling seems to occur when act freq goes below min.
|
# The throttling seems to occur when act freq goes below min.
|
||||||
# However, it's necessary to exclude the idle states, where
|
# However, it's necessary to exclude the idle states, where
|
||||||
# act freq normally reaches rpn and cur goes below min.
|
# act freq normally reaches RPn and cur goes below min.
|
||||||
#
|
#
|
||||||
[ ${FREQ_act} -lt ${FREQ_min} ] && \
|
[ ${FREQ_act} -lt ${FREQ_min} ] && \
|
||||||
[ ${FREQ_act} -gt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && \
|
[ ${FREQ_act} -gt ${FREQ_RPn} ] && \
|
||||||
[ ${FREQ_cur} -ge ${FREQ_min} ] && \
|
[ ${FREQ_cur} -ge ${FREQ_min} ] && \
|
||||||
printf "GPU throttling detected: act=%s min=%s cur=%s rpn=%s\n" \
|
printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \
|
||||||
${FREQ_act} ${FREQ_min} ${FREQ_cur} $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")
|
${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn}
|
||||||
done
|
done
|
||||||
) &
|
) &
|
||||||
|
|
||||||
|
@@ -1,7 +0,0 @@
|
|||||||
variables:
|
|
||||||
CONDITIONAL_BUILD_ANDROID_CTS_TAG: b018634d732f438027ec58c0383615e7
|
|
||||||
CONDITIONAL_BUILD_ANGLE_TAG: f62910e55be46e37cc867d037e4a8121
|
|
||||||
CONDITIONAL_BUILD_CROSVM_TAG: 0f59350b1052bdbb28b65a832b494377
|
|
||||||
CONDITIONAL_BUILD_FLUSTER_TAG: 3bc3afd7468e106afcbfd569a85f34f9
|
|
||||||
CONDITIONAL_BUILD_PIGLIT_TAG: 827b708ab7309721395ea28cec512968
|
|
||||||
CONDITIONAL_BUILD_VKD3D_PROTON_TAG: 82cadf35246e64a8228bf759c9c19e5b
|
|
@@ -1,70 +0,0 @@
|
|||||||
# Build the CI Alpine docker images.
|
|
||||||
#
|
|
||||||
# MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the
|
|
||||||
# image doesn't exist yet, the container stage job generates it.
|
|
||||||
#
|
|
||||||
# In order to generate a new image, one should generally change the tag.
|
|
||||||
# While removing the image from the registry would also work, that's not
|
|
||||||
# recommended except for ephemeral images during development: Replacing
|
|
||||||
# an image after a significant amount of time might pull in newer
|
|
||||||
# versions of gcc/clang or other packages, which might break the build
|
|
||||||
# with older commits using the same tag.
|
|
||||||
#
|
|
||||||
# After merging a change resulting in generating a new image to the
|
|
||||||
# main repository, it's recommended to remove the image from the source
|
|
||||||
# repository's container registry, so that the image from the main
|
|
||||||
# repository's registry will be used there as well.
|
|
||||||
|
|
||||||
# Alpine based x86_64 build image
|
|
||||||
.alpine/x86_64_build-base:
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@alpine
|
|
||||||
- .container
|
|
||||||
variables:
|
|
||||||
FDO_DISTRIBUTION_VERSION: "3.21"
|
|
||||||
FDO_BASE_IMAGE: alpine:$FDO_DISTRIBUTION_VERSION # since cbuild ignores it
|
|
||||||
|
|
||||||
# Alpine based x86_64 build image
|
|
||||||
alpine/x86_64_build:
|
|
||||||
extends:
|
|
||||||
- .alpine/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &alpine-x86_64_build ${ALPINE_X86_64_BUILD_TAG}
|
|
||||||
LLVM_VERSION: &alpine-llvm_version 19
|
|
||||||
rules:
|
|
||||||
- !reference [.container, rules]
|
|
||||||
# Note: the next three lines must remain in that order, so that the rules
|
|
||||||
# in `linkcheck-docs` catch nightly pipelines before the rules in `deploy-docs`
|
|
||||||
# exclude them.
|
|
||||||
- !reference [linkcheck-docs, rules]
|
|
||||||
- !reference [deploy-docs, rules]
|
|
||||||
- !reference [test-docs, rules]
|
|
||||||
|
|
||||||
.use-alpine/x86_64_build:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_PATH: "alpine/x86_64_build"
|
|
||||||
MESA_IMAGE_TAG: *alpine-x86_64_build
|
|
||||||
LLVM_VERSION: *alpine-llvm_version
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: alpine/x86_64_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Alpine based x86_64 image for LAVA SSH dockerized client
|
|
||||||
alpine/x86_64_lava_ssh_client:
|
|
||||||
extends:
|
|
||||||
- .alpine/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &alpine-x86_64_lava_ssh_client ${ALPINE_X86_64_LAVA_SSH_TAG}
|
|
||||||
|
|
||||||
# Alpine based x86_64 image to run LAVA jobs
|
|
||||||
alpine/x86_64_lava-trigger:
|
|
||||||
extends:
|
|
||||||
- .alpine/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &alpine-x86_64_lava_trigger ${ALPINE_X86_64_LAVA_TRIGGER_TAG}
|
|
@@ -6,9 +6,6 @@
|
|||||||
# ALPINE_X86_64_BUILD_TAG
|
# ALPINE_X86_64_BUILD_TAG
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
EPHEMERAL=(
|
EPHEMERAL=(
|
||||||
@@ -20,28 +17,31 @@ DEPS=(
|
|||||||
bison
|
bison
|
||||||
ccache
|
ccache
|
||||||
"clang${LLVM_VERSION}-dev"
|
"clang${LLVM_VERSION}-dev"
|
||||||
clang-dev
|
|
||||||
cmake
|
cmake
|
||||||
|
clang-dev
|
||||||
coreutils
|
coreutils
|
||||||
curl
|
curl
|
||||||
elfutils-dev
|
|
||||||
expat-dev
|
|
||||||
flex
|
flex
|
||||||
g++
|
|
||||||
gcc
|
gcc
|
||||||
gettext
|
g++
|
||||||
git
|
git
|
||||||
|
gettext
|
||||||
glslang
|
glslang
|
||||||
graphviz
|
graphviz
|
||||||
libclc-dev
|
|
||||||
libdrm-dev
|
|
||||||
libpciaccess-dev
|
|
||||||
libva-dev
|
|
||||||
linux-headers
|
linux-headers
|
||||||
"llvm${LLVM_VERSION}-dev"
|
|
||||||
"llvm${LLVM_VERSION}-static"
|
"llvm${LLVM_VERSION}-static"
|
||||||
|
"llvm${LLVM_VERSION}-dev"
|
||||||
|
meson
|
||||||
mold
|
mold
|
||||||
musl-dev
|
musl-dev
|
||||||
|
expat-dev
|
||||||
|
elfutils-dev
|
||||||
|
libclc-dev
|
||||||
|
libdrm-dev
|
||||||
|
libva-dev
|
||||||
|
libpciaccess-dev
|
||||||
|
zlib-dev
|
||||||
|
python3-dev
|
||||||
py3-clang
|
py3-clang
|
||||||
py3-cparser
|
py3-cparser
|
||||||
py3-mako
|
py3-mako
|
||||||
@@ -49,25 +49,20 @@ DEPS=(
|
|||||||
py3-pip
|
py3-pip
|
||||||
py3-ply
|
py3-ply
|
||||||
py3-yaml
|
py3-yaml
|
||||||
python3-dev
|
|
||||||
samurai
|
|
||||||
spirv-llvm-translator-dev
|
|
||||||
spirv-tools-dev
|
|
||||||
util-macros
|
|
||||||
vulkan-headers
|
vulkan-headers
|
||||||
zlib-dev
|
spirv-tools-dev
|
||||||
|
spirv-llvm-translator-dev
|
||||||
|
util-macros
|
||||||
|
wayland-dev
|
||||||
|
wayland-protocols
|
||||||
)
|
)
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
||||||
|
|
||||||
pip3 install --break-system-packages sphinx===8.2.3 hawkmoth===0.19.0
|
pip3 install --break-system-packages sphinx===5.1.1 hawkmoth===0.16.0
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
. .gitlab-ci/container/container_pre_build.sh
|
||||||
|
|
||||||
. .gitlab-ci/container/install-meson.sh
|
|
||||||
|
|
||||||
EXTRA_MESON_ARGS='--prefix=/usr' \
|
|
||||||
. .gitlab-ci/container/build-wayland.sh
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
############### Uninstall the build software
|
||||||
|
|
||||||
|
@@ -1,50 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# This is a ci-templates build script to generate a container for triggering LAVA jobs.
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# ALPINE_X86_64_LAVA_TRIGGER_TAG
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
set -e
|
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
uncollapsed_section_start alpine_setup "Base Alpine system setup"
|
|
||||||
|
|
||||||
# Ephemeral packages (installed for this script and removed again at the end)
|
|
||||||
EPHEMERAL=(
|
|
||||||
git
|
|
||||||
py3-pip
|
|
||||||
)
|
|
||||||
|
|
||||||
# We only need these very basic packages to run the LAVA jobs
|
|
||||||
DEPS=(
|
|
||||||
curl
|
|
||||||
python3
|
|
||||||
tar
|
|
||||||
zstd
|
|
||||||
)
|
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
pip3 install --break-system-packages -r bin/ci/requirements-lava.txt
|
|
||||||
|
|
||||||
cp -Rp .gitlab-ci/lava /
|
|
||||||
cp -Rp .gitlab-ci/bin/*_logger.py /lava
|
|
||||||
cp -Rp .gitlab-ci/common/init-stage1.sh /lava
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
|
||||||
|
|
||||||
uncollapsed_section_switch alpine_cleanup "Cleaning up base Alpine system"
|
|
||||||
|
|
||||||
apk del "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
||||||
|
|
||||||
section_end alpine_cleanup
|
|
@@ -4,9 +4,6 @@
|
|||||||
|
|
||||||
# shellcheck disable=SC1091
|
# shellcheck disable=SC1091
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
EPHEMERAL=(
|
EPHEMERAL=(
|
||||||
|
@@ -1,5 +1,4 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# shellcheck disable=SC2154 # arch is assigned in previous scripts
|
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
@@ -7,12 +6,11 @@ set -o xtrace
|
|||||||
# Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves
|
# Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves
|
||||||
# network transfer, disk usage, and runtime on test jobs)
|
# network transfer, disk usage, and runtime on test jobs)
|
||||||
|
|
||||||
S3_PATH="https://${S3_HOST}/${S3_KERNEL_BUCKET}"
|
# shellcheck disable=SC2154 # arch is assigned in previous scripts
|
||||||
|
if curl -X HEAD -s "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
|
||||||
if curl -L --retry 3 -f --retry-delay 10 -s --head "${S3_PATH}/${FDO_UPSTREAM_REPO}/${LAVA_DISTRIBUTION_TAG}/lava-rootfs.tar.zst"; then
|
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
|
||||||
ARTIFACTS_URL="${S3_PATH}/${FDO_UPSTREAM_REPO}/${LAVA_DISTRIBUTION_TAG}"
|
|
||||||
else
|
else
|
||||||
ARTIFACTS_URL="${S3_PATH}/${CI_PROJECT_PATH}/${LAVA_DISTRIBUTION_TAG}"
|
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
@@ -26,8 +24,39 @@ if [[ $arch == "arm64" ]]; then
|
|||||||
pushd /baremetal-files
|
pushd /baremetal-files
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image
|
-O "${KERNEL_IMAGE_BASE}"/arm64/Image
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
|
-O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
-O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel
|
||||||
|
|
||||||
|
DEVICE_TREES=""
|
||||||
|
DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
|
||||||
|
DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
|
||||||
|
DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
|
||||||
|
DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb"
|
||||||
|
|
||||||
|
for DTB in $DEVICE_TREES; do
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
-O "${KERNEL_IMAGE_BASE}/arm64/$DTB"
|
||||||
|
done
|
||||||
|
|
||||||
|
popd
|
||||||
|
elif [[ $arch == "armhf" ]]; then
|
||||||
|
mkdir -p /baremetal-files
|
||||||
|
pushd /baremetal-files
|
||||||
|
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
-O "${KERNEL_IMAGE_BASE}"/armhf/zImage
|
||||||
|
|
||||||
|
DEVICE_TREES=""
|
||||||
|
DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
|
||||||
|
DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb"
|
||||||
|
|
||||||
|
for DTB in $DEVICE_TREES; do
|
||||||
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
|
-O "${KERNEL_IMAGE_BASE}/armhf/$DTB"
|
||||||
|
done
|
||||||
|
|
||||||
popd
|
popd
|
||||||
fi
|
fi
|
||||||
|
@@ -1,67 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
#
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
|
||||||
|
|
||||||
# This script runs in a container to:
|
|
||||||
# 1. Download the Android CTS (Compatibility Test Suite)
|
|
||||||
# 2. Filter out unneeded test modules
|
|
||||||
# 3. Compress and upload the stripped version to S3
|
|
||||||
# Note: The 'build-' prefix in the filename is only to make it compatible
|
|
||||||
# with the bin/ci/update_tag.py script.
|
|
||||||
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
section_start android-cts "Downloading Android CTS"
|
|
||||||
|
|
||||||
# xtrace is getting lost with the section switching
|
|
||||||
set -x
|
|
||||||
|
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "ANDROID_CTS_TAG"
|
|
||||||
|
|
||||||
# List of all CTS modules we might want to run in CI
|
|
||||||
# This should be the union of all modules required by our CI jobs
|
|
||||||
# Specific modules to run are selected via the ${GPU_VERSION}-android-cts-include.txt files
|
|
||||||
ANDROID_CTS_MODULES=(
|
|
||||||
"CtsDeqpTestCases"
|
|
||||||
"CtsGraphicsTestCases"
|
|
||||||
"CtsNativeHardwareTestCases"
|
|
||||||
"CtsSkQPTestCases"
|
|
||||||
)
|
|
||||||
|
|
||||||
ANDROID_CTS_VERSION="${ANDROID_VERSION}_r1"
|
|
||||||
ANDROID_CTS_DEVICE_ARCH="x86"
|
|
||||||
|
|
||||||
# Download the stripped CTS from S3, because the CTS download from Google can take 20 minutes
|
|
||||||
CTS_FILENAME="android-cts-${ANDROID_CTS_VERSION}-linux_x86-${ANDROID_CTS_DEVICE_ARCH}"
|
|
||||||
ARTIFACT_PATH="${DATA_STORAGE_PATH}/android-cts/${ANDROID_CTS_TAG}.tar.zst"
|
|
||||||
|
|
||||||
if FOUND_ARTIFACT_URL="$(find_s3_project_artifact "${ARTIFACT_PATH}")"; then
|
|
||||||
echo "Found Android CTS at: ${FOUND_ARTIFACT_URL}"
|
|
||||||
curl-with-retry "${FOUND_ARTIFACT_URL}" | tar --zstd -x -C /
|
|
||||||
else
|
|
||||||
echo "No cached CTS found, downloading from Google and uploading to S3..."
|
|
||||||
curl-with-retry --remote-name "https://dl.google.com/dl/android/cts/${CTS_FILENAME}.zip"
|
|
||||||
|
|
||||||
# Disable zipbomb detection, because the CTS zip file is too big
|
|
||||||
# At least locally, it is detected as a zipbomb
|
|
||||||
UNZIP_DISABLE_ZIPBOMB_DETECTION=true \
|
|
||||||
unzip -q -d / "${CTS_FILENAME}.zip"
|
|
||||||
rm "${CTS_FILENAME}.zip"
|
|
||||||
|
|
||||||
# Keep only the interesting tests to save space
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
ANDROID_CTS_MODULES_KEEP_EXPRESSION=$(printf "%s|" "${ANDROID_CTS_MODULES[@]}" | sed -e 's/|$//g')
|
|
||||||
find /android-cts/testcases/ -mindepth 1 -type d | grep -v -E "$ANDROID_CTS_MODULES_KEEP_EXPRESSION" | xargs rm -rf
|
|
||||||
|
|
||||||
# Using zstd compressed tarball instead of zip, the compression ratio is almost the same, but
|
|
||||||
# the extraction is faster, also LAVA overlays don't support zip compression.
|
|
||||||
tar --zstd -cf "${CTS_FILENAME}.tar.zst" /android-cts
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "${CTS_FILENAME}.tar.zst" \
|
|
||||||
"https://${S3_BASE_PATH}/${CI_PROJECT_PATH}/${ARTIFACT_PATH}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
section_end android-cts
|
|
@@ -1,121 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml and .gitlab-ci/container/gitlab-ci.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
# ANDROID_LLVM_ARTIFACT_NAME
|
|
||||||
|
|
||||||
set -exu
|
|
||||||
|
|
||||||
# If CI vars are not set, assign an empty value, this prevents -u to fail
|
|
||||||
: "${CI:=}"
|
|
||||||
: "${CI_PROJECT_PATH:=}"
|
|
||||||
|
|
||||||
# Early check for required env variables, relies on `set -u`
|
|
||||||
: "$ANDROID_NDK_VERSION"
|
|
||||||
: "$ANDROID_SDK_VERSION"
|
|
||||||
: "$ANDROID_LLVM_VERSION"
|
|
||||||
: "$ANDROID_LLVM_ARTIFACT_NAME"
|
|
||||||
: "$S3_JWT_FILE"
|
|
||||||
: "$S3_HOST"
|
|
||||||
: "$S3_ANDROID_BUCKET"
|
|
||||||
|
|
||||||
# Check for CI if the auth file used later on is non-empty
|
|
||||||
if [ -n "$CI" ] && [ ! -s "${S3_JWT_FILE}" ]; then
|
|
||||||
echo "Error: ${S3_JWT_FILE} is empty." 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"; then
|
|
||||||
echo "Artifact ${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst already exists, skip re-building."
|
|
||||||
|
|
||||||
# Download prebuilt LLVM libraries for Android when they have not changed,
|
|
||||||
# to save some time
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
tar -C / --zstd -xf "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
rm "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
|
|
||||||
exit
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Ephemeral packages (installed for this script and removed again at the end)
|
|
||||||
EPHEMERAL=(
|
|
||||||
unzip
|
|
||||||
)
|
|
||||||
|
|
||||||
apt-get update
|
|
||||||
apt-get install -y --no-install-recommends --no-remove "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
ANDROID_NDK="android-ndk-${ANDROID_NDK_VERSION}"
|
|
||||||
ANDROID_NDK_ROOT="/${ANDROID_NDK}"
|
|
||||||
if [ ! -d "$ANDROID_NDK_ROOT" ];
|
|
||||||
then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "${ANDROID_NDK}.zip" \
|
|
||||||
"https://dl.google.com/android/repository/${ANDROID_NDK}-linux.zip"
|
|
||||||
unzip -d / "${ANDROID_NDK}.zip" "$ANDROID_NDK/source.properties" "$ANDROID_NDK/build/cmake/*" "$ANDROID_NDK/toolchains/llvm/*"
|
|
||||||
rm "${ANDROID_NDK}.zip"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -d "/llvm-project" ];
|
|
||||||
then
|
|
||||||
mkdir "/llvm-project"
|
|
||||||
pushd "/llvm-project"
|
|
||||||
git init
|
|
||||||
git remote add origin https://github.com/llvm/llvm-project.git
|
|
||||||
git fetch --depth 1 origin "$ANDROID_LLVM_VERSION"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
popd
|
|
||||||
fi
|
|
||||||
|
|
||||||
pushd "/llvm-project"
|
|
||||||
|
|
||||||
# Checkout again the intended version, just in case of a pre-existing full clone
|
|
||||||
git checkout "$ANDROID_LLVM_VERSION" || true
|
|
||||||
|
|
||||||
LLVM_INSTALL_PREFIX="/${ANDROID_LLVM_ARTIFACT_NAME}"
|
|
||||||
|
|
||||||
rm -rf build/
|
|
||||||
cmake -GNinja -S llvm -B build/ \
|
|
||||||
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake" \
|
|
||||||
-DANDROID_ABI=x86_64 \
|
|
||||||
-DANDROID_PLATFORM="android-${ANDROID_SDK_VERSION}" \
|
|
||||||
-DANDROID_NDK="${ANDROID_NDK_ROOT}" \
|
|
||||||
-DCMAKE_ANDROID_ARCH_ABI=x86_64 \
|
|
||||||
-DCMAKE_ANDROID_NDK="${ANDROID_NDK_ROOT}" \
|
|
||||||
-DCMAKE_BUILD_TYPE=MinSizeRel \
|
|
||||||
-DCMAKE_SYSTEM_NAME=Android \
|
|
||||||
-DCMAKE_SYSTEM_VERSION="${ANDROID_SDK_VERSION}" \
|
|
||||||
-DCMAKE_INSTALL_PREFIX="${LLVM_INSTALL_PREFIX}" \
|
|
||||||
-DCMAKE_CXX_FLAGS="-march=x86-64 --target=x86_64-linux-android${ANDROID_SDK_VERSION} -fno-rtti" \
|
|
||||||
-DLLVM_HOST_TRIPLE="x86_64-linux-android${ANDROID_SDK_VERSION}" \
|
|
||||||
-DLLVM_TARGETS_TO_BUILD=X86 \
|
|
||||||
-DLLVM_BUILD_LLVM_DYLIB=OFF \
|
|
||||||
-DLLVM_BUILD_TESTS=OFF \
|
|
||||||
-DLLVM_BUILD_EXAMPLES=OFF \
|
|
||||||
-DLLVM_BUILD_DOCS=OFF \
|
|
||||||
-DLLVM_BUILD_TOOLS=OFF \
|
|
||||||
-DLLVM_ENABLE_RTTI=OFF \
|
|
||||||
-DLLVM_BUILD_INSTRUMENTED_COVERAGE=OFF \
|
|
||||||
-DLLVM_NATIVE_TOOL_DIR="${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin" \
|
|
||||||
-DLLVM_ENABLE_PIC=False \
|
|
||||||
-DLLVM_OPTIMIZED_TABLEGEN=ON
|
|
||||||
|
|
||||||
ninja "-j${FDO_CI_CONCURRENT:-4}" -C build/ install
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
rm -rf /llvm-project
|
|
||||||
|
|
||||||
tar --zstd -cf "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "$LLVM_INSTALL_PREFIX"
|
|
||||||
|
|
||||||
# If run in CI upload the tar.zst archive to S3 to avoid rebuilding it if the
|
|
||||||
# version does not change, and delete it.
|
|
||||||
# The file is not deleted for non-CI because it can be useful in local runs.
|
|
||||||
if [ -n "$CI" ]; then
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
rm "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"
|
|
||||||
fi
|
|
||||||
|
|
||||||
apt-get purge -y "${EPHEMERAL[@]}"
|
|
148
.gitlab-ci/container/build-angle.sh
Executable file → Normal file
148
.gitlab-ci/container/build-angle.sh
Executable file → Normal file
@@ -2,38 +2,18 @@
|
|||||||
|
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
# KERNEL_ROOTFS_TAG
|
||||||
# DEBIAN_TEST_GL_TAG
|
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
section_start angle "Building ANGLE"
|
ANGLE_REV="76025caa1a059f464a2b0e8f879dbd4746f092b9"
|
||||||
|
SCRIPTS_DIR="$(pwd)/.gitlab-ci"
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
ANGLE_PATCH_DIR="${SCRIPTS_DIR}/container/patches"
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "ANGLE_TAG"
|
|
||||||
|
|
||||||
ANGLE_REV="c39f4a5c553cbee39af8f866aa82a9ffa4f02f5b"
|
|
||||||
DEPOT_REV="5982a1aeb33dc36382ed8c62eddf52a6135e7dd3"
|
|
||||||
|
|
||||||
# Set ANGLE_ARCH based on DEBIAN_ARCH if it hasn't been explicitly defined
|
|
||||||
if [[ -z "${ANGLE_ARCH:-}" ]]; then
|
|
||||||
case "$DEBIAN_ARCH" in
|
|
||||||
amd64) ANGLE_ARCH=x64;;
|
|
||||||
arm64) ANGLE_ARCH=arm64;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
# DEPOT tools
|
# DEPOT tools
|
||||||
mkdir /depot-tools
|
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git /depot-tools
|
||||||
pushd /depot-tools
|
|
||||||
git init
|
|
||||||
git remote add origin https://chromium.googlesource.com/chromium/tools/depot_tools.git
|
|
||||||
git fetch --depth 1 origin "$DEPOT_REV"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
export PATH=/depot-tools:$PATH
|
export PATH=/depot-tools:$PATH
|
||||||
export DEPOT_TOOLS_UPDATE=0
|
export DEPOT_TOOLS_UPDATE=0
|
||||||
popd
|
|
||||||
|
|
||||||
mkdir /angle-build
|
mkdir /angle-build
|
||||||
mkdir /angle
|
mkdir /angle
|
||||||
@@ -43,39 +23,37 @@ git remote add origin https://chromium.googlesource.com/angle/angle.git
|
|||||||
git fetch --depth 1 origin "$ANGLE_REV"
|
git fetch --depth 1 origin "$ANGLE_REV"
|
||||||
git checkout FETCH_HEAD
|
git checkout FETCH_HEAD
|
||||||
|
|
||||||
echo "$ANGLE_REV" > /angle/version
|
angle_patch_files=(
|
||||||
|
build-angle_deps_Make-more-sources-conditional.patch
|
||||||
|
)
|
||||||
|
for patch in "${angle_patch_files[@]}"; do
|
||||||
|
echo "Apply patch to ANGLE from ${patch}"
|
||||||
|
GIT_COMMITTER_DATE=$(date -d@0) git am < "${ANGLE_PATCH_DIR}/${patch}"
|
||||||
|
done
|
||||||
|
|
||||||
GCLIENT_CUSTOM_VARS=()
|
{
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_cl=False')
|
echo "ANGLE base version $ANGLE_REV"
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_cl_testing=False')
|
echo "The following local patches are applied on top:"
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_vulkan_validation_layers=False')
|
git log --reverse --oneline $ANGLE_REV.. --format='- %s'
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_wgpu=False')
|
} > /angle/version
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=build_angle_deqp_tests=False')
|
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=build_angle_perftests=False')
|
|
||||||
if [[ "$ANGLE_TARGET" == "android" ]]; then
|
|
||||||
GCLIENT_CUSTOM_VARS+=('--custom-var=checkout_android=True')
|
|
||||||
fi
|
|
||||||
|
|
||||||
# source preparation
|
# source preparation
|
||||||
gclient config --name REPLACE-WITH-A-DOT --unmanaged \
|
gclient config --name REPLACE-WITH-A-DOT --unmanaged \
|
||||||
"${GCLIENT_CUSTOM_VARS[@]}" \
|
--custom-var='angle_enable_cl=False' \
|
||||||
|
--custom-var='angle_enable_cl_testing=False' \
|
||||||
|
--custom-var='angle_enable_vulkan_validation_layers=False' \
|
||||||
|
--custom-var='angle_enable_wgpu=False' \
|
||||||
|
--custom-var='build_allow_regenerate=False' \
|
||||||
|
--custom-var='build_angle_deqp_tests=False' \
|
||||||
|
--custom-var='build_angle_perftests=False' \
|
||||||
|
--custom-var='build_with_catapult=False' \
|
||||||
|
--custom-var='build_with_swiftshader=False' \
|
||||||
https://chromium.googlesource.com/angle/angle.git
|
https://chromium.googlesource.com/angle/angle.git
|
||||||
sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient
|
sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient
|
||||||
sed -e 's|"custom_deps" : {|"custom_deps" : {\
|
gclient sync -j"${FDO_CI_CONCURRENT:-4}"
|
||||||
"third_party/clspv/src": None,\
|
|
||||||
"third_party/dawn": None,\
|
|
||||||
"third_party/glmark2/src": None,\
|
|
||||||
"third_party/libjpeg_turbo": None,\
|
|
||||||
"third_party/llvm/src": None,\
|
|
||||||
"third_party/OpenCL-CTS/src": None,\
|
|
||||||
"third_party/SwiftShader": None,\
|
|
||||||
"third_party/VK-GL-CTS/src": None,\
|
|
||||||
"third_party/vulkan-validation-layers/src": None,|' -i .gclient
|
|
||||||
gclient sync --no-history -j"${FDO_CI_CONCURRENT:-4}"
|
|
||||||
|
|
||||||
mkdir -p out/Release
|
mkdir -p out/Release
|
||||||
cat > out/Release/args.gn <<EOF
|
echo '
|
||||||
angle_assert_always_on=false
|
|
||||||
angle_build_all=false
|
angle_build_all=false
|
||||||
angle_build_tests=false
|
angle_build_tests=false
|
||||||
angle_enable_cl=false
|
angle_enable_cl=false
|
||||||
@@ -91,81 +69,31 @@ angle_enable_vulkan_api_dump_layer=false
|
|||||||
angle_enable_vulkan_validation_layers=false
|
angle_enable_vulkan_validation_layers=false
|
||||||
angle_has_frame_capture=false
|
angle_has_frame_capture=false
|
||||||
angle_has_histograms=false
|
angle_has_histograms=false
|
||||||
angle_has_rapidjson=false
|
|
||||||
angle_use_custom_libvulkan=false
|
angle_use_custom_libvulkan=false
|
||||||
|
angle_egl_extension="so.1"
|
||||||
|
angle_glesv2_extension="so.2"
|
||||||
build_angle_deqp_tests=false
|
build_angle_deqp_tests=false
|
||||||
dcheck_always_on=true
|
dcheck_always_on=true
|
||||||
enable_expensive_dchecks=false
|
enable_expensive_dchecks=false
|
||||||
is_component_build=false
|
|
||||||
is_debug=false
|
is_debug=false
|
||||||
target_cpu="${ANGLE_ARCH}"
|
' > out/Release/args.gn
|
||||||
target_os="${ANGLE_TARGET}"
|
|
||||||
treat_warnings_as_errors=false
|
|
||||||
EOF
|
|
||||||
|
|
||||||
case "$ANGLE_TARGET" in
|
|
||||||
linux) cat >> out/Release/args.gn <<EOF
|
|
||||||
angle_egl_extension="so.1"
|
|
||||||
angle_glesv2_extension="so.2"
|
|
||||||
use_custom_libcxx=false
|
|
||||||
custom_toolchain="//build/toolchain/linux/unbundle:default"
|
|
||||||
host_toolchain="//build/toolchain/linux/unbundle:default"
|
|
||||||
EOF
|
|
||||||
;;
|
|
||||||
android) cat >> out/Release/args.gn <<EOF
|
|
||||||
android_ndk_version="${ANDROID_NDK_VERSION}"
|
|
||||||
android64_ndk_api_level=${ANDROID_SDK_VERSION}
|
|
||||||
android32_ndk_api_level=${ANDROID_SDK_VERSION}
|
|
||||||
use_custom_libcxx=true
|
|
||||||
EOF
|
|
||||||
;;
|
|
||||||
*) echo "Unexpected ANGLE_TARGET value: $ANGLE_TARGET"; exit 1;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
|
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
|
||||||
# We need to get an AArch64 sysroot - because ANGLE isn't great friends with
|
|
||||||
# system dependencies - but use the default system toolchain, because the
|
|
||||||
# 'arm64' toolchain you get from Google infrastructure is a cross-compiler
|
|
||||||
# from x86-64
|
|
||||||
build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
|
build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
|
||||||
fi
|
fi
|
||||||
|
|
||||||
(
|
gn gen out/Release
|
||||||
# The 'unbundled' toolchain configuration requires clang, and it also needs to
|
# depot_tools overrides ninja with a version that doesn't work. We want
|
||||||
# be configured via environment variables.
|
# ninja with FDO_CI_CONCURRENT anyway.
|
||||||
export CC="clang-${LLVM_VERSION}"
|
/usr/local/bin/ninja -C out/Release/ libEGL libGLESv2
|
||||||
export HOST_CC="$CC"
|
|
||||||
export CFLAGS="-Wno-unknown-warning-option"
|
|
||||||
export HOST_CFLAGS="$CFLAGS"
|
|
||||||
export CXX="clang++-${LLVM_VERSION}"
|
|
||||||
export HOST_CXX="$CXX"
|
|
||||||
export CXXFLAGS="-Wno-unknown-warning-option"
|
|
||||||
export HOST_CXXFLAGS="$CXXFLAGS"
|
|
||||||
export AR="ar"
|
|
||||||
export HOST_AR="$AR"
|
|
||||||
export NM="nm"
|
|
||||||
export HOST_NM="$NM"
|
|
||||||
export LDFLAGS="-fuse-ld=lld-${LLVM_VERSION} -lpthread -ldl"
|
|
||||||
export HOST_LDFLAGS="$LDFLAGS"
|
|
||||||
|
|
||||||
gn gen out/Release
|
rm -f out/Release/libvulkan.so* out/Release/*.so.TOC
|
||||||
# depot_tools overrides ninja with a version that doesn't work. We want
|
|
||||||
# ninja with FDO_CI_CONCURRENT anyway.
|
|
||||||
/usr/local/bin/ninja -C out/Release/ libEGL libGLESv1_CM libGLESv2
|
|
||||||
)
|
|
||||||
|
|
||||||
rm -f out/Release/libvulkan.so* out/Release/*.so*.TOC
|
|
||||||
cp out/Release/lib*.so* /angle/
|
cp out/Release/lib*.so* /angle/
|
||||||
|
ln -s libEGL.so.1 /angle/libEGL.so
|
||||||
if [[ "$ANGLE_TARGET" == "linux" ]]; then
|
ln -s libGLESv2.so.2 /angle/libGLESv2.so
|
||||||
ln -s libEGL.so.1 /angle/libEGL.so
|
|
||||||
ln -s libGLESv2.so.2 /angle/libGLESv2.so
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rf out
|
rm -rf out
|
||||||
|
|
||||||
popd
|
popd
|
||||||
rm -rf /depot-tools
|
rm -rf /depot-tools
|
||||||
rm -rf /angle-build
|
rm -rf /angle-build
|
||||||
|
|
||||||
section_end angle
|
|
||||||
|
@@ -3,13 +3,13 @@
|
|||||||
|
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_BASE_TAG
|
# DEBIAN_TEST_GL_TAG
|
||||||
|
# DEBIAN_TEST_VK_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start apitrace "Building apitrace"
|
APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a"
|
||||||
|
|
||||||
APITRACE_VERSION="b6102d10960c9f43b1b473903fc67937dd19fb98"
|
|
||||||
|
|
||||||
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
||||||
pushd /apitrace
|
pushd /apitrace
|
||||||
@@ -23,5 +23,3 @@ cp _build/eglretrace build
|
|||||||
${STRIP_CMD:-strip} build/*
|
${STRIP_CMD:-strip} build/*
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end apitrace
|
|
||||||
|
@@ -1,14 +1,7 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
BINDGEN_VER=0.65.1
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
# FEDORA_X86_64_BUILD_TAG
|
|
||||||
|
|
||||||
uncollapsed_section_start bindgen "Building bindgen"
|
|
||||||
|
|
||||||
BINDGEN_VER=0.71.1
|
|
||||||
CBINDGEN_VER=0.26.0
|
CBINDGEN_VER=0.26.0
|
||||||
|
|
||||||
# bindgen
|
# bindgen
|
||||||
@@ -25,4 +18,3 @@ RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
--root /usr/local
|
--root /usr/local
|
||||||
|
|
||||||
section_end bindgen
|
|
||||||
|
@@ -1,28 +1,18 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BASE_TAG
|
|
||||||
|
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "CROSVM_TAG"
|
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
section_start crosvm "Building crosvm"
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
git config --global user.email "mesa@example.com"
|
||||||
git config --global user.name "Mesa CI"
|
git config --global user.name "Mesa CI"
|
||||||
|
|
||||||
CROSVM_VERSION=4a6b4316155742fbfa1be7087c2ee578cfee884d
|
CROSVM_VERSION=1641c55bcc922588e24de73e9cca7b5e4005bd6d
|
||||||
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
|
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
|
||||||
pushd /platform/crosvm
|
pushd /platform/crosvm
|
||||||
git checkout "$CROSVM_VERSION"
|
git checkout "$CROSVM_VERSION"
|
||||||
git submodule update --init
|
git submodule update --init
|
||||||
|
|
||||||
VIRGLRENDERER_VERSION=06d43ce974b664f9dc521b706a0ad7f91dbf2866
|
VIRGLRENDERER_VERSION=d9c002fac153b834a2c17731f2b85c36e333e102
|
||||||
rm -rf third_party/virglrenderer
|
rm -rf third_party/virglrenderer
|
||||||
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
|
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
|
||||||
pushd third_party/virglrenderer
|
pushd third_party/virglrenderer
|
||||||
@@ -31,14 +21,14 @@ meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true $
|
|||||||
meson install -C build
|
meson install -C build
|
||||||
popd
|
popd
|
||||||
|
|
||||||
rm rust-toolchain
|
cargo update -p pkg-config@0.3.26 --precise 0.3.27
|
||||||
|
|
||||||
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
||||||
bindgen-cli \
|
bindgen-cli \
|
||||||
--locked \
|
--locked \
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
--root /usr/local \
|
--root /usr/local \
|
||||||
--version 0.71.1 \
|
--version 0.65.1 \
|
||||||
${EXTRA_CARGO_ARGS:-}
|
${EXTRA_CARGO_ARGS:-}
|
||||||
|
|
||||||
CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
||||||
@@ -52,5 +42,3 @@ CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L nati
|
|||||||
popd
|
popd
|
||||||
|
|
||||||
rm -rf /platform/crosvm
|
rm -rf /platform/crosvm
|
||||||
|
|
||||||
section_end crosvm
|
|
||||||
|
@@ -5,12 +5,11 @@
|
|||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
# DEBIAN_TEST_ANDROID_TAG
|
||||||
# DEBIAN_BASE_TAG
|
# DEBIAN_BASE_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
section_start deqp-runner "Building deqp-runner"
|
DEQP_RUNNER_VERSION=0.20.2
|
||||||
|
|
||||||
DEQP_RUNNER_VERSION=0.20.3
|
|
||||||
|
|
||||||
commits_to_backport=(
|
commits_to_backport=(
|
||||||
)
|
)
|
||||||
@@ -95,5 +94,3 @@ popd
|
|||||||
if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then
|
if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then
|
||||||
rm -f /usr/local/bin/igt-runner
|
rm -f /usr/local/bin/igt-runner
|
||||||
fi
|
fi
|
||||||
|
|
||||||
section_end deqp-runner
|
|
||||||
|
255
.gitlab-ci/container/build-deqp.sh
Executable file → Normal file
255
.gitlab-ci/container/build-deqp.sh
Executable file → Normal file
@@ -6,27 +6,19 @@
|
|||||||
# DEBIAN_TEST_ANDROID_TAG
|
# DEBIAN_TEST_ANDROID_TAG
|
||||||
# DEBIAN_TEST_GL_TAG
|
# DEBIAN_TEST_GL_TAG
|
||||||
# DEBIAN_TEST_VK_TAG
|
# DEBIAN_TEST_VK_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -ue -o pipefail
|
set -uex -o pipefail
|
||||||
|
|
||||||
# shellcheck disable=SC2153
|
|
||||||
deqp_api=${DEQP_API,,}
|
|
||||||
|
|
||||||
section_start deqp-$deqp_api "Building dEQP $DEQP_API"
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
# See `deqp_build_targets` below for which release is used to produce which
|
# See `deqp_build_targets` below for which release is used to produce which
|
||||||
# binary. Unless this comment has bitrotten:
|
# binary. Unless this comment has bitrotten:
|
||||||
# - the commit from the main branch produces the deqp tools and `deqp-vk`,
|
|
||||||
# - the VK release produces `deqp-vk`,
|
# - the VK release produces `deqp-vk`,
|
||||||
# - the GL release produces `glcts`, and
|
# - the GL release produces `glcts`, and
|
||||||
# - the GLES release produces `deqp-gles*` and `deqp-egl`
|
# - the GLES release produces `deqp-gles*` and `deqp-egl`
|
||||||
|
|
||||||
DEQP_MAIN_COMMIT=9cc8e038994c32534b3d2c4ba88c1dc49ef53228
|
DEQP_VK_VERSION=1.3.10.0
|
||||||
DEQP_VK_VERSION=1.4.1.1
|
DEQP_GL_VERSION=4.6.5.0
|
||||||
DEQP_GL_VERSION=4.6.6.0
|
DEQP_GLES_VERSION=3.2.11.0
|
||||||
DEQP_GLES_VERSION=3.2.12.0
|
|
||||||
|
|
||||||
# Patches to VulkanCTS may come from commits in their repo (listed in
|
# Patches to VulkanCTS may come from commits in their repo (listed in
|
||||||
# cts_commits_to_backport) or patch files stored in our repo (in the patch
|
# cts_commits_to_backport) or patch files stored in our repo (in the patch
|
||||||
@@ -34,40 +26,38 @@ DEQP_GLES_VERSION=3.2.12.0
|
|||||||
# Both list variables would have comments explaining the reasons behind the
|
# Both list variables would have comments explaining the reasons behind the
|
||||||
# patches.
|
# patches.
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
main_cts_commits_to_backport=(
|
|
||||||
# If you find yourself wanting to add something in here, consider whether
|
|
||||||
# bumping DEQP_MAIN_COMMIT is not a better solution :)
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
main_cts_patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
vk_cts_commits_to_backport=(
|
vk_cts_commits_to_backport=(
|
||||||
# Stop querying device address from unbound buffers
|
# Remove multi-line test results in DRM format modifier tests
|
||||||
046343f46f7d39d53b47842d7fd8ed3279528046
|
8c95af68a2a85cbdc7e1d9267ab029f73e9427d2
|
||||||
)
|
)
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
vk_cts_patch_files=(
|
vk_cts_patch_files=(
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if [ "${DEQP_TARGET}" = 'android' ]; then
|
||||||
|
vk_cts_patch_files+=(
|
||||||
|
build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch
|
||||||
|
build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
gl_cts_commits_to_backport=(
|
gl_cts_commits_to_backport=(
|
||||||
# Add testing for GL_PRIMITIVES_SUBMITTED_ARB query.
|
|
||||||
e075ce73ddc5973aa46a5236c715bb281c9501fa
|
|
||||||
)
|
)
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
gl_cts_patch_files=(
|
gl_cts_patch_files=(
|
||||||
build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch
|
|
||||||
build-deqp-gl_Revert-Add-missing-context-deletion.patch
|
|
||||||
build-deqp-gl_Revert-Fix-issues-with-GLX-reset-notification-strate.patch
|
|
||||||
build-deqp-gl_Revert-Fix-spurious-failures-when-using-a-config-wit.patch
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if [ "${DEQP_TARGET}" = 'android' ]; then
|
||||||
|
gl_cts_patch_files+=(
|
||||||
|
build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch
|
||||||
|
build-deqp-gl_Android-prints-to-stdout-instead-of-logcat.patch
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
# GLES builds also EGL
|
# GLES builds also EGL
|
||||||
gles_cts_commits_to_backport=(
|
gles_cts_commits_to_backport=(
|
||||||
@@ -75,12 +65,15 @@ gles_cts_commits_to_backport=(
|
|||||||
|
|
||||||
# shellcheck disable=SC2034
|
# shellcheck disable=SC2034
|
||||||
gles_cts_patch_files=(
|
gles_cts_patch_files=(
|
||||||
build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch
|
|
||||||
build-deqp-gl_Revert-Add-missing-context-deletion.patch
|
|
||||||
build-deqp-gl_Revert-Fix-issues-with-GLX-reset-notification-strate.patch
|
|
||||||
build-deqp-gl_Revert-Fix-spurious-failures-when-using-a-config-wit.patch
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if [ "${DEQP_TARGET}" = 'android' ]; then
|
||||||
|
gles_cts_patch_files+=(
|
||||||
|
build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch
|
||||||
|
build-deqp-gles_Android-prints-to-stdout-instead-of-logcat.patch
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
### Careful editing anything below this line
|
### Careful editing anything below this line
|
||||||
|
|
||||||
@@ -90,149 +83,86 @@ git config --global user.name "Mesa CI"
|
|||||||
|
|
||||||
# shellcheck disable=SC2153
|
# shellcheck disable=SC2153
|
||||||
case "${DEQP_API}" in
|
case "${DEQP_API}" in
|
||||||
tools) DEQP_VERSION="$DEQP_MAIN_COMMIT";;
|
|
||||||
*-main) DEQP_VERSION="$DEQP_MAIN_COMMIT";;
|
|
||||||
VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";;
|
VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";;
|
||||||
GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";;
|
GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";;
|
||||||
GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";;
|
GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";;
|
||||||
*) echo "Unexpected DEQP_API value: $DEQP_API"; exit 1;;
|
|
||||||
esac
|
esac
|
||||||
|
|
||||||
mkdir -p /VK-GL-CTS
|
git clone \
|
||||||
|
https://github.com/KhronosGroup/VK-GL-CTS.git \
|
||||||
|
-b $DEQP_VERSION \
|
||||||
|
--depth 1 \
|
||||||
|
/VK-GL-CTS
|
||||||
pushd /VK-GL-CTS
|
pushd /VK-GL-CTS
|
||||||
[ -e .git ] || {
|
|
||||||
git init
|
|
||||||
git remote add origin https://github.com/KhronosGroup/VK-GL-CTS.git
|
|
||||||
}
|
|
||||||
git fetch --depth 1 origin "$DEQP_VERSION"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
DEQP_COMMIT=$(git rev-parse FETCH_HEAD)
|
|
||||||
|
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
mkdir -p /deqp
|
||||||
merge_base="$(curl-with-retry -s https://api.github.com/repos/KhronosGroup/VK-GL-CTS/compare/main...$DEQP_MAIN_COMMIT | jq -r .merge_base_commit.sha)"
|
|
||||||
if [[ "$merge_base" != "$DEQP_MAIN_COMMIT" ]]; then
|
|
||||||
echo "VK-GL-CTS commit $DEQP_MAIN_COMMIT is not a commit from the main branch."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
mkdir -p /deqp-$deqp_api
|
# shellcheck disable=SC2153
|
||||||
|
deqp_api=${DEQP_API,,}
|
||||||
|
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
cts_commits_to_backport="${deqp_api}_cts_commits_to_backport[@]"
|
||||||
prefix="main"
|
|
||||||
else
|
|
||||||
prefix="$deqp_api"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cts_commits_to_backport="${prefix}_cts_commits_to_backport[@]"
|
|
||||||
for commit in "${!cts_commits_to_backport}"
|
for commit in "${!cts_commits_to_backport}"
|
||||||
do
|
do
|
||||||
PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
|
PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL"
|
echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL"
|
||||||
curl-with-retry $PATCH_URL | GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am -
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \
|
||||||
|
GIT_COMMITTER_DATE=$(date -d@0) git am -
|
||||||
done
|
done
|
||||||
|
|
||||||
cts_patch_files="${prefix}_cts_patch_files[@]"
|
cts_patch_files="${deqp_api}_cts_patch_files[@]"
|
||||||
for patch in "${!cts_patch_files}"
|
for patch in "${!cts_patch_files}"
|
||||||
do
|
do
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $patch"
|
echo "Apply patch to ${DEQP_API} CTS from $patch"
|
||||||
GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch
|
GIT_COMMITTER_DATE=$(date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch
|
||||||
done
|
done
|
||||||
|
|
||||||
{
|
{
|
||||||
if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then
|
echo "dEQP base version $DEQP_VERSION"
|
||||||
commit_desc=$(git show --no-patch --format='commit %h on %ci' --abbrev=10 "$DEQP_COMMIT")
|
echo "The following local patches are applied on top:"
|
||||||
echo "dEQP $DEQP_API at $commit_desc"
|
git log --reverse --oneline $DEQP_VERSION.. --format='- %s'
|
||||||
else
|
} > /deqp/version-$deqp_api
|
||||||
echo "dEQP $DEQP_API version $DEQP_VERSION"
|
|
||||||
fi
|
|
||||||
if [ "$(git rev-parse HEAD)" != "$DEQP_COMMIT" ]; then
|
|
||||||
echo "The following local patches are applied on top:"
|
|
||||||
git log --reverse --oneline "$DEQP_COMMIT".. --format='- %s'
|
|
||||||
fi
|
|
||||||
} > /deqp-$deqp_api/deqp-$deqp_api-version
|
|
||||||
|
|
||||||
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
||||||
# libpng (sigh). The archives get their checksums checked anyway, and git
|
# libpng (sigh). The archives get their checksums checked anyway, and git
|
||||||
# always goes through ssh or https.
|
# always goes through ssh or https.
|
||||||
python3 external/fetch_sources.py --insecure
|
python3 external/fetch_sources.py --insecure
|
||||||
|
|
||||||
case "${DEQP_API}" in
|
# Save the testlog stylesheets:
|
||||||
VK-main)
|
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
|
||||||
# Video tests rely on external files
|
|
||||||
python3 external/fetch_video_decode_samples.py
|
|
||||||
python3 external/fetch_video_encode_samples.py
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
if [[ "$DEQP_API" = tools ]]; then
|
|
||||||
# Save the testlog stylesheets:
|
|
||||||
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp-$deqp_api
|
|
||||||
fi
|
|
||||||
|
|
||||||
popd
|
popd
|
||||||
|
|
||||||
deqp_build_targets=()
|
pushd /deqp
|
||||||
case "${DEQP_API}" in
|
|
||||||
VK|VK-main)
|
|
||||||
deqp_build_targets+=(deqp-vk)
|
|
||||||
;;
|
|
||||||
GL)
|
|
||||||
deqp_build_targets+=(glcts)
|
|
||||||
;;
|
|
||||||
GLES)
|
|
||||||
deqp_build_targets+=(deqp-gles{2,3,31})
|
|
||||||
deqp_build_targets+=(glcts) # needed for gles*-khr tests
|
|
||||||
# deqp-egl also comes from this build, but it is handled separately below.
|
|
||||||
;;
|
|
||||||
tools)
|
|
||||||
deqp_build_targets+=(testlog-to-xml)
|
|
||||||
deqp_build_targets+=(testlog-to-csv)
|
|
||||||
deqp_build_targets+=(testlog-to-junit)
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
OLD_IFS="$IFS"
|
|
||||||
IFS=";"
|
|
||||||
CMAKE_SBT="${deqp_build_targets[*]}"
|
|
||||||
IFS="$OLD_IFS"
|
|
||||||
|
|
||||||
pushd /deqp-$deqp_api
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
if [ "${DEQP_API}" = 'GLES' ]; then
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
if [ "${DEQP_TARGET}" = 'android' ]; then
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
-DDEQP_TARGET=android \
|
-DDEQP_TARGET=android \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
${EXTRA_CMAKE_ARGS:-}
|
||||||
ninja modules/egl/deqp-egl
|
mold --run ninja modules/egl/deqp-egl
|
||||||
mv modules/egl/deqp-egl{,-android}
|
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android
|
||||||
else
|
else
|
||||||
# When including EGL/X11 testing, do that build first and save off its
|
# When including EGL/X11 testing, do that build first and save off its
|
||||||
# deqp-egl binary.
|
# deqp-egl binary.
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
-DDEQP_TARGET=x11_egl_glx \
|
-DDEQP_TARGET=x11_egl_glx \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
${EXTRA_CMAKE_ARGS:-}
|
||||||
ninja modules/egl/deqp-egl
|
mold --run ninja modules/egl/deqp-egl
|
||||||
mv modules/egl/deqp-egl{,-x11}
|
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
-DDEQP_TARGET=wayland \
|
-DDEQP_TARGET=wayland \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
-DSELECTED_BUILD_TARGETS="deqp-egl" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
${EXTRA_CMAKE_ARGS:-}
|
||||||
ninja modules/egl/deqp-egl
|
mold --run ninja modules/egl/deqp-egl
|
||||||
mv modules/egl/deqp-egl{,-wayland}
|
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
-DDEQP_TARGET=${DEQP_TARGET} \
|
-DDEQP_TARGET=${DEQP_TARGET} \
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
-DSELECTED_BUILD_TARGETS="${CMAKE_SBT}" \
|
|
||||||
${EXTRA_CMAKE_ARGS:-}
|
${EXTRA_CMAKE_ARGS:-}
|
||||||
|
|
||||||
# Make sure `default` doesn't silently stop detecting one of the platforms we care about
|
# Make sure `default` doesn't silently stop detecting one of the platforms we care about
|
||||||
@@ -242,73 +172,90 @@ if [ "${DEQP_TARGET}" = 'default' ]; then
|
|||||||
grep -q DEQP_SUPPORT_XCB=1 build.ninja
|
grep -q DEQP_SUPPORT_XCB=1 build.ninja
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ninja "${deqp_build_targets[@]}"
|
deqp_build_targets=()
|
||||||
|
case "${DEQP_API}" in
|
||||||
|
VK)
|
||||||
|
deqp_build_targets+=(deqp-vk)
|
||||||
|
;;
|
||||||
|
GL)
|
||||||
|
deqp_build_targets+=(glcts)
|
||||||
|
;;
|
||||||
|
GLES)
|
||||||
|
deqp_build_targets+=(deqp-gles{2,3,31})
|
||||||
|
# deqp-egl also comes from this build, but it is handled separately above.
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
if [ "${DEQP_TARGET}" != 'android' ]; then
|
||||||
|
deqp_build_targets+=(testlog-to-xml)
|
||||||
|
deqp_build_targets+=(testlog-to-csv)
|
||||||
|
deqp_build_targets+=(testlog-to-junit)
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "$DEQP_API" != tools ]; then
|
mold --run ninja "${deqp_build_targets[@]}"
|
||||||
|
|
||||||
|
if [ "${DEQP_TARGET}" != 'android' ]; then
|
||||||
# Copy out the mustpass lists we want.
|
# Copy out the mustpass lists we want.
|
||||||
mkdir -p mustpass
|
mkdir -p /deqp/mustpass
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then
|
if [ "${DEQP_API}" = 'VK' ]; then
|
||||||
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
|
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
|
||||||
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
|
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
|
||||||
>> mustpass/vk-main.txt
|
>> /deqp/mustpass/vk-main.txt
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GL' ]; then
|
if [ "${DEQP_API}" = 'GL' ]; then
|
||||||
cp \
|
cp \
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \
|
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \
|
||||||
mustpass/
|
/deqp/mustpass/
|
||||||
cp \
|
cp \
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \
|
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \
|
||||||
mustpass/
|
/deqp/mustpass/
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
if [ "${DEQP_API}" = 'GLES' ]; then
|
||||||
cp \
|
cp \
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \
|
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \
|
||||||
mustpass/
|
/deqp/mustpass/
|
||||||
cp \
|
cp \
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \
|
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \
|
||||||
mustpass/
|
/deqp/mustpass/
|
||||||
cp \
|
cp \
|
||||||
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \
|
/VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \
|
||||||
mustpass/
|
/deqp/mustpass/
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Compress the caselists, since Vulkan's in particular are gigantic; higher
|
|
||||||
# compression levels provide no real measurable benefit.
|
|
||||||
zstd -f -1 --rm mustpass/*.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$DEQP_API" = tools ]; then
|
|
||||||
# Save *some* executor utils, but otherwise strip things down
|
# Save *some* executor utils, but otherwise strip things down
|
||||||
# to reduct deqp build size:
|
# to reduct deqp build size:
|
||||||
mv executor/testlog-to-* .
|
mkdir /deqp/executor.save
|
||||||
rm -rf executor
|
cp /deqp/executor/testlog-to-* /deqp/executor.save
|
||||||
|
rm -rf /deqp/executor
|
||||||
|
mv /deqp/executor.save /deqp/executor
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Compress the caselists, since Vulkan's in particular are gigantic; higher
|
||||||
|
# compression levels provide no real measurable benefit.
|
||||||
|
zstd -1 --rm /deqp/mustpass/*.txt
|
||||||
|
|
||||||
# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
|
# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
|
||||||
rm -rf assets/**/mustpass/
|
rm -rf /deqp/external/**/mustpass/
|
||||||
rm -rf external/**/mustpass/
|
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-main*
|
||||||
rm -rf external/vulkancts/modules/vulkan/vk-main*
|
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default
|
||||||
rm -rf external/vulkancts/modules/vulkan/vk-default
|
|
||||||
|
|
||||||
rm -rf external/openglcts/modules/cts-runner
|
rm -rf /deqp/external/openglcts/modules/cts-runner
|
||||||
rm -rf modules/internal
|
rm -rf /deqp/modules/internal
|
||||||
rm -rf execserver
|
rm -rf /deqp/execserver
|
||||||
rm -rf framework
|
rm -rf /deqp/framework
|
||||||
find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \;
|
find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \;
|
||||||
if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then
|
if [ "${DEQP_API}" = 'VK' ]; then
|
||||||
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
||||||
fi
|
fi
|
||||||
if [ "${DEQP_API}" = 'GL' ] || [ "${DEQP_API}" = 'GLES' ]; then
|
if [ "${DEQP_API}" = 'GL' ]; then
|
||||||
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
||||||
fi
|
fi
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
if [ "${DEQP_API}" = 'GLES' ]; then
|
||||||
${STRIP_CMD:-strip} modules/*/deqp-*
|
${STRIP_CMD:-strip} modules/*/deqp-*
|
||||||
fi
|
fi
|
||||||
du -sh ./*
|
du -sh ./*
|
||||||
|
rm -rf /VK-GL-CTS
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end deqp-$deqp_api
|
|
||||||
|
@@ -7,13 +7,9 @@
|
|||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start directx-headers "Building directx-headers"
|
|
||||||
|
|
||||||
git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1
|
git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1
|
||||||
pushd DirectX-Headers
|
pushd DirectX-Headers
|
||||||
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-}
|
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-}
|
||||||
meson install -C build
|
meson install -C build
|
||||||
popd
|
popd
|
||||||
rm -rf DirectX-Headers
|
rm -rf DirectX-Headers
|
||||||
|
|
||||||
section_end directx-headers
|
|
||||||
|
@@ -1,52 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034 # Variables are used in scripts called from here
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_TEST_VIDEO_TAG
|
|
||||||
|
|
||||||
# Install fluster in /fluster.
|
|
||||||
|
|
||||||
set -uex
|
|
||||||
|
|
||||||
section_start fluster "Installing Fluster"
|
|
||||||
|
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "FLUSTER_TAG"
|
|
||||||
|
|
||||||
FLUSTER_REVISION="e997402978f62428fffc8e5a4a709690d9ca9bc5"
|
|
||||||
|
|
||||||
git clone https://github.com/fluendo/fluster.git --single-branch --no-checkout
|
|
||||||
|
|
||||||
pushd fluster || exit
|
|
||||||
git checkout "${FLUSTER_REVISION}"
|
|
||||||
popd || exit
|
|
||||||
|
|
||||||
ARTIFACT_PATH="${DATA_STORAGE_PATH}/fluster/${FLUSTER_TAG}/vectors.tar.zst"
|
|
||||||
|
|
||||||
if FOUND_ARTIFACT_URL="$(find_s3_project_artifact "${ARTIFACT_PATH}")"; then
|
|
||||||
echo "Found fluster vectors at: ${FOUND_ARTIFACT_URL}"
|
|
||||||
mv fluster/ /
|
|
||||||
curl-with-retry "${FOUND_ARTIFACT_URL}" | tar --zstd -x -C /
|
|
||||||
else
|
|
||||||
echo "No cached vectors found, rebuilding..."
|
|
||||||
# Download the necessary vectors: H264, H265 and VP9
|
|
||||||
# When updating FLUSTER_REVISION, make sure to update the vectors if necessary or
|
|
||||||
# fluster-runner will report Missing results.
|
|
||||||
fluster/fluster.py download -j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
JVT-AVC_V1 JVT-FR-EXT JVT-MVC JVT-SVC_V1 \
|
|
||||||
JCT-VC-3D-HEVC JCT-VC-HEVC_V1 JCT-VC-MV-HEVC JCT-VC-RExt JCT-VC-SCC JCT-VC-SHVC \
|
|
||||||
VP9-TEST-VECTORS-HIGH VP9-TEST-VECTORS
|
|
||||||
|
|
||||||
# Build fluster vectors archive and upload it
|
|
||||||
tar --zstd -cf "vectors.tar.zst" fluster/resources/
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "vectors.tar.zst" \
|
|
||||||
"https://${S3_BASE_PATH}/${CI_PROJECT_PATH}/${ARTIFACT_PATH}"
|
|
||||||
mv fluster/ /
|
|
||||||
fi
|
|
||||||
|
|
||||||
section_end fluster
|
|
@@ -3,11 +3,10 @@
|
|||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_VK_TAG
|
# DEBIAN_TEST_VK_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start fossilize "Building fossilize"
|
|
||||||
|
|
||||||
git clone https://github.com/ValveSoftware/Fossilize.git
|
git clone https://github.com/ValveSoftware/Fossilize.git
|
||||||
cd Fossilize
|
cd Fossilize
|
||||||
git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
|
git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
|
||||||
@@ -18,5 +17,3 @@ cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
|
|||||||
ninja -C . install
|
ninja -C . install
|
||||||
cd ../..
|
cd ../..
|
||||||
rm -rf Fossilize
|
rm -rf Fossilize
|
||||||
|
|
||||||
section_end fossilize
|
|
||||||
|
@@ -2,8 +2,6 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start gfxreconstruct "Building gfxreconstruct"
|
|
||||||
|
|
||||||
GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae
|
GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae
|
||||||
|
|
||||||
git clone https://github.com/LunarG/gfxreconstruct.git \
|
git clone https://github.com/LunarG/gfxreconstruct.git \
|
||||||
@@ -19,5 +17,3 @@ cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:
|
|||||||
cmake --build _build --parallel --target tools/{replay,info}/install/strip
|
cmake --build _build --parallel --target tools/{replay,info}/install/strip
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end gfxreconstruct
|
|
||||||
|
@@ -3,8 +3,6 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start kdl "Building kdl"
|
|
||||||
|
|
||||||
KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f"
|
KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f"
|
||||||
KDL_CHECKOUT_DIR="/tmp/ci-kdl.git"
|
KDL_CHECKOUT_DIR="/tmp/ci-kdl.git"
|
||||||
|
|
||||||
@@ -28,5 +26,3 @@ popd
|
|||||||
)
|
)
|
||||||
|
|
||||||
rm -rf ${KDL_CHECKOUT_DIR}
|
rm -rf ${KDL_CHECKOUT_DIR}
|
||||||
|
|
||||||
section_end kdl
|
|
||||||
|
@@ -2,8 +2,6 @@
|
|||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start libclc "Building libclc"
|
|
||||||
|
|
||||||
export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
|
export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
|
||||||
LLVM_TAG="llvmorg-15.0.7"
|
LLVM_TAG="llvmorg-15.0.7"
|
||||||
|
|
||||||
@@ -31,5 +29,3 @@ ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
|
|||||||
|
|
||||||
du -sh ./*
|
du -sh ./*
|
||||||
rm -rf /libclc /llvm-project
|
rm -rf /libclc /llvm-project
|
||||||
|
|
||||||
section_end libclc
|
|
||||||
|
@@ -5,8 +5,6 @@
|
|||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start libdrm "Building libdrm"
|
|
||||||
|
|
||||||
export LIBDRM_VERSION=libdrm-2.4.122
|
export LIBDRM_VERSION=libdrm-2.4.122
|
||||||
|
|
||||||
curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
@@ -17,5 +15,3 @@ meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled ${EX
|
|||||||
meson install -C build
|
meson install -C build
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf "$LIBDRM_VERSION"
|
rm -rf "$LIBDRM_VERSION"
|
||||||
|
|
||||||
section_end libdrm
|
|
||||||
|
@@ -2,13 +2,7 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start llvm-spirv "Building LLVM-SPIRV-Translator"
|
VER="${LLVM_VERSION:?llvm not set}.0.0"
|
||||||
|
|
||||||
if [ "${LLVM_VERSION:?llvm version not set}" -ge 18 ]; then
|
|
||||||
VER="${LLVM_VERSION}.1.0"
|
|
||||||
else
|
|
||||||
VER="${LLVM_VERSION}.0.0"
|
|
||||||
fi
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
-O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${VER}.tar.gz"
|
-O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${VER}.tar.gz"
|
||||||
@@ -26,5 +20,3 @@ popd
|
|||||||
|
|
||||||
du -sh "SPIRV-LLVM-Translator-${VER}"
|
du -sh "SPIRV-LLVM-Translator-${VER}"
|
||||||
rm -rf "SPIRV-LLVM-Translator-${VER}"
|
rm -rf "SPIRV-LLVM-Translator-${VER}"
|
||||||
|
|
||||||
section_end llvm-spirv
|
|
||||||
|
@@ -8,8 +8,7 @@ set -ex
|
|||||||
# DEBIAN_BASE_TAG
|
# DEBIAN_BASE_TAG
|
||||||
# DEBIAN_BUILD_TAG
|
# DEBIAN_BUILD_TAG
|
||||||
# FEDORA_X86_64_BUILD_TAG
|
# FEDORA_X86_64_BUILD_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
uncollapsed_section_start mold "Building mold"
|
|
||||||
|
|
||||||
MOLD_VERSION="2.32.0"
|
MOLD_VERSION="2.32.0"
|
||||||
|
|
||||||
@@ -20,12 +19,5 @@ cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
|
|||||||
cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}"
|
cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}"
|
||||||
cmake --install . --strip
|
cmake --install . --strip
|
||||||
|
|
||||||
# Always use mold from now on
|
|
||||||
find /usr/bin \( -name '*-ld' -o -name 'ld' \) \
|
|
||||||
-exec ln -sf /usr/local/bin/ld.mold {} \; \
|
|
||||||
-exec ls -l {} +
|
|
||||||
|
|
||||||
popd
|
popd
|
||||||
rm -rf mold
|
rm -rf mold
|
||||||
|
|
||||||
section_end mold
|
|
||||||
|
25
.gitlab-ci/container/build-ninetests.sh
Normal file
25
.gitlab-ci/container/build-ninetests.sh
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# When changing this file, you need to bump the following
|
||||||
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
|
# DEBIAN_TEST_GL_TAG
|
||||||
|
|
||||||
|
set -ex -o pipefail
|
||||||
|
|
||||||
|
### Careful editing anything below this line
|
||||||
|
|
||||||
|
git config --global user.email "mesa@example.com"
|
||||||
|
git config --global user.name "Mesa CI"
|
||||||
|
git clone https://github.com/axeldavy/Xnine.git /Xnine
|
||||||
|
mkdir /Xnine/build
|
||||||
|
pushd /Xnine/build
|
||||||
|
git checkout c64753d224c08006bcdcfa7880ada826f27164b1
|
||||||
|
|
||||||
|
cmake .. -DBUILD_TESTS=1 -DWITH_DRI3=1 -DD3DADAPTER9_LOCATION=/install/lib/d3d/d3dadapter9.so
|
||||||
|
make
|
||||||
|
|
||||||
|
mkdir -p /NineTests/
|
||||||
|
mv NineTests/NineTests /NineTests/
|
||||||
|
|
||||||
|
popd
|
||||||
|
rm -rf /Xnine
|
@@ -2,18 +2,13 @@
|
|||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
section_start piglit "Building piglit"
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_GL_TAG
|
# DEBIAN_TEST_GL_TAG
|
||||||
# DEBIAN_TEST_VK_TAG
|
# DEBIAN_TEST_VK_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
REV="c2b31333926a6171c3c02d182b756efad7770410"
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "PIGLIT_TAG"
|
|
||||||
|
|
||||||
REV="a0a27e528f643dfeb785350a1213bfff09681950"
|
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
|
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
|
||||||
pushd /piglit
|
pushd /piglit
|
||||||
@@ -37,5 +32,3 @@ if [ "${PIGLIT_BUILD_TARGETS:-}" = "piglit_replayer" ]; then
|
|||||||
-exec rm -rf {} \; 2>/dev/null
|
-exec rm -rf {} \; 2>/dev/null
|
||||||
fi
|
fi
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end piglit
|
|
||||||
|
@@ -5,10 +5,17 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
section_start rust "Building Rust toolchain"
|
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
|
||||||
|
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
|
||||||
|
# are just available to all build jobs.
|
||||||
|
mkdir -p "$HOME"/.cargo
|
||||||
|
ln -s /usr/local/bin "$HOME"/.cargo/bin
|
||||||
|
|
||||||
# Pick a specific snapshot from rustup so the compiler doesn't drift on us.
|
# Rusticl requires at least Rust 1.66.0 and NAK requires 1.73.0
|
||||||
RUST_VERSION=1.81.0-2024-09-05
|
#
|
||||||
|
# Also, pick a specific snapshot from rustup so the compiler doesn't drift on
|
||||||
|
# us.
|
||||||
|
RUST_VERSION=1.76.0-2024-02-08
|
||||||
|
|
||||||
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
|
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
|
||||||
# version of the compiler, rather than whatever the container's Debian comes
|
# version of the compiler, rather than whatever the container's Debian comes
|
||||||
@@ -19,20 +26,14 @@ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|||||||
--profile minimal \
|
--profile minimal \
|
||||||
-y
|
-y
|
||||||
|
|
||||||
# Make rustup tools available in the PATH environment variable
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
. "$HOME/.cargo/env"
|
|
||||||
|
|
||||||
rustup component add clippy rustfmt
|
rustup component add clippy rustfmt
|
||||||
|
|
||||||
# Set up a config script for cross compiling -- cargo needs your system cc for
|
# Set up a config script for cross compiling -- cargo needs your system cc for
|
||||||
# linking in cross builds, but doesn't know what you want to use for system cc.
|
# linking in cross builds, but doesn't know what you want to use for system cc.
|
||||||
cat > "$HOME/.cargo/config" <<EOF
|
cat > /root/.cargo/config <<EOF
|
||||||
[target.armv7-unknown-linux-gnueabihf]
|
[target.armv7-unknown-linux-gnueabihf]
|
||||||
linker = "arm-linux-gnueabihf-gcc"
|
linker = "arm-linux-gnueabihf-gcc"
|
||||||
|
|
||||||
[target.aarch64-unknown-linux-gnu]
|
[target.aarch64-unknown-linux-gnu]
|
||||||
linker = "aarch64-linux-gnu-gcc"
|
linker = "aarch64-linux-gnu-gcc"
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
section_end rust
|
|
||||||
|
@@ -6,13 +6,9 @@
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
uncollapsed_section_start shader-db "Building shader-db"
|
|
||||||
|
|
||||||
pushd /usr/local
|
pushd /usr/local
|
||||||
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
|
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
|
||||||
rm -rf shader-db/.git
|
rm -rf shader-db/.git
|
||||||
cd shader-db
|
cd shader-db
|
||||||
make
|
make
|
||||||
popd
|
popd
|
||||||
|
|
||||||
section_end shader-db
|
|
||||||
|
@@ -6,12 +6,10 @@
|
|||||||
#
|
#
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_GL_TAG
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start skqp "Building SkQP"
|
|
||||||
|
|
||||||
SKQP_BRANCH=android-cts-12.1_r5
|
SKQP_BRANCH=android-cts-12.1_r5
|
||||||
|
|
||||||
SCRIPT_DIR="$(pwd)/.gitlab-ci/container"
|
SCRIPT_DIR="$(pwd)/.gitlab-ci/container"
|
||||||
@@ -68,14 +66,14 @@ cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
|
|||||||
|
|
||||||
# hack for skqp see the clang
|
# hack for skqp see the clang
|
||||||
pushd /usr/bin/
|
pushd /usr/bin/
|
||||||
ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang" clang
|
ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang" clang
|
||||||
ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang++" clang++
|
ln -s "../lib/llvm-${LLVM_VERSION:-15}/bin/clang++" clang++
|
||||||
popd
|
popd
|
||||||
|
|
||||||
# Fetch some needed build tools needed to build skia/skqp.
|
# Fetch some needed build tools needed to build skia/skqp.
|
||||||
# Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS
|
# Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS
|
||||||
# directory.
|
# directory.
|
||||||
python3 tools/git-sync-deps
|
python tools/git-sync-deps
|
||||||
|
|
||||||
mkdir -p "${SKQP_OUT_DIR}"
|
mkdir -p "${SKQP_OUT_DIR}"
|
||||||
mkdir -p "${SKQP_INSTALL_DIR}"
|
mkdir -p "${SKQP_INSTALL_DIR}"
|
||||||
@@ -100,5 +98,3 @@ popd
|
|||||||
rm -Rf "${SKIA_DIR}"
|
rm -Rf "${SKIA_DIR}"
|
||||||
|
|
||||||
set +ex
|
set +ex
|
||||||
|
|
||||||
section_end skqp
|
|
||||||
|
@@ -34,11 +34,6 @@ extra_cflags_cc = [
|
|||||||
"-Wno-unused-but-set-variable",
|
"-Wno-unused-but-set-variable",
|
||||||
"-Wno-sizeof-array-div",
|
"-Wno-sizeof-array-div",
|
||||||
"-Wno-string-concatenation",
|
"-Wno-string-concatenation",
|
||||||
"-Wno-unsafe-buffer-usage",
|
|
||||||
"-Wno-switch-default",
|
|
||||||
"-Wno-cast-function-type-strict",
|
|
||||||
"-Wno-format",
|
|
||||||
"-Wno-enum-constexpr-conversion",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
cc_wrapper = "ccache"
|
cc_wrapper = "ccache"
|
||||||
|
@@ -1,14 +1,11 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_VIDEO_TAG
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
section_start va-tools "Building va-tools"
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
git config --global user.email "mesa@example.com"
|
||||||
git config --global user.name "Mesa CI"
|
git config --global user.name "Mesa CI"
|
||||||
|
|
||||||
@@ -20,11 +17,9 @@ git clone \
|
|||||||
|
|
||||||
pushd /va-utils
|
pushd /va-utils
|
||||||
# Too old libva in Debian 11. TODO: when this PR gets in, refer to the patch.
|
# Too old libva in Debian 11. TODO: when this PR gets in, refer to the patch.
|
||||||
curl --fail -L https://github.com/intel/libva-utils/pull/329.patch | git am
|
curl -L https://github.com/intel/libva-utils/pull/329.patch | git am
|
||||||
|
|
||||||
meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-}
|
meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-}
|
||||||
meson install -C build
|
meson install -C build
|
||||||
popd
|
popd
|
||||||
rm -rf /va-utils
|
rm -rf /va-utils
|
||||||
|
|
||||||
section_end va-tools
|
|
||||||
|
@@ -3,45 +3,39 @@
|
|||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_VK_TAG
|
# DEBIAN_TEST_VK_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
section_start vkd3d-proton "Building vkd3d-proton"
|
VKD3D_PROTON_COMMIT="59d6d4b5ed23766e69fe252408a3401d2fd52ce8"
|
||||||
|
|
||||||
# Do a very early check to make sure the tag is correct without the need of
|
|
||||||
# setting up the environment variables locally
|
|
||||||
ci_tag_build_time_check "VKD3D_PROTON_TAG"
|
|
||||||
|
|
||||||
VKD3D_PROTON_COMMIT="6be781076617cb2cb3038710618acc3b57a674db"
|
|
||||||
|
|
||||||
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
|
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
|
||||||
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
|
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
|
||||||
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build"
|
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build"
|
||||||
VKD3D_PROTON_WINE_DIR="/vkd3d-proton-wine64"
|
|
||||||
VKD3D_PROTON_S3_ARTIFACT="vkd3d-proton.tar.zst"
|
|
||||||
|
|
||||||
if [ ! -d "$VKD3D_PROTON_WINE_DIR" ]; then
|
function build_arch {
|
||||||
echo "Fatal: Directory '$VKD3D_PROTON_WINE_DIR' does not exist. Aborting."
|
local arch="$1"
|
||||||
exit 1
|
|
||||||
fi
|
meson setup \
|
||||||
|
-Denable_tests=true \
|
||||||
|
--buildtype release \
|
||||||
|
--prefix "$VKD3D_PROTON_DST_DIR" \
|
||||||
|
--strip \
|
||||||
|
--bindir "x${arch}" \
|
||||||
|
--libdir "x${arch}" \
|
||||||
|
"$VKD3D_PROTON_BUILD_DIR/build.${arch}"
|
||||||
|
|
||||||
|
ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install
|
||||||
|
|
||||||
|
install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/d3d12"
|
||||||
|
}
|
||||||
|
|
||||||
git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR"
|
git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR"
|
||||||
pushd "$VKD3D_PROTON_SRC_DIR"
|
pushd "$VKD3D_PROTON_SRC_DIR"
|
||||||
git checkout "$VKD3D_PROTON_COMMIT"
|
git checkout "$VKD3D_PROTON_COMMIT"
|
||||||
git submodule update --init --recursive
|
git submodule update --init --recursive
|
||||||
git submodule update --recursive
|
git submodule update --recursive
|
||||||
|
build_arch 64
|
||||||
meson setup \
|
build_arch 86
|
||||||
-D enable_tests=true \
|
|
||||||
--buildtype release \
|
|
||||||
--prefix "$VKD3D_PROTON_DST_DIR" \
|
|
||||||
--strip \
|
|
||||||
--libdir "lib" \
|
|
||||||
"$VKD3D_PROTON_BUILD_DIR/build"
|
|
||||||
|
|
||||||
ninja -C "$VKD3D_PROTON_BUILD_DIR/build" install
|
|
||||||
|
|
||||||
install -m755 -t "${VKD3D_PROTON_DST_DIR}/" "$VKD3D_PROTON_BUILD_DIR/build/tests/d3d12"
|
|
||||||
|
|
||||||
mkdir "$VKD3D_PROTON_DST_DIR/tests"
|
mkdir "$VKD3D_PROTON_DST_DIR/tests"
|
||||||
cp \
|
cp \
|
||||||
"tests/test-runner.sh" \
|
"tests/test-runner.sh" \
|
||||||
@@ -49,19 +43,5 @@ cp \
|
|||||||
"$VKD3D_PROTON_DST_DIR/tests/"
|
"$VKD3D_PROTON_DST_DIR/tests/"
|
||||||
popd
|
popd
|
||||||
|
|
||||||
# Archive and upload vkd3d-proton for use as a LAVA overlay, if the archive doesn't exist yet
|
|
||||||
ARTIFACT_PATH="${DATA_STORAGE_PATH}/vkd3d-proton/${VKD3D_PROTON_TAG}/${CI_JOB_NAME}/${VKD3D_PROTON_S3_ARTIFACT}"
|
|
||||||
if FOUND_ARTIFACT_URL="$(find_s3_project_artifact "${ARTIFACT_PATH}")"; then
|
|
||||||
echo "Found vkd3d-proton at: ${FOUND_ARTIFACT_URL}, skipping upload"
|
|
||||||
else
|
|
||||||
echo "Uploaded vkd3d-proton not found, reuploading..."
|
|
||||||
tar --zstd -cf "$VKD3D_PROTON_S3_ARTIFACT" -C / "${VKD3D_PROTON_DST_DIR#/}" "${VKD3D_PROTON_WINE_DIR#/}"
|
|
||||||
ci-fairy s3cp --token-file "${S3_JWT_FILE}" "$VKD3D_PROTON_S3_ARTIFACT" \
|
|
||||||
"https://${S3_BASE_PATH}/${CI_PROJECT_PATH}/${ARTIFACT_PATH}"
|
|
||||||
rm "$VKD3D_PROTON_S3_ARTIFACT"
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rf "$VKD3D_PROTON_BUILD_DIR"
|
rm -rf "$VKD3D_PROTON_BUILD_DIR"
|
||||||
rm -rf "$VKD3D_PROTON_SRC_DIR"
|
rm -rf "$VKD3D_PROTON_SRC_DIR"
|
||||||
|
|
||||||
section_end vkd3d-proton
|
|
||||||
|
@@ -3,22 +3,17 @@
|
|||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_TEST_GL_TAG
|
# DEBIAN_TEST_GL_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start vulkan-validation "Building Vulkan validation layers"
|
VALIDATION_TAG="snapshot-2024wk39"
|
||||||
|
|
||||||
VALIDATION_TAG="snapshot-2025wk15"
|
|
||||||
|
|
||||||
git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
|
git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
|
||||||
pushd Vulkan-ValidationLayers
|
pushd Vulkan-ValidationLayers
|
||||||
# we don't need to build SPIRV-Tools tools
|
python3 scripts/update_deps.py --dir external --config release --generator Ninja
|
||||||
sed -i scripts/known_good.json -e 's/SPIRV_SKIP_EXECUTABLES=OFF/SPIRV_SKIP_EXECUTABLES=ON/'
|
|
||||||
python3 scripts/update_deps.py --dir external --config release --generator Ninja --optional tests
|
|
||||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build
|
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build
|
||||||
ninja -C build -j"${FDO_CI_CONCURRENT:-4}"
|
ninja -C build
|
||||||
cmake --install build --strip
|
cmake --install build --strip
|
||||||
popd
|
popd
|
||||||
rm -rf Vulkan-ValidationLayers
|
rm -rf Vulkan-ValidationLayers
|
||||||
|
|
||||||
section_end vulkan-validation
|
|
||||||
|
@@ -3,20 +3,17 @@
|
|||||||
|
|
||||||
set -uex
|
set -uex
|
||||||
|
|
||||||
uncollapsed_section_start wayland "Building Wayland"
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# ALPINE_X86_64_BUILD_TAG
|
|
||||||
# DEBIAN_BASE_TAG
|
|
||||||
# DEBIAN_BUILD_TAG
|
# DEBIAN_BUILD_TAG
|
||||||
# DEBIAN_TEST_ANDROID_TAG
|
# DEBIAN_TEST_ANDROID_TAG
|
||||||
# DEBIAN_TEST_GL_TAG
|
# DEBIAN_TEST_GL_TAG
|
||||||
# DEBIAN_TEST_VK_TAG
|
# DEBIAN_TEST_VK_TAG
|
||||||
# FEDORA_X86_64_BUILD_TAG
|
# FEDORA_X86_64_BUILD_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
export LIBWAYLAND_VERSION="1.21.0"
|
export LIBWAYLAND_VERSION="1.21.0"
|
||||||
export WAYLAND_PROTOCOLS_VERSION="1.41"
|
export WAYLAND_PROTOCOLS_VERSION="1.38"
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/wayland/wayland
|
git clone https://gitlab.freedesktop.org/wayland/wayland
|
||||||
cd wayland
|
cd wayland
|
||||||
@@ -33,5 +30,3 @@ meson setup -Dtests=false _build ${EXTRA_MESON_ARGS:-}
|
|||||||
meson install -C _build
|
meson install -C _build
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf wayland-protocols
|
rm -rf wayland-protocols
|
||||||
|
|
||||||
section_end wayland
|
|
||||||
|
@@ -1,24 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# When changing this file, all the linux tags in
|
|
||||||
# .gitlab-ci/image-tags.yml need updating.
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
# Early check for required env variables, relies on `set -u`
|
|
||||||
: "$S3_JWT_FILE_SCRIPT"
|
|
||||||
|
|
||||||
if [ -z "$1" ]; then
|
|
||||||
echo "usage: $(basename "$0") <CONTAINER_CI_JOB_NAME>" 1>&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
CONTAINER_CI_JOB_NAME="$1"
|
|
||||||
|
|
||||||
# Tasks to perform before executing the script of a container job
|
|
||||||
eval "$S3_JWT_FILE_SCRIPT"
|
|
||||||
unset S3_JWT_FILE_SCRIPT
|
|
||||||
|
|
||||||
trap 'rm -f ${S3_JWT_FILE}' EXIT INT TERM
|
|
||||||
|
|
||||||
bash ".gitlab-ci/container/${CONTAINER_CI_JOB_NAME}.sh"
|
|
@@ -6,6 +6,8 @@ fi
|
|||||||
|
|
||||||
# Clean up any build cache
|
# Clean up any build cache
|
||||||
rm -rf /root/.cache
|
rm -rf /root/.cache
|
||||||
|
rm -rf /root/.cargo
|
||||||
|
rm -rf /.cargo
|
||||||
|
|
||||||
if test -x /usr/bin/ccache; then
|
if test -x /usr/bin/ccache; then
|
||||||
ccache --show-stats
|
ccache --show-stats
|
||||||
|
@@ -1,7 +1,4 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
|
|
||||||
if test -x /usr/bin/ccache; then
|
if test -x /usr/bin/ccache; then
|
||||||
if test -f /etc/debian_version; then
|
if test -f /etc/debian_version; then
|
||||||
@@ -26,6 +23,19 @@ if test -x /usr/bin/ccache; then
|
|||||||
ccache --show-stats
|
ccache --show-stats
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# When not using the mold linker (e.g. unsupported architecture), force
|
||||||
|
# linkers to gold, since it's so much faster for building. We can't use
|
||||||
|
# lld because we're on old debian and it's buggy. mingw fails meson builds
|
||||||
|
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
|
||||||
|
if [ -e /usr/bin/ld.gold ]; then
|
||||||
|
find /usr/bin -name \*-ld -o -name ld | \
|
||||||
|
grep -v mingw | \
|
||||||
|
xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'
|
||||||
|
else
|
||||||
|
echo "ld.gold is missing, not replacing ld with it."
|
||||||
|
echo "Builds might be slower, consider installing gold."
|
||||||
|
fi
|
||||||
|
|
||||||
# Make a wrapper script for ninja to always include the -j flags
|
# Make a wrapper script for ninja to always include the -j flags
|
||||||
{
|
{
|
||||||
echo '#!/bin/sh -x'
|
echo '#!/bin/sh -x'
|
||||||
@@ -38,29 +48,10 @@ chmod +x /usr/local/bin/ninja
|
|||||||
# flags (doesn't apply to non-container builds, but we don't run make there)
|
# flags (doesn't apply to non-container builds, but we don't run make there)
|
||||||
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"
|
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"
|
||||||
|
|
||||||
# Ensure that rust tools are in PATH if they exist
|
# make wget to try more than once, when download fails or timeout
|
||||||
CARGO_ENV_FILE="$HOME/.cargo/env"
|
echo -e "retry_connrefused = on\n" \
|
||||||
if [ -f "$CARGO_ENV_FILE" ]; then
|
"read_timeout = 300\n" \
|
||||||
# shellcheck disable=SC1090
|
"tries = 4\n" \
|
||||||
source "$CARGO_ENV_FILE"
|
"retry_on_host_error = on\n" \
|
||||||
fi
|
"retry_on_http_error = 429,500,502,503,504\n" \
|
||||||
|
"wait_retry = 32" >> /etc/wgetrc
|
||||||
ci_tag_early_checks() {
|
|
||||||
# Runs the first part of the build script to perform the tag check only
|
|
||||||
uncollapsed_section_switch "ci_tag_early_checks" "Ensuring component versions match declared tags in CI builds"
|
|
||||||
echo "[Structured Tagging] Checking components: ${CI_BUILD_COMPONENTS}"
|
|
||||||
# shellcheck disable=SC2086
|
|
||||||
for component in ${CI_BUILD_COMPONENTS}; do
|
|
||||||
bin/ci/update_tag.py --check ${component} || exit 1
|
|
||||||
done
|
|
||||||
echo "[Structured Tagging] Components check done"
|
|
||||||
section_end "ci_tag_early_checks"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check if each declared tag component is up to date before building
|
|
||||||
if [ -n "${CI_BUILD_COMPONENTS:-}" ]; then
|
|
||||||
# Remove any duplicates by splitting on whitespace, sorting, then joining back
|
|
||||||
CI_BUILD_COMPONENTS="$(echo "${CI_BUILD_COMPONENTS}" | xargs -n1 | sort -u | xargs)"
|
|
||||||
|
|
||||||
ci_tag_early_checks
|
|
||||||
fi
|
|
||||||
|
@@ -18,7 +18,7 @@ cat > "$cross_file" <<EOF
|
|||||||
[binaries]
|
[binaries]
|
||||||
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ar'
|
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ar'
|
||||||
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
|
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
|
||||||
cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '--start-no-unused-arguments', '-static-libstdc++', '--end-no-unused-arguments']
|
cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '-static-libstdc++']
|
||||||
c_ld = 'lld'
|
c_ld = 'lld'
|
||||||
cpp_ld = 'lld'
|
cpp_ld = 'lld'
|
||||||
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip'
|
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip'
|
||||||
|
@@ -2,13 +2,10 @@
|
|||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
export DEBIAN_FRONTEND=noninteractive
|
export DEBIAN_FRONTEND=noninteractive
|
||||||
: "${LLVM_VERSION:?llvm version not set!}"
|
export LLVM_VERSION="${LLVM_VERSION:=15}"
|
||||||
|
|
||||||
# Ephemeral packages (installed for this script and removed again at the end)
|
# Ephemeral packages (installed for this script and removed again at the end)
|
||||||
EPHEMERAL=(
|
EPHEMERAL=(
|
||||||
|
31
.gitlab-ci/container/debian/android_build.sh
Executable file → Normal file
31
.gitlab-ci/container/debian/android_build.sh
Executable file → Normal file
@@ -5,11 +5,7 @@
|
|||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_BUILD_TAG
|
# DEBIAN_BUILD_TAG
|
||||||
|
|
||||||
set -e
|
set -ex
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -x
|
|
||||||
|
|
||||||
EPHEMERAL=(
|
EPHEMERAL=(
|
||||||
autoconf
|
autoconf
|
||||||
@@ -19,13 +15,11 @@ EPHEMERAL=(
|
|||||||
|
|
||||||
apt-get install -y --no-remove "${EPHEMERAL[@]}"
|
apt-get install -y --no-remove "${EPHEMERAL[@]}"
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
# Fetch the NDK and extract just the toolchain we want.
|
# Fetch the NDK and extract just the toolchain we want.
|
||||||
ndk="android-ndk-${ANDROID_NDK_VERSION}"
|
ndk=$ANDROID_NDK
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
||||||
-o $ndk.zip https://dl.google.com/android/repository/$ndk-linux.zip
|
-o $ndk.zip https://dl.google.com/android/repository/$ndk-linux.zip
|
||||||
unzip -d / $ndk.zip "$ndk/source.properties" "$ndk/build/cmake/*" "$ndk/toolchains/llvm/*"
|
unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
|
||||||
rm $ndk.zip
|
rm $ndk.zip
|
||||||
# Since it was packed as a zip file, symlinks/hardlinks got turned into
|
# Since it was packed as a zip file, symlinks/hardlinks got turned into
|
||||||
# duplicate files. Turn them into hardlinks to save on container space.
|
# duplicate files. Turn them into hardlinks to save on container space.
|
||||||
@@ -40,12 +34,6 @@ sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x8
|
|||||||
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android aarch64 armv8 $ANDROID_SDK_VERSION
|
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android aarch64 armv8 $ANDROID_SDK_VERSION
|
||||||
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl $ANDROID_SDK_VERSION armv7a-linux-androideabi
|
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl $ANDROID_SDK_VERSION armv7a-linux-androideabi
|
||||||
|
|
||||||
# Build libdrm for the host (Debian) environment, so it's available for
|
|
||||||
# binaries we'll run as part of the build process
|
|
||||||
. .gitlab-ci/container/build-libdrm.sh
|
|
||||||
|
|
||||||
# Build libdrm for the NDK environment, so it's available when building for
|
|
||||||
# the Android target
|
|
||||||
for arch in \
|
for arch in \
|
||||||
x86_64-linux-android \
|
x86_64-linux-android \
|
||||||
i686-linux-android \
|
i686-linux-android \
|
||||||
@@ -97,22 +85,9 @@ for arch in \
|
|||||||
--libdir=/usr/local/lib/${arch}
|
--libdir=/usr/local/lib/${arch}
|
||||||
make install
|
make install
|
||||||
make distclean
|
make distclean
|
||||||
|
|
||||||
unset CC
|
|
||||||
unset CC
|
|
||||||
unset CXX
|
|
||||||
unset LD
|
|
||||||
unset RANLIB
|
|
||||||
done
|
done
|
||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
rm -rf $LIBELF_VERSION
|
rm -rf $LIBELF_VERSION
|
||||||
|
|
||||||
|
|
||||||
# Build LLVM libraries for Android only if necessary, uploading a copy to S3
|
|
||||||
# to avoid rebuilding it in a future run if the version does not change.
|
|
||||||
bash .gitlab-ci/container/build-android-x86_64-llvm.sh
|
|
||||||
|
|
||||||
apt-get purge -y "${EPHEMERAL[@]}"
|
apt-get purge -y "${EPHEMERAL[@]}"
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
||||||
|
@@ -1,4 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
DEBIAN_ARCH=armhf \
|
|
||||||
. .gitlab-ci/container/debian/test-base.sh
|
|
@@ -1,4 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
DEBIAN_ARCH="armhf" \
|
|
||||||
. .gitlab-ci/container/debian/test-gl.sh
|
|
@@ -1,4 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
DEBIAN_ARCH="armhf" \
|
|
||||||
. .gitlab-ci/container/debian/test-vk.sh
|
|
@@ -1,23 +1,15 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
# shellcheck disable=SC2086 # we want word splitting
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
: "${LLVM_VERSION:?llvm version not set}"
|
export LLVM_VERSION="${LLVM_VERSION:=15}"
|
||||||
|
|
||||||
apt-get -y install ca-certificates curl gnupg2
|
apt-get -y install ca-certificates
|
||||||
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
|
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
|
||||||
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
|
echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list
|
||||||
|
apt-get update
|
||||||
. .gitlab-ci/container/debian/maybe-add-llvm-repo.sh
|
|
||||||
|
|
||||||
# Ephemeral packages (installed for this script and removed again at the end)
|
# Ephemeral packages (installed for this script and removed again at the end)
|
||||||
EPHEMERAL=(
|
EPHEMERAL=(
|
||||||
@@ -71,6 +63,7 @@ DEPS=(
|
|||||||
libwayland-egl-backend-dev
|
libwayland-egl-backend-dev
|
||||||
"llvm-${LLVM_VERSION}-dev"
|
"llvm-${LLVM_VERSION}-dev"
|
||||||
ninja-build
|
ninja-build
|
||||||
|
meson
|
||||||
openssh-server
|
openssh-server
|
||||||
pkgconf
|
pkgconf
|
||||||
python3-mako
|
python3-mako
|
||||||
@@ -79,28 +72,17 @@ DEPS=(
|
|||||||
python3-pycparser
|
python3-pycparser
|
||||||
python3-requests
|
python3-requests
|
||||||
python3-setuptools
|
python3-setuptools
|
||||||
python3-venv
|
|
||||||
shellcheck
|
|
||||||
u-boot-tools
|
u-boot-tools
|
||||||
xz-utils
|
xz-utils
|
||||||
yamllint
|
|
||||||
zlib1g-dev
|
zlib1g-dev
|
||||||
zstd
|
zstd
|
||||||
)
|
)
|
||||||
|
|
||||||
apt-get update
|
|
||||||
|
|
||||||
apt-get -y install "${DEPS[@]}" "${EPHEMERAL[@]}"
|
apt-get -y install "${DEPS[@]}" "${EPHEMERAL[@]}"
|
||||||
|
|
||||||
# Needed for ci-fairy s3cp
|
pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
|
||||||
pip3 install --break-system-packages "ci-fairy[s3] @ git+https://gitlab.freedesktop.org/freedesktop/ci-templates@$MESA_TEMPLATES_COMMIT"
|
|
||||||
|
|
||||||
pip3 install --break-system-packages -r bin/ci/test/requirements.txt
|
|
||||||
|
|
||||||
. .gitlab-ci/container/install-meson.sh
|
|
||||||
|
|
||||||
arch=armhf
|
arch=armhf
|
||||||
|
|
||||||
. .gitlab-ci/container/cross_build.sh
|
. .gitlab-ci/container/cross_build.sh
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
. .gitlab-ci/container/container_pre_build.sh
|
||||||
@@ -113,6 +95,8 @@ arch=armhf
|
|||||||
|
|
||||||
. .gitlab-ci/container/build-libclc.sh
|
. .gitlab-ci/container/build-libclc.sh
|
||||||
|
|
||||||
|
. .gitlab-ci/container/install-meson.sh
|
||||||
|
|
||||||
. .gitlab-ci/container/build-rust.sh
|
. .gitlab-ci/container/build-rust.sh
|
||||||
|
|
||||||
. .gitlab-ci/container/build-bindgen.sh
|
. .gitlab-ci/container/build-bindgen.sh
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
DEBIAN_ARCH="arm64" \
|
|
||||||
. .gitlab-ci/container/debian/test-gl.sh
|
. .gitlab-ci/container/debian/test-gl.sh
|
||||||
|
|
||||||
|
. .gitlab-ci/container/strip-rootfs.sh
|
||||||
|
@@ -1,4 +1,5 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
DEBIAN_ARCH="arm64" \
|
|
||||||
. .gitlab-ci/container/debian/test-vk.sh
|
. .gitlab-ci/container/debian/test-vk.sh
|
||||||
|
|
||||||
|
. .gitlab-ci/container/strip-rootfs.sh
|
||||||
|
@@ -1,5 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
arch=arm64 . .gitlab-ci/container/debian/baremetal_arm_test.sh
|
|
@@ -3,17 +3,16 @@
|
|||||||
# When changing this file, you need to bump the following
|
# When changing this file, you need to bump the following
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
# .gitlab-ci/image-tags.yml tags:
|
||||||
# DEBIAN_BASE_TAG
|
# DEBIAN_BASE_TAG
|
||||||
|
# KERNEL_ROOTFS_TAG
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
. .gitlab-ci/setup-test-env.sh
|
|
||||||
|
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
############### Install packages for baremetal testing
|
############### Install packages for baremetal testing
|
||||||
DEPS=(
|
DEPS=(
|
||||||
cpio
|
cpio
|
||||||
curl
|
curl
|
||||||
|
fastboot
|
||||||
netcat-openbsd
|
netcat-openbsd
|
||||||
openssh-server
|
openssh-server
|
||||||
procps
|
procps
|
||||||
@@ -40,3 +39,15 @@ curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|||||||
-o /usr/share/snmp/mibs/SNMPv2-SMI.txt
|
-o /usr/share/snmp/mibs/SNMPv2-SMI.txt
|
||||||
|
|
||||||
. .gitlab-ci/container/baremetal_build.sh
|
. .gitlab-ci/container/baremetal_build.sh
|
||||||
|
|
||||||
|
mkdir -p /baremetal-files/jetson-nano/boot/
|
||||||
|
ln -s \
|
||||||
|
/baremetal-files/Image \
|
||||||
|
/baremetal-files/tegra210-p3450-0000.dtb \
|
||||||
|
/baremetal-files/jetson-nano/boot/
|
||||||
|
|
||||||
|
mkdir -p /baremetal-files/jetson-tk1/boot/
|
||||||
|
ln -s \
|
||||||
|
/baremetal-files/zImage \
|
||||||
|
/baremetal-files/tegra124-jetson-tk1.dtb \
|
||||||
|
/baremetal-files/jetson-tk1/boot/
|
||||||
|
@@ -1,577 +0,0 @@
|
|||||||
# Build the CI Debian docker images.
|
|
||||||
#
|
|
||||||
# MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the
|
|
||||||
# image doesn't exist yet, the container stage job generates it.
|
|
||||||
#
|
|
||||||
# In order to generate a new image, one should generally change the tag.
|
|
||||||
# While removing the image from the registry would also work, that's not
|
|
||||||
# recommended except for ephemeral images during development: Replacing
|
|
||||||
# an image after a significant amount of time might pull in newer
|
|
||||||
# versions of gcc/clang or other packages, which might break the build
|
|
||||||
# with older commits using the same tag.
|
|
||||||
#
|
|
||||||
# After merging a change resulting in generating a new image to the
|
|
||||||
# main repository, it's recommended to remove the image from the source
|
|
||||||
# repository's container registry, so that the image from the main
|
|
||||||
# repository's registry will be used there as well.
|
|
||||||
|
|
||||||
.debian-container-version:
|
|
||||||
variables:
|
|
||||||
FDO_DISTRIBUTION_VERSION: bookworm-slim
|
|
||||||
|
|
||||||
.debian-container:
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .container
|
|
||||||
- .debian-container-version
|
|
||||||
|
|
||||||
# Debian based x86_64 build image base
|
|
||||||
debian/x86_64_build-base:
|
|
||||||
extends:
|
|
||||||
- .debian-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_build-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LLVM_VERSION: &debian-x86_64-llvm 19
|
|
||||||
|
|
||||||
.use-debian/x86_64_build-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .debian-container-version
|
|
||||||
- .use-base-image
|
|
||||||
variables:
|
|
||||||
MESA_BASE_IMAGE: "debian/x86_64_build-base"
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_build-base
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_64 main build image
|
|
||||||
debian/x86_64_build:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_build ${DEBIAN_BUILD_TAG}
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
|
|
||||||
.use-debian/x86_64_build:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_64_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_64_build
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_32 cross-build image
|
|
||||||
debian/x86_32_build:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_32_build ${DEBIAN_BUILD_TAG}
|
|
||||||
|
|
||||||
.use-debian/x86_32_build:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_32_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_32_build
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_32_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based ppc64el cross-build image
|
|
||||||
debian/ppc64el_build:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-ppc64el_build ${DEBIAN_BUILD_TAG}
|
|
||||||
LLVM_VERSION: &debian-ppc64el-llvm 15 # no LLVM packages for PPC
|
|
||||||
|
|
||||||
.use-debian/ppc64el_build:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
MESA_IMAGE_PATH: "debian/ppc64el_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-ppc64el_build
|
|
||||||
LLVM_VERSION: *debian-ppc64el-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/ppc64el_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based s390x cross-build image
|
|
||||||
debian/s390x_build:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-s390x_build ${DEBIAN_BUILD_TAG}
|
|
||||||
LLVM_VERSION: &debian-s390x-llvm 19
|
|
||||||
|
|
||||||
.use-debian/s390x_build:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
MESA_IMAGE_PATH: "debian/s390x_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-s390x_build
|
|
||||||
LLVM_VERSION: *debian-s390x-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/s390x_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Android NDK cross-build image
|
|
||||||
.android-variables:
|
|
||||||
variables:
|
|
||||||
ANDROID_VERSION: 14
|
|
||||||
ANDROID_NDK_VERSION: "r27c"
|
|
||||||
ANDROID_SDK_VERSION: 34
|
|
||||||
ANDROID_LLVM_VERSION: llvmorg-19.1.7
|
|
||||||
ANDROID_LLVM_ARTIFACT_NAME: android-x86_64-llvm-20250324
|
|
||||||
# This can be confusing: LLVM_VERSION refers to the host LLVM toolchain
|
|
||||||
# used (LLVM 19 in our Debian system), but ANDROID_LLVM_VERSION refers to
|
|
||||||
# the cross-compiling LLVM toolchain used to build for the Android system.
|
|
||||||
LLVM_VERSION: &debian-android-llvm 19
|
|
||||||
CUTTLEFISH_PROJECT_PATH: ao2/aosp-manifest
|
|
||||||
CUTTLEFISH_BUILD_VERSION_TAGS: mesa-venus
|
|
||||||
CUTTLEFISH_BUILD_NUMBER: 20250701.001
|
|
||||||
AOSP_KERNEL_PROJECT_PATH: ao2/aosp-kernel-manifest
|
|
||||||
AOSP_KERNEL_BUILD_VERSION_TAGS: common-android14-6.1-venus
|
|
||||||
AOSP_KERNEL_BUILD_NUMBER: 20241107.001
|
|
||||||
|
|
||||||
debian/android_build:
|
|
||||||
extends:
|
|
||||||
- .android-variables
|
|
||||||
- .use-debian/x86_64_build-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-android_build ${DEBIAN_BUILD_TAG}
|
|
||||||
|
|
||||||
.use-debian/android_build:
|
|
||||||
extends:
|
|
||||||
- .android-variables
|
|
||||||
- .set-image-base-tag
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_build-base
|
|
||||||
MESA_IMAGE_PATH: "debian/android_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-android_build
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/android_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based ARM build image
|
|
||||||
debian/arm64_build:
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .container
|
|
||||||
- .debian-container-version
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm64_build "${DEBIAN_BUILD_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LLVM_VERSION: &debian-arm64-llvm 19
|
|
||||||
|
|
||||||
.use-debian/arm64_build:
|
|
||||||
extends:
|
|
||||||
- .set-image
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_PATH: "debian/arm64_build"
|
|
||||||
MESA_IMAGE_TAG: *debian-arm64_build
|
|
||||||
MESA_ARTIFACTS_TAG: *debian-arm64_build
|
|
||||||
LLVM_VERSION: *debian-arm64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm64_build
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
|
|
||||||
# Debian based x86_64 test image base
|
|
||||||
debian/x86_64_test-base:
|
|
||||||
extends:
|
|
||||||
- .debian-container
|
|
||||||
- .container-builds-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_test-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
|
|
||||||
.use-debian/x86_64_test-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .debian-container-version
|
|
||||||
- .use-base-image
|
|
||||||
variables:
|
|
||||||
MESA_BASE_IMAGE: "debian/x86_64_test-base"
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_test-base
|
|
||||||
LLVM_VERSION: *debian-x86_64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_test-base
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based ARMv7/armhf test image base
|
|
||||||
debian/arm32_test-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .debian-container
|
|
||||||
- .container-builds-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm32_test-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LLVM_VERSION: *debian-arm64-llvm
|
|
||||||
FDO_DISTRIBUTION_PLATFORM: "linux/arm/v7"
|
|
||||||
|
|
||||||
.use-debian/arm32_test-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .debian-container-version
|
|
||||||
- .use-base-image
|
|
||||||
variables:
|
|
||||||
MESA_BASE_IMAGE: "debian/arm32_test-base"
|
|
||||||
MESA_BASE_TAG: *debian-arm32_test-base
|
|
||||||
LLVM_VERSION: *debian-arm64-llvm
|
|
||||||
FDO_DISTRIBUTION_PLATFORM: "linux/arm/v7"
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm32_test-base
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based aarch64 test image base
|
|
||||||
debian/arm64_test-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .debian-container
|
|
||||||
- .container-builds-base
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm64_test-base "${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LLVM_VERSION: *debian-arm64-llvm
|
|
||||||
|
|
||||||
.use-debian/arm64_test-base:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .debian-container-version
|
|
||||||
- .use-base-image
|
|
||||||
variables:
|
|
||||||
MESA_BASE_IMAGE: "debian/arm64_test-base"
|
|
||||||
MESA_BASE_TAG: *debian-arm64_test-base
|
|
||||||
LLVM_VERSION: *debian-arm64-llvm
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm64_test-base
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_64 test image for GL
|
|
||||||
debian/x86_64_test-gl:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-base
|
|
||||||
- .container-builds-gl
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_test-gl ${DEBIAN_TEST_GL_TAG}
|
|
||||||
|
|
||||||
.use-debian/x86_64_test-gl:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_64_test-gl"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_64_test-gl
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_test-gl
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_64 test image for VK
|
|
||||||
debian/x86_64_test-vk:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-base
|
|
||||||
- .container-builds-vk
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_test-vk ${DEBIAN_TEST_VK_TAG}
|
|
||||||
|
|
||||||
.use-debian/x86_64_test-vk:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_64_test-vk"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_64_test-vk
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_test-vk
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_64 test image for Android
|
|
||||||
debian/x86_64_test-android:
|
|
||||||
extends:
|
|
||||||
- .android-variables
|
|
||||||
- .use-debian/x86_64_test-base
|
|
||||||
- .container-builds-android
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_test-android ${DEBIAN_TEST_ANDROID_TAG}
|
|
||||||
|
|
||||||
.use-debian/x86_64_test-android:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM
|
|
||||||
extends:
|
|
||||||
- .android-variables
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_64_test-android"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_64_test-android
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_test-android
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based x86_64 test image for video
|
|
||||||
debian/x86_64_test-video:
|
|
||||||
extends:
|
|
||||||
- .use-debian/x86_64_test-base
|
|
||||||
- .container-builds-video
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-x86_64_test-video ${DEBIAN_TEST_VIDEO_TAG}
|
|
||||||
|
|
||||||
.use-debian/x86_64_test-video:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-x86_64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/x86_64_test-video"
|
|
||||||
MESA_IMAGE_TAG: *debian-x86_64_test-video
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/x86_64_test-video
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based ARMv7/armhf test image for GL
|
|
||||||
debian/arm32_test-gl:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm32_test-base
|
|
||||||
- .container-builds-arm32
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm32_test-gl ${DEBIAN_TEST_GL_TAG}
|
|
||||||
|
|
||||||
.use-debian/arm32_test-gl:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-arm32_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/arm32_test-gl"
|
|
||||||
MESA_IMAGE_TAG: *debian-arm32_test-gl
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm32_test-gl
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based ARMv7/armhf test image for VK
|
|
||||||
debian/arm32_test-vk:
|
|
||||||
rules:
|
|
||||||
- when: never # There are currently no arm32 VK jobs
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm32_test-base
|
|
||||||
- .container-builds-arm32
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm32_test-vk ${DEBIAN_TEST_VK_TAG}
|
|
||||||
|
|
||||||
.use-debian/arm32_test-vk:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-arm32_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/arm32_test-vk"
|
|
||||||
MESA_IMAGE_TAG: *debian-arm32_test-vk
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm32_test-vk
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based aarch64 test image for GL
|
|
||||||
debian/arm64_test-gl:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-base
|
|
||||||
- .container-builds-gl
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm64_test-gl ${DEBIAN_TEST_GL_TAG}
|
|
||||||
|
|
||||||
.use-debian/arm64_test-gl:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-arm64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/arm64_test-gl"
|
|
||||||
MESA_IMAGE_TAG: *debian-arm64_test-gl
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# Debian based aarch64 test image for VK
|
|
||||||
debian/arm64_test-vk:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .use-debian/arm64_test-base
|
|
||||||
- .container-builds-vk
|
|
||||||
- .export-container
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &debian-arm64_test-vk ${DEBIAN_TEST_VK_TAG}
|
|
||||||
|
|
||||||
.use-debian/arm64_test-vk:
|
|
||||||
tags:
|
|
||||||
- $FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64
|
|
||||||
extends:
|
|
||||||
- .set-image-base-tag
|
|
||||||
variables:
|
|
||||||
MESA_BASE_TAG: *debian-arm64_test-base
|
|
||||||
MESA_IMAGE_PATH: "debian/arm64_test-vk"
|
|
||||||
MESA_IMAGE_TAG: *debian-arm64_test-vk
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/arm64_test-vk
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
# x86_64 image with ARM64 & ARM32 kernel & rootfs for baremetal testing
|
|
||||||
.debian/baremetal_arm_test:
|
|
||||||
extends:
|
|
||||||
- .fdo.container-build@debian
|
|
||||||
- .container
|
|
||||||
- .debian-container-version
|
|
||||||
variables:
|
|
||||||
FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
MESA_ARTIFACTS_TAG: *debian-arm64_build
|
|
||||||
|
|
||||||
debian/baremetal_arm32_test-gl:
|
|
||||||
extends:
|
|
||||||
- .debian/baremetal_arm_test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm32_test-gl
|
|
||||||
optional: true
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &baremetal-arm32_test-gl "${DEBIAN_TEST_GL_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LAVA_DISTRIBUTION_TAG: "debian/arm32_test-gl:${DEBIAN_TEST_GL_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
|
|
||||||
debian/baremetal_arm64_test-gl:
|
|
||||||
extends:
|
|
||||||
- .debian/baremetal_arm_test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &baremetal-arm64_test-gl "${DEBIAN_TEST_GL_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LAVA_DISTRIBUTION_TAG: "debian/arm64_test-gl:${DEBIAN_TEST_GL_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
|
|
||||||
debian/baremetal_arm64_test-vk:
|
|
||||||
extends:
|
|
||||||
- .debian/baremetal_arm_test
|
|
||||||
needs:
|
|
||||||
- job: debian/arm64_test-vk
|
|
||||||
optional: true
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_TAG: &baremetal-arm64_test-vk "${DEBIAN_TEST_VK_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}"
|
|
||||||
LAVA_DISTRIBUTION_TAG: "debian/arm64_test-vk:${DEBIAN_TEST_VK_TAG}--${DEBIAN_BASE_TAG}--${PKG_REPO_REV}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
|
|
||||||
.use-debian/baremetal_arm32_test-gl:
|
|
||||||
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_PATH: "debian/baremetal_arm32_test-gl"
|
|
||||||
MESA_IMAGE_TAG: *baremetal-arm32_test-gl
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/baremetal_arm32_test-gl
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
.use-debian/baremetal_arm64_test-gl:
|
|
||||||
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_PATH: "debian/baremetal_arm64_test-gl"
|
|
||||||
MESA_IMAGE_TAG: *baremetal-arm64_test-gl
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/baremetal_arm64_test-gl
|
|
||||||
optional: true
|
|
||||||
|
|
||||||
.use-debian/baremetal_arm64_test-vk:
|
|
||||||
image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
|
|
||||||
variables:
|
|
||||||
MESA_IMAGE_PATH: "debian/baremetal_arm64_test-vk"
|
|
||||||
MESA_IMAGE_TAG: *baremetal-arm64_test-vk
|
|
||||||
needs:
|
|
||||||
- job: sanity
|
|
||||||
optional: true
|
|
||||||
- job: debian/baremetal_arm64_test-vk
|
|
||||||
optional: true
|
|
@@ -1,18 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# Check to see if we need a separate repo to install LLVM.
|
|
||||||
|
|
||||||
case "${FDO_DISTRIBUTION_VERSION%-*},${LLVM_VERSION}" in
|
|
||||||
bookworm,15)
|
|
||||||
NEED_LLVM_REPO="false"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
NEED_LLVM_REPO="true"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
if [ "$NEED_LLVM_REPO" = "true" ]; then
|
|
||||||
curl --fail -s https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
|
|
||||||
export LLVM_APT_REPO="deb [trusted=yes] https://apt.llvm.org/${FDO_DISTRIBUTION_VERSION%-*}/ llvm-toolchain-${FDO_DISTRIBUTION_VERSION%-*}-${LLVM_VERSION} main"
|
|
||||||
echo "$LLVM_APT_REPO" | tee /etc/apt/sources.list.d/llvm.list
|
|
||||||
fi
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user