Compare commits: mesa-25.1. ... texman_0_1 (254 commits)
| SHA1 |
|---|
| bcc4804410 |
| a03b239041 |
| 4890c7ed87 |
| 17c33d6f04 |
| 5ab599600e |
| db4b6fd759 |
| dbb2f30ad3 |
| 9c84dfe156 |
| 8d86325516 |
| 8ecd83f6cd |
| cff7e025ed |
| 3de9a9bd1d |
| e6cae6e6f6 |
| 9cf5945b83 |
| d6ddc33f07 |
| ec32d129b7 |
| cd41c27446 |
| af6a480117 |
| 14e9700621 |
| 7fb177c367 |
| d65cda4ce3 |
| 460a375d85 |
| c3c5652602 |
| e1998baef8 |
| 3e980901b0 |
| 14fe63a12b |
| fbd147c0b5 |
| 7b7c54e94c |
| 84dd9c8112 |
| bba85343e7 |
| 6484b373c3 |
| 245a3c54c7 |
| 1266e633cc |
| 86042f53f2 |
| 068062f997 |
| c62af8e3b7 |
| 8e5650d7f4 |
| 65f1cf2cbf |
| f04f5e990a |
| 1854ebe77e |
| 3500f2e3c4 |
| cfeefc9b81 |
| 3ad6adfb57 |
| 8a126f1166 |
| 6315aabcf2 |
| a8c2344364 |
| 2929b2569b |
| f14790d7b0 |
| 8fb2e61801 |
| b3c94f9d9d |
| 5b889f7f2d |
| 0f531b5b48 |
| c6482c0d41 |
| b71748efd1 |
| 1c4b9edc41 |
| 2b464d93d1 |
| 497f80ac34 |
| 2be748769b |
| 45a1083ee7 |
| 3a45baa70d |
| 0bc3a7ac2e |
| 08f0579176 |
| d689cd0715 |
| e957f39f67 |
| 416f09d3e6 |
| 40cc5d36fd |
| 62f9613dbc |
| 7f31255eb2 |
| 78382b4bbe |
| bd8e90857c |
| b7cc448ff8 |
| 0ea582acbe |
| 05c824db87 |
| 829f6909bd |
| 34f1ebc72f |
| c6e586ee0c |
| 9180d932c6 |
| 95df1b67a2 |
| bc400c3f30 |
| 3a3db0e9ec |
| 613e395ab8 |
| 86c5de8b85 |
| 44c8c42ab9 |
| 31a82a663c |
| dafdf6512e |
| 9caacb015a |
| 2887770534 |
| fb440c9f82 |
| 576c9c310f |
| ec1db9db1f |
| e1f70cf5e2 |
| a7cdbf5c38 |
| fe37adfde3 |
| 315396ac6c |
| a7252f42ae |
| bffc66109e |
| 0089c7f4ee |
| 54f435340b |
| 66d887d605 |
| c6dc5cc8ab |
| e0412bb726 |
| cc5cbd1bca |
| 93fbfa5781 |
| 30709caad6 |
| a647198f65 |
| 7ad6ea6ff2 |
| ceee2c45cd |
| 26dc161fde |
| b176ef05f2 |
| 6d6e7a08c7 |
| eb75ec2349 |
| 2a8bd4e329 |
| c247268499 |
| b2cb8920c2 |
| af4d93f256 |
| c8dd839acb |
| e43c3c38fb |
| 9194782fdc |
| ec36d5b537 |
| 6e5da1a860 |
| 21f6e8a2d0 |
| 18772c9e88 |
| 5df4283b53 |
| 6734bab3b2 |
| 0a5fea5f96 |
| 9b2762d7b9 |
| 571bf7a71b |
| 4ab3b9aa76 |
| 88c018ee47 |
| cead52ef68 |
| 9cf0ed6059 |
| 2c1c8dccf7 |
| b87f9f6056 |
| 427bf0171d |
| 98c5445c75 |
| 15ffb3bbab |
| 97b570b999 |
| e8e6374a16 |
| d9f31f16c0 |
| 1173198f51 |
| 01a56d23e6 |
| bf050799ac |
| 3697eca493 |
| de2b3506ea |
| bfbe2eb6e9 |
| 8bd395f601 |
| 735925d15b |
| 448a3456a9 |
| e176b11b74 |
| cdc8771893 |
| 789399d3f4 |
| dfa8624e83 |
| 36b64bbd4f |
| 651985f813 |
| 9f2b49a7b3 |
| 05c6361b0b |
| 9621817253 |
| 7e2e4a6b1a |
| 656c71ca3f |
| 6ce7055dcc |
| c848a65b5e |
| 1ff99fcd50 |
| 67342052d2 |
| ccf52b6784 |
| 78aa58c914 |
| 4291f24abd |
| f4211722f1 |
| 71380a1325 |
| 33e55c6a7d |
| cd32164cc6 |
| 7c1348ac8f |
| 2560061cfe |
| 0c047b8847 |
| c64a08c286 |
| bb409e88d8 |
| af69eefcba |
| d9c071adec |
| 833d19c21d |
| 7245757174 |
| 3d96cb7d48 |
| f565ab2dd5 |
| 7ad0c51f48 |
| fea684c533 |
| 4e8845fc0c |
| ffcc1cdb41 |
| afc90075ba |
| 79acea2708 |
| a12063a540 |
| 9826d83439 |
| 84f7ded925 |
| 59dd8ad752 |
| 4afd26982e |
| 17e4a7ea17 |
| 3d67655cce |
| b5d7c69884 |
| b6759e2be7 |
| af853b0da7 |
| 8d349798da |
| 4aea628194 |
| 008ddf628a |
| 716496f263 |
| b3b325e560 |
| 382ce985ac |
| 54b7eca7a0 |
| f74e06bf11 |
| 9c02649d18 |
| 2c34704e41 |
| 85d0041ff0 |
| 67ff8b354e |
| 08e7957571 |
| 9c1f7089d8 |
| 5381ac5f11 |
| a6f78d4eee |
| ae695e3566 |
| c82c3335e1 |
| f8246f504b |
| 499458bcdb |
| 37a53fd6a0 |
| 5ec38fb2ea |
| 11c0215bf8 |
| 6a13b6c346 |
| a103097ee5 |
| 7efad0d84c |
| 42c88cd072 |
| 2541c54e79 |
| 33529e3d8e |
| 84c1b82081 |
| a97a1439ae |
| 2fd2910010 |
| fbbda155e0 |
| a49c3c0fae |
| baf5998d59 |
| 4c5acef241 |
| 9839e272cf |
| b57e79ff14 |
| 43824acb4e |
| d8f509e749 |
| eb91c93c2e |
| 79de983b6f |
| fdb3acf016 |
| 2807d1f58a |
| 93f913926e |
| 33ca04f379 |
| 398cb30c72 |
| f67bb30314 |
| 4578d7b9f0 |
| dcdfc154c3 |
| 696ba32779 |
| dcfe55539f |
| ff84b1f1b2 |
| 3bca9c47f4 |
| d4d7fdb43b |
| d65dab5777 |
| 638ca019ef |
**.clang-format-ignore** (2 lines deleted; name inferred from content):

```
# Vendored code
src/amd/vulkan/radix_sort/*
```
**.clang-format-include** (10 lines deleted; name inferred from content):

```
# The following files are opted into `ninja clang-format` and
# enforcement in the CI.

src/gallium/drivers/i915
src/gallium/drivers/r300/compiler/*
src/gallium/targets/teflon/**/*
src/amd/vulkan/**/*
src/amd/compiler/**/*
src/egl/**/*
src/etnaviv/isa/**/*
```
**.dir-locals.el** (18 lines deleted; name inferred from content):

```lisp
((nil . ((show-trailing-whitespace . t)))
 (prog-mode
  (indent-tabs-mode . nil)
  (tab-width . 8)
  (c-basic-offset . 3)
  (c-file-style . "stroustrup")
  (fill-column . 78)
  (eval . (progn
	    (c-set-offset 'case-label '0)
	    (c-set-offset 'innamespace '0)
	    (c-set-offset 'inline-open '0)))
  (whitespace-style face indentation)
  (whitespace-line-column . 79)
  (eval ignore-errors
        (require 'whitespace)
        (whitespace-mode 1)))
 (makefile-mode (indent-tabs-mode . t))
 )
```
**.editorconfig** (44 lines deleted; name inferred from content):

```ini
# To use this config on your editor, follow the instructions at:
# http://editorconfig.org

root = true

[*]
charset = utf-8
insert_final_newline = true
tab_width = 8

[*.{c,h,cpp,hpp,cc,hh,y,yy}]
indent_style = space
indent_size = 3
max_line_length = 78

[{Makefile*,*.mk}]
indent_style = tab

[*.py]
indent_style = space
indent_size = 4

[*.yml]
indent_style = space
indent_size = 2

[*.rst]
indent_style = space
indent_size = 3

[*.patch]
trim_trailing_whitespace = false

[{meson.build,meson.options}]
indent_style = space
indent_size = 2

[*.ps1]
indent_style = space
indent_size = 2

[*.rs]
indent_style = space
indent_size = 4
```
**.git-blame-ignore-revs** (76 lines deleted; name inferred from content):

```
# List of commits to ignore when using `git blame`.
# Enable with:
#   git config blame.ignoreRevsFile .git-blame-ignore-revs
#
# Per git-blame(1):
#   Ignore revisions listed in the file, one unabbreviated object name
#   per line, in git-blame. Whitespace and comments beginning with # are
#   ignored.
#
# Please keep these in chronological order :)
#
# You can add a new commit with the following command:
#   git log -1 --pretty=format:'%n# %s%n%H%n' >> .git-blame-ignore-revs $COMMIT

# pvr: Fix clang-format error.
0ad5b0a74ef73f5fcbe1406ad9d57fe5dc00a5b1

# panfrost: Fix up some formatting for clang-format
a4705afe63412498d13ded73cba969c66be67907

# asahi: clang-format the world again
26c51bb8d8a33098b1990425a391f56ffba5728c

# perfetto: Add a .clang-format for the directory.
da78d5d729b1800136dd713b68492cb339993f4a

# panfrost/winsys: Clang-format
c90f036516a5376002be6550a917e8bad6a8a3b8

# panfrost: Re-run clang-format
4ccf174009af6732cbffa5d8ebb4687da7517505

# panvk: Clang-format
c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402

# pan/mdg: Fix icky formatting
133af0d6c945d3aaca8989edd15283a2b7dcc6c7

# mapi: clang-format _glapi_add_dispatch()
30332529663268a6406e910848e906e725e6fda7

# radv: reformat according to its .clang-format
8b319c6db8bd93603b18bd783eb75225fcfd51b7

# aco: reformat according to its .clang-format
6b21653ab4d3a67e711fe10e3d403128b6d26eb2

# egl: re-format using clang-format
2f670d89db038d5a29f6b72732fd7ad63dfaf4c6

# panfrost: clang-format the tree
0afd691f29683f6e9dde60f79eca094373521806

# aco: Format.
1e2639026fec7069806449f9ba2a124ce4eb5569

# radv: Format.
59c501ca353f8ec9d2717c98af2bfa1a1dbf4d75

# pvr: clang-format fixes
953c04ebd39c52d457301bdd8ac803949001da2d

# freedreno: Re-indent
2d439343ea1aee146d4ce32800992cd389bd505d

# ir3: Reformat source with clang-format
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54

# ir3: reformat after refactoring in previous commit
8ae5b27ee0331a739d14b42e67586784d6840388

# ir3: don't use deprecated NIR_PASS_V anymore
2fedc82c0cc9d3fb2e54707b57941b79553b640c

# ir3: reformat after previous commit
7210054db8cfb445a8ccdeacfdcfecccf44fa266
```
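The header comments above double as the usage recipe; a quick sketch of the workflow, using the last hash in the list as the example commit:

```bash
# One-time, per-clone setup: make git-blame skip the listed commits.
git config blame.ignoreRevsFile .git-blame-ignore-revs

# The same thing as a one-off, without touching the config
# (src/some/file.c is a placeholder path):
git blame --ignore-revs-file=.git-blame-ignore-revs src/some/file.c

# Append an entry for a newly merged reformatting commit; the format
# string reproduces the blank-line/comment/hash layout used above.
COMMIT=7210054db8cfb445a8ccdeacfdcfecccf44fa266
git log -1 --pretty=format:'%n# %s%n%H%n' "$COMMIT" >> .git-blame-ignore-revs
```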
**.gitattributes** (7 lines deleted, vendored):

```
*.csv eol=crlf
* text=auto
*.jpg binary
*.png binary
*.gif binary
*.ico binary
*.cl gitlab-language=c
```
**.github/workflows/macos.yml** (60 lines deleted, vendored):

```yaml
name: macOS-CI
on: push

permissions:
  contents: read

jobs:
  macOS-CI:
    strategy:
      matrix:
        glx_option: ['dri', 'xlib']
    runs-on: macos-11
    env:
      GALLIUM_DUMP_CPU: true
      MESON_EXEC: /Users/runner/Library/Python/3.11/bin/meson
    steps:
    - name: Checkout
      uses: actions/checkout@v3
    - name: Install Dependencies
      run: |
        cat > Brewfile <<EOL
        brew "bison"
        brew "expat"
        brew "gettext"
        brew "libx11"
        brew "libxcb"
        brew "libxdamage"
        brew "libxext"
        brew "molten-vk"
        brew "ninja"
        brew "pkg-config"
        brew "python@3.10"
        EOL

        brew update
        brew bundle --verbose
    - name: Install Mako and meson
      run: pip3 install --user mako meson
    - name: Configure
      run: |
        cat > native_config <<EOL
        [binaries]
        llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
        EOL
        $MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
    - name: Build
      run: $MESON_EXEC compile -C build
    - name: Test
      run: $MESON_EXEC test -C build --print-errorlogs
    - name: Install
      run: $MESON_EXEC install -C build --destdir $PWD/install
    - name: 'Upload Artifact'
      if: always()
      uses: actions/upload-artifact@v3
      with:
        name: macos-${{ matrix.glx_option }}-result
        path: |
          build/meson-logs/
          install/
        retention-days: 5
```
**.gitignore** (7 lines deleted, vendored):

```
.cache
.vscode*
*.pyc
*.pyo
*.out
/build
.venv/
```
**.gitlab-ci.yml** (431 lines deleted):

```yaml
# Types of CI pipelines:
# | pipeline name        | context   | description                                                 |
# |----------------------|-----------|-------------------------------------------------------------|
# | merge pipeline       | mesa/mesa | pipeline running for an MR; if it passes the MR gets merged |
# | pre-merge pipeline   | mesa/mesa | same as above, except its status doesn't affect the MR      |
# | post-merge pipeline  | mesa/mesa | pipeline immediately after merging                          |
# | fork pipeline        | fork      | pipeline running in a user fork                             |
# | scheduled pipeline   | mesa/mesa | nightly pipelines, running every morning at 4am UTC         |
# | direct-push pipeline | mesa/mesa | when commits are pushed directly to mesa/mesa, bypassing Marge and its gating pipeline |
#
# Note that the release branches maintained by the release manager fall under
# the "direct push" category.
#
# "context" indicates the permissions that the jobs get; notably, any
# container created in mesa/mesa gets pushed immediately for everyone to use
# as soon as the image tag change is merged.
#
# Merge pipelines contain all jobs that must pass before the MR can be merged.
# Pre-merge pipelines contain the exact same jobs as merge pipelines.
# Post-merge pipelines contain *only* the `pages` job that deploys the new
# version of the website.
# Fork pipelines contain everything.
# Scheduled pipelines only contain the container+build jobs, and some extra
# test jobs (typically "full" variants of pre-merge jobs that only run 1/X
# test cases), but not a repeat of the merge pipeline jobs.
# Direct-push pipelines contain the same jobs as merge pipelines.

workflow:
  rules:
    # do not duplicate pipelines on merge pipelines
    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
      when: never
    # tag pipelines are disabled as it's too late to run all the tests by
    # then, the release has been made based on the staging pipelines results
    - if: $CI_COMMIT_TAG
      when: never
    # merge pipeline
    - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
      variables:
        MESA_CI_PERFORMANCE_ENABLED: 1
        FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: priority:high
        FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: priority:high-kvm
        FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: priority:high-aarch64
        CI_TRON_JOB_PRIORITY_TAG: ""  # Empty tags are ignored by gitlab
        JOB_PRIORITY: 75
        # fast-fail in merge pipelines: stop early if we get this many unexpected fails/crashes
        DEQP_RUNNER_MAX_FAILS: 40
    # post-merge pipeline
    - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
    # Pre-merge pipeline
    - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
    # Push to a branch on a fork
    - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
    # nightly pipeline
    - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
      variables:
        FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: priority:low
        FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: priority:low-kvm
        FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: priority:low-aarch64
        JOB_PRIORITY: 45
        # (some) nightly builds perform LTO, so they take much longer than the
        # short timeout allowed in other pipelines.
        # Note: 0 = infinity = gitlab's job `timeout:` applies, which is 1h
        BUILD_JOB_TIMEOUT_OVERRIDE: 0
    # pipeline for direct pushes that bypassed the CI
    - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
      variables:
        JOB_PRIORITY: 70
    # pipeline for direct pushes from release maintainer
    - if: &is-staging-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_REF_NAME =~ /^staging\//
      variables:
        JOB_PRIORITY: 70


variables:
  FDO_UPSTREAM_REPO: mesa/mesa
  MESA_TEMPLATES_COMMIT: &ci-templates-commit 48e4b6c9a2015f969fbe648999d16d5fb3eef6c4
  CI_PRE_CLONE_SCRIPT: |-
          set -o xtrace
          wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh
          bash download-git-cache.sh
          rm download-git-cache.sh
          set +o xtrace
  S3_JWT_FILE: /s3_jwt
  S3_JWT_FILE_SCRIPT: |-
      echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
      unset CI_JOB_JWT S3_JWT  # Unsetting vulnerable env variables
  S3_HOST: s3.freedesktop.org
  # This bucket is used to fetch ANDROID prebuilts and images
  S3_ANDROID_BUCKET: mesa-rootfs
  # This bucket is used to fetch the kernel image
  S3_KERNEL_BUCKET: mesa-rootfs
  # Bucket for git cache
  S3_GITCACHE_BUCKET: git-cache
  # Bucket for the pipeline artifacts pushed to S3
  S3_ARTIFACTS_BUCKET: artifacts
  # Buckets for traces
  S3_TRACIE_RESULTS_BUCKET: mesa-tracie-results
  S3_TRACIE_PUBLIC_BUCKET: mesa-tracie-public
  S3_TRACIE_PRIVATE_BUCKET: mesa-tracie-private
  # per-pipeline artifact storage on MinIO
  PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/${S3_ARTIFACTS_BUCKET}/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
  # per-job artifact storage on MinIO
  JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
  # reference images stored for traces
  PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/${S3_TRACIE_RESULTS_BUCKET}/$FDO_UPSTREAM_REPO"
  # For individual CI farm status see .ci-farms folder
  # Disable farm with   `git mv .ci-farms{,-disabled}/$farm_name`
  # Re-enable farm with `git mv .ci-farms{-disabled,}/$farm_name`
  # NEVER MIX FARM MAINTENANCE WITH ANY OTHER CHANGE IN THE SAME MERGE REQUEST!
  ARTIFACTS_BASE_URL: https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts
  # Python scripts for structured logger
  PYTHONPATH: "$PYTHONPATH:$CI_PROJECT_DIR/install"
  # No point in continuing once the device is lost
  MESA_VK_ABORT_ON_DEVICE_LOSS: 1
  # Avoid the wall of "Unsupported SPIR-V capability" warnings in CI job log, hiding away useful output
  MESA_SPIRV_LOG_LEVEL: error
  # Default priority for non-merge pipelines
  FDO_RUNNER_JOB_PRIORITY_TAG_X86_64: ""  # Empty tags are ignored by gitlab
  FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM: kvm
  FDO_RUNNER_JOB_PRIORITY_TAG_AARCH64: aarch64
  CI_TRON_JOB_PRIORITY_TAG: ci-tron:priority:low
  JOB_PRIORITY: 50
  DATA_STORAGE_PATH: data_storage

default:
  timeout: 1m # catch any jobs which don't specify a timeout
  id_tokens:
    S3_JWT:
      aud: https://s3.freedesktop.org
  before_script:
    - |
      if [ -z "${KERNEL_IMAGE_BASE:-}" ]; then
        export KERNEL_IMAGE_BASE="https://${S3_HOST}/${S3_KERNEL_BUCKET}/${KERNEL_REPO}/${EXTERNAL_KERNEL_TAG:-$KERNEL_TAG}"
      fi
    - >
      export SCRIPTS_DIR=$(mktemp -d) &&
      curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 -O --output-dir "${SCRIPTS_DIR}" "${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/setup-test-env.sh" &&
      . ${SCRIPTS_DIR}/setup-test-env.sh
    - eval "$S3_JWT_FILE_SCRIPT"

  after_script:
    # Work around https://gitlab.com/gitlab-org/gitlab/-/issues/20338
    - find -name '*.log' -exec mv {} {}.txt \;

  # Retry when job fails. Failed jobs can be found in the Mesa CI Daily Reports:
  # https://gitlab.freedesktop.org/mesa/mesa/-/issues/?sort=created_date&state=opened&label_name%5B%5D=CI%20daily
  retry:
    max: 1
    # Ignore runner_unsupported, stale_schedule, archived_failure, or
    # unmet_prerequisites
    when:
      - api_failure
      - runner_system_failure
      - script_failure
      - job_execution_timeout
      - scheduler_failure
      - data_integrity_failure
      - unknown_failure

stages:
  - sanity
  - container
  - git-archive
  - build-for-tests
  - build-only
  - code-validation
  - amd
  - amd-postmerge
  - intel
  - intel-postmerge
  - nouveau
  - nouveau-postmerge
  - arm
  - arm-postmerge
  - broadcom
  - broadcom-postmerge
  - freedreno
  - freedreno-postmerge
  - etnaviv
  - etnaviv-postmerge
  - software-renderer
  - software-renderer-postmerge
  - layered-backends
  - layered-backends-postmerge
  - performance
  - deploy

include:
  - project: 'freedesktop/ci-templates'
    ref: 16bc29078de5e0a067ff84a1a199a3760d3b3811
    file:
      - '/templates/ci-fairy.yml'
  - project: 'freedesktop/ci-templates'
    ref: *ci-templates-commit
    file:
      - '/templates/alpine.yml'
      - '/templates/debian.yml'
      - '/templates/fedora.yml'
  - local: '.gitlab-ci/image-tags.yml'
  - local: '.gitlab-ci/lava/lava-gitlab-ci.yml'
  - local: '.gitlab-ci/container/gitlab-ci.yml'
  - local: '.gitlab-ci/build/gitlab-ci.yml'
  - local: '.gitlab-ci/test/gitlab-ci.yml'
  - local: '.gitlab-ci/farm-rules.yml'
  - local: '.gitlab-ci/test-source-dep.yml'
  - local: 'docs/gitlab-ci.yml'
  - local: 'src/**/ci/gitlab-ci.yml'


# Rules applied to every job in the pipeline
.common-rules:
  rules:
    - if: *is-fork-push
      when: manual

.never-post-merge-rules:
  rules:
    - if: *is-post-merge
      when: never


# Note: make sure the branches in this list are the same as in
# `.build-only-delayed-rules` below.
.container+build-rules:
  rules:
    - !reference [.common-rules, rules]
    # Run when re-enabling a disabled farm, but not when disabling it
    - !reference [.disable-farm-mr-rules, rules]
    # Never run immediately after merging, as we just ran everything
    - !reference [.never-post-merge-rules, rules]
    # Build everything in merge pipelines, if any files affecting the pipeline
    # were changed
    - if: *is-merge-attempt
      changes: &all_paths
        - VERSION
        - bin/git_sha1_gen.py
        - bin/install_megadrivers.py
        - bin/symbols-check.py
        - bin/ci/**/*
        # GitLab CI
        - .gitlab-ci.yml
        - .gitlab-ci/**/*
        - .ci-farms/*
        # Meson
        - meson*
        - build-support/**/*
        - subprojects/**/*
        # clang format
        - .clang-format
        - .clang-format-include
        - .clang-format-ignore
        # Source code
        - include/**/*
        - src/**/*
      when: on_success
    # Same as above, but for pre-merge pipelines
    - if: *is-pre-merge
      changes:
        *all_paths
      when: manual
    # Skip everything for pre-merge and merge pipelines which don't change
    # anything in the build
    - if: *is-merge-attempt
      when: never
    - if: *is-pre-merge
      when: never
    # Build everything after someone bypassed the CI
    - if: *is-direct-push
      when: on_success
    # Build everything when pushing to staging branches
    - if: *is-staging-push
      when: on_success
    # Build everything in scheduled pipelines
    - if: *is-scheduled-pipeline
      when: on_success
    # Allow building everything in fork pipelines, but build nothing unless
    # manually triggered
    - when: manual

# Repeat of the above but with `when: on_success` replaced with
# `when: delayed` + `start_in:`, for build-only jobs.
# Note: make sure the branches in this list are the same as in
# `.container+build-rules` above.
.build-only-delayed-rules:
  rules:
    - !reference [.common-rules, rules]
    # Run when re-enabling a disabled farm, but not when disabling it
    - !reference [.disable-farm-mr-rules, rules]
    # Never run immediately after merging, as we just ran everything
    - !reference [.never-post-merge-rules, rules]
    # Build everything in merge pipelines, if any files affecting the pipeline
    # were changed
    - if: *is-merge-attempt
      changes: *all_paths
      when: delayed
      start_in: &build-delay 5 minutes
    # Same as above, but for pre-merge pipelines
    - if: *is-pre-merge
      changes: *all_paths
      when: manual
    # Skip everything for pre-merge and merge pipelines which don't change
    # anything in the build
    - if: *is-merge-attempt
      when: never
    - if: *is-pre-merge
      when: never
    # Build everything after someone bypassed the CI
    - if: *is-direct-push
      when: delayed
      start_in: *build-delay
    # Build everything when pushing to staging branches
    - if: *is-staging-push
      when: delayed
      start_in: *build-delay
    # Build everything in scheduled pipelines
    - if: *is-scheduled-pipeline
      when: delayed
      start_in: *build-delay
    # Allow building everything in fork pipelines, but build nothing unless
    # manually triggered
    - when: manual


.ci-deqp-artifacts:
  artifacts:
    name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
    when: always
    untracked: false
    paths:
      # Watch out!  Artifacts are relative to the build dir.
      # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521
      - artifacts
      - _build/meson-logs/*.txt
      - _build/meson-logs/strace
      - _build/.ninja_log

# Git archive

make git archive:
  extends:
    - .fdo.ci-fairy
  stage: git-archive
  rules:
    - !reference [.scheduled_pipeline-rules, rules]
  script:
    # Compactify the .git directory
    - git gc --aggressive
    # Download & cache the perfetto subproject as well.
    - rm -rf subprojects/perfetto ; mkdir -p subprojects/perfetto && curl --fail https://android.googlesource.com/platform/external/perfetto/+archive/$(grep 'revision =' subprojects/perfetto.wrap | cut -d ' ' -f3).tar.gz | tar zxf - -C subprojects/perfetto
    # compress the current folder
    - tar -cvzf ../$CI_PROJECT_NAME.tar.gz .

    - s3_upload ../$CI_PROJECT_NAME.tar.gz "https://$S3_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/"

# Sanity checks of MR settings and commit logs
sanity:
  extends:
    - .fdo.ci-fairy
  stage: sanity
  tags:
    - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
  rules:
    - if: *is-pre-merge
      when: on_success
    - when: never
  variables:
    GIT_STRATEGY: none
  script:
    # ci-fairy check-commits --junit-xml=check-commits.xml
    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
    - |
      set -eu
      image_tags=(
        ALPINE_X86_64_BUILD_TAG
        ALPINE_X86_64_LAVA_SSH_TAG
        DEBIAN_BASE_TAG
        DEBIAN_BUILD_TAG
        DEBIAN_PYUTILS_TAG
        DEBIAN_TEST_ANDROID_TAG
        DEBIAN_TEST_GL_TAG
        DEBIAN_TEST_VK_TAG
        FEDORA_X86_64_BUILD_TAG
        KERNEL_ROOTFS_TAG
        KERNEL_TAG
        PKG_REPO_REV
        WINDOWS_X64_BUILD_TAG
        WINDOWS_X64_MSVC_TAG
        WINDOWS_X64_TEST_TAG
      )
      for var in "${image_tags[@]}"
      do
        if [ "$(echo -n "${!var}" | wc -c)" -gt 20 ]
        then
          echo "$var is too long; please make sure it is at most 20 chars."
          exit 1
        fi
      done
  artifacts:
    when: on_failure
    reports:
      junit: check-*.xml


mr-label-maker-test:
  extends:
    - .fdo.ci-fairy
  stage: sanity
  tags:
    - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64
  rules:
    - !reference [.mr-label-maker-rules, rules]
  variables:
    GIT_STRATEGY: fetch
  timeout: 10m
  script:
    - set -eu
    - python3 -m venv .venv
    - source .venv/bin/activate
    - pip install git+https://gitlab.freedesktop.org/freedesktop/mr-label-maker
    - mr-label-maker --dry-run --mr $CI_MERGE_REQUEST_IID

# Jobs that need to pass before spending hardware resources on further testing
.required-for-hardware-jobs:
  needs:
    - job: rustfmt
      optional: true
      artifacts: false
    - job: yaml-toml-shell-py-test
      optional: true
      artifacts: false
```
**.gitlab-ci/.flake8** (33 lines deleted; name inferred):

```ini
[flake8]
exclude = .venv*,

# PEP 8 Style Guide limits line length to 79 characters
max-line-length = 159

ignore =
    # continuation line under-indented for hanging indent
    E121,
    # continuation line over-indented for hanging indent
    E126,
    # continuation line under-indented for visual indent
    E128,
    # whitespace before ':'
    E203,
    # missing whitespace around arithmetic operator
    E226,
    # missing whitespace after ','
    E231,
    # expected 2 blank lines, found 1
    E302,
    # too many blank lines
    E303,
    # imported but unused
    F401,
    # f-string is missing placeholders
    F541,
    # local variable assigned to but never used
    F841,
    # line break before binary operator
    W503,
    # line break after binary operator
    W504,
```
**.gitlab-ci/all-skips.txt** (115 lines deleted; name inferred from content):

```
# Note: skips lists for CI are just a list of lines that, when
# non-zero-length and not starting with '#', will regex match to
# delete lines from the test list.  Be careful.

# This test checks the driver's reported conformance version against the
# version of the CTS we're running. This check fails every few months
# and everyone has to go and bump the number in every driver.
# Running this check only makes sense while preparing a conformance
# submission, so skip it in the regular CI.
dEQP-VK.api.driver_properties.conformance_version

# Exclude this test which might fail when a new extension is implemented.
dEQP-VK.info.device_extensions

# These are tremendously slow (pushing toward a minute), and aren't
# reliable to be run in parallel with other tests due to CPU-side timing.
dEQP-GLES[0-9]*.functional.flush_finish.*

# piglit: WGL is Windows-only
wgl@.*

# These are sensitive to CPU timing, and would need to be run in isolation
# on the system rather than in parallel with other tests.
glx@glx_arb_sync_control@timing.*

# This test is not built with waffle, while we do build tests with waffle
spec@!opengl 1.1@windowoverlap

# These tests all read from the front buffer after a swap.  Given that we
# run piglit tests in parallel in Mesa CI, and don't have a compositor
# running, the frontbuffer reads may end up with undefined results from
# windows overlapping us.
#
# Piglit does mark these tests as not to be run in parallel, but deqp-runner
# doesn't respect that.  We need to extend deqp-runner to allow some tests to be
# marked as single-threaded and run after the rayon loop if we want to support
# them.
#
# Note that "glx-" tests don't appear in x11-skips.txt because they can be
# run even if PIGLIT_PLATFORM=gbm (for example)
glx@glx-copy-sub-buffer.*

# A majority of the tests introduced in CTS 1.3.7.0 are experiencing failures and flakes.
# Disable these tests until someone with a deeper understanding of EGL examines them.
#
# Note: on sc8280xp/a690 I get identical results (same passes and fails)
# between freedreno, zink, and llvmpipe, so I believe this is either a
# deqp bug or egl/wayland bug, rather than driver issue.
#
# With llvmpipe, the failing tests have the error message:
#
#   "Illegal sampler view creation without bind flag"
#
# which might be a hint.  (But some passing tests also have the same
# error message.)
#
# more context from David Heidelberg on IRC: the deqp commit where these
# started failing is: https://github.com/KhronosGroup/VK-GL-CTS/commit/79b25659bcbced0cfc2c3fe318951c585f682abe
# prior to that they were skipping.
wayland-dEQP-EGL.functional.color_clears.single_context.gles1.other
wayland-dEQP-EGL.functional.color_clears.single_context.gles2.other
wayland-dEQP-EGL.functional.color_clears.single_context.gles3.other
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1.other
wayland-dEQP-EGL.functional.color_clears.multi_context.gles2.other
wayland-dEQP-EGL.functional.color_clears.multi_context.gles3.other
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2.other
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2_gles3.other
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1.other
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles2.other
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles3.other
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2.other
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2_gles3.other

# Seems to be the same issue as wayland-dEQP-EGL.functional.color_clears.*
wayland-dEQP-EGL.functional.render.single_context.gles2.other
wayland-dEQP-EGL.functional.render.single_context.gles3.other
wayland-dEQP-EGL.functional.render.multi_context.gles2.other
wayland-dEQP-EGL.functional.render.multi_context.gles3.other
wayland-dEQP-EGL.functional.render.multi_context.gles2_gles3.other
wayland-dEQP-EGL.functional.render.multi_thread.gles2.other
wayland-dEQP-EGL.functional.render.multi_thread.gles3.other
wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other

# These test the loader more than the implementation and are broken because the
# Vulkan loader in Debian is too old
dEQP-VK.api.get_device_proc_addr.non_enabled
dEQP-VK.api.version_check.unavailable_entry_points

# These tests are flaking too much recently on almost all drivers, so better skip them until the cause is identified
spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex
spec@arb_program_interface_query@arb_program_interface_query-getprogramresourceindex@'vs_input2[1][0]' on GL_PROGRAM_INPUT

# These tests attempt to read from the front buffer after a swap. They are skipped
# on both X11 and gbm, but for different reasons:
#
# On X11: Given that we run piglit tests in parallel in Mesa CI, and don't have a
# compositor running, the frontbuffer reads may end up with undefined results from
# windows overlapping us.
# Piglit does mark these tests as not to be run in parallel, but deqp-runner
# doesn't respect that.  We need to extend deqp-runner to allow some tests to be
# marked as single-threaded and run after the rayon loop if we want to support
# them.
# Other front-buffer access tests like fbo-sys-blit, fbo-sys-sub-blit, or
# fcc-front-buffer-distraction don't appear here, because the DRI3 fake-front
# handling should be holding the pixels drawn by the test even if we happen to fail
# GL's window system pixel occlusion test.
# Note that glx skips don't appear here, they're in all-skips.txt (in case someone
# sets PIGLIT_PLATFORM=gbm to mostly use gbm, but still has an X server running).
#
# On gbm: gbm does not support reading the front buffer after a swapbuffers, and
# that's intentional. Don't bother running these tests when PIGLIT_PLATFORM=gbm.
# Note that this doesn't include tests like fbo-sys-blit, which draw/read front
# but don't swap.
spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
spec@!opengl 1.1@read-front
```
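As the header comment says, every non-blank, non-`#` line in this file is a regex used to delete matching entries from the test list. A minimal sketch of that filtering with plain `grep`; `skips.txt` and `caselist.txt` are placeholder names for this skip list and a one-test-per-line caselist:

```bash
# Keep only the active skip regexes (drop comments and blank lines).
grep -v -E '(^#|^[[:space:]]*$)' skips.txt > patterns.txt

# Remove every caselist entry matched by any skip regex.
grep -v -E -f patterns.txt caselist.txt > filtered-caselist.txt
```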
**.gitlab-ci/android-cts-runner.sh** (44 lines deleted; name inferred from content):

```bash
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime

. "${SCRIPTS_DIR}/setup-test-env.sh"

export PATH=/android-tools/android-cts/jdk/bin/:/android-tools/build-tools:$PATH
export JAVA_HOME=/android-tools/android-cts/jdk

# Wait for the appops service to show up
while [ "$($ADB shell dumpsys -l | grep appops)" = "" ] ; do sleep 1; done

SKIP_FILE="$INSTALL/${GPU_VERSION}-android-cts-skips.txt"

EXCLUDE_FILTERS=""
if [ -e "$SKIP_FILE" ]; then
  EXCLUDE_FILTERS="$(grep -v -E "(^#|^[[:space:]]*$)" "$SKIP_FILE" | sed -s 's/.*/--exclude-filter "\0" /g')"
fi

INCLUDE_FILE="$INSTALL/${GPU_VERSION}-android-cts-include.txt"

if [ -e "$INCLUDE_FILE" ]; then
  INCLUDE_FILTERS="$(grep -v -E "(^#|^[[:space:]]*$)" "$INCLUDE_FILE" | sed -s 's/.*/--include-filter "\0" /g')"
else
  INCLUDE_FILTERS=$(printf -- "--include-filter %s " $ANDROID_CTS_MODULES | sed -e 's/ $//g')
fi

set +e
eval "/android-tools/android-cts/tools/cts-tradefed" run commandAndExit cts-dev \
  $EXCLUDE_FILTERS \
  $INCLUDE_FILTERS

[ "$(grep "^FAILED" /android-tools/android-cts/results/latest/invocation_summary.txt | tr -d ' ' | cut -d ':' -f 2)" = "0" ]

# shellcheck disable=SC2034 # EXIT_CODE is used by the script that sources this one
EXIT_CODE=$?
set -e

section_switch cuttlefish_results "cuttlefish: gathering the results"

cp -r "/android-tools/android-cts/results/latest"/* $RESULTS_DIR
cp -r "/android-tools/android-cts/logs/latest"/* $RESULTS_DIR

section_end cuttlefish_results
```
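For reference, the `grep | sed` pipeline above turns each surviving skip-list line into a quoted `--exclude-filter` argument for cts-tradefed. A small sketch of the transformation on a hypothetical two-line skip file fed via stdin:

```bash
printf '%s\n' '# flaky on this device' 'CtsDeqpTestCases' |
  grep -v -E '(^#|^[[:space:]]*$)' |
  sed -s 's/.*/--exclude-filter "\0" /g'
# prints: --exclude-filter "CtsDeqpTestCases"
```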
**.gitlab-ci/android-deqp-runner.sh** (96 lines deleted; name inferred from content):

```bash
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime

. "${SCRIPTS_DIR}/setup-test-env.sh"

# deqp

$ADB shell mkdir -p /data/deqp
$ADB push /deqp-gles/modules/egl/deqp-egl-android /data/deqp
$ADB push /deqp-gles/mustpass/egl-main.txt.zst /data/deqp
$ADB push /deqp-vk/external/vulkancts/modules/vulkan/* /data/deqp
$ADB push /deqp-vk/mustpass/vk-main.txt.zst /data/deqp
$ADB push /deqp-tools/* /data/deqp
$ADB push /deqp-runner/deqp-runner /data/deqp

$ADB push "$INSTALL/all-skips.txt" /data/deqp
$ADB push "$INSTALL/angle-skips.txt" /data/deqp
if [ -e "$INSTALL/$GPU_VERSION-flakes.txt" ]; then
  $ADB push "$INSTALL/$GPU_VERSION-flakes.txt" /data/deqp
fi
if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
  $ADB push "$INSTALL/$GPU_VERSION-fails.txt" /data/deqp
fi
if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
  $ADB push "$INSTALL/$GPU_VERSION-skips.txt" /data/deqp
fi
$ADB push "$INSTALL/deqp-$DEQP_SUITE.toml" /data/deqp

BASELINE=""
if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
    BASELINE="--baseline /data/deqp/$GPU_VERSION-fails.txt"
fi

# Default to an empty known flakes file if it doesn't exist.
$ADB shell "touch /data/deqp/$GPU_VERSION-flakes.txt"

if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS /data/deqp/$GPU_VERSION-skips.txt"
fi

if [ -n "$ANGLE_TAG" ]; then
    DEQP_SKIPS="$DEQP_SKIPS /data/deqp/angle-skips.txt"
fi

AOSP_RESULTS=/data/deqp/results
uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"

set +e
$ADB shell "mkdir ${AOSP_RESULTS}; cd ${AOSP_RESULTS}/..; \
  XDG_CACHE_HOME=/data/local/tmp \
  ./deqp-runner \
    suite \
    --suite /data/deqp/deqp-$DEQP_SUITE.toml \
    --output $AOSP_RESULTS \
    --skips /data/deqp/all-skips.txt $DEQP_SKIPS \
    --flakes /data/deqp/$GPU_VERSION-flakes.txt \
    --testlog-to-xml /data/deqp/testlog-to-xml \
    --shader-cache-dir /data/local/tmp \
    --fraction-start ${CI_NODE_INDEX:-1} \
    --fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
    --jobs ${FDO_CI_CONCURRENT:-4} \
    $BASELINE \
    ${DEQP_RUNNER_MAX_FAILS:+--max-fails \"$DEQP_RUNNER_MAX_FAILS\"} \
    "

# shellcheck disable=SC2034 # EXIT_CODE is used by the script that sources this one
EXIT_CODE=$?
set -e
section_switch cuttlefish_results "cuttlefish: gathering the results"

$ADB pull "$AOSP_RESULTS/." "$RESULTS_DIR"

# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
find $RESULTS_DIR -name \*.xml | \
    sort -n |
    sed -n '1,+49!p' | \
    xargs rm -f

# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
find $RESULTS_DIR -name \*.xml \
    -exec cp /deqp-tools/testlog.css /deqp-tools/testlog.xsl "$RESULTS_DIR/" ";" \
    -quit

$ADB shell "cd ${AOSP_RESULTS}/..; \
./deqp-runner junit \
   --testsuite dEQP \
   --results $AOSP_RESULTS/failures.csv \
   --output $AOSP_RESULTS/junit.xml \
   --limit 50 \
   --template \"See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml\""

$ADB pull "$AOSP_RESULTS/junit.xml" "$RESULTS_DIR"

section_end cuttlefish_results
```
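The `--fraction-start`/`--fraction` pair in the invocation above shards the caselist across parallel CI nodes: deqp-runner's `--fraction N` runs every Nth test and `--fraction-start M` selects the starting offset. A sketch of the arithmetic with assumed values:

```bash
# Assumed values: a job split across 3 parallel nodes that also runs
# only 1/10 of the suite (DEQP_FRACTION=10); this is node 2 of 3.
CI_NODE_INDEX=2
CI_NODE_TOTAL=3
DEQP_FRACTION=10

# Mirrors the flags built in the script above: this node starts at
# offset 2 and takes every 30th test, so the three nodes together
# cover a disjoint 1/10 of the caselist.
echo "--fraction-start ${CI_NODE_INDEX:-1} --fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1} ))"
# prints: --fraction-start 2 --fraction 30
```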
**.gitlab-ci/android-runner.sh** (118 lines deleted; name inferred from content):

```bash
#!/usr/bin/env bash
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime

# Set default ADB command if not set already

: "${ADB:=adb}"

$ADB wait-for-device root
sleep 1

# overlay

REMOUNT_PATHS="/vendor"
if [ "$ANDROID_VERSION" -ge 15 ]; then
  REMOUNT_PATHS="$REMOUNT_PATHS /system"
fi

OV_TMPFS="/data/overlay-remount"
$ADB shell mkdir -p "$OV_TMPFS"
$ADB shell mount -t tmpfs none "$OV_TMPFS"

for path in $REMOUNT_PATHS; do
  $ADB shell mkdir -p "${OV_TMPFS}${path}-upper"
  $ADB shell mkdir -p "${OV_TMPFS}${path}-work"

  opts="lowerdir=${path},upperdir=${OV_TMPFS}${path}-upper,workdir=${OV_TMPFS}${path}-work"
  $ADB shell mount -t overlay -o "$opts" none ${path}
done

$ADB shell setenforce 0

# download Android Mesa from S3
MESA_ANDROID_ARTIFACT_URL=https://${PIPELINE_ARTIFACTS_BASE}/${S3_ANDROID_ARTIFACT_NAME}.tar.zst
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -o ${S3_ANDROID_ARTIFACT_NAME}.tar.zst ${MESA_ANDROID_ARTIFACT_URL}
mkdir /mesa-android
tar -C /mesa-android -xvf ${S3_ANDROID_ARTIFACT_NAME}.tar.zst
rm "${S3_ANDROID_ARTIFACT_NAME}.tar.zst" &

INSTALL="/mesa-android/install"

# replace libraries

$ADB shell rm -f /vendor/lib64/libgallium_dri.so*
$ADB shell rm -f /vendor/lib64/egl/libEGL_mesa.so*
$ADB shell rm -f /vendor/lib64/egl/libGLESv1_CM_mesa.so*
$ADB shell rm -f /vendor/lib64/egl/libGLESv2_mesa.so*

$ADB push "$INSTALL/lib/libgallium_dri.so" /vendor/lib64/libgallium_dri.so
$ADB push "$INSTALL/lib/libEGL.so" /vendor/lib64/egl/libEGL_mesa.so
$ADB push "$INSTALL/lib/libGLESv1_CM.so" /vendor/lib64/egl/libGLESv1_CM_mesa.so
$ADB push "$INSTALL/lib/libGLESv2.so" /vendor/lib64/egl/libGLESv2_mesa.so

$ADB shell rm -f /vendor/lib64/hw/vulkan.lvp.so*
$ADB shell rm -f /vendor/lib64/hw/vulkan.virtio.so*
$ADB shell rm -f /vendor/lib64/hw/vulkan.intel.so*

$ADB push "$INSTALL/lib/libvulkan_lvp.so" /vendor/lib64/hw/vulkan.lvp.so
$ADB push "$INSTALL/lib/libvulkan_virtio.so" /vendor/lib64/hw/vulkan.virtio.so
$ADB push "$INSTALL/lib/libvulkan_intel.so" /vendor/lib64/hw/vulkan.intel.so

$ADB shell rm -f /vendor/lib64/egl/libEGL_emulation.so*
$ADB shell rm -f /vendor/lib64/egl/libGLESv1_CM_emulation.so*
$ADB shell rm -f /vendor/lib64/egl/libGLESv2_emulation.so*

ANGLE_DEST_PATH=/vendor/lib64/egl
if [ "$ANDROID_VERSION" -ge 15 ]; then
  ANGLE_DEST_PATH=/system/lib64
fi

$ADB shell rm -f "$ANGLE_DEST_PATH/libEGL_angle.so"*
$ADB shell rm -f "$ANGLE_DEST_PATH/libGLESv1_CM_angle.so"*
$ADB shell rm -f "$ANGLE_DEST_PATH/libGLESv2_angle.so"*

$ADB push /angle/libEGL_angle.so "$ANGLE_DEST_PATH/libEGL_angle.so"
$ADB push /angle/libGLESv1_CM_angle.so "$ANGLE_DEST_PATH/libGLESv1_CM_angle.so"
$ADB push /angle/libGLESv2_angle.so "$ANGLE_DEST_PATH/libGLESv2_angle.so"

get_gles_runtime_version() {
  while [ "$($ADB shell dumpsys SurfaceFlinger | grep GLES:)" = "" ] ; do sleep 1; done
  $ADB shell dumpsys SurfaceFlinger | grep GLES
}

# Check what GLES implementation is used before loading the new libraries
get_gles_runtime_version

# restart Android shell, so that services use the new libraries
$ADB shell stop
$ADB shell start

# Check what GLES implementation is used after loading the new libraries
GLES_RUNTIME_VERSION="$(get_gles_runtime_version)"

if [ -n "$ANGLE_TAG" ]; then
  # Note: we are injecting the ANGLE libs too, so we need to check if the
  #       ANGLE libs are being used after the shell restart.
  ANGLE_HASH=$(head -c 12 /angle/version)
  if ! printf "%s" "$GLES_RUNTIME_VERSION" | grep --quiet "${ANGLE_HASH}"; then
    echo "Fatal: Android is loading the wrong version of the ANGLE libs: ${ANGLE_HASH}" 1>&2
    exit 1
  fi
else
  MESA_BUILD_VERSION=$(cat "$INSTALL/VERSION")
  if ! printf "%s" "$GLES_RUNTIME_VERSION" | grep --quiet "${MESA_BUILD_VERSION}$"; then
     echo "Fatal: Android is loading the wrong version of the Mesa3D GLES libs: ${GLES_RUNTIME_VERSION}" 1>&2
     exit 1
  fi
fi

if [ -n "$USE_ANDROID_CTS" ]; then
  # The script sets EXIT_CODE
  . "$(dirname "$0")/android-cts-runner.sh"
else
  # The script sets EXIT_CODE
  . "$(dirname "$0")/android-deqp-runner.sh"
fi

exit $EXIT_CODE
```
**.gitlab-ci/angle-skips.txt** (7 lines deleted; name inferred from content):

```
# Unlike zink which does support it, ANGLE relies on a waiver to not implement
# capturing individual array elements (see waivers.xml and gles3-waivers.txt in the CTS)
dEQP-GLES3.functional.transform_feedback.array_element.*
dEQP-GLES3.functional.transform_feedback.random.*
dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.*_array_element
dEQP-GLES31.functional.program_interface_query.transform_feedback_varying.type.*.array.*
KHR-GLES31.core.program_interface_query.transform-feedback-types
```
**boot2container (b2c) job template** (155 lines deleted; Jinja-templated YAML, path not captured; the listing below is truncated):

```yaml
version: 1

# Rules to match for a machine to qualify
target:
  id: '{{ CI_RUNNER_DESCRIPTION }}'

timeouts:

  first_console_activity:  # This limits the time it can take to receive the first console log
    minutes: {{ B2C_TIMEOUT_FIRST_CONSOLE_ACTIVITY_MINUTES | default(0, true) }}
    seconds: {{ B2C_TIMEOUT_FIRST_CONSOLE_ACTIVITY_SECONDS | default(0, true) }}
    retries: {{ B2C_TIMEOUT_FIRST_CONSOLE_ACTIVITY_RETRIES }}

  console_activity:        # Reset every time we receive a message from the logs
    minutes: {{ B2C_TIMEOUT_CONSOLE_ACTIVITY_MINUTES | default(0, true) }}
    seconds: {{ B2C_TIMEOUT_CONSOLE_ACTIVITY_SECONDS | default(0, true) }}
    retries: {{ B2C_TIMEOUT_CONSOLE_ACTIVITY_RETRIES }}

  boot_cycle:
    minutes: {{ B2C_TIMEOUT_BOOT_MINUTES | default(0, true) }}
    seconds: {{ B2C_TIMEOUT_BOOT_SECONDS | default(0, true) }}
    retries: {{ B2C_TIMEOUT_BOOT_RETRIES }}

  overall:                 # Maximum time the job can take, not overrideable by the "continue" deployment
    minutes: {{ B2C_TIMEOUT_OVERALL_MINUTES | default(0, true) }}
    seconds: {{ B2C_TIMEOUT_OVERALL_SECONDS | default(0, true) }}
    retries: 0
    # no retries possible here

  watchdogs:
    boot:
      minutes: {{ B2C_TIMEOUT_BOOT_WD_MINUTES | default(0, true) }}
      seconds: {{ B2C_TIMEOUT_BOOT_WD_SECONDS | default(0, true) }}
      retries: {{ B2C_TIMEOUT_BOOT_WD_RETRIES | default(0, true) }}

console_patterns:
    session_end:
        regex: >-
          {{ B2C_SESSION_END_REGEX }}
{% if B2C_SESSION_REBOOT_REGEX %}
    session_reboot:
        regex: >-
          {{ B2C_SESSION_REBOOT_REGEX }}
{% endif %}
    job_success:
        regex: >-
          {{ B2C_JOB_SUCCESS_REGEX }}
{% if B2C_JOB_WARN_REGEX %}
    job_warn:
        regex: >-
          {{ B2C_JOB_WARN_REGEX }}
{% endif %}

{% if B2C_BOOT_WD_START_REGEX and B2C_BOOT_WD_STOP_REGEX %}
    watchdogs:
        boot:
            start:
              regex: >-
                {{ B2C_BOOT_WD_START_REGEX }}
            reset:
              regex: >-
                {{ B2C_BOOT_WD_RESET_REGEX | default(B2C_BOOT_WD_START_REGEX, true) }}
            stop:
              regex: >-
                {{ B2C_BOOT_WD_STOP_REGEX }}
{% endif %}

# Environment to deploy
deployment:
  # Initial boot
  start:
    storage:
{% if B2C_IMAGESTORE_PLATFORM %}
        imagestore:
          public:
            # List of images that should be pulled into the image store ahead of execution
            images:
              mars:
                name: "{{ B2C_MACHINE_REGISTRATION_IMAGE }}"
                platform: "{{ B2C_IMAGESTORE_PLATFORM }}"
                tls_verify: false
                {% set machine_registration_image="{% raw %}{{ job.imagestore.public.mars.image_id }}{% endraw %}" %}
              telegraf:
                name: "{{ B2C_TELEGRAF_IMAGE }}"
                platform: "{{ B2C_IMAGESTORE_PLATFORM }}"
                tls_verify: false
                {% set telegraf_image="{% raw %}{{ job.imagestore.public.telegraf.image_id }}{% endraw %}" %}
              image_under_test:
                name: "{{ B2C_IMAGE_UNDER_TEST }}"
                platform: "{{ B2C_IMAGESTORE_PLATFORM }}"
                tls_verify: false
                {% set image_under_test="{% raw %}{{ job.imagestore.public.image_under_test.image_id }}{% endraw %}" %}
        nbd:
          storage:
            max_connections: 5
            size: 10G
{% endif %}

        http:
          - path: "/install.tar.zst"
            url: "{{ B2C_INSTALL_TARBALL_URL }}"
          - path: "/b2c-extra-args"
            data: >
              b2c.pipefail b2c.poweroff_delay={{ B2C_POWEROFF_DELAY }}
              b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
              b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in B2C_JOB_VOLUME_EXCLUSIONS.split(',') %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
              {% for volume in B2C_VOLUMES %}
              b2c.volume={{ volume }}
              {% endfor %}
              b2c.run_service="--privileged --tls-verify=false --pid=host {{ B2C_TELEGRAF_IMAGE }}" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
              b2c.run="-ti --tls-verify=false {{ B2C_MACHINE_REGISTRATION_IMAGE }} {% if B2C_MARS_SETUP_TAGS %}setup --tags {{ B2C_MARS_SETUP_TAGS }}{% else %}check{% endif %}"
              b2c.run="-v {{ '{{' }} job_bucket }}-results:{{ CI_PROJECT_DIR }} -w {{ CI_PROJECT_DIR }} {% for mount_volume in B2C_MOUNT_VOLUMES %} -v {{ mount_volume }}{% endfor %} --tls-verify=false --entrypoint bash {{ B2C_IMAGE_UNDER_TEST }} -euc 'curl --fail -q {{ '{{' }} job.http.url }}/install.tar.zst | tar --zstd -x; {{ B2C_CONTAINER_CMD }}'"
    kernel:
{% if B2C_KERNEL_URL %}
      url: '{{ B2C_KERNEL_URL }}'
{% endif %}

      # NOTE: b2c.cache_device should not be here, but this works around
      # a limitation of b2c which will be removed in the next release
      cmdline: >
        SALAD.machine_id={{ '{{' }} machine_id }}
        console={{ '{{' }} local_tty_device }},115200
        b2c.ntp_peer=10.42.0.1
        b2c.extra_args_url={{ '{{' }} job.http.url }}/b2c-extra-args
        {% if B2C_IMAGESTORE_PLATFORM is defined %}
        {{ '{{' }} imagestore.mount("public").nfs.to_b2c_filesystem("publicimgstore") }}
```
|         b2c.storage="additionalimagestores=publicimgstore" | ||||
|         b2c.nbd=/dev/nbd0,host=ci-gateway,port={% raw %}{{ '{{' }} job.nbd.storage.tcp_port }}{% endraw %},connections=5 | ||||
|         b2c.cache_device=/dev/nbd0 | ||||
|         {% else %} | ||||
|         b2c.cache_device=auto | ||||
|         {% endif %} | ||||
|         {% if B2C_KERNEL_CMDLINE_EXTRAS is defined %} | ||||
|         {{ B2C_KERNEL_CMDLINE_EXTRAS }} | ||||
|         {% endif %} | ||||
|  | ||||
| {% if B2C_INITRAMFS_URL or B2C_FIRMWARE_URL %} | ||||
|     initramfs: | ||||
| {% if B2C_FIRMWARE_URL %} | ||||
|       - url: '{{ B2C_FIRMWARE_URL }}' | ||||
| {% endif %} | ||||
| {% if B2C_INITRAMFS_URL %} | ||||
|       - url: '{{ B2C_INITRAMFS_URL }}' | ||||
| {% endif %} | ||||
| {% endif %} | ||||
|  | ||||
| {% if B2C_DTB_URL %} | ||||
|     dtb: | ||||
|       url: '{{ B2C_DTB_URL }}' | ||||
| {% if B2C_DTB_MATCH %} | ||||
|       format: | ||||
|         archive: | ||||
|           match: "{{ B2C_DTB_MATCH }}" | ||||
| {% endif %} | ||||
| {% endif %} | ||||
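|  | ||||
| Worth noting before the next file: the template above is rendered twice. The CI-side Jinja pass (the renderer that follows) expands the B2C_* variables, while anything the b2c executor must expand at runtime is protected with `{{ '{{' }}` or a `{% raw %}` block, both of which render to a literal `{{` for the second pass. A minimal sketch of the trick, using nothing but jinja2: | ||||
|  | ||||
| from jinja2 import Template | ||||
|  | ||||
| # "{{ '{{' }}" emits a literal "{{", leaving a placeholder for the second pass. | ||||
| once = Template("console={{ '{{' }} local_tty_device }},115200").render() | ||||
| assert once == "console={{ local_tty_device }},115200" | ||||
|  | ||||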
| @@ -1,40 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
|  | ||||
| # Copyright © 2022 Valve Corporation | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| from jinja2 import Environment, FileSystemLoader | ||||
| from os import environ, path | ||||
|  | ||||
|  | ||||
| # Pass through all the CI and B2C environment variables | ||||
| values = { | ||||
|     key: environ[key] | ||||
|     for key in environ if key.startswith("B2C_") or key.startswith("CI_") | ||||
| } | ||||
|  | ||||
| env = Environment(loader=FileSystemLoader(path.dirname(environ['B2C_JOB_TEMPLATE'])), | ||||
|                   trim_blocks=True, lstrip_blocks=True) | ||||
|  | ||||
| template = env.get_template(path.basename(environ['B2C_JOB_TEMPLATE'])) | ||||
|  | ||||
| with open(path.splitext(path.basename(environ['B2C_JOB_TEMPLATE']))[0], "w") as f: | ||||
|     f.write(template.render(values)) | ||||
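|  | ||||
| A hypothetical invocation of the renderer above (the script filename and variable values here are illustrative; only B2C_JOB_TEMPLATE is required by the code): it picks up every B2C_*/CI_* variable from the environment and writes the rendered file into the working directory, named after the template with its last extension stripped. | ||||
|  | ||||
| import os | ||||
| import subprocess | ||||
|  | ||||
| env = dict(os.environ) | ||||
| env["B2C_JOB_TEMPLATE"] = "/install/b2c.yml.jinja2"  # assumed path | ||||
| env["B2C_POWEROFF_DELAY"] = "15"                     # example value | ||||
| env["CI_PROJECT_DIR"] = "/builds/mesa/mesa" | ||||
| # Writes ./b2c.yml (template basename minus ".jinja2"); script name assumed. | ||||
| subprocess.run(["python3", "generate_b2c.py"], env=env, check=True) | ||||
|  | ||||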
| @@ -1,2 +0,0 @@ | ||||
| [*.sh] | ||||
| indent_size = 2 | ||||
| @@ -1,15 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Init entrypoint for bare-metal devices; calls common init code. | ||||
|  | ||||
| # First stage: very basic setup to bring up network and /dev etc | ||||
| /init-stage1.sh | ||||
| stage1_ret=$? | ||||
|  | ||||
| export CURRENT_SECTION=dut_boot | ||||
|  | ||||
| # Second stage: run jobs, but only if the first stage succeeded.  (Testing | ||||
| # $? directly here would be wrong: the export above resets it to 0.) | ||||
| test "$stage1_ret" -eq 0 && /init-stage2.sh | ||||
|  | ||||
| # Wait until the job would have timed out anyway, so we don't spew an "init | ||||
| # exited" panic. | ||||
| sleep 6000 | ||||
| @@ -1,17 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power down" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE" | ||||
| SNMP_OFF="i 4" | ||||
|  | ||||
| snmpset -v2c -r 3 -t 30 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF | ||||
| @@ -1,22 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power up" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE" | ||||
| SNMP_ON="i 1" | ||||
| SNMP_OFF="i 4" | ||||
|  | ||||
| snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF | ||||
| sleep 3s | ||||
| snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_ON | ||||
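|  | ||||
| Both PoE scripts drive the same per-port control OID and differ only in the integer they write. A sketch of the shared logic (assumptions: net-snmp's snmpset is on PATH, and the "mesaci" community string matches the scripts above): | ||||
|  | ||||
| import subprocess | ||||
| import time | ||||
|  | ||||
| POE_OID_PREFIX = "1.3.6.1.4.1.9.9.402.1.2.1.1.1"  # per-interface PoE control | ||||
|  | ||||
| def set_poe(address, interface, on, community="mesaci"): | ||||
|     # Integer 1 powers the port on, 4 powers it off (as in the scripts above). | ||||
|     subprocess.run(["snmpset", "-v2c", "-r", "3", "-t", "10", "-c", community, | ||||
|                     address, f"{POE_OID_PREFIX}.{interface}", | ||||
|                     "i", "1" if on else "4"], check=True) | ||||
|  | ||||
| # Equivalent of the power-up script: force off, wait, then switch on. | ||||
| # set_poe(switch, port, on=False); time.sleep(3); set_poe(switch, port, on=True) | ||||
|  | ||||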
| @@ -1,129 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime. | ||||
| # shellcheck disable=SC2034 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # Boot script for Chrome OS devices attached to a servo debug connector, using | ||||
| # NFS and TFTP to boot. | ||||
|  | ||||
| # We're run from the root of the repo; make helper vars for our paths | ||||
| BM=$CI_PROJECT_DIR/install/bare-metal | ||||
| CI_COMMON=$CI_PROJECT_DIR/install/common | ||||
| CI_INSTALL=$CI_PROJECT_DIR/install | ||||
|  | ||||
| # Runner config checks | ||||
| if [ -z "$BM_SERIAL" ]; then | ||||
|   echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the CPU serial device." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_SERIAL_EC" ]; then | ||||
|   echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the EC serial device for controlling board power" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /nfs ]; then | ||||
|   echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /tftp ]; then | ||||
|   echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # job config checks | ||||
| if [ -z "$BM_KERNEL" ]; then | ||||
|   echo "Must set BM_KERNEL to your board's kernel FIT image" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_ROOTFS" ]; then | ||||
|   echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_CMDLINE" ]; then | ||||
|   echo "Must set BM_CMDLINE to your board's kernel command line arguments" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| . "${SCRIPTS_DIR}/setup-test-env.sh" | ||||
|  | ||||
| section_start prepare_rootfs "Preparing rootfs components" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Clear out any previous run's artifacts. | ||||
| rm -rf results/ | ||||
| mkdir -p results | ||||
|  | ||||
| # Create the rootfs in the NFS directory.  rsync --delete makes sure it's in | ||||
| # a pristine state, since it's volume-mounted on the host. | ||||
| rsync -a --delete $BM_ROOTFS/ /nfs/ | ||||
| mkdir -p /nfs/results | ||||
| . $BM/rootfs-setup.sh /nfs | ||||
|  | ||||
| # Put the kernel/dtb image and the boot command line in the tftp directory for | ||||
| # the board to find.  For normal Mesa development, we build the kernel and | ||||
| # store it in the docker container that this script is running in. | ||||
| # | ||||
| # However, container builds are expensive, so when you're hacking on the | ||||
| # kernel, it's nice to be able to skip the half-hour container build, plus | ||||
| # moving that container to the runner.  So, if BM_KERNEL is a URL, fetch it | ||||
| # instead of looking in the container.  Note that the kernel build should be | ||||
| # the output of: | ||||
| # | ||||
| # make Image.lzma | ||||
| # | ||||
| # mkimage \ | ||||
| #  -A arm64 \ | ||||
| #  -f auto \ | ||||
| #  -C lzma \ | ||||
| #  -d arch/arm64/boot/Image.lzma \ | ||||
| #  -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \ | ||||
| #  cheza-image.img | ||||
|  | ||||
| rm -rf /tftp/* | ||||
| if echo "$BM_KERNEL" | grep -q http; then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       $BM_KERNEL -o /tftp/vmlinuz | ||||
| elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst | ||||
|   tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/nfs/" | ||||
|   rm modules.tar.zst & | ||||
| else | ||||
|   cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz | ||||
| fi | ||||
| echo "$BM_CMDLINE" > /tftp/cmdline | ||||
|  | ||||
| set +e | ||||
| STRUCTURED_LOG_FILE=results/job_detail.json | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}" | ||||
| section_end prepare_rootfs | ||||
|  | ||||
| python3 $BM/cros_servo_run.py \ | ||||
|         --cpu $BM_SERIAL \ | ||||
|         --ec $BM_SERIAL_EC \ | ||||
|         --test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} | ||||
| ret=$? | ||||
|  | ||||
| section_start dut_cleanup "Cleaning up after job" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close | ||||
| set -e | ||||
|  | ||||
| # Bring artifacts back from the NFS dir to the build dir where gitlab-runner | ||||
| # will look for them. | ||||
| cp -Rp /nfs/results/. results/ | ||||
| section_end dut_cleanup | ||||
|  | ||||
| exit $ret | ||||
| @@ -1,206 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # SPDX-License-Identifier: MIT | ||||
|  | ||||
| import argparse | ||||
| import datetime | ||||
| import math | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
|  | ||||
| from custom_logger import CustomLogger | ||||
| from serial_buffer import SerialBuffer | ||||
|  | ||||
| ANSI_ESCAPE = "\x1b[0K" | ||||
| ANSI_COLOUR = "\x1b[0;36m" | ||||
| ANSI_RESET = "\x1b[0m" | ||||
| SECTION_START = "start" | ||||
| SECTION_END = "end" | ||||
|  | ||||
| class CrosServoRun: | ||||
|     def __init__(self, cpu, ec, test_timeout, logger): | ||||
|         self.cpu_ser = SerialBuffer( | ||||
|             cpu, "results/serial.txt", ": ") | ||||
|         # Merge the EC serial into the cpu_ser's line stream so that we can | ||||
|         # effectively poll on both at the same time and not have to worry | ||||
|         # about blocking on one stream while the other has pending output. | ||||
|         self.ec_ser = SerialBuffer( | ||||
|             ec, "results/serial-ec.txt", " EC: ", line_queue=self.cpu_ser.line_queue) | ||||
|         self.test_timeout = test_timeout | ||||
|         self.logger = logger | ||||
|  | ||||
|     def close(self): | ||||
|         self.ec_ser.close() | ||||
|         self.cpu_ser.close() | ||||
|  | ||||
|     def ec_write(self, s): | ||||
|         print("EC> %s" % s) | ||||
|         self.ec_ser.serial.write(s.encode()) | ||||
|  | ||||
|     def cpu_write(self, s): | ||||
|         print("> %s" % s) | ||||
|         self.cpu_ser.serial.write(s.encode()) | ||||
|  | ||||
|     def print_error(self, message): | ||||
|         RED = '\033[0;31m' | ||||
|         NO_COLOR = '\033[0m' | ||||
|         print(RED + message + NO_COLOR) | ||||
|         self.logger.update_status_fail(message) | ||||
|  | ||||
|     def get_rel_timestamp(self): | ||||
|         now = datetime.datetime.now(tz=datetime.UTC) | ||||
|         then_env = os.getenv("CI_JOB_STARTED_AT") | ||||
|         if not then_env: | ||||
|             return "" | ||||
|         delta = now - datetime.datetime.fromisoformat(then_env) | ||||
|         return f"[{math.floor(delta.seconds / 60):02}:{(delta.seconds % 60):02}]" | ||||
|  | ||||
|     def get_cur_timestamp(self): | ||||
|         return str(int(datetime.datetime.timestamp(datetime.datetime.now()))) | ||||
|  | ||||
|     def print_gitlab_section(self, action, name, description, collapse=True): | ||||
|         assert action in [SECTION_START, SECTION_END] | ||||
|         out = ANSI_ESCAPE + "section_" + action + ":" | ||||
|         out += self.get_cur_timestamp() + ":" | ||||
|         out += name | ||||
|         if action == "start" and collapse: | ||||
|             out += "[collapsed=true]" | ||||
|         out += "\r" + ANSI_ESCAPE + ANSI_COLOUR | ||||
|         out += self.get_rel_timestamp() + " " + description + ANSI_RESET | ||||
|         print(out) | ||||
|  | ||||
|     def boot_section(self, action): | ||||
|         self.print_gitlab_section(action, "dut_boot", "Booting hardware device", True) | ||||
|  | ||||
|     def run(self): | ||||
|         # Flush any partial commands in the EC's prompt, then ask for a reboot. | ||||
|         self.ec_write("\n") | ||||
|         self.ec_write("reboot\n") | ||||
|  | ||||
|         bootloader_done = False | ||||
|         self.logger.create_job_phase("boot") | ||||
|         self.boot_section(SECTION_START) | ||||
|         tftp_failures = 0 | ||||
|         # This is emitted right when the bootloader pauses to check for input. | ||||
|         # Emit a ^N character to request network boot, because we don't have a | ||||
|         # direct-to-netboot firmware on cheza. | ||||
|         for line in self.cpu_ser.lines(timeout=120, phase="bootloader"): | ||||
|             if re.search("load_archive: loading locale_en.bin", line): | ||||
|                 self.cpu_write("\016") | ||||
|                 bootloader_done = True | ||||
|                 break | ||||
|  | ||||
|             # The Cheza firmware seems to occasionally get stuck looping in | ||||
|             # this error state during TFTP booting, possibly based on amount of | ||||
|             # network traffic around it, but it'll usually recover after a | ||||
|             # reboot. Currently mostly visible on google-freedreno-cheza-14. | ||||
|             if re.search("R8152: Bulk read error 0xffffffbf", line): | ||||
|                 tftp_failures += 1 | ||||
|                 if tftp_failures >= 10: | ||||
|                     self.print_error( | ||||
|                         "Detected intermittent tftp failure, restarting run.") | ||||
|                     return 1 | ||||
|  | ||||
|             # If the board has a netboot firmware and we made it to booting the | ||||
|             # kernel, proceed to processing of the test run. | ||||
|             if re.search("Booting Linux", line): | ||||
|                 bootloader_done = True | ||||
|                 break | ||||
|  | ||||
|             # The Cheza boards have issues with failing to bring up power to | ||||
|             # the system sometimes, possibly dependent on ambient temperature | ||||
|             # in the farm. | ||||
|             if re.search("POWER_GOOD not seen in time", line): | ||||
|                 self.print_error( | ||||
|                     "Detected intermittent poweron failure, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|         if not bootloader_done: | ||||
|             self.print_error("Failed to make it through bootloader, abandoning run.") | ||||
|             return 1 | ||||
|  | ||||
|         self.logger.create_job_phase("test") | ||||
|         for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"): | ||||
|             if re.search("---. end Kernel panic", line): | ||||
|                 return 1 | ||||
|  | ||||
|             # There are very infrequent bus errors during power management transitions | ||||
|             # on cheza, which we don't expect to be the case on future boards. | ||||
|             if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line): | ||||
|                 self.print_error( | ||||
|                     "Detected cheza power management bus error, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # If the network device dies, it's probably not the graphics stack's fault; just try again. | ||||
|             if re.search("NETDEV WATCHDOG", line): | ||||
|                 self.print_error( | ||||
|                     "Detected network device failure, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # These HFI response errors started appearing with the introduction | ||||
|             # of piglit runs.  CosmicPenguin says: | ||||
|             # | ||||
|             # "message ID 106 isn't a thing, so likely what happened is that we | ||||
|             # got confused when parsing the HFI queue.  If it happened on only | ||||
|             # one run, then memory corruption could be a possible clue" | ||||
|             # | ||||
|             # Given that it seems to trigger randomly near a GPU fault and then | ||||
|             # break many tests after that, just restart the whole run. | ||||
|             if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line): | ||||
|                 self.print_error( | ||||
|                     "Detected cheza power management bus error, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             if re.search("coreboot.*bootblock starting", line): | ||||
|                 self.print_error( | ||||
|                     "Detected spontaneous reboot, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line): | ||||
|                 self.print_error("Detected cheza MMU fail, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) | ||||
|             if result: | ||||
|                 status = result.group(1) | ||||
|                 exit_code = int(result.group(2)) | ||||
|  | ||||
|                 if status == "pass": | ||||
|                     self.logger.update_dut_job("status", "pass") | ||||
|                 else: | ||||
|                     self.logger.update_status_fail("test fail") | ||||
|  | ||||
|                 self.logger.update_dut_job("exit_code", exit_code) | ||||
|                 return exit_code | ||||
|  | ||||
|         self.print_error( | ||||
|             "Reached the end of the CPU serial log without finding a result") | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     parser = argparse.ArgumentParser() | ||||
|     parser.add_argument('--cpu', type=str, | ||||
|                         help='CPU Serial device', required=True) | ||||
|     parser.add_argument( | ||||
|         '--ec', type=str, help='EC Serial device', required=True) | ||||
|     parser.add_argument( | ||||
|         '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True) | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     logger = CustomLogger("results/job_detail.json") | ||||
|     logger.update_dut_time("start", None) | ||||
|     servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger) | ||||
|     retval = servo.run() | ||||
|  | ||||
|     # power down the CPU on the device | ||||
|     servo.ec_write("power off\n") | ||||
|     logger.update_dut_time("end", None) | ||||
|     servo.close() | ||||
|  | ||||
|     sys.exit(retval) | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     main() | ||||
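|  | ||||
| For reference, the result-line contract the test loop above keys on: the harness on the DUT prints a single `hwci: mesa: <status>, exit_code: <n>` line, and the runner converts it into the job's exit code. A toy round-trip with the same regex (the sample line is illustrative, not captured output): | ||||
|  | ||||
| import re | ||||
|  | ||||
| line = "hwci: mesa: pass, exit_code: 0"  # example line | ||||
| m = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) | ||||
| assert m is not None | ||||
| assert m.group(1) == "pass" and int(m.group(2)) == 0 | ||||
|  | ||||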
| @@ -1,10 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| relay=$1 | ||||
|  | ||||
| if [ -z "$relay" ]; then | ||||
|     echo "Must supply a relay arg" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay" | ||||
| @@ -1,28 +0,0 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| import sys | ||||
| import socket | ||||
|  | ||||
| host = sys.argv[1] | ||||
| port = sys.argv[2] | ||||
| mode = sys.argv[3] | ||||
| relay = sys.argv[4] | ||||
| msg = None | ||||
|  | ||||
| if mode == "on": | ||||
|     msg = b'\x20' | ||||
| else: | ||||
|     msg = b'\x21' | ||||
|  | ||||
| msg += int(relay).to_bytes(1, 'big') | ||||
| msg += b'\x00' | ||||
|  | ||||
| c = socket.create_connection((host, int(port))) | ||||
| c.sendall(msg) | ||||
|  | ||||
| data = c.recv(1) | ||||
| c.close() | ||||
|  | ||||
| # The board answers a single status byte; compare bytes to bytes (indexing a | ||||
| # bytes object yields an int, so the old data[0] == b'\x01' was always False). | ||||
| if data == b'\x01': | ||||
|     print('Command failed') | ||||
|     sys.exit(1) | ||||
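|  | ||||
| The three bytes sent above are the whole ETH008 request: 0x20 switches a relay on, 0x21 switches it off, the second byte selects the relay, and the third is left at zero here (assumed to be the board's pulse-time field, with zero meaning a permanent switch; not verified against the datasheet). The board answers one status byte, which the script treats as failure when it equals 1. A tiny helper capturing the framing: | ||||
|  | ||||
| def eth008_message(mode, relay): | ||||
|     opcode = b'\x20' if mode == "on" else b'\x21' | ||||
|     return opcode + int(relay).to_bytes(1, 'big') + b'\x00' | ||||
|  | ||||
| assert eth008_message("on", 3) == b'\x20\x03\x00' | ||||
| assert eth008_message("off", 3) == b'\x21\x03\x00' | ||||
|  | ||||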
| @@ -1,12 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| relay=$1 | ||||
|  | ||||
| if [ -z "$relay" ]; then | ||||
|     echo "Must supply a relay arg" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay" | ||||
| sleep 5 | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" on "$relay" | ||||
| @@ -1,31 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
|  | ||||
| STRINGS=$(mktemp) | ||||
| ERRORS=$(mktemp) | ||||
|  | ||||
| trap 'rm $STRINGS; rm $ERRORS;' EXIT | ||||
|  | ||||
| FILE=$1 | ||||
| shift 1 | ||||
|  | ||||
| while getopts "f:e:" opt; do | ||||
|   case $opt in | ||||
|     f) echo "$OPTARG" >> "$STRINGS";; | ||||
|     e) echo "$OPTARG" >> "$STRINGS" ; echo "$OPTARG" >> "$ERRORS";; | ||||
|     *) exit 1 | ||||
|   esac | ||||
| done | ||||
| shift $((OPTIND -1)) | ||||
|  | ||||
| echo "Waiting for $FILE to say one of following strings" | ||||
| cat "$STRINGS" | ||||
|  | ||||
| while ! grep -E -wf "$STRINGS" "$FILE"; do | ||||
|   sleep 2 | ||||
| done | ||||
|  | ||||
| if grep -E -wf "$ERRORS" "$FILE"; then | ||||
|   exit 1 | ||||
| fi | ||||
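|  | ||||
| The same wait-for-strings loop, sketched in Python for clarity (one simplification: the shell version treats each -f/-e argument as an extended regex with whole-word matching, while this sketch treats them as literal strings): | ||||
|  | ||||
| import re | ||||
| import time | ||||
|  | ||||
| def expect_output(path, strings, errors=(), poll=2): | ||||
|     want = re.compile("|".join(re.escape(s) for s in strings)) | ||||
|     bad = re.compile("|".join(re.escape(s) for s in errors)) if errors else None | ||||
|     while True: | ||||
|         with open(path, errors="replace") as f: | ||||
|             text = f.read() | ||||
|         if want.search(text): | ||||
|             # True if something matched and no error string appeared. | ||||
|             return not (bad and bad.search(text)) | ||||
|         time.sleep(poll) | ||||
|  | ||||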
| @@ -1,167 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime. | ||||
| # shellcheck disable=SC2034 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| . "$SCRIPTS_DIR"/setup-test-env.sh | ||||
|  | ||||
| BM=$CI_PROJECT_DIR/install/bare-metal | ||||
| CI_COMMON=$CI_PROJECT_DIR/install/common | ||||
|  | ||||
| if [ -z "$BM_SERIAL" ] && [ -z "$BM_SERIAL_SCRIPT" ]; then | ||||
|   echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "BM_SERIAL:" | ||||
|   echo "  This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel." | ||||
|   echo "BM_SERIAL_SCRIPT:" | ||||
|   echo "  This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POWERUP" ]; then | ||||
|   echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is a shell script that should reset the device and begin its boot sequence" | ||||
|   echo "such that it pauses at fastboot." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POWERDOWN" ]; then | ||||
|   echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is a shell script that should power off the device." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_FASTBOOT_SERIAL" ]; then | ||||
|   echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This must be the a stable-across-resets fastboot serial number." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_KERNEL" ]; then | ||||
|   echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_DTB" ]; then | ||||
|   echo "Must set BM_DTB to your board's DTB file in the job's variables:" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_ROOTFS" ]; then | ||||
|   echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then | ||||
|   BM_FASTBOOT_NFSROOT=1 | ||||
| fi | ||||
|  | ||||
| section_start prepare_rootfs "Preparing rootfs components" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Clear out any previous run's artifacts. | ||||
| rm -rf results/ | ||||
| mkdir -p results/ | ||||
|  | ||||
| if [ -n "$BM_FASTBOOT_NFSROOT" ]; then | ||||
|   # Create the rootfs in the NFS directory.  rsync --delete makes sure it's | ||||
|   # in a pristine state, since it's volume-mounted on the host. | ||||
|   rsync -a --delete $BM_ROOTFS/ /nfs/ | ||||
|   mkdir -p /nfs/results | ||||
|   . $BM/rootfs-setup.sh /nfs | ||||
|  | ||||
|   # Root is on NFS; no need for an initramfs. | ||||
|   rm -f rootfs.cpio.gz | ||||
|   touch rootfs.cpio | ||||
|   gzip rootfs.cpio | ||||
| else | ||||
|   # Create the rootfs in a temp dir | ||||
|   rsync -a --delete $BM_ROOTFS/ rootfs/ | ||||
|   . $BM/rootfs-setup.sh rootfs | ||||
|  | ||||
|   # Finally, pack it up into a cpio rootfs.  Skip the vulkan CTS since none of | ||||
|   # these devices use it and it would take up space in the initrd. | ||||
|  | ||||
|   EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader" | ||||
|  | ||||
|   pushd rootfs | ||||
|   find -H . | \ | ||||
|     grep -E -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" | | ||||
|     grep -E -v "traces-db|apitrace|renderdoc" | \ | ||||
|     grep -E -v $EXCLUDE_FILTER | \ | ||||
|     cpio -H newc -o | \ | ||||
|     xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz | ||||
|   popd | ||||
| fi | ||||
|  | ||||
| if echo "$BM_KERNEL $BM_DTB" | grep -q http; then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "$BM_KERNEL" -o kernel | ||||
|   # FIXME: modules should be supplied too | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "$BM_DTB" -o dtb | ||||
|  | ||||
|   cat kernel dtb > Image.gz-dtb | ||||
|  | ||||
| elif [ -n "${EXTERNAL_KERNEL_TAG}" ]; then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst | ||||
|  | ||||
|   if [ -n "$BM_DTB" ]; then | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
| 	"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o dtb | ||||
|   fi | ||||
|  | ||||
|   cat kernel dtb > Image.gz-dtb || echo "No DTB available, using pure kernel." | ||||
|   rm kernel | ||||
|   tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "$BM_ROOTFS/" | ||||
|   rm modules.tar.zst & | ||||
| else | ||||
|   cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb | ||||
|   cp /baremetal-files/"$BM_DTB".dtb dtb | ||||
| fi | ||||
|  | ||||
| export PATH=$BM:$PATH | ||||
|  | ||||
| mkdir -p artifacts | ||||
| mkbootimg.py \ | ||||
|   --kernel Image.gz-dtb \ | ||||
|   --ramdisk rootfs.cpio.gz \ | ||||
|   --dtb dtb \ | ||||
|   --cmdline "$BM_CMDLINE" \ | ||||
|   $BM_MKBOOT_PARAMS \ | ||||
|   --header_version 2 \ | ||||
|   -o artifacts/fastboot.img | ||||
|  | ||||
| rm Image.gz-dtb dtb | ||||
|  | ||||
| # Start background command for talking to serial if we have one. | ||||
| if [ -n "$BM_SERIAL_SCRIPT" ]; then | ||||
|   $BM_SERIAL_SCRIPT > results/serial-output.txt & | ||||
|  | ||||
|   while [ ! -e results/serial-output.txt ]; do | ||||
|     sleep 1 | ||||
|   done | ||||
| fi | ||||
|  | ||||
| section_end prepare_rootfs | ||||
|  | ||||
| set +e | ||||
| $BM/fastboot_run.py \ | ||||
|   --dev="$BM_SERIAL" \ | ||||
|   --test-timeout ${TEST_PHASE_TIMEOUT_MINUTES:-20} \ | ||||
|   --fbserial="$BM_FASTBOOT_SERIAL" \ | ||||
|   --powerup="$BM_POWERUP" \ | ||||
|   --powerdown="$BM_POWERDOWN" | ||||
| ret=$? | ||||
| set -e | ||||
|  | ||||
| if [ -n "$BM_FASTBOOT_NFSROOT" ]; then | ||||
|   # Bring artifacts back from the NFS dir to the build dir where gitlab-runner | ||||
|   # will look for them. | ||||
|   cp -Rp /nfs/results/. results/ | ||||
| fi | ||||
|  | ||||
| exit $ret | ||||
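|  | ||||
| To make the cpio pruning above concrete, here is the same keep/drop decision as a standalone predicate (the filter regexes are copied from the script; the sample paths are illustrative): | ||||
|  | ||||
| import re | ||||
|  | ||||
| DROP = [ | ||||
|     r"external/(openglcts|vulkancts|amber|glslang|spirv-tools)", | ||||
|     r"traces-db|apitrace|renderdoc", | ||||
|     r"deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64" | ||||
|     r"|glsl-4.[0123456]0|arb_tessellation_shader", | ||||
| ] | ||||
|  | ||||
| def keep(path): | ||||
|     return not any(re.search(pattern, path) for pattern in DROP) | ||||
|  | ||||
| assert not keep("./deqp-runner/deqp-gles2")      # matches "deqp" | ||||
| assert keep("./usr/lib/libvulkan_freedreno.so")  # nothing matches | ||||
|  | ||||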
| @@ -1,159 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| import subprocess | ||||
| import re | ||||
| from serial_buffer import SerialBuffer | ||||
| import sys | ||||
| import threading | ||||
|  | ||||
|  | ||||
| class FastbootRun: | ||||
|     def __init__(self, args, test_timeout): | ||||
|         self.powerup = args.powerup | ||||
|         self.ser = SerialBuffer( | ||||
|             args.dev, "results/serial-output.txt", "R SERIAL> ") | ||||
|         self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format( | ||||
|             ser=args.fbserial) | ||||
|         self.test_timeout = test_timeout | ||||
|  | ||||
|     def close(self): | ||||
|         self.ser.close() | ||||
|  | ||||
|     def print_error(self, message): | ||||
|         RED = '\033[0;31m' | ||||
|         NO_COLOR = '\033[0m' | ||||
|         print(RED + message + NO_COLOR) | ||||
|  | ||||
|     def logged_system(self, cmd, timeout=60): | ||||
|         print("Running '{}'".format(cmd)) | ||||
|         try: | ||||
|             return subprocess.call(cmd, shell=True, timeout=timeout) | ||||
|         except subprocess.TimeoutExpired: | ||||
|             self.print_error("timeout, abandoning run.") | ||||
|             return 1 | ||||
|  | ||||
|     def run(self): | ||||
|         if ret := self.logged_system(self.powerup): | ||||
|             return ret | ||||
|  | ||||
|         fastboot_ready = False | ||||
|         for line in self.ser.lines(timeout=2 * 60, phase="bootloader"): | ||||
|             if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \ | ||||
|                     re.search("Listening for fastboot command on", line): | ||||
|                 fastboot_ready = True | ||||
|                 break | ||||
|  | ||||
|             if re.search("data abort", line): | ||||
|                 self.print_error( | ||||
|                     "Detected crash during boot, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|         if not fastboot_ready: | ||||
|             self.print_error( | ||||
|                 "Failed to get to fastboot prompt, abandoning run.") | ||||
|             return 1 | ||||
|  | ||||
|         if ret := self.logged_system(self.fastboot): | ||||
|             return ret | ||||
|  | ||||
|         print_more_lines = -1 | ||||
|         for line in self.ser.lines(timeout=self.test_timeout, phase="test"): | ||||
|             if print_more_lines == 0: | ||||
|                 return 1 | ||||
|             if print_more_lines > 0: | ||||
|                 print_more_lines -= 1 | ||||
|  | ||||
|             if re.search("---. end Kernel panic", line): | ||||
|                 return 1 | ||||
|  | ||||
|             # The db820c boards intermittently reboot.  Just restart the run | ||||
|             # if we see a reboot after we got past fastboot. | ||||
|             if re.search("PON REASON", line): | ||||
|                 self.print_error( | ||||
|                     "Detected spontaneous reboot, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # db820c sometimes wedges around iommu fault recovery | ||||
|             if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line): | ||||
|                 self.print_error( | ||||
|                     "Detected kernel soft lockup, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # If the network device dies, it's probably not the graphics stack's fault; just try again. | ||||
|             if re.search("NETDEV WATCHDOG", line): | ||||
|                 self.print_error( | ||||
|                     "Detected network device failure, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # A3xx recovery doesn't quite work. Sometimes the GPU will get | ||||
|             # wedged and recovery will fail (because power can't be reset?) | ||||
|             # This assumes that the jobs are sufficiently well-tested that GPU | ||||
|             # hangs aren't always triggered, so just try again. But print some | ||||
|             # more lines first so that we get better information on the cause | ||||
|             # of the hang. Once a hang happens, it's pretty chatty. | ||||
|             if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line: | ||||
|                 self.print_error( | ||||
|                     "Detected GPU hang, abandoning run.") | ||||
|                 if print_more_lines == -1: | ||||
|                     print_more_lines = 30 | ||||
|  | ||||
|             result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) | ||||
|             if result: | ||||
|                 status = result.group(1) | ||||
|                 exit_code = int(result.group(2)) | ||||
|  | ||||
|                 return exit_code | ||||
|  | ||||
|         self.print_error( | ||||
|             "Reached the end of the CPU serial log without finding a result, abandoning run.") | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     parser = argparse.ArgumentParser() | ||||
|     parser.add_argument( | ||||
|         '--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)') | ||||
|     parser.add_argument('--powerup', type=str, | ||||
|                         help='shell command for rebooting', required=True) | ||||
|     parser.add_argument('--powerdown', type=str, | ||||
|                         help='shell command for powering off', required=True) | ||||
|     parser.add_argument('--fbserial', type=str, | ||||
|                         help='fastboot serial number of the board', required=True) | ||||
|     parser.add_argument('--test-timeout', type=int, | ||||
|                         help='Test phase timeout (minutes)', required=True) | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     fastboot = FastbootRun(args, args.test_timeout * 60) | ||||
|  | ||||
|     retval = fastboot.run() | ||||
|     fastboot.close() | ||||
|  | ||||
|     fastboot.logged_system(args.powerdown) | ||||
|  | ||||
|     sys.exit(retval) | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     main() | ||||
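|  | ||||
| One detail of the test loop above worth distilling: print_more_lines is a countdown that is armed (set to 30) on the first GPU-hang line, so the lines that follow the hang still land in the log, and only then fails the run. The pattern, reduced to its core (budget shrunk to keep the example short): | ||||
|  | ||||
| def scan(lines, budget=3): | ||||
|     remaining = -1                          # -1: countdown not armed yet | ||||
|     for line in lines: | ||||
|         if remaining == 0: | ||||
|             return 1                        # context captured; fail the run | ||||
|         if remaining > 0: | ||||
|             remaining -= 1 | ||||
|         if "gpu hw init failed" in line and remaining == -1: | ||||
|             remaining = budget              # arm: let a few more lines through | ||||
|     return 0 | ||||
|  | ||||
| assert scan(["ok", "gpu hw init failed: -22", "a", "b", "c", "d"]) == 1 | ||||
|  | ||||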
| @@ -1,10 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| relay=$1 | ||||
|  | ||||
| if [ -z "$relay" ]; then | ||||
|     echo "Must supply a relay arg" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay" | ||||
| @@ -1,19 +0,0 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| import sys | ||||
| import serial | ||||
|  | ||||
| mode = sys.argv[1] | ||||
| relay = sys.argv[2] | ||||
|  | ||||
| # For our relays, "off" means "board is powered". | ||||
| mode_swap = { | ||||
|     "on": "off", | ||||
|     "off": "on", | ||||
| } | ||||
| mode = mode_swap[mode] | ||||
|  | ||||
| ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2) | ||||
| command = "relay {} {}\n\r".format(mode, relay) | ||||
| ser.write(command.encode()) | ||||
| ser.close() | ||||
| @@ -1,12 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| relay=$1 | ||||
|  | ||||
| if [ -z "$relay" ]; then | ||||
|     echo "Must supply a relay arg" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay" | ||||
| sleep 5 | ||||
| "$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py on "$relay" | ||||
| @@ -1,569 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright 2015, The Android Open Source Project | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| """Creates the boot image.""" | ||||
| from argparse import (ArgumentParser, ArgumentTypeError, | ||||
|                       FileType, RawDescriptionHelpFormatter) | ||||
| from hashlib import sha1 | ||||
| from os import fstat | ||||
| from struct import pack | ||||
| import array | ||||
| import collections | ||||
| import os | ||||
| import re | ||||
| import subprocess | ||||
| import tempfile | ||||
| # Constant and structure definition is in | ||||
| # system/tools/mkbootimg/include/bootimg/bootimg.h | ||||
| BOOT_MAGIC = 'ANDROID!' | ||||
| BOOT_MAGIC_SIZE = 8 | ||||
| BOOT_NAME_SIZE = 16 | ||||
| BOOT_ARGS_SIZE = 512 | ||||
| BOOT_EXTRA_ARGS_SIZE = 1024 | ||||
| BOOT_IMAGE_HEADER_V1_SIZE = 1648 | ||||
| BOOT_IMAGE_HEADER_V2_SIZE = 1660 | ||||
| BOOT_IMAGE_HEADER_V3_SIZE = 1580 | ||||
| BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096 | ||||
| BOOT_IMAGE_HEADER_V4_SIZE = 1584 | ||||
| BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096 | ||||
| VENDOR_BOOT_MAGIC = 'VNDRBOOT' | ||||
| VENDOR_BOOT_MAGIC_SIZE = 8 | ||||
| VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE | ||||
| VENDOR_BOOT_ARGS_SIZE = 2048 | ||||
| VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112 | ||||
| VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128 | ||||
| VENDOR_RAMDISK_TYPE_NONE = 0 | ||||
| VENDOR_RAMDISK_TYPE_PLATFORM = 1 | ||||
| VENDOR_RAMDISK_TYPE_RECOVERY = 2 | ||||
| VENDOR_RAMDISK_TYPE_DLKM = 3 | ||||
| VENDOR_RAMDISK_NAME_SIZE = 32 | ||||
| VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16 | ||||
| VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108 | ||||
| # Names with special meaning; they must not be specified in --ramdisk_name. | ||||
| VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'} | ||||
| PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment' | ||||
| def filesize(f): | ||||
|     if f is None: | ||||
|         return 0 | ||||
|     try: | ||||
|         return fstat(f.fileno()).st_size | ||||
|     except OSError: | ||||
|         return 0 | ||||
| def update_sha(sha, f): | ||||
|     if f: | ||||
|         sha.update(f.read()) | ||||
|         f.seek(0) | ||||
|         sha.update(pack('I', filesize(f))) | ||||
|     else: | ||||
|         sha.update(pack('I', 0)) | ||||
| def pad_file(f, padding): | ||||
|     pad = (padding - (f.tell() & (padding - 1))) & (padding - 1) | ||||
|     f.write(pack(str(pad) + 'x')) | ||||
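| # Worked example for the alignment trick above (commentary added for this | ||||
| # review; every caller passes a power-of-two padding, which the | ||||
| # "& (padding - 1)" masks rely on): with f.tell() == 5 and padding == 4, | ||||
| # pad = (4 - (5 & 3)) & 3 = 3, advancing the file to offset 8.  The final | ||||
| # mask makes an already-aligned offset pad by 0 rather than a full block. | ||||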
| def get_number_of_pages(image_size, page_size): | ||||
|     """calculates the number of pages required for the image""" | ||||
|     return (image_size + page_size - 1) // page_size | ||||
| def get_recovery_dtbo_offset(args): | ||||
|     """calculates the offset of recovery_dtbo image in the boot image""" | ||||
|     num_header_pages = 1 # header occupies a page | ||||
|     num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize) | ||||
|     num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk), | ||||
|                                             args.pagesize) | ||||
|     num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize) | ||||
|     dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages + | ||||
|                                    num_ramdisk_pages + num_second_pages) | ||||
|     return dtbo_offset | ||||
| def write_header_v3_and_above(args): | ||||
|     if args.header_version > 3: | ||||
|         boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE | ||||
|     else: | ||||
|         boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE | ||||
|     args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode())) | ||||
|     # kernel size in bytes | ||||
|     args.output.write(pack('I', filesize(args.kernel))) | ||||
|     # ramdisk size in bytes | ||||
|     args.output.write(pack('I', filesize(args.ramdisk))) | ||||
|     # os version and patch level | ||||
|     args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level)) | ||||
|     args.output.write(pack('I', boot_header_size)) | ||||
|     # reserved | ||||
|     args.output.write(pack('4I', 0, 0, 0, 0)) | ||||
|     # version of boot image header | ||||
|     args.output.write(pack('I', args.header_version)) | ||||
|     args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s', | ||||
|                            args.cmdline)) | ||||
|     if args.header_version >= 4: | ||||
|         # The signature used to verify boot image v4. | ||||
|         args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE)) | ||||
|     pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE) | ||||
| def write_vendor_boot_header(args): | ||||
|     if filesize(args.dtb) == 0: | ||||
|         raise ValueError('DTB image must not be empty.') | ||||
|     if args.header_version > 3: | ||||
|         vendor_ramdisk_size = args.vendor_ramdisk_total_size | ||||
|         vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE | ||||
|     else: | ||||
|         vendor_ramdisk_size = filesize(args.vendor_ramdisk) | ||||
|         vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE | ||||
|     args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s', | ||||
|                                 VENDOR_BOOT_MAGIC.encode())) | ||||
|     # version of boot image header | ||||
|     args.vendor_boot.write(pack('I', args.header_version)) | ||||
|     # flash page size | ||||
|     args.vendor_boot.write(pack('I', args.pagesize)) | ||||
|     # kernel physical load address | ||||
|     args.vendor_boot.write(pack('I', args.base + args.kernel_offset)) | ||||
|     # ramdisk physical load address | ||||
|     args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset)) | ||||
|     # ramdisk size in bytes | ||||
|     args.vendor_boot.write(pack('I', vendor_ramdisk_size)) | ||||
|     args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s', | ||||
|                                 args.vendor_cmdline)) | ||||
|     # kernel tags physical load address | ||||
|     args.vendor_boot.write(pack('I', args.base + args.tags_offset)) | ||||
|     # asciiz product name | ||||
|     args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board)) | ||||
|     # header size in bytes | ||||
|     args.vendor_boot.write(pack('I', vendor_boot_header_size)) | ||||
|     # dtb size in bytes | ||||
|     args.vendor_boot.write(pack('I', filesize(args.dtb))) | ||||
|     # dtb physical load address | ||||
|     args.vendor_boot.write(pack('Q', args.base + args.dtb_offset)) | ||||
|     if args.header_version > 3: | ||||
|         vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num * | ||||
|                                      VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE) | ||||
|         # vendor ramdisk table size in bytes | ||||
|         args.vendor_boot.write(pack('I', vendor_ramdisk_table_size)) | ||||
|         # number of vendor ramdisk table entries | ||||
|         args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num)) | ||||
|         # vendor ramdisk table entry size in bytes | ||||
|         args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)) | ||||
|         # bootconfig section size in bytes | ||||
|         args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig))) | ||||
|     pad_file(args.vendor_boot, args.pagesize) | ||||
| def write_header(args): | ||||
|     if args.header_version > 4: | ||||
|         raise ValueError( | ||||
|             f'Boot header version {args.header_version} not supported') | ||||
|     if args.header_version in {3, 4}: | ||||
|         return write_header_v3_and_above(args) | ||||
|     ramdisk_load_address = ((args.base + args.ramdisk_offset) | ||||
|                             if filesize(args.ramdisk) > 0 else 0) | ||||
|     second_load_address = ((args.base + args.second_offset) | ||||
|                            if filesize(args.second) > 0 else 0) | ||||
|     args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode())) | ||||
|     # kernel size in bytes | ||||
|     args.output.write(pack('I', filesize(args.kernel))) | ||||
|     # kernel physical load address | ||||
|     args.output.write(pack('I', args.base + args.kernel_offset)) | ||||
|     # ramdisk size in bytes | ||||
|     args.output.write(pack('I', filesize(args.ramdisk))) | ||||
|     # ramdisk physical load address | ||||
|     args.output.write(pack('I', ramdisk_load_address)) | ||||
|     # second bootloader size in bytes | ||||
|     args.output.write(pack('I', filesize(args.second))) | ||||
|     # second bootloader physical load address | ||||
|     args.output.write(pack('I', second_load_address)) | ||||
|     # kernel tags physical load address | ||||
|     args.output.write(pack('I', args.base + args.tags_offset)) | ||||
|     # flash page size | ||||
|     args.output.write(pack('I', args.pagesize)) | ||||
|     # version of boot image header | ||||
|     args.output.write(pack('I', args.header_version)) | ||||
|     # os version and patch level | ||||
|     args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level)) | ||||
|     # asciiz product name | ||||
|     args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board)) | ||||
|     args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline)) | ||||
|     sha = sha1() | ||||
|     update_sha(sha, args.kernel) | ||||
|     update_sha(sha, args.ramdisk) | ||||
|     update_sha(sha, args.second) | ||||
|     if args.header_version > 0: | ||||
|         update_sha(sha, args.recovery_dtbo) | ||||
|     if args.header_version > 1: | ||||
|         update_sha(sha, args.dtb) | ||||
|     img_id = pack('32s', sha.digest()) | ||||
|     args.output.write(img_id) | ||||
|     args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline)) | ||||
|     if args.header_version > 0: | ||||
|         if args.recovery_dtbo: | ||||
|             # recovery dtbo size in bytes | ||||
|             args.output.write(pack('I', filesize(args.recovery_dtbo))) | ||||
|             # recovery dtbo offset in the boot image | ||||
|             args.output.write(pack('Q', get_recovery_dtbo_offset(args))) | ||||
|         else: | ||||
|             # Set to zero if no recovery dtbo | ||||
|             args.output.write(pack('I', 0)) | ||||
|             args.output.write(pack('Q', 0)) | ||||
|     # Populate boot image header size for header versions 1 and 2. | ||||
|     if args.header_version == 1: | ||||
|         args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE)) | ||||
|     elif args.header_version == 2: | ||||
|         args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE)) | ||||
|     if args.header_version > 1: | ||||
|         if filesize(args.dtb) == 0: | ||||
|             raise ValueError('DTB image must not be empty.') | ||||
|         # dtb size in bytes | ||||
|         args.output.write(pack('I', filesize(args.dtb))) | ||||
|         # dtb physical load address | ||||
|         args.output.write(pack('Q', args.base + args.dtb_offset)) | ||||
|     pad_file(args.output, args.pagesize) | ||||
|     return img_id | ||||
| class AsciizBytes: | ||||
|     """Parses a string and encodes it as an asciiz bytes object. | ||||
|     >>> AsciizBytes(bufsize=4)('foo') | ||||
|     b'foo\\x00' | ||||
|     >>> AsciizBytes(bufsize=4)('foob') | ||||
|     Traceback (most recent call last): | ||||
|         ... | ||||
|     argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5 | ||||
|     """ | ||||
|     def __init__(self, bufsize): | ||||
|         self.bufsize = bufsize | ||||
|     def __call__(self, arg): | ||||
|         arg_bytes = arg.encode() + b'\x00' | ||||
|         if len(arg_bytes) > self.bufsize: | ||||
|             raise ArgumentTypeError( | ||||
|                 'Encoded asciiz length exceeded: ' | ||||
|                 f'max {self.bufsize}, got {len(arg_bytes)}') | ||||
|         return arg_bytes | ||||
| class VendorRamdiskTableBuilder: | ||||
|     """Vendor ramdisk table builder. | ||||
|     Attributes: | ||||
|         entries: A list of VendorRamdiskTableEntry namedtuple. | ||||
|         ramdisk_total_size: Total size in bytes of all ramdisks in the table. | ||||
|     """ | ||||
|     VendorRamdiskTableEntry = collections.namedtuple(  # pylint: disable=invalid-name | ||||
|         'VendorRamdiskTableEntry', | ||||
|         ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type', | ||||
|          'ramdisk_name', 'board_id']) | ||||
|     def __init__(self): | ||||
|         self.entries = [] | ||||
|         self.ramdisk_total_size = 0 | ||||
|         self.ramdisk_names = set() | ||||
|     def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id): | ||||
|         # Strip any trailing null for simple comparison. | ||||
|         stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00') | ||||
|         if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST: | ||||
|             raise ValueError( | ||||
|                 f'Banned vendor ramdisk name: {stripped_ramdisk_name}') | ||||
|         if stripped_ramdisk_name in self.ramdisk_names: | ||||
|             raise ValueError( | ||||
|                 f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}') | ||||
|         self.ramdisk_names.add(stripped_ramdisk_name) | ||||
|         if board_id is None: | ||||
|             board_id = array.array( | ||||
|                 'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE) | ||||
|         else: | ||||
|             board_id = array.array('I', board_id) | ||||
|         if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE: | ||||
|             raise ValueError('board_id size must be ' | ||||
|                              f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}') | ||||
|         with open(ramdisk_path, 'rb') as f: | ||||
|             ramdisk_size = filesize(f) | ||||
|         self.entries.append(self.VendorRamdiskTableEntry( | ||||
|             ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type, | ||||
|             ramdisk_name, board_id)) | ||||
|         self.ramdisk_total_size += ramdisk_size | ||||
|     def write_ramdisks_padded(self, fout, alignment): | ||||
|         for entry in self.entries: | ||||
|             with open(entry.ramdisk_path, 'rb') as f: | ||||
|                 fout.write(f.read()) | ||||
|         pad_file(fout, alignment) | ||||
|     def write_entries_padded(self, fout, alignment): | ||||
|         for entry in self.entries: | ||||
|             fout.write(pack('I', entry.ramdisk_size)) | ||||
|             fout.write(pack('I', entry.ramdisk_offset)) | ||||
|             fout.write(pack('I', entry.ramdisk_type)) | ||||
|             fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s', | ||||
|                             entry.ramdisk_name)) | ||||
|             fout.write(entry.board_id) | ||||
|         pad_file(fout, alignment) | ||||
| def write_padded_file(f_out, f_in, padding): | ||||
|     if f_in is None: | ||||
|         return | ||||
|     f_out.write(f_in.read()) | ||||
|     pad_file(f_out, padding) | ||||
| def parse_int(x): | ||||
|     return int(x, 0) | ||||
| def parse_os_version(x): | ||||
|     match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x) | ||||
|     if match: | ||||
|         a = int(match.group(1)) | ||||
|         b = c = 0 | ||||
|         if match.lastindex >= 2: | ||||
|             b = int(match.group(2)) | ||||
|         if match.lastindex == 3: | ||||
|             c = int(match.group(3)) | ||||
|         # 7 bits allocated for each field | ||||
|         assert a < 128 | ||||
|         assert b < 128 | ||||
|         assert c < 128 | ||||
|         return (a << 14) | (b << 7) | c | ||||
|     return 0 | ||||
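| # For example: parse_os_version('12.0.0') == (12 << 14) == 0x30000, since the | ||||
| # three fields are packed as (a << 14) | (b << 7) | c. | ||||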
| def parse_os_patch_level(x): | ||||
|     match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x) | ||||
|     if match: | ||||
|         y = int(match.group(1)) - 2000 | ||||
|         m = int(match.group(2)) | ||||
|         # 7 bits allocated for the year, 4 bits for the month | ||||
|         assert 0 <= y < 128 | ||||
|         assert 0 < m <= 12 | ||||
|         return (y << 4) | m | ||||
|     return 0 | ||||
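| # For example: parse_os_patch_level('2021-07') == (21 << 4) | 7 == 0x157. | ||||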
| def parse_vendor_ramdisk_type(x): | ||||
|     type_dict = { | ||||
|         'none': VENDOR_RAMDISK_TYPE_NONE, | ||||
|         'platform': VENDOR_RAMDISK_TYPE_PLATFORM, | ||||
|         'recovery': VENDOR_RAMDISK_TYPE_RECOVERY, | ||||
|         'dlkm': VENDOR_RAMDISK_TYPE_DLKM, | ||||
|     } | ||||
|     if x.lower() in type_dict: | ||||
|         return type_dict[x.lower()] | ||||
|     return parse_int(x) | ||||
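| # For example: parse_vendor_ramdisk_type('recovery') returns | ||||
| # VENDOR_RAMDISK_TYPE_RECOVERY, while a numeric string such as '0x5' falls | ||||
| # through to parse_int and returns 5. | ||||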
| def get_vendor_boot_v4_usage(): | ||||
|     return """vendor boot version 4 arguments: | ||||
|   --ramdisk_type {none,platform,recovery,dlkm} | ||||
|                         specify the type of the ramdisk | ||||
|   --ramdisk_name NAME | ||||
|                         specify the name of the ramdisk | ||||
|   --board_id{0..15} NUMBER | ||||
|                         specify the value of the board_id vector, defaults to 0 | ||||
|   --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE | ||||
|                         path to the vendor ramdisk file | ||||
|   These options can be specified multiple times, where each vendor ramdisk | ||||
|   option group ends with a --vendor_ramdisk_fragment option. | ||||
|   Each option group appends an additional ramdisk to the vendor boot image. | ||||
| """ | ||||
| def parse_vendor_ramdisk_args(args, args_list): | ||||
|     """Parses vendor ramdisk specific arguments. | ||||
|     Args: | ||||
|         args: An argparse.Namespace object. Parsed results are stored into this | ||||
|             object. | ||||
|         args_list: A list of argument strings to be parsed. | ||||
|     Returns: | ||||
|         A list of argument strings that were not parsed by this method. | ||||
|     """ | ||||
|     parser = ArgumentParser(add_help=False) | ||||
|     parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type, | ||||
|                         default=VENDOR_RAMDISK_TYPE_NONE) | ||||
|     parser.add_argument('--ramdisk_name', | ||||
|                         type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE), | ||||
|                         required=True) | ||||
|     for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE): | ||||
|         parser.add_argument(f'--board_id{i}', type=parse_int, default=0) | ||||
|     parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True) | ||||
|     unknown_args = [] | ||||
|     vendor_ramdisk_table_builder = VendorRamdiskTableBuilder() | ||||
|     if args.vendor_ramdisk is not None: | ||||
|         vendor_ramdisk_table_builder.add_entry( | ||||
|             args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None) | ||||
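|     # args_list is consumed in fragment-sized chunks: each chunk runs up to | ||||
|     # and including a '--vendor_ramdisk_fragment FILE' pair (hence the '+ 2' | ||||
|     # below), and each iteration adds one ramdisk entry to the table. | ||||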
|     while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list: | ||||
|         idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2 | ||||
|         vendor_ramdisk_args = args_list[:idx] | ||||
|         args_list = args_list[idx:] | ||||
|         ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args) | ||||
|         ramdisk_args_dict = vars(ramdisk_args) | ||||
|         unknown_args.extend(extra_args) | ||||
|         ramdisk_path = ramdisk_args.vendor_ramdisk_fragment | ||||
|         ramdisk_type = ramdisk_args.ramdisk_type | ||||
|         ramdisk_name = ramdisk_args.ramdisk_name | ||||
|         board_id = [ramdisk_args_dict[f'board_id{i}'] | ||||
|                     for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)] | ||||
|         vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type, | ||||
|                                                ramdisk_name, board_id) | ||||
|     if len(args_list) > 0: | ||||
|         unknown_args.extend(args_list) | ||||
|     args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder | ||||
|                                       .ramdisk_total_size) | ||||
|     args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder | ||||
|                                               .entries) | ||||
|     args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder | ||||
|     return unknown_args | ||||
| def parse_cmdline(): | ||||
|     version_parser = ArgumentParser(add_help=False) | ||||
|     version_parser.add_argument('--header_version', type=parse_int, default=0) | ||||
|     if version_parser.parse_known_args()[0].header_version < 3: | ||||
|         # For boot header v0 to v2, the kernel commandline field is split into | ||||
|         # two fields, cmdline and extra_cmdline. Both fields are asciiz strings, | ||||
|         # so we subtract one here to ensure the encoded string plus the | ||||
|         # null terminator can fit in the combined buffer size. | ||||
|         cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1 | ||||
|     else: | ||||
|         cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE | ||||
|     parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, | ||||
|                             epilog=get_vendor_boot_v4_usage()) | ||||
|     parser.add_argument('--kernel', type=FileType('rb'), | ||||
|                         help='path to the kernel') | ||||
|     parser.add_argument('--ramdisk', type=FileType('rb'), | ||||
|                         help='path to the ramdisk') | ||||
|     parser.add_argument('--second', type=FileType('rb'), | ||||
|                         help='path to the second bootloader') | ||||
|     parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb') | ||||
|     dtbo_group = parser.add_mutually_exclusive_group() | ||||
|     dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'), | ||||
|                             help='path to the recovery DTBO') | ||||
|     dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'), | ||||
|                             metavar='RECOVERY_ACPIO', dest='recovery_dtbo', | ||||
|                             help='path to the recovery ACPIO') | ||||
|     parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size), | ||||
|                         default='', help='kernel command line arguments') | ||||
|     parser.add_argument('--vendor_cmdline', | ||||
|                         type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE), | ||||
|                         default='', | ||||
|                         help='vendor boot kernel command line arguments') | ||||
|     parser.add_argument('--base', type=parse_int, default=0x10000000, | ||||
|                         help='base address') | ||||
|     parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000, | ||||
|                         help='kernel offset') | ||||
|     parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000, | ||||
|                         help='ramdisk offset') | ||||
|     parser.add_argument('--second_offset', type=parse_int, default=0x00f00000, | ||||
|                         help='second bootloader offset') | ||||
|     parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000, | ||||
|                         help='dtb offset') | ||||
|     parser.add_argument('--os_version', type=parse_os_version, default=0, | ||||
|                         help='operating system version') | ||||
|     parser.add_argument('--os_patch_level', type=parse_os_patch_level, | ||||
|                         default=0, help='operating system patch level') | ||||
|     parser.add_argument('--tags_offset', type=parse_int, default=0x00000100, | ||||
|                         help='tags offset') | ||||
|     parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE), | ||||
|                         default='', help='board name') | ||||
|     parser.add_argument('--pagesize', type=parse_int, | ||||
|                         choices=[2**i for i in range(11, 15)], default=2048, | ||||
|                         help='page size') | ||||
|     parser.add_argument('--id', action='store_true', | ||||
|                         help='print the image ID on standard output') | ||||
|     parser.add_argument('--header_version', type=parse_int, default=0, | ||||
|                         help='boot image header version') | ||||
|     parser.add_argument('-o', '--output', type=FileType('wb'), | ||||
|                         help='output file name') | ||||
|     parser.add_argument('--gki_signing_algorithm', | ||||
|                         help='GKI signing algorithm to use') | ||||
|     parser.add_argument('--gki_signing_key', | ||||
|                         help='path to RSA private key file') | ||||
|     parser.add_argument('--gki_signing_signature_args', | ||||
|                         help='other hash arguments passed to avbtool') | ||||
|     parser.add_argument('--gki_signing_avbtool_path', | ||||
|                         help='path to avbtool for boot signature generation') | ||||
|     parser.add_argument('--vendor_boot', type=FileType('wb'), | ||||
|                         help='vendor boot output file name') | ||||
|     parser.add_argument('--vendor_ramdisk', type=FileType('rb'), | ||||
|                         help='path to the vendor ramdisk') | ||||
|     parser.add_argument('--vendor_bootconfig', type=FileType('rb'), | ||||
|                         help='path to the vendor bootconfig file') | ||||
|     args, extra_args = parser.parse_known_args() | ||||
|     if args.vendor_boot is not None and args.header_version > 3: | ||||
|         extra_args = parse_vendor_ramdisk_args(args, extra_args) | ||||
|     if len(extra_args) > 0: | ||||
|         raise ValueError(f'Unrecognized arguments: {extra_args}') | ||||
|     if args.header_version < 3: | ||||
|         args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:] | ||||
|         args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00' | ||||
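|         # For example (assuming the usual BOOT_ARGS_SIZE of 512): a 600-byte | ||||
|         # encoded cmdline keeps bytes 0..510 plus b'\x00' in args.cmdline, | ||||
|         # and the remaining bytes spill into args.extra_cmdline. | ||||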
|         assert len(args.cmdline) <= BOOT_ARGS_SIZE | ||||
|         assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE | ||||
|     return args | ||||
| def add_boot_image_signature(args, pagesize): | ||||
|     """Adds the boot image signature. | ||||
|     Note that the signature will only be verified in VTS to ensure a | ||||
|     generic boot.img is used. It will not be used by the device | ||||
|     bootloader at boot time. The bootloader should only verify | ||||
|     the boot vbmeta at the end of the boot partition (or in the top-level | ||||
|     vbmeta partition) via the Android Verified Boot process, when the | ||||
|     device boots. | ||||
|     """ | ||||
|     args.output.flush()  # Flush the buffer for signature calculation. | ||||
|     # Appends zeros if the signing key is not specified. | ||||
|     if not args.gki_signing_key or not args.gki_signing_algorithm: | ||||
|         zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE | ||||
|         args.output.write(zeros) | ||||
|         pad_file(args.output, pagesize) | ||||
|         return | ||||
|     avbtool = 'avbtool'  # Used from otatools.zip or Android build env. | ||||
|     # We need to specify the path of avbtool in build/core/Makefile, | ||||
|     # because avbtool is not guaranteed to be in $PATH there. | ||||
|     if args.gki_signing_avbtool_path: | ||||
|         avbtool = args.gki_signing_avbtool_path | ||||
|     # Need to specify a value of --partition_size for avbtool to work. | ||||
|     # We use 64 MB below, but avbtool will not resize the boot image to | ||||
|     # this size because --do_not_append_vbmeta_image is also specified. | ||||
|     avbtool_cmd = [ | ||||
|         avbtool, 'add_hash_footer', | ||||
|         '--partition_name', 'boot', | ||||
|         '--partition_size', str(64 * 1024 * 1024), | ||||
|         '--image', args.output.name, | ||||
|         '--algorithm', args.gki_signing_algorithm, | ||||
|         '--key', args.gki_signing_key, | ||||
|         '--salt', 'd00df00d']  # TODO: use a hash of kernel/ramdisk as the salt. | ||||
|     # Additional arguments passed to avbtool. | ||||
|     if args.gki_signing_signature_args: | ||||
|         avbtool_cmd += args.gki_signing_signature_args.split() | ||||
|     # Output the signed vbmeta to a separate file, then append it to boot.img | ||||
|     # as the boot signature. | ||||
|     with tempfile.TemporaryDirectory() as temp_out_dir: | ||||
|         boot_signature_output = os.path.join(temp_out_dir, 'boot_signature') | ||||
|         avbtool_cmd += ['--do_not_append_vbmeta_image', | ||||
|                         '--output_vbmeta_image', boot_signature_output] | ||||
|         subprocess.check_call(avbtool_cmd) | ||||
|         with open(boot_signature_output, 'rb') as boot_signature: | ||||
|             if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE: | ||||
|                 raise ValueError( | ||||
|                     f'boot signature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}') | ||||
|             write_padded_file(args.output, boot_signature, pagesize) | ||||
| def write_data(args, pagesize): | ||||
|     write_padded_file(args.output, args.kernel, pagesize) | ||||
|     write_padded_file(args.output, args.ramdisk, pagesize) | ||||
|     write_padded_file(args.output, args.second, pagesize) | ||||
|     if args.header_version > 0 and args.header_version < 3: | ||||
|         write_padded_file(args.output, args.recovery_dtbo, pagesize) | ||||
|     if args.header_version == 2: | ||||
|         write_padded_file(args.output, args.dtb, pagesize) | ||||
|     if args.header_version >= 4: | ||||
|         add_boot_image_signature(args, pagesize) | ||||
| def write_vendor_boot_data(args): | ||||
|     if args.header_version > 3: | ||||
|         builder = args.vendor_ramdisk_table_builder | ||||
|         builder.write_ramdisks_padded(args.vendor_boot, args.pagesize) | ||||
|         write_padded_file(args.vendor_boot, args.dtb, args.pagesize) | ||||
|         builder.write_entries_padded(args.vendor_boot, args.pagesize) | ||||
|         write_padded_file(args.vendor_boot, args.vendor_bootconfig, | ||||
|             args.pagesize) | ||||
|     else: | ||||
|         write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize) | ||||
|         write_padded_file(args.vendor_boot, args.dtb, args.pagesize) | ||||
| def main(): | ||||
|     args = parse_cmdline() | ||||
|     if args.vendor_boot is not None: | ||||
|         if args.header_version not in {3, 4}: | ||||
|             raise ValueError( | ||||
|                 '--vendor_boot not compatible with given header version') | ||||
|         if args.header_version == 3 and args.vendor_ramdisk is None: | ||||
|             raise ValueError('--vendor_ramdisk missing or invalid') | ||||
|         write_vendor_boot_header(args) | ||||
|         write_vendor_boot_data(args) | ||||
|     if args.output is not None: | ||||
|         if args.second is not None and args.header_version > 2: | ||||
|             raise ValueError( | ||||
|                 '--second not compatible with given header version') | ||||
|         img_id = write_header(args) | ||||
|         if args.header_version > 2: | ||||
|             write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE) | ||||
|         else: | ||||
|             write_data(args, args.pagesize) | ||||
|         if args.id and img_id is not None: | ||||
|             print('0x' + ''.join(f'{octet:02x}' for octet in img_id)) | ||||
| if __name__ == '__main__': | ||||
|     main() | ||||
| @@ -1,16 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power up" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
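| # The switch exposes per-port power control via this SNMP OID; the port index | ||||
| # is BM_POE_BASE (default 0) plus the interface number. flock serializes | ||||
| # access so concurrent jobs don't race on the switch. | ||||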
| SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))" | ||||
| SNMP_OFF="i 2" | ||||
|  | ||||
| flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF" | ||||
| @@ -1,19 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power up" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((${BM_POE_BASE:-0} + BM_POE_INTERFACE))" | ||||
| SNMP_ON="i 1" | ||||
| SNMP_OFF="i 2" | ||||
|  | ||||
| flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF" | ||||
| sleep 3s | ||||
| flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON" | ||||
| @@ -1,236 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC1091 | ||||
| # shellcheck disable=SC2034 | ||||
| # shellcheck disable=SC2059 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| . "$SCRIPTS_DIR"/setup-test-env.sh | ||||
|  | ||||
| # Boot script for devices attached to a PoE switch, using NFS for the root | ||||
| # filesystem. | ||||
|  | ||||
| # We're run from the root of the repo, make a helper var for our paths | ||||
| BM=$CI_PROJECT_DIR/install/bare-metal | ||||
| CI_COMMON=$CI_PROJECT_DIR/install/common | ||||
| CI_INSTALL=$CI_PROJECT_DIR/install | ||||
|  | ||||
| # Runner config checks | ||||
| if [ -z "$BM_SERIAL" ]; then | ||||
|   echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the serial port to listen the device." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|   echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the PoE switch address to connect for powering up/down devices." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|   echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the PoE switch interface where the device is connected." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POWERUP" ]; then | ||||
|   echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is a shell script that should power up the device and begin its boot sequence." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POWERDOWN" ]; then | ||||
|   echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is a shell script that should power off the device." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /nfs ]; then | ||||
|   echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /tftp ]; then | ||||
|   echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # job config checks | ||||
| if [ -z "$BM_ROOTFS" ]; then | ||||
|   echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_BOOTFS" ] && { [ -z "$BM_KERNEL" ] || [ -z "$BM_DTB" ]; } ; then | ||||
|   echo "Must set /boot files for the TFTP boot in the job's variables or set kernel and dtb" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_CMDLINE" ]; then | ||||
|   echo "Must set BM_CMDLINE to your board's kernel command line arguments" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| section_start prepare_rootfs "Preparing rootfs components" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Clear out any previous run's artifacts. | ||||
| rm -rf results/ | ||||
| mkdir -p results | ||||
|  | ||||
| # Create the rootfs in the NFS directory. Delete anything stale so it's in a | ||||
| # pristine state, since it's volume-mounted on the host. | ||||
| rsync -a --delete $BM_ROOTFS/ /nfs/ | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # If BM_BOOTFS is a URL, download it | ||||
| if echo $BM_BOOTFS | grep -q http; then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     "${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS" -o /tmp/bootfs.tar | ||||
|   BM_BOOTFS=/tmp/bootfs.tar | ||||
| fi | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # If BM_BOOTFS is a file, assume it is a tarball and uncompress it | ||||
| if [ -f "${BM_BOOTFS}" ]; then | ||||
|   mkdir -p /tmp/bootfs | ||||
|   tar xf $BM_BOOTFS -C /tmp/bootfs | ||||
|   BM_BOOTFS=/tmp/bootfs | ||||
| fi | ||||
|  | ||||
| # If an external kernel is requested, BM_KERNEL and BM_DTB must be present | ||||
| if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then | ||||
|   if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then | ||||
|     echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!" | ||||
|     exit 1 | ||||
|   fi | ||||
|  | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o "${BM_KERNEL}" | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o "${BM_DTB}.dtb" | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|       "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst | ||||
| fi | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Install kernel modules (they could be in either /lib/modules or | ||||
| # /usr/lib/modules, but we want to install into the latter) | ||||
| if [ -n "${EXTERNAL_KERNEL_TAG}" ]; then | ||||
|   tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/ | ||||
|   rm modules.tar.zst & | ||||
| elif [ -n "${BM_BOOTFS}" ]; then | ||||
|   [ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/ | ||||
|   [ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/ | ||||
| else | ||||
|   echo "No modules!" | ||||
| fi | ||||
|  | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Install kernel image + bootloader files | ||||
| if [ -n "${EXTERNAL_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then | ||||
|   mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/ | ||||
| else  # BM_BOOTFS | ||||
|   rsync -aL --delete $BM_BOOTFS/boot/ /tftp/ | ||||
| fi | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Set up the pxelinux config for Jetson Nano | ||||
| mkdir -p /tftp/pxelinux.cfg | ||||
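| # Note: \${cbootargs} is escaped below so this shell writes the literal | ||||
| # ${cbootargs} into the config for the bootloader to expand at boot time. | ||||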
| cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000 | ||||
| PROMPT 0 | ||||
| TIMEOUT 30 | ||||
| DEFAULT primary | ||||
| MENU TITLE jetson nano boot options | ||||
| LABEL primary | ||||
|       MENU LABEL CI kernel on TFTP | ||||
|       LINUX Image | ||||
|       FDT tegra210-p3450-0000.dtb | ||||
|       APPEND \${cbootargs} $BM_CMDLINE | ||||
| EOF | ||||
|  | ||||
| # Set up the pxelinux config for Jetson TK1 | ||||
| cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1 | ||||
| PROMPT 0 | ||||
| TIMEOUT 30 | ||||
| DEFAULT primary | ||||
| MENU TITLE jetson TK1 boot options | ||||
| LABEL primary | ||||
|       MENU LABEL CI kernel on TFTP | ||||
|       LINUX zImage | ||||
|       FDT tegra124-jetson-tk1.dtb | ||||
|       APPEND \${cbootargs} $BM_CMDLINE | ||||
| EOF | ||||
|  | ||||
| # Create the rootfs in the NFS directory | ||||
| . $BM/rootfs-setup.sh /nfs | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| echo "$BM_CMDLINE" > /tftp/cmdline.txt | ||||
|  | ||||
| # Add some options in config.txt, if defined | ||||
| if [ -n "$BM_BOOTCONFIG" ]; then | ||||
|   printf "$BM_BOOTCONFIG" >> /tftp/config.txt | ||||
| fi | ||||
|  | ||||
| section_end prepare_rootfs | ||||
|  | ||||
| set +e | ||||
| STRUCTURED_LOG_FILE=results/job_detail.json | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}" | ||||
| ATTEMPTS=3 | ||||
| first_attempt=True | ||||
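| # Retry the boot up to $ATTEMPTS times; poe_run.py exits 2 when the DUT never | ||||
| # reached the kernel, which is the only case we retry. | ||||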
| while [ $((ATTEMPTS--)) -gt 0 ]; do | ||||
|   section_start dut_boot "Booting hardware device ..." | ||||
|   python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}" | ||||
|   # Set the submit time to CI_JOB_STARTED_AT only for the first attempt | ||||
|   if [ "$first_attempt" = "True" ]; then | ||||
|     python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}" | ||||
|   else | ||||
|     python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit | ||||
|   fi | ||||
|   python3 $BM/poe_run.py \ | ||||
|           --dev="$BM_SERIAL" \ | ||||
|           --powerup="$BM_POWERUP" \ | ||||
|           --powerdown="$BM_POWERDOWN" \ | ||||
|           --boot-timeout-seconds ${BOOT_PHASE_TIMEOUT_SECONDS:-300} \ | ||||
|           --test-timeout-minutes ${TEST_PHASE_TIMEOUT_MINUTES:-$((CI_JOB_TIMEOUT/60 - ${TEST_SETUP_AND_UPLOAD_MARGIN_MINUTES:-5}))} | ||||
|   ret=$? | ||||
|  | ||||
|   if [ $ret -eq 2 ]; then | ||||
|     python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job | ||||
|     first_attempt=False | ||||
|     error "Device failed to boot; will retry" | ||||
|   else | ||||
|     # We're no longer in dut_boot by this point | ||||
|     unset CURRENT_SECTION | ||||
|     ATTEMPTS=0 | ||||
|   fi | ||||
| done | ||||
|  | ||||
| section_start dut_cleanup "Cleaning up after job" | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job | ||||
| python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close | ||||
| set -e | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Bring artifacts back from the NFS dir to the build dir where gitlab-runner | ||||
| # will look for them. | ||||
| cp -Rp /nfs/results/. results/ | ||||
|  | ||||
| date +'%F %T' | ||||
| section_end dut_cleanup | ||||
|  | ||||
| exit $ret | ||||
| @@ -1,134 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Igalia, S.L. | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
| import threading | ||||
|  | ||||
| from custom_logger import CustomLogger | ||||
| from serial_buffer import SerialBuffer | ||||
|  | ||||
| class PoERun: | ||||
|     def __init__(self, args, boot_timeout, test_timeout, logger): | ||||
|         self.powerup = args.powerup | ||||
|         self.powerdown = args.powerdown | ||||
|         self.ser = SerialBuffer( | ||||
|             args.dev, "results/serial-output.txt", ": ") | ||||
|         self.boot_timeout = boot_timeout | ||||
|         self.test_timeout = test_timeout | ||||
|         self.logger = logger | ||||
|  | ||||
|     def print_error(self, message): | ||||
|         RED = '\033[0;31m' | ||||
|         NO_COLOR = '\033[0m' | ||||
|         print(RED + message + NO_COLOR) | ||||
|         self.logger.update_status_fail(message) | ||||
|  | ||||
|     def logged_system(self, cmd): | ||||
|         print("Running '{}'".format(cmd)) | ||||
|         return os.system(cmd) | ||||
|  | ||||
|     def run(self): | ||||
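|         # Return codes: 2 if the boot banner is never seen (the caller may | ||||
|         # retry), 1 for power-up failure, kernel panic or known hangs, else | ||||
|         # the exit code reported by the DUT via the 'hwci: mesa:' line. | ||||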
|         if self.logged_system(self.powerup) != 0: | ||||
|             self.logger.update_status_fail("powerup failed") | ||||
|             return 1 | ||||
|  | ||||
|         boot_detected = False | ||||
|         self.logger.create_job_phase("boot") | ||||
|         for line in self.ser.lines(timeout=self.boot_timeout, phase="bootloader"): | ||||
|             if re.search("Booting Linux", line): | ||||
|                 boot_detected = True | ||||
|                 break | ||||
|  | ||||
|         if not boot_detected: | ||||
|             self.print_error( | ||||
|                 "Something wrong; couldn't detect the boot start up sequence") | ||||
|             return 2 | ||||
|  | ||||
|         self.logger.create_job_phase("test") | ||||
|         for line in self.ser.lines(timeout=self.test_timeout, phase="test"): | ||||
|             if re.search("---. end Kernel panic", line): | ||||
|                 self.logger.update_status_fail("kernel panic") | ||||
|                 return 1 | ||||
|  | ||||
|             # Binning memory problems | ||||
|             if re.search("binner overflow mem", line): | ||||
|                 self.print_error("Memory overflow in the binner; GPU hang") | ||||
|                 return 1 | ||||
|  | ||||
|             if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line): | ||||
|                 self.print_error("nouveau jetson boot bug, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             # network fail on tk1 | ||||
|             if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line): | ||||
|                 self.print_error("nouveau jetson tk1 network fail, abandoning run.") | ||||
|                 return 1 | ||||
|  | ||||
|             result = re.search(r"hwci: mesa: (\S*), exit_code: (\d+)", line) | ||||
|             if result: | ||||
|                 status = result.group(1) | ||||
|                 exit_code = int(result.group(2)) | ||||
|  | ||||
|                 if status == "pass": | ||||
|                     self.logger.update_dut_job("status", "pass") | ||||
|                 else: | ||||
|                     self.logger.update_status_fail("test fail") | ||||
|  | ||||
|                 self.logger.update_dut_job("exit_code", exit_code) | ||||
|                 return exit_code | ||||
|  | ||||
|         self.print_error( | ||||
|             "Reached the end of the CPU serial log without finding a result") | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     parser = argparse.ArgumentParser() | ||||
|     parser.add_argument('--dev', type=str, | ||||
|                         help='Serial device to monitor', required=True) | ||||
|     parser.add_argument('--powerup', type=str, | ||||
|                         help='shell command for rebooting', required=True) | ||||
|     parser.add_argument('--powerdown', type=str, | ||||
|                         help='shell command for powering off', required=True) | ||||
|     parser.add_argument( | ||||
|         '--boot-timeout-seconds', type=int, help='Boot phase timeout (seconds)', required=True) | ||||
|     parser.add_argument( | ||||
|         '--test-timeout-minutes', type=int, help='Test phase timeout (minutes)', required=True) | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     logger = CustomLogger("results/job_detail.json") | ||||
|     logger.update_dut_time("start", None) | ||||
|     poe = PoERun(args, args.boot_timeout_seconds, args.test_timeout_minutes * 60, logger) | ||||
|     retval = poe.run() | ||||
|  | ||||
|     poe.logged_system(args.powerdown) | ||||
|     logger.update_dut_time("end", None) | ||||
|  | ||||
|     sys.exit(retval) | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     main() | ||||
| @@ -1,34 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| rootfs_dst=$1 | ||||
|  | ||||
| mkdir -p $rootfs_dst/results | ||||
|  | ||||
| # Set up the init script that brings up the system. | ||||
| cp $BM/bm-init.sh $rootfs_dst/init | ||||
| cp $CI_COMMON/init*.sh $rootfs_dst/ | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| # Make the JWT token available as a file in the bare-metal storage to enable | ||||
| # access to MinIO | ||||
| cp "${S3_JWT_FILE}" "${rootfs_dst}${S3_JWT_FILE}" | ||||
|  | ||||
| date +'%F %T' | ||||
|  | ||||
| cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/" | ||||
|  | ||||
| set +x | ||||
|  | ||||
| # Pass through relevant env vars from the gitlab job to the baremetal init script | ||||
| echo "Variables passed through:" | ||||
| "$CI_COMMON"/export-gitlab-job-env-for-dut.sh | tee $rootfs_dst/set-job-env-vars.sh | ||||
|  | ||||
| set -x | ||||
|  | ||||
| # Add the Mesa drivers we built, and make a consistent symlink to them. | ||||
| mkdir -p $rootfs_dst/$CI_PROJECT_DIR | ||||
| rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/ | ||||
|  | ||||
| date +'%F %T' | ||||
| @@ -1,186 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| from datetime import datetime, UTC | ||||
| import queue | ||||
| import serial | ||||
| import threading | ||||
| import time | ||||
|  | ||||
|  | ||||
| class SerialBuffer: | ||||
|     def __init__(self, dev, filename, prefix, timeout=None, line_queue=None): | ||||
|         self.filename = filename | ||||
|         self.dev = dev | ||||
|  | ||||
|         if dev: | ||||
|             self.f = open(filename, "wb+") | ||||
|             self.serial = serial.Serial(dev, 115200, timeout=timeout) | ||||
|         else: | ||||
|             self.f = open(filename, "rb") | ||||
|             self.serial = None | ||||
|  | ||||
|         self.byte_queue = queue.Queue() | ||||
|         # allow multiple SerialBuffers to share a line queue, so you can | ||||
|         # merge servo's CPU and EC streams into a single stream for watching | ||||
|         # the boot/test progress. | ||||
|         if line_queue: | ||||
|             self.line_queue = line_queue | ||||
|         else: | ||||
|             self.line_queue = queue.Queue() | ||||
|         self.prefix = prefix | ||||
|         self.timeout = timeout | ||||
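|         # Unique end-of-stream marker, pushed through byte_queue and | ||||
|         # line_queue when the reader threads finish. | ||||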
|         self.sentinel = object() | ||||
|         self.closing = False | ||||
|  | ||||
|         if self.dev: | ||||
|             self.read_thread = threading.Thread( | ||||
|                 target=self.serial_read_thread_loop, daemon=True) | ||||
|         else: | ||||
|             self.read_thread = threading.Thread( | ||||
|                 target=self.serial_file_read_thread_loop, daemon=True) | ||||
|         self.read_thread.start() | ||||
|  | ||||
|         self.lines_thread = threading.Thread( | ||||
|             target=self.serial_lines_thread_loop, daemon=True) | ||||
|         self.lines_thread.start() | ||||
|  | ||||
|     def close(self): | ||||
|         self.closing = True | ||||
|         if self.serial: | ||||
|             self.serial.cancel_read() | ||||
|         self.read_thread.join() | ||||
|         self.lines_thread.join() | ||||
|         if self.serial: | ||||
|             self.serial.close() | ||||
|  | ||||
|     # Thread that just reads bytes from the serial device, to keep its buffer | ||||
|     # from overflowing. If the read times out with nothing received, it finalizes. | ||||
|     def serial_read_thread_loop(self): | ||||
|         greet = "Serial thread reading from %s\n" % self.dev | ||||
|         self.byte_queue.put(greet.encode()) | ||||
|  | ||||
|         while not self.closing: | ||||
|             try: | ||||
|                 b = self.serial.read() | ||||
|                 if len(b) == 0: | ||||
|                     break | ||||
|                 self.byte_queue.put(b) | ||||
|             except Exception as err: | ||||
|                 print(self.prefix + str(err)) | ||||
|                 break | ||||
|         self.byte_queue.put(self.sentinel) | ||||
|  | ||||
|     # Thread that just reads the bytes from the file of serial output that some | ||||
|     # other process is appending to. | ||||
|     def serial_file_read_thread_loop(self): | ||||
|         greet = "Serial thread reading from %s\n" % self.filename | ||||
|         self.byte_queue.put(greet.encode()) | ||||
|  | ||||
|         while not self.closing: | ||||
|             line = self.f.readline() | ||||
|             if line: | ||||
|                 self.byte_queue.put(line) | ||||
|             else: | ||||
|                 time.sleep(0.1) | ||||
|         self.byte_queue.put(self.sentinel) | ||||
|  | ||||
|     # Thread that processes the stream of bytes to 1) log to stdout, 2) log to | ||||
|     # file, 3) add to the queue of lines to be read by program logic | ||||
|  | ||||
|     def serial_lines_thread_loop(self): | ||||
|         line = bytearray() | ||||
|         while True: | ||||
|             bytes = self.byte_queue.get(block=True) | ||||
|  | ||||
|             if bytes == self.sentinel: | ||||
|                 self.read_thread.join() | ||||
|                 self.line_queue.put(self.sentinel) | ||||
|                 break | ||||
|  | ||||
|             # Write our data to the output file if we're the ones reading from | ||||
|             # the serial device | ||||
|             if self.dev: | ||||
|                 self.f.write(bytes) | ||||
|                 self.f.flush() | ||||
|  | ||||
|             for b in bytes: | ||||
|                 line.append(b) | ||||
|                 if b == b'\n'[0]: | ||||
|                     line = line.decode(errors="replace") | ||||
|  | ||||
|                     ts = datetime.now(tz=UTC) | ||||
|                     ts_str = f"{ts.hour:02}:{ts.minute:02}:{ts.second:02}.{int(ts.microsecond / 1000):03}" | ||||
|                     print("{endc}{time}{prefix}{line}".format( | ||||
|                         time=ts_str, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='') | ||||
|  | ||||
|                     self.line_queue.put(line) | ||||
|                     line = bytearray() | ||||
|  | ||||
|     def lines(self, timeout=None, phase=None): | ||||
|         start_time = time.monotonic() | ||||
|         while True: | ||||
|             read_timeout = None | ||||
|             if timeout: | ||||
|                 read_timeout = timeout - (time.monotonic() - start_time) | ||||
|                 if read_timeout <= 0: | ||||
|                     print("read timeout waiting for serial during {}".format(phase)) | ||||
|                     self.close() | ||||
|                     break | ||||
|  | ||||
|             try: | ||||
|                 line = self.line_queue.get(timeout=read_timeout) | ||||
|             except queue.Empty: | ||||
|                 print("read timeout waiting for serial during {}".format(phase)) | ||||
|                 self.close() | ||||
|                 break | ||||
|  | ||||
|             if line == self.sentinel: | ||||
|                 print("End of serial output") | ||||
|                 self.lines_thread.join() | ||||
|                 break | ||||
|  | ||||
|             yield line | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     parser = argparse.ArgumentParser() | ||||
|  | ||||
|     parser.add_argument('--dev', type=str, help='Serial device') | ||||
|     parser.add_argument('--file', type=str, | ||||
|                         help='Filename for serial output', required=True) | ||||
|     parser.add_argument('--prefix', type=str, | ||||
|                         help='Prefix for logging serial to stdout', nargs='?') | ||||
|  | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     ser = SerialBuffer(args.dev, args.file, args.prefix or "") | ||||
|     for line in ser.lines(): | ||||
|         # We're just using this as a logger, so eat the produced lines and drop | ||||
|         # them | ||||
|         pass | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     main() | ||||
| @@ -1,41 +0,0 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| # Copyright © 2020 Christian Gmeiner | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
| # | ||||
| # Tiny script to read bytes from telnet and write them to stdout, with a | ||||
| # buffer in between so we don't lose serial output from the device's buffer. | ||||
| # | ||||
|  | ||||
| import sys | ||||
| import telnetlib | ||||
|  | ||||
| host = sys.argv[1] | ||||
| port = sys.argv[2] | ||||
|  | ||||
| tn = telnetlib.Telnet(host, port, 1000000) | ||||
|  | ||||
| while True: | ||||
|     bytes = tn.read_some() | ||||
|     sys.stdout.buffer.write(bytes) | ||||
|     sys.stdout.flush() | ||||
|  | ||||
| tn.close() | ||||
| @@ -1 +0,0 @@ | ||||
| ../bin/ci | ||||
| @@ -1,851 +0,0 @@ | ||||
| # Shared between windows and Linux | ||||
| .build-common: | ||||
|   extends: .container+build-rules | ||||
|   # Cancel job if a newer commit is pushed to the same branch | ||||
|   interruptible: true | ||||
|   variables: | ||||
|     # Build jobs typically take between 5 and 12 minutes, depending on how | ||||
|     # much they build and how many new Rust compilers we have to build twice. | ||||
|     # Allow 25 minutes as a reasonable margin: beyond this point, something | ||||
|     # has gone badly wrong, and we should try again to see if we can get | ||||
|     # something from it. | ||||
|     # | ||||
|     # Some jobs not in the critical path use a higher timeout, particularly | ||||
|     # when building with ASan or UBSan. | ||||
|     BUILD_JOB_TIMEOUT: 12m | ||||
|     RUN_MESON_TESTS: "true" | ||||
|   timeout: 16m | ||||
|   # We don't want to download any previous job's artifacts | ||||
|   dependencies: [] | ||||
|   artifacts: | ||||
|     name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}" | ||||
|     when: always | ||||
|     paths: | ||||
|       - _build/meson-logs/*.txt | ||||
|       - _build/meson-logs/strace | ||||
|       - _build/.ninja_log | ||||
|       - artifacts | ||||
|  | ||||
| .build-run-long: | ||||
|   variables: | ||||
|     BUILD_JOB_TIMEOUT: 18m | ||||
|   timeout: 25m | ||||
|  | ||||
|  | ||||
| # Just Linux | ||||
| .build-linux: | ||||
|   extends: .build-common | ||||
|   variables: | ||||
|     CCACHE_COMPILERCHECK: "content" | ||||
|     CCACHE_COMPRESS: "true" | ||||
|     CCACHE_DIR: /cache/mesa/ccache | ||||
|   # Use ccache transparently, and print stats before/after | ||||
|   before_script: | ||||
|     - !reference [default, before_script] | ||||
|     - | | ||||
|       export PATH="/usr/lib/ccache:$PATH" | ||||
|       export CCACHE_BASEDIR="$PWD" | ||||
|       if test -x /usr/bin/ccache; then | ||||
|         section_start ccache_before "ccache stats before build" | ||||
|         ccache --show-stats | ||||
|         section_end ccache_before | ||||
|       fi | ||||
|   after_script: | ||||
|     - if test -x /usr/bin/ccache; then ccache --show-stats | grep "Hits:"; fi | ||||
|     - !reference [default, after_script] | ||||
|  | ||||
| .build-windows: | ||||
|   extends: | ||||
|     - .build-common | ||||
|     - .windows-docker-tags | ||||
|   cache: | ||||
|     key: ${CI_JOB_NAME} | ||||
|     paths: | ||||
|       - subprojects/packagecache | ||||
|  | ||||
| .meson-build-for-tests: | ||||
|   extends: | ||||
|     - .build-linux | ||||
|   stage: build-for-tests | ||||
|   script: | ||||
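|     # '&meson-build' defines a YAML anchor for this command; .meson-build-only | ||||
|     # and debian-release reuse it verbatim via the '*meson-build' alias. | ||||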
|     - &meson-build timeout --verbose ${BUILD_JOB_TIMEOUT_OVERRIDE:-$BUILD_JOB_TIMEOUT} bash --login .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|  | ||||
| .meson-build-only: | ||||
|   extends: | ||||
|     - .meson-build-for-tests | ||||
|     - .build-only-delayed-rules | ||||
|   stage: build-only | ||||
|   script: | ||||
|     - *meson-build | ||||
|  | ||||
|  | ||||
| debian-testing: | ||||
|   extends: | ||||
|     - .meson-build-for-tests | ||||
|     - .use-debian/x86_64_build | ||||
|     - .build-run-long # but it really shouldn't! tracked in mesa#12544 | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D egl=enabled | ||||
|       -D gbm=enabled | ||||
|       -D glvnd=disabled | ||||
|       -D glx=dri | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=true | ||||
|       -D gallium-va=enabled | ||||
|     GALLIUM_DRIVERS: "llvmpipe,softpipe,virgl,radeonsi,zink,iris,svga" | ||||
|     VULKAN_DRIVERS: "swrast,amd,intel,virtio" | ||||
|     BUILDTYPE: "debugoptimized" | ||||
|     EXTRA_OPTION: > | ||||
|       -D intel-elk=false | ||||
|       -D spirv-to-dxil=true | ||||
|       -D tools=drm-shim | ||||
|       -D valgrind=disabled | ||||
|     S3_ARTIFACT_NAME: mesa-x86_64-default-${BUILDTYPE} | ||||
|     RUN_MESON_TESTS: "false" # debian-build-testing already runs these | ||||
|   artifacts: | ||||
|     reports: | ||||
|       junit: artifacts/ci_scripts_report.xml | ||||
|  | ||||
| debian-testing-asan: | ||||
|   extends: | ||||
|     - debian-testing | ||||
|     - .meson-build-for-tests | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "swrast" | ||||
|     GALLIUM_DRIVERS: "llvmpipe,softpipe" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-truncation | ||||
|     EXTRA_OPTION: > | ||||
|       -D b_sanitize=address | ||||
|       -D gallium-va=false | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=false | ||||
|       -D mesa-clc=system | ||||
|       -D tools=dlclose-skip | ||||
|       -D valgrind=disabled | ||||
|     S3_ARTIFACT_NAME: "" | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     # Do a host build for mesa-clc (asan complains when it is not loaded as | ||||
|     # the first library) | ||||
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-rusticl=false | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-drivers= | ||||
|       -D glx=disabled | ||||
|       -D install-mesa-clc=true | ||||
|       -D mesa-clc=enabled | ||||
|       -D platforms= | ||||
|       -D video-codecs= | ||||
|       -D vulkan-drivers= | ||||
|  | ||||
| debian-testing-msan: | ||||
|   # https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo | ||||
|   # msan cannot fully work until it's used together with an msan-instrumented libc | ||||
|   extends: | ||||
|     - debian-clang | ||||
|     - .meson-build-only | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     # l_undef is incompatible with msan | ||||
|     EXTRA_OPTION: | ||||
|       -D b_sanitize=memory | ||||
|       -D b_lundef=false | ||||
|       -D mesa-clc=system | ||||
|       -D precomp-compiler=system | ||||
|     S3_ARTIFACT_NAME: "" | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     # Don't run all the tests yet: | ||||
|     # GLSL has some issues in sexpression reading. | ||||
|     # gtest has issues in its test initialization. | ||||
|     MESON_TEST_ARGS: "--suite glcpp --suite format" | ||||
|     GALLIUM_DRIVERS: "freedreno,iris,nouveau,r300,r600,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus" | ||||
|     VULKAN_DRIVERS: intel,amd,broadcom,virtio | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     # Do a host build for mesa-clc and precomp-compiler (msan complains about uninitialized | ||||
|     # values in the LLVM libs) | ||||
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-drivers= | ||||
|       -D vulkan-drivers= | ||||
|       -D video-codecs= | ||||
|       -D glx=disabled | ||||
|       -D platforms= | ||||
|       -D mesa-clc=enabled | ||||
|       -D install-mesa-clc=true | ||||
|       -D precomp-compiler=enabled | ||||
|       -D install-precomp-compiler=true | ||||
|       -D tools=panfrost | ||||
|  | ||||
| debian-testing-ubsan: | ||||
|   extends: | ||||
|     - debian-testing | ||||
|     - .meson-build-for-tests | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-overflow | ||||
|       -Wno-error=stringop-truncation | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|     GALLIUM_DRIVERS: "llvmpipe,softpipe" | ||||
|     VULKAN_DRIVERS: "swrast" | ||||
|     EXTRA_OPTION: > | ||||
|       -D b_sanitize=undefined | ||||
|       -D mesa-clc=system | ||||
|       -D gallium-rusticl=false | ||||
|       -D gallium-va=false | ||||
|       -D gallium-nine=false | ||||
|     S3_ARTIFACT_NAME: "" | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-rusticl=false | ||||
|       -D gallium-drivers= | ||||
|       -D vulkan-drivers= | ||||
|       -D video-codecs= | ||||
|       -D glx=disabled | ||||
|       -D platforms= | ||||
|       -D mesa-clc=enabled | ||||
|       -D install-mesa-clc=true | ||||
|  | ||||
| debian-build-testing: | ||||
|   extends: | ||||
|     - .meson-build-for-tests | ||||
|     - .use-debian/x86_64_build | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=disabled | ||||
|       -D platforms=x11,wayland | ||||
|       -D legacy-x11=dri2 | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=true | ||||
|       -D gallium-rusticl=false | ||||
|     GALLIUM_DRIVERS: "i915,iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus" | ||||
|     VULKAN_DRIVERS: "intel_hasvk,imagination-experimental,microsoft-experimental,nouveau,swrast" | ||||
|     BUILDTYPE: "debugoptimized" | ||||
|     EXTRA_OPTION: > | ||||
|       -D spirv-to-dxil=true | ||||
|       -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi | ||||
|       -D perfetto=true | ||||
|     S3_ARTIFACT_NAME: debian-build-testing | ||||
|  | ||||
| # Test a release build with -Werror so new warnings don't sneak in. | ||||
| debian-release: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-debian/x86_64_build | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-overread | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=disabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=false | ||||
|       -D llvm=enabled | ||||
|     GALLIUM_DRIVERS: "i915,iris,nouveau,r300,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus" | ||||
|     VULKAN_DRIVERS: "swrast,intel_hasvk,imagination-experimental,microsoft-experimental" | ||||
|     EXTRA_OPTION: > | ||||
|       -D spirv-to-dxil=true | ||||
|       -D tools=all | ||||
|       -D mesa-clc=enabled | ||||
|       -D precomp-compiler=enabled | ||||
|       -D intel-rt=enabled | ||||
|       -D imagination-srv=true | ||||
|     BUILDTYPE: "release" | ||||
|     S3_ARTIFACT_NAME: "mesa-x86_64-default-${BUILDTYPE}" | ||||
|   script: | ||||
|     - *meson-build | ||||
|     - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi' | ||||
|  | ||||
| alpine-build-testing: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-alpine/x86_64_build | ||||
|   variables: | ||||
|     BUILDTYPE: "release" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=cpp | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=stringop-overflow | ||||
|       -Wno-error=stringop-overread | ||||
|       -Wno-error=misleading-indentation | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=disabled | ||||
|       -D platforms=wayland | ||||
|     GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink" | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=true | ||||
|       -D gallium-rusticl=false | ||||
|       -D gles1=disabled | ||||
|       -D gles2=enabled | ||||
|       -D llvm=enabled | ||||
|       -D llvm-orcjit=true | ||||
|       -D microsoft-clc=disabled | ||||
|       -D shared-llvm=enabled | ||||
|     UNWIND: "disabled" | ||||
|     VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental" | ||||
|  | ||||
| fedora-release: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-fedora/x86_64_build | ||||
|     - .build-run-long | ||||
|   # LTO builds can be very slow, and we have no way to specify different | ||||
|   # timeouts for pre-merge and nightly jobs. | ||||
|   timeout: 1h | ||||
|   variables: | ||||
|     BUILDTYPE: "release" | ||||
|     # array-bounds is a bogus warning from non-LTO gcc builds | ||||
|     # maybe-uninitialized is misfiring in nir_lower_gs_intrinsics.c, and | ||||
|     # a "maybe" warning should never be an error anyway. | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-overflow | ||||
|       -Wno-error=stringop-overread | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=maybe-uninitialized | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=dangling-reference | ||||
|       -Wno-error=overloaded-virtual | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=enabled | ||||
|       -D platforms=x11,wayland | ||||
|     EXTRA_OPTION: > | ||||
|       -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination | ||||
|       -D vulkan-layers=device-select,overlay | ||||
|       -D intel-rt=enabled | ||||
|       -D imagination-srv=true | ||||
|       -D teflon=true | ||||
|     GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,lima,nouveau,panfrost,r300,r600,radeonsi,svga,llvmpipe,softpipe,tegra,v3d,vc4,virgl,zink" | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=true | ||||
|       -D gles1=disabled | ||||
|       -D gles2=enabled | ||||
|       -D llvm=enabled | ||||
|       -D microsoft-clc=disabled | ||||
|       -D shared-llvm=enabled | ||||
|     UNWIND: "disabled" | ||||
|     VULKAN_DRIVERS: "amd,asahi,broadcom,freedreno,imagination-experimental,intel,intel_hasvk" | ||||
|  | ||||
| debian-android: | ||||
|   extends: | ||||
|     - .android-variables | ||||
|     - .meson-cross | ||||
|     - .use-debian/android_build | ||||
|     - .ci-deqp-artifacts | ||||
|     - .meson-build-for-tests | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     UNWIND: "disabled" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=asm-operand-widths | ||||
|       -Wno-error=constant-conversion | ||||
|       -Wno-error=enum-conversion | ||||
|       -Wno-error=initializer-overrides | ||||
|       -Wno-error=sometimes-uninitialized | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=c99-designator | ||||
|       -Wno-error=unused-variable | ||||
|       -Wno-error=unused-but-set-variable | ||||
|       -Wno-error=self-assign | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D gbm=disabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=disabled | ||||
|       -D platforms=android | ||||
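|     # Force the Meson subproject (wrap) fallback for llvm instead of any system | ||||
|     # copy; the wrap file is generated by create-llvm-meson-wrap-file.sh below. | ||||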
|     FORCE_FALLBACK_FOR: llvm | ||||
|     EXTRA_OPTION: > | ||||
|       -D android-stub=true | ||||
|       -D platform-sdk-version=${ANDROID_SDK_VERSION} | ||||
|       -D cpp_rtti=false | ||||
|       -D valgrind=disabled | ||||
|       -D android-libbacktrace=disabled | ||||
|       -D mesa-clc=system | ||||
|       -D precomp-compiler=system | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=false | ||||
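|     # Point pkg-config at a path that does not exist so no host (non-Android) .pc files can be picked up. | ||||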
|     PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files" | ||||
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-drivers= | ||||
|       -D vulkan-drivers= | ||||
|       -D video-codecs= | ||||
|       -D glx=disabled | ||||
|       -D platforms= | ||||
|       -D mesa-clc=enabled | ||||
|       -D install-mesa-clc=true | ||||
|       -D precomp-compiler=enabled | ||||
|       -D install-precomp-compiler=true | ||||
|       -D tools=panfrost | ||||
|     S3_ARTIFACT_NAME: mesa-x86_64-android-${BUILDTYPE} | ||||
|   script: | ||||
|     # x86_64 build: | ||||
|     # Can't do AMD drivers because they require LLVM, which is currently | ||||
|     # problematic in our Android builds. | ||||
|     - export CROSS=x86_64-linux-android | ||||
|     - export GALLIUM_DRIVERS=iris,virgl,zink,softpipe | ||||
|     - export VULKAN_DRIVERS=intel,virtio,swrast | ||||
|     - .gitlab-ci/create-llvm-meson-wrap-file.sh | ||||
|     - *meson-build | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|     # remove all the files created by the previous build before the next build | ||||
|     - git clean -dxf . | ||||
|     # aarch64 build: | ||||
|     # build-only, to catch compilation regressions | ||||
|     # without calling .gitlab-ci/prepare-artifacts.sh so that the | ||||
|     # artifacts are not shipped in mesa-x86_64-android-${BUILDTYPE} | ||||
|     - export CROSS=aarch64-linux-android | ||||
|     - export GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d | ||||
|     - export VULKAN_DRIVERS=freedreno,broadcom,virtio | ||||
|     - *meson-build | ||||
|  | ||||
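| # Hidden template shared by the cross builds below; concrete jobs set CROSS to | ||||
| # the target architecture (e.g. armhf, i386, s390x, ppc64el). | ||||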
| .meson-cross: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-debian/x86_64_build | ||||
|   variables: | ||||
|     UNWIND: "disabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|  | ||||
| .meson-arm: | ||||
|   extends: | ||||
|     - .meson-cross | ||||
|     - .use-debian/arm64_build | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "asahi,broadcom,freedreno" | ||||
|     GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,llvmpipe,softpipe,tegra,v3d,vc4,zink" | ||||
|     BUILDTYPE: "debugoptimized" | ||||
|  | ||||
| debian-arm32: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|     - .meson-build-for-tests | ||||
|   variables: | ||||
|     CROSS: armhf | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     # Remove asahi & llvmpipe from the .meson-arm lists, since llvm is disabled here | ||||
|     VULKAN_DRIVERS: "broadcom,freedreno" | ||||
|     GALLIUM_DRIVERS: "etnaviv,freedreno,lima,nouveau,panfrost,softpipe,tegra,v3d,vc4,zink" | ||||
|     EXTRA_OPTION: > | ||||
|       -D llvm=disabled | ||||
|       -D valgrind=disabled | ||||
|       -D gallium-rusticl=false | ||||
|       -D mesa-clc=system | ||||
|       -D precomp-compiler=system | ||||
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-drivers= | ||||
|       -D vulkan-drivers= | ||||
|       -D video-codecs= | ||||
|       -D glx=disabled | ||||
|       -D platforms= | ||||
|       -D mesa-clc=enabled | ||||
|       -D install-mesa-clc=true | ||||
|       -D precomp-compiler=enabled | ||||
|       -D install-precomp-compiler=true | ||||
|       -D tools=panfrost | ||||
|     S3_ARTIFACT_NAME: mesa-arm32-default-${BUILDTYPE} | ||||
|     # The strip command segfaults, failing to strip the binary and leaving | ||||
|     # tempfiles in our artifacts. | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|  | ||||
| debian-arm32-asan: | ||||
|   extends: | ||||
|     - debian-arm32 | ||||
|     - .meson-build-for-tests | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     GALLIUM_DRIVERS: "etnaviv" | ||||
|     VULKAN_DRIVERS: "" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D llvm=disabled | ||||
|       -D b_sanitize=address | ||||
|       -D valgrind=disabled | ||||
|       -D tools=dlclose-skip | ||||
|       -D gallium-rusticl=false | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     S3_ARTIFACT_NAME: mesa-arm32-asan-${BUILDTYPE} | ||||
|  | ||||
| debian-arm64: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|     - .meson-build-for-tests | ||||
|   variables: | ||||
|     C_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=stringop-truncation | ||||
|     GALLIUM_DRIVERS: "etnaviv,freedreno,lima,panfrost,v3d,vc4,zink" | ||||
|     VULKAN_DRIVERS: "broadcom,freedreno,panfrost" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D valgrind=disabled | ||||
|       -D imagination-srv=true | ||||
|       -D freedreno-kmds=msm,virtio | ||||
|       -D teflon=true | ||||
|     GALLIUM_ST: | ||||
|       -D gallium-rusticl=true | ||||
|     RUN_MESON_TESTS: "false" # run by debian-arm64-build-testing | ||||
|     S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE} | ||||
|  | ||||
| debian-arm64-asan: | ||||
|   extends: | ||||
|     - debian-arm64 | ||||
|     - .meson-build-for-tests | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "broadcom,freedreno" | ||||
|     GALLIUM_DRIVERS: "freedreno,vc4,v3d" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D b_sanitize=address | ||||
|       -D valgrind=disabled | ||||
|       -D tools=dlclose-skip | ||||
|       -D gallium-rusticl=false | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     S3_ARTIFACT_NAME: mesa-arm64-asan-${BUILDTYPE} | ||||
|  | ||||
| debian-arm64-ubsan: | ||||
|   extends: | ||||
|     - debian-arm64 | ||||
|     - .meson-build-for-tests | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "broadcom" | ||||
|     GALLIUM_DRIVERS: "v3d,vc4" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=stringop-overflow | ||||
|       -Wno-error=stringop-truncation | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -fno-var-tracking-assignments | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D b_sanitize=undefined | ||||
|       -D gallium-rusticl=false | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     RUN_MESON_TESTS: "false" # just too slow | ||||
|     S3_ARTIFACT_NAME: mesa-arm64-ubsan-${BUILDTYPE} | ||||
|  | ||||
| debian-arm64-build-test: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|     - .meson-build-only | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "amd,asahi,imagination-experimental,nouveau" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D tools=panfrost,imagination | ||||
|       -D perfetto=true | ||||
|  | ||||
| debian-arm64-release: | ||||
|   extends: | ||||
|     - debian-arm64 | ||||
|     - .meson-build-only | ||||
|   variables: | ||||
|     BUILDTYPE: release | ||||
|     S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE} | ||||
|     C_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=stringop-truncation | ||||
|       -Wno-error=stringop-overread | ||||
|   script: | ||||
|     - *meson-build | ||||
|     - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi' | ||||
|  | ||||
| debian-no-libdrm: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .meson-build-only | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: freedreno | ||||
|     GALLIUM_DRIVERS: "zink,llvmpipe" | ||||
|     BUILDTYPE: release | ||||
|     C_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=stringop-truncation | ||||
|       -Wno-error=stringop-overread | ||||
|     EXTRA_OPTION: > | ||||
|       -D freedreno-kmds=kgsl | ||||
|       -D glx=disabled | ||||
|       -D gbm=disabled | ||||
|       -D egl=disabled | ||||
|       -D perfetto=true | ||||
|  | ||||
| debian-clang: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-debian/x86_64_build | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     UNWIND: "enabled" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=constant-conversion | ||||
|       -Wno-error=enum-conversion | ||||
|       -Wno-error=initializer-overrides | ||||
|       -Wno-error=sometimes-uninitialized | ||||
|       -Werror=misleading-indentation | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=c99-designator | ||||
|       -Wno-error=overloaded-virtual | ||||
|       -Wno-error=tautological-constant-out-of-range-compare | ||||
|       -Wno-error=unused-private-field | ||||
|       -Wno-error=vla-cxx-extension | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=enabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=true | ||||
|       -D gles1=enabled | ||||
|       -D gles2=enabled | ||||
|       -D llvm=enabled | ||||
|       -D microsoft-clc=disabled | ||||
|       -D shared-llvm=enabled | ||||
|     GALLIUM_DRIVERS: "iris,nouveau,r300,r600,freedreno,llvmpipe,softpipe,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi" | ||||
|     VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,swrast,panfrost,imagination-experimental,microsoft-experimental,nouveau | ||||
|     EXTRA_OPTION: | ||||
|       -D spirv-to-dxil=true | ||||
|       -D imagination-srv=true | ||||
|       -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi,imagination | ||||
|       -D vulkan-layers=device-select,overlay | ||||
|       -D build-radv-tests=true | ||||
|       -D build-aco-tests=true | ||||
|       -D mesa-clc=enabled | ||||
|       -D precomp-compiler=enabled | ||||
|       -D intel-rt=enabled | ||||
|       -D teflon=true | ||||
|     CC: clang-${LLVM_VERSION} | ||||
|     CXX: clang++-${LLVM_VERSION} | ||||
|  | ||||
| debian-clang-release: | ||||
|   extends: | ||||
|     - debian-clang | ||||
|     - .meson-build-only | ||||
|     - .build-run-long | ||||
|   variables: | ||||
|     BUILDTYPE: "release" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=xlib | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=true | ||||
|       -D gles1=disabled | ||||
|       -D gles2=disabled | ||||
|       -D llvm=enabled | ||||
|       -D microsoft-clc=disabled | ||||
|       -D shared-llvm=enabled | ||||
|  | ||||
| windows-msvc: | ||||
|   extends: | ||||
|     - .build-windows | ||||
|     - .use-windows_build_msvc | ||||
|     - .windows-build-rules | ||||
|   stage: build-for-tests | ||||
|   script: | ||||
|     - pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1 | ||||
|   artifacts: | ||||
|     paths: | ||||
|       - _build/meson-logs/*.txt | ||||
|       - _install/ | ||||
|  | ||||
| debian-vulkan: | ||||
|   extends: | ||||
|     - .meson-build-only | ||||
|     - .use-debian/x86_64_build | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     UNWIND: "disabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D gbm=disabled | ||||
|       -D egl=disabled | ||||
|       -D opengl=false | ||||
|       -D gles1=disabled | ||||
|       -D gles2=disabled | ||||
|       -D glvnd=disabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-rusticl=false | ||||
|       -D b_sanitize=undefined | ||||
|       -D c_args=-fno-sanitize-recover=all | ||||
|       -D cpp_args=-fno-sanitize-recover=all | ||||
|     UBSAN_OPTIONS: "print_stacktrace=1" | ||||
|     VULKAN_DRIVERS: amd,asahi,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau | ||||
|     EXTRA_OPTION: > | ||||
|       -D vulkan-layers=device-select,overlay | ||||
|       -D build-radv-tests=true | ||||
|       -D build-aco-tests=true | ||||
|       -D intel-rt=disabled | ||||
|       -D imagination-srv=true | ||||
|  | ||||
| debian-x86_32: | ||||
|   extends: | ||||
|     - .meson-cross | ||||
|     - .use-debian/x86_32_build | ||||
|     - .meson-build-only | ||||
|     - .build-run-long # it's not clear why this runs long, but it also doesn't matter much | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     CROSS: i386 | ||||
|     VULKAN_DRIVERS: intel,amd,swrast,virtio,panfrost | ||||
|     GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,llvmpipe,softpipe,virgl,zink,crocus,d3d12,panfrost" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|     EXTRA_OPTION: > | ||||
|       -D vulkan-layers=device-select,overlay | ||||
|       -D mesa-clc=system | ||||
|     C_LINK_ARGS: > | ||||
|       -Wl,--no-warn-rwx-segments | ||||
|     CPP_LINK_ARGS: > | ||||
|       -Wl,--no-warn-rwx-segments | ||||
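|     # Options for the separate native host build that provides the mesa-clc | ||||
|     # binary consumed above via -D mesa-clc=system. | ||||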
|     HOST_BUILD_OPTIONS: > | ||||
|       -D build-tests=false | ||||
|       -D enable-glcpp-tests=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D gallium-drivers= | ||||
|       -D vulkan-drivers= | ||||
|       -D video-codecs= | ||||
|       -D glx=disabled | ||||
|       -D platforms= | ||||
|       -D mesa-clc=enabled | ||||
|       -D install-mesa-clc=true | ||||
|  | ||||
| # While s390 is dead, s390x is very much alive, and one of the last major | ||||
| # big-endian platforms, so it provides useful coverage. | ||||
| # In case of issues with this job, contact @ajax | ||||
| debian-s390x: | ||||
|   extends: | ||||
|     - .meson-cross | ||||
|     - .use-debian/s390x_build | ||||
|     - .meson-build-only | ||||
|   tags: | ||||
|     - $FDO_RUNNER_JOB_PRIORITY_TAG_X86_64_KVM | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     CROSS: s390x | ||||
|     GALLIUM_DRIVERS: "llvmpipe,virgl,zink" | ||||
|     VULKAN_DRIVERS: "swrast,virtio" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|  | ||||
| debian-ppc64el: | ||||
|   extends: | ||||
|     - .meson-cross | ||||
|     - .use-debian/ppc64el_build | ||||
|     - .meson-build-only | ||||
|   variables: | ||||
|     BUILDTYPE: debug | ||||
|     CROSS: ppc64el | ||||
|     GALLIUM_DRIVERS: "nouveau,llvmpipe,softpipe,virgl,zink" | ||||
|     VULKAN_DRIVERS: "swrast" | ||||
|     DRI_LOADERS: | ||||
|       -D glvnd=disabled | ||||
|  | ||||
| # This job emits our scripts into artifacts so they can be reused for | ||||
| # job submission to hardware devices. | ||||
| python-artifacts: | ||||
|   stage: build-for-tests | ||||
|   extends: | ||||
|     - .use-debian/x86_64_pyutils | ||||
|     - .build-common | ||||
|     - .meson-build-for-tests | ||||
|   variables: | ||||
|     GIT_STRATEGY: fetch | ||||
|     S3_ARTIFACT_NAME: mesa-python-ci-artifacts | ||||
|   timeout: 10m | ||||
|   script: | ||||
|     - .gitlab-ci/prepare-artifacts-python.sh | ||||
| @@ -1,35 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2035 | ||||
| # shellcheck disable=SC2061 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| while true; do | ||||
|   devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null) | ||||
|   for i in $devcds; do | ||||
|     echo "Found a devcoredump at $i." | ||||
|     if cp $i $RESULTS_DIR/first.devcore; then | ||||
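|       # Writing to the data node tells the kernel to discard this devcoredump. | ||||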
|       echo 1 > $i | ||||
|       echo "Saved to the job artifacts at /first.devcore" | ||||
|       exit 0 | ||||
|     fi | ||||
|   done | ||||
|   i915_error_states=$(find /sys/devices/ -path */drm/card*/error) | ||||
|   for i in $i915_error_states; do | ||||
|     tmpfile=$(mktemp) | ||||
|     cp "$i" "$tmpfile" | ||||
|     filesize=$(stat --printf="%s" "$tmpfile") | ||||
|     # "No error state collected" plus a newline is exactly 25 bytes | ||||
|     if [ "$filesize" = 25 ]; then | ||||
|         rm "$tmpfile" | ||||
|     else | ||||
|         echo "Found an i915 error state at $i size=$filesize." | ||||
|         if cp "$tmpfile" $RESULTS_DIR/first.i915_error_state; then | ||||
|             rm "$tmpfile" | ||||
|             echo 1 > "$i" | ||||
|             echo "Saved to the job artifacts at /first.i915_error_state" | ||||
|             exit 0 | ||||
|         fi | ||||
|     fi | ||||
|   done | ||||
|   sleep 10 | ||||
| done | ||||
| @@ -1,142 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| VARS=( | ||||
|     ACO_DEBUG | ||||
|     ANGLE_TAG | ||||
|     ANGLE_TRACE_FILES_TAG | ||||
|     ANV_DEBUG | ||||
|     ARTIFACTS_BASE_URL | ||||
|     ASAN_OPTIONS | ||||
|     BASE_SYSTEM_FORK_HOST_PREFIX | ||||
|     BASE_SYSTEM_MAINLINE_HOST_PREFIX | ||||
|     CI_COMMIT_BRANCH | ||||
|     CI_COMMIT_REF_NAME | ||||
|     CI_COMMIT_TITLE | ||||
|     CI_JOB_ID | ||||
|     CI_JOB_NAME | ||||
|     CI_JOB_STARTED_AT | ||||
|     CI_JOB_URL | ||||
|     CI_MERGE_REQUEST_SOURCE_BRANCH_NAME | ||||
|     CI_MERGE_REQUEST_TITLE | ||||
|     CI_NODE_INDEX | ||||
|     CI_NODE_TOTAL | ||||
|     CI_PAGES_DOMAIN | ||||
|     CI_PIPELINE_ID | ||||
|     CI_PIPELINE_URL | ||||
|     CI_PROJECT_DIR | ||||
|     CI_PROJECT_NAME | ||||
|     CI_PROJECT_PATH | ||||
|     CI_PROJECT_ROOT_NAMESPACE | ||||
|     CI_RUNNER_DESCRIPTION | ||||
|     CI_SERVER_URL | ||||
|     CROSVM_GALLIUM_DRIVER | ||||
|     CROSVM_GPU_ARGS | ||||
|     CURRENT_SECTION | ||||
|     DEQP_BIN_DIR | ||||
|     DEQP_FORCE_ASAN | ||||
|     DEQP_FRACTION | ||||
|     DEQP_RUNNER_MAX_FAILS | ||||
|     DEQP_SUITE | ||||
|     DEQP_TEMP_DIR | ||||
|     DEVICE_NAME | ||||
|     DRIVER_NAME | ||||
|     EGL_PLATFORM | ||||
|     ETNA_MESA_DEBUG | ||||
|     FDO_CI_CONCURRENT | ||||
|     FDO_HTTP_CACHE_URI | ||||
|     FDO_UPSTREAM_REPO | ||||
|     FD_MESA_DEBUG | ||||
|     FLAKES_CHANNEL | ||||
|     FLUSTER_CODECS | ||||
|     FLUSTER_FRACTION | ||||
|     FLUSTER_VECTORS_VERSION | ||||
|     FREEDRENO_HANGCHECK_MS | ||||
|     GALLIUM_DRIVER | ||||
|     GALLIVM_PERF | ||||
|     GPU_VERSION | ||||
|     GTEST | ||||
|     GTEST_FAILS | ||||
|     GTEST_FRACTION | ||||
|     GTEST_RUNNER_OPTIONS | ||||
|     GTEST_SKIPS | ||||
|     HWCI_FREQ_MAX | ||||
|     HWCI_KERNEL_MODULES | ||||
|     HWCI_KVM | ||||
|     HWCI_START_WESTON | ||||
|     HWCI_START_XORG | ||||
|     HWCI_TEST_ARGS | ||||
|     HWCI_TEST_SCRIPT | ||||
|     INTEL_XE_IGNORE_EXPERIMENTAL_WARNING | ||||
|     IR3_SHADER_DEBUG | ||||
|     JOB_ARTIFACTS_BASE | ||||
|     JOB_RESULTS_PATH | ||||
|     JOB_ROOTFS_OVERLAY_PATH | ||||
|     KERNEL_IMAGE_BASE | ||||
|     KERNEL_IMAGE_NAME | ||||
|     LD_LIBRARY_PATH | ||||
|     LIBGL_ALWAYS_SOFTWARE | ||||
|     LP_NUM_THREADS | ||||
|     LVP_POISON_MEMORY | ||||
|     MESA_BASE_TAG | ||||
|     MESA_BUILD_PATH | ||||
|     MESA_DEBUG | ||||
|     MESA_GLES_VERSION_OVERRIDE | ||||
|     MESA_GLSL_VERSION_OVERRIDE | ||||
|     MESA_GL_VERSION_OVERRIDE | ||||
|     MESA_IMAGE | ||||
|     MESA_IMAGE_PATH | ||||
|     MESA_IMAGE_TAG | ||||
|     MESA_LOADER_DRIVER_OVERRIDE | ||||
|     MESA_SPIRV_LOG_LEVEL | ||||
|     MESA_TEMPLATES_COMMIT | ||||
|     MESA_VK_ABORT_ON_DEVICE_LOSS | ||||
|     MESA_VK_IGNORE_CONFORMANCE_WARNING | ||||
|     NIR_DEBUG | ||||
|     PANVK_DEBUG | ||||
|     PAN_I_WANT_A_BROKEN_VULKAN_DRIVER | ||||
|     PAN_MESA_DEBUG | ||||
|     PIGLIT_FRACTION | ||||
|     PIGLIT_NO_WINDOW | ||||
|     PIGLIT_OPTIONS | ||||
|     PIGLIT_PLATFORM | ||||
|     PIGLIT_REPLAY_ANGLE_ARCH | ||||
|     PIGLIT_REPLAY_ARTIFACTS_BASE_URL | ||||
|     PIGLIT_REPLAY_DEVICE_NAME | ||||
|     PIGLIT_REPLAY_EXTRA_ARGS | ||||
|     PIGLIT_REPLAY_LOOP_TIMES | ||||
|     PIGLIT_REPLAY_REFERENCE_IMAGES_BASE | ||||
|     PIGLIT_REPLAY_SUBCOMMAND | ||||
|     PIGLIT_RESULTS | ||||
|     PIGLIT_RUNNER_OPTIONS | ||||
|     PIGLIT_TESTS | ||||
|     PIGLIT_TRACES_FILE | ||||
|     PIPELINE_ARTIFACTS_BASE | ||||
|     RADEON_DEBUG | ||||
|     RADV_DEBUG | ||||
|     radv_enable_float16_gfx8 | ||||
|     RADV_PERFTEST | ||||
|     S3_HOST | ||||
|     S3_JWT_FILE | ||||
|     S3_RESULTS_UPLOAD | ||||
|     SKQP_ASSETS_DIR | ||||
|     SKQP_BACKENDS | ||||
|     STORAGE_FORK_HOST_PATH | ||||
|     STORAGE_MAINLINE_HOST_PATH | ||||
|     TU_DEBUG | ||||
|     VIRGL_HOST_API | ||||
|     VIRGL_RENDER_SERVER | ||||
|     VK_DRIVER | ||||
|     WAFFLE_PLATFORM | ||||
|     ZINK_DEBUG | ||||
|     ZINK_DESCRIPTORS | ||||
|  | ||||
|     # Dead code within Mesa CI, but required by virglrenderer CI | ||||
|     # (because they include our files in their CI) | ||||
|     VK_DRIVER_FILES | ||||
| ) | ||||
|  | ||||
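| # Emit an "export VAR=value" line for every variable above that is set, | ||||
| # using ${!var@Q} to shell-quote the value. | ||||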
| for var in "${VARS[@]}"; do | ||||
|   if [ -n "${!var+x}" ]; then | ||||
|     echo "export $var=${!var@Q}" | ||||
|   fi | ||||
| done | ||||
| @@ -1,25 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Very early init, used to make sure devices and network are set up and | ||||
| # reachable. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| cd / | ||||
|  | ||||
| findmnt --mountpoint /proc || mount -t proc none /proc | ||||
| findmnt --mountpoint /sys || mount -t sysfs none /sys | ||||
| mount -t debugfs none /sys/kernel/debug | ||||
| findmnt --mountpoint /dev || mount -t devtmpfs none /dev | ||||
| mkdir -p /dev/pts | ||||
| mount -t devpts devpts /dev/pts | ||||
| mkdir /dev/shm | ||||
| mount -t tmpfs -o noexec,nodev,nosuid tmpfs /dev/shm | ||||
| mount -t tmpfs tmpfs /tmp | ||||
|  | ||||
| echo "nameserver 8.8.8.8" > /etc/resolv.conf | ||||
| [ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts | ||||
|  | ||||
| # Set the time so we can validate certificates before we fetch anything; | ||||
| # however as not all DUTs have network, make this non-fatal. | ||||
| for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true | ||||
| @@ -1,249 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC1090 | ||||
| # shellcheck disable=SC1091 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
| # shellcheck disable=SC2155 | ||||
|  | ||||
| # Second-stage init, used to set up devices and our job environment before | ||||
| # running tests. | ||||
|  | ||||
| shopt -s extglob | ||||
|  | ||||
| # Make sure to kill this script and all of its child processes on exit, since | ||||
| # any console output may interfere with LAVA signal handling, which is based | ||||
| # on the console log. | ||||
| cleanup() { | ||||
|   if [ "$BACKGROUND_PIDS" = "" ]; then | ||||
|     return 0 | ||||
|   fi | ||||
|  | ||||
|   set +x | ||||
|   echo "Killing all child processes" | ||||
|   for pid in $BACKGROUND_PIDS | ||||
|   do | ||||
|     kill "$pid" 2>/dev/null || true | ||||
|   done | ||||
|  | ||||
|   # Sleep just a little to give enough time for subprocesses to be gracefully | ||||
|   # killed. Then apply a SIGKILL if necessary. | ||||
|   sleep 5 | ||||
|   for pid in $BACKGROUND_PIDS | ||||
|   do | ||||
|     kill -9 "$pid" 2>/dev/null || true | ||||
|   done | ||||
|  | ||||
|   BACKGROUND_PIDS= | ||||
|   set -x | ||||
| } | ||||
| trap cleanup INT TERM EXIT | ||||
|  | ||||
| # Space-separated list of the PIDs of the processes started in the background | ||||
| # by this script | ||||
| BACKGROUND_PIDS= | ||||
|  | ||||
|  | ||||
| for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do | ||||
|     [ -f "$path" ] && source "$path" | ||||
| done | ||||
| . "$SCRIPTS_DIR"/setup-test-env.sh | ||||
|  | ||||
| # Flush out anything which might be stuck in a serial buffer | ||||
| echo | ||||
| echo | ||||
| echo | ||||
|  | ||||
| section_switch init_stage2 "Pre-testing hardware setup" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Set up any devices required by the jobs | ||||
| [ -z "$HWCI_KERNEL_MODULES" ] || { | ||||
|     echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe | ||||
| } | ||||
|  | ||||
| # Set up ZRAM | ||||
| HWCI_ZRAM_SIZE=2G | ||||
| if /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then | ||||
|     mkswap /dev/zram0 | ||||
|     swapon /dev/zram0 | ||||
|     echo "zram: $HWCI_ZRAM_SIZE activated" | ||||
| else | ||||
|     echo "zram: skipping, not supported" | ||||
| fi | ||||
|  | ||||
| # | ||||
| # Load the KVM module specific to the detected CPU virtualization extensions: | ||||
| # - vmx for Intel VT | ||||
| # - svm for AMD-V | ||||
| # | ||||
| # Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT. | ||||
| # | ||||
| if [ "$HWCI_KVM" = "true" ]; then | ||||
|     unset KVM_KERNEL_MODULE | ||||
|     { | ||||
|       grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel | ||||
|     } || { | ||||
|       grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd | ||||
|     } | ||||
|  | ||||
|     { | ||||
|       [ -z "${KVM_KERNEL_MODULE}" ] && \ | ||||
|       echo "WARNING: Failed to detect CPU virtualization extensions" | ||||
|     } || \ | ||||
|         modprobe ${KVM_KERNEL_MODULE} | ||||
|  | ||||
|     mkdir -p /kernel | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
| 	-o "/kernel/${KERNEL_IMAGE_NAME}" \ | ||||
|         "${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}" | ||||
| fi | ||||
|  | ||||
| # Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect | ||||
| # it in /install | ||||
| ln -sf $CI_PROJECT_DIR/install /install | ||||
| export LD_LIBRARY_PATH=/install/lib | ||||
| export LIBGL_DRIVERS_PATH=/install/lib/dri | ||||
|  | ||||
| # https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691 | ||||
| # The navi21 boards seem to have trouble with ld.so.cache, so try explicitly | ||||
| # telling it to look in /usr/local/lib. | ||||
| export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib | ||||
|  | ||||
| # Store Mesa's disk cache under /tmp, rather than sending it out over NFS. | ||||
| export XDG_CACHE_HOME=/tmp | ||||
|  | ||||
| # Make sure Python can find all our imports | ||||
| export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))") | ||||
|  | ||||
| # If we need to specify a driver, it means several drivers could pick up this GPU; | ||||
| # ensure that the other drivers can't accidentally be used | ||||
| if [ -n "$MESA_LOADER_DRIVER_OVERRIDE" ]; then | ||||
|   rm /install/lib/dri/!($MESA_LOADER_DRIVER_OVERRIDE)_dri.so | ||||
| fi | ||||
| ls -1 /install/lib/dri/*_dri.so || true | ||||
|  | ||||
| if [ "$HWCI_FREQ_MAX" = "true" ]; then | ||||
|   # Ensure initialization of the DRM device (needed by MSM) | ||||
|   head -0 /dev/dri/renderD128 | ||||
|  | ||||
|   # Disable GPU frequency scaling | ||||
|   DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true) | ||||
|   test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true | ||||
|  | ||||
|   # Disable CPU frequency scaling | ||||
|   echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true | ||||
|  | ||||
|   # Disable GPU runtime power management | ||||
|   GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1) | ||||
|   test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true | ||||
|   # Lock Intel GPU frequency to 70% of the maximum allowed by hardware | ||||
|   # and enable throttling detection & reporting. | ||||
|   # Additionally, set the upper limit for CPU scaling frequency to 65% of the | ||||
|   # maximum permitted, as an additional measure to mitigate thermal throttling. | ||||
|   /install/common/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d | ||||
| fi | ||||
|  | ||||
| # Start a little daemon to capture sysfs records and produce a JSON file | ||||
| KDL_PATH=/install/common/kdl.sh | ||||
| if [ -x "$KDL_PATH" ]; then | ||||
|   echo "launch kdl.sh!" | ||||
|   $KDL_PATH & | ||||
|   BACKGROUND_PIDS="$! $BACKGROUND_PIDS" | ||||
| else | ||||
|   echo "kdl.sh not found!" | ||||
| fi | ||||
|  | ||||
| # Increase the freedreno hangcheck timer because the default is right at the | ||||
| # edge of the spilling tests timing out (and some traces, too) | ||||
| if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then | ||||
|     echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms | ||||
| fi | ||||
|  | ||||
| # Start a little daemon to capture the first devcoredump we encounter.  (They | ||||
| # expire after 5 minutes, so we poll for them). | ||||
| CAPTURE_DEVCOREDUMP=/install/common/capture-devcoredump.sh | ||||
| if [ -x "$CAPTURE_DEVCOREDUMP" ]; then | ||||
|   $CAPTURE_DEVCOREDUMP & | ||||
|   BACKGROUND_PIDS="$! $BACKGROUND_PIDS" | ||||
| fi | ||||
|  | ||||
| ARCH=$(uname -m) | ||||
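| # Tell the Vulkan loader to use only the ICD under test. | ||||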
| export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json" | ||||
|  | ||||
| # If we want Xorg to be running for the test, start it before the | ||||
| # HWCI_TEST_SCRIPT. We have to use xinit to start X (otherwise, without | ||||
| # -displayfd, you can race with Xorg's startup), but xinit would eat the | ||||
| # client's return code, so X runs a dummy script and the test runs separately. | ||||
| if [ -n "$HWCI_START_XORG" ]; then | ||||
|   echo "touch /xorg-started; sleep 100000" > /xorg-script | ||||
|   env \ | ||||
|     xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile "$RESULTS_DIR/Xorg.0.log" & | ||||
|   BACKGROUND_PIDS="$! $BACKGROUND_PIDS" | ||||
|  | ||||
|   # Wait for xorg to be ready for connections. | ||||
|   for _ in 1 2 3 4 5; do | ||||
|     if [ -e /xorg-started ]; then | ||||
|       break | ||||
|     fi | ||||
|     sleep 5 | ||||
|   done | ||||
|   export DISPLAY=:0 | ||||
| fi | ||||
|  | ||||
| if [ -n "$HWCI_START_WESTON" ]; then | ||||
|   WESTON_X11_SOCK="/tmp/.X11-unix/X0" | ||||
|   if [ -n "$HWCI_START_XORG" ]; then | ||||
|     echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing." | ||||
|     WESTON_X11_SOCK="/tmp/.X11-unix/X1" | ||||
|   fi | ||||
|   export WAYLAND_DISPLAY=wayland-0 | ||||
|  | ||||
|   # The display server is Weston's Xwayland when HWCI_START_XORG is not set, or Xorg when it is | ||||
|   export DISPLAY=:0 | ||||
|   mkdir -p /tmp/.X11-unix | ||||
|  | ||||
|   env \ | ||||
|     weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 & | ||||
|   BACKGROUND_PIDS="$! $BACKGROUND_PIDS" | ||||
|  | ||||
|   while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done | ||||
| fi | ||||
|  | ||||
| set +x | ||||
|  | ||||
| section_end init_stage2 | ||||
|  | ||||
| echo "Running ${HWCI_TEST_SCRIPT} ${HWCI_TEST_ARGS} ..." | ||||
|  | ||||
| set +e | ||||
| $HWCI_TEST_SCRIPT ${HWCI_TEST_ARGS:-}; EXIT_CODE=$? | ||||
| set -e | ||||
|  | ||||
| section_start post_test_cleanup "Cleaning up after testing, uploading results" | ||||
| set -x | ||||
|  | ||||
| # Make sure that capture-devcoredump is done before we start trying to tar up | ||||
| # artifacts -- if it's writing while tar is reading, tar will throw an error and | ||||
| # kill the job. | ||||
| cleanup | ||||
|  | ||||
| # upload artifacts (lava jobs) | ||||
| if [ -n "$S3_RESULTS_UPLOAD" ]; then | ||||
|   tar --zstd -cf results.tar.zst results/; | ||||
|   s3_upload results.tar.zst "https://${S3_RESULTS_UPLOAD}/" | ||||
| fi | ||||
|  | ||||
| # We still need to echo the hwci: mesa message, as some scripts rely on it, such | ||||
| # as the python ones inside the bare-metal folder | ||||
| [ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail | ||||
|  | ||||
| set +x | ||||
| section_end post_test_cleanup | ||||
|  | ||||
| # Print the final result; both bare-metal and LAVA look for this string to get | ||||
| # the result of our run, so try really hard to get it out rather than losing | ||||
| # the run. The device gets shut down right at this point, and a630 seems to | ||||
| # enjoy corrupting the last line of serial output before shutdown. | ||||
| for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done | ||||
|  | ||||
| exit $EXIT_CODE | ||||
| @@ -1,820 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2013 | ||||
| # shellcheck disable=SC2015 | ||||
| # shellcheck disable=SC2034 | ||||
| # shellcheck disable=SC2046 | ||||
| # shellcheck disable=SC2059 | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
| # shellcheck disable=SC2154 | ||||
| # shellcheck disable=SC2155 | ||||
| # shellcheck disable=SC2162 | ||||
| # shellcheck disable=SC2229 | ||||
| # | ||||
| # This is a utility script to manage Intel GPU frequencies. | ||||
| # It can be used for debugging performance problems or trying to obtain a stable | ||||
| # frequency while benchmarking. | ||||
| # | ||||
| # Note the Intel i915 GPU driver allows changing the minimum, maximum and boost | ||||
| # frequencies in steps of 50 MHz via: | ||||
| # | ||||
| # /sys/class/drm/card<n>/<freq_info> | ||||
| # | ||||
| # Where <n> is the DRM card index and <freq_info> one of the following: | ||||
| # | ||||
| # - gt_max_freq_mhz (enforced maximum freq) | ||||
| # - gt_min_freq_mhz (enforced minimum freq) | ||||
| # - gt_boost_freq_mhz (enforced boost freq) | ||||
| # | ||||
| # The hardware capabilities can be accessed via: | ||||
| # | ||||
| # - gt_RP0_freq_mhz (supported maximum freq) | ||||
| # - gt_RPn_freq_mhz (supported minimum freq) | ||||
| # - gt_RP1_freq_mhz (most efficient freq) | ||||
| # | ||||
| # The current frequency can be read from: | ||||
| # - gt_act_freq_mhz (the actual GPU freq) | ||||
| # - gt_cur_freq_mhz (the last requested freq) | ||||
| # | ||||
| # Intel later switched to per-tile sysfs interfaces, which is what the Xe DRM | ||||
| # driver exclusively uses, and the capabilities are now located under the | ||||
| # following directory for the first tile: | ||||
| # | ||||
| # /sys/class/drm/card<n>/device/tile0/gt0/freq0/<freq_info> | ||||
| # | ||||
| # Where <n> is the DRM card index and <freq_info> one of the following: | ||||
| # | ||||
| # - max_freq (enforced maximum freq) | ||||
| # - min_freq (enforced minimum freq) | ||||
| # | ||||
| # The hardware capabilities can be accessed via: | ||||
| # | ||||
| # - rp0_freq (supported maximum freq) | ||||
| # - rpn_freq (supported minimum freq) | ||||
| # - rpe_freq (most efficient freq) | ||||
| # | ||||
| # The current frequency can be read from: | ||||
| # - act_freq (the actual GPU freq) | ||||
| # - cur_freq (the last requested freq) | ||||
| # | ||||
| # Also note that in addition to GPU management, the script offers the | ||||
| # possibility to adjust CPU operating frequencies. However, this is currently | ||||
| # limited to setting the maximum scaling frequency as a percentage of the | ||||
| # maximum frequency allowed by the hardware. | ||||
| # | ||||
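| # Example invocation (as used by the CI init scripts): | ||||
| # | ||||
| #   intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d | ||||
| # | ||||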
| # Copyright (C) 2022 Collabora Ltd. | ||||
| # Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> | ||||
| # | ||||
| # SPDX-License-Identifier: MIT | ||||
| # | ||||
|  | ||||
| # | ||||
| # Constants | ||||
| # | ||||
|  | ||||
| # Check if any /sys/class/drm/cardX/device/tile0 directory exists to detect Xe | ||||
| USE_XE=0 | ||||
| for i in $(seq 0 15); do | ||||
|     if [ -d "/sys/class/drm/card$i/device/tile0" ]; then | ||||
|         USE_XE=1 | ||||
|         break | ||||
|     fi | ||||
| done | ||||
|  | ||||
| # GPU | ||||
| if [ "$USE_XE" -eq 1 ]; then | ||||
|     DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/device/tile0/gt0/freq0/%s_freq" | ||||
|     ENF_FREQ_INFO="max min" | ||||
|     CAP_FREQ_INFO="rp0 rpn rpe" | ||||
| else | ||||
|     DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz" | ||||
|     ENF_FREQ_INFO="max min boost" | ||||
|     CAP_FREQ_INFO="RP0 RPn RP1" | ||||
| fi | ||||
| ACT_FREQ_INFO="act cur" | ||||
| THROTT_DETECT_SLEEP_SEC=2 | ||||
| THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid | ||||
|  | ||||
| # CPU | ||||
| CPU_SYSFS_PREFIX=/sys/devices/system/cpu | ||||
| CPU_PSTATE_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/intel_pstate/%s" | ||||
| CPU_FREQ_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/cpu%s/cpufreq/%s_freq" | ||||
| CAP_CPU_FREQ_INFO="cpuinfo_max cpuinfo_min" | ||||
| ENF_CPU_FREQ_INFO="scaling_max scaling_min" | ||||
| ACT_CPU_FREQ_INFO="scaling_cur" | ||||
|  | ||||
| # | ||||
| # Global variables. | ||||
| # | ||||
| unset INTEL_DRM_CARD_INDEX | ||||
| unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ | ||||
| unset SET_MIN_FREQ SET_MAX_FREQ | ||||
| unset MONITOR_FREQ | ||||
| unset CPU_SET_MAX_FREQ | ||||
| unset DETECT_THROTT | ||||
| unset DRY_RUN | ||||
|  | ||||
| # | ||||
| # Simple printf based stderr logger. | ||||
| # | ||||
| log() { | ||||
|     local msg_type=$1 | ||||
|  | ||||
|     shift | ||||
|     printf "%s: %s: " "${msg_type}" "${0##*/}" >&2 | ||||
|     printf "$@" >&2 | ||||
|     printf "\n" >&2 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to print sysfs path for the given card index and freq info. | ||||
| # | ||||
| # arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above | ||||
| # arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX | ||||
| # | ||||
| print_freq_sysfs_path() { | ||||
|     printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1" | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card. | ||||
| # | ||||
| identify_intel_gpu() { | ||||
|     local i=0 vendor path | ||||
|  | ||||
|     while [ ${i} -lt 16 ]; do | ||||
|         [ -c "/dev/dri/card$i" ] || { | ||||
|             i=$((i + 1)) | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         path=$(print_freq_sysfs_path "" ${i}) | ||||
|         if [ "$USE_XE" -eq 1 ]; then | ||||
|             path=${path%/*/*/*/*/*}/device/vendor | ||||
|         else | ||||
|             path=${path%/*}/device/vendor | ||||
|         fi | ||||
|  | ||||
|         [ -r "${path}" ] && read vendor < "${path}" && \ | ||||
|             [ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0 | ||||
|  | ||||
|         i=$((i + 1)) | ||||
|     done | ||||
|  | ||||
|     return 1 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Read the specified freq info from sysfs. | ||||
| # | ||||
| # arg1: Flag (y/n) to also enable printing the freq info. | ||||
| # arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above | ||||
| # return: Global variable(s) FREQ_${arg} containing the requested information | ||||
| # | ||||
| read_freq_info() { | ||||
|     local var val info path print=0 ret=0 | ||||
|  | ||||
|     [ "$1" = "y" ] && print=1 | ||||
|     shift | ||||
|  | ||||
|     while [ $# -gt 0 ]; do | ||||
|         info=$1 | ||||
|         shift | ||||
|         var=FREQ_${info} | ||||
|         path=$(print_freq_sysfs_path "${info}") | ||||
|  | ||||
|         [ -r ${path} ] && read ${var} < ${path} || { | ||||
|             log ERROR "Failed to read freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ -n "${var}" ] || { | ||||
|             log ERROR "Got empty freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ ${print} -eq 1 ] && { | ||||
|             eval val=\$${var} | ||||
|             printf "%6s: %4s MHz\n" "${info}" "${val}" | ||||
|         } | ||||
|     done | ||||
|  | ||||
|     return ${ret} | ||||
| } | ||||
|  | ||||
| # | ||||
| # Display requested info. | ||||
| # | ||||
| print_freq_info() { | ||||
|     local req_freq | ||||
|  | ||||
|     [ -n "${GET_CAP_FREQ}" ] && { | ||||
|         printf "* Hardware capabilities\n" | ||||
|         read_freq_info y ${CAP_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
|  | ||||
|     [ -n "${GET_ENF_FREQ}" ] && { | ||||
|         printf "* Enforcements\n" | ||||
|         read_freq_info y ${ENF_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
|  | ||||
|     [ -n "${GET_ACT_FREQ}" ] && { | ||||
|         printf "* Actual\n" | ||||
|         read_freq_info y ${ACT_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to print frequency value as requested by user via '-s, --set' option. | ||||
| # arg1: user requested freq value | ||||
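| #       e.g. "+" selects the hw max (RP0/rp0), "-" the hw min (RPn/rpn), and | ||||
| #       "70%" takes 70% of the hw max; percentage and plain MHz values are | ||||
| #       rounded down to 50 MHz steps. | ||||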
| # | ||||
| compute_freq_set() { | ||||
|     local val | ||||
|  | ||||
|     case "$1" in | ||||
|     +) | ||||
|         val=$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}")  # FREQ_rp0 or FREQ_RP0 | ||||
|         ;; | ||||
|     -) | ||||
|         val=$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")  # FREQ_rpn or FREQ_RPn | ||||
|         ;; | ||||
|     *%) | ||||
|         val=$((${1%?} * $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}") / 100)) | ||||
|         # Adjust freq to comply with 50 MHz increments | ||||
|         val=$((val / 50 * 50)) | ||||
|         ;; | ||||
|     *[!0-9]*) | ||||
|         log ERROR "Cannot set freq to invalid value: %s" "$1" | ||||
|         return 1 | ||||
|         ;; | ||||
|     "") | ||||
|         log ERROR "Cannot set freq to unspecified value" | ||||
|         return 1 | ||||
|         ;; | ||||
|     *) | ||||
|         # Adjust freq to comply with 50 MHz increments | ||||
|         val=$(($1 / 50 * 50)) | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     printf "%s" "${val}" | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for set_freq(). | ||||
| # | ||||
| set_freq_max() { | ||||
|     log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}" | ||||
|  | ||||
|     read_freq_info n min || return $? | ||||
|  | ||||
|     # FREQ_rp0 or FREQ_RP0 | ||||
|     [ ${SET_MAX_FREQ} -gt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}") ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \ | ||||
|             "${SET_MAX_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f1)}")" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     # FREQ_rpn or FREQ_RPn | ||||
|     [ ${SET_MAX_FREQ} -lt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \ | ||||
|             "${SET_MAX_FREQ}" "${FREQ_min}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${DRY_RUN}" ] || return 0 | ||||
|  | ||||
|     # Write to max freq path | ||||
|     if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) > /dev/null; | ||||
|     then | ||||
|         log ERROR "Failed to set GPU max frequency" | ||||
|         return 1 | ||||
|     fi | ||||
|  | ||||
|     # Only write to boost if the sysfs file exists, as it's removed in Xe | ||||
|     if [ -e "$(print_freq_sysfs_path boost)" ]; then | ||||
|         if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path boost) > /dev/null; | ||||
|         then | ||||
|             log ERROR "Failed to set GPU boost frequency" | ||||
|             return 1 | ||||
|         fi | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for set_freq(). | ||||
| # | ||||
| set_freq_min() { | ||||
|     log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}" | ||||
|  | ||||
|     read_freq_info n max || return $? | ||||
|  | ||||
|     [ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && { | ||||
|         log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "${FREQ_max}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ ${SET_MIN_FREQ} -lt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && { | ||||
|         log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "$(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}")" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${DRY_RUN}" ] || return 0 | ||||
|  | ||||
|     if ! printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min); | ||||
|     then | ||||
|         log ERROR "Failed to set GPU min frequency" | ||||
|         return 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # | ||||
| # Set min or max or both GPU frequencies to the user indicated values. | ||||
| # | ||||
| set_freq() { | ||||
|     # Get hw max & min frequencies | ||||
|     read_freq_info n $(echo $CAP_FREQ_INFO | cut -d' ' -f1,2) || return $? # RP0 RPn | ||||
|  | ||||
|     [ -z "${SET_MAX_FREQ}" ] || { | ||||
|         SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}") | ||||
|         [ -z "${SET_MAX_FREQ}" ] && return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${SET_MIN_FREQ}" ] || { | ||||
|         SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}") | ||||
|         [ -z "${SET_MIN_FREQ}" ] && return 1 | ||||
|     } | ||||
|  | ||||
|     # | ||||
|     # Ensure correct operation order, to avoid setting min freq | ||||
|     # to a value which is larger than max freq. | ||||
|     # | ||||
|     # E.g.: | ||||
|     #   crt_min=crt_max=600; new_min=new_max=700 | ||||
|     #   > operation order: max=700; min=700 | ||||
|     # | ||||
|     #   crt_min=crt_max=600; new_min=new_max=500 | ||||
|     #   > operation order: min=500; max=500 | ||||
|     # | ||||
|     if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then | ||||
|         [ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && { | ||||
|             log ERROR "Cannot set GPU max freq to be less than min freq" | ||||
|             return 1 | ||||
|         } | ||||
|  | ||||
|         read_freq_info n min || return $? | ||||
|  | ||||
|         if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then | ||||
|             set_freq_min || return $? | ||||
|             set_freq_max | ||||
|         else | ||||
|             set_freq_max || return $? | ||||
|             set_freq_min | ||||
|         fi | ||||
|     elif [ -n "${SET_MAX_FREQ}" ]; then | ||||
|         set_freq_max | ||||
|     elif [ -n "${SET_MIN_FREQ}" ]; then | ||||
|         set_freq_min | ||||
|     else | ||||
|         log "Unexpected call to set_freq()" | ||||
|         return 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for detect_throttling(). | ||||
| # | ||||
| get_thrott_detect_pid() { | ||||
|     [ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0 | ||||
|  | ||||
|     local pid | ||||
|     read pid < ${THROTT_DETECT_PID_FILE_PATH} || { | ||||
|         log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     local proc_path=/proc/${pid:-invalid}/cmdline | ||||
|     [ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && { | ||||
|         printf "%s" "${pid}" | ||||
|         return 0 | ||||
|     } | ||||
|  | ||||
|     # Remove orphaned PID file | ||||
|     rm -rf ${THROTT_DETECT_PID_FILE_PATH} | ||||
|     return 1 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Control detection and reporting of GPU throttling events. | ||||
| # arg1: start - run throttle detector in background | ||||
| #       stop - stop throttle detector process, if any | ||||
| #       status - verify if throttle detector is running | ||||
| # | ||||
| detect_throttling() { | ||||
|     local pid | ||||
|     pid=$(get_thrott_detect_pid) | ||||
|  | ||||
|     case "$1" in | ||||
|     status) | ||||
|         printf "Throttling detector is " | ||||
|         [ -z "${pid}" ] && printf "not running\n" && return 0 | ||||
|         printf "running (pid=%s)\n" ${pid} | ||||
|         ;; | ||||
|  | ||||
|     stop) | ||||
|         [ -z "${pid}" ] && return 0 | ||||
|  | ||||
|         log INFO "Stopping throttling detector (pid=%s)" "${pid}" | ||||
|         kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid} | ||||
|         rm -rf ${THROTT_DETECT_PID_FILE_PATH} | ||||
|         ;; | ||||
|  | ||||
|     start) | ||||
|         [ -n "${pid}" ] && { | ||||
|             log WARN "Throttling detector is already running (pid=%s)" ${pid} | ||||
|             return 0 | ||||
|         } | ||||
|  | ||||
|         ( | ||||
|             read_freq_info n $(echo $CAP_FREQ_INFO | cut -d' ' -f2) || return $? # RPn | ||||
|  | ||||
|             while true; do | ||||
|                 sleep ${THROTT_DETECT_SLEEP_SEC} | ||||
|                 read_freq_info n act min cur || exit $? | ||||
|  | ||||
|                 # | ||||
|                 # The throttling seems to occur when act freq goes below min. | ||||
|                 # However, it's necessary to exclude the idle states, where | ||||
|                 # act freq normally reaches rpn and cur goes below min. | ||||
|                 # | ||||
|                 [ ${FREQ_act} -lt ${FREQ_min} ] && \ | ||||
|                 [ ${FREQ_act} -gt $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") ] && \ | ||||
|                 [ ${FREQ_cur} -ge ${FREQ_min} ] && \ | ||||
|                     printf "GPU throttling detected: act=%s min=%s cur=%s rpn=%s\n" \ | ||||
|                     ${FREQ_act} ${FREQ_min} ${FREQ_cur} $(eval "echo \${FREQ_$(echo $CAP_FREQ_INFO | cut -d' ' -f2)}") | ||||
|             done | ||||
|         ) & | ||||
|  | ||||
|         pid=$! | ||||
|         log INFO "Started GPU throttling detector (pid=%s)" ${pid} | ||||
|  | ||||
|         printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \ | ||||
|             log WARN "Failed to write throttle detector PID file" | ||||
|         ;; | ||||
|     esac | ||||
| } | ||||
|  | ||||
| # | ||||
| # Retrieve the list of online CPUs. | ||||
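| # e.g. prints "0 1 2 3"; CPU0 is hardcoded since it typically has no | ||||
| # "online" attribute. | ||||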
| # | ||||
| get_online_cpus() { | ||||
|     local path cpu_index | ||||
|  | ||||
|     printf "0" | ||||
|     for path in $(grep 1 ${CPU_SYSFS_PREFIX}/cpu*/online); do | ||||
|         cpu_index=${path##*/cpu} | ||||
|         printf " %s" ${cpu_index%%/*} | ||||
|     done | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to print sysfs path for the given CPU index and freq info. | ||||
| # | ||||
| # arg1: Frequency info sysfs name, one of *_CPU_FREQ_INFO constants above | ||||
| # arg2: CPU index | ||||
| # | ||||
| print_cpu_freq_sysfs_path() { | ||||
|     printf ${CPU_FREQ_SYSFS_PATTERN} "$2" "$1" | ||||
| } | ||||
|  | ||||
| # | ||||
| # Read the specified CPU freq info from sysfs. | ||||
| # | ||||
| # arg1: CPU index | ||||
| # arg2: Flag (y/n) to also enable printing the freq info. | ||||
| # arg3...: Frequency info sysfs name(s), see *_CPU_FREQ_INFO constants above | ||||
| # return: Global variable(s) CPU_FREQ_${arg} containing the requested information | ||||
| # | ||||
| read_cpu_freq_info() { | ||||
|     local var val info path cpu_index print=0 ret=0 | ||||
|  | ||||
|     cpu_index=$1 | ||||
|     [ "$2" = "y" ] && print=1 | ||||
|     shift 2 | ||||
|  | ||||
|     while [ $# -gt 0 ]; do | ||||
|         info=$1 | ||||
|         shift | ||||
|         var=CPU_FREQ_${info} | ||||
|         path=$(print_cpu_freq_sysfs_path "${info}" ${cpu_index}) | ||||
|  | ||||
|         [ -r ${path} ] && read ${var} < ${path} || { | ||||
|             log ERROR "Failed to read CPU freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ -n "${var}" ] || { | ||||
|             log ERROR "Got empty CPU freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ ${print} -eq 1 ] && printf "%6s: %4s kHz\n" "${info}" "${val}" | ||||
|     done | ||||
|  | ||||
|     return ${ret} | ||||
| } | ||||
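|  | ||||
| # For example (hypothetical invocation), 'read_cpu_freq_info 0 y scaling_max' | ||||
| # reads cpu0's scaling_max sysfs node into CPU_FREQ_scaling_max and prints it. | ||||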
|  | ||||
| # | ||||
| # Helper to print freq. value as requested by user via '--cpu-set-max' option. | ||||
| # arg1: user requested freq value | ||||
| # | ||||
| compute_cpu_freq_set() { | ||||
|     local val | ||||
|  | ||||
|     case "$1" in | ||||
|     +) | ||||
|         val=${CPU_FREQ_cpuinfo_max} | ||||
|         ;; | ||||
|     -) | ||||
|         val=${CPU_FREQ_cpuinfo_min} | ||||
|         ;; | ||||
|     *%) | ||||
|         val=$((${1%?} * CPU_FREQ_cpuinfo_max / 100)) | ||||
|         ;; | ||||
|     *[!0-9]*) | ||||
|         log ERROR "Cannot set CPU freq to invalid value: %s" "$1" | ||||
|         return 1 | ||||
|         ;; | ||||
|     "") | ||||
|         log ERROR "Cannot set CPU freq to unspecified value" | ||||
|         return 1 | ||||
|         ;; | ||||
|     *) | ||||
|         log ERROR "Cannot set CPU freq to custom value; use +, -, or % instead" | ||||
|         return 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     printf "%s" "${val}" | ||||
| } | ||||
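|  | ||||
| # For example, with a hypothetical CPU_FREQ_cpuinfo_max=3600000 (kHz), '50%' | ||||
| # yields 1800000, while '+' and '-' print the cpuinfo max/min values as-is. | ||||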
|  | ||||
| # | ||||
| # Adjust CPU max scaling frequency. | ||||
| # | ||||
| set_cpu_freq_max() { | ||||
|     local target_freq res=0 | ||||
|     case "${CPU_SET_MAX_FREQ}" in | ||||
|     +) | ||||
|         target_freq=100 | ||||
|         ;; | ||||
|     -) | ||||
|         target_freq=1 | ||||
|         ;; | ||||
|     *%) | ||||
|         target_freq=${CPU_SET_MAX_FREQ%?} | ||||
|         ;; | ||||
|     *) | ||||
|         log ERROR "Invalid CPU freq" | ||||
|         return 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
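|     # Note: intel_pstate's max_perf_pct takes a percentage (1-100), which is | ||||
|     # why '+' and '-' map to 100 and 1 in the case statement above. | ||||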
|     local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct) | ||||
|     [ -e "${pstate_info}" ] && { | ||||
|         log INFO "Setting intel_pstate max perf to %s" "${target_freq}%" | ||||
|         if ! printf "%s" "${target_freq}" > "${pstate_info}"; | ||||
| 	then | ||||
|             log ERROR "Failed to set intel_pstate max perf" | ||||
|             res=1 | ||||
| 	fi | ||||
|     } | ||||
|  | ||||
|     local cpu_index | ||||
|     for cpu_index in $(get_online_cpus); do | ||||
|         read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; } | ||||
|  | ||||
|         target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}") | ||||
|         tf_res=$? | ||||
|         [ -z "${target_freq}" ] && { res=$tf_res; continue; } | ||||
|  | ||||
|         log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}" | ||||
|         [ -n "${DRY_RUN}" ] && continue | ||||
|  | ||||
|         if ! printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index}); | ||||
| 	then | ||||
|             res=1 | ||||
|             log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index} | ||||
| 	fi | ||||
|     done | ||||
|  | ||||
|     return ${res} | ||||
| } | ||||
|  | ||||
| # | ||||
| # Show help message. | ||||
| # | ||||
| print_usage() { | ||||
|     cat <<EOF | ||||
| Usage: ${0##*/} [OPTION]... | ||||
|  | ||||
| A script to manage Intel GPU frequencies. Can be used for debugging performance | ||||
| problems or trying to obtain a stable frequency while benchmarking. | ||||
|  | ||||
| Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz. | ||||
|  | ||||
| Options: | ||||
|   -g, --get [act|enf|cap|all] | ||||
|                         Get frequency information: active (default), enforced, | ||||
|                         hardware capabilities or all of them. | ||||
|  | ||||
|   -s, --set [{min|max}=]{FREQUENCY[%]|+|-} | ||||
|                         Set min or max frequency to the given value (MHz). | ||||
|                         Append '%' to interpret FREQUENCY as % of hw max. | ||||
|                         Use '+' or '-' to set frequency to hardware max or min. | ||||
|                         Omit min/max prefix to set both frequencies. | ||||
|  | ||||
|   -r, --reset           Reset frequencies to hardware defaults. | ||||
|  | ||||
|   -m, --monitor [act|enf|cap|all] | ||||
|                         Monitor the indicated frequencies via 'watch' utility. | ||||
|                         See '-g, --get' option for more details. | ||||
|  | ||||
|   -d, --detect-thrott [start|stop|status] | ||||
|                         Start (default operation) the throttling detector | ||||
|                         as a background process. Use 'stop' or 'status' to | ||||
|                         terminate the detector process or verify its status. | ||||
|  | ||||
|   --cpu-set-max {FREQUENCY%|+|-} | ||||
|                         Set CPU max scaling frequency as % of hw max. | ||||
|                         Use '+' or '-' to set frequency to hardware max or min. | ||||
|  | ||||
|   --dry-run             See what the script will do without applying any | ||||
|                         frequency changes. | ||||
|  | ||||
|   -h, --help            Display this help text and exit. | ||||
| EOF | ||||
| } | ||||
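|  | ||||
| # Illustrative invocations (script name assumed, values are examples only): | ||||
| #   ./intel-gpu-freq.sh -g all      # show active, enforced and capability freqs | ||||
| #   ./intel-gpu-freq.sh -s 50%      # pin min and max to 50% of the hw max | ||||
| #   ./intel-gpu-freq.sh -d start    # run the throttling detector in background | ||||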
|  | ||||
| # | ||||
| # Parse user input for '-g, --get' option. | ||||
| # Returns 0 if a value has been provided, otherwise 1. | ||||
| # | ||||
| parse_option_get() { | ||||
|     local ret=0 | ||||
|  | ||||
|     case "$1" in | ||||
|     act) GET_ACT_FREQ=1;; | ||||
|     enf) GET_ENF_FREQ=1;; | ||||
|     cap) GET_CAP_FREQ=1;; | ||||
|     all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;; | ||||
|     -*|"") | ||||
|         # No value provided, using default. | ||||
|         GET_ACT_FREQ=1 | ||||
|         ret=1 | ||||
|         ;; | ||||
|     *) | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     return ${ret} | ||||
| } | ||||
|  | ||||
| # | ||||
| # Validate user input for '-s, --set' option. | ||||
| # arg1: input value to be validated | ||||
| # arg2: optional flag indicating input is restricted to % | ||||
| # | ||||
| validate_option_set() { | ||||
|     case "$1" in | ||||
|     +|-|[0-9]%|[0-9][0-9]%) | ||||
|         return 0 | ||||
|         ;; | ||||
|     *[!0-9]*|"") | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     [ -z "$2" ] || { print_usage; exit 1; } | ||||
| } | ||||
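|  | ||||
| # Note: the patterns above accept '+', '-' and one- or two-digit percentages | ||||
| # (so '100%' is rejected); plain all-digit MHz values fall through the case | ||||
| # and are only rejected when arg2 restricts the input to percentages. | ||||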
|  | ||||
| # | ||||
| # Parse script arguments. | ||||
| # | ||||
| [ $# -eq 0 ] && { print_usage; exit 1; } | ||||
|  | ||||
| while [ $# -gt 0 ]; do | ||||
|     case "$1" in | ||||
|     -g|--get) | ||||
|         parse_option_get "$2" && shift | ||||
|         ;; | ||||
|  | ||||
|     -s|--set) | ||||
|         shift | ||||
|         case "$1" in | ||||
|         min=*) | ||||
|             SET_MIN_FREQ=${1#min=} | ||||
|             validate_option_set "${SET_MIN_FREQ}" | ||||
|             ;; | ||||
|         max=*) | ||||
|             SET_MAX_FREQ=${1#max=} | ||||
|             validate_option_set "${SET_MAX_FREQ}" | ||||
|             ;; | ||||
|         *) | ||||
|             SET_MIN_FREQ=$1 | ||||
|             validate_option_set "${SET_MIN_FREQ}" | ||||
|             SET_MAX_FREQ=${SET_MIN_FREQ} | ||||
|             ;; | ||||
|         esac | ||||
|         ;; | ||||
|  | ||||
|     -r|--reset) | ||||
|         RESET_FREQ=1 | ||||
|         SET_MIN_FREQ="-" | ||||
|         SET_MAX_FREQ="+" | ||||
|         ;; | ||||
|  | ||||
|     -m|--monitor) | ||||
|         MONITOR_FREQ=act | ||||
|         parse_option_get "$2" && MONITOR_FREQ=$2 && shift | ||||
|         ;; | ||||
|  | ||||
|     -d|--detect-thrott) | ||||
|         DETECT_THROTT=start | ||||
|         case "$2" in | ||||
|         start|stop|status) | ||||
|             DETECT_THROTT=$2 | ||||
|             shift | ||||
|             ;; | ||||
|         esac | ||||
|         ;; | ||||
|  | ||||
|     --cpu-set-max) | ||||
|         shift | ||||
|         CPU_SET_MAX_FREQ=$1 | ||||
|         validate_option_set "${CPU_SET_MAX_FREQ}" restricted | ||||
|         ;; | ||||
|  | ||||
|     --dry-run) | ||||
|         DRY_RUN=1 | ||||
|         ;; | ||||
|  | ||||
|     -h|--help) | ||||
|         print_usage | ||||
|         exit 0 | ||||
|         ;; | ||||
|  | ||||
|     *) | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     shift | ||||
| done | ||||
|  | ||||
| # | ||||
| # Main | ||||
| # | ||||
| RET=0 | ||||
|  | ||||
| identify_intel_gpu || { | ||||
|     log INFO "No Intel GPU detected" | ||||
|     exit 0 | ||||
| } | ||||
|  | ||||
| [ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; } | ||||
| print_freq_info | ||||
|  | ||||
| [ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT} | ||||
|  | ||||
| [ -n "${CPU_SET_MAX_FREQ}" ] && { set_cpu_freq_max || RET=$?; } | ||||
|  | ||||
| [ -n "${MONITOR_FREQ}" ] && { | ||||
|     log INFO "Entering frequency monitoring mode" | ||||
|     sleep 2 | ||||
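|     # exec replaces this shell with watch(1), which re-runs the script in | ||||
|     # get-only mode every second and highlights changes via -d. | ||||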
|     exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}" | ||||
| } | ||||
|  | ||||
| exit ${RET} | ||||
| @@ -1,18 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC1091  # the path is created in build-kdl and | ||||
| # here we only check whether it exists | ||||
| # shellcheck disable=SC2086 # we want the arguments to be expanded | ||||
|  | ||||
| if ! [ -f /ci-kdl/bin/activate ]; then | ||||
|   echo -e "ci-kdl not installed; not monitoring temperature" | ||||
|   exit 0 | ||||
| fi | ||||
|  | ||||
| KDL_ARGS=" | ||||
| 	--output-file=${RESULTS_DIR}/kdl.json | ||||
| 	--log-level=WARNING | ||||
| 	--num-samples=-1 | ||||
| " | ||||
|  | ||||
| source /ci-kdl/bin/activate | ||||
| exec /ci-kdl/bin/ci-kdl ${KDL_ARGS} | ||||
| @@ -1,2 +0,0 @@ | ||||
| variables: | ||||
|   CONDITIONAL_BUILD_ANGLE_TAG: ab19bccfd3858c539ba8cb8d9b52a003 | ||||
| @@ -1,81 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC1091 | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # ALPINE_X86_64_BUILD_TAG | ||||
|  | ||||
| set -e | ||||
|  | ||||
| . .gitlab-ci/setup-test-env.sh | ||||
|  | ||||
| set -o xtrace | ||||
|  | ||||
| EPHEMERAL=( | ||||
| ) | ||||
|  | ||||
|  | ||||
| DEPS=( | ||||
|     bash | ||||
|     bison | ||||
|     ccache | ||||
|     "clang${LLVM_VERSION}-dev" | ||||
|     cmake | ||||
|     clang-dev | ||||
|     coreutils | ||||
|     curl | ||||
|     flex | ||||
|     gcc | ||||
|     g++ | ||||
|     git | ||||
|     gettext | ||||
|     glslang | ||||
|     graphviz | ||||
|     linux-headers | ||||
|     "llvm${LLVM_VERSION}-static" | ||||
|     "llvm${LLVM_VERSION}-dev" | ||||
|     meson | ||||
|     mold | ||||
|     musl-dev | ||||
|     expat-dev | ||||
|     elfutils-dev | ||||
|     libclc-dev | ||||
|     libdrm-dev | ||||
|     libva-dev | ||||
|     libpciaccess-dev | ||||
|     zlib-dev | ||||
|     python3-dev | ||||
|     py3-clang | ||||
|     py3-cparser | ||||
|     py3-mako | ||||
|     py3-packaging | ||||
|     py3-pip | ||||
|     py3-ply | ||||
|     py3-yaml | ||||
|     vulkan-headers | ||||
|     spirv-tools-dev | ||||
|     spirv-llvm-translator-dev | ||||
|     util-macros | ||||
| ) | ||||
|  | ||||
| apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}" | ||||
|  | ||||
| pip3 install --break-system-packages sphinx===8.2.3 hawkmoth===0.19.0 | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| EXTRA_MESON_ARGS='--prefix=/usr' \ | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| # too many vendor binaries, just keep the ones we need | ||||
| find /usr/share/clc \ | ||||
|   \( -type f -o -type l \) \ | ||||
|   ! -name 'spirv-mesa3d-.spv' \ | ||||
|   ! -name 'spirv64-mesa3d-.spv' \ | ||||
|   -delete | ||||
|  | ||||
| apk del "${EPHEMERAL[@]}" | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,32 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # This is a ci-templates build script to generate a container for LAVA SSH client. | ||||
|  | ||||
| # shellcheck disable=SC1091 | ||||
| set -e | ||||
|  | ||||
| . .gitlab-ci/setup-test-env.sh | ||||
|  | ||||
| set -o xtrace | ||||
|  | ||||
| EPHEMERAL=( | ||||
| ) | ||||
|  | ||||
| # We only need these very basic packages to run the tests. | ||||
| DEPS=( | ||||
|     openssh-client  # for ssh | ||||
|     iputils         # for ping | ||||
|     bash | ||||
|     curl | ||||
| ) | ||||
|  | ||||
|  | ||||
| apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}" | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| apk del "${EPHEMERAL[@]}" | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,62 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| # Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves | ||||
| # network transfer, disk usage, and runtime on test jobs) | ||||
|  | ||||
| # shellcheck disable=SC2154 # arch is assigned in previous scripts | ||||
| if curl --fail -L -s "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then | ||||
|   ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}" | ||||
| else | ||||
|   ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}" | ||||
| fi | ||||
|  | ||||
| curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     "${ARTIFACTS_URL}"/lava-rootfs.tar.zst -o rootfs.tar.zst | ||||
| mkdir -p /rootfs-"$arch" | ||||
| tar -C /rootfs-"$arch" '--exclude=./dev/*' --zstd -xf rootfs.tar.zst | ||||
| rm rootfs.tar.zst | ||||
|  | ||||
| if [[ $arch == "arm64" ]]; then | ||||
|     mkdir -p /baremetal-files | ||||
|     pushd /baremetal-files | ||||
|  | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
| 	-O "${KERNEL_IMAGE_BASE}"/arm64/Image | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|         -O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|         -O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel | ||||
|  | ||||
|     DEVICE_TREES="" | ||||
|     DEVICE_TREES="$DEVICE_TREES apq8016-sbc-usb-host.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb" | ||||
|  | ||||
|     for DTB in $DEVICE_TREES; do | ||||
| 	curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|             -O "${KERNEL_IMAGE_BASE}/arm64/$DTB" | ||||
|     done | ||||
|  | ||||
|     popd | ||||
| elif [[ $arch == "armhf" ]]; then | ||||
|     mkdir -p /baremetal-files | ||||
|     pushd /baremetal-files | ||||
|  | ||||
|     curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|         -O "${KERNEL_IMAGE_BASE}"/armhf/zImage | ||||
|  | ||||
|     DEVICE_TREES="" | ||||
|     DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb" | ||||
|  | ||||
|     for DTB in $DEVICE_TREES; do | ||||
| 	curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|             -O "${KERNEL_IMAGE_BASE}/armhf/$DTB" | ||||
|     done | ||||
|  | ||||
|     popd | ||||
| fi | ||||
| @@ -1,121 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml and .gitlab-ci/container/gitlab-ci.yml tags: | ||||
| # DEBIAN_BUILD_TAG | ||||
| # ANDROID_LLVM_ARTIFACT_NAME | ||||
|  | ||||
| set -exu | ||||
|  | ||||
| # If CI vars are not set, assign an empty value; this prevents 'set -u' from failing | ||||
| : "${CI:=}" | ||||
| : "${CI_PROJECT_PATH:=}" | ||||
|  | ||||
| # Early check for required env variables, relies on `set -u` | ||||
| : "$ANDROID_NDK_VERSION" | ||||
| : "$ANDROID_SDK_VERSION" | ||||
| : "$ANDROID_LLVM_VERSION" | ||||
| : "$ANDROID_LLVM_ARTIFACT_NAME" | ||||
| : "$S3_JWT_FILE" | ||||
| : "$S3_HOST" | ||||
| : "$S3_ANDROID_BUCKET" | ||||
|  | ||||
| # Check for CI if the auth file used later on is non-empty | ||||
| if [ -n "$CI" ] && [ ! -s "${S3_JWT_FILE}" ]; then | ||||
|   echo "Error: ${S3_JWT_FILE} is empty." 1>&2 | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst"; then | ||||
|   echo "Artifact ${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst already exists, skip re-building." | ||||
|  | ||||
|   # Download prebuilt LLVM libraries for Android when they have not changed, | ||||
|   # to save some time | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     -o "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" | ||||
|   tar -C / --zstd -xf "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" | ||||
|   rm "/${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" | ||||
|  | ||||
|   exit | ||||
| fi | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| EPHEMERAL=( | ||||
|   unzip | ||||
| ) | ||||
|  | ||||
| apt-get update | ||||
| apt-get install -y --no-install-recommends --no-remove "${EPHEMERAL[@]}" | ||||
|  | ||||
| ANDROID_NDK="android-ndk-${ANDROID_NDK_VERSION}" | ||||
| ANDROID_NDK_ROOT="/${ANDROID_NDK}" | ||||
| if [ ! -d "$ANDROID_NDK_ROOT" ]; | ||||
| then | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     -o "${ANDROID_NDK}.zip" \ | ||||
|     "https://dl.google.com/android/repository/${ANDROID_NDK}-linux.zip" | ||||
|   unzip -d / "${ANDROID_NDK}.zip" "$ANDROID_NDK/source.properties" "$ANDROID_NDK/build/cmake/*" "$ANDROID_NDK/toolchains/llvm/*" | ||||
|   rm "${ANDROID_NDK}.zip" | ||||
| fi | ||||
|  | ||||
| if [ ! -d "/llvm-project" ]; | ||||
| then | ||||
|   mkdir "/llvm-project" | ||||
|   pushd "/llvm-project" | ||||
|   git init | ||||
|   git remote add origin https://github.com/llvm/llvm-project.git | ||||
|   git fetch --depth 1 origin "$ANDROID_LLVM_VERSION" | ||||
|   git checkout FETCH_HEAD | ||||
|   popd | ||||
| fi | ||||
|  | ||||
| pushd "/llvm-project" | ||||
|  | ||||
| # Check out the intended version again, in case of a pre-existing full clone | ||||
| git checkout "$ANDROID_LLVM_VERSION" || true | ||||
|  | ||||
| LLVM_INSTALL_PREFIX="/${ANDROID_LLVM_ARTIFACT_NAME}" | ||||
|  | ||||
| rm -rf build/ | ||||
| cmake -GNinja -S llvm -B build/ \ | ||||
|     -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake" \ | ||||
|     -DANDROID_ABI=x86_64 \ | ||||
|     -DANDROID_PLATFORM="android-${ANDROID_SDK_VERSION}" \ | ||||
|     -DANDROID_NDK="${ANDROID_NDK_ROOT}" \ | ||||
|     -DCMAKE_ANDROID_ARCH_ABI=x86_64 \ | ||||
|     -DCMAKE_ANDROID_NDK="${ANDROID_NDK_ROOT}" \ | ||||
|     -DCMAKE_BUILD_TYPE=MinSizeRel \ | ||||
|     -DCMAKE_SYSTEM_NAME=Android \ | ||||
|     -DCMAKE_SYSTEM_VERSION="${ANDROID_SDK_VERSION}" \ | ||||
|     -DCMAKE_INSTALL_PREFIX="${LLVM_INSTALL_PREFIX}" \ | ||||
|     -DCMAKE_CXX_FLAGS="-march=x86-64 --target=x86_64-linux-android${ANDROID_SDK_VERSION} -fno-rtti" \ | ||||
|     -DLLVM_HOST_TRIPLE="x86_64-linux-android${ANDROID_SDK_VERSION}" \ | ||||
|     -DLLVM_TARGETS_TO_BUILD=X86 \ | ||||
|     -DLLVM_BUILD_LLVM_DYLIB=OFF \ | ||||
|     -DLLVM_BUILD_TESTS=OFF \ | ||||
|     -DLLVM_BUILD_EXAMPLES=OFF \ | ||||
|     -DLLVM_BUILD_DOCS=OFF \ | ||||
|     -DLLVM_BUILD_TOOLS=OFF \ | ||||
|     -DLLVM_ENABLE_RTTI=OFF \ | ||||
|     -DLLVM_BUILD_INSTRUMENTED_COVERAGE=OFF \ | ||||
|     -DLLVM_NATIVE_TOOL_DIR="${ANDROID_NDK_ROOT}/toolchains/llvm/prebuilt/linux-x86_64/bin" \ | ||||
|     -DLLVM_ENABLE_PIC=False \ | ||||
|     -DLLVM_OPTIMIZED_TABLEGEN=ON | ||||
|  | ||||
| ninja "-j${FDO_CI_CONCURRENT:-4}" -C build/ install | ||||
|  | ||||
| popd | ||||
|  | ||||
| rm -rf /llvm-project | ||||
|  | ||||
| tar --zstd -cf "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "$LLVM_INSTALL_PREFIX" | ||||
|  | ||||
| # When running in CI, upload the tar.zst archive to S3 so it does not get | ||||
| # rebuilt while the version stays the same, then delete the local copy. | ||||
| # Outside CI the file is kept, since it can be useful in local runs. | ||||
| if [ -n "$CI" ]; then | ||||
|   s3_upload "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" "https://${S3_HOST}/${S3_ANDROID_BUCKET}/${CI_PROJECT_PATH}/" | ||||
|   rm "${ANDROID_LLVM_ARTIFACT_NAME}.tar.zst" | ||||
| fi | ||||
|  | ||||
| apt-get purge -y "${EPHEMERAL[@]}" | ||||
| @@ -1,164 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_ANDROID_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start angle "Building ANGLE" | ||||
|  | ||||
| # Do a very early check to make sure the tag is correct, without having to set | ||||
| # up the environment variables locally | ||||
| ci_tag_build_time_check "ANGLE_TAG" | ||||
|  | ||||
| ANGLE_REV="a3f2545f6bb3e8d27827dceb2b4e901673995ad1" | ||||
|  | ||||
| # Set ANGLE_ARCH based on DEBIAN_ARCH if it hasn't been explicitly defined | ||||
| if [[ -z "${ANGLE_ARCH:-}" ]]; then | ||||
|   case "$DEBIAN_ARCH" in | ||||
|     amd64) ANGLE_ARCH=x64;; | ||||
|     arm64) ANGLE_ARCH=arm64;; | ||||
|   esac | ||||
| fi | ||||
|  | ||||
| # DEPOT tools | ||||
| git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git /depot-tools | ||||
| export PATH=/depot-tools:$PATH | ||||
| export DEPOT_TOOLS_UPDATE=0 | ||||
|  | ||||
| mkdir /angle-build | ||||
| mkdir /angle | ||||
| pushd /angle-build | ||||
| git init | ||||
| git remote add origin https://chromium.googlesource.com/angle/angle.git | ||||
| git fetch --depth 1 origin "$ANGLE_REV" | ||||
| git checkout FETCH_HEAD | ||||
|  | ||||
| echo "$ANGLE_REV" > /angle/version | ||||
|  | ||||
| GCLIENT_CUSTOM_VARS=() | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_cl=False') | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_cl_testing=False') | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_vulkan_validation_layers=False') | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=angle_enable_wgpu=False') | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=build_angle_deqp_tests=False') | ||||
| GCLIENT_CUSTOM_VARS+=('--custom-var=build_angle_perftests=False') | ||||
| if [[ "$ANGLE_TARGET" == "android" ]]; then | ||||
|   GCLIENT_CUSTOM_VARS+=('--custom-var=checkout_android=True') | ||||
| fi | ||||
|  | ||||
| # source preparation | ||||
| gclient config --name REPLACE-WITH-A-DOT --unmanaged \ | ||||
|   "${GCLIENT_CUSTOM_VARS[@]}" \ | ||||
|   https://chromium.googlesource.com/angle/angle.git | ||||
| sed -e 's/REPLACE-WITH-A-DOT/./;' -i .gclient | ||||
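| # (The placeholder is presumably needed because 'gclient config' does not take | ||||
| # "." as a name directly; the sed above swaps it for a dot in .gclient.) | ||||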
| sed -e 's|"custom_deps" : {|"custom_deps" : {\ | ||||
|       "third_party/clspv/src": None,\ | ||||
|       "third_party/dawn": None,\ | ||||
|       "third_party/glmark2/src": None,\ | ||||
|       "third_party/libjpeg_turbo": None,\ | ||||
|       "third_party/llvm/src": None,\ | ||||
|       "third_party/OpenCL-CTS/src": None,\ | ||||
|       "third_party/SwiftShader": None,\ | ||||
|       "third_party/VK-GL-CTS/src": None,\ | ||||
|       "third_party/vulkan-validation-layers/src": None,|' -i .gclient | ||||
| gclient sync --no-history -j"${FDO_CI_CONCURRENT:-4}" | ||||
|  | ||||
| mkdir -p out/Release | ||||
| cat > out/Release/args.gn <<EOF | ||||
| angle_assert_always_on=false | ||||
| angle_build_all=false | ||||
| angle_build_tests=false | ||||
| angle_enable_cl=false | ||||
| angle_enable_cl_testing=false | ||||
| angle_enable_gl=false | ||||
| angle_enable_gl_desktop_backend=false | ||||
| angle_enable_null=false | ||||
| angle_enable_swiftshader=false | ||||
| angle_enable_trace=false | ||||
| angle_enable_wgpu=false | ||||
| angle_enable_vulkan=true | ||||
| angle_enable_vulkan_api_dump_layer=false | ||||
| angle_enable_vulkan_validation_layers=false | ||||
| angle_has_frame_capture=false | ||||
| angle_has_histograms=false | ||||
| angle_has_rapidjson=false | ||||
| angle_use_custom_libvulkan=false | ||||
| build_angle_deqp_tests=false | ||||
| dcheck_always_on=true | ||||
| enable_expensive_dchecks=false | ||||
| is_component_build=false | ||||
| is_debug=false | ||||
| target_cpu="${ANGLE_ARCH}" | ||||
| target_os="${ANGLE_TARGET}" | ||||
| treat_warnings_as_errors=false | ||||
| EOF | ||||
|  | ||||
| case "$ANGLE_TARGET" in | ||||
|   linux) cat >> out/Release/args.gn <<EOF | ||||
| angle_egl_extension="so.1" | ||||
| angle_glesv2_extension="so.2" | ||||
| use_custom_libcxx=false | ||||
| custom_toolchain="//build/toolchain/linux/unbundle:default" | ||||
| host_toolchain="//build/toolchain/linux/unbundle:default" | ||||
| EOF | ||||
|     ;; | ||||
|   android) cat >> out/Release/args.gn <<EOF | ||||
| android_ndk_version="${ANDROID_NDK_VERSION}" | ||||
| android64_ndk_api_level=${ANDROID_SDK_VERSION} | ||||
| android32_ndk_api_level=${ANDROID_SDK_VERSION} | ||||
| use_custom_libcxx=true | ||||
| EOF | ||||
|     ;; | ||||
|     *) echo "Unexpected ANGLE_TARGET value: $ANGLE_TARGET"; exit 1;; | ||||
| esac | ||||
|  | ||||
| if [[ "$DEBIAN_ARCH" = "arm64" ]]; then | ||||
|   # We need to get an AArch64 sysroot - because ANGLE isn't great friends with | ||||
|   # system dependencies - but use the default system toolchain, because the | ||||
|   # 'arm64' toolchain you get from Google infrastructure is a cross-compiler | ||||
|   # from x86-64 | ||||
|   build/linux/sysroot_scripts/install-sysroot.py --arch=arm64 | ||||
| fi | ||||
|  | ||||
| ( | ||||
|   # The 'unbundled' toolchain configuration requires clang, and it also needs to | ||||
|   # be configured via environment variables. | ||||
|   export CC="clang-${LLVM_VERSION}" | ||||
|   export HOST_CC="$CC" | ||||
|   export CFLAGS="-Wno-unknown-warning-option" | ||||
|   export HOST_CFLAGS="$CFLAGS" | ||||
|   export CXX="clang++-${LLVM_VERSION}" | ||||
|   export HOST_CXX="$CXX" | ||||
|   export CXXFLAGS="-Wno-unknown-warning-option" | ||||
|   export HOST_CXXFLAGS="$CXXFLAGS" | ||||
|   export AR="ar" | ||||
|   export HOST_AR="$AR" | ||||
|   export NM="nm" | ||||
|   export HOST_NM="$NM" | ||||
|   export LDFLAGS="-fuse-ld=lld-${LLVM_VERSION} -lpthread -ldl" | ||||
|   export HOST_LDFLAGS="$LDFLAGS" | ||||
|  | ||||
|   gn gen out/Release | ||||
|   # depot_tools overrides ninja with a version that doesn't work.  We want | ||||
|   # ninja with FDO_CI_CONCURRENT anyway. | ||||
|   /usr/local/bin/ninja -C out/Release/ libEGL libGLESv1_CM libGLESv2 | ||||
| ) | ||||
|  | ||||
| rm -f out/Release/libvulkan.so* out/Release/*.so*.TOC | ||||
| cp out/Release/lib*.so* /angle/ | ||||
|  | ||||
| if [[ "$ANGLE_TARGET" == "linux" ]]; then | ||||
|   ln -s libEGL.so.1 /angle/libEGL.so | ||||
|   ln -s libGLESv2.so.2 /angle/libGLESv2.so | ||||
| fi | ||||
|  | ||||
| rm -rf out | ||||
|  | ||||
| popd | ||||
| rm -rf /depot-tools | ||||
| rm -rf /angle-build | ||||
|  | ||||
| section_end angle | ||||
| @@ -1,29 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start apitrace "Building apitrace" | ||||
|  | ||||
| APITRACE_VERSION="952bad1469ea747012bdc48c48993bd5f13eec04" | ||||
|  | ||||
| git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace | ||||
| pushd /apitrace | ||||
| git checkout "$APITRACE_VERSION" | ||||
| git submodule update --init --depth 1 --recursive | ||||
| cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on ${EXTRA_CMAKE_ARGS:-} | ||||
| cmake --build _build --parallel --target apitrace eglretrace | ||||
| mkdir build | ||||
| cp _build/apitrace build | ||||
| cp _build/eglretrace build | ||||
| ${STRIP_CMD:-strip} build/* | ||||
| find . -not -path './build' -not -path './build/*' -delete | ||||
| popd | ||||
|  | ||||
| section_end apitrace | ||||
| @@ -1,23 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| uncollapsed_section_start bindgen "Building bindgen" | ||||
|  | ||||
| BINDGEN_VER=0.65.1 | ||||
| CBINDGEN_VER=0.26.0 | ||||
|  | ||||
| # bindgen | ||||
| RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   bindgen-cli --version ${BINDGEN_VER} \ | ||||
|   --locked \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --root /usr/local | ||||
|  | ||||
| # cbindgen | ||||
| RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   cbindgen --version ${CBINDGEN_VER} \ | ||||
|   --locked \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --root /usr/local | ||||
|  | ||||
| section_end bindgen | ||||
| @@ -1,55 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_BASE_TAG | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start crosvm "Building crosvm" | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
|  | ||||
| CROSVM_VERSION=e27efaf8f4bdc4a47d1e99cc44d2b6908b6f36bd | ||||
| git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm | ||||
| pushd /platform/crosvm | ||||
| git checkout "$CROSVM_VERSION" | ||||
| git submodule update --init | ||||
|  | ||||
| VIRGLRENDERER_VERSION=7570167549358ce77b8d4774041b4a77c72a021c | ||||
| rm -rf third_party/virglrenderer | ||||
| git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer | ||||
| pushd third_party/virglrenderer | ||||
| git checkout "$VIRGLRENDERER_VERSION" | ||||
| meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C build | ||||
| popd | ||||
|  | ||||
| rm rust-toolchain | ||||
|  | ||||
| RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   bindgen-cli \ | ||||
|   --locked \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --root /usr/local \ | ||||
|   --version 0.71.1 \ | ||||
|   ${EXTRA_CARGO_ARGS:-} | ||||
|  | ||||
| CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --locked \ | ||||
|   --features 'default-no-sandbox gpu x virgl_renderer' \ | ||||
|   --path . \ | ||||
|   --root /usr/local \ | ||||
|   ${EXTRA_CARGO_ARGS:-} | ||||
|  | ||||
| popd | ||||
|  | ||||
| rm -rf /platform/crosvm | ||||
|  | ||||
| section_end crosvm | ||||
| @@ -1,100 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_ANDROID_TAG | ||||
| # DEBIAN_BASE_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start deqp-runner "Building deqp-runner" | ||||
|  | ||||
| DEQP_RUNNER_VERSION=0.20.3 | ||||
|  | ||||
| commits_to_backport=( | ||||
| ) | ||||
|  | ||||
| patch_files=( | ||||
| ) | ||||
|  | ||||
| DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}" | ||||
|  | ||||
| if [ -n "${DEQP_RUNNER_GIT_TAG:-}" ]; then | ||||
|     DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG" | ||||
| elif [ -n "${DEQP_RUNNER_GIT_REV:-}" ]; then | ||||
|     DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV" | ||||
| else | ||||
|     DEQP_RUNNER_GIT_CHECKOUT="v$DEQP_RUNNER_VERSION" | ||||
| fi | ||||
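|  | ||||
| # Checkout precedence: an explicit DEQP_RUNNER_GIT_TAG wins over | ||||
| # DEQP_RUNNER_GIT_REV, which in turn wins over the default release tag. | ||||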
|  | ||||
| BASE_PWD=$PWD | ||||
|  | ||||
| mkdir -p /deqp-runner | ||||
| pushd /deqp-runner | ||||
| mkdir deqp-runner-git | ||||
| pushd deqp-runner-git | ||||
| git init | ||||
| git remote add origin "$DEQP_RUNNER_GIT_URL" | ||||
| git fetch --depth 1 origin "$DEQP_RUNNER_GIT_CHECKOUT" | ||||
| git checkout FETCH_HEAD | ||||
|  | ||||
| for commit in "${commits_to_backport[@]}" | ||||
| do | ||||
|   PATCH_URL="https://gitlab.freedesktop.org/mesa/deqp-runner/-/commit/$commit.patch" | ||||
|   echo "Backport deqp-runner commit $commit from $PATCH_URL" | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | git am | ||||
| done | ||||
|  | ||||
| for patch in "${patch_files[@]}" | ||||
| do | ||||
|   echo "Apply patch to deqp-runner from $patch" | ||||
|   git am "$BASE_PWD/.gitlab-ci/container/patches/$patch" | ||||
| done | ||||
|  | ||||
| if [ -z "${RUST_TARGET:-}" ]; then | ||||
|     RUST_TARGET="" | ||||
| fi | ||||
|  | ||||
| if [[ "$RUST_TARGET" != *-android ]]; then | ||||
|     # When the CC variable (/usr/lib/ccache/gcc) is set, the Rust compiler | ||||
|     # picks it up when cross-compiling for arm32 and the build fails for | ||||
|     # zsys-sys, so unset CC when cross-compiling for arm32. | ||||
|     SAVEDCC=${CC:-} | ||||
|     if [ "$RUST_TARGET" = "armv7-unknown-linux-gnueabihf" ]; then | ||||
|         unset CC | ||||
|     fi | ||||
|     cargo install --locked  \ | ||||
|         -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|         --root /usr/local \ | ||||
|         ${EXTRA_CARGO_ARGS:-} \ | ||||
|         --path . | ||||
|     CC=$SAVEDCC | ||||
| else | ||||
|     cargo install --locked  \ | ||||
|         -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|         --root /usr/local --version 2.10.0 \ | ||||
|         cargo-ndk | ||||
|  | ||||
|     rustup target add $RUST_TARGET | ||||
|     RUSTFLAGS='-C target-feature=+crt-static' cargo ndk --target $RUST_TARGET build --release | ||||
|  | ||||
|     mv target/$RUST_TARGET/release/deqp-runner /deqp-runner | ||||
|  | ||||
|     cargo uninstall --locked  \ | ||||
|         --root /usr/local \ | ||||
|         cargo-ndk | ||||
| fi | ||||
|  | ||||
| popd | ||||
| rm -rf deqp-runner-git | ||||
| popd | ||||
|  | ||||
| # remove unused test runners to shrink images for the Mesa CI build (not kernel, | ||||
| # which chooses its own deqp-runner branch) | ||||
| if [ -z "${DEQP_RUNNER_GIT_TAG:-}${DEQP_RUNNER_GIT_REV:-}" ]; then | ||||
|     rm -f /usr/local/bin/igt-runner | ||||
| fi | ||||
|  | ||||
| section_end deqp-runner | ||||
| @@ -1,325 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_ANDROID_TAG | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -ue -o pipefail | ||||
|  | ||||
| # shellcheck disable=SC2153 | ||||
| deqp_api=${DEQP_API,,} | ||||
|  | ||||
| uncollapsed_section_start deqp-$deqp_api "Building dEQP $DEQP_API" | ||||
|  | ||||
| set -x | ||||
|  | ||||
| # See `deqp_build_targets` below for which release is used to produce which | ||||
| # binary. Unless this comment has bitrotten: | ||||
| # - the commit from the main branch produces the deqp tools and `deqp-vk`, | ||||
| # - the VK release produces `deqp-vk`, | ||||
| # - the GL release produces `glcts`, and | ||||
| # - the GLES release produces `deqp-gles*` and `deqp-egl` | ||||
|  | ||||
| DEQP_MAIN_COMMIT=76c1572eaba42d7ddd9bb8eb5788e52dd932068e | ||||
| DEQP_VK_VERSION=1.4.1.1 | ||||
| DEQP_GL_VERSION=4.6.5.0 | ||||
| DEQP_GLES_VERSION=3.2.11.0 | ||||
|  | ||||
| # Patches to VulkanCTS may come from commits in their repo (listed in | ||||
| # cts_commits_to_backport) or patch files stored in our repo (in the patch | ||||
| # directory `$OLDPWD/.gitlab-ci/container/patches/` listed in cts_patch_files). | ||||
| # Both list variables would have comments explaining the reasons behind the | ||||
| # patches. | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| main_cts_commits_to_backport=( | ||||
|     # If you find yourself wanting to add something in here, consider whether | ||||
|     # bumping DEQP_MAIN_COMMIT is not a better solution :) | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| main_cts_patch_files=( | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| vk_cts_commits_to_backport=( | ||||
|   # Stop querying device address from unbound buffers | ||||
|   046343f46f7d39d53b47842d7fd8ed3279528046 | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| vk_cts_patch_files=( | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| gl_cts_commits_to_backport=( | ||||
|   # Add #include <cmath> in deMath.h when being compiled by C++ | ||||
|   71808fe7d0a640dfd703e845d93ba1c5ab751055 | ||||
|   # Revert "Add #include <cmath> in deMath.h when being compiled by C++ compiler" | ||||
|   # This also adds an alternative fix along with the revert. | ||||
|   6164879a0acce258637d261592a9c395e564b361 | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| gl_cts_patch_files=( | ||||
|   build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| # GLES builds also EGL | ||||
| gles_cts_commits_to_backport=( | ||||
|   # Add #include <cmath> in deMath.h when being compiled by C++ | ||||
|   71808fe7d0a640dfd703e845d93ba1c5ab751055 | ||||
|   # Revert "Add #include <cmath> in deMath.h when being compiled by C++ compiler" | ||||
|   # This also adds an alternative fix along with the revert. | ||||
|   6164879a0acce258637d261592a9c395e564b361 | ||||
| ) | ||||
|  | ||||
| # shellcheck disable=SC2034 | ||||
| gles_cts_patch_files=( | ||||
|   build-deqp-gl_Build-Don-t-build-Vulkan-utilities-for-GL-builds.patch | ||||
| ) | ||||
|  | ||||
| if [ "${DEQP_TARGET}" = 'android' ]; then | ||||
|   gles_cts_patch_files+=( | ||||
|     build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch | ||||
|     build-deqp-gles_Android-prints-to-stdout-instead-of-logcat.patch | ||||
|   ) | ||||
| fi | ||||
|  | ||||
|  | ||||
| ### Careful editing anything below this line | ||||
|  | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
|  | ||||
| # shellcheck disable=SC2153 | ||||
| case "${DEQP_API}" in | ||||
|   tools) DEQP_VERSION="$DEQP_MAIN_COMMIT";; | ||||
|   *-main) DEQP_VERSION="$DEQP_MAIN_COMMIT";; | ||||
|   VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";; | ||||
|   GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";; | ||||
|   GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";; | ||||
|   *) echo "Unexpected DEQP_API value: $DEQP_API"; exit 1;; | ||||
| esac | ||||
|  | ||||
| mkdir -p /VK-GL-CTS | ||||
| pushd /VK-GL-CTS | ||||
| [ -e .git ] || { | ||||
|   git init | ||||
|   git remote add origin https://github.com/KhronosGroup/VK-GL-CTS.git | ||||
| } | ||||
| git fetch --depth 1 origin "$DEQP_VERSION" | ||||
| git checkout FETCH_HEAD | ||||
| DEQP_COMMIT=$(git rev-parse FETCH_HEAD) | ||||
|  | ||||
| if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then | ||||
|   merge_base="$(curl --fail -s https://api.github.com/repos/KhronosGroup/VK-GL-CTS/compare/main...$DEQP_MAIN_COMMIT | jq -r .merge_base_commit.sha)" | ||||
|   if [[ "$merge_base" != "$DEQP_MAIN_COMMIT" ]]; then | ||||
|     echo "VK-GL-CTS commit $DEQP_MAIN_COMMIT is not a commit from the main branch." | ||||
|     exit 1 | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| mkdir -p /deqp-$deqp_api | ||||
|  | ||||
| if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then | ||||
|   prefix="main" | ||||
| else | ||||
|   prefix="$deqp_api" | ||||
| fi | ||||
|  | ||||
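| # "${!name}" below is bash indirect expansion: with prefix=vk, for instance, | ||||
| # the string "vk_cts_commits_to_backport[@]" expands to that array's elements. | ||||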
| cts_commits_to_backport="${prefix}_cts_commits_to_backport[@]" | ||||
| for commit in "${!cts_commits_to_backport}" | ||||
| do | ||||
|   PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch" | ||||
|   echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL" | ||||
|   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \ | ||||
|     GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am - | ||||
| done | ||||
|  | ||||
| cts_patch_files="${prefix}_cts_patch_files[@]" | ||||
| for patch in "${!cts_patch_files}" | ||||
| do | ||||
|   echo "Apply patch to ${DEQP_API} CTS from $patch" | ||||
|   GIT_COMMITTER_DATE=$(LC_TIME=C date -d@0) git am < $OLDPWD/.gitlab-ci/container/patches/$patch | ||||
| done | ||||
|  | ||||
| { | ||||
|   if [ "$DEQP_VERSION" = "$DEQP_MAIN_COMMIT" ]; then | ||||
|     commit_desc=$(git show --no-patch --format='commit %h on %ci' --abbrev=10 "$DEQP_COMMIT") | ||||
|     echo "dEQP $DEQP_API at $commit_desc" | ||||
|   else | ||||
|     echo "dEQP $DEQP_API version $DEQP_VERSION" | ||||
|   fi | ||||
|   if [ "$(git rev-parse HEAD)" != "$DEQP_COMMIT" ]; then | ||||
|     echo "The following local patches are applied on top:" | ||||
|     git log --reverse --oneline "$DEQP_COMMIT".. --format='- %s' | ||||
|   fi | ||||
| } > /deqp-$deqp_api/deqp-$deqp_api-version | ||||
|  | ||||
| # --insecure is due to SSL cert failures hitting sourceforge for zlib and | ||||
| # libpng (sigh).  The archives get their checksums checked anyway, and git | ||||
| # always goes through ssh or https. | ||||
| python3 external/fetch_sources.py --insecure | ||||
|  | ||||
| case "${DEQP_API}" in | ||||
|   VK-main) | ||||
|     # Video tests rely on external files | ||||
|     python3 external/fetch_video_decode_samples.py | ||||
|     python3 external/fetch_video_encode_samples.py | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| if [[ "$DEQP_API" = tools ]]; then | ||||
|   # Save the testlog stylesheets: | ||||
|   cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp-$deqp_api | ||||
| fi | ||||
|  | ||||
| popd | ||||
|  | ||||
| deqp_build_targets=() | ||||
| case "${DEQP_API}" in | ||||
|   VK|VK-main) | ||||
|     deqp_build_targets+=(deqp-vk) | ||||
|     ;; | ||||
|   GL) | ||||
|     deqp_build_targets+=(glcts) | ||||
|     ;; | ||||
|   GLES) | ||||
|     deqp_build_targets+=(deqp-gles{2,3,31}) | ||||
|     deqp_build_targets+=(glcts)  # needed for gles*-khr tests | ||||
|     # deqp-egl also comes from this build, but it is handled separately below. | ||||
|     ;; | ||||
|   tools) | ||||
|     deqp_build_targets+=(testlog-to-xml) | ||||
|     deqp_build_targets+=(testlog-to-csv) | ||||
|     deqp_build_targets+=(testlog-to-junit) | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| OLD_IFS="$IFS" | ||||
| IFS=";" | ||||
| CMAKE_SBT="${deqp_build_targets[*]}" | ||||
| IFS="$OLD_IFS" | ||||
|  | ||||
| pushd /deqp-$deqp_api | ||||
|  | ||||
| if [ "${DEQP_API}" = 'GLES' ]; then | ||||
|   if [ "${DEQP_TARGET}" = 'android' ]; then | ||||
|     cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|         -DDEQP_TARGET=android \ | ||||
|         -DCMAKE_BUILD_TYPE=Release \ | ||||
|         -DSELECTED_BUILD_TARGETS="deqp-egl" \ | ||||
|         ${EXTRA_CMAKE_ARGS:-} | ||||
|     ninja modules/egl/deqp-egl | ||||
|     mv modules/egl/deqp-egl{,-android} | ||||
|   else | ||||
|     # When including EGL/X11 testing, do that build first and save off its | ||||
|     # deqp-egl binary. | ||||
|     cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|         -DDEQP_TARGET=x11_egl_glx \ | ||||
|         -DCMAKE_BUILD_TYPE=Release \ | ||||
|         -DSELECTED_BUILD_TARGETS="deqp-egl" \ | ||||
|         ${EXTRA_CMAKE_ARGS:-} | ||||
|     ninja modules/egl/deqp-egl | ||||
|     mv modules/egl/deqp-egl{,-x11} | ||||
|  | ||||
|     cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|         -DDEQP_TARGET=wayland \ | ||||
|         -DCMAKE_BUILD_TYPE=Release \ | ||||
|         -DSELECTED_BUILD_TARGETS="deqp-egl" \ | ||||
|         ${EXTRA_CMAKE_ARGS:-} | ||||
|     ninja modules/egl/deqp-egl | ||||
|     mv modules/egl/deqp-egl{,-wayland} | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|       -DDEQP_TARGET=${DEQP_TARGET} \ | ||||
|       -DCMAKE_BUILD_TYPE=Release \ | ||||
|       -DSELECTED_BUILD_TARGETS="${CMAKE_SBT}" \ | ||||
|       ${EXTRA_CMAKE_ARGS:-} | ||||
|  | ||||
| # Make sure `default` doesn't silently stop detecting one of the platforms we care about | ||||
| if [ "${DEQP_TARGET}" = 'default' ]; then | ||||
|   grep -q DEQP_SUPPORT_WAYLAND=1 build.ninja | ||||
|   grep -q DEQP_SUPPORT_X11=1 build.ninja | ||||
|   grep -q DEQP_SUPPORT_XCB=1 build.ninja | ||||
| fi | ||||
|  | ||||
| ninja "${deqp_build_targets[@]}" | ||||
|  | ||||
| if [ "$DEQP_API" != tools ]; then | ||||
|     # Copy out the mustpass lists we want. | ||||
|     mkdir -p mustpass | ||||
|  | ||||
|     if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then | ||||
|         for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do | ||||
|             cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \ | ||||
|                 >> mustpass/vk-main.txt | ||||
|         done | ||||
|     fi | ||||
|  | ||||
|     if [ "${DEQP_API}" = 'GL' ]; then | ||||
|         cp \ | ||||
|             /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass/main/*-main.txt \ | ||||
|             mustpass/ | ||||
|         cp \ | ||||
|             /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gl/khronos_mustpass_single/main/*-single.txt \ | ||||
|             mustpass/ | ||||
|     fi | ||||
|  | ||||
|     if [ "${DEQP_API}" = 'GLES' ]; then | ||||
|         cp \ | ||||
|             /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/aosp_mustpass/main/*.txt \ | ||||
|             mustpass/ | ||||
|         cp \ | ||||
|             /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/egl/aosp_mustpass/main/egl-main.txt \ | ||||
|             mustpass/ | ||||
|         cp \ | ||||
|             /VK-GL-CTS/external/openglcts/data/gl_cts/data/mustpass/gles/khronos_mustpass/main/*-main.txt \ | ||||
|             mustpass/ | ||||
|     fi | ||||
|  | ||||
|     # Compress the caselists, since Vulkan's in particular are gigantic; higher | ||||
|     # compression levels provide no real measurable benefit. | ||||
|     zstd -1 --rm mustpass/*.txt | ||||
| fi | ||||
|  | ||||
| if [ "$DEQP_API" = tools ]; then | ||||
|     # Save *some* executor utils, but otherwise strip things down | ||||
|     # to reduce the deqp build size: | ||||
|     mv executor/testlog-to-* . | ||||
|     rm -rf executor | ||||
| fi | ||||
|  | ||||
| # Remove other mustpass files, since we saved off the ones we wanted to convenient locations above. | ||||
| rm -rf assets/**/mustpass/ | ||||
| rm -rf external/**/mustpass/ | ||||
| rm -rf external/vulkancts/modules/vulkan/vk-main* | ||||
| rm -rf external/vulkancts/modules/vulkan/vk-default | ||||
|  | ||||
| rm -rf external/openglcts/modules/cts-runner | ||||
| rm -rf modules/internal | ||||
| rm -rf execserver | ||||
| rm -rf framework | ||||
| find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \; | ||||
| if [ "${DEQP_API}" = 'VK' ] || [ "${DEQP_API}" = 'VK-main' ]; then | ||||
|   ${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk | ||||
| fi | ||||
| if [ "${DEQP_API}" = 'GL' ] || [ "${DEQP_API}" = 'GLES' ]; then | ||||
|   ${STRIP_CMD:-strip} external/openglcts/modules/glcts | ||||
| fi | ||||
| if [ "${DEQP_API}" = 'GLES' ]; then | ||||
|   ${STRIP_CMD:-strip} modules/*/deqp-* | ||||
| fi | ||||
| du -sh ./* | ||||
| popd | ||||
|  | ||||
| section_end deqp-$deqp_api | ||||
| @@ -1,19 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_BUILD_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start directx-headers "Building directx-headers" | ||||
|  | ||||
| git clone https://github.com/microsoft/DirectX-Headers -b v1.614.1 --depth 1 | ||||
| pushd DirectX-Headers | ||||
| meson setup build --backend=ninja --buildtype=release -Dbuild-test=false ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C build | ||||
| popd | ||||
| rm -rf DirectX-Headers | ||||
|  | ||||
| section_end directx-headers | ||||
| @@ -1,39 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime. | ||||
| # shellcheck disable=SC2034 # Variables are used in scripts called from here | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # Install fluster in /usr/local. | ||||
|  | ||||
| FLUSTER_REVISION="e997402978f62428fffc8e5a4a709690d9ca9bc5" | ||||
|  | ||||
| git clone https://github.com/fluendo/fluster.git --single-branch --no-checkout | ||||
|  | ||||
| pushd fluster || exit | ||||
| git checkout ${FLUSTER_REVISION} | ||||
| popd || exit | ||||
|  | ||||
| if [ "${SKIP_UPDATE_FLUSTER_VECTORS}" != 1 ]; then | ||||
|     # Download the necessary vectors: H264, H265 and VP9 | ||||
|     # When updating FLUSTER_REVISION, make sure to update the vectors if necessary or | ||||
|     # fluster-runner will report Missing results. | ||||
|     fluster/fluster.py download \ | ||||
| 	JVT-AVC_V1 JVT-FR-EXT JVT-MVC JVT-SVC_V1 \ | ||||
| 	JCT-VC-3D-HEVC JCT-VC-HEVC_V1 JCT-VC-MV-HEVC JCT-VC-RExt JCT-VC-SCC JCT-VC-SHVC \ | ||||
| 	VP9-TEST-VECTORS-HIGH VP9-TEST-VECTORS | ||||
|  | ||||
|     # Build fluster vectors archive and upload it | ||||
|     tar --zstd -cf "vectors.tar.zst" fluster/resources/ | ||||
|     s3_upload vectors.tar.zst "https://${S3_PATH_FLUSTER}/" | ||||
|  | ||||
|     touch /lava-files/done | ||||
|     s3_upload /lava-files/done "https://${S3_PATH_FLUSTER}/" | ||||
|  | ||||
|     # Don't include the vectors in the rootfs | ||||
|     rm -fr fluster/resources/* | ||||
| fi | ||||
|  | ||||
| mkdir -p "${ROOTFS}/usr/local/" | ||||
| mv fluster "${ROOTFS}/usr/local/" | ||||
|  | ||||
| @@ -1,23 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start fossilize "Building fossilize" | ||||
|  | ||||
| git clone https://github.com/ValveSoftware/Fossilize.git | ||||
| cd Fossilize | ||||
| git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327 | ||||
| git submodule update --init | ||||
| mkdir build | ||||
| cd build | ||||
| cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release | ||||
| ninja -C . install | ||||
| cd ../.. | ||||
| rm -rf Fossilize | ||||
|  | ||||
| section_end fossilize | ||||
| @@ -1,23 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start gfxreconstruct "Building gfxreconstruct" | ||||
|  | ||||
| GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae | ||||
|  | ||||
| git clone https://github.com/LunarG/gfxreconstruct.git \ | ||||
|     --single-branch \ | ||||
|     -b master \ | ||||
|     --no-checkout \ | ||||
|     /gfxreconstruct | ||||
| pushd /gfxreconstruct | ||||
| git checkout "$GFXRECONSTRUCT_VERSION" | ||||
| git submodule update --init | ||||
| git submodule update | ||||
| cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=/gfxreconstruct/build -DBUILD_WERROR=OFF | ||||
| cmake --build _build --parallel --target tools/{replay,info}/install/strip | ||||
| find . -not -path './build' -not -path './build/*' -delete | ||||
| popd | ||||
|  | ||||
| section_end gfxreconstruct | ||||
| @@ -1,32 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC1091  # the path is created by the script | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start kdl "Building kdl" | ||||
|  | ||||
| KDL_REVISION="cbbe5fd54505fd03ee34f35bfd16794f0c30074f" | ||||
| KDL_CHECKOUT_DIR="/tmp/ci-kdl.git" | ||||
|  | ||||
| mkdir -p ${KDL_CHECKOUT_DIR} | ||||
| pushd ${KDL_CHECKOUT_DIR} | ||||
| git init | ||||
| git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git | ||||
| git fetch --depth 1 origin ${KDL_REVISION} | ||||
| git checkout FETCH_HEAD | ||||
| popd | ||||
|  | ||||
| # Run venv in a subshell, so we don't accidentally leak the venv state into | ||||
| # calling scripts | ||||
| ( | ||||
| 	python3 -m venv /ci-kdl | ||||
| 	source /ci-kdl/bin/activate && | ||||
| 	pushd ${KDL_CHECKOUT_DIR} && | ||||
| 	pip install -r requirements.txt && | ||||
| 	pip install . && | ||||
| 	popd | ||||
| ) | ||||
|  | ||||
| rm -rf ${KDL_CHECKOUT_DIR} | ||||
|  | ||||
| section_end kdl | ||||
| @@ -1,35 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start libclc "Building libclc" | ||||
|  | ||||
| export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}" | ||||
| LLVM_TAG="llvmorg-15.0.7" | ||||
|  | ||||
| $LLVM_CONFIG --version | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
| git clone \ | ||||
|     https://github.com/llvm/llvm-project \ | ||||
|     --depth 1 \ | ||||
|     -b "${LLVM_TAG}" \ | ||||
|     /llvm-project | ||||
|  | ||||
| mkdir /libclc | ||||
| pushd /libclc | ||||
| cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG="$LLVM_CONFIG" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv | ||||
| ninja | ||||
| ninja install | ||||
| popd | ||||
|  | ||||
| # Work around cmake vs. debian packaging. | ||||
| mkdir -p /usr/lib/clc | ||||
| ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/ | ||||
| ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/ | ||||
|  | ||||
| du -sh ./* | ||||
| rm -rf /libclc /llvm-project | ||||
|  | ||||
| section_end libclc | ||||
| @@ -1,21 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # Script used for Android and Fedora builds (Debian builds get their libdrm version | ||||
| # from https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo - see PKG_REPO_REV) | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start libdrm "Building libdrm" | ||||
|  | ||||
| export LIBDRM_VERSION=libdrm-2.4.122 | ||||
|  | ||||
| curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz | ||||
| tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz | ||||
| cd "$LIBDRM_VERSION" | ||||
| meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C build | ||||
| cd .. | ||||
| rm -rf "$LIBDRM_VERSION" | ||||
|  | ||||
| section_end libdrm | ||||
| @@ -1,30 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start llvm-spirv "Building LLVM-SPIRV-Translator" | ||||
|  | ||||
| if [ "${LLVM_VERSION:?llvm version not set}" -ge 18 ]; then | ||||
|   VER="${LLVM_VERSION}.1.0" | ||||
| else | ||||
|   VER="${LLVM_VERSION}.0.0" | ||||
| fi | ||||
|  | ||||
| curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     -O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${VER}.tar.gz" | ||||
| tar -xvf "v${VER}.tar.gz" && rm "v${VER}.tar.gz" | ||||
|  | ||||
| mkdir "SPIRV-LLVM-Translator-${VER}/build" | ||||
| pushd "SPIRV-LLVM-Translator-${VER}/build" | ||||
| cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr | ||||
| ninja | ||||
| ninja install | ||||
| # For some reason llvm-spirv is not installed by default | ||||
| ninja llvm-spirv | ||||
| cp tools/llvm-spirv/llvm-spirv /usr/bin/ | ||||
| popd | ||||
|  | ||||
| du -sh "SPIRV-LLVM-Translator-${VER}" | ||||
| rm -rf "SPIRV-LLVM-Translator-${VER}" | ||||
|  | ||||
| section_end llvm-spirv | ||||
| @@ -1,32 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # ALPINE_X86_64_BUILD_TAG | ||||
| # DEBIAN_BASE_TAG | ||||
| # DEBIAN_BUILD_TAG | ||||
| # FEDORA_X86_64_BUILD_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| uncollapsed_section_start mold "Building mold" | ||||
|  | ||||
| MOLD_VERSION="2.32.0" | ||||
|  | ||||
| git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git | ||||
| pushd mold | ||||
|  | ||||
| cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON . | ||||
| cmake --build . --parallel "${FDO_CI_CONCURRENT:-4}" | ||||
| cmake --install . --strip | ||||
|  | ||||
| # Always use mold from now on | ||||
| find /usr/bin \( -name '*-ld' -o -name 'ld' \) \ | ||||
|   -exec ln -sf /usr/local/bin/ld.mold {} \; \ | ||||
|   -exec ls -l {} + | ||||
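|  | ||||
| # Illustrative sanity check, not in the original script: the plain ld should | ||||
| # now report mold. | ||||
| ld --version | head -n 1 | ||||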
|  | ||||
| popd | ||||
| rm -rf mold | ||||
|  | ||||
| section_end mold | ||||
| @@ -1,29 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_GL_TAG | ||||
|  | ||||
| set -ex -o pipefail | ||||
|  | ||||
| uncollapsed_section_start ninetests "Building Nine tests" | ||||
|  | ||||
| ### Be careful when editing anything below this line | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
| git clone https://github.com/axeldavy/Xnine.git /Xnine | ||||
| mkdir /Xnine/build | ||||
| pushd /Xnine/build | ||||
| git checkout c64753d224c08006bcdcfa7880ada826f27164b1 | ||||
|  | ||||
| cmake .. -DBUILD_TESTS=1 -DWITH_DRI3=1 -DD3DADAPTER9_LOCATION=/install/lib/d3d/d3dadapter9.so | ||||
| make | ||||
|  | ||||
| mkdir -p /NineTests/ | ||||
| mv NineTests/NineTests /NineTests/ | ||||
|  | ||||
| popd | ||||
| rm -rf /Xnine | ||||
|  | ||||
| section_end ninetests | ||||
| @@ -1,38 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start piglit "Building piglit" | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| REV="0ecdebb0f5927728ddeeb851639a559b0f7d6590" | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit | ||||
| pushd /piglit | ||||
| git checkout "$REV" | ||||
| patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff | ||||
| cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS ${EXTRA_CMAKE_ARGS:-} | ||||
| ninja ${PIGLIT_BUILD_TARGETS:-} | ||||
| find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) \ | ||||
|        ! -name 'include_test.h' -exec rm -rf {} \; | ||||
| rm -rf target_api | ||||
| if [ "${PIGLIT_BUILD_TARGETS:-}" = "piglit_replayer" ]; then | ||||
|     find . -depth \ | ||||
|          ! -regex "^\.$" \ | ||||
|          ! -regex "^\.\/piglit.*" \ | ||||
|          ! -regex "^\.\/framework.*" \ | ||||
|          ! -regex "^\.\/bin$" \ | ||||
|          ! -regex "^\.\/bin\/replayer\.py" \ | ||||
|          ! -regex "^\.\/templates.*" \ | ||||
|          ! -regex "^\.\/tests$" \ | ||||
|          ! -regex "^\.\/tests\/replay\.py" \ | ||||
|          -exec rm -rf {} \; 2>/dev/null | ||||
| fi | ||||
| popd | ||||
|  | ||||
| section_end piglit | ||||
| @@ -1,38 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Note that this script is not actually "building" Rust; build- is simply the | ||||
| # naming convention for the shared helpers that install things into our containers. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start rust "Building Rust toolchain" | ||||
|  | ||||
| # Pick a specific snapshot from rustup so the compiler doesn't drift on us. | ||||
| RUST_VERSION=1.78.0-2024-05-02 | ||||
|  | ||||
| # For rust in Mesa, we use rustup to install.  This lets us pick an arbitrary | ||||
| # version of the compiler, rather than whatever the container's Debian comes | ||||
| # with. | ||||
| curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \ | ||||
|     --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \ | ||||
|       --default-toolchain $RUST_VERSION \ | ||||
|       --profile minimal \ | ||||
|       -y | ||||
|  | ||||
| # Make rustup tools available in the PATH environment variable | ||||
| # shellcheck disable=SC1091 | ||||
| . "$HOME/.cargo/env" | ||||
|  | ||||
| rustup component add clippy rustfmt | ||||
|  | ||||
| # Set up a config file for cross compiling -- cargo needs your system cc for | ||||
| # linking in cross builds, but doesn't know which one you want it to use. | ||||
| cat > "$HOME/.cargo/config" <<EOF | ||||
| [target.armv7-unknown-linux-gnueabihf] | ||||
| linker = "arm-linux-gnueabihf-gcc" | ||||
|  | ||||
| [target.aarch64-unknown-linux-gnu] | ||||
| linker = "aarch64-linux-gnu-gcc" | ||||
| EOF | ||||
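|  | ||||
| # Illustrative usage, not part of the original script: with the linkers | ||||
| # configured above, a cross build would look like | ||||
| #   rustup target add armv7-unknown-linux-gnueabihf | ||||
| #   cargo build --target=armv7-unknown-linux-gnueabihf | ||||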
|  | ||||
| section_end rust | ||||
| @@ -1,18 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_BUILD_TAG | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start shader-db "Building shader-db" | ||||
|  | ||||
| pushd /usr/local | ||||
| git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1 | ||||
| rm -rf shader-db/.git | ||||
| cd shader-db | ||||
| make | ||||
| popd | ||||
|  | ||||
| section_end shader-db | ||||
| @@ -1,104 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # SPDX-License-Identifier: MIT | ||||
| # | ||||
| # Copyright © 2022 Collabora Limited | ||||
| # Author: Guilherme Gallo <guilherme.gallo@collabora.com> | ||||
| # | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start skqp "Building skqp" | ||||
|  | ||||
| SKQP_BRANCH=android-cts-12.1_r5 | ||||
|  | ||||
| SCRIPT_DIR="$(pwd)/.gitlab-ci/container" | ||||
| SKQP_PATCH_DIR="${SCRIPT_DIR}/patches" | ||||
| BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn" | ||||
|  | ||||
| case "$DEBIAN_ARCH" in | ||||
|   amd64) | ||||
|     SKQP_ARCH=x64 | ||||
|     ;; | ||||
|   armhf) | ||||
|     SKQP_ARCH=arm | ||||
|     ;; | ||||
|   arm64) | ||||
|     SKQP_ARCH=arm64 | ||||
|     ;; | ||||
| esac | ||||
|  | ||||
| SKIA_DIR=${SKIA_DIR:-$(mktemp -d)} | ||||
| SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH} | ||||
| SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp} | ||||
| SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets" | ||||
| SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms) | ||||
|  | ||||
| create_gn_args() { | ||||
|     # gn can be configured to cross-compile skia and its tools. | ||||
|     # Setting target_cpu is important to guarantee the build targets the | ||||
|     # intended machine. | ||||
|     cp "${BASE_ARGS_GN_FILE}" "${SKQP_OUT_DIR}"/args.gn | ||||
|     echo "target_cpu = \"${SKQP_ARCH}\"" >> "${SKQP_OUT_DIR}"/args.gn | ||||
| } | ||||
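|  | ||||
| # For illustration, not in the original file: with SKQP_ARCH=arm64 the | ||||
| # generated args.gn ends with the line | ||||
| #   target_cpu = "arm64" | ||||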
|  | ||||
|  | ||||
| download_skia_source() { | ||||
|     if [ -z ${SKIA_DIR+x} ] | ||||
|     then | ||||
|         return 1 | ||||
|     fi | ||||
|  | ||||
|     # Skia cloned from https://android.googlesource.com/platform/external/skqp | ||||
|     # has all the needed assets tracked in git | ||||
|     SKQP_REPO=https://android.googlesource.com/platform/external/skqp | ||||
|  | ||||
|     git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}" | ||||
| } | ||||
|  | ||||
| download_skia_source | ||||
|  | ||||
| pushd "${SKIA_DIR}" | ||||
|  | ||||
| # Apply all skqp patches for Mesa CI | ||||
| cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch | | ||||
|     patch -p1 | ||||
|  | ||||
| # Hack for the skqp build: make plain clang/clang++ visible in /usr/bin | ||||
| pushd /usr/bin/ | ||||
| ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang" clang | ||||
| ln -s "../lib/llvm-${LLVM_VERSION}/bin/clang++" clang++ | ||||
| popd | ||||
|  | ||||
| # Fetch the build tools needed to build skia/skqp. | ||||
| # Basically, this clones repositories at the commit SHAs listed in the | ||||
| # ${SKIA_DIR}/DEPS file. | ||||
| python tools/git-sync-deps | ||||
|  | ||||
| mkdir -p "${SKQP_OUT_DIR}" | ||||
| mkdir -p "${SKQP_INSTALL_DIR}" | ||||
|  | ||||
| create_gn_args | ||||
|  | ||||
| # Build and install skqp binaries | ||||
| bin/gn gen "${SKQP_OUT_DIR}" | ||||
|  | ||||
| for BINARY in "${SKQP_BINARIES[@]}" | ||||
| do | ||||
|     /usr/bin/ninja -C "${SKQP_OUT_DIR}" "${BINARY}" | ||||
|     # Strip the binary, since gn does not strip it even when `is_debug == false` | ||||
|     ${STRIP_CMD:-strip} "${SKQP_OUT_DIR}/${BINARY}" | ||||
|     install -m 0755 "${SKQP_OUT_DIR}/${BINARY}" "${SKQP_INSTALL_DIR}" | ||||
| done | ||||
|  | ||||
| # Move assets to the target directory, which will reside in rootfs. | ||||
| mv platform_tools/android/apps/skqp/src/main/assets/ "${SKQP_ASSETS_DIR}" | ||||
|  | ||||
| popd | ||||
| rm -Rf "${SKIA_DIR}" | ||||
|  | ||||
| set +ex | ||||
|  | ||||
| section_end skqp | ||||
| @@ -1,64 +0,0 @@ | ||||
| cc = "clang" | ||||
| cxx = "clang++" | ||||
|  | ||||
| extra_cflags = [ | ||||
|         "-Wno-error", | ||||
|  | ||||
|         "-DSK_ENABLE_DUMP_GPU", | ||||
|         "-DSK_BUILD_FOR_SKQP" | ||||
|     ] | ||||
| extra_cflags_cc = [ | ||||
|         "-Wno-error", | ||||
|  | ||||
|         # The skqp build produces a lot of compilation warnings; silence | ||||
|         # most of them to remove clutter and to keep the CI job log from | ||||
|         # exceeding its maximum size. | ||||
|  | ||||
|         # GCC flags | ||||
|         "-Wno-redundant-move", | ||||
|         "-Wno-suggest-override", | ||||
|         "-Wno-class-memaccess", | ||||
|         "-Wno-deprecated-copy", | ||||
|         "-Wno-uninitialized", | ||||
|  | ||||
|         # Clang flags | ||||
|         "-Wno-macro-redefined", | ||||
|         "-Wno-anon-enum-enum-conversion", | ||||
|         "-Wno-suggest-destructor-override", | ||||
|         "-Wno-return-std-move-in-c++11", | ||||
|         "-Wno-extra-semi-stmt", | ||||
|         "-Wno-reserved-identifier", | ||||
|         "-Wno-bitwise-instead-of-logical", | ||||
|         "-Wno-reserved-identifier", | ||||
|         "-Wno-psabi", | ||||
|         "-Wno-unused-but-set-variable", | ||||
|         "-Wno-sizeof-array-div", | ||||
|         "-Wno-string-concatenation", | ||||
|         "-Wno-unsafe-buffer-usage", | ||||
|         "-Wno-switch-default", | ||||
|         "-Wno-cast-function-type-strict", | ||||
|         "-Wno-format", | ||||
|         "-Wno-enum-constexpr-conversion", | ||||
|     ] | ||||
|  | ||||
| cc_wrapper = "ccache" | ||||
|  | ||||
| is_debug = false | ||||
|  | ||||
| skia_enable_fontmgr_android = false | ||||
| skia_enable_fontmgr_empty = true | ||||
| skia_enable_pdf = false | ||||
| skia_enable_skottie = false | ||||
|  | ||||
| skia_skqp_global_error_tolerance = 8 | ||||
| skia_tools_require_resources = true | ||||
|  | ||||
| skia_use_dng_sdk = false | ||||
| skia_use_expat = true | ||||
| skia_use_icu = false | ||||
| skia_use_libheif = false | ||||
| skia_use_lua = false | ||||
| skia_use_piex = false | ||||
| skia_use_vulkan = true | ||||
|  | ||||
| target_os = "linux" | ||||
| @@ -1,29 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start va-tools "Building va-tools" | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
|  | ||||
| git clone \ | ||||
|     https://github.com/intel/libva-utils.git \ | ||||
|     -b 2.18.1 \ | ||||
|     --depth 1 \ | ||||
|     /va-utils | ||||
|  | ||||
| pushd /va-utils | ||||
| # Debian 11's libva is too old. TODO: once this PR lands, refer to the patch instead. | ||||
| curl --fail -L https://github.com/intel/libva-utils/pull/329.patch | git am | ||||
|  | ||||
| meson setup build -D tests=true -Dprefix=/va ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C build | ||||
| popd | ||||
| rm -rf /va-utils | ||||
|  | ||||
| section_end va-tools | ||||
| @@ -1,50 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| set -ex | ||||
|  | ||||
| uncollapsed_section_start vkd3d-proton "Building vkd3d-proton" | ||||
|  | ||||
| VKD3D_PROTON_COMMIT="078f07f588c849c52fa21c8cfdd1c201465b1932" | ||||
|  | ||||
| VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests" | ||||
| VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src" | ||||
| VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-build" | ||||
|  | ||||
| function build_arch { | ||||
|   local arch="$1" | ||||
|  | ||||
|   meson setup                              \ | ||||
|         -Denable_tests=true                \ | ||||
|         --buildtype release                \ | ||||
|         --prefix "$VKD3D_PROTON_DST_DIR"   \ | ||||
|         --strip                            \ | ||||
|         --bindir "x${arch}"                \ | ||||
|         --libdir "x${arch}"                \ | ||||
|         "$VKD3D_PROTON_BUILD_DIR/build.${arch}" | ||||
|  | ||||
|   ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install | ||||
|  | ||||
|   install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/d3d12" | ||||
| } | ||||
|  | ||||
| git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR" | ||||
| pushd "$VKD3D_PROTON_SRC_DIR" | ||||
| git checkout "$VKD3D_PROTON_COMMIT" | ||||
| git submodule update --init --recursive | ||||
| build_arch 64 | ||||
| build_arch 86 | ||||
| mkdir "$VKD3D_PROTON_DST_DIR/tests" | ||||
| cp \ | ||||
|   "tests/test-runner.sh" \ | ||||
|   "tests/d3d12_tests.h" \ | ||||
|   "$VKD3D_PROTON_DST_DIR/tests/" | ||||
| popd | ||||
|  | ||||
| rm -rf "$VKD3D_PROTON_BUILD_DIR" | ||||
| rm -rf "$VKD3D_PROTON_SRC_DIR" | ||||
|  | ||||
| section_end vkd3d-proton | ||||
| @@ -1,25 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start vulkan-validation "Building Vulkan validation layers" | ||||
|  | ||||
| VALIDATION_TAG="snapshot-2025wk15" | ||||
|  | ||||
| git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git | ||||
| pushd Vulkan-ValidationLayers | ||||
| # we don't need to build the SPIRV-Tools executables | ||||
| sed -i scripts/known_good.json -e 's/SPIRV_SKIP_EXECUTABLES=OFF/SPIRV_SKIP_EXECUTABLES=ON/' | ||||
| python3 scripts/update_deps.py --dir external --config release --generator Ninja --optional tests | ||||
| cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build | ||||
| ninja -C build -j"${FDO_CI_CONCURRENT:-4}" | ||||
| cmake --install build --strip | ||||
| popd | ||||
| rm -rf Vulkan-ValidationLayers | ||||
|  | ||||
| section_end vulkan-validation | ||||
| @@ -1,38 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| set -uex | ||||
|  | ||||
| uncollapsed_section_start wayland "Building Wayland" | ||||
|  | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # ALPINE_X86_64_BUILD_TAG | ||||
| # DEBIAN_BASE_TAG | ||||
| # DEBIAN_BUILD_TAG | ||||
| # DEBIAN_TEST_ANDROID_TAG | ||||
| # DEBIAN_TEST_GL_TAG | ||||
| # DEBIAN_TEST_VK_TAG | ||||
| # FEDORA_X86_64_BUILD_TAG | ||||
| # KERNEL_ROOTFS_TAG | ||||
|  | ||||
| export LIBWAYLAND_VERSION="1.21.0" | ||||
| export WAYLAND_PROTOCOLS_VERSION="1.41" | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/wayland/wayland | ||||
| cd wayland | ||||
| git checkout "$LIBWAYLAND_VERSION" | ||||
| meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C _build | ||||
| cd .. | ||||
| rm -rf wayland | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/wayland/wayland-protocols | ||||
| cd wayland-protocols | ||||
| git checkout "$WAYLAND_PROTOCOLS_VERSION" | ||||
| meson setup -Dtests=false _build ${EXTRA_MESON_ARGS:-} | ||||
| meson install -C _build | ||||
| cd .. | ||||
| rm -rf wayland-protocols | ||||
|  | ||||
| section_end wayland | ||||
| @@ -1,24 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # When changing this file, all the linux tags in | ||||
| # .gitlab-ci/image-tags.yml need updating. | ||||
|  | ||||
| set -eu | ||||
|  | ||||
| # Early check for required env variables, relies on `set -u` | ||||
| : "$S3_JWT_FILE_SCRIPT" | ||||
|  | ||||
| if [ -z "$1" ]; then | ||||
|   echo "usage: $(basename "$0") <CONTAINER_CI_JOB_NAME>" 1>&2 | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| CONTAINER_CI_JOB_NAME="$1" | ||||
|  | ||||
| # Tasks to perform before executing the script of a container job | ||||
| eval "$S3_JWT_FILE_SCRIPT" | ||||
| unset S3_JWT_FILE_SCRIPT | ||||
|  | ||||
| trap 'rm -f ${S3_JWT_FILE}' EXIT INT TERM | ||||
|  | ||||
| bash ".gitlab-ci/container/${CONTAINER_CI_JOB_NAME}.sh" | ||||
| @@ -1,12 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| if test -f /etc/debian_version; then | ||||
|     apt-get autoremove -y --purge | ||||
| fi | ||||
|  | ||||
| # Clean up any build cache | ||||
| rm -rf /root/.cache | ||||
|  | ||||
| if test -x /usr/bin/ccache; then | ||||
|     ccache --show-stats | ||||
| fi | ||||
| @@ -1,74 +0,0 @@ | ||||
| #!/bin/sh | ||||
| # When changing this file, you need to bump the following | ||||
| # .gitlab-ci/image-tags.yml tags: | ||||
| # DEBIAN_BUILD_TAG | ||||
|  | ||||
| if test -x /usr/bin/ccache; then | ||||
|     if test -f /etc/debian_version; then | ||||
|         CCACHE_PATH=/usr/lib/ccache | ||||
|     elif test -f /etc/alpine-release; then | ||||
|         CCACHE_PATH=/usr/lib/ccache/bin | ||||
|     else | ||||
|         CCACHE_PATH=/usr/lib64/ccache | ||||
|     fi | ||||
|  | ||||
|     # Common setup among container builds before we get to building code. | ||||
|  | ||||
|     export CCACHE_COMPILERCHECK=content | ||||
|     export CCACHE_COMPRESS=true | ||||
|     export CCACHE_DIR="/cache/$CI_PROJECT_NAME/ccache" | ||||
|     export PATH="$CCACHE_PATH:$PATH" | ||||
|  | ||||
|     # CMake ignores $PATH, so we have to force CC/CXX to the ccache versions. | ||||
|     export CC="${CCACHE_PATH}/gcc" | ||||
|     export CXX="${CCACHE_PATH}/g++" | ||||
|  | ||||
|     ccache --show-stats | ||||
| fi | ||||
|  | ||||
| # Make a wrapper script for ninja that always includes the -j flag | ||||
| { | ||||
|     echo '#!/bin/sh -x' | ||||
|     # shellcheck disable=SC2016 | ||||
|     echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' | ||||
| } > /usr/local/bin/ninja | ||||
| chmod +x /usr/local/bin/ninja | ||||
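|  | ||||
| # Illustrative effect, not part of the original script: a plain "ninja install" | ||||
| # in later container builds now actually runs | ||||
| #   /usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} install | ||||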
|  | ||||
| # Set MAKEFLAGS so that all make invocations in container builds include the | ||||
| # flags (doesn't apply to non-container builds, but we don't run make there) | ||||
| export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}" | ||||
|  | ||||
| # Make wget retry when a download fails or times out | ||||
| echo -e "retry_connrefused = on\n" \ | ||||
|         "read_timeout = 300\n" \ | ||||
|         "tries = 4\n" \ | ||||
|         "retry_on_host_error = on\n" \ | ||||
|         "retry_on_http_error = 429,500,502,503,504\n" \ | ||||
|         "wait_retry = 32" >> /etc/wgetrc | ||||
|  | ||||
| # Ensure that rust tools are in PATH if they exist | ||||
| CARGO_ENV_FILE="$HOME/.cargo/env" | ||||
| if [ -f "$CARGO_ENV_FILE" ]; then | ||||
|     # shellcheck disable=SC1090 | ||||
|     source "$CARGO_ENV_FILE" | ||||
| fi | ||||
|  | ||||
| ci_tag_early_checks() { | ||||
|     # Runs the first part of the build script to perform the tag check only | ||||
|     uncollapsed_section_switch "ci_tag_early_checks" "Ensuring component versions match declared tags in CI builds" | ||||
|     echo "[Structured Tagging] Checking components: ${CI_BUILD_COMPONENTS}" | ||||
|     # shellcheck disable=SC2086 | ||||
|     for component in ${CI_BUILD_COMPONENTS}; do | ||||
|         bin/ci/update_tag.py --check ${component} || exit 1 | ||||
|     done | ||||
|     echo "[Structured Tagging] Components check done" | ||||
|     section_end "ci_tag_early_checks" | ||||
| } | ||||
|  | ||||
| # Check if each declared tag component is up to date before building | ||||
| if [ -n "${CI_BUILD_COMPONENTS:-}" ]; then | ||||
|     # Remove any duplicates by splitting on whitespace, sorting, then joining back | ||||
|     CI_BUILD_COMPONENTS="$(echo "${CI_BUILD_COMPONENTS}" | xargs -n1 | sort -u | xargs)" | ||||
|  | ||||
|     ci_tag_early_checks | ||||
| fi | ||||
| @@ -1,37 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| ndk=$1 | ||||
| arch=$2 | ||||
| cpu_family=$3 | ||||
| cpu=$4 | ||||
| cross_file="/cross_file-$arch.txt" | ||||
| sdk_version=$5 | ||||
|  | ||||
| # armv7 has the toolchain split between two names. | ||||
| arch2=${6:-$2} | ||||
|  | ||||
| # Note that we disable C++ exceptions, because Mesa doesn't use exceptions, | ||||
| # and allowing them in code generation means we get unwind symbols that break | ||||
| # the libEGL and driver symbol tests. | ||||
|  | ||||
| cat > "$cross_file" <<EOF | ||||
| [binaries] | ||||
| ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ar' | ||||
| c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables'] | ||||
| cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '--start-no-unused-arguments', '-static-libstdc++', '--end-no-unused-arguments'] | ||||
| c_ld = 'lld' | ||||
| cpp_ld = 'lld' | ||||
| strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip' | ||||
| pkg-config = ['/usr/bin/pkgconf'] | ||||
|  | ||||
| [host_machine] | ||||
| system = 'android' | ||||
| cpu_family = '$cpu_family' | ||||
| cpu = '$cpu' | ||||
| endian = 'little' | ||||
|  | ||||
| [properties] | ||||
| needs_exe_wrapper = true | ||||
| pkg_config_libdir = '/usr/local/lib/${arch2}/pkgconfig/:/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/${arch2}/pkgconfig/' | ||||
|  | ||||
| EOF | ||||
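|  | ||||
| # Illustrative usage, not part of the original script: a Mesa build would | ||||
| # consume the generated file with something like | ||||
| #   meson setup _build --cross-file "/cross_file-$arch.txt" | ||||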
| @@ -1,40 +0,0 @@ | ||||
| #!/bin/sh | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| # Makes a .pc file in the Android NDK for meson to find its libraries. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| ndk="$1" | ||||
| pc="$2" | ||||
| cflags="$3" | ||||
| libs="$4" | ||||
| version="$5" | ||||
| sdk_version="$6" | ||||
|  | ||||
| sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot | ||||
|  | ||||
| for arch in \ | ||||
|         x86_64-linux-android \ | ||||
|         i686-linux-android \ | ||||
|         aarch64-linux-android \ | ||||
|         arm-linux-androideabi; do | ||||
|     pcdir=$sysroot/usr/lib/$arch/pkgconfig | ||||
|     mkdir -p $pcdir | ||||
|  | ||||
|     cat >$pcdir/$pc <<EOF | ||||
| prefix=$sysroot | ||||
| exec_prefix=$sysroot | ||||
| libdir=$sysroot/usr/lib/$arch/$sdk_version | ||||
| sharedlibdir=$sysroot/usr/lib/$arch | ||||
| includedir=$sysroot/usr/include | ||||
|  | ||||
| Name: zlib | ||||
| Description: zlib compression library | ||||
| Version: $version | ||||
|  | ||||
| Requires: | ||||
| Libs: -L$sysroot/usr/lib/$arch/$sdk_version $libs | ||||
| Cflags: -I$sysroot/usr/include $cflags | ||||
| EOF | ||||
| done | ||||
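|  | ||||
| # Illustrative check, not part of the original script: the generated .pc file | ||||
| # can be queried directly, e.g. for aarch64: | ||||
| #   PKG_CONFIG_LIBDIR=$sysroot/usr/lib/aarch64-linux-android/pkgconfig \ | ||||
| #       pkgconf --modversion zlib | ||||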
| @@ -1,54 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| arch=$1 | ||||
| cross_file="/cross_file-$arch.txt" | ||||
| meson env2mfile --cross --debarch "$arch" -o "$cross_file" | ||||
|  | ||||
| # Explicitly set ccache path for cross compilers | ||||
| sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file" | ||||
|  | ||||
| # Rely on qemu-user being configured in binfmt_misc on the host | ||||
| # shellcheck disable=SC1003 # somehow this sed doesn't seem to work for me locally | ||||
| sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file" | ||||
|  | ||||
| # Add a line for rustc, which meson env2mfile is missing. | ||||
| cc=$(sed -n "s|^c\s*=\s*\[?'\(.*\)'\]?|\1|p" < "$cross_file") | ||||
|  | ||||
| if [[ "$arch" = "arm64" ]]; then | ||||
|     rust_target=aarch64-unknown-linux-gnu | ||||
| elif [[ "$arch" = "armhf" ]]; then | ||||
|     rust_target=armv7-unknown-linux-gnueabihf | ||||
| elif [[ "$arch" = "i386" ]]; then | ||||
|     rust_target=i686-unknown-linux-gnu | ||||
| elif [[ "$arch" = "ppc64el" ]]; then | ||||
|     rust_target=powerpc64le-unknown-linux-gnu | ||||
| elif [[ "$arch" = "s390x" ]]; then | ||||
|     rust_target=s390x-unknown-linux-gnu | ||||
| else | ||||
|     echo "Needs rustc target mapping" | ||||
| fi | ||||
|  | ||||
| # shellcheck disable=SC1003 # somehow this sed doesn't seem to work for me locally | ||||
| sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file" | ||||
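|  | ||||
| # Illustrative result, not part of the original script: on arm64 the appended | ||||
| # line would look something like | ||||
| #   rust = ['rustc', '--target=aarch64-unknown-linux-gnu', '-C', 'linker=/usr/lib/ccache/aarch64-linux-gnu-gcc'] | ||||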
|  | ||||
| # Set up cmake cross compile toolchain file for dEQP builds | ||||
| toolchain_file="/toolchain-$arch.cmake" | ||||
| if [[ "$arch" = "arm64" ]]; then | ||||
|     GCC_ARCH="aarch64-linux-gnu" | ||||
|     DE_CPU="DE_CPU_ARM_64" | ||||
| elif [[ "$arch" = "armhf" ]]; then | ||||
|     GCC_ARCH="arm-linux-gnueabihf" | ||||
|     DE_CPU="DE_CPU_ARM" | ||||
| fi | ||||
|  | ||||
| if [[ -n "$GCC_ARCH" ]]; then | ||||
|     { | ||||
|         echo "set(CMAKE_SYSTEM_NAME Linux)"; | ||||
|         echo "set(CMAKE_SYSTEM_PROCESSOR arm)"; | ||||
|         echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)"; | ||||
|         echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)"; | ||||
|         echo "set(CMAKE_CXX_FLAGS_INIT \"-Wno-psabi\")";  # makes ABI warnings quiet for ARMv7 | ||||
|         echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkgconf\")"; | ||||
|         echo "set(DE_CPU $DE_CPU)"; | ||||
|     } > "$toolchain_file" | ||||
| fi | ||||
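|  | ||||
| # Illustrative usage, not part of the original script: a dEQP build would | ||||
| # point cmake at this file with something like | ||||
| #   cmake -S . -B build -DCMAKE_TOOLCHAIN_FILE="/toolchain-$arch.cmake" | ||||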
| @@ -1,95 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
| # shellcheck disable=SC2086 # we want word splitting | ||||
|  | ||||
| set -e | ||||
|  | ||||
| . .gitlab-ci/setup-test-env.sh | ||||
|  | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
| : "${LLVM_VERSION:?llvm version not set!}" | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| EPHEMERAL=( | ||||
| ) | ||||
|  | ||||
| DEPS=( | ||||
|     "crossbuild-essential-$arch" | ||||
|     "pkgconf:$arch" | ||||
|     "libasan8:$arch" | ||||
|     "libdrm-dev:$arch" | ||||
|     "libelf-dev:$arch" | ||||
|     "libexpat1-dev:$arch" | ||||
|     "libffi-dev:$arch" | ||||
|     "libpciaccess-dev:$arch" | ||||
|     "libstdc++6:$arch" | ||||
|     "libvulkan-dev:$arch" | ||||
|     "libx11-dev:$arch" | ||||
|     "libx11-xcb-dev:$arch" | ||||
|     "libxcb-dri2-0-dev:$arch" | ||||
|     "libxcb-dri3-dev:$arch" | ||||
|     "libxcb-glx0-dev:$arch" | ||||
|     "libxcb-present-dev:$arch" | ||||
|     "libxcb-randr0-dev:$arch" | ||||
|     "libxcb-shm0-dev:$arch" | ||||
|     "libxcb-xfixes0-dev:$arch" | ||||
|     "libxdamage-dev:$arch" | ||||
|     "libxext-dev:$arch" | ||||
|     "libxrandr-dev:$arch" | ||||
|     "libxshmfence-dev:$arch" | ||||
|     "libxxf86vm-dev:$arch" | ||||
|     "libwayland-dev:$arch" | ||||
| ) | ||||
|  | ||||
| dpkg --add-architecture $arch | ||||
|  | ||||
| echo "deb [trusted=yes] https://gitlab.freedesktop.org/gfx-ci/ci-deb-repo/-/raw/${PKG_REPO_REV}/ ${FDO_DISTRIBUTION_VERSION%-*} main" | tee /etc/apt/sources.list.d/gfx-ci_.list | ||||
|  | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove "${DEPS[@]}" "${EPHEMERAL[@]}" \ | ||||
|     $EXTRA_LOCAL_PACKAGES | ||||
|  | ||||
| if [[ $arch != "armhf" ]]; then | ||||
|     # We don't need clang-format for the crossbuilds, but the installed amd64 | ||||
|     # package will conflict with libclang. Uninstall clang-format (and its | ||||
|     # problematic dependency) to fix. | ||||
|     apt-get remove -y "clang-format-${LLVM_VERSION}" "libclang-cpp${LLVM_VERSION}" \ | ||||
|             "llvm-${LLVM_VERSION}-runtime" "llvm-${LLVM_VERSION}-linker-tools" | ||||
|  | ||||
|     # llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only | ||||
|     # with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get | ||||
|     # around this. | ||||
|     apt-get install -y --no-remove --no-install-recommends \ | ||||
|             "libclang-cpp${LLVM_VERSION}:$arch" \ | ||||
|             "libgcc-s1:$arch" \ | ||||
|             "libtinfo-dev:$arch" \ | ||||
|             "libz3-dev:$arch" \ | ||||
|             "llvm-${LLVM_VERSION}:$arch" \ | ||||
|             zlib1g | ||||
| fi | ||||
|  | ||||
| . .gitlab-ci/container/create-cross-file.sh $arch | ||||
|  | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
|  | ||||
| # dependencies where we want a specific version | ||||
| MULTIARCH_PATH=$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH) | ||||
| export EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/${MULTIARCH_PATH}" | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| . .gitlab-ci/container/build-directx-headers.sh | ||||
|  | ||||
| apt-get purge -y "${EPHEMERAL[@]}" | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
|  | ||||
| # This needs to be done after container_post_build.sh, or apt-get breaks in there | ||||
| if [[ $arch != "armhf" ]]; then | ||||
|     apt-get download llvm-"${LLVM_VERSION}"-{dev,tools}:"$arch" | ||||
|     dpkg -i --force-depends llvm-"${LLVM_VERSION}"-*_"${arch}".deb | ||||
|     rm llvm-"${LLVM_VERSION}"-*_"${arch}".deb | ||||
| fi | ||||