Compare commits
	
		
			219 Commits
		
	
	
		
			mesa-22.1.
			...
			mesa_6_4_2
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 3b0c8dcaa3 | ||
|  | d8a6ef40a6 | ||
|  | 7a79373adc | ||
|  | 69dd5a749a | ||
|  | c76125ef03 | ||
|  | 020fa49e9c | ||
|  | 89cc04d6bd | ||
|  | c3104ab838 | ||
|  | 5f7f146ec2 | ||
|  | 4161495074 | ||
|  | 61c58fce3d | ||
|  | ba5e587f1d | ||
|  | cbdfd0d112 | ||
|  | 965e888f49 | ||
|  | 89f3e7f749 | ||
|  | d2e735f2e7 | ||
|  | 5879d1be61 | ||
|  | 546f85daa0 | ||
|  | a47358ce09 | ||
|  | 318e1adaa6 | ||
|  | 2dfa8a9c0b | ||
|  | 333e14b066 | ||
|  | 52c07cddd5 | ||
|  | 978d0eb71b | ||
|  | e26c934961 | ||
|  | 7e76e05f0b | ||
|  | 4fc07ba779 | ||
|  | a8c469c3b6 | ||
|  | 919b606a94 | ||
|  | 112180c2d8 | ||
|  | eccccc8940 | ||
|  | fcb01c7127 | ||
|  | 366d6600d0 | ||
|  | 98ac241a4e | ||
|  | f3cb488333 | ||
|  | 53e595017e | ||
|  | f8be78367d | ||
|  | 89568e0509 | ||
|  | 149675017f | ||
|  | 4d87d19a60 | ||
|  | 7a46bf5e1b | ||
|  | c2cabc6755 | ||
|  | 3fa7668f99 | ||
|  | 2370d426cf | ||
|  | d8eb2d49dd | ||
|  | fcda7131b4 | ||
|  | 6ff7b66f4a | ||
|  | e037150407 | ||
|  | c2e6917e14 | ||
|  | 2048fabe04 | ||
|  | b7da962caf | ||
|  | 040bdbe2e6 | ||
|  | 471c17cc84 | ||
|  | ad9a205ba8 | ||
|  | 45c30dbead | ||
|  | eeb4f1aff9 | ||
|  | 1ea4669fce | ||
|  | a110f0f407 | ||
|  | 4f42aa83f1 | ||
|  | eb72779ef1 | ||
|  | 47604ffc2f | ||
|  | 5889e4b775 | ||
|  | f9d66b2a44 | ||
|  | 2119711dac | ||
|  | fcc21abbc6 | ||
|  | c331aef105 | ||
|  | 2fc89f126a | ||
|  | 22c8686dad | ||
|  | 7a12ad5a08 | ||
|  | c6165ec3b3 | ||
|  | 67ef9d76e7 | ||
|  | aa5f5d54a8 | ||
|  | c18fd3a3bf | ||
|  | d799fbf78e | ||
|  | 48d7f01740 | ||
|  | 59f84d5723 | ||
|  | 6f9ff2af81 | ||
|  | 4591406e21 | ||
|  | 2c6cf3d0aa | ||
|  | 154a8be6a7 | ||
|  | db03e55efa | ||
|  | d038563a60 | ||
|  | 469bd21184 | ||
|  | 13a5235305 | ||
|  | 4e4f6a29f3 | ||
|  | 87855fd59b | ||
|  | 546b020144 | ||
|  | 84663e12d0 | ||
|  | c16bb264e8 | ||
|  | 8ef980af6e | ||
|  | 7cd7a78d72 | ||
|  | 5c57efd6b4 | ||
|  | ea9ed5849f | ||
|  | 1ef0939a64 | ||
|  | d0f9c2887c | ||
|  | ed75287003 | ||
|  | c01c2fb542 | ||
|  | e1d7297812 | ||
|  | b4227f0d98 | ||
|  | c868c1ee14 | ||
|  | a1dcb43658 | ||
|  | cc7b6810e2 | ||
|  | 7b9fecee10 | ||
|  | 8856180e4f | ||
|  | df51dfbdc3 | ||
|  | 2fd1ed1b45 | ||
|  | 66b928e95e | ||
|  | d5e9cca0b2 | ||
|  | c072c17a8c | ||
|  | 8367420be9 | ||
|  | e2a2ed06b2 | ||
|  | 0cc3703188 | ||
|  | fb6c1492fb | ||
|  | 89ad797f9a | ||
|  | fa67028ae7 | ||
|  | 8e1724dfe9 | ||
|  | fbdb5c9b14 | ||
|  | 4b2728f95c | ||
|  | 3a7437399f | ||
|  | 3338f04be7 | ||
|  | 047f7bb11c | ||
|  | 5b25acdac7 | ||
|  | cb16208ec0 | ||
|  | 88107bea56 | ||
|  | b4d25157f2 | ||
|  | c6d1f32ebe | ||
|  | 281f388e03 | ||
|  | c0a0dfeb96 | ||
|  | ab07593563 | ||
|  | 5b08058604 | ||
|  | 3bf13d3c4b | ||
|  | 2b05784084 | ||
|  | 2292262de4 | ||
|  | a9358bb3b0 | ||
|  | b9d111f202 | ||
|  | 20fd105e2a | ||
|  | 1faaa0090a | ||
|  | 23803bd5fb | ||
|  | c000250504 | ||
|  | b854297bab | ||
|  | 3c1b44ad55 | ||
|  | bdab2cc216 | ||
|  | 91ade9ad68 | ||
|  | 7355bbce55 | ||
|  | 1d2671e6cb | ||
|  | 0a405dcc5f | ||
|  | b210ffaadf | ||
|  | b1e8471898 | ||
|  | aefe0dabec | ||
|  | 758c5b9a7e | ||
|  | 44db6d7cb8 | ||
|  | 04f1fdc755 | ||
|  | b181773d02 | ||
|  | 4218cddfb1 | ||
|  | a4e35c21d2 | ||
|  | 5d63323d34 | ||
|  | 419a350499 | ||
|  | db78ec7018 | ||
|  | 291911aeb5 | ||
|  | e15a7d4883 | ||
|  | 2e95fdc5d8 | ||
|  | 6e03f6ed43 | ||
|  | 97790948f6 | ||
|  | a882648023 | ||
|  | b7f8d69bb3 | ||
|  | 5bad790e56 | ||
|  | 382b00f483 | ||
|  | 718bd385ed | ||
|  | 402ca8903f | ||
|  | 25a9c9f370 | ||
|  | d73a838f01 | ||
|  | c72b2fb2db | ||
|  | 5a6ce051ef | ||
|  | e7c2fa9cff | ||
|  | 5919aec5c3 | ||
|  | f5a6175d83 | ||
|  | e41ea35fee | ||
|  | 38cf2b14f4 | ||
|  | 66725dca81 | ||
|  | 59ad040754 | ||
|  | 5e16f314d6 | ||
|  | 2376438410 | ||
|  | 1be13df2c3 | ||
|  | 1f6892527b | ||
|  | e5f6f26c87 | ||
|  | e14b3baca0 | ||
|  | 5eb129de76 | ||
|  | 3f27b6ffeb | ||
|  | 270572e543 | ||
|  | 7b56e1a443 | ||
|  | cfaac5dc4f | ||
|  | 1e5c3e977f | ||
|  | 831b4143f7 | ||
|  | b26b565f91 | ||
|  | f4d1dafc18 | ||
|  | db70c3f9b6 | ||
|  | 384b1d03e4 | ||
|  | e3c1b594ae | ||
|  | 453eb90128 | ||
|  | cb505bf744 | ||
|  | 133112c71a | ||
|  | a66eebc579 | ||
|  | 89ea47182d | ||
|  | 2efe639f26 | ||
|  | 39e4095560 | ||
|  | b19ec5009f | ||
|  | afed3e55ba | ||
|  | 46cf4fa1bd | ||
|  | b4198e821a | ||
|  | 5a1fb7db0e | ||
|  | f16fbf03bd | ||
|  | ec0c42a3be | ||
|  | a4c1895d07 | ||
|  | b2971358ad | ||
|  | 24a36c26fa | ||
|  | 247e27d0b8 | ||
|  | 912bcb2386 | ||
|  | cee416863f | ||
|  | 8e946792dc | 
| @@ -1,18 +0,0 @@ | ||||
| ((nil . ((show-trailing-whitespace . t))) | ||||
|  (prog-mode | ||||
|   (indent-tabs-mode . nil) | ||||
|   (tab-width . 8) | ||||
|   (c-basic-offset . 3) | ||||
|   (c-file-style . "stroustrup") | ||||
|   (fill-column . 78) | ||||
|   (eval . (progn | ||||
| 	    (c-set-offset 'case-label '0) | ||||
| 	    (c-set-offset 'innamespace '0) | ||||
| 	    (c-set-offset 'inline-open '0))) | ||||
|   (whitespace-style face indentation) | ||||
|   (whitespace-line-column . 79) | ||||
|   (eval ignore-errors | ||||
|         (require 'whitespace) | ||||
|         (whitespace-mode 1))) | ||||
|  (makefile-mode (indent-tabs-mode . t)) | ||||
|  ) | ||||
| @@ -1,36 +0,0 @@ | ||||
| # To use this config on your editor, follow the instructions at: | ||||
| # http://editorconfig.org | ||||
|  | ||||
| root = true | ||||
|  | ||||
| [*] | ||||
| charset = utf-8 | ||||
| insert_final_newline = true | ||||
| tab_width = 8 | ||||
|  | ||||
| [*.{c,h,cpp,hpp,cc,hh}] | ||||
| indent_style = space | ||||
| indent_size = 3 | ||||
| max_line_length = 78 | ||||
|  | ||||
| [{Makefile*,*.mk}] | ||||
| indent_style = tab | ||||
|  | ||||
| [*.py] | ||||
| indent_style = space | ||||
| indent_size = 4 | ||||
|  | ||||
| [*.yml] | ||||
| indent_style = space | ||||
| indent_size = 2 | ||||
|  | ||||
| [*.rst] | ||||
| indent_style = space | ||||
| indent_size = 3 | ||||
|  | ||||
| [*.patch] | ||||
| trim_trailing_whitespace = false | ||||
|  | ||||
| [{meson.build,meson_options.txt}] | ||||
| indent_style = space | ||||
| indent_size = 2 | ||||
							
								
								
									
										6
									
								
								.gitattributes
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										6
									
								
								.gitattributes
									
									
									
									
										vendored
									
									
								
							| @@ -1,6 +0,0 @@ | ||||
| *.csv eol=crlf | ||||
| * text=auto | ||||
| *.jpg binary | ||||
| *.png binary | ||||
| *.gif binary | ||||
| *.ico binary | ||||
							
								
								
									
										4
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										4
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -1,4 +0,0 @@ | ||||
| *.pyc | ||||
| *.pyo | ||||
| *.out | ||||
| build | ||||
							
								
								
									
										327
									
								
								.gitlab-ci.yml
									
									
									
									
									
								
							
							
						
						
									
										327
									
								
								.gitlab-ci.yml
									
									
									
									
									
								
							| @@ -1,327 +0,0 @@ | ||||
| variables: | ||||
|   FDO_UPSTREAM_REPO: mesa/mesa | ||||
|   MESA_TEMPLATES_COMMIT: &ci-templates-commit 290b79e0e78eab67a83766f4e9691be554fc4afd | ||||
|   CI_PRE_CLONE_SCRIPT: |- | ||||
|           set -o xtrace | ||||
|           wget -q -O download-git-cache.sh ${CI_PROJECT_URL}/-/raw/${CI_COMMIT_SHA}/.gitlab-ci/download-git-cache.sh | ||||
|           bash download-git-cache.sh | ||||
|           rm download-git-cache.sh | ||||
|           set +o xtrace | ||||
|   CI_JOB_JWT_FILE: /minio_jwt | ||||
|   MINIO_HOST: minio-packet.freedesktop.org | ||||
|   # per-pipeline artifact storage on MinIO | ||||
|   PIPELINE_ARTIFACTS_BASE: ${MINIO_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID} | ||||
|   # per-job artifact storage on MinIO | ||||
|   JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID} | ||||
|   # reference images stored for traces | ||||
|   PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${MINIO_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO" | ||||
|   # Individual CI farm status, set to "offline" to disable jobs | ||||
|   # running on a particular CI farm (ie. for outages, etc): | ||||
|   FD_FARM: "online" | ||||
|   COLLABORA_FARM: "offline" | ||||
|   MICROSOFT_FARM: "offline" | ||||
|  | ||||
| default: | ||||
|   before_script: | ||||
|     - echo -e "\e[0Ksection_start:$(date +%s):unset_env_vars_section[collapsed=true]\r\e[0KUnsetting vulnerable environment variables" | ||||
|     - echo -n "${CI_JOB_JWT}" > "${CI_JOB_JWT_FILE}" | ||||
|     - unset CI_JOB_JWT | ||||
|     - echo -e "\e[0Ksection_end:$(date +%s):unset_env_vars_section\r\e[0K" | ||||
|  | ||||
|   after_script: | ||||
|     - > | ||||
|       set +x | ||||
|  | ||||
|       test -e "${CI_JOB_JWT_FILE}" && | ||||
|       export CI_JOB_JWT="$(<${CI_JOB_JWT_FILE})" && | ||||
|       rm "${CI_JOB_JWT_FILE}" | ||||
|  | ||||
| include: | ||||
|   - project: 'freedesktop/ci-templates' | ||||
|     ref: 34f4ade99434043f88e164933f570301fd18b125 | ||||
|     file: | ||||
|       - '/templates/ci-fairy.yml' | ||||
|   - project: 'freedesktop/ci-templates' | ||||
|     ref: *ci-templates-commit | ||||
|     file: | ||||
|       - '/templates/debian.yml' | ||||
|       - '/templates/fedora.yml' | ||||
|   - local: '.gitlab-ci/image-tags.yml' | ||||
|   - local: '.gitlab-ci/lava/lava-gitlab-ci.yml' | ||||
|   - local: '.gitlab-ci/container/gitlab-ci.yml' | ||||
|   - local: '.gitlab-ci/build/gitlab-ci.yml' | ||||
|   - local: '.gitlab-ci/test/gitlab-ci.yml' | ||||
|   - local: '.gitlab-ci/test-source-dep.yml' | ||||
|   - local: 'src/amd/ci/gitlab-ci.yml' | ||||
|   - local: 'src/broadcom/ci/gitlab-ci.yml' | ||||
|   - local: 'src/etnaviv/ci/gitlab-ci.yml' | ||||
|   - local: 'src/freedreno/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/crocus/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/d3d12/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/i915/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/iris/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/lima/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/llvmpipe/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/nouveau/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/radeonsi/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/softpipe/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/virgl/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/drivers/zink/ci/gitlab-ci.yml' | ||||
|   - local: 'src/gallium/frontends/lavapipe/ci/gitlab-ci.yml' | ||||
|   - local: 'src/intel/ci/gitlab-ci.yml' | ||||
|   - local: 'src/microsoft/ci/gitlab-ci.yml' | ||||
|   - local: 'src/panfrost/ci/gitlab-ci.yml' | ||||
|  | ||||
| stages: | ||||
|   - sanity | ||||
|   - container | ||||
|   - git-archive | ||||
|   - build-x86_64 | ||||
|   - build-misc | ||||
|   - amd | ||||
|   - intel | ||||
|   - nouveau | ||||
|   - arm | ||||
|   - broadcom | ||||
|   - freedreno | ||||
|   - etnaviv | ||||
|   - software-renderer | ||||
|   - layered-backends | ||||
|   - deploy | ||||
|  | ||||
| # Generic rule to not run the job during scheduled pipelines | ||||
| # ---------------------------------------------------------- | ||||
| .scheduled_pipelines-rules: | ||||
|   rules: &ignore_scheduled_pipelines | ||||
|     if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"' | ||||
|     when: never | ||||
|  | ||||
| # YAML anchors for rule conditions | ||||
| # -------------------------------- | ||||
| .rules-anchors: | ||||
|   rules: | ||||
|     # Pipeline for forked project branch | ||||
|     - if: &is-forked-branch '$CI_COMMIT_BRANCH && $CI_PROJECT_NAMESPACE != "mesa"' | ||||
|       when: manual | ||||
|     # Forked project branch / pre-merge pipeline not for Marge bot | ||||
|     - if: &is-forked-branch-or-pre-merge-not-for-marge '$CI_PROJECT_NAMESPACE != "mesa" || ($GITLAB_USER_LOGIN != "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event")' | ||||
|       when: manual | ||||
|     # Pipeline runs for the main branch of the upstream Mesa project | ||||
|     - if: &is-mesa-main '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH && $CI_COMMIT_BRANCH' | ||||
|       when: always | ||||
|     # Post-merge pipeline | ||||
|     - if: &is-post-merge '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH' | ||||
|       when: on_success | ||||
|     # Post-merge pipeline, not for Marge Bot | ||||
|     - if: &is-post-merge-not-for-marge '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH' | ||||
|       when: on_success | ||||
|     # Pre-merge pipeline | ||||
|     - if: &is-pre-merge '$CI_PIPELINE_SOURCE == "merge_request_event"' | ||||
|       when: on_success | ||||
|     # Pre-merge pipeline for Marge Bot | ||||
|     - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"' | ||||
|       when: on_success | ||||
|  | ||||
|  | ||||
| .docs-base: | ||||
|   extends: | ||||
|     - .fdo.ci-fairy | ||||
|     - .ci-run-policy | ||||
|   script: | ||||
|   - apk --no-cache add graphviz doxygen | ||||
|   - pip3 install sphinx breathe mako sphinx_rtd_theme | ||||
|   - docs/doxygen-wrapper.py --out-dir=docs/doxygen_xml | ||||
|   - sphinx-build -W -b html docs public | ||||
|  | ||||
| pages: | ||||
|   extends: .docs-base | ||||
|   stage: deploy | ||||
|   artifacts: | ||||
|     paths: | ||||
|     - public | ||||
|   needs: [] | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     - if: *is-mesa-main | ||||
|       changes: &docs-or-ci | ||||
|       - docs/**/* | ||||
|       - .gitlab-ci.yml | ||||
|       when: always | ||||
|     # Other cases default to never | ||||
|  | ||||
| test-docs: | ||||
|   extends: .docs-base | ||||
|   # Cancel job if a newer commit is pushed to the same branch | ||||
|   interruptible: true | ||||
|   stage: deploy | ||||
|   needs: [] | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     - if: *is-forked-branch | ||||
|       changes: *docs-or-ci | ||||
|       when: manual | ||||
|     # Other cases default to never | ||||
|  | ||||
| test-docs-mr: | ||||
|   extends: | ||||
|     - test-docs | ||||
|   needs: | ||||
|     - sanity | ||||
|   artifacts: | ||||
|     expose_as: 'Documentation preview' | ||||
|     paths: | ||||
|     - public/ | ||||
|   rules: | ||||
|     - if: *is-pre-merge | ||||
|       changes: *docs-or-ci | ||||
|       when: on_success | ||||
|     # Other cases default to never | ||||
|  | ||||
| # When to automatically run the CI | ||||
| .ci-run-policy: | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     # If any files affecting the pipeline are changed, build/test jobs run | ||||
|     # automatically once all dependency jobs have passed | ||||
|     - changes: &all_paths | ||||
|       - VERSION | ||||
|       - bin/git_sha1_gen.py | ||||
|       - bin/install_megadrivers.py | ||||
|       - bin/meson_get_version.py | ||||
|       - bin/symbols-check.py | ||||
|       # GitLab CI | ||||
|       - .gitlab-ci.yml | ||||
|       - .gitlab-ci/**/* | ||||
|       # Meson | ||||
|       - meson* | ||||
|       - build-support/**/* | ||||
|       - subprojects/**/* | ||||
|       # Source code | ||||
|       - include/**/* | ||||
|       - src/**/* | ||||
|       when: on_success | ||||
|     # Otherwise, build/test jobs won't run | ||||
|     - when: never | ||||
|   retry: | ||||
|     max: 2 | ||||
|     when: | ||||
|       - runner_system_failure | ||||
|  | ||||
|  | ||||
| .ci-deqp-artifacts: | ||||
|   artifacts: | ||||
|     name: "mesa_${CI_JOB_NAME}" | ||||
|     when: always | ||||
|     untracked: false | ||||
|     paths: | ||||
|       # Watch out!  Artifacts are relative to the build dir. | ||||
|       # https://gitlab.com/gitlab-org/gitlab-ce/commit/8788fb925706cad594adf6917a6c5f6587dd1521 | ||||
|       - artifacts | ||||
|       - _build/meson-logs/*.txt | ||||
|       - _build/meson-logs/strace | ||||
|  | ||||
| .container-rules: | ||||
|   extends: | ||||
|     - .ci-run-policy | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     # Run pipeline by default in the main project if any CI pipeline | ||||
|     # configuration files were changed, to ensure docker images are up to date | ||||
|     - if: *is-post-merge | ||||
|       changes: | ||||
|       - .gitlab-ci.yml | ||||
|       - .gitlab-ci/**/* | ||||
|       when: on_success | ||||
|     # Run pipeline by default if it was triggered by Marge Bot, is for a | ||||
|     # merge request, and any files affecting the pipeline were changed | ||||
|     - if: *is-pre-merge-for-marge | ||||
|       changes: | ||||
|         *all_paths | ||||
|       when: on_success | ||||
|     # Run pipeline by default in the main project if it was not triggered by | ||||
|     # Marge Bot, and any files affecting the pipeline were changed | ||||
|     - if: *is-post-merge-not-for-marge | ||||
|       changes: | ||||
|         *all_paths | ||||
|       when: on_success | ||||
|     # Allow triggering jobs manually in other cases if any files affecting the | ||||
|     # pipeline were changed | ||||
|     - changes: | ||||
|         *all_paths | ||||
|       when: manual | ||||
|     # Otherwise, container jobs won't run | ||||
|     - when: never | ||||
|  | ||||
| # Git archive | ||||
|  | ||||
| make git archive: | ||||
|   extends: | ||||
|     - .fdo.ci-fairy | ||||
|   stage: git-archive | ||||
|   rules: | ||||
|     - if: *is-scheduled-pipeline | ||||
|       when: on_success | ||||
|   # ensure we are running on packet | ||||
|   tags: | ||||
|     - packet.net | ||||
|   script: | ||||
|     # Compactify the .git directory | ||||
|     - git gc --aggressive | ||||
|     # compress the current folder | ||||
|     - tar -cvzf ../$CI_PROJECT_NAME.tar.gz . | ||||
|  | ||||
|     # login with the JWT token file | ||||
|     - ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}" | ||||
|     - ci-fairy minio cp ../$CI_PROJECT_NAME.tar.gz minio://$MINIO_HOST/git-cache/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz | ||||
|  | ||||
|  | ||||
| # Sanity checks of MR settings and commit logs | ||||
| sanity: | ||||
|   extends: | ||||
|     - .fdo.ci-fairy | ||||
|   stage: sanity | ||||
|   rules: | ||||
|     - if: *is-pre-merge | ||||
|       when: on_success | ||||
|     # Other cases default to never | ||||
|   variables: | ||||
|     GIT_STRATEGY: none | ||||
|   script: | ||||
|     # ci-fairy check-commits --junit-xml=check-commits.xml | ||||
|     - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml | ||||
|   artifacts: | ||||
|     when: on_failure | ||||
|     reports: | ||||
|       junit: check-*.xml | ||||
|  | ||||
| # Rules for tests that should not be present in MRs or the main | ||||
| # project's pipeline (don't block marge or report red on | ||||
| # mesa/mesa main) but should be present on pipelines in personal | ||||
| # branches (so you can opt in to running the flaky test when you want | ||||
| # to). | ||||
| .test-manual: | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     - if: *is-forked-branch | ||||
|       changes: | ||||
|         *all_paths | ||||
|       when: manual | ||||
|     - when: never | ||||
|   variables: | ||||
|     GIT_STRATEGY: none | ||||
|     JOB_TIMEOUT: 80 | ||||
|  | ||||
| # The above .test-manual rules doesn't allow the jobs to be available for MRs | ||||
| # but we should have an option to have manual jobs in MRs as well. | ||||
| .test-manual-mr: | ||||
|   rules: | ||||
|     - *ignore_scheduled_pipelines | ||||
|     - if: *is-forked-branch-or-pre-merge-not-for-marge | ||||
|       changes: | ||||
|         *all_paths | ||||
|       when: manual | ||||
|     - when: never | ||||
|   variables: | ||||
|     GIT_STRATEGY: none | ||||
|     JOB_TIMEOUT: 80 | ||||
|  | ||||
| @@ -1,17 +0,0 @@ | ||||
| # Note: skips lists for CI are just a list of lines that, when | ||||
| # non-zero-length and not starting with '#', will regex match to | ||||
| # delete lines from the test list.  Be careful. | ||||
|  | ||||
| # These are tremendously slow (pushing toward a minute), and aren't | ||||
| # reliable to be run in parallel with other tests due to CPU-side timing. | ||||
| dEQP-GLES[0-9]*.functional.flush_finish.* | ||||
|  | ||||
| # piglit: WGL is Windows-only | ||||
| wgl@.* | ||||
|  | ||||
| # These are sensitive to CPU timing, and would need to be run in isolation | ||||
| # on the system rather than in parallel with other tests. | ||||
| glx@glx_arb_sync_control@timing.* | ||||
|  | ||||
| # This test is not built with waffle, while we do build tests with waffle | ||||
| spec@!opengl 1.1@windowoverlap | ||||
| @@ -1,2 +0,0 @@ | ||||
| [*.sh] | ||||
| indent_size = 2 | ||||
| @@ -1,26 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # This test script groups together a bunch of fast dEQP variant runs | ||||
| # to amortize the cost of rebooting the board. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| EXIT=0 | ||||
|  | ||||
| # Run reset tests without parallelism: | ||||
| if ! env \ | ||||
|   DEQP_RESULTS_DIR=results/reset \ | ||||
|   FDO_CI_CONCURRENT=1 \ | ||||
|   DEQP_CASELIST_FILTER='.*reset.*' \ | ||||
|   /install/deqp-runner.sh; then | ||||
|     EXIT=1 | ||||
| fi | ||||
|  | ||||
| # Then run everything else with parallelism: | ||||
| if ! env \ | ||||
|   DEQP_RESULTS_DIR=results/nonrobustness \ | ||||
|   DEQP_CASELIST_INV_FILTER='.*reset.*' \ | ||||
|   /install/deqp-runner.sh; then | ||||
|     EXIT=1 | ||||
| fi | ||||
|  | ||||
| @@ -1,13 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Init entrypoint for bare-metal devices; calls common init code. | ||||
|  | ||||
| # First stage: very basic setup to bring up network and /dev etc | ||||
| /init-stage1.sh | ||||
|  | ||||
| # Second stage: run jobs | ||||
| test $? -eq 0 && /init-stage2.sh | ||||
|  | ||||
| # Wait until the job would have timed out anyway, so we don't spew a "init | ||||
| # exited" panic. | ||||
| sleep 6000 | ||||
| @@ -1,17 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power down" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE" | ||||
| SNMP_ON="i 1" | ||||
| SNMP_OFF="i 4" | ||||
|  | ||||
| snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF | ||||
| @@ -1,21 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| if [ -z "$BM_POE_INTERFACE" ]; then | ||||
|     echo "Must supply the PoE Interface to power up" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_POE_ADDRESS" ]; then | ||||
|     echo "Must supply the PoE Switch host" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE" | ||||
| SNMP_ON="i 1" | ||||
| SNMP_OFF="i 4" | ||||
|  | ||||
| snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF | ||||
| sleep 3s | ||||
| snmpset -v2c -r 3 -t 10 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON | ||||
| @@ -1,101 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Boot script for Chrome OS devices attached to a servo debug connector, using | ||||
| # NFS and TFTP to boot. | ||||
|  | ||||
| # We're run from the root of the repo, make a helper var for our paths | ||||
| BM=$CI_PROJECT_DIR/install/bare-metal | ||||
| CI_COMMON=$CI_PROJECT_DIR/install/common | ||||
|  | ||||
| # Runner config checks | ||||
| if [ -z "$BM_SERIAL" ]; then | ||||
|   echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the CPU serial device." | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_SERIAL_EC" ]; then | ||||
|   echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment" | ||||
|   echo "This is the EC serial device for controlling board power" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /nfs ]; then | ||||
|   echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ ! -d /tftp ]; then | ||||
|   echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # job config checks | ||||
| if [ -z "$BM_KERNEL" ]; then | ||||
|   echo "Must set BM_KERNEL to your board's kernel FIT image" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_ROOTFS" ]; then | ||||
|   echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$BM_CMDLINE" ]; then | ||||
|   echo "Must set BM_CMDLINE to your board's kernel command line arguments" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Clear out any previous run's artifacts. | ||||
| rm -rf results/ | ||||
| mkdir -p results | ||||
|  | ||||
| # Create the rootfs in the NFS directory.  rm to make sure it's in a pristine | ||||
| # state, since it's volume-mounted on the host. | ||||
| rsync -a --delete $BM_ROOTFS/ /nfs/ | ||||
| mkdir -p /nfs/results | ||||
| . $BM/rootfs-setup.sh /nfs | ||||
|  | ||||
| # Put the kernel/dtb image and the boot command line in the tftp directory for | ||||
| # the board to find.  For normal Mesa development, we build the kernel and | ||||
| # store it in the docker container that this script is running in. | ||||
| # | ||||
| # However, container builds are expensive, so when you're hacking on the | ||||
| # kernel, it's nice to be able to skip the half hour container build and plus | ||||
| # moving that container to the runner.  So, if BM_KERNEL is a URL, fetch it | ||||
| # instead of looking in the container.  Note that the kernel build should be | ||||
| # the output of: | ||||
| # | ||||
| # make Image.lzma | ||||
| # | ||||
| # mkimage \ | ||||
| #  -A arm64 \ | ||||
| #  -f auto \ | ||||
| #  -C lzma \ | ||||
| #  -d arch/arm64/boot/Image.lzma \ | ||||
| #  -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \ | ||||
| #  cheza-image.img | ||||
|  | ||||
| rm -rf /tftp/* | ||||
| if echo "$BM_KERNEL" | grep -q http; then | ||||
|   apt install -y wget | ||||
|   wget $BM_KERNEL -O /tftp/vmlinuz | ||||
| else | ||||
|   cp $BM_KERNEL /tftp/vmlinuz | ||||
| fi | ||||
| echo "$BM_CMDLINE" > /tftp/cmdline | ||||
|  | ||||
| set +e | ||||
| python3 $BM/cros_servo_run.py \ | ||||
|         --cpu $BM_SERIAL \ | ||||
|         --ec $BM_SERIAL_EC | ||||
| ret=$? | ||||
| set -e | ||||
|  | ||||
| # Bring artifacts back from the NFS dir to the build dir where gitlab-runner | ||||
| # will look for them. | ||||
| cp -Rp /nfs/results/. results/ | ||||
|  | ||||
| exit $ret | ||||
| @@ -1,198 +0,0 @@ | ||||
|  | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| import queue | ||||
| import re | ||||
| from serial_buffer import SerialBuffer | ||||
| import sys | ||||
| import threading | ||||
|  | ||||
|  | ||||
| class CrosServoRun: | ||||
|     def __init__(self, cpu, ec): | ||||
|         # Merged FIFO for the two serial buffers, fed by threads. | ||||
|         self.serial_queue = queue.Queue() | ||||
|         self.sentinel = object() | ||||
|         self.threads_done = 0 | ||||
|  | ||||
|         self.ec_ser = SerialBuffer( | ||||
|             ec, "results/serial-ec.txt", "R SERIAL-EC> ") | ||||
|         self.cpu_ser = SerialBuffer( | ||||
|             cpu, "results/serial.txt", "R SERIAL-CPU> ") | ||||
|  | ||||
|         self.iter_feed_ec = threading.Thread( | ||||
|             target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),)) | ||||
|         self.iter_feed_ec.start() | ||||
|  | ||||
|         self.iter_feed_cpu = threading.Thread( | ||||
|             target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),)) | ||||
|         self.iter_feed_cpu.start() | ||||
|  | ||||
|     def close(self): | ||||
|         self.ec_ser.close() | ||||
|         self.cpu_ser.close() | ||||
|         self.iter_feed_ec.join() | ||||
|         self.iter_feed_cpu.join() | ||||
|  | ||||
|     # Feed lines from our serial queues into the merged queue, marking when our | ||||
|     # input is done. | ||||
|     def iter_feed_queue(self, it): | ||||
|         for i in it: | ||||
|             self.serial_queue.put(i) | ||||
|         self.serial_queue.put(self.sentinel) | ||||
|  | ||||
|     # Return the next line from the queue, counting how many threads have | ||||
|     # terminated and joining when done | ||||
|     def get_serial_queue_line(self): | ||||
|         line = self.serial_queue.get() | ||||
|         if line == self.sentinel: | ||||
|             self.threads_done = self.threads_done + 1 | ||||
|             if self.threads_done == 2: | ||||
|                 self.iter_feed_cpu.join() | ||||
|                 self.iter_feed_ec.join() | ||||
|         return line | ||||
|  | ||||
|     # Returns an iterator for getting the next line. | ||||
|     def serial_queue_lines(self): | ||||
|         return iter(self.get_serial_queue_line, self.sentinel) | ||||
|  | ||||
|     def ec_write(self, s): | ||||
|         print("W SERIAL-EC> %s" % s) | ||||
|         self.ec_ser.serial.write(s.encode()) | ||||
|  | ||||
|     def cpu_write(self, s): | ||||
|         print("W SERIAL-CPU> %s" % s) | ||||
|         self.cpu_ser.serial.write(s.encode()) | ||||
|  | ||||
|     def print_error(self, message): | ||||
|         RED = '\033[0;31m' | ||||
|         NO_COLOR = '\033[0m' | ||||
|         print(RED + message + NO_COLOR) | ||||
|  | ||||
|     def run(self): | ||||
|         # Flush any partial commands in the EC's prompt, then ask for a reboot. | ||||
|         self.ec_write("\n") | ||||
|         self.ec_write("reboot\n") | ||||
|  | ||||
|         # This is emitted right when the bootloader pauses to check for input. | ||||
|         # Emit a ^N character to request network boot, because we don't have a | ||||
|         # direct-to-netboot firmware on cheza. | ||||
|         for line in self.serial_queue_lines(): | ||||
|             if re.search("load_archive: loading locale_en.bin", line): | ||||
|                 self.cpu_write("\016") | ||||
|                 break | ||||
|  | ||||
|             # The Cheza boards have issues with failing to bring up power to | ||||
|             # the system sometimes, possibly dependent on ambient temperature | ||||
|             # in the farm. | ||||
|             if re.search("POWER_GOOD not seen in time", line): | ||||
|                 self.print_error("Detected intermittent poweron failure, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|         tftp_failures = 0 | ||||
|         for line in self.serial_queue_lines(): | ||||
|             if re.search("---. end Kernel panic", line): | ||||
|                 return 1 | ||||
|  | ||||
|             # The Cheza firmware seems to occasionally get stuck looping in | ||||
|             # this error state during TFTP booting, possibly based on amount of | ||||
|             # network traffic around it, but it'll usually recover after a | ||||
|             # reboot. | ||||
|             if re.search("R8152: Bulk read error 0xffffffbf", line): | ||||
|                 tftp_failures += 1 | ||||
|                 if tftp_failures >= 100: | ||||
|                     self.print_error("Detected intermittent tftp failure, restarting run...") | ||||
|                     return 2 | ||||
|  | ||||
|             # There are very infrequent bus errors during power management transitions | ||||
|             # on cheza, which we don't expect to be the case on future boards. | ||||
|             if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line): | ||||
|                 self.print_error("Detected cheza power management bus error, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|             # If the network device dies, it's probably not graphics's fault, just try again. | ||||
|             if re.search("NETDEV WATCHDOG", line): | ||||
|                 self.print_error( | ||||
|                     "Detected network device failure, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|             # These HFI response errors started appearing with the introduction | ||||
|             # of piglit runs.  CosmicPenguin says: | ||||
|             # | ||||
|             # "message ID 106 isn't a thing, so likely what happened is that we | ||||
|             # got confused when parsing the HFI queue.  If it happened on only | ||||
|             # one run, then memory corruption could be a possible clue" | ||||
|             # | ||||
|             # Given that it seems to trigger randomly near a GPU fault and then | ||||
|             # break many tests after that, just restart the whole run. | ||||
|             if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line): | ||||
|                 self.print_error("Detected cheza power management bus error, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|             if re.search("coreboot.*bootblock starting", line): | ||||
|                 self.print_error( | ||||
|                     "Detected spontaneous reboot, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|             if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line): | ||||
|                 self.print_error("Detected cheza MMU fail, restarting run...") | ||||
|                 return 2 | ||||
|  | ||||
|             result = re.search("hwci: mesa: (\S*)", line) | ||||
|             if result: | ||||
|                 if result.group(1) == "pass": | ||||
|                     return 0 | ||||
|                 else: | ||||
|                     return 1 | ||||
|  | ||||
|         self.print_error("Reached the end of the CPU serial log without finding a result") | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
def main():
    """Parse CLI args, retry the servo run on infrastructure flakes, then
    power the board down and exit with the final result code."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=str, help='CPU Serial device', required=True)
    parser.add_argument('--ec', type=str, help='EC Serial device', required=True)
    args = parser.parse_args()

    servo = CrosServoRun(args.cpu, args.ec)

    # A return of 2 means "known flake, try again"; anything else is final.
    retval = servo.run()
    while retval == 2:
        retval = servo.run()

    # power down the CPU on the device
    servo.ec_write("power off\n")

    servo.close()

    sys.exit(retval)


if __name__ == '__main__':
    main()
| @@ -1,10 +0,0 @@ | ||||
#!/bin/bash

# Switch one relay on an eth008 relay board off.  The board's address comes
# from the runner environment ($ETH_HOST/$ETH_PORT); the relay number is the
# only argument.

relay=$1

if [ -z "$relay" ]; then
    echo "Must supply a relay arg"
    exit 1
fi

$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
| @@ -1,28 +0,0 @@ | ||||
#!/usr/bin/python3

# Client for the eth008 ethernet relay board: send a single on/off command
# for one relay and check the device's one-byte acknowledgement.
# Usage: eth008-power-relay.py <host> <port> <on|off> <relay-number>

import sys
import socket

host = sys.argv[1]
port = sys.argv[2]
mode = sys.argv[3]
relay = sys.argv[4]
msg = None

# eth008 protocol: 0x20 switches a relay on, 0x21 switches it off.
if mode == "on":
    msg = b'\x20'
else:
    msg = b'\x21'

msg += int(relay).to_bytes(1, 'big')
msg += b'\x00'

c = socket.create_connection((host, int(port)))
c.sendall(msg)

data = c.recv(1)
c.close()

# The board replies with one status byte; a reply of 1 means the command
# failed.  Bug fix: this used to compare data[0] (an int) against b'\x01'
# (a bytes object), which is always False, so failures were never reported.
if data == b'\x01':
    print('Command failed')
    sys.exit(1)
| @@ -1,12 +0,0 @@ | ||||
#!/bin/bash

# Power-cycle one relay on an eth008 relay board: switch it off, wait,
# then switch it back on.  The board's address comes from the runner
# environment ($ETH_HOST/$ETH_PORT); the relay number is the only argument.

relay=$1

if [ -z "$relay" ]; then
    echo "Must supply a relay arg"
    exit 1
fi

$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
# Pause between off and on, presumably to let the device fully power down.
sleep 5
$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT on $relay
| @@ -1,30 +0,0 @@ | ||||
#!/bin/bash

# Poll a (growing) log file until it contains one of a set of expected
# strings, then fail if the string that appeared was registered as an error.
#
# Usage: <script> <file> [-f <ok-string>]... [-e <error-string>]...
#   -f  a string whose appearance ends the wait successfully
#   -e  a string whose appearance ends the wait AND makes the script exit 1

set -e

STRINGS=$(mktemp)
ERRORS=$(mktemp)

trap "rm $STRINGS; rm $ERRORS;" EXIT

FILE=$1
shift 1

# -e strings go into both files: they satisfy the wait loop below and then
# trip the error check at the end.
while getopts "f:e:" opt; do
  case $opt in
    f) echo "$OPTARG" >> $STRINGS;;
    e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
  esac
done
shift $((OPTIND -1))

echo "Waiting for $FILE to say one of following strings"
cat $STRINGS

# Poll every 2 seconds until any expected string (whole-word match) shows up.
while ! egrep -wf $STRINGS $FILE; do
  sleep 2
done

if egrep -wf $ERRORS $FILE; then
  exit 1
fi
| @@ -1,148 +0,0 @@ | ||||
#!/bin/bash

# Boot a fastboot-attached device into a CI kernel + initramfs (or NFS root)
# and collect test results.  All BM_* configuration comes from the
# gitlab-runner [[runners]] environment and the job's variables; each one is
# validated below before any work starts.

BM=$CI_PROJECT_DIR/install/bare-metal
CI_COMMON=$CI_PROJECT_DIR/install/common

if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
  echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
  echo "BM_SERIAL:"
  echo "  This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
  echo "BM_SERIAL_SCRIPT:"
  echo "  This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
  exit 1
fi

if [ -z "$BM_POWERUP" ]; then
  echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
  echo "This is a shell script that should reset the device and begin its boot sequence"
  echo "such that it pauses at fastboot."
  exit 1
fi

if [ -z "$BM_POWERDOWN" ]; then
  echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
  echo "This is a shell script that should power off the device."
  exit 1
fi

if [ -z "$BM_FASTBOOT_SERIAL" ]; then
  echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
  echo "This must be the a stable-across-resets fastboot serial number."
  exit 1
fi

if [ -z "$BM_KERNEL" ]; then
  echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
  exit 1
fi

if [ -z "$BM_DTB" ]; then
  echo "Must set BM_DTB to your board's DTB file in the job's variables:"
  exit 1
fi

if [ -z "$BM_ROOTFS" ]; then
  echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
  exit 1
fi

# An NFS root requested through the kernel command line switches us to the
# NFS code paths below (empty initramfs, results collected from /nfs).
if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
  BM_FASTBOOT_NFSROOT=1
fi

set -ex

# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results/

if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
  # Create the rootfs in the NFS directory.  rm to make sure it's in a pristine
  # state, since it's volume-mounted on the host.
  rsync -a --delete $BM_ROOTFS/ /nfs/
  mkdir -p /nfs/results
  . $BM/rootfs-setup.sh /nfs

  # Root on NFS, no need for an inintramfs.
  rm -f rootfs.cpio.gz
  touch rootfs.cpio
  gzip rootfs.cpio
else
  # Create the rootfs in a temp dir
  rsync -a --delete $BM_ROOTFS/ rootfs/
  . $BM/rootfs-setup.sh rootfs

  # Finally, pack it up into a cpio rootfs.  Skip the vulkan CTS since none of
  # these devices use it and it would take up space in the initrd.

  if [ -n "$PIGLIT_PROFILES" ]; then
    EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
  else
    EXCLUDE_FILTER="piglit|python"
  fi

  pushd rootfs
  find -H | \
    egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
    egrep -v "traces-db|apitrace|renderdoc" | \
    egrep -v $EXCLUDE_FILTER | \
    cpio -H newc -o | \
    xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
  popd
fi

# Make the combined kernel image and dtb for passing to fastboot.  For normal
# Mesa development, we build the kernel and store it in the docker container
# that this script is running in.
#
# However, container builds are expensive, so when you're hacking on the
# kernel, it's nice to be able to skip the half hour container build and plus
# moving that container to the runner.  So, if BM_KERNEL+BM_DTB are URLs,
# fetch them instead of looking in the container.
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
  apt install -y wget

  wget $BM_KERNEL -O kernel
  wget $BM_DTB -O dtb

  cat kernel dtb > Image.gz-dtb
  rm kernel dtb
else
  cat $BM_KERNEL $BM_DTB > Image.gz-dtb
fi

mkdir -p artifacts
abootimg \
  --create artifacts/fastboot.img \
  -k Image.gz-dtb \
  -r rootfs.cpio.gz \
  -c cmdline="$BM_CMDLINE"
rm Image.gz-dtb

export PATH=$BM:$PATH

# Start background command for talking to serial if we have one.
if [ -n "$BM_SERIAL_SCRIPT" ]; then
  $BM_SERIAL_SCRIPT > results/serial-output.txt &

  while [ ! -e results/serial-output.txt ]; do
    sleep 1
  done
fi

# Don't let a failed boot abort the script; we still want to collect results
# and report the run's exit code ourselves.
set +e
$BM/fastboot_run.py \
  --dev="$BM_SERIAL" \
  --fbserial="$BM_FASTBOOT_SERIAL" \
  --powerup="$BM_POWERUP" \
  --powerdown="$BM_POWERDOWN"
ret=$?
set -e

if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
  # Bring artifacts back from the NFS dir to the build dir where gitlab-runner
  # will look for them.
  cp -Rp /nfs/results/. results/
fi

exit $ret
| @@ -1,146 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| import os | ||||
| import re | ||||
| from serial_buffer import SerialBuffer | ||||
| import sys | ||||
| import threading | ||||
|  | ||||
class FastbootRun:
    """Boot a device via fastboot and parse its serial log for a CI result."""

    def __init__(self, args):
        self.powerup = args.powerup
        # We would like something like a 1 minute timeout, but the piglit traces
        # jobs stall out for long periods of time.
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
        self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)

    def close(self):
        """Stop the serial reader."""
        self.ser.close()

    def print_error(self, message):
        """Print *message* to stdout highlighted in ANSI red."""
        RED = '\033[0;31m'
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd):
        """Run *cmd* through the shell, echoing it first; returns the
        os.system() exit status."""
        print("Running '{}'".format(cmd))
        return os.system(cmd)

    def run(self):
        """Power up, fastboot-boot the CI image, and watch the console.

        Returns 0 on "hwci: mesa: pass", 1 on failure/panic, 2 when a known
        flake was seen and the whole run should be retried.
        """
        if self.logged_system(self.powerup) != 0:
            return 1

        fastboot_ready = False
        for line in self.ser.lines():
            if re.search("fastboot: processing commands", line) or \
                re.search("Listening for fastboot command on", line):
                fastboot_ready = True
                break

            if re.search("data abort", line):
                self.print_error("Detected crash during boot, restarting run...")
                return 2

        if not fastboot_ready:
            self.print_error("Failed to get to fastboot prompt, restarting run...")
            return 2

        if self.logged_system(self.fastboot) != 0:
            return 1

        # -1 means "not counting down"; set to a positive number to print
        # that many more lines before giving up with a retry (used below to
        # capture context after a GPU hang).
        print_more_lines = -1
        for line in self.ser.lines():
            if print_more_lines == 0:
                return 2
            if print_more_lines > 0:
                print_more_lines -= 1

            if re.search("---. end Kernel panic", line):
                return 1

            # The db820c boards intermittently reboot.  Just restart the run
            # when if we see a reboot after we got past fastboot.
            if re.search("PON REASON", line):
                self.print_error("Detected spontaneous reboot, restarting run...")
                return 2

            # db820c sometimes wedges around iommu fault recovery
            if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
                self.print_error(
                    "Detected kernel soft lockup, restarting run...")
                return 2

            # If the network device dies, it's probably not graphics's fault, just try again.
            if re.search("NETDEV WATCHDOG", line):
                self.print_error(
                    "Detected network device failure, restarting run...")
                return 2

            # A3xx recovery doesn't quite work. Sometimes the GPU will get
            # wedged and recovery will fail (because power can't be reset?)
            # This assumes that the jobs are sufficiently well-tested that GPU
            # hangs aren't always triggered, so just try again. But print some
            # more lines first so that we get better information on the cause
            # of the hang. Once a hang happens, it's pretty chatty.
            if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
                self.print_error(
                    "Detected GPU hang, restarting run...")
                if print_more_lines == -1:
                    print_more_lines = 30

            # Raw string so \S stays a regex escape rather than a deprecated
            # string escape.
            result = re.search(r"hwci: mesa: (\S*)", line)
            if result:
                if result.group(1) == "pass":
                    return 0
                else:
                    return 1

        self.print_error("Reached the end of the CPU serial log without finding a result, restarting run...")
        return 2
|  | ||||
def main():
    """CLI entry point: retry the fastboot run until it passes or hard-fails,
    then power the board down and exit with the result code."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
    args = parser.parse_args()

    # A return of 2 means "known flake, try again with a fresh runner".
    while True:
        fastboot = FastbootRun(args)
        retval = fastboot.run()
        fastboot.close()
        if retval != 2:
            break

    fastboot.logged_system(args.powerdown)

    sys.exit(retval)

if __name__ == '__main__':
    main()
| @@ -1,10 +0,0 @@ | ||||
#!/bin/bash

# Power down a device attached to the USB relay board driven by
# google-power-relay.py.  The relay number is the only argument.

relay=$1

if [ -z "$relay" ]; then
    echo "Must supply a relay arg"
    exit 1
fi

$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
| @@ -1,19 +0,0 @@ | ||||
#!/usr/bin/python3

# Flip one relay on the USB relay board at /dev/ttyACM0.
# Usage: google-power-relay.py <on|off> <relay-number>

import sys
import serial

requested_mode = sys.argv[1]
relay = sys.argv[2]

# our relays are "off" means "board is powered".
mode = {"on": "off", "off": "on"}[requested_mode]

port = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
port.write("relay {} {}\n\r".format(mode, relay).encode())
port.close()
| @@ -1,12 +0,0 @@ | ||||
#!/bin/bash

# Power-cycle a device attached to the USB relay board driven by
# google-power-relay.py: off, wait, then back on.  The relay number is the
# only argument.

relay=$1

if [ -z "$relay" ]; then
    echo "Must supply a relay arg"
    exit 1
fi

$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
# Pause between off and on, presumably to let the device fully power down.
sleep 5
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay
| @@ -1,17 +0,0 @@ | ||||
#!/bin/bash

# Power off a PoE-attached device by disabling its switch port over SNMP.
# BM_POE_ADDRESS and BM_POE_INTERFACE come from the runner environment.

if [ -z "$BM_POE_INTERFACE" ]; then
    echo "Must supply the PoE Interface to power up"
    exit 1
fi

if [ -z "$BM_POE_ADDRESS" ]; then
    echo "Must supply the PoE Switch host"
    exit 1
fi

# Per-port admin OID; the port index is offset by 48 from BM_POE_INTERFACE --
# TODO confirm against this switch's MIB.
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
SNMP_ON="i 1"
SNMP_OFF="i 2"

# Serialize switch access across concurrent jobs with a lock file.
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
| @@ -1,19 +0,0 @@ | ||||
#!/bin/bash

# Power-cycle a PoE-attached device by toggling its switch port over SNMP:
# disable, wait, then re-enable.  BM_POE_ADDRESS and BM_POE_INTERFACE come
# from the runner environment.

if [ -z "$BM_POE_INTERFACE" ]; then
    echo "Must supply the PoE Interface to power up"
    exit 1
fi

if [ -z "$BM_POE_ADDRESS" ]; then
    echo "Must supply the PoE Switch host"
    exit 1
fi

# Per-port admin OID; the port index is offset by 48 from BM_POE_INTERFACE --
# TODO confirm against this switch's MIB.
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
SNMP_ON="i 1"
SNMP_OFF="i 2"

# Serialize switch access across concurrent jobs with a lock file.
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
# Pause between off and on, presumably to let the device fully power down.
sleep 3s
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"
| @@ -1,149 +0,0 @@ | ||||
#!/bin/bash

# Boot script for devices attached to a PoE switch, using NFS for the root
# filesystem.

# We're run from the root of the repo, make a helper var for our paths
BM=$CI_PROJECT_DIR/install/bare-metal
CI_COMMON=$CI_PROJECT_DIR/install/common

# Runner config checks
if [ -z "$BM_SERIAL" ]; then
  echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the serial port to listen the device."
  exit 1
fi

if [ -z "$BM_POE_ADDRESS" ]; then
  echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the PoE switch address to connect for powering up/down devices."
  exit 1
fi

if [ -z "$BM_POE_INTERFACE" ]; then
  echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
  echo "This is the PoE switch interface where the device is connected."
  exit 1
fi

if [ -z "$BM_POWERUP" ]; then
  echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
  echo "This is a shell script that should power up the device and begin its boot sequence."
  exit 1
fi

if [ -z "$BM_POWERDOWN" ]; then
  echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
  echo "This is a shell script that should power off the device."
  exit 1
fi

if [ ! -d /nfs ]; then
  echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
  exit 1
fi

if [ ! -d /tftp ]; then
  echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
  exit 1
fi

# job config checks
if [ -z "$BM_ROOTFS" ]; then
  echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
  exit 1
fi

if [ -z "$BM_BOOTFS" ]; then
  echo "Must set /boot files for the TFTP boot in the job's variables"
  exit 1
fi

if [ -z "$BM_CMDLINE" ]; then
  echo "Must set BM_CMDLINE to your board's kernel command line arguments"
  exit 1
fi

if [ -z "$BM_BOOTCONFIG" ]; then
  echo "Must set BM_BOOTCONFIG to your board's required boot configuration arguments"
  exit 1
fi

set -ex

# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results

# Create the rootfs in the NFS directory.  rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/

# If BM_BOOTFS is an URL, download it
if echo $BM_BOOTFS | grep -q http; then
  apt install -y wget
  wget ${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS -O /tmp/bootfs.tar
  BM_BOOTFS=/tmp/bootfs.tar
fi

# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
if [ -f $BM_BOOTFS ]; then
  mkdir -p /tmp/bootfs
  tar xf $BM_BOOTFS -C /tmp/bootfs
  BM_BOOTFS=/tmp/bootfs
fi

# Install kernel modules (it could be either in /lib/modules or
# /usr/lib/modules, but we want to install in the latter)
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/

# Install kernel image + bootloader files
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/

# Set up the pxelinux config for Jetson Nano
mkdir -p /tftp/pxelinux.cfg
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
PROMPT 0
TIMEOUT 30
DEFAULT primary
MENU TITLE jetson nano boot options
LABEL primary
      MENU LABEL CI kernel on TFTP
      LINUX Image
      FDT tegra210-p3450-0000.dtb
      APPEND \${cbootargs} $BM_CMDLINE
EOF

# Create the rootfs in the NFS directory
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs

echo "$BM_CMDLINE" > /tftp/cmdline.txt

# Add some required options in config.txt
printf "$BM_BOOTCONFIG" >> /tftp/config.txt

# Retry the boot up to 10 times when poe_run.py reports 2, which means the
# board never even started booting (a known flake on these devices).
set +e
ATTEMPTS=10
while [ $((ATTEMPTS--)) -gt 0 ]; do
  python3 $BM/poe_run.py \
          --dev="$BM_SERIAL" \
          --powerup="$BM_POWERUP" \
          --powerdown="$BM_POWERDOWN" \
          --timeout="${BM_POE_TIMEOUT:-60}"
  ret=$?

  if [ $ret -eq 2 ]; then
    echo "Did not detect boot sequence, retrying..."
  else
    ATTEMPTS=0
  fi
done
set -e

# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/

exit $ret
| @@ -1,100 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Igalia, S.L. | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| import os | ||||
| import re | ||||
| from serial_buffer import SerialBuffer | ||||
| import sys | ||||
| import threading | ||||
|  | ||||
class PoERun:
    """Power up a PoE-attached board and parse its serial log for a result."""

    def __init__(self, args):
        self.powerup = args.powerup
        self.powerdown = args.powerdown
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)

    def print_error(self, message):
        """Print *message* to stdout highlighted in ANSI red."""
        RED = '\033[0;31m'
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd):
        """Run *cmd* through the shell, echoing it first; returns the
        os.system() exit status."""
        print("Running '{}'".format(cmd))
        return os.system(cmd)

    def run(self):
        """Power the board up and watch its console for a CI result.

        Returns 0 on "hwci: mesa: pass", 1 on failure/panic, 2 when the boot
        never started (or a known flake hit) and the run should be retried.
        """
        if self.logged_system(self.powerup) != 0:
            return 1

        boot_detected = False
        for line in self.ser.lines():
            if re.search("Booting Linux", line):
                boot_detected = True
                break

        if not boot_detected:
            self.print_error("Something wrong; couldn't detect the boot start up sequence")
            return 2

        for line in self.ser.lines():
            if re.search("---. end Kernel panic", line):
                return 1

            # Binning memory problems
            if re.search("binner overflow mem", line):
                self.print_error("Memory overflow in the binner; GPU hang")
                return 1

            if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
                self.print_error("nouveau jetson boot bug, retrying.")
                return 2

            # Raw string so \S stays a regex escape rather than a deprecated
            # string escape.
            result = re.search(r"hwci: mesa: (\S*)", line)
            if result:
                if result.group(1) == "pass":
                    return 0
                else:
                    return 1

        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 2
|  | ||||
def main():
    """Entry point: run the PoE boot test once, then power the board down
    and exit with the result code."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--timeout', type=int, default=60,
                        help='time in seconds to wait for activity', required=False)
    args = parser.parse_args()

    runner = PoERun(args)
    exit_code = runner.run()

    # Always power the board back down, whatever the outcome.
    runner.logged_system(args.powerdown)

    sys.exit(exit_code)

if __name__ == '__main__':
    main()
| @@ -1,30 +0,0 @@ | ||||
#!/bin/bash

# Populate a rootfs directory with the init scripts, job environment and the
# Mesa build artifacts needed to run a bare-metal CI job.
#
# Fix: quote variable expansions so paths containing spaces or glob
# characters do not word-split ($CI_COMMON/init*.sh is left unquoted on
# purpose: the glob must expand).

rootfs_dst=$1

mkdir -p "$rootfs_dst/results"

# Set up the init script that brings up the system.
cp "$BM/bm-init.sh" "$rootfs_dst/init"
cp "$CI_COMMON"/init*.sh "$rootfs_dst/"

# Make JWT token available as file in the bare-metal storage to enable access
# to MinIO
cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"

cp "$CI_COMMON/capture-devcoredump.sh" "$rootfs_dst/"
cp "$CI_COMMON/intel-gpu-freq.sh" "$rootfs_dst/"

# Don't trace while generating the job environment (it can carry secrets).
set +x

# Pass through relevant env vars from the gitlab job to the baremetal init script
"$CI_COMMON"/generate-env.sh > "$rootfs_dst/set-job-env-vars.sh"
chmod +x "$rootfs_dst/set-job-env-vars.sh"
echo "Variables passed through:"
cat "$rootfs_dst/set-job-env-vars.sh"

set -x

# Add the Mesa drivers we built, and make a consistent symlink to them.
mkdir -p "$rootfs_dst/$CI_PROJECT_DIR"
rsync -aH --delete "$CI_PROJECT_DIR/install/" "$rootfs_dst/$CI_PROJECT_DIR/install/"
| @@ -1,162 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright © 2020 Google LLC | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
|  | ||||
| import argparse | ||||
| from datetime import datetime, timezone | ||||
| import queue | ||||
| import serial | ||||
| import threading | ||||
| import time | ||||
|  | ||||
class SerialBuffer:
    """Buffer a serial console on background threads.

    Reads either a serial device (when *dev* is given) or a file that some
    other process appends to (when it is not), echoes everything to stdout
    (and to *filename* in device mode), and hands complete decoded lines to
    program logic via lines()/get_line().
    """

    def __init__(self, dev, filename, prefix, timeout=None):
        self.filename = filename
        self.dev = dev

        if dev:
            # We own the device, so we log what we read to filename.
            self.f = open(filename, "wb+")
            self.serial = serial.Serial(dev, 115200, timeout=timeout)
        else:
            # Another process writes the log; we just follow it.
            self.f = open(filename, "rb")
            self.serial = None

        self.byte_queue = queue.Queue()
        self.line_queue = queue.Queue()
        self.prefix = prefix
        self.timeout = timeout
        # Unique object passed through the queues to signal end-of-stream.
        self.sentinel = object()
        self.closing = False

        if self.dev:
            self.read_thread = threading.Thread(
                target=self.serial_read_thread_loop, daemon=True)
        else:
            self.read_thread = threading.Thread(
                target=self.serial_file_read_thread_loop, daemon=True)
        self.read_thread.start()

        self.lines_thread = threading.Thread(
            target=self.serial_lines_thread_loop, daemon=True)
        self.lines_thread.start()

    def close(self):
        """Stop both worker threads and release the serial device/file."""
        self.closing = True
        if self.serial:
            self.serial.cancel_read()
        self.read_thread.join()
        self.lines_thread.join()
        if self.serial:
            self.serial.close()

    # Thread that just reads the bytes from the serial device to try to keep from
    # buffer overflowing it. If nothing is received in 1 minute, it finalizes.
    def serial_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.dev
        self.byte_queue.put(greet.encode())

        while not self.closing:
            try:
                # Renamed from `bytes` to avoid shadowing the builtin.
                data = self.serial.read()
                if len(data) == 0:
                    # Read timed out (or the device went away): finish up.
                    break
                self.byte_queue.put(data)
            except Exception as err:
                print(self.prefix + str(err))
                break
        self.byte_queue.put(self.sentinel)

    # Thread that just reads the bytes from the file of serial output that some
    # other process is appending to.
    def serial_file_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.filename
        self.byte_queue.put(greet.encode())

        while not self.closing:
            chunk = self.f.readline()
            if chunk:
                self.byte_queue.put(chunk)
            else:
                # At EOF for now: wait for the writer to append more.
                time.sleep(0.1)
        self.byte_queue.put(self.sentinel)

    # Thread that processes the stream of bytes to 1) log to stdout, 2) log to
    # file, 3) add to the queue of lines to be read by program logic

    def serial_lines_thread_loop(self):
        line = bytearray()
        while True:
            data = self.byte_queue.get(block=True)

            # Identity check: the sentinel is a unique object() marker.
            if data is self.sentinel:
                self.read_thread.join()
                self.line_queue.put(self.sentinel)
                break

            # Write our data to the output file if we're the ones reading from
            # the serial device
            if self.dev:
                self.f.write(data)
                self.f.flush()

            for b in data:
                line.append(b)
                if b == b'\n'[0]:
                    line = line.decode(errors="replace")

                    # Local name `timestamp` rather than `time`, which would
                    # shadow the imported time module.
                    timestamp = datetime.now().strftime('%y-%m-%d %H:%M:%S')
                    print("{endc}{time} {prefix}{line}".format(
                        time=timestamp, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')

                    self.line_queue.put(line)
                    line = bytearray()

    def get_line(self):
        """Block until the next decoded line (or the end-of-stream sentinel)."""
        line = self.line_queue.get()
        if line is self.sentinel:
            self.lines_thread.join()
        return line

    def lines(self):
        """Iterate decoded lines until the stream ends."""
        return iter(self.get_line, self.sentinel)
|  | ||||
|  | ||||
def main():
    """Tail a serial device (or a log file) purely for its logging side effect."""
    parser = argparse.ArgumentParser()

    parser.add_argument('--dev', type=str, help='Serial device')
    parser.add_argument('--file', type=str, required=True,
                        help='Filename for serial output')
    parser.add_argument('--prefix', type=str, nargs='?',
                        help='Prefix for logging serial to stdout')

    args = parser.parse_args()

    buffer = SerialBuffer(args.dev, args.file, args.prefix or "")

    # We're just using this as a logger, so eat the produced lines and drop
    # them
    for _ in buffer.lines():
        pass


if __name__ == '__main__':
    main()
| @@ -1,41 +0,0 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| # Copyright © 2020 Christian Gmeiner | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||
| # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||
| # IN THE SOFTWARE. | ||||
| # | ||||
| # Tiny script to read bytes from telnet, and write the output to stdout, with a | ||||
| # buffer in between so we don't lose serial output from its buffer. | ||||
| # | ||||
|  | ||||
| import sys | ||||
| import telnetlib | ||||
|  | ||||
host = sys.argv[1]
port = sys.argv[2]

tn = telnetlib.Telnet(host, port, 1000000)

# Shovel bytes from the telnet connection to stdout, flushing per read so
# consumers see serial output promptly.
# Fixes: read_some() returns b'' once EOF is hit, so the original loop would
# spin forever at end-of-stream, and tn.close() was unreachable after the
# unconditional `while True`.
# NOTE(review): telnetlib is deprecated (removed in Python 3.13) — confirm
# the target image's Python version before porting.
try:
    while True:
        data = tn.read_some()
        if not data:
            break
        sys.stdout.buffer.write(data)
        sys.stdout.flush()
except EOFError:
    # Connection closed with no data pending: normal termination.
    pass
finally:
    tn.close()
| @@ -1,525 +0,0 @@ | ||||
# Shared between windows and Linux
.build-common:
  extends: .ci-run-policy
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  artifacts:
    # Keep build logs and shader-db output even when the job fails.
    name: "mesa_${CI_JOB_NAME}"
    when: always
    paths:
      - _build/meson-logs/*.txt
      - _build/meson-logs/strace
      - shader-db
|  | ||||
# Just Linux
.build-linux:
  extends: .build-common
  variables:
    # ccache settings shared by all Linux build jobs; the cache lives on the
    # runner's persistent /cache volume.
    CCACHE_COMPILERCHECK: "content"
    CCACHE_COMPRESS: "true"
    CCACHE_DIR: /cache/mesa/ccache
  # Use ccache transparently, and print stats before/after
  before_script:
    - !reference [default, before_script]
    - export PATH="/usr/lib/ccache:$PATH"
    - export CCACHE_BASEDIR="$PWD"
    # The section_start/section_end echoes produce collapsible sections in
    # the GitLab job log.
    - echo -e "\e[0Ksection_start:$(date +%s):ccache_before[collapsed=true]\r\e[0Kccache stats before build"
    - ccache --show-stats
    - echo -e "\e[0Ksection_end:$(date +%s):ccache_before\r\e[0K"
  after_script:
    - echo -e "\e[0Ksection_start:$(date +%s):ccache_after[collapsed=true]\r\e[0Kccache stats after build"
    - ccache --show-stats
    - echo -e "\e[0Ksection_end:$(date +%s):ccache_after\r\e[0K"
    - !reference [default, after_script]
|  | ||||
# Windows builds run on tagged Windows/Docker runners and cache meson
# subproject downloads per job name.
.build-windows:
  extends: .build-common
  tags:
    - windows
    - docker
    - "1809"
    - mesa
  cache:
    key: ${CI_JOB_NAME}
    paths:
      - subprojects/packagecache
|  | ||||
# Base template for native x86_64 meson builds; concrete jobs override the
# variables that .gitlab-ci/meson/build.sh turns into meson options.
.meson-build:
  extends:
    - .build-linux
    - .use-debian/x86_build
  stage: build-x86_64
  variables:
    LLVM_VERSION: 11
  script:
    - .gitlab-ci/meson/build.sh
|  | ||||
| debian-testing: | ||||
|   extends: | ||||
|     - .meson-build | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D platforms=x11 | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=enabled | ||||
|       -D gallium-va=enabled | ||||
|     GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915" | ||||
|     VULKAN_DRIVERS: "swrast,amd,intel" | ||||
|     BUILDTYPE: "debugoptimized" | ||||
|     EXTRA_OPTION: > | ||||
|       -D valgrind=false | ||||
|     MINIO_ARTIFACT_NAME: mesa-amd64 | ||||
|   script: | ||||
|     - .gitlab-ci/lava/lava-pytest.sh | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|   artifacts: | ||||
|     reports: | ||||
|       junit: artifacts/ci_scripts_report.xml | ||||
|  | ||||
| debian-testing-asan: | ||||
|   extends: | ||||
|     - debian-testing | ||||
|   variables: | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-truncation | ||||
|     EXTRA_OPTION: > | ||||
|       -D b_sanitize=address | ||||
|       -D valgrind=false | ||||
|       -D tools=dlclose-skip | ||||
|     MINIO_ARTIFACT_NAME: "" | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|  | ||||
| debian-testing-msan: | ||||
|   extends: | ||||
|     - debian-clang | ||||
|   variables: | ||||
|     # l_undef is incompatible with msan | ||||
|     EXTRA_OPTION: | ||||
|       -D b_sanitize=memory | ||||
|       -D b_lundef=false | ||||
|     MINIO_ARTIFACT_NAME: "" | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     # Don't run all the tests yet: | ||||
|     # GLSL has some issues in sexpression reading. | ||||
|     # gtest has issues in its test initialization. | ||||
|     MESON_TEST_ARGS: "--suite glcpp --suite gallium  --suite format" | ||||
|     # Freedreno dropped because freedreno tools fail at msan. | ||||
|     GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus" | ||||
|     VULKAN_DRIVERS: intel,amd,broadcom,virtio-experimental | ||||
|  | ||||
| debian-clover-testing: | ||||
|   extends: | ||||
|     - .meson-build | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D egl=disabled | ||||
|       -D gbm=disabled | ||||
|     GALLIUM_ST: > | ||||
|       -D gallium-opencl=icd | ||||
|       -D opencl-spirv=true | ||||
|     GALLIUM_DRIVERS: "swrast" | ||||
|     BUILDTYPE: "debugoptimized" | ||||
|     EXTRA_OPTION: > | ||||
|       -D valgrind=false | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|  | ||||
| debian-gallium: | ||||
|   extends: .meson-build | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=enabled | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-xvmc=enabled | ||||
|       -D gallium-omx=bellagio | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=true | ||||
|       -D gallium-opencl=disabled | ||||
|     GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus" | ||||
|     VULKAN_DRIVERS: swrast | ||||
|     EXTRA_OPTION: > | ||||
|       -D osmesa=true | ||||
|       -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,xvmc,lima,panfrost,asahi | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/run-shader-db.sh | ||||
|  | ||||
| # Test a release build with -Werror so new warnings don't sneak in. | ||||
| debian-release: | ||||
|   extends: .meson-build | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D platforms=x11,wayland | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=enabled | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-xvmc=disabled | ||||
|       -D gallium-omx=disabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D llvm=enabled | ||||
|     GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus" | ||||
|     VULKAN_DRIVERS: "amd,imagination-experimental" | ||||
|     BUILDTYPE: "release" | ||||
|     EXTRA_OPTION: > | ||||
|       -D osmesa=true | ||||
|       -D tools=all | ||||
|       -D intel-clc=enabled | ||||
|       -D imagination-srv=true | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|  | ||||
| fedora-release: | ||||
|   extends: | ||||
|     - .meson-build | ||||
|     - .use-fedora/x86_build | ||||
|   variables: | ||||
|     BUILDTYPE: "release" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|       -Wno-error=maybe-uninitialized | ||||
|       -Wno-error=stringop-overread | ||||
|       -Wno-error=uninitialized | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=array-bounds | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=dri | ||||
|       -D gbm=enabled | ||||
|       -D egl=enabled | ||||
|       -D glvnd=true | ||||
|       -D platforms=x11,wayland | ||||
|     EXTRA_OPTION: > | ||||
|       -D osmesa=true | ||||
|       -D selinux=true | ||||
|       -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination | ||||
|       -D intel-clc=enabled | ||||
|       -D imagination-srv=true | ||||
|     GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink" | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=enabled | ||||
|       -D gallium-extra-hud=true | ||||
|       -D gallium-vdpau=enabled | ||||
|       -D gallium-xvmc=disabled | ||||
|       -D gallium-omx=disabled | ||||
|       -D gallium-va=enabled | ||||
|       -D gallium-xa=enabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-opencl=icd | ||||
|       -D gles1=disabled | ||||
|       -D gles2=enabled | ||||
|       -D llvm=enabled | ||||
|       -D microsoft-clc=disabled | ||||
|       -D shared-llvm=enabled | ||||
|       -D vulkan-device-select-layer=true | ||||
|     LLVM_VERSION: "" | ||||
|     UNWIND: "disabled" | ||||
|     VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental" | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|  | ||||
| debian-android: | ||||
|   extends: | ||||
|     - .meson-cross | ||||
|     - .use-debian/android_build | ||||
|   variables: | ||||
|     UNWIND: "disabled" | ||||
|     C_ARGS: > | ||||
|       -Wno-error=asm-operand-widths | ||||
|       -Wno-error=constant-conversion | ||||
|       -Wno-error=enum-conversion | ||||
|       -Wno-error=initializer-overrides | ||||
|       -Wno-error=missing-braces | ||||
|       -Wno-error=sometimes-uninitialized | ||||
|       -Wno-error=unused-function | ||||
|     CPP_ARGS: > | ||||
|       -Wno-error=deprecated-declarations | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D gbm=disabled | ||||
|       -D egl=enabled | ||||
|       -D platforms=android | ||||
|     EXTRA_OPTION: > | ||||
|       -D android-stub=true | ||||
|       -D llvm=disabled | ||||
|       -D platform-sdk-version=29 | ||||
|       -D valgrind=false | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=disabled | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-xvmc=disabled | ||||
|       -D gallium-omx=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-opencl=disabled | ||||
|     LLVM_VERSION: "" | ||||
|     PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files" | ||||
|   script: | ||||
|     - PKG_CONFIG_PATH=/usr/local/lib/aarch64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/aarch64-linux-android/pkgconfig/ CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio-experimental .gitlab-ci/meson/build.sh | ||||
|     # x86_64 build: | ||||
|     # Can't do Intel because gen_decoder.c currently requires libexpat, which | ||||
|     # is not a dependency that AOSP wants to accept.  Can't do Radeon Gallium | ||||
|     # drivers because they requires LLVM, which we don't have an Android build | ||||
|     # of. | ||||
|     - PKG_CONFIG_PATH=/usr/local/lib/x86_64-linux-android/pkgconfig/:/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/x86_64-linux-android/pkgconfig/ CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh | ||||
|  | ||||
# Base template for cross-compiled builds: trims out the desktop video/accel
# state trackers that the cross targets don't test.
.meson-cross:
  extends:
    - .meson-build
  stage: build-misc
  variables:
    UNWIND: "disabled"
    DRI_LOADERS: >
      -D glx=dri
      -D gbm=enabled
      -D egl=enabled
      -D platforms=x11
      -D osmesa=false
    GALLIUM_ST: >
      -D dri3=enabled
      -D gallium-vdpau=disabled
      -D gallium-xvmc=disabled
      -D gallium-omx=disabled
      -D gallium-va=disabled
      -D gallium-xa=disabled
      -D gallium-nine=false
|  | ||||
# ARM cross-build base: runs on aarch64 runners and builds the drivers used
# by the ARM hardware test farms.
.meson-arm:
  extends:
    - .meson-cross
    - .use-debian/arm_build
  needs:
    - debian/arm_build
  variables:
    VULKAN_DRIVERS: freedreno,broadcom
    GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4"
    BUILDTYPE: "debugoptimized"
  tags:
    - aarch64
|  | ||||
| debian-armhf: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     CROSS: armhf | ||||
|     EXTRA_OPTION: > | ||||
|       -D llvm=disabled | ||||
|       -D valgrind=false | ||||
|     MINIO_ARTIFACT_NAME: mesa-armhf | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|  | ||||
| debian-arm64: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental" | ||||
|     EXTRA_OPTION: > | ||||
|       -D llvm=disabled | ||||
|       -D valgrind=false | ||||
|       -D imagination-srv=true | ||||
|     MINIO_ARTIFACT_NAME: mesa-arm64 | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/prepare-artifacts.sh | ||||
|  | ||||
| debian-arm64-asan: | ||||
|   extends: | ||||
|     - debian-arm64 | ||||
|   variables: | ||||
|     C_ARGS: > | ||||
|       -Wno-error=stringop-truncation | ||||
|     EXTRA_OPTION: > | ||||
|       -D llvm=disabled | ||||
|       -D b_sanitize=address | ||||
|       -D valgrind=false | ||||
|       -D tools=dlclose-skip | ||||
|     ARTIFACTS_DEBUG_SYMBOLS: 1 | ||||
|     MINIO_ARTIFACT_NAME: mesa-arm64-asan | ||||
|     MESON_TEST_ARGS: "--no-suite mesa:compiler" | ||||
|  | ||||
| debian-arm64-build-test: | ||||
|   extends: | ||||
|     - .meson-arm | ||||
|     - .ci-deqp-artifacts | ||||
|   variables: | ||||
|     VULKAN_DRIVERS: "amd" | ||||
|     EXTRA_OPTION: > | ||||
|       -Dtools=panfrost,imagination | ||||
|   script: | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|  | ||||
# Build-test with clang instead of gcc, with clang-specific warnings demoted
# from errors.
debian-clang:
  extends: .meson-build
  variables:
    UNWIND: "enabled"
    C_ARGS: >
      -Wno-error=constant-conversion
      -Wno-error=enum-conversion
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=initializer-overrides
      -Wno-error=sometimes-uninitialized
      -Wno-error=unused-function
    CPP_ARGS: >
      -Wno-error=c99-designator
      -Wno-error=deprecated-declarations
      -Wno-error=implicit-const-int-float-conversion
      -Wno-error=missing-braces
      -Wno-error=overloaded-virtual
      -Wno-error=tautological-constant-out-of-range-compare
      -Wno-error=unused-const-variable
      -Wno-error=unused-private-field
    DRI_LOADERS: >
      -D glvnd=true
    GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
    VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,swrast,panfrost,imagination-experimental
    # NOTE(review): this was spelled EXTRA_OPTIONS, which no other job in
    # this file uses; every sibling job passes extra meson options via
    # EXTRA_OPTION, so the trailing-S variant looks like a typo the build
    # script would silently ignore — confirm against .gitlab-ci/meson/build.sh.
    EXTRA_OPTION: >
      -D imagination-srv=true
    CC: clang
    CXX: clang++
|  | ||||
| windows-vs2019: | ||||
|   extends: | ||||
|     - .build-windows | ||||
|     - .use-windows_build_vs2019 | ||||
|     - .windows-build-rules | ||||
|   stage: build-misc | ||||
|   script: | ||||
|     - . .\.gitlab-ci\windows\mesa_build.ps1 | ||||
|   artifacts: | ||||
|     paths: | ||||
|       - _build/meson-logs/*.txt | ||||
|       - _install/ | ||||
|  | ||||
| debian-clover: | ||||
|   extends: .meson-build | ||||
|   variables: | ||||
|     UNWIND: "enabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D egl=disabled | ||||
|       -D gbm=disabled | ||||
|     GALLIUM_DRIVERS: "r600,radeonsi" | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=disabled | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-xvmc=disabled | ||||
|       -D gallium-omx=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-opencl=icd | ||||
|     EXTRA_OPTION: > | ||||
|       -D valgrind=false | ||||
|   script: | ||||
|     - LLVM_VERSION=9 GALLIUM_DRIVERS=r600,swrast .gitlab-ci/meson/build.sh | ||||
|     - .gitlab-ci/meson/build.sh | ||||
|  | ||||
| debian-vulkan: | ||||
|   extends: .meson-build | ||||
|   variables: | ||||
|     UNWIND: "disabled" | ||||
|     DRI_LOADERS: > | ||||
|       -D glx=disabled | ||||
|       -D gbm=disabled | ||||
|       -D egl=disabled | ||||
|       -D platforms=x11,wayland | ||||
|       -D osmesa=false | ||||
|     GALLIUM_ST: > | ||||
|       -D dri3=enabled | ||||
|       -D gallium-vdpau=disabled | ||||
|       -D gallium-xvmc=disabled | ||||
|       -D gallium-omx=disabled | ||||
|       -D gallium-va=disabled | ||||
|       -D gallium-xa=disabled | ||||
|       -D gallium-nine=false | ||||
|       -D gallium-opencl=disabled | ||||
|       -D b_sanitize=undefined | ||||
|       -D c_args=-fno-sanitize-recover=all | ||||
|       -D cpp_args=-fno-sanitize-recover=all | ||||
|     UBSAN_OPTIONS: "print_stacktrace=1" | ||||
|     VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio-experimental,imagination-experimental | ||||
|     EXTRA_OPTION: > | ||||
|       -D vulkan-layers=device-select,overlay | ||||
|       -D build-aco-tests=true | ||||
|       -D intel-clc=enabled | ||||
|       -D imagination-srv=true | ||||
|  | ||||
# 32-bit x86 cross build.
debian-i386:
  extends:
    - .meson-cross
    - .use-debian/i386_build
  variables:
    CROSS: i386
    VULKAN_DRIVERS: intel,amd,swrast,virtio-experimental
    GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
    EXTRA_OPTION: >
      -D vulkan-layers=device-select,overlay
|  | ||||
# Big-endian s390x cross build (software rendering only), run under KVM.
debian-s390x:
  extends:
    - debian-ppc64el
    - .use-debian/s390x_build
    - .s390x-rules
  tags:
    - kvm
  variables:
    CROSS: s390x
    GALLIUM_DRIVERS: "swrast,zink"
    # The lp_test_blend test times out with LLVM 11
    LLVM_VERSION: 9
    VULKAN_DRIVERS: "swrast"
|  | ||||
# POWER little-endian cross build.
debian-ppc64el:
  extends:
    - .meson-cross
    - .use-debian/ppc64el_build
    - .ppc64el-rules
  variables:
    CROSS: ppc64el
    GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
    VULKAN_DRIVERS: "amd,swrast"
|  | ||||
# MinGW cross build targeting Windows from a Debian image, software
# rendering only.
debian-mingw32-x86_64:
  extends: .meson-build
  stage: build-misc
  variables:
    UNWIND: "disabled"
    C_ARGS: >
      -Wno-error=format
      -Wno-error=format-extra-args
    CPP_ARGS: $C_ARGS
    GALLIUM_DRIVERS: "swrast"
    EXTRA_OPTION: >
      -Dllvm=disabled
      -Dzlib=disabled
      -Dosmesa=true
      --cross-file=.gitlab-ci/x86_64-w64-mingw32
| @@ -1,14 +0,0 @@ | ||||
#!/bin/sh

# Poll sysfs for a devcoredump appearing; when one shows up, save it to the
# job artifacts and tell the kernel to release it, then exit.
# Fixes: modern $(...) command substitution instead of backticks, and the
# path is quoted where word-splitting is not wanted ($devcds stays unquoted
# deliberately so each found path becomes a loop item).

while true; do
  devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null)
  for i in $devcds; do
    echo "Found a devcoredump at $i."
    if cp "$i" /results/first.devcore; then
      # Writing any data to the devcoredump node frees it in the kernel.
      echo 1 > "$i"
      echo "Saved to the job artifacts at /first.devcore"
      exit 0
    fi
  done
  sleep 10
done
| @@ -1,117 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| for var in \ | ||||
|     ASAN_OPTIONS \ | ||||
|     BASE_SYSTEM_FORK_HOST_PREFIX \ | ||||
|     BASE_SYSTEM_MAINLINE_HOST_PREFIX \ | ||||
|     CI_COMMIT_BRANCH \ | ||||
|     CI_COMMIT_REF_NAME \ | ||||
|     CI_COMMIT_TITLE \ | ||||
|     CI_JOB_ID \ | ||||
|     CI_JOB_JWT_FILE \ | ||||
|     CI_JOB_NAME \ | ||||
|     CI_JOB_URL \ | ||||
|     CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \ | ||||
|     CI_MERGE_REQUEST_TITLE \ | ||||
|     CI_NODE_INDEX \ | ||||
|     CI_NODE_TOTAL \ | ||||
|     CI_PAGES_DOMAIN \ | ||||
|     CI_PIPELINE_ID \ | ||||
|     CI_PIPELINE_URL \ | ||||
|     CI_PROJECT_DIR \ | ||||
|     CI_PROJECT_NAME \ | ||||
|     CI_PROJECT_PATH \ | ||||
|     CI_PROJECT_ROOT_NAMESPACE \ | ||||
|     CI_RUNNER_DESCRIPTION \ | ||||
|     CI_SERVER_URL \ | ||||
|     CROSVM_GALLIUM_DRIVER \ | ||||
|     CROSVM_GPU_ARGS \ | ||||
|     DEQP_BIN_DIR \ | ||||
|     DEQP_CASELIST_FILTER \ | ||||
|     DEQP_CASELIST_INV_FILTER \ | ||||
|     DEQP_CONFIG \ | ||||
|     DEQP_EXPECTED_RENDERER \ | ||||
|     DEQP_FRACTION \ | ||||
|     DEQP_HEIGHT \ | ||||
|     DEQP_RESULTS_DIR \ | ||||
|     DEQP_RUNNER_OPTIONS \ | ||||
|     DEQP_SUITE \ | ||||
|     DEQP_TEMP_DIR \ | ||||
|     DEQP_VARIANT \ | ||||
|     DEQP_VER \ | ||||
|     DEQP_WIDTH \ | ||||
|     DEVICE_NAME \ | ||||
|     DRIVER_NAME \ | ||||
|     EGL_PLATFORM \ | ||||
|     ETNA_MESA_DEBUG \ | ||||
|     FDO_CI_CONCURRENT \ | ||||
|     FDO_UPSTREAM_REPO \ | ||||
|     FD_MESA_DEBUG \ | ||||
|     FLAKES_CHANNEL \ | ||||
|     FREEDRENO_HANGCHECK_MS \ | ||||
|     GALLIUM_DRIVER \ | ||||
|     GALLIVM_PERF \ | ||||
|     GPU_VERSION \ | ||||
|     GTEST \ | ||||
|     GTEST_FAILS \ | ||||
|     GTEST_FRACTION \ | ||||
|     GTEST_RESULTS_DIR \ | ||||
|     GTEST_RUNNER_OPTIONS \ | ||||
|     GTEST_SKIPS \ | ||||
|     HWCI_FREQ_MAX \ | ||||
|     HWCI_KERNEL_MODULES \ | ||||
|     HWCI_KVM \ | ||||
|     HWCI_START_XORG \ | ||||
|     HWCI_TEST_SCRIPT \ | ||||
|     IR3_SHADER_DEBUG \ | ||||
|     JOB_ARTIFACTS_BASE \ | ||||
|     JOB_RESULTS_PATH \ | ||||
|     JOB_ROOTFS_OVERLAY_PATH \ | ||||
|     KERNEL_IMAGE_BASE_URL \ | ||||
|     KERNEL_IMAGE_NAME \ | ||||
|     LD_LIBRARY_PATH \ | ||||
|     LP_NUM_THREADS \ | ||||
|     MESA_BASE_TAG \ | ||||
|     MESA_BUILD_PATH \ | ||||
|     MESA_DEBUG \ | ||||
|     MESA_GLES_VERSION_OVERRIDE \ | ||||
|     MESA_GLSL_VERSION_OVERRIDE \ | ||||
|     MESA_GL_VERSION_OVERRIDE \ | ||||
|     MESA_IMAGE \ | ||||
|     MESA_IMAGE_PATH \ | ||||
|     MESA_IMAGE_TAG \ | ||||
|     MESA_TEMPLATES_COMMIT \ | ||||
|     MESA_VK_IGNORE_CONFORMANCE_WARNING \ | ||||
|     MESA_SPIRV_LOG_LEVEL \ | ||||
|     MINIO_HOST \ | ||||
|     MINIO_RESULTS_UPLOAD \ | ||||
|     NIR_DEBUG \ | ||||
|     PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \ | ||||
|     PAN_MESA_DEBUG \ | ||||
|     PIGLIT_FRACTION \ | ||||
|     PIGLIT_NO_WINDOW \ | ||||
|     PIGLIT_OPTIONS \ | ||||
|     PIGLIT_PLATFORM \ | ||||
|     PIGLIT_PROFILES \ | ||||
|     PIGLIT_REPLAY_ARTIFACTS_BASE_URL \ | ||||
|     PIGLIT_REPLAY_DESCRIPTION_FILE \ | ||||
|     PIGLIT_REPLAY_DEVICE_NAME \ | ||||
|     PIGLIT_REPLAY_EXTRA_ARGS \ | ||||
|     PIGLIT_REPLAY_LOOP_TIMES \ | ||||
|     PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \ | ||||
|     PIGLIT_REPLAY_SUBCOMMAND \ | ||||
|     PIGLIT_RESULTS \ | ||||
|     PIGLIT_TESTS \ | ||||
|     PIPELINE_ARTIFACTS_BASE \ | ||||
|     SKQP_ASSETS_DIR \ | ||||
|     SKQP_BACKENDS \ | ||||
|     TU_DEBUG \ | ||||
|     VIRGL_HOST_API \ | ||||
|     VK_CPU \ | ||||
|     VK_DRIVER \ | ||||
|     VK_ICD_FILENAMES \ | ||||
|     ; do | ||||
|   if [ -n "${!var+x}" ]; then | ||||
|     echo "export $var=${!var@Q}" | ||||
|   fi | ||||
| done | ||||
| @@ -1,23 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Very early init, used to make sure devices and network are set up and | ||||
| # reachable. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| cd / | ||||
|  | ||||
| mount -t proc none /proc | ||||
| mount -t sysfs none /sys | ||||
| mount -t debugfs none /sys/kernel/debug | ||||
| mount -t devtmpfs none /dev || echo possibly already mounted | ||||
| mkdir -p /dev/pts | ||||
| mount -t devpts devpts /dev/pts | ||||
| mount -t tmpfs tmpfs /tmp | ||||
|  | ||||
| echo "nameserver 8.8.8.8" > /etc/resolv.conf | ||||
| [ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts | ||||
|  | ||||
| # Set the time so we can validate certificates before we fetch anything; | ||||
| # however as not all DUTs have network, make this non-fatal. | ||||
| for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true | ||||
| @@ -1,112 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Second-stage init, used to set up devices and our job environment before | ||||
| # running tests. | ||||
|  | ||||
| . /set-job-env-vars.sh | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Set up any devices required by the jobs | ||||
| [ -z "$HWCI_KERNEL_MODULES" ] || { | ||||
|     echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe | ||||
| } | ||||
|  | ||||
| # | ||||
| # Load the KVM module specific to the detected CPU virtualization extensions: | ||||
| # - vmx for Intel VT | ||||
| # - svm for AMD-V | ||||
| # | ||||
| # Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT. | ||||
| # | ||||
| if [ "$HWCI_KVM" = "true" ]; then | ||||
|     unset KVM_KERNEL_MODULE | ||||
|     grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel || { | ||||
|         grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd | ||||
|     } | ||||
|  | ||||
|     [ -z "${KVM_KERNEL_MODULE}" ] && \ | ||||
|         echo "WARNING: Failed to detect CPU virtualization extensions" || \ | ||||
|         modprobe ${KVM_KERNEL_MODULE} | ||||
|  | ||||
|     mkdir -p /lava-files | ||||
|     wget -S --progress=dot:giga -O /lava-files/${KERNEL_IMAGE_NAME} \ | ||||
|         "${KERNEL_IMAGE_BASE_URL}/${KERNEL_IMAGE_NAME}" | ||||
| fi | ||||
|  | ||||
| # Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect | ||||
| # it in /install | ||||
| ln -sf $CI_PROJECT_DIR/install /install | ||||
| export LD_LIBRARY_PATH=/install/lib | ||||
| export LIBGL_DRIVERS_PATH=/install/lib/dri | ||||
|  | ||||
| # Store Mesa's disk cache under /tmp, rather than sending it out over NFS. | ||||
| export XDG_CACHE_HOME=/tmp | ||||
|  | ||||
| # Make sure Python can find all our imports | ||||
| export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))") | ||||
|  | ||||
| if [ "$HWCI_FREQ_MAX" = "true" ]; then | ||||
|   # Ensure initialization of the DRM device (needed by MSM) | ||||
|   head -0 /dev/dri/renderD128 | ||||
|  | ||||
|   # Disable GPU frequency scaling | ||||
|   DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true` | ||||
|   test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true | ||||
|  | ||||
|   # Disable CPU frequency scaling | ||||
|   echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true | ||||
|  | ||||
|   # Disable GPU runtime power management | ||||
|   GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1` | ||||
|   test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true | ||||
|  | ||||
|   # Lock Intel GPU frequency to 70% of the maximum allowed by hardware | ||||
|   # and enable throttling detection & reporting. | ||||
|   ./intel-gpu-freq.sh -s 70% -g all -d | ||||
| fi | ||||
|  | ||||
| # Increase freedreno hangcheck timer because it's right at the edge of the | ||||
| # spilling tests timing out (and some traces, too) | ||||
| if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then | ||||
|     echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms | ||||
| fi | ||||
|  | ||||
| # Start a little daemon to capture the first devcoredump we encounter.  (They | ||||
| # expire after 5 minutes, so we poll for them). | ||||
| ./capture-devcoredump.sh & | ||||
|  | ||||
| # If we want Xorg to be running for the test, then we start it up before the | ||||
| # HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise | ||||
| # without using -displayfd you can race with Xorg's startup), but xinit will eat | ||||
| # your client's return code | ||||
| if [ -n "$HWCI_START_XORG" ]; then | ||||
|   echo "touch /xorg-started; sleep 100000" > /xorg-script | ||||
|   env \ | ||||
|     xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log & | ||||
|  | ||||
|   # Wait for xorg to be ready for connections. | ||||
|   for i in 1 2 3 4 5; do | ||||
|     if [ -e /xorg-started ]; then | ||||
|       break | ||||
|     fi | ||||
|     sleep 5 | ||||
|   done | ||||
|   export DISPLAY=:0 | ||||
| fi | ||||
|  | ||||
| sh -c "$HWCI_TEST_SCRIPT" && RESULT=pass || RESULT=fail | ||||
|  | ||||
| # Let's make sure the results are always stored in current working directory | ||||
| mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true | ||||
|  | ||||
| [ "${RESULT}" = "fail" ] || rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME | ||||
|  | ||||
| # upload artifacts | ||||
| if [ -n "$MINIO_RESULTS_UPLOAD" ]; then | ||||
|   tar -czf results.tar.gz results/; | ||||
|   ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}"; | ||||
|   ci-fairy minio cp results.tar.gz minio://"$MINIO_RESULTS_UPLOAD"/results.tar.gz; | ||||
| fi | ||||
|  | ||||
| echo "hwci: mesa: $RESULT" | ||||
| @@ -1,567 +0,0 @@ | ||||
| #!/bin/sh | ||||
| # | ||||
| # The Intel i915 GPU driver allows to change the minimum, maximum and boost | ||||
| # frequencies in steps of 50 MHz via /sys/class/drm/card<n>/<freq_info>, | ||||
| # where <n> is the DRM card index and <freq_info> one of the following: | ||||
| # | ||||
| # - gt_max_freq_mhz (enforced maximum freq) | ||||
| # - gt_min_freq_mhz (enforced minimum freq) | ||||
| # - gt_boost_freq_mhz (enforced boost freq) | ||||
| # | ||||
| # The hardware capabilities can be accessed via: | ||||
| # | ||||
| # - gt_RP0_freq_mhz (supported maximum freq) | ||||
| # - gt_RPn_freq_mhz (supported minimum freq) | ||||
| # - gt_RP1_freq_mhz (most efficient freq) | ||||
| # | ||||
| # The current frequency can be read from: | ||||
| # - gt_act_freq_mhz (the actual GPU freq) | ||||
| # - gt_cur_freq_mhz (the last requested freq) | ||||
| # | ||||
| # Copyright (C) 2022 Collabora Ltd. | ||||
| # Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com> | ||||
| # | ||||
| # SPDX-License-Identifier: MIT | ||||
| # | ||||
|  | ||||
| # | ||||
| # Constants | ||||
| # | ||||
| DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz" | ||||
| ENF_FREQ_INFO="max min boost" | ||||
| CAP_FREQ_INFO="RP0 RPn RP1" | ||||
| ACT_FREQ_INFO="act cur" | ||||
| THROTT_DETECT_SLEEP_SEC=2 | ||||
| THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid | ||||
|  | ||||
| # | ||||
| # Global variables. | ||||
| # | ||||
| unset INTEL_DRM_CARD_INDEX | ||||
| unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ | ||||
| unset SET_MIN_FREQ SET_MAX_FREQ | ||||
| unset MONITOR_FREQ | ||||
| unset DETECT_THROTT | ||||
| unset DRY_RUN | ||||
|  | ||||
| # | ||||
| # Simple printf based stderr logger. | ||||
| # | ||||
| log() { | ||||
|     local msg_type=$1 | ||||
|  | ||||
|     shift | ||||
|     printf "%s: %s: " "${msg_type}" "${0##*/}" >&2 | ||||
|     printf "$@" >&2 | ||||
|     printf "\n" >&2 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to print sysfs path for the given card index and freq info. | ||||
| # | ||||
| # arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above | ||||
| # arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX | ||||
| # | ||||
| print_freq_sysfs_path() { | ||||
|     printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1" | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card. | ||||
| # | ||||
| identify_intel_gpu() { | ||||
|     local i=0 vendor path | ||||
|  | ||||
|     while [ ${i} -lt 16 ]; do | ||||
|         [ -c "/dev/dri/card$i" ] || { | ||||
|             i=$((i + 1)) | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         path=$(print_freq_sysfs_path "" ${i}) | ||||
|         path=${path%/*}/device/vendor | ||||
|  | ||||
|         [ -r "${path}" ] && read vendor < "${path}" && \ | ||||
|             [ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0 | ||||
|  | ||||
|         i=$((i + 1)) | ||||
|     done | ||||
|  | ||||
|     return 1 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Read the specified freq info from sysfs. | ||||
| # | ||||
| # arg1: Flag (y/n) to also enable printing the freq info. | ||||
| # arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above | ||||
| # return: Global variable(s) FREQ_${arg} containing the requested information | ||||
| # | ||||
| read_freq_info() { | ||||
|     local var val path print=0 ret=0 | ||||
|  | ||||
|     [ "$1" = "y" ] && print=1 | ||||
|     shift | ||||
|  | ||||
|     while [ $# -gt 0 ]; do | ||||
|         var=FREQ_$1 | ||||
|         path=$(print_freq_sysfs_path "$1") | ||||
|  | ||||
|         [ -r ${path} ] && read ${var} < ${path} || { | ||||
|             log ERROR "Failed to read freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ -n "${var}" ] || { | ||||
|             log ERROR "Got empty freq info from: %s" "${path}" | ||||
|             ret=1 | ||||
|             continue | ||||
|         } | ||||
|  | ||||
|         [ ${print} -eq 1 ] && { | ||||
|             eval val=\$${var} | ||||
|             printf "%6s: %4s MHz\n" "$1" "${val}" | ||||
|         } | ||||
|  | ||||
|         shift | ||||
|     done | ||||
|  | ||||
|     return ${ret} | ||||
| } | ||||
|  | ||||
| # | ||||
| # Display requested info. | ||||
| # | ||||
| print_freq_info() { | ||||
|     local req_freq | ||||
|  | ||||
|     [ -n "${GET_CAP_FREQ}" ] && { | ||||
|         printf "* Hardware capabilities\n" | ||||
|         read_freq_info y ${CAP_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
|  | ||||
|     [ -n "${GET_ENF_FREQ}" ] && { | ||||
|         printf "* Enforcements\n" | ||||
|         read_freq_info y ${ENF_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
|  | ||||
|     [ -n "${GET_ACT_FREQ}" ] && { | ||||
|         printf "* Actual\n" | ||||
|         read_freq_info y ${ACT_FREQ_INFO} | ||||
|         printf "\n" | ||||
|     } | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper to print frequency value as requested by user via '-s, --set' option. | ||||
| # arg1: user requested freq value | ||||
| # | ||||
| compute_freq_set() { | ||||
|     local val | ||||
|  | ||||
|     case "$1" in | ||||
|     +) | ||||
|         val=${FREQ_RP0} | ||||
|         ;; | ||||
|     -) | ||||
|         val=${FREQ_RPn} | ||||
|         ;; | ||||
|     *%) | ||||
|         val=$((${1%?} * ${FREQ_RP0} / 100)) | ||||
|         # Adjust freq to comply with 50 MHz increments | ||||
|         val=$((val / 50 * 50)) | ||||
|         ;; | ||||
|     *[!0-9]*) | ||||
|         log ERROR "Cannot set freq to invalid value: %s" "$1" | ||||
|         return 1 | ||||
|         ;; | ||||
|     "") | ||||
|         log ERROR "Cannot set freq to unspecified value" | ||||
|         return 1 | ||||
|         ;; | ||||
|     *) | ||||
|         # Adjust freq to comply with 50 MHz increments | ||||
|         val=$(($1 / 50 * 50)) | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     printf "%s" "${val}" | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for set_freq(). | ||||
| # | ||||
| set_freq_max() { | ||||
|     log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}" | ||||
|  | ||||
|     read_freq_info n min || return $? | ||||
|  | ||||
|     [ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \ | ||||
|             "${SET_MAX_FREQ}" "${FREQ_RP0}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "${FREQ_RPn}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && { | ||||
|         log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \ | ||||
|             "${SET_MAX_FREQ}" "${FREQ_min}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${DRY_RUN}" ] || return 0 | ||||
|  | ||||
|     printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \ | ||||
|         $(print_freq_sysfs_path boost) > /dev/null | ||||
|     [ $? -eq 0 ] || { | ||||
|         log ERROR "Failed to set GPU max frequency" | ||||
|         return 1 | ||||
|     } | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for set_freq(). | ||||
| # | ||||
| set_freq_min() { | ||||
|     log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}" | ||||
|  | ||||
|     read_freq_info n max || return $? | ||||
|  | ||||
|     [ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && { | ||||
|         log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "${FREQ_max}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && { | ||||
|         log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \ | ||||
|             "${SET_MIN_FREQ}" "${FREQ_RPn}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${DRY_RUN}" ] || return 0 | ||||
|  | ||||
|     printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min) | ||||
|     [ $? -eq 0 ] || { | ||||
|         log ERROR "Failed to set GPU min frequency" | ||||
|         return 1 | ||||
|     } | ||||
| } | ||||
|  | ||||
| # | ||||
| # Set min or max or both GPU frequencies to the user indicated values. | ||||
| # | ||||
| set_freq() { | ||||
|     # Get hw max & min frequencies | ||||
|     read_freq_info n RP0 RPn || return $? | ||||
|  | ||||
|     [ -z "${SET_MAX_FREQ}" ] || { | ||||
|         SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}") | ||||
|         [ -z "${SET_MAX_FREQ}" ] && return 1 | ||||
|     } | ||||
|  | ||||
|     [ -z "${SET_MIN_FREQ}" ] || { | ||||
|         SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}") | ||||
|         [ -z "${SET_MIN_FREQ}" ] && return 1 | ||||
|     } | ||||
|  | ||||
|     # | ||||
|     # Ensure correct operation order, to avoid setting min freq | ||||
|     # to a value which is larger than max freq. | ||||
|     # | ||||
|     # E.g.: | ||||
|     #   crt_min=crt_max=600; new_min=new_max=700 | ||||
|     #   > operation order: max=700; min=700 | ||||
|     # | ||||
|     #   crt_min=crt_max=600; new_min=new_max=500 | ||||
|     #   > operation order: min=500; max=500 | ||||
|     # | ||||
|     if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then | ||||
|         [ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && { | ||||
|             log ERROR "Cannot set GPU max freq to be less than min freq" | ||||
|             return 1 | ||||
|         } | ||||
|  | ||||
|         read_freq_info n min || return $? | ||||
|  | ||||
|         if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then | ||||
|             set_freq_min || return $? | ||||
|             set_freq_max | ||||
|         else | ||||
|             set_freq_max || return $? | ||||
|             set_freq_min | ||||
|         fi | ||||
|     elif [ -n "${SET_MAX_FREQ}" ]; then | ||||
|         set_freq_max | ||||
|     elif [ -n "${SET_MIN_FREQ}" ]; then | ||||
|         set_freq_min | ||||
|     else | ||||
|         log "Unexpected call to set_freq()" | ||||
|         return 1 | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # | ||||
| # Helper for detect_throttling(). | ||||
| # | ||||
| get_thrott_detect_pid() { | ||||
|     [ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0 | ||||
|  | ||||
|     local pid | ||||
|     read pid < ${THROTT_DETECT_PID_FILE_PATH} || { | ||||
|         log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}" | ||||
|         return 1 | ||||
|     } | ||||
|  | ||||
|     local proc_path=/proc/${pid:-invalid}/cmdline | ||||
|     [ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && { | ||||
|         printf "%s" "${pid}" | ||||
|         return 0 | ||||
|     } | ||||
|  | ||||
|     # Remove orphaned PID file | ||||
|     rm -rf ${THROTT_DETECT_PID_FILE_PATH} | ||||
|     return 1 | ||||
| } | ||||
|  | ||||
| # | ||||
| # Control detection and reporting of GPU throttling events. | ||||
| # arg1: start - run throttle detector in background | ||||
| #       stop - stop throttle detector process, if any | ||||
| #       status - verify if throttle detector is running | ||||
| # | ||||
| detect_throttling() { | ||||
|     local pid | ||||
|     pid=$(get_thrott_detect_pid) | ||||
|  | ||||
|     case "$1" in | ||||
|     status) | ||||
|         printf "Throttling detector is " | ||||
|         [ -z "${pid}" ] && printf "not running\n" && return 0 | ||||
|         printf "running (pid=%s)\n" ${pid} | ||||
|         ;; | ||||
|  | ||||
|     stop) | ||||
|         [ -z "${pid}" ] && return 0 | ||||
|  | ||||
|         log INFO "Stopping throttling detector (pid=%s)" "${pid}" | ||||
|         kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid} | ||||
|         rm -rf ${THROTT_DETECT_PID_FILE_PATH} | ||||
|         ;; | ||||
|  | ||||
|     start) | ||||
|         [ -n "${pid}" ] && { | ||||
|             log WARN "Throttling detector is already running (pid=%s)" ${pid} | ||||
|             return 0 | ||||
|         } | ||||
|  | ||||
|         ( | ||||
|             read_freq_info n RPn || exit $? | ||||
|  | ||||
|             while true; do | ||||
|                 sleep ${THROTT_DETECT_SLEEP_SEC} | ||||
|                 read_freq_info n act min cur || exit $? | ||||
|  | ||||
|                 # | ||||
|                 # The throttling seems to occur when act freq goes below min. | ||||
|                 # However, it's necessary to exclude the idle states, where | ||||
|                 # act freq normally reaches RPn and cur goes below min. | ||||
|                 # | ||||
|                 [ ${FREQ_act} -lt ${FREQ_min} ] && \ | ||||
|                 [ ${FREQ_act} -gt ${FREQ_RPn} ] && \ | ||||
|                 [ ${FREQ_cur} -ge ${FREQ_min} ] && \ | ||||
|                     printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \ | ||||
|                     ${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn} | ||||
|             done | ||||
|         ) & | ||||
|  | ||||
|         pid=$! | ||||
|         log INFO "Started GPU throttling detector (pid=%s)" ${pid} | ||||
|  | ||||
|         printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \ | ||||
|             log WARN "Failed to write throttle detector PID file" | ||||
|         ;; | ||||
|     esac | ||||
| } | ||||
|  | ||||
| # | ||||
| # Show help message. | ||||
| # | ||||
| print_usage() { | ||||
|     cat <<EOF | ||||
| Usage: ${0##*/} [OPTION]... | ||||
|  | ||||
| A script to manage Intel GPU frequencies. Can be used for debugging performance | ||||
| problems or trying to obtain a stable frequency while benchmarking. | ||||
|  | ||||
| Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz. | ||||
|  | ||||
| Options: | ||||
|   -g, --get [act|enf|cap|all] | ||||
|                         Get frequency information: active (default), enforced, | ||||
|                         hardware capabilities or all of them. | ||||
|  | ||||
|   -s, --set [{min|max}=]{FREQUENCY[%]|+|-} | ||||
|                         Set min or max frequency to the given value (MHz). | ||||
|                         Append '%' to interpret FREQUENCY as % of hw max. | ||||
|                         Use '+' or '-' to set frequency to hardware max or min. | ||||
|                         Omit min/max prefix to set both frequencies. | ||||
|  | ||||
|   -r, --reset           Reset frequencies to hardware defaults. | ||||
|  | ||||
|   -m, --monitor [act|enf|cap|all] | ||||
|                         Monitor the indicated frequencies via 'watch' utility. | ||||
|                         See '-g, --get' option for more details. | ||||
|  | ||||
|   -d|--detect-thrott [start|stop|status] | ||||
|                         Start (default operation) the throttling detector | ||||
|                         as a background process. Use 'stop' or 'status' to | ||||
|                         terminate the detector process or verify its status. | ||||
|  | ||||
|   --dry-run             See what the script will do without applying any | ||||
|                         frequency changes. | ||||
|  | ||||
|   -h, --help            Display this help text and exit. | ||||
| EOF | ||||
| } | ||||
|  | ||||
| # | ||||
| # Parse user input for '-g, --get' option. | ||||
| # Returns 0 if a value has been provided, otherwise 1. | ||||
| # | ||||
| parse_option_get() { | ||||
|     local ret=0 | ||||
|  | ||||
|     case "$1" in | ||||
|     act) GET_ACT_FREQ=1;; | ||||
|     enf) GET_ENF_FREQ=1;; | ||||
|     cap) GET_CAP_FREQ=1;; | ||||
|     all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;; | ||||
|     -*|"") | ||||
|         # No value provided, using default. | ||||
|         GET_ACT_FREQ=1 | ||||
|         ret=1 | ||||
|         ;; | ||||
|     *) | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     return ${ret} | ||||
| } | ||||
|  | ||||
| # | ||||
| # Validate user input for '-s, --set' option. | ||||
| # | ||||
| validate_option_set() { | ||||
|     case "$1" in | ||||
|     +|-|[0-9]%|[0-9][0-9]%) | ||||
|         return 0 | ||||
|         ;; | ||||
|     *[!0-9]*|"") | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
| } | ||||
|  | ||||
| # | ||||
| # Parse script arguments. | ||||
| # | ||||
| [ $# -eq 0 ] && { print_usage; exit 1; } | ||||
|  | ||||
| while [ $# -gt 0 ]; do | ||||
|     case "$1" in | ||||
|     -g|--get) | ||||
|         parse_option_get "$2" && shift | ||||
|         ;; | ||||
|  | ||||
|     -s|--set) | ||||
|         shift | ||||
|         case "$1" in | ||||
|         min=*) | ||||
|             SET_MIN_FREQ=${1#min=} | ||||
|             validate_option_set "${SET_MIN_FREQ}" | ||||
|             ;; | ||||
|         max=*) | ||||
|             SET_MAX_FREQ=${1#max=} | ||||
|             validate_option_set "${SET_MAX_FREQ}" | ||||
|             ;; | ||||
|         *) | ||||
|             SET_MIN_FREQ=$1 | ||||
|             validate_option_set "${SET_MIN_FREQ}" | ||||
|             SET_MAX_FREQ=${SET_MIN_FREQ} | ||||
|             ;; | ||||
|         esac | ||||
|         ;; | ||||
|  | ||||
|     -r|--reset) | ||||
|         RESET_FREQ=1 | ||||
|         SET_MIN_FREQ="-" | ||||
|         SET_MAX_FREQ="+" | ||||
|         ;; | ||||
|  | ||||
|     -m|--monitor) | ||||
|         MONITOR_FREQ=act | ||||
|         parse_option_get "$2" && MONITOR_FREQ=$2 && shift | ||||
|         ;; | ||||
|  | ||||
|     -d|--detect-thrott) | ||||
|         DETECT_THROTT=start | ||||
|         case "$2" in | ||||
|         start|stop|status) | ||||
|             DETECT_THROTT=$2 | ||||
|             shift | ||||
|             ;; | ||||
|         esac | ||||
|         ;; | ||||
|  | ||||
|     --dry-run) | ||||
|         DRY_RUN=1 | ||||
|         ;; | ||||
|  | ||||
|     -h|--help) | ||||
|         print_usage | ||||
|         exit 0 | ||||
|         ;; | ||||
|  | ||||
|     *) | ||||
|         print_usage | ||||
|         exit 1 | ||||
|         ;; | ||||
|     esac | ||||
|  | ||||
|     shift | ||||
| done | ||||
|  | ||||
| # | ||||
| # Main | ||||
| # | ||||
| RET=0 | ||||
|  | ||||
| identify_intel_gpu || { | ||||
|     log INFO "No Intel GPU detected" | ||||
|     exit 0 | ||||
| } | ||||
|  | ||||
| [ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; } | ||||
| print_freq_info | ||||
|  | ||||
| [ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT} | ||||
|  | ||||
| [ -n "${MONITOR_FREQ}" ] && { | ||||
|     log INFO "Entering frequency monitoring mode" | ||||
|     sleep 2 | ||||
|     exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}" | ||||
| } | ||||
|  | ||||
| exit ${RET} | ||||
| @@ -1,21 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| _XORG_SCRIPT="/xorg-script" | ||||
| _FLAG_FILE="/xorg-started" | ||||
|  | ||||
| echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}" | ||||
| if [ "x$1" != "x" ]; then | ||||
|     export LD_LIBRARY_PATH="${1}/lib" | ||||
|     export LIBGL_DRIVERS_PATH="${1}/lib/dri" | ||||
| fi | ||||
| xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log & | ||||
|  | ||||
| # Wait for xorg to be ready for connections. | ||||
| for i in 1 2 3 4 5; do | ||||
|     if [ -e "${_FLAG_FILE}" ]; then | ||||
|         break | ||||
|     fi | ||||
|     sleep 5 | ||||
| done | ||||
| @@ -1,57 +0,0 @@ | ||||
| CONFIG_LOCALVERSION_AUTO=y | ||||
| CONFIG_DEBUG_KERNEL=y | ||||
|  | ||||
| # abootimg with a 'dummy' rootfs fails with root=/dev/nfs | ||||
| CONFIG_BLK_DEV_INITRD=n | ||||
|  | ||||
| CONFIG_DEVFREQ_GOV_PERFORMANCE=y | ||||
| CONFIG_DEVFREQ_GOV_POWERSAVE=y | ||||
| CONFIG_DEVFREQ_GOV_USERSPACE=y | ||||
| CONFIG_DEVFREQ_GOV_PASSIVE=y | ||||
| CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y | ||||
|  | ||||
| CONFIG_DRM=y | ||||
| CONFIG_DRM_ETNAVIV=y | ||||
| CONFIG_DRM_ROCKCHIP=y | ||||
| CONFIG_DRM_PANFROST=y | ||||
| CONFIG_DRM_LIMA=y | ||||
| CONFIG_DRM_PANEL_SIMPLE=y | ||||
| CONFIG_PWM_CROS_EC=y | ||||
| CONFIG_BACKLIGHT_PWM=y | ||||
|  | ||||
| CONFIG_ROCKCHIP_CDN_DP=n | ||||
|  | ||||
| CONFIG_SPI_ROCKCHIP=y | ||||
| CONFIG_PWM_ROCKCHIP=y | ||||
| CONFIG_PHY_ROCKCHIP_DP=y | ||||
| CONFIG_DWMAC_ROCKCHIP=y | ||||
|  | ||||
| CONFIG_MFD_RK808=y | ||||
| CONFIG_REGULATOR_RK808=y | ||||
| CONFIG_RTC_DRV_RK808=y | ||||
| CONFIG_COMMON_CLK_RK808=y | ||||
|  | ||||
| CONFIG_REGULATOR_FAN53555=y | ||||
| CONFIG_REGULATOR=y | ||||
|  | ||||
| CONFIG_REGULATOR_VCTRL=y | ||||
|  | ||||
| CONFIG_KASAN=n | ||||
| CONFIG_KASAN_INLINE=n | ||||
| CONFIG_STACKTRACE=n | ||||
|  | ||||
| CONFIG_TMPFS=y | ||||
|  | ||||
| CONFIG_PROVE_LOCKING=n | ||||
| CONFIG_DEBUG_LOCKDEP=n | ||||
| CONFIG_SOFTLOCKUP_DETECTOR=n | ||||
| CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n | ||||
|  | ||||
| CONFIG_FW_LOADER_COMPRESS=y | ||||
|  | ||||
| CONFIG_USB_USBNET=y | ||||
| CONFIG_NETDEVICES=y | ||||
| CONFIG_USB_NET_DRIVERS=y | ||||
| CONFIG_USB_RTL8152=y | ||||
| CONFIG_USB_NET_AX8817X=y | ||||
| CONFIG_USB_NET_SMSC95XX=y | ||||
| @@ -1,172 +0,0 @@ | ||||
| CONFIG_LOCALVERSION_AUTO=y | ||||
| CONFIG_DEBUG_KERNEL=y | ||||
|  | ||||
| # abootimg with a 'dummy' rootfs fails with root=/dev/nfs | ||||
| CONFIG_BLK_DEV_INITRD=n | ||||
|  | ||||
| CONFIG_DEVFREQ_GOV_PERFORMANCE=y | ||||
| CONFIG_DEVFREQ_GOV_POWERSAVE=y | ||||
| CONFIG_DEVFREQ_GOV_USERSPACE=y | ||||
| CONFIG_DEVFREQ_GOV_PASSIVE=y | ||||
|  | ||||
| CONFIG_DRM=y | ||||
| CONFIG_DRM_ROCKCHIP=y | ||||
| CONFIG_DRM_PANFROST=y | ||||
| CONFIG_DRM_LIMA=y | ||||
| CONFIG_DRM_PANEL_SIMPLE=y | ||||
| CONFIG_DRM_PANEL_EDP=y | ||||
| CONFIG_DRM_MSM=y | ||||
| CONFIG_DRM_I2C_ADV7511=y | ||||
| CONFIG_PWM_CROS_EC=y | ||||
| CONFIG_BACKLIGHT_PWM=y | ||||
|  | ||||
| CONFIG_ROCKCHIP_CDN_DP=n | ||||
|  | ||||
| CONFIG_SPI_ROCKCHIP=y | ||||
| CONFIG_PWM_ROCKCHIP=y | ||||
| CONFIG_PHY_ROCKCHIP_DP=y | ||||
| CONFIG_DWMAC_ROCKCHIP=y | ||||
| CONFIG_STMMAC_ETH=y | ||||
| CONFIG_TYPEC_FUSB302=y | ||||
| CONFIG_TYPEC=y | ||||
| CONFIG_TYPEC_TCPM=y | ||||
|  | ||||
| # MSM platform bits | ||||
|  | ||||
| # For CONFIG_QCOM_LMH | ||||
| CONFIG_OF=y | ||||
|  | ||||
| CONFIG_QCOM_COMMAND_DB=y | ||||
| CONFIG_QCOM_RPMHPD=y | ||||
| CONFIG_QCOM_RPMPD=y | ||||
| CONFIG_SDM_GPUCC_845=y | ||||
| CONFIG_SDM_VIDEOCC_845=y | ||||
| CONFIG_SDM_DISPCC_845=y | ||||
| CONFIG_SDM_LPASSCC_845=y | ||||
| CONFIG_SDM_CAMCC_845=y | ||||
| CONFIG_RESET_QCOM_PDC=y | ||||
| CONFIG_DRM_TI_SN65DSI86=y | ||||
| CONFIG_I2C_QCOM_GENI=y | ||||
| CONFIG_SPI_QCOM_GENI=y | ||||
| CONFIG_PHY_QCOM_QUSB2=y | ||||
| CONFIG_PHY_QCOM_QMP=y | ||||
| CONFIG_QCOM_CLK_APCC_MSM8996=y | ||||
| CONFIG_QCOM_LLCC=y | ||||
| CONFIG_QCOM_LMH=y | ||||
| CONFIG_QCOM_SPMI_TEMP_ALARM=y | ||||
| CONFIG_QCOM_WDT=y | ||||
| CONFIG_POWER_RESET_QCOM_PON=y | ||||
| CONFIG_RTC_DRV_PM8XXX=y | ||||
| CONFIG_INTERCONNECT=y | ||||
| CONFIG_INTERCONNECT_QCOM=y | ||||
| CONFIG_INTERCONNECT_QCOM_SDM845=y | ||||
| CONFIG_INTERCONNECT_QCOM_MSM8916=y | ||||
| CONFIG_INTERCONNECT_QCOM_OSM_L3=y | ||||
| CONFIG_INTERCONNECT_QCOM_SC7180=y | ||||
| CONFIG_CRYPTO_DEV_QCOM_RNG=y | ||||
| CONFIG_SC_DISPCC_7180=y | ||||
| CONFIG_SC_GPUCC_7180=y | ||||
|  | ||||
| # db410c ethernet | ||||
| CONFIG_USB_RTL8152=y | ||||
| # db820c ethernet | ||||
| CONFIG_ATL1C=y | ||||
|  | ||||
| CONFIG_ARCH_ALPINE=n | ||||
| CONFIG_ARCH_BCM2835=n | ||||
| CONFIG_ARCH_BCM_IPROC=n | ||||
| CONFIG_ARCH_BERLIN=n | ||||
| CONFIG_ARCH_BRCMSTB=n | ||||
| CONFIG_ARCH_EXYNOS=n | ||||
| CONFIG_ARCH_K3=n | ||||
| CONFIG_ARCH_LAYERSCAPE=n | ||||
| CONFIG_ARCH_LG1K=n | ||||
| CONFIG_ARCH_HISI=n | ||||
| CONFIG_ARCH_MVEBU=n | ||||
| CONFIG_ARCH_SEATTLE=n | ||||
| CONFIG_ARCH_SYNQUACER=n | ||||
| CONFIG_ARCH_RENESAS=n | ||||
| CONFIG_ARCH_R8A774A1=n | ||||
| CONFIG_ARCH_R8A774C0=n | ||||
| CONFIG_ARCH_R8A7795=n | ||||
| CONFIG_ARCH_R8A7796=n | ||||
| CONFIG_ARCH_R8A77965=n | ||||
| CONFIG_ARCH_R8A77970=n | ||||
| CONFIG_ARCH_R8A77980=n | ||||
| CONFIG_ARCH_R8A77990=n | ||||
| CONFIG_ARCH_R8A77995=n | ||||
| CONFIG_ARCH_STRATIX10=n | ||||
| CONFIG_ARCH_TEGRA=n | ||||
| CONFIG_ARCH_SPRD=n | ||||
| CONFIG_ARCH_THUNDER=n | ||||
| CONFIG_ARCH_THUNDER2=n | ||||
| CONFIG_ARCH_UNIPHIER=n | ||||
| CONFIG_ARCH_VEXPRESS=n | ||||
| CONFIG_ARCH_XGENE=n | ||||
| CONFIG_ARCH_ZX=n | ||||
| CONFIG_ARCH_ZYNQMP=n | ||||
|  | ||||
| # Strip out some stuff we don't need for graphics testing, to reduce | ||||
| # the build. | ||||
| CONFIG_CAN=n | ||||
| CONFIG_WIRELESS=n | ||||
| CONFIG_RFKILL=n | ||||
| CONFIG_WLAN=n | ||||
|  | ||||
| CONFIG_REGULATOR_FAN53555=y | ||||
| CONFIG_REGULATOR=y | ||||
|  | ||||
| CONFIG_REGULATOR_VCTRL=y | ||||
|  | ||||
| CONFIG_KASAN=n | ||||
| CONFIG_KASAN_INLINE=n | ||||
| CONFIG_STACKTRACE=n | ||||
|  | ||||
| CONFIG_TMPFS=y | ||||
|  | ||||
| CONFIG_PROVE_LOCKING=n | ||||
| CONFIG_DEBUG_LOCKDEP=n | ||||
| CONFIG_SOFTLOCKUP_DETECTOR=y | ||||
| CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y | ||||
|  | ||||
| CONFIG_DETECT_HUNG_TASK=y | ||||
|  | ||||
| CONFIG_FW_LOADER_COMPRESS=y | ||||
| CONFIG_FW_LOADER_USER_HELPER=n | ||||
|  | ||||
| CONFIG_USB_USBNET=y | ||||
| CONFIG_NETDEVICES=y | ||||
| CONFIG_USB_NET_DRIVERS=y | ||||
| CONFIG_USB_RTL8152=y | ||||
| CONFIG_USB_NET_AX8817X=y | ||||
| CONFIG_USB_NET_SMSC95XX=y | ||||
|  | ||||
| # For amlogic | ||||
| CONFIG_MESON_GXL_PHY=y | ||||
| CONFIG_MDIO_BUS_MUX_MESON_G12A=y | ||||
| CONFIG_DRM_MESON=y | ||||
|  | ||||
| # For Mediatek | ||||
| CONFIG_DRM_MEDIATEK=y | ||||
| CONFIG_PWM_MEDIATEK=y | ||||
| CONFIG_DRM_MEDIATEK_HDMI=y | ||||
| CONFIG_GNSS=y | ||||
| CONFIG_GNSS_MTK_SERIAL=y | ||||
| CONFIG_HW_RANDOM=y | ||||
| CONFIG_HW_RANDOM_MTK=y | ||||
| CONFIG_MTK_DEVAPC=y | ||||
| CONFIG_PWM_MTK_DISP=y | ||||
| CONFIG_MTK_CMDQ=y | ||||
|  | ||||
| # For nouveau.  Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware. | ||||
| CONFIG_ARCH_TEGRA=y | ||||
| CONFIG_DRM_NOUVEAU=m | ||||
| CONFIG_DRM_TEGRA=m | ||||
| CONFIG_R8169=y | ||||
| CONFIG_STAGING=y | ||||
| CONFIG_DRM_TEGRA_STAGING=y | ||||
| CONFIG_TEGRA_HOST1X=y | ||||
| CONFIG_ARM_TEGRA_DEVFREQ=y | ||||
| CONFIG_TEGRA_SOCTHERM=y | ||||
| CONFIG_DRM_TEGRA_DEBUG=y | ||||
| CONFIG_PWM_TEGRA=y | ||||
| @@ -1,51 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| # Fetch the arm-built rootfs image and unpack it in our x86 container (saves | ||||
| # network transfer, disk usage, and runtime on test jobs) | ||||
|  | ||||
| if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then | ||||
|   ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}" | ||||
| else | ||||
|   ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}" | ||||
| fi | ||||
|  | ||||
| wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz | ||||
| mkdir -p /rootfs-$arch | ||||
| tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz | ||||
| rm rootfs.tgz | ||||
|  | ||||
| if [[ $arch == "arm64" ]]; then | ||||
|     mkdir -p /baremetal-files | ||||
|     pushd /baremetal-files | ||||
|  | ||||
|     wget ${ARTIFACTS_URL}/Image | ||||
|     wget ${ARTIFACTS_URL}/Image.gz | ||||
|     wget ${ARTIFACTS_URL}/cheza-kernel | ||||
|  | ||||
|     DEVICE_TREES="" | ||||
|     DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb" | ||||
|     DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb" | ||||
|  | ||||
|     for DTB in $DEVICE_TREES; do | ||||
|         wget ${ARTIFACTS_URL}/$DTB | ||||
|     done | ||||
|  | ||||
|     popd | ||||
| elif [[ $arch == "armhf" ]]; then | ||||
|     mkdir -p /baremetal-files | ||||
|     pushd /baremetal-files | ||||
|  | ||||
|     wget ${ARTIFACTS_URL}/zImage | ||||
|  | ||||
|     DEVICE_TREES="imx6q-cubox-i.dtb" | ||||
|  | ||||
|     for DTB in $DEVICE_TREES; do | ||||
|         wget ${ARTIFACTS_URL}/$DTB | ||||
|     done | ||||
|  | ||||
|     popd | ||||
| fi | ||||
| @@ -1,18 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| APITRACE_VERSION="170424754bb46002ba706e16ee5404b61988d74a" | ||||
|  | ||||
| git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace | ||||
| pushd /apitrace | ||||
| git checkout "$APITRACE_VERSION" | ||||
| git submodule update --init --depth 1 --recursive | ||||
| cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS | ||||
| ninja -C _build | ||||
| mkdir build | ||||
| cp _build/apitrace build | ||||
| cp _build/eglretrace build | ||||
| ${STRIP_CMD:-strip} build/* | ||||
| find . -not -path './build' -not -path './build/*' -delete | ||||
| popd | ||||
| @@ -1,41 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| SCRIPT_DIR="$(pwd)" | ||||
|  | ||||
| CROSVM_VERSION=c7cd0e0114c8363b884ba56d8e12adee718dcc93 | ||||
| git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm | ||||
| pushd /platform/crosvm | ||||
| git checkout "$CROSVM_VERSION" | ||||
| git submodule update --init | ||||
| # Apply all crosvm patches for Mesa CI | ||||
| cat "$SCRIPT_DIR"/.gitlab-ci/container/build-crosvm_*.patch | | ||||
|     patch -p1 | ||||
|  | ||||
| VIRGLRENDERER_VERSION=0564c9a0c2f584e004a7d4864aee3b8ec9692105 | ||||
| rm -rf third_party/virglrenderer | ||||
| git clone --single-branch -b master --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer | ||||
| pushd third_party/virglrenderer | ||||
| git checkout "$VIRGLRENDERER_VERSION" | ||||
| meson build/ $EXTRA_MESON_ARGS | ||||
| ninja -C build install | ||||
| popd | ||||
|  | ||||
| RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   bindgen \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --root /usr/local \ | ||||
|   $EXTRA_CARGO_ARGS | ||||
|  | ||||
| RUSTFLAGS='-L native=/usr/local/lib' cargo install \ | ||||
|   -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|   --locked \ | ||||
|   --features 'default-no-sandbox gpu x virgl_renderer virgl_renderer_next' \ | ||||
|   --path . \ | ||||
|   --root /usr/local \ | ||||
|   $EXTRA_CARGO_ARGS | ||||
|  | ||||
| popd | ||||
|  | ||||
| rm -rf /platform/crosvm | ||||
| @@ -1,43 +0,0 @@ | ||||
| From 3c57ec558bccc67fd53363c23deea20646be5c47 Mon Sep 17 00:00:00 2001 | ||||
| From: Tomeu Vizoso <tomeu.vizoso@collabora.com> | ||||
| Date: Wed, 17 Nov 2021 10:18:04 +0100 | ||||
| Subject: [PATCH] Hack syslog out | ||||
|  | ||||
| It's causing stability problems when running several Crosvm instances in | ||||
| parallel. | ||||
|  | ||||
| Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com> | ||||
| --- | ||||
|  base/src/unix/linux/syslog.rs       | 2 +- | ||||
|  common/sys_util/src/linux/syslog.rs | 2 +- | ||||
|  2 files changed, 2 insertions(+), 2 deletions(-) | ||||
|  | ||||
| diff --git a/base/src/unix/linux/syslog.rs b/base/src/unix/linux/syslog.rs | ||||
| index 05972a3a..f0db3781 100644 | ||||
| --- a/base/src/unix/linux/syslog.rs | ||||
| +++ b/base/src/unix/linux/syslog.rs | ||||
| @@ -35,7 +35,7 @@ pub struct PlatformSyslog { | ||||
|  impl Syslog for PlatformSyslog { | ||||
|      fn new() -> Result<Self, Error> { | ||||
|          Ok(Self { | ||||
| -            socket: Some(openlog_and_get_socket()?), | ||||
| +            socket: None, | ||||
|          }) | ||||
|      } | ||||
|   | ||||
| diff --git a/common/sys_util/src/linux/syslog.rs b/common/sys_util/src/linux/syslog.rs | ||||
| index 05972a3a..f0db3781 100644 | ||||
| --- a/common/sys_util/src/linux/syslog.rs | ||||
| +++ b/common/sys_util/src/linux/syslog.rs | ||||
| @@ -35,7 +35,7 @@ pub struct PlatformSyslog { | ||||
|  impl Syslog for PlatformSyslog { | ||||
|      fn new() -> Result<Self, Error> { | ||||
|          Ok(Self { | ||||
| -            socket: Some(openlog_and_get_socket()?), | ||||
| +            socket: None, | ||||
|          }) | ||||
|      } | ||||
|   | ||||
| --  | ||||
| 2.25.1 | ||||
|  | ||||
| @@ -1,24 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then | ||||
|     # Build and install from source | ||||
|     DEQP_RUNNER_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git}" | ||||
|  | ||||
|     if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then | ||||
|         DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}" | ||||
|     else | ||||
|         DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}" | ||||
|     fi | ||||
|  | ||||
|     DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}" | ||||
| else | ||||
|     # Install from package registry | ||||
|     DEQP_RUNNER_CARGO_ARGS="--version 0.13.1 ${EXTRA_CARGO_ARGS} -- deqp-runner" | ||||
| fi | ||||
|  | ||||
| cargo install --locked  \ | ||||
|     -j ${FDO_CI_CONCURRENT:-4} \ | ||||
|     --root /usr/local \ | ||||
|     ${DEQP_RUNNER_CARGO_ARGS} | ||||
| @@ -1,90 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
| git clone \ | ||||
|     https://github.com/KhronosGroup/VK-GL-CTS.git \ | ||||
|     -b vulkan-cts-1.3.1.1 \ | ||||
|     --depth 1 \ | ||||
|     /VK-GL-CTS | ||||
| pushd /VK-GL-CTS | ||||
|  | ||||
| # Cherry-pick fix for zlib dependency | ||||
| git fetch origin main | ||||
| git cherry-pick -x ec1804831b654ac55bd2a7a5dd27a556afe05030 | ||||
|  | ||||
| # --insecure is due to SSL cert failures hitting sourceforge for zlib and | ||||
| # libpng (sigh).  The archives get their checksums checked anyway, and git | ||||
| # always goes through ssh or https. | ||||
| python3 external/fetch_sources.py --insecure | ||||
|  | ||||
| mkdir -p /deqp | ||||
|  | ||||
| # Save the testlog stylesheets: | ||||
| cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp | ||||
| popd | ||||
|  | ||||
| pushd /deqp | ||||
| # When including EGL/X11 testing, do that build first and save off its | ||||
| # deqp-egl binary. | ||||
| cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|       -DDEQP_TARGET=x11_egl_glx \ | ||||
|       -DCMAKE_BUILD_TYPE=Release \ | ||||
|       $EXTRA_CMAKE_ARGS | ||||
| ninja modules/egl/deqp-egl | ||||
| cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11 | ||||
|  | ||||
|  | ||||
| cmake -S /VK-GL-CTS -B . -G Ninja \ | ||||
|       -DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \ | ||||
|       -DCMAKE_BUILD_TYPE=Release \ | ||||
|       $EXTRA_CMAKE_ARGS | ||||
| ninja | ||||
|  | ||||
| mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl | ||||
|  | ||||
| # Copy out the mustpass lists we want. | ||||
| mkdir /deqp/mustpass | ||||
| for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt) ; do | ||||
|     cat /VK-GL-CTS/external/vulkancts/mustpass/master/$mustpass \ | ||||
|         >> /deqp/mustpass/vk-master.txt | ||||
| done | ||||
|  | ||||
| cp \ | ||||
|     /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \ | ||||
|     /deqp/mustpass/. | ||||
| cp \ | ||||
|     /deqp/external/openglcts/modules/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt \ | ||||
|     /deqp/mustpass/. | ||||
| cp \ | ||||
|     /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-master.txt \ | ||||
|     /deqp/mustpass/. | ||||
| cp \ | ||||
|     /deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \ | ||||
|     /deqp/mustpass/. | ||||
|  | ||||
| # Save *some* executor utils, but otherwise strip things down | ||||
| # to reduct deqp build size: | ||||
| mkdir /deqp/executor.save | ||||
| cp /deqp/executor/testlog-to-* /deqp/executor.save | ||||
| rm -rf /deqp/executor | ||||
| mv /deqp/executor.save /deqp/executor | ||||
|  | ||||
| # Remove other mustpass files, since we saved off the ones we wanted to conventient locations above. | ||||
| rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass | ||||
| rm -rf /deqp/external/vulkancts/modules/vulkan/vk-master* | ||||
| rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default | ||||
|  | ||||
| rm -rf /deqp/external/openglcts/modules/cts-runner | ||||
| rm -rf /deqp/modules/internal | ||||
| rm -rf /deqp/execserver | ||||
| rm -rf /deqp/framework | ||||
| find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf | ||||
| ${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk | ||||
| ${STRIP_CMD:-strip} external/openglcts/modules/glcts | ||||
| ${STRIP_CMD:-strip} modules/*/deqp-* | ||||
| du -sh * | ||||
| rm -rf /VK-GL-CTS | ||||
| popd | ||||
| @@ -1,14 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| git clone https://github.com/ValveSoftware/Fossilize.git | ||||
| cd Fossilize | ||||
| git checkout 16fba1b8b5d9310126bb02323d7bae3227338461 | ||||
| git submodule update --init | ||||
| mkdir build | ||||
| cd build | ||||
| cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release | ||||
| ninja -C . install | ||||
| cd ../.. | ||||
| rm -rf Fossilize | ||||
| @@ -1,19 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1 | ||||
|  | ||||
| git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct | ||||
| pushd /gfxreconstruct | ||||
| git checkout "$GFXRECONSTRUCT_VERSION" | ||||
| git submodule update --init | ||||
| git submodule update | ||||
| cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release | ||||
| ninja -C _build gfxrecon-replay gfxrecon-info | ||||
| mkdir -p build/bin | ||||
| install _build/tools/replay/gfxrecon-replay build/bin | ||||
| install _build/tools/info/gfxrecon-info build/bin | ||||
| strip build/bin/* | ||||
| find . -not -path './build' -not -path './build/*' -delete | ||||
| popd | ||||
| @@ -1,16 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| PARALLEL_DEQP_RUNNER_VERSION=6596b71cf37a7efb4d54acd48c770ed2d4ad6b7e | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner | ||||
| pushd /parallel-deqp-runner | ||||
| git checkout "$PARALLEL_DEQP_RUNNER_VERSION" | ||||
| meson . _build | ||||
| ninja -C _build hang-detection | ||||
| mkdir -p build/bin | ||||
| install _build/hang-detection build/bin | ||||
| strip build/bin/* | ||||
| find . -not -path './build' -not -path './build/*' -delete | ||||
| popd | ||||
| @@ -1,51 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| mkdir -p kernel | ||||
| wget -qO- ${KERNEL_URL} | tar -xj --strip-components=1 -C kernel | ||||
| pushd kernel | ||||
|  | ||||
| # The kernel doesn't like the gold linker (or the old lld in our debians). | ||||
| # Sneak in some override symlinks during kernel build until we can update | ||||
| # debian (they'll get blown away by the rm of the kernel dir at the end). | ||||
| mkdir -p ld-links | ||||
| for i in /usr/bin/*-ld /usr/bin/ld; do | ||||
|     i=`basename $i` | ||||
|     ln -sf /usr/bin/$i.bfd ld-links/$i | ||||
| done | ||||
| export PATH=`pwd`/ld-links:$PATH | ||||
|  | ||||
| export LOCALVERSION="`basename $KERNEL_URL`" | ||||
| ./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config | ||||
| make ${KERNEL_IMAGE_NAME} | ||||
| for image in ${KERNEL_IMAGE_NAME}; do | ||||
|     cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/. | ||||
| done | ||||
|  | ||||
| if [[ -n ${DEVICE_TREES} ]]; then | ||||
|     make dtbs | ||||
|     cp ${DEVICE_TREES} /lava-files/. | ||||
| fi | ||||
|  | ||||
| if [[ ${DEBIAN_ARCH} = "amd64" || ${DEBIAN_ARCH} = "arm64" ]]; then | ||||
|     make modules | ||||
|     INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install | ||||
| fi | ||||
|  | ||||
| if [[ ${DEBIAN_ARCH} = "arm64" ]]; then | ||||
|     make Image.lzma | ||||
|     mkimage \ | ||||
|         -f auto \ | ||||
|         -A arm \ | ||||
|         -O linux \ | ||||
|         -d arch/arm64/boot/Image.lzma \ | ||||
|         -C lzma\ | ||||
|         -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \ | ||||
|         /lava-files/cheza-kernel | ||||
|     KERNEL_IMAGE_NAME+=" cheza-kernel" | ||||
| fi | ||||
|  | ||||
| popd | ||||
| rm -rf kernel | ||||
|  | ||||
| @@ -1,30 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| export LLVM_CONFIG="llvm-config-11" | ||||
|  | ||||
| $LLVM_CONFIG --version | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
| git clone \ | ||||
|     https://github.com/llvm/llvm-project \ | ||||
|     --depth 1 \ | ||||
|     -b llvmorg-12.0.0-rc3 \ | ||||
|     /llvm-project | ||||
|  | ||||
| mkdir /libclc | ||||
| pushd /libclc | ||||
| cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv | ||||
| ninja | ||||
| ninja install | ||||
| popd | ||||
|  | ||||
| # workaroud cmake vs debian packaging. | ||||
| mkdir -p /usr/lib/clc | ||||
| ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/ | ||||
| ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/ | ||||
|  | ||||
| du -sh * | ||||
| rm -rf /libclc /llvm-project | ||||
| @@ -1,14 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| export LIBDRM_VERSION=libdrm-2.4.110 | ||||
|  | ||||
| wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz | ||||
| tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz | ||||
| cd $LIBDRM_VERSION | ||||
| meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS | ||||
| ninja -C build install | ||||
| cd .. | ||||
| rm -rf $LIBDRM_VERSION | ||||
|  | ||||
| @@ -1,23 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit | ||||
| pushd /piglit | ||||
| git checkout 445711587d461539a4d8f9d35a7fe996a86d3c8d | ||||
| patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff | ||||
| cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS | ||||
| ninja $PIGLIT_BUILD_TARGETS | ||||
| find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf | ||||
| rm -rf target_api | ||||
| if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then | ||||
|     find ! -regex "^\.$" \ | ||||
|          ! -regex "^\.\/piglit.*" \ | ||||
|          ! -regex "^\.\/framework.*" \ | ||||
|          ! -regex "^\.\/bin$" \ | ||||
|          ! -regex "^\.\/bin\/replayer\.py" \ | ||||
|          ! -regex "^\.\/templates.*" \ | ||||
|          ! -regex "^\.\/tests$" \ | ||||
|          ! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf | ||||
| fi | ||||
| popd | ||||
| @@ -1,31 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Note that this script is not actually "building" rust, but build- is the | ||||
| # convention for the shared helpers for putting stuff in our containers. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in | ||||
| # $HOME/.cargo/bin.  Make bin a link to a public bin directory so the commands | ||||
| # are just available to all build jobs. | ||||
| mkdir -p $HOME/.cargo | ||||
| ln -s /usr/local/bin $HOME/.cargo/bin | ||||
|  | ||||
| # For rust in Mesa, we use rustup to install.  This lets us pick an arbitrary | ||||
| # version of the compiler, rather than whatever the container's Debian comes | ||||
| # with. | ||||
| # | ||||
| # Pick the rust compiler (1.48) available in Debian stable, and pick a specific | ||||
| # snapshot from rustup so the compiler doesn't drift on us. | ||||
| wget https://sh.rustup.rs -O - | \ | ||||
|     sh -s -- -y --default-toolchain 1.49.0-2020-12-31 | ||||
|  | ||||
| # Set up a config script for cross compiling -- cargo needs your system cc for | ||||
| # linking in cross builds, but doesn't know what you want to use for system cc. | ||||
| cat > /root/.cargo/config <<EOF | ||||
| [target.armv7-unknown-linux-gnueabihf] | ||||
| linker = "arm-linux-gnueabihf-gcc" | ||||
|  | ||||
| [target.aarch64-unknown-linux-gnu] | ||||
| linker = "aarch64-linux-gnu-gcc" | ||||
| EOF | ||||
| @@ -1,97 +0,0 @@ | ||||
| #!/bin/bash | ||||
| # | ||||
| # Copyright (C) 2022 Collabora Limited | ||||
| # Author: Guilherme Gallo <guilherme.gallo@collabora.com> | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| # SOFTWARE. | ||||
|  | ||||
|  | ||||
| create_gn_args() { | ||||
|     # gn can be configured to cross-compile skia and its tools | ||||
|     # It is important to set the target_cpu to guarantee the intended target | ||||
|     # machine | ||||
|     cp "${BASE_ARGS_GN_FILE}" "${SKQP_OUT_DIR}"/args.gn | ||||
|     echo "target_cpu = \"${SKQP_ARCH}\"" >> "${SKQP_OUT_DIR}"/args.gn | ||||
| } | ||||
|  | ||||
|  | ||||
| download_skia_source() { | ||||
|     if [ -z ${SKIA_DIR+x} ] | ||||
|     then | ||||
|         return 1 | ||||
|     fi | ||||
|  | ||||
|     # Skia cloned from https://android.googlesource.com/platform/external/skqp | ||||
|     # has all needed assets tracked on git-fs | ||||
|     SKQP_REPO=https://android.googlesource.com/platform/external/skqp | ||||
|     SKQP_BRANCH=android-cts-10.0_r11 | ||||
|  | ||||
|     git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}" | ||||
| } | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| SCRIPT_DIR=$(realpath "$(dirname "$0")") | ||||
| SKQP_PATCH_DIR="${SCRIPT_DIR}" | ||||
| BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn" | ||||
|  | ||||
| SKQP_ARCH=${SKQP_ARCH:-x64} | ||||
| SKIA_DIR=${SKIA_DIR:-$(mktemp -d)} | ||||
| SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH} | ||||
| SKQP_INSTALL_DIR=/skqp | ||||
| SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets" | ||||
| SKQP_BINARIES=(skqp) | ||||
|  | ||||
| download_skia_source | ||||
|  | ||||
| pushd "${SKIA_DIR}" | ||||
|  | ||||
| # Apply all skqp patches for Mesa CI | ||||
| cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch | | ||||
|     patch -p1 | ||||
|  | ||||
| # Fetch some needed build tools needed to build skia/skqp. | ||||
| # Basically, it clones repositories with commits SHAs from ${SKIA_DIR}/DEPS | ||||
| # directory. | ||||
| python tools/git-sync-deps | ||||
|  | ||||
| mkdir -p "${SKQP_OUT_DIR}" | ||||
| mkdir -p "${SKQP_INSTALL_DIR}" | ||||
|  | ||||
| create_gn_args | ||||
|  | ||||
| # Build and install skqp binaries | ||||
| bin/gn gen "${SKQP_OUT_DIR}" | ||||
|  | ||||
| for BINARY in "${SKQP_BINARIES[@]}" | ||||
| do | ||||
|     /usr/bin/ninja -C "${SKQP_OUT_DIR}" "${BINARY}" | ||||
|     # Strip binary, since gn is not stripping it even when `is_debug == false` | ||||
|     ${STRIP_CMD:-strip} "${SKQP_OUT_DIR}/${BINARY}" | ||||
|     install -m 0755 "${SKQP_OUT_DIR}/${BINARY}" "${SKQP_INSTALL_DIR}" | ||||
| done | ||||
|  | ||||
| # Move assets to the target directory, which will reside in rootfs. | ||||
| mv platform_tools/android/apps/skqp/src/main/assets/ "${SKQP_ASSETS_DIR}" | ||||
|  | ||||
| popd | ||||
| rm -Rf "${SKIA_DIR}" | ||||
|  | ||||
| set +ex | ||||
| @@ -1,13 +0,0 @@ | ||||
| diff --git a/BUILD.gn b/BUILD.gn | ||||
| index d2b1407..7b60c90 100644 | ||||
| --- a/BUILD.gn | ||||
| +++ b/BUILD.gn | ||||
| @@ -144,7 +144,7 @@ config("skia_public") { | ||||
|   | ||||
|  # Skia internal APIs, used by Skia itself and a few test tools. | ||||
|  config("skia_private") { | ||||
| -  visibility = [ ":*" ] | ||||
| +  visibility = [ "*" ] | ||||
|   | ||||
|    include_dirs = [ | ||||
|      "include/private", | ||||
| @@ -1,47 +0,0 @@ | ||||
| cc = "clang" | ||||
| cxx = "clang++" | ||||
|  | ||||
| extra_cflags = [ "-DSK_ENABLE_DUMP_GPU", "-DSK_BUILD_FOR_SKQP" ] | ||||
| extra_cflags_cc = [ | ||||
|         "-Wno-error", | ||||
|  | ||||
|         # skqp build process produces a lot of compilation warnings, silencing | ||||
|         # most of them to remove clutter and avoid the CI job log to exceed the | ||||
|         # maximum size | ||||
|  | ||||
|         # GCC flags | ||||
|         "-Wno-redundant-move", | ||||
|         "-Wno-suggest-override", | ||||
|         "-Wno-class-memaccess", | ||||
|         "-Wno-deprecated-copy", | ||||
|         "-Wno-uninitialized", | ||||
|  | ||||
|         # Clang flags | ||||
|         "-Wno-macro-redefined", | ||||
|         "-Wno-anon-enum-enum-conversion", | ||||
|         "-Wno-suggest-destructor-override", | ||||
|         "-Wno-return-std-move-in-c++11", | ||||
|         "-Wno-extra-semi-stmt", | ||||
|     ] | ||||
|  | ||||
| cc_wrapper = "ccache" | ||||
|  | ||||
| is_debug = false | ||||
|  | ||||
| skia_enable_fontmgr_android = false | ||||
| skia_enable_fontmgr_empty = true | ||||
| skia_enable_pdf = false | ||||
| skia_enable_skottie = false | ||||
|  | ||||
| skia_skqp_global_error_tolerance = 8 | ||||
| skia_tools_require_resources = true | ||||
|  | ||||
| skia_use_dng_sdk = false | ||||
| skia_use_expat = true | ||||
| skia_use_icu = false | ||||
| skia_use_libheif = false | ||||
| skia_use_lua = false | ||||
| skia_use_piex = false | ||||
| skia_use_vulkan = true | ||||
|  | ||||
| target_os = "linux" | ||||
| @@ -1,68 +0,0 @@ | ||||
| diff --git a/bin/fetch-gn b/bin/fetch-gn | ||||
| index d5e94a2..59c4591 100755 | ||||
| --- a/bin/fetch-gn | ||||
| +++ b/bin/fetch-gn | ||||
| @@ -5,39 +5,44 @@ | ||||
|  # Use of this source code is governed by a BSD-style license that can be | ||||
|  # found in the LICENSE file. | ||||
|   | ||||
| -import hashlib | ||||
|  import os | ||||
| +import platform | ||||
|  import shutil | ||||
|  import stat | ||||
|  import sys | ||||
| -import urllib2 | ||||
| +import tempfile | ||||
| +import zipfile | ||||
| + | ||||
| +if sys.version_info[0] < 3: | ||||
| +  from urllib2 import urlopen | ||||
| +else: | ||||
| +  from urllib.request import urlopen | ||||
|   | ||||
|  os.chdir(os.path.join(os.path.dirname(__file__), os.pardir)) | ||||
|   | ||||
| -dst = 'bin/gn.exe' if 'win32' in sys.platform else 'bin/gn' | ||||
| +gnzip = os.path.join(tempfile.mkdtemp(), 'gn.zip') | ||||
| +with open(gnzip, 'wb') as f: | ||||
| +  OS  = {'darwin': 'mac', 'linux': 'linux', 'linux2': 'linux', 'win32': 'windows'}[sys.platform] | ||||
| +  cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64', 'aarch64': 'arm64'}[platform.machine().lower()] | ||||
|   | ||||
| -sha1 = '2f27ff0b6118e5886df976da5effa6003d19d1ce' if 'linux'  in sys.platform else \ | ||||
| -       '9be792dd9010ce303a9c3a497a67bcc5ac8c7666' if 'darwin' in sys.platform else \ | ||||
| -       'eb69be2d984b4df60a8c21f598135991f0ad1742'  # Windows | ||||
| +  rev = 'd62642c920e6a0d1756316d225a90fd6faa9e21e' | ||||
| +  url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/{}-{}/+/git_revision:{}'.format( | ||||
| +          OS,cpu,rev) | ||||
| +  f.write(urlopen(url).read()) | ||||
|   | ||||
| -def sha1_of_file(path): | ||||
| -  h = hashlib.sha1() | ||||
| -  if os.path.isfile(path): | ||||
| -    with open(path, 'rb') as f: | ||||
| -      h.update(f.read()) | ||||
| -  return h.hexdigest() | ||||
| +gn = 'gn.exe' if 'win32' in sys.platform else 'gn' | ||||
| +with zipfile.ZipFile(gnzip, 'r') as f: | ||||
| +  f.extract(gn, 'bin') | ||||
|   | ||||
| -if sha1_of_file(dst) != sha1: | ||||
| -  with open(dst, 'wb') as f: | ||||
| -    f.write(urllib2.urlopen('https://chromium-gn.storage-download.googleapis.com/' + sha1).read()) | ||||
| +gn = os.path.join('bin', gn) | ||||
|   | ||||
| -  os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | | ||||
| -                stat.S_IRGRP                | stat.S_IXGRP | | ||||
| -                stat.S_IROTH                | stat.S_IXOTH ) | ||||
| +os.chmod(gn, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | | ||||
| +             stat.S_IRGRP                | stat.S_IXGRP | | ||||
| +             stat.S_IROTH                | stat.S_IXOTH ) | ||||
|   | ||||
|  # We'll also copy to a path that depot_tools' GN wrapper will expect to find the binary. | ||||
|  copy_path = 'buildtools/linux64/gn' if 'linux'  in sys.platform else \ | ||||
|              'buildtools/mac/gn'     if 'darwin' in sys.platform else \ | ||||
|              'buildtools/win/gn.exe' | ||||
|  if os.path.isdir(os.path.dirname(copy_path)): | ||||
| -  shutil.copy(dst, copy_path) | ||||
| +  shutil.copy(gn, copy_path) | ||||
| @@ -1,142 +0,0 @@ | ||||
| Patch based from diff with skia repository from commit | ||||
| 013397884c73959dc07cb0a26ee742b1cdfbda8a | ||||
|  | ||||
| Adds support for Python3, but removes the constraint of only SHA based refs in | ||||
| DEPS | ||||
| diff --git a/tools/git-sync-deps b/tools/git-sync-deps | ||||
| index c7379c0b5c..f63d4d9ccf 100755 | ||||
| --- a/tools/git-sync-deps | ||||
| +++ b/tools/git-sync-deps | ||||
| @@ -43,7 +43,7 @@ def git_executable(): | ||||
|        A string suitable for passing to subprocess functions, or None. | ||||
|    """ | ||||
|    envgit = os.environ.get('GIT_EXECUTABLE') | ||||
| -  searchlist = ['git'] | ||||
| +  searchlist = ['git', 'git.bat'] | ||||
|    if envgit: | ||||
|      searchlist.insert(0, envgit) | ||||
|    with open(os.devnull, 'w') as devnull: | ||||
| @@ -94,21 +94,25 @@ def is_git_toplevel(git, directory): | ||||
|    try: | ||||
|      toplevel = subprocess.check_output( | ||||
|        [git, 'rev-parse', '--show-toplevel'], cwd=directory).strip() | ||||
| -    return os.path.realpath(directory) == os.path.realpath(toplevel) | ||||
| +    return os.path.realpath(directory) == os.path.realpath(toplevel.decode()) | ||||
|    except subprocess.CalledProcessError: | ||||
|      return False | ||||
|   | ||||
|   | ||||
| -def status(directory, checkoutable): | ||||
| -  def truncate(s, length): | ||||
| +def status(directory, commithash, change): | ||||
| +  def truncate_beginning(s, length): | ||||
| +    return s if len(s) <= length else '...' + s[-(length-3):] | ||||
| +  def truncate_end(s, length): | ||||
|      return s if len(s) <= length else s[:(length - 3)] + '...' | ||||
| + | ||||
|    dlen = 36 | ||||
| -  directory = truncate(directory, dlen) | ||||
| -  checkoutable = truncate(checkoutable, 40) | ||||
| -  sys.stdout.write('%-*s @ %s\n' % (dlen, directory, checkoutable)) | ||||
| +  directory = truncate_beginning(directory, dlen) | ||||
| +  commithash = truncate_end(commithash, 40) | ||||
| +  symbol = '>' if change else '@' | ||||
| +  sys.stdout.write('%-*s %s %s\n' % (dlen, directory, symbol, commithash)) | ||||
|   | ||||
|   | ||||
| -def git_checkout_to_directory(git, repo, checkoutable, directory, verbose): | ||||
| +def git_checkout_to_directory(git, repo, commithash, directory, verbose): | ||||
|    """Checkout (and clone if needed) a Git repository. | ||||
|   | ||||
|    Args: | ||||
| @@ -117,8 +121,7 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose): | ||||
|      repo (string) the location of the repository, suitable | ||||
|           for passing to `git clone`. | ||||
|   | ||||
| -    checkoutable (string) a tag, branch, or commit, suitable for | ||||
| -                 passing to `git checkout` | ||||
| +    commithash (string) a commit, suitable for passing to `git checkout` | ||||
|   | ||||
|      directory (string) the path into which the repository | ||||
|                should be checked out. | ||||
| @@ -129,7 +132,12 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose): | ||||
|    """ | ||||
|    if not os.path.isdir(directory): | ||||
|      subprocess.check_call( | ||||
| -      [git, 'clone', '--quiet', repo, directory]) | ||||
| +      [git, 'clone', '--quiet', '--no-checkout', repo, directory]) | ||||
| +    subprocess.check_call([git, 'checkout', '--quiet', commithash], | ||||
| +                          cwd=directory) | ||||
| +    if verbose: | ||||
| +      status(directory, commithash, True) | ||||
| +    return | ||||
|   | ||||
|    if not is_git_toplevel(git, directory): | ||||
|      # if the directory exists, but isn't a git repo, you will modify | ||||
| @@ -145,11 +153,11 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose): | ||||
|    with open(os.devnull, 'w') as devnull: | ||||
|      # If this fails, we will fetch before trying again.  Don't spam user | ||||
|      # with error infomation. | ||||
| -    if 0 == subprocess.call([git, 'checkout', '--quiet', checkoutable], | ||||
| +    if 0 == subprocess.call([git, 'checkout', '--quiet', commithash], | ||||
|                              cwd=directory, stderr=devnull): | ||||
|        # if this succeeds, skip slow `git fetch`. | ||||
|        if verbose: | ||||
| -        status(directory, checkoutable)  # Success. | ||||
| +        status(directory, commithash, False)  # Success. | ||||
|        return | ||||
|   | ||||
|    # If the repo has changed, always force use of the correct repo. | ||||
| @@ -159,18 +167,24 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose): | ||||
|   | ||||
|    subprocess.check_call([git, 'fetch', '--quiet'], cwd=directory) | ||||
|   | ||||
| -  subprocess.check_call([git, 'checkout', '--quiet', checkoutable], cwd=directory) | ||||
| +  subprocess.check_call([git, 'checkout', '--quiet', commithash], cwd=directory) | ||||
|   | ||||
|    if verbose: | ||||
| -    status(directory, checkoutable)  # Success. | ||||
| +    status(directory, commithash, True)  # Success. | ||||
|   | ||||
|   | ||||
|  def parse_file_to_dict(path): | ||||
|    dictionary = {} | ||||
| -  execfile(path, dictionary) | ||||
| +  with open(path) as f: | ||||
| +    exec('def Var(x): return vars[x]\n' + f.read(), dictionary) | ||||
|    return dictionary | ||||
|   | ||||
|   | ||||
| +def is_sha1_sum(s): | ||||
| +  """SHA1 sums are 160 bits, encoded as lowercase hexadecimal.""" | ||||
| +  return len(s) == 40 and all(c in '0123456789abcdef' for c in s) | ||||
| + | ||||
| + | ||||
|  def git_sync_deps(deps_file_path, command_line_os_requests, verbose): | ||||
|    """Grab dependencies, with optional platform support. | ||||
|   | ||||
| @@ -204,19 +218,19 @@ def git_sync_deps(deps_file_path, command_line_os_requests, verbose): | ||||
|          raise Exception('%r is parent of %r' % (other_dir, directory)) | ||||
|    list_of_arg_lists = [] | ||||
|    for directory in sorted(dependencies): | ||||
| -    if not isinstance(dependencies[directory], basestring): | ||||
| +    if not isinstance(dependencies[directory], str): | ||||
|        if verbose: | ||||
| -        print 'Skipping "%s".' % directory | ||||
| +        sys.stdout.write( 'Skipping "%s".\n' % directory) | ||||
|        continue | ||||
|      if '@' in dependencies[directory]: | ||||
| -      repo, checkoutable = dependencies[directory].split('@', 1) | ||||
| +      repo, commithash = dependencies[directory].split('@', 1) | ||||
|      else: | ||||
| -      raise Exception("please specify commit or tag") | ||||
| +      raise Exception("please specify commit") | ||||
|   | ||||
|      relative_directory = os.path.join(deps_file_directory, directory) | ||||
|   | ||||
|      list_of_arg_lists.append( | ||||
| -      (git, repo, checkoutable, relative_directory, verbose)) | ||||
| +      (git, repo, commithash, relative_directory, verbose)) | ||||
|   | ||||
|    multithread(git_checkout_to_directory, list_of_arg_lists) | ||||
|   | ||||
| @@ -1,13 +0,0 @@ | ||||
| diff --git a/gn/BUILDCONFIG.gn b/gn/BUILDCONFIG.gn | ||||
| index 454334a..1797594 100644 | ||||
| --- a/gn/BUILDCONFIG.gn | ||||
| +++ b/gn/BUILDCONFIG.gn | ||||
| @@ -80,7 +80,7 @@ if (current_cpu == "") { | ||||
|  is_clang = is_android || is_ios || is_mac || | ||||
|             (cc == "clang" && cxx == "clang++") || clang_win != "" | ||||
|  if (!is_clang && !is_win) { | ||||
| -  is_clang = exec_script("gn/is_clang.py", | ||||
| +  is_clang = exec_script("//gn/is_clang.py", | ||||
|                           [ | ||||
|                             cc, | ||||
|                             cxx, | ||||
| @@ -1,17 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| git config --global user.email "mesa@example.com" | ||||
| git config --global user.name "Mesa CI" | ||||
| git clone \ | ||||
|     https://github.com/intel/libva-utils.git \ | ||||
|     -b 2.13.0 \ | ||||
|     --depth 1 \ | ||||
|     /va-utils | ||||
|  | ||||
| pushd /va-utils | ||||
| meson build -D tests=true  -Dprefix=/va $EXTRA_MESON_ARGS | ||||
| ninja -C build install | ||||
| popd | ||||
| rm -rf /va-utils | ||||
| @@ -1,43 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| VKD3D_PROTON_VERSION="2.6" | ||||
| VKD3D_PROTON_COMMIT="3e5aab6fb3e18f81a71b339be4cb5cdf55140980" | ||||
|  | ||||
| VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests" | ||||
| VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src" | ||||
| VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION" | ||||
|  | ||||
| function build_arch { | ||||
|   local arch="$1" | ||||
|   shift | ||||
|  | ||||
|   meson "$@"                               \ | ||||
|         -Denable_tests=true                \ | ||||
|         --buildtype release                \ | ||||
|         --prefix "$VKD3D_PROTON_BUILD_DIR" \ | ||||
|         --strip                            \ | ||||
|         --bindir "x${arch}"                \ | ||||
|         --libdir "x${arch}"                \ | ||||
|         "$VKD3D_PROTON_BUILD_DIR/build.${arch}" | ||||
|  | ||||
|   ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install | ||||
|  | ||||
|   install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/"*.exe | ||||
| } | ||||
|  | ||||
| git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b "v$VKD3D_PROTON_VERSION" --no-checkout "$VKD3D_PROTON_SRC_DIR" | ||||
| pushd "$VKD3D_PROTON_SRC_DIR" | ||||
| git checkout "$VKD3D_PROTON_COMMIT" | ||||
| git submodule update --init --recursive | ||||
| git submodule update --recursive | ||||
| build_arch 64 --cross-file build-win64.txt | ||||
| build_arch 86 --cross-file build-win32.txt | ||||
| cp "setup_vkd3d_proton.sh" "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh" | ||||
| chmod +x "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh" | ||||
| popd | ||||
|  | ||||
| "$VKD3D_PROTON_BUILD_DIR"/setup_vkd3d_proton.sh install | ||||
| rm -rf "$VKD3D_PROTON_BUILD_DIR" | ||||
| rm -rf "$VKD3D_PROTON_SRC_DIR" | ||||
| @@ -1,22 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| export LIBWAYLAND_VERSION="1.18.0" | ||||
| export WAYLAND_PROTOCOLS_VERSION="1.24" | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/wayland/wayland | ||||
| cd wayland | ||||
| git checkout "$LIBWAYLAND_VERSION" | ||||
| meson -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build | ||||
| ninja -C _build install | ||||
| cd .. | ||||
| rm -rf wayland | ||||
|  | ||||
| git clone https://gitlab.freedesktop.org/wayland/wayland-protocols | ||||
| cd wayland-protocols | ||||
| git checkout "$WAYLAND_PROTOCOLS_VERSION" | ||||
| meson _build | ||||
| ninja -C _build install | ||||
| cd .. | ||||
| rm -rf wayland-protocols | ||||
| @@ -1,10 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| if test -f /etc/debian_version; then | ||||
|     apt-get autoremove -y --purge | ||||
| fi | ||||
|  | ||||
| # Clean up any build cache for rust. | ||||
| rm -rf /.cargo | ||||
|  | ||||
| ccache --show-stats | ||||
| @@ -1,36 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| if test -f /etc/debian_version; then | ||||
|     CCACHE_PATH=/usr/lib/ccache | ||||
| else | ||||
|     CCACHE_PATH=/usr/lib64/ccache | ||||
| fi | ||||
|  | ||||
| # Common setup among container builds before we get to building code. | ||||
|  | ||||
| export CCACHE_COMPILERCHECK=content | ||||
| export CCACHE_COMPRESS=true | ||||
| export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache | ||||
| export PATH=$CCACHE_PATH:$PATH | ||||
|  | ||||
| # CMake ignores $PATH, so we have to force CC/GCC to the ccache versions. | ||||
| export CC="${CCACHE_PATH}/gcc" | ||||
| export CXX="${CCACHE_PATH}/g++" | ||||
|  | ||||
| # Force linkers to gold, since it's so much faster for building.  We can't use | ||||
| # lld because we're on old debian and it's buggy.  ming fails meson builds | ||||
| # with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker" | ||||
| find /usr/bin -name \*-ld -o -name ld | \ | ||||
|     grep -v mingw | \ | ||||
|     xargs -n 1 -I '{}' ln -sf '{}.gold' '{}' | ||||
|  | ||||
| ccache --show-stats | ||||
|  | ||||
| # Make a wrapper script for ninja to always include the -j flags | ||||
| echo '#!/bin/sh -x' > /usr/local/bin/ninja | ||||
| echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' >> /usr/local/bin/ninja | ||||
| chmod +x /usr/local/bin/ninja | ||||
|  | ||||
| # Set MAKEFLAGS so that all make invocations in container builds include the | ||||
| # flags (doesn't apply to non-container builds, but we don't run make there) | ||||
| export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}" | ||||
| @@ -1,35 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| ndk=$1 | ||||
| arch=$2 | ||||
| cpu_family=$3 | ||||
| cpu=$4 | ||||
| cross_file="/cross_file-$arch.txt" | ||||
|  | ||||
| # armv7 has the toolchain split between two names. | ||||
| arch2=${5:-$2} | ||||
|  | ||||
| # Note that we disable C++ exceptions, because Mesa doesn't use exceptions, | ||||
| # and allowing it in code generation means we get unwind symbols that break | ||||
| # the libEGL and driver symbol tests. | ||||
|  | ||||
| cat >$cross_file <<EOF | ||||
| [binaries] | ||||
| ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-ar' | ||||
| c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables'] | ||||
| cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables'] | ||||
| c_ld = 'lld' | ||||
| cpp_ld = 'lld' | ||||
| strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-strip' | ||||
| pkgconfig = ['/usr/bin/pkg-config'] | ||||
|  | ||||
| [host_machine] | ||||
| system = 'linux' | ||||
| cpu_family = '$cpu_family' | ||||
| cpu = '$cpu' | ||||
| endian = 'little' | ||||
|  | ||||
| [properties] | ||||
| needs_exe_wrapper = true | ||||
|  | ||||
| EOF | ||||
| @@ -1,38 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Makes a .pc file in the Android NDK for meson to find its libraries. | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| ndk="$1" | ||||
| pc="$2" | ||||
| cflags="$3" | ||||
| libs="$4" | ||||
| version="$5" | ||||
|  | ||||
| sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot | ||||
|  | ||||
| for arch in \ | ||||
|         x86_64-linux-android \ | ||||
|         i686-linux-android \ | ||||
|         aarch64-linux-android \ | ||||
|         arm-linux-androideabi; do | ||||
|     pcdir=$sysroot/usr/lib/$arch/pkgconfig | ||||
|     mkdir -p $pcdir | ||||
|  | ||||
|     cat >$pcdir/$pc <<EOF | ||||
| prefix=$sysroot | ||||
| exec_prefix=$sysroot | ||||
| libdir=$sysroot/usr/lib/$arch/29 | ||||
| sharedlibdir=$sysroot/usr/lib/$arch | ||||
| includedir=$sysroot/usr/include | ||||
|  | ||||
| Name: zlib | ||||
| Description: zlib compression library | ||||
| Version: $version | ||||
|  | ||||
| Requires: | ||||
| Libs: -L$sysroot/usr/lib/$arch/29 $libs | ||||
| Cflags: -I$sysroot/usr/include $cflags | ||||
| EOF | ||||
| done | ||||
| @@ -1,51 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| arch=$1 | ||||
| cross_file="/cross_file-$arch.txt" | ||||
| /usr/share/meson/debcrossgen --arch $arch -o "$cross_file" | ||||
| # Explicitly set ccache path for cross compilers | ||||
| sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file" | ||||
| if [ "$arch" = "i386" ]; then | ||||
|     # Work around a bug in debcrossgen that should be fixed in the next release | ||||
|     sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file" | ||||
| fi | ||||
| # Rely on qemu-user being configured in binfmt_misc on the host | ||||
| sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file" | ||||
|  | ||||
| # Add a line for rustc, which debcrossgen is missing. | ||||
| cc=`sed -n 's|c = .\(.*\).|\1|p' < $cross_file` | ||||
| if [[ "$arch" = "arm64" ]]; then | ||||
|     rust_target=aarch64-unknown-linux-gnu | ||||
| elif [[ "$arch" = "armhf" ]]; then | ||||
|     rust_target=armv7-unknown-linux-gnueabihf | ||||
| elif [[ "$arch" = "i386" ]]; then | ||||
|     rust_target=i686-unknown-linux-gnu | ||||
| elif [[ "$arch" = "ppc64el" ]]; then | ||||
|     rust_target=powerpc64le-unknown-linux-gnu | ||||
| elif [[ "$arch" = "s390x" ]]; then | ||||
|     rust_target=s390x-unknown-linux-gnu | ||||
| else | ||||
|     echo "Needs rustc target mapping" | ||||
| fi | ||||
| sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file" | ||||
|  | ||||
| # Set up cmake cross compile toolchain file for dEQP builds | ||||
| toolchain_file="/toolchain-$arch.cmake" | ||||
| if [[ "$arch" = "arm64" ]]; then | ||||
|     GCC_ARCH="aarch64-linux-gnu" | ||||
|     DE_CPU="DE_CPU_ARM_64" | ||||
|     CMAKE_ARCH=arm | ||||
| elif [[ "$arch" = "armhf" ]]; then | ||||
|     GCC_ARCH="arm-linux-gnueabihf" | ||||
|     DE_CPU="DE_CPU_ARM" | ||||
|     CMAKE_ARCH=arm | ||||
| fi | ||||
|  | ||||
| if [[ -n "$GCC_ARCH" ]]; then | ||||
|     echo "set(CMAKE_SYSTEM_NAME Linux)" > "$toolchain_file" | ||||
|     echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> "$toolchain_file" | ||||
|     echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)" >> "$toolchain_file" | ||||
|     echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)" >> "$toolchain_file" | ||||
|     echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")" >> "$toolchain_file" | ||||
|     echo "set(DE_CPU $DE_CPU)" >> "$toolchain_file" | ||||
| fi | ||||
| @@ -1,280 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| if [ $DEBIAN_ARCH = arm64 ]; then | ||||
|     ARCH_PACKAGES="firmware-qcom-media | ||||
|                    firmware-linux-nonfree | ||||
|                    libfontconfig1 | ||||
|                    libgl1 | ||||
|                    libglu1-mesa | ||||
|                    libvulkan-dev | ||||
|     " | ||||
| elif [ $DEBIAN_ARCH = amd64 ]; then | ||||
|     ARCH_PACKAGES="firmware-amd-graphics | ||||
|                    inetutils-syslogd | ||||
|                    iptables | ||||
|                    libcap2 | ||||
|                    libelf1 | ||||
|                    libfdt1 | ||||
|                    libllvm11 | ||||
|                    libva2 | ||||
|                    libva-drm2 | ||||
|                    socat | ||||
|                    spirv-tools | ||||
|                    sysvinit-core | ||||
|                   " | ||||
| fi | ||||
|  | ||||
| INSTALL_CI_FAIRY_PACKAGES="git | ||||
|                            python3-dev | ||||
|                            python3-pip | ||||
|                            python3-setuptools | ||||
|                            python3-wheel | ||||
|                            " | ||||
|  | ||||
| apt-get -y install --no-install-recommends \ | ||||
|     $ARCH_PACKAGES \ | ||||
|     $INSTALL_CI_FAIRY_PACKAGES \ | ||||
|     $EXTRA_LOCAL_PACKAGES \ | ||||
|     bash \ | ||||
|     ca-certificates \ | ||||
|     firmware-realtek \ | ||||
|     initramfs-tools \ | ||||
|     libasan6 \ | ||||
|     libexpat1 \ | ||||
|     libpng16-16 \ | ||||
|     libpython3.9 \ | ||||
|     libsensors5 \ | ||||
|     libvulkan1 \ | ||||
|     libwaffle-1-0 \ | ||||
|     libx11-6 \ | ||||
|     libx11-xcb1 \ | ||||
|     libxcb-dri2-0 \ | ||||
|     libxcb-dri3-0 \ | ||||
|     libxcb-glx0 \ | ||||
|     libxcb-present0 \ | ||||
|     libxcb-randr0 \ | ||||
|     libxcb-shm0 \ | ||||
|     libxcb-sync1 \ | ||||
|     libxcb-xfixes0 \ | ||||
|     libxdamage1 \ | ||||
|     libxext6 \ | ||||
|     libxfixes3 \ | ||||
|     libxkbcommon0 \ | ||||
|     libxrender1 \ | ||||
|     libxshmfence1 \ | ||||
|     libxxf86vm1 \ | ||||
|     netcat-openbsd \ | ||||
|     python3 \ | ||||
|     python3-lxml \ | ||||
|     python3-mako \ | ||||
|     python3-numpy \ | ||||
|     python3-packaging \ | ||||
|     python3-pil \ | ||||
|     python3-renderdoc \ | ||||
|     python3-requests \ | ||||
|     python3-simplejson \ | ||||
|     python3-yaml \ | ||||
|     sntp \ | ||||
|     strace \ | ||||
|     waffle-utils \ | ||||
|     wget \ | ||||
|     xinit \ | ||||
|     xserver-xorg-core | ||||
|  | ||||
| # Needed for ci-fairy, this revision is able to upload files to | ||||
| # MinIO and doesn't depend on git | ||||
| pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125 | ||||
|  | ||||
| apt-get purge -y \ | ||||
|         $INSTALL_CI_FAIRY_PACKAGES | ||||
|  | ||||
| passwd root -d | ||||
| chsh -s /bin/sh | ||||
|  | ||||
| cat > /init <<EOF | ||||
| #!/bin/sh | ||||
| export PS1=lava-shell: | ||||
| exec sh | ||||
| EOF | ||||
| chmod +x  /init | ||||
|  | ||||
| ####################################################################### | ||||
| # Strip the image to a small minimal system without removing the debian | ||||
| # toolchain. | ||||
|  | ||||
| # Copy timezone file and remove tzdata package | ||||
| rm -rf /etc/localtime | ||||
| cp /usr/share/zoneinfo/Etc/UTC /etc/localtime | ||||
|  | ||||
| UNNEEDED_PACKAGES=" | ||||
|         libfdisk1 | ||||
|         " | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Removing unused packages | ||||
| for PACKAGE in ${UNNEEDED_PACKAGES} | ||||
| do | ||||
| 	echo ${PACKAGE} | ||||
| 	if ! apt-get remove --purge --yes "${PACKAGE}" | ||||
| 	then | ||||
| 		echo "WARNING: ${PACKAGE} isn't installed" | ||||
| 	fi | ||||
| done | ||||
|  | ||||
| apt-get autoremove --yes || true | ||||
|  | ||||
| # Dropping logs | ||||
| rm -rf /var/log/* | ||||
|  | ||||
| # Dropping documentation, localization, i18n files, etc | ||||
| rm -rf /usr/share/doc/* | ||||
| rm -rf /usr/share/locale/* | ||||
| rm -rf /usr/share/X11/locale/* | ||||
| rm -rf /usr/share/man | ||||
| rm -rf /usr/share/i18n/* | ||||
| rm -rf /usr/share/info/* | ||||
| rm -rf /usr/share/lintian/* | ||||
| rm -rf /usr/share/common-licenses/* | ||||
| rm -rf /usr/share/mime/* | ||||
|  | ||||
| # Dropping reportbug scripts | ||||
| rm -rf /usr/share/bug | ||||
|  | ||||
| # Drop udev hwdb not required on a stripped system | ||||
| rm -rf /lib/udev/hwdb.bin /lib/udev/hwdb.d/* | ||||
|  | ||||
| # Drop all gconv conversions && binaries | ||||
| rm -rf usr/bin/iconv | ||||
| rm -rf usr/sbin/iconvconfig | ||||
| rm -rf usr/lib/*/gconv/ | ||||
|  | ||||
| # Remove libusb database | ||||
| rm -rf usr/sbin/update-usbids | ||||
| rm -rf var/lib/usbutils/usb.ids | ||||
| rm -rf usr/share/misc/usb.ids | ||||
|  | ||||
| rm -rf /root/.pip | ||||
|  | ||||
| ####################################################################### | ||||
| # Crush into a minimal production image to be deployed via some type of image | ||||
| # updating system. | ||||
| # IMPORTANT: The Debian system is not longer functional at this point, | ||||
| # for example, apt and dpkg will stop working | ||||
|  | ||||
| UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\ | ||||
| "ncurses-bin ncurses-base libncursesw6 libncurses6 "\ | ||||
| "perl-base "\ | ||||
| "debconf libdebconfclient0 "\ | ||||
| "e2fsprogs e2fslibs libfdisk1 "\ | ||||
| "insserv "\ | ||||
| "udev "\ | ||||
| "init-system-helpers "\ | ||||
| "cpio "\ | ||||
| "passwd "\ | ||||
| "libsemanage1 libsemanage-common "\ | ||||
| "libsepol1 "\ | ||||
| "gpgv "\ | ||||
| "hostname "\ | ||||
| "adduser "\ | ||||
| "debian-archive-keyring "\ | ||||
| "libegl1-mesa-dev "\ | ||||
| "libegl-mesa0 "\ | ||||
| "libgl1-mesa-dev "\ | ||||
| "libgl1-mesa-dri "\ | ||||
| "libglapi-mesa "\ | ||||
| "libgles2-mesa-dev "\ | ||||
| "libglx-mesa0 "\ | ||||
| "mesa-common-dev "\ | ||||
|  | ||||
| # Removing unneeded packages | ||||
| for PACKAGE in ${UNNEEDED_PACKAGES} | ||||
| do | ||||
| 	echo "Forcing removal of ${PACKAGE}" | ||||
| 	if ! dpkg --purge --force-remove-essential --force-depends "${PACKAGE}" | ||||
| 	then | ||||
| 		echo "WARNING: ${PACKAGE} isn't installed" | ||||
| 	fi | ||||
| done | ||||
|  | ||||
| # Show what's left package-wise before dropping dpkg itself | ||||
| COLUMNS=300 dpkg-query -W --showformat='${Installed-Size;10}\t${Package}\n' | sort -k1,1n | ||||
|  | ||||
| # Drop dpkg | ||||
| dpkg --purge --force-remove-essential --force-depends  dpkg | ||||
|  | ||||
| # No apt or dpkg, no need for its configuration archives | ||||
| rm -rf etc/apt | ||||
| rm -rf etc/dpkg | ||||
|  | ||||
| # Drop directories not part of ostree | ||||
| # Note that /var needs to exist as ostree bind mounts the deployment /var over | ||||
| # it | ||||
| rm -rf var/* opt srv share | ||||
|  | ||||
| # ca-certificates are in /etc drop the source | ||||
| rm -rf usr/share/ca-certificates | ||||
|  | ||||
| # No need for completions | ||||
| rm -rf usr/share/bash-completion | ||||
|  | ||||
| # No zsh, no need for comletions | ||||
| rm -rf usr/share/zsh/vendor-completions | ||||
|  | ||||
| # drop gcc python helpers | ||||
| rm -rf usr/share/gcc | ||||
|  | ||||
| # Drop sysvinit leftovers | ||||
| rm -rf etc/init.d | ||||
| rm -rf etc/rc[0-6S].d | ||||
|  | ||||
| # Drop upstart helpers | ||||
| rm -rf etc/init | ||||
|  | ||||
| # Various xtables helpers | ||||
| rm -rf usr/lib/xtables | ||||
|  | ||||
| # Drop all locales | ||||
| # TODO: only remaining locale is actually "C". Should we really remove it? | ||||
| rm -rf usr/lib/locale/* | ||||
|  | ||||
| # partition helpers | ||||
| rm -rf usr/sbin/*fdisk | ||||
|  | ||||
| # local compiler | ||||
| rm -rf usr/bin/localedef | ||||
|  | ||||
| # Systemd dns resolver | ||||
| find usr etc -name '*systemd-resolve*' -prune -exec rm -r {} \; | ||||
|  | ||||
| # Systemd network configuration | ||||
| find usr etc -name '*networkd*' -prune -exec rm -r {} \; | ||||
|  | ||||
| # systemd ntp client | ||||
| find usr etc -name '*timesyncd*' -prune -exec rm -r {} \; | ||||
|  | ||||
| # systemd hw database manager | ||||
| find usr etc -name '*systemd-hwdb*' -prune -exec rm -r {} \; | ||||
|  | ||||
| # No need for fuse | ||||
| find usr etc -name '*fuse*' -prune -exec rm -r {} \; | ||||
|  | ||||
| # lsb init function leftovers | ||||
| rm -rf usr/lib/lsb | ||||
|  | ||||
| # Only needed when adding libraries | ||||
| rm -rf usr/sbin/ldconfig* | ||||
|  | ||||
| # Games, unused | ||||
| rmdir usr/games | ||||
|  | ||||
| # Remove pam module to authenticate against a DB | ||||
| # plus libdb-5.3.so that is only used by this pam module | ||||
| rm -rf usr/lib/*/security/pam_userdb.so | ||||
| rm -rf usr/lib/*/libdb-5.3.so | ||||
|  | ||||
| # remove NSS support for nis, nisplus and hesiod | ||||
| rm -rf usr/lib/*/libnss_hesiod* | ||||
| rm -rf usr/lib/*/libnss_nis* | ||||
| @@ -1,79 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|         " | ||||
|  | ||||
| dpkg --add-architecture $arch | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|         $STABLE_EPHEMERAL \ | ||||
|         crossbuild-essential-$arch \ | ||||
|         libelf-dev:$arch \ | ||||
|         libexpat1-dev:$arch \ | ||||
|         libpciaccess-dev:$arch \ | ||||
|         libstdc++6:$arch \ | ||||
|         libvulkan-dev:$arch \ | ||||
|         libx11-dev:$arch \ | ||||
|         libx11-xcb-dev:$arch \ | ||||
|         libxcb-dri2-0-dev:$arch \ | ||||
|         libxcb-dri3-dev:$arch \ | ||||
|         libxcb-glx0-dev:$arch \ | ||||
|         libxcb-present-dev:$arch \ | ||||
|         libxcb-randr0-dev:$arch \ | ||||
|         libxcb-shm0-dev:$arch \ | ||||
|         libxcb-xfixes0-dev:$arch \ | ||||
|         libxdamage-dev:$arch \ | ||||
|         libxext-dev:$arch \ | ||||
|         libxrandr-dev:$arch \ | ||||
|         libxshmfence-dev:$arch \ | ||||
|         libxxf86vm-dev:$arch \ | ||||
|         wget | ||||
|  | ||||
| if [[ $arch != "armhf" ]]; then | ||||
|     if [[ $arch == "s390x" ]]; then | ||||
|         LLVM=9 | ||||
|     else | ||||
|         LLVM=11 | ||||
|     fi | ||||
|  | ||||
|     # llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only | ||||
|     # with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get | ||||
|     # around this. | ||||
|     apt-get install -y --no-remove \ | ||||
|             libclang-cpp${LLVM}:$arch \ | ||||
|             libffi-dev:$arch \ | ||||
|             libgcc-s1:$arch \ | ||||
|             libtinfo-dev:$arch \ | ||||
|             libz3-dev:$arch \ | ||||
|             llvm-${LLVM}:$arch \ | ||||
|             zlib1g | ||||
| fi | ||||
|  | ||||
| . .gitlab-ci/container/create-cross-file.sh $arch | ||||
|  | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
|  | ||||
| # dependencies where we want a specific version | ||||
| EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)" | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| apt-get purge -y \ | ||||
|         $STABLE_EPHEMERAL | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
|  | ||||
| # This needs to be done after container_post_build.sh, or apt-get breaks in there | ||||
| if [[ $arch != "armhf" ]]; then | ||||
|     apt-get download llvm-${LLVM}-{dev,tools}:$arch | ||||
|     dpkg -i --force-depends llvm-${LLVM}-*_${arch}.deb | ||||
|     rm llvm-${LLVM}-*_${arch}.deb | ||||
| fi | ||||
| @@ -1,106 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| EPHEMERAL="\ | ||||
|          autoconf \ | ||||
|          rdfind \ | ||||
|          unzip \ | ||||
|          " | ||||
|  | ||||
| apt-get install -y --no-remove $EPHEMERAL | ||||
|  | ||||
| # Fetch the NDK and extract just the toolchain we want. | ||||
| ndk=android-ndk-r21d | ||||
| wget -O $ndk.zip https://dl.google.com/android/repository/$ndk-linux-x86_64.zip | ||||
| unzip -d / $ndk.zip "$ndk/toolchains/llvm/*" | ||||
| rm $ndk.zip | ||||
| # Since it was packed as a zip file, symlinks/hardlinks got turned into | ||||
| # duplicate files.  Turn them into hardlinks to save on container space. | ||||
| rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/ | ||||
| # Drop some large tools we won't use in this build. | ||||
| find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f | ||||
|  | ||||
| sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3" | ||||
|  | ||||
| sh .gitlab-ci/container/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64 | ||||
| sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x86 x86 | ||||
| sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android arm armv8 | ||||
| sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl armv7a-linux-androideabi | ||||
|  | ||||
| # Not using build-libdrm.sh because we don't want its cleanup after building | ||||
| # each arch.  Fetch and extract now. | ||||
| export LIBDRM_VERSION=libdrm-2.4.110 | ||||
| wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz | ||||
| tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz | ||||
|  | ||||
| for arch in \ | ||||
|         x86_64-linux-android \ | ||||
|         i686-linux-android \ | ||||
|         aarch64-linux-android \ | ||||
|         arm-linux-androideabi ; do | ||||
|  | ||||
|     cd $LIBDRM_VERSION | ||||
|     rm -rf build-$arch | ||||
|     meson build-$arch \ | ||||
|           --cross-file=/cross_file-$arch.txt \ | ||||
|           --libdir=lib/$arch \ | ||||
|           -Dlibkms=false \ | ||||
|           -Dnouveau=false \ | ||||
|           -Dvc4=false \ | ||||
|           -Detnaviv=false \ | ||||
|           -Dfreedreno=false \ | ||||
|           -Dintel=false \ | ||||
|           -Dcairo-tests=false \ | ||||
|           -Dvalgrind=false | ||||
|     ninja -C build-$arch install | ||||
|     cd .. | ||||
| done | ||||
|  | ||||
| rm -rf $LIBDRM_VERSION | ||||
|  | ||||
| export LIBELF_VERSION=libelf-0.8.13 | ||||
| wget https://fossies.org/linux/misc/old/$LIBELF_VERSION.tar.gz | ||||
|  | ||||
| # Not 100% sure who runs the mirror above so be extra careful | ||||
| if ! echo "4136d7b4c04df68b686570afa26988ac ${LIBELF_VERSION}.tar.gz" | md5sum -c -; then | ||||
|     echo "Checksum failed" | ||||
|     exit 1 | ||||
| fi | ||||
|  | ||||
| tar -xf ${LIBELF_VERSION}.tar.gz | ||||
| cd $LIBELF_VERSION | ||||
|  | ||||
| # Work around a bug in the original configure not enabling __LIBELF64. | ||||
| autoreconf | ||||
|  | ||||
| for arch in \ | ||||
|         x86_64-linux-android \ | ||||
|         i686-linux-android \ | ||||
|         aarch64-linux-android \ | ||||
|         arm-linux-androideabi ; do | ||||
|  | ||||
|     ccarch=${arch} | ||||
|     if [ "${arch}" ==  'arm-linux-androideabi' ] | ||||
|     then | ||||
|        ccarch=armv7a-linux-androideabi | ||||
|     fi | ||||
|  | ||||
|     export CC=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ar | ||||
|     export CC=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}29-clang | ||||
|     export CXX=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}29-clang++ | ||||
|     export LD=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ld | ||||
|     export RANLIB=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ranlib | ||||
|  | ||||
|     # The configure script doesn't know about android, but doesn't really use the host anyway it | ||||
|     # seems | ||||
|     ./configure --host=x86_64-linux-gnu  --disable-nls --disable-shared \ | ||||
|                 --libdir=/usr/local/lib/${arch} | ||||
|     make install | ||||
|     make distclean | ||||
| done | ||||
|  | ||||
| cd .. | ||||
| rm -rf $LIBELF_VERSION | ||||
|  | ||||
| apt-get purge -y $EPHEMERAL | ||||
| @@ -1,72 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| apt-get -y install ca-certificates | ||||
| sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list | ||||
| echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/buster.list | ||||
| apt-get update | ||||
|  | ||||
| apt-get -y install \ | ||||
| 	${EXTRA_LOCAL_PACKAGES} \ | ||||
| 	abootimg \ | ||||
| 	autoconf \ | ||||
| 	automake \ | ||||
| 	bc \ | ||||
| 	bison \ | ||||
| 	ccache \ | ||||
| 	cmake \ | ||||
| 	debootstrap \ | ||||
| 	fastboot \ | ||||
| 	flex \ | ||||
| 	g++ \ | ||||
| 	git \ | ||||
| 	kmod \ | ||||
| 	libasan6 \ | ||||
| 	libdrm-dev \ | ||||
| 	libelf-dev \ | ||||
| 	libexpat1-dev \ | ||||
| 	libx11-dev \ | ||||
| 	libx11-xcb-dev \ | ||||
| 	libxcb-dri2-0-dev \ | ||||
| 	libxcb-dri3-dev \ | ||||
| 	libxcb-glx0-dev \ | ||||
| 	libxcb-present-dev \ | ||||
| 	libxcb-randr0-dev \ | ||||
| 	libxcb-shm0-dev \ | ||||
| 	libxcb-xfixes0-dev \ | ||||
| 	libxdamage-dev \ | ||||
| 	libxext-dev \ | ||||
| 	libxrandr-dev \ | ||||
| 	libxshmfence-dev \ | ||||
| 	libxxf86vm-dev \ | ||||
| 	llvm-11-dev \ | ||||
| 	meson \ | ||||
| 	pkg-config \ | ||||
| 	python3-mako \ | ||||
| 	python3-pil \ | ||||
| 	python3-pip \ | ||||
| 	python3-requests \ | ||||
| 	python3-setuptools \ | ||||
| 	u-boot-tools \ | ||||
| 	wget \ | ||||
| 	xz-utils \ | ||||
| 	zlib1g-dev | ||||
|  | ||||
| # Not available anymore in bullseye | ||||
| apt-get install -y --no-remove -t buster \ | ||||
|         android-sdk-ext4-utils | ||||
|  | ||||
| pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125 | ||||
|  | ||||
| arch=armhf | ||||
| . .gitlab-ci/container/cross_build.sh | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| # dependencies where we want a specific version | ||||
| EXTRA_MESON_ARGS= | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,39 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| ############### Install packages for baremetal testing | ||||
| apt-get install -y ca-certificates | ||||
| sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|         abootimg \ | ||||
|         cpio \ | ||||
|         fastboot \ | ||||
|         netcat \ | ||||
|         procps \ | ||||
|         python3-distutils \ | ||||
|         python3-minimal \ | ||||
|         python3-serial \ | ||||
|         rsync \ | ||||
|         snmp \ | ||||
|         wget | ||||
|  | ||||
| # setup SNMPv2 SMI MIB | ||||
| wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \ | ||||
|     -O /usr/share/snmp/mibs/SNMPv2-SMI.txt | ||||
|  | ||||
| arch=arm64 . .gitlab-ci/container/baremetal_build.sh | ||||
| arch=armhf . .gitlab-ci/container/baremetal_build.sh | ||||
|  | ||||
| # This firmware file from Debian bullseye causes hangs | ||||
| wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \ | ||||
|      -O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw | ||||
|  | ||||
| mkdir -p /baremetal-files/jetson-nano/boot/ | ||||
| ln -s \ | ||||
|     /baremetal-files/Image \ | ||||
|     /baremetal-files/tegra210-p3450-0000.dtb \ | ||||
|     /baremetal-files/jetson-nano/boot/ | ||||
| @@ -1,5 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| arch=i386 | ||||
|  | ||||
| . .gitlab-ci/container/cross_build.sh | ||||
| @@ -1,5 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| arch=ppc64el | ||||
|  | ||||
| . .gitlab-ci/container/cross_build.sh | ||||
| @@ -1,5 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| arch=s390x | ||||
|  | ||||
| . .gitlab-ci/container/cross_build.sh | ||||
| @@ -1,81 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| apt-get install -y ca-certificates | ||||
|  | ||||
| sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at | ||||
| # the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|         python3-pip \ | ||||
|         python3-setuptools \ | ||||
|         " | ||||
|  | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|         $STABLE_EPHEMERAL \ | ||||
|         bison \ | ||||
|         ccache \ | ||||
|         dpkg-cross \ | ||||
|         flex \ | ||||
|         g++ \ | ||||
|         g++-mingw-w64-x86-64 \ | ||||
|         gcc \ | ||||
|         git \ | ||||
|         glslang-tools \ | ||||
|         kmod \ | ||||
|         libclang-11-dev \ | ||||
|         libclang-9-dev \ | ||||
|         libclc-dev \ | ||||
|         libelf-dev \ | ||||
|         libepoxy-dev \ | ||||
|         libexpat1-dev \ | ||||
|         libgtk-3-dev \ | ||||
|         libllvm11 \ | ||||
|         libllvm9 \ | ||||
|         libomxil-bellagio-dev \ | ||||
|         libpciaccess-dev \ | ||||
|         libunwind-dev \ | ||||
|         libva-dev \ | ||||
|         libvdpau-dev \ | ||||
|         libvulkan-dev \ | ||||
|         libx11-dev \ | ||||
|         libx11-xcb-dev \ | ||||
|         libxext-dev \ | ||||
|         libxml2-utils \ | ||||
|         libxrandr-dev \ | ||||
|         libxrender-dev \ | ||||
|         libxshmfence-dev \ | ||||
|         libxvmc-dev \ | ||||
|         libxxf86vm-dev \ | ||||
|         libz-mingw-w64-dev \ | ||||
|         make \ | ||||
|         meson \ | ||||
|         pkg-config \ | ||||
|         python3-mako \ | ||||
|         python3-pil \ | ||||
|         python3-requests \ | ||||
|         qemu-user \ | ||||
|         valgrind \ | ||||
|         wget \ | ||||
|         wine64 \ | ||||
|         x11proto-dri2-dev \ | ||||
|         x11proto-gl-dev \ | ||||
|         x11proto-randr-dev \ | ||||
|         xz-utils \ | ||||
|         zlib1g-dev | ||||
|  | ||||
| # Needed for ci-fairy, this revision is able to upload files to MinIO | ||||
| pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125 | ||||
|  | ||||
| ############### Uninstall ephemeral packages | ||||
|  | ||||
| apt-get purge -y $STABLE_EPHEMERAL | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,109 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|       autoconf \ | ||||
|       automake \ | ||||
|       autotools-dev \ | ||||
|       bzip2 \ | ||||
|       libtool \ | ||||
|       python3-pip \ | ||||
|       " | ||||
|  | ||||
| # We need multiarch for Wine | ||||
| dpkg --add-architecture i386 | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|       $STABLE_EPHEMERAL \ | ||||
|       check \ | ||||
|       clang \ | ||||
|       cmake \ | ||||
|       libasan6 \ | ||||
|       libarchive-dev \ | ||||
|       libclang-cpp11-dev \ | ||||
|       libgbm-dev \ | ||||
|       libglvnd-dev \ | ||||
|       libllvmspirvlib-dev \ | ||||
|       liblua5.3-dev \ | ||||
|       libxcb-dri2-0-dev \ | ||||
|       libxcb-dri3-dev \ | ||||
|       libxcb-glx0-dev \ | ||||
|       libxcb-present-dev \ | ||||
|       libxcb-randr0-dev \ | ||||
|       libxcb-shm0-dev \ | ||||
|       libxcb-sync-dev \ | ||||
|       libxcb-xfixes0-dev \ | ||||
|       libxcb1-dev \ | ||||
|       libxml2-dev \ | ||||
|       llvm-11-dev \ | ||||
|       llvm-9-dev \ | ||||
|       ocl-icd-opencl-dev \ | ||||
|       python3-freezegun \ | ||||
|       python3-pytest \ | ||||
|       procps \ | ||||
|       spirv-tools \ | ||||
|       strace \ | ||||
|       time \ | ||||
|       wine \ | ||||
|       wine32 | ||||
|  | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
|  | ||||
| # Debian's pkg-config wrapers for mingw are broken, and there's no sign that | ||||
| # they're going to be fixed, so we'll just have to fix it ourselves | ||||
| # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492 | ||||
| cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF | ||||
| #!/bin/sh | ||||
|  | ||||
| PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@ | ||||
| EOF | ||||
| chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config | ||||
|  | ||||
|  | ||||
| # dependencies where we want a specific version | ||||
| export              XORG_RELEASES=https://xorg.freedesktop.org/releases/individual | ||||
|  | ||||
| export         XORGMACROS_VERSION=util-macros-1.19.0 | ||||
|  | ||||
| wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2 | ||||
| tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2 | ||||
| cd $XORGMACROS_VERSION; ./configure; make install; cd .. | ||||
| rm -rf $XORGMACROS_VERSION | ||||
|  | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| pushd /usr/local | ||||
| git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1 | ||||
| rm -rf shader-db/.git | ||||
| cd shader-db | ||||
| make | ||||
| popd | ||||
|  | ||||
| git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1 | ||||
| pushd DirectX-Headers | ||||
| mkdir build | ||||
| cd build | ||||
| meson .. --backend=ninja --buildtype=release -Dbuild-test=false | ||||
| ninja | ||||
| ninja install | ||||
| popd | ||||
| rm -rf DirectX-Headers | ||||
|  | ||||
| pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7 | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| apt-get purge -y \ | ||||
|       $STABLE_EPHEMERAL | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,70 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| apt-get install -y ca-certificates | ||||
|  | ||||
| sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at | ||||
| # the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|       cargo \ | ||||
|       python3-dev \ | ||||
|       python3-pip \ | ||||
|       python3-setuptools \ | ||||
|       python3-wheel \ | ||||
|       " | ||||
|  | ||||
| apt-get update | ||||
| apt-get dist-upgrade -y | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|       git \ | ||||
|       git-lfs \ | ||||
|       libasan6 \ | ||||
|       libexpat1 \ | ||||
|       libllvm11 \ | ||||
|       libllvm9 \ | ||||
|       liblz4-1 \ | ||||
|       libpng16-16 \ | ||||
|       libpython3.9 \ | ||||
|       libvulkan1 \ | ||||
|       libwayland-client0 \ | ||||
|       libwayland-server0 \ | ||||
|       libxcb-ewmh2 \ | ||||
|       libxcb-randr0 \ | ||||
|       libxcb-xfixes0 \ | ||||
|       libxkbcommon0 \ | ||||
|       libxrandr2 \ | ||||
|       libxrender1 \ | ||||
|       python3-mako \ | ||||
|       python3-numpy \ | ||||
|       python3-packaging \ | ||||
|       python3-pil \ | ||||
|       python3-requests \ | ||||
|       python3-six \ | ||||
|       python3-yaml \ | ||||
|       vulkan-tools \ | ||||
|       waffle-utils \ | ||||
|       xauth \ | ||||
|       xvfb \ | ||||
|       zlib1g | ||||
|  | ||||
| apt-get install -y --no-install-recommends \ | ||||
|       $STABLE_EPHEMERAL | ||||
|  | ||||
| # Needed for ci-fairy, this revision is able to upload files to MinIO | ||||
| # and doesn't depend on git | ||||
| pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@34f4ade99434043f88e164933f570301fd18b125 | ||||
|  | ||||
| ############### Build dEQP runner | ||||
| . .gitlab-ci/container/build-deqp-runner.sh | ||||
| rm -rf ~/.cargo | ||||
|  | ||||
| apt-get purge -y $STABLE_EPHEMERAL | ||||
|  | ||||
| apt-get autoremove -y --purge | ||||
| @@ -1,123 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|       autoconf \ | ||||
|       automake \ | ||||
|       bc \ | ||||
|       bison \ | ||||
|       bzip2 \ | ||||
|       ccache \ | ||||
|       clang-11 \ | ||||
|       cmake \ | ||||
|       flex \ | ||||
|       g++ \ | ||||
|       glslang-tools \ | ||||
|       libasound2-dev \ | ||||
|       libcap-dev \ | ||||
|       libclang-cpp11-dev \ | ||||
|       libelf-dev \ | ||||
|       libexpat1-dev \ | ||||
|       libfdt-dev \ | ||||
|       libgbm-dev \ | ||||
|       libgles2-mesa-dev \ | ||||
|       libllvmspirvlib-dev \ | ||||
|       libpciaccess-dev \ | ||||
|       libpng-dev \ | ||||
|       libudev-dev \ | ||||
|       libvulkan-dev \ | ||||
|       libwaffle-dev \ | ||||
|       libx11-xcb-dev \ | ||||
|       libxcb-dri2-0-dev \ | ||||
|       libxext-dev \ | ||||
|       libxkbcommon-dev \ | ||||
|       libxrender-dev \ | ||||
|       llvm-11-dev \ | ||||
|       llvm-spirv \ | ||||
|       make \ | ||||
|       meson \ | ||||
|       ocl-icd-opencl-dev \ | ||||
|       patch \ | ||||
|       pkg-config \ | ||||
|       python3-distutils \ | ||||
|       xz-utils \ | ||||
|       " | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|       $STABLE_EPHEMERAL \ | ||||
|       clinfo \ | ||||
|       iptables \ | ||||
|       libclang-common-11-dev \ | ||||
|       libclang-cpp11 \ | ||||
|       libcap2 \ | ||||
|       libegl1 \ | ||||
|       libepoxy-dev \ | ||||
|       libfdt1 \ | ||||
|       libllvmspirvlib11 \ | ||||
|       libxcb-shm0 \ | ||||
|       ocl-icd-libopencl1 \ | ||||
|       python3-lxml \ | ||||
|       python3-renderdoc \ | ||||
|       python3-simplejson \ | ||||
|       socat \ | ||||
|       spirv-tools \ | ||||
|       sysvinit-core \ | ||||
|       wget | ||||
|  | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| ############### Build libdrm | ||||
|  | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| ############### Build Wayland | ||||
|  | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| ############### Build Crosvm | ||||
|  | ||||
| . .gitlab-ci/container/build-rust.sh | ||||
| . .gitlab-ci/container/build-crosvm.sh | ||||
| rm -rf /root/.cargo | ||||
| rm -rf /root/.rustup | ||||
|  | ||||
| ############### Build kernel | ||||
|  | ||||
| export DEFCONFIG="arch/x86/configs/x86_64_defconfig" | ||||
| export KERNEL_IMAGE_NAME=bzImage | ||||
| export KERNEL_ARCH=x86_64 | ||||
| export DEBIAN_ARCH=amd64 | ||||
|  | ||||
| mkdir -p /lava-files/ | ||||
| . .gitlab-ci/container/build-kernel.sh | ||||
|  | ||||
| ############### Build libclc | ||||
|  | ||||
| . .gitlab-ci/container/build-libclc.sh | ||||
|  | ||||
| ############### Build piglit | ||||
|  | ||||
| PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh | ||||
|  | ||||
| ############### Build dEQP GL | ||||
|  | ||||
| DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh | ||||
|  | ||||
| ############### Build apitrace | ||||
|  | ||||
| . .gitlab-ci/container/build-apitrace.sh | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| ccache --show-stats | ||||
|  | ||||
| apt-get purge -y \ | ||||
|       $STABLE_EPHEMERAL | ||||
|  | ||||
| apt-get autoremove -y --purge | ||||
| @@ -1,164 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Ephemeral packages (installed for this script and removed again at the end) | ||||
| STABLE_EPHEMERAL=" \ | ||||
|       ccache \ | ||||
|       cmake \ | ||||
|       g++ \ | ||||
|       g++-mingw-w64-i686-posix \ | ||||
|       g++-mingw-w64-x86-64-posix \ | ||||
|       glslang-tools \ | ||||
|       libexpat1-dev \ | ||||
|       libgbm-dev \ | ||||
|       libgles2-mesa-dev \ | ||||
|       liblz4-dev \ | ||||
|       libpciaccess-dev \ | ||||
|       libudev-dev \ | ||||
|       libvulkan-dev \ | ||||
|       libwaffle-dev \ | ||||
|       libx11-xcb-dev \ | ||||
|       libxcb-ewmh-dev \ | ||||
|       libxcb-keysyms1-dev \ | ||||
|       libxkbcommon-dev \ | ||||
|       libxrandr-dev \ | ||||
|       libxrender-dev \ | ||||
|       libzstd-dev \ | ||||
|       meson \ | ||||
|       mingw-w64-i686-dev \ | ||||
|       mingw-w64-tools \ | ||||
|       mingw-w64-x86-64-dev \ | ||||
|       p7zip \ | ||||
|       patch \ | ||||
|       pkg-config \ | ||||
|       python3-distutils \ | ||||
|       wget \ | ||||
|       xz-utils \ | ||||
|       " | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|       $STABLE_EPHEMERAL \ | ||||
|       libxcb-shm0 \ | ||||
|       python3-lxml \ | ||||
|       python3-simplejson \ | ||||
|       xinit \ | ||||
|       xserver-xorg-video-amdgpu \ | ||||
|       xserver-xorg-video-ati | ||||
|  | ||||
| # We need multiarch for Wine | ||||
| dpkg --add-architecture i386 | ||||
|  | ||||
| apt-get update | ||||
|  | ||||
| apt-get install -y --no-remove \ | ||||
|       wine \ | ||||
|       wine32 \ | ||||
|       wine64 | ||||
|  | ||||
| function setup_wine() { | ||||
|     export WINEDEBUG="-all" | ||||
|     export WINEPREFIX="$1" | ||||
|  | ||||
|     # We don't want crash dialogs | ||||
|     cat >crashdialog.reg <<EOF | ||||
| Windows Registry Editor Version 5.00 | ||||
|  | ||||
| [HKEY_CURRENT_USER\Software\Wine\WineDbg] | ||||
| "ShowCrashDialog"=dword:00000000 | ||||
|  | ||||
| EOF | ||||
|  | ||||
|     # Set the wine prefix and disable the crash dialog | ||||
|     wine regedit crashdialog.reg | ||||
|     rm crashdialog.reg | ||||
|  | ||||
|     # An immediate wine command may fail with: "${WINEPREFIX}: Not a | ||||
|     # valid wine prefix."  and that is just spit because of checking | ||||
|     # the existance of the system.reg file, which fails.  Just giving | ||||
|     # it a bit more of time for it to be created solves the problem | ||||
|     # ... | ||||
|     while ! test -f  "${WINEPREFIX}/system.reg"; do sleep 1; done | ||||
| } | ||||
|  | ||||
| ############### Install DXVK | ||||
|  | ||||
| DXVK_VERSION="1.8.1" | ||||
|  | ||||
| setup_wine "/dxvk-wine64" | ||||
|  | ||||
| wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz" | ||||
| tar xzpf dxvk-"${DXVK_VERSION}".tar.gz | ||||
| dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install | ||||
| rm -rf dxvk-"${DXVK_VERSION}" | ||||
| rm dxvk-"${DXVK_VERSION}".tar.gz | ||||
|  | ||||
| ############### Install Windows' apitrace binaries | ||||
|  | ||||
| APITRACE_VERSION="10.0" | ||||
| APITRACE_VERSION_DATE="" | ||||
|  | ||||
| wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" | ||||
| 7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \ | ||||
|       "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \ | ||||
|       "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe" | ||||
| mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64 | ||||
| rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" | ||||
|  | ||||
| # Add the apitrace path to the registry | ||||
| wine \ | ||||
|     reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \ | ||||
|     /v Path \ | ||||
|     /t REG_EXPAND_SZ \ | ||||
|     /d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \ | ||||
|     /f | ||||
|  | ||||
| ############### Building ... | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| ############### Build libdrm | ||||
|  | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| ############### Build Wayland | ||||
|  | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| ############### Build parallel-deqp-runner's hang-detection tool | ||||
|  | ||||
| . .gitlab-ci/container/build-hang-detection.sh | ||||
|  | ||||
| ############### Build piglit | ||||
|  | ||||
| PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh | ||||
|  | ||||
| ############### Build Fossilize | ||||
|  | ||||
| . .gitlab-ci/container/build-fossilize.sh | ||||
|  | ||||
| ############### Build dEQP VK | ||||
|  | ||||
| . .gitlab-ci/container/build-deqp.sh | ||||
|  | ||||
| ############### Build gfxreconstruct | ||||
|  | ||||
| . .gitlab-ci/container/build-gfxreconstruct.sh | ||||
|  | ||||
| ############### Build VKD3D-Proton | ||||
|  | ||||
| setup_wine "/vkd3d-proton-wine64" | ||||
|  | ||||
| . .gitlab-ci/container/build-vkd3d-proton.sh | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| ccache --show-stats | ||||
|  | ||||
| apt-get purge -y \ | ||||
|       $STABLE_EPHEMERAL | ||||
|  | ||||
| apt-get autoremove -y --purge | ||||
| @@ -1,101 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
|  | ||||
| EPHEMERAL=" | ||||
|         autoconf | ||||
|         automake | ||||
|         bzip2 | ||||
|         git | ||||
|         libtool | ||||
|         pkgconfig(epoxy) | ||||
|         pkgconfig(gbm) | ||||
|         unzip | ||||
|         wget | ||||
|         xz | ||||
|         " | ||||
|  | ||||
| dnf install -y --setopt=install_weak_deps=False \ | ||||
|     bison \ | ||||
|     ccache \ | ||||
|     clang-devel \ | ||||
|     flex \ | ||||
|     gcc \ | ||||
|     gcc-c++ \ | ||||
|     gettext \ | ||||
|     kernel-headers \ | ||||
|     llvm-devel \ | ||||
|     clang-devel \ | ||||
|     meson \ | ||||
|     "pkgconfig(dri2proto)" \ | ||||
|     "pkgconfig(expat)" \ | ||||
|     "pkgconfig(glproto)" \ | ||||
|     "pkgconfig(libclc)" \ | ||||
|     "pkgconfig(libelf)" \ | ||||
|     "pkgconfig(libglvnd)" \ | ||||
|     "pkgconfig(libomxil-bellagio)" \ | ||||
|     "pkgconfig(libselinux)" \ | ||||
|     "pkgconfig(libva)" \ | ||||
|     "pkgconfig(pciaccess)" \ | ||||
|     "pkgconfig(vdpau)" \ | ||||
|     "pkgconfig(vulkan)" \ | ||||
|     "pkgconfig(x11)" \ | ||||
|     "pkgconfig(x11-xcb)" \ | ||||
|     "pkgconfig(xcb)" \ | ||||
|     "pkgconfig(xcb-dri2)" \ | ||||
|     "pkgconfig(xcb-dri3)" \ | ||||
|     "pkgconfig(xcb-glx)" \ | ||||
|     "pkgconfig(xcb-present)" \ | ||||
|     "pkgconfig(xcb-randr)" \ | ||||
|     "pkgconfig(xcb-sync)" \ | ||||
|     "pkgconfig(xcb-xfixes)" \ | ||||
|     "pkgconfig(xdamage)" \ | ||||
|     "pkgconfig(xext)" \ | ||||
|     "pkgconfig(xfixes)" \ | ||||
|     "pkgconfig(xrandr)" \ | ||||
|     "pkgconfig(xshmfence)" \ | ||||
|     "pkgconfig(xxf86vm)" \ | ||||
|     "pkgconfig(zlib)" \ | ||||
|     python-unversioned-command \ | ||||
|     python3-devel \ | ||||
|     python3-mako \ | ||||
|     python3-devel \ | ||||
|     python3-mako \ | ||||
|     vulkan-headers \ | ||||
|     spirv-tools-devel \ | ||||
|     spirv-llvm-translator-devel \ | ||||
|     $EPHEMERAL | ||||
|  | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
|  | ||||
| # dependencies where we want a specific version | ||||
| export              XORG_RELEASES=https://xorg.freedesktop.org/releases/individual | ||||
|  | ||||
| export         XORGMACROS_VERSION=util-macros-1.19.0 | ||||
|  | ||||
| wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2 | ||||
| tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2 | ||||
| cd $XORGMACROS_VERSION; ./configure; make install; cd .. | ||||
| rm -rf $XORGMACROS_VERSION | ||||
|  | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
| . .gitlab-ci/container/build-wayland.sh | ||||
|  | ||||
| pushd /usr/local | ||||
| git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1 | ||||
| rm -rf shader-db/.git | ||||
| cd shader-db | ||||
| make | ||||
| popd | ||||
|  | ||||
|  | ||||
| ############### Uninstall the build software | ||||
|  | ||||
| dnf remove -y $EPHEMERAL | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
| @@ -1,420 +0,0 @@ | ||||
| # Docker image tag helper templates | ||||
|  | ||||
| .incorporate-templates-commit: | ||||
|   variables: | ||||
|     FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|  | ||||
| .incorporate-base-tag+templates-commit: | ||||
|   variables: | ||||
|     FDO_BASE_IMAGE: "${CI_REGISTRY_IMAGE}/${MESA_BASE_IMAGE}:${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|     FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|  | ||||
| .set-image: | ||||
|   extends: | ||||
|     - .incorporate-templates-commit | ||||
|   variables: | ||||
|     MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}" | ||||
|   image: "$MESA_IMAGE" | ||||
|  | ||||
| .set-image-base-tag: | ||||
|   extends: | ||||
|     - .set-image | ||||
|     - .incorporate-base-tag+templates-commit | ||||
|   variables: | ||||
|     MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}" | ||||
|  | ||||
|  | ||||
| # Build the CI docker images. | ||||
| # | ||||
| # MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the | ||||
| # image doesn't exist yet, the container stage job generates it. | ||||
| # | ||||
| # In order to generate a new image, one should generally change the tag. | ||||
| # While removing the image from the registry would also work, that's not | ||||
| # recommended except for ephemeral images during development: Replacing | ||||
| # an image after a significant amount of time might pull in newer | ||||
| # versions of gcc/clang or other packages, which might break the build | ||||
| # with older commits using the same tag. | ||||
| # | ||||
| # After merging a change resulting in generating a new image to the | ||||
| # main repository, it's recommended to remove the image from the source | ||||
| # repository's container registry, so that the image from the main | ||||
| # repository's registry will be used there as well. | ||||
|  | ||||
| .container: | ||||
|   stage: container | ||||
|   extends: | ||||
|     - .container-rules | ||||
|     - .incorporate-templates-commit | ||||
|   variables: | ||||
|     FDO_DISTRIBUTION_VERSION: bullseye-slim | ||||
|     FDO_REPO_SUFFIX: $CI_JOB_NAME | ||||
|     FDO_DISTRIBUTION_EXEC: 'env FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh' | ||||
|     # no need to pull the whole repo to build the container image | ||||
|     GIT_STRATEGY: none | ||||
|  | ||||
| .use-base-image: | ||||
|   extends: | ||||
|     - .container | ||||
|     - .incorporate-base-tag+templates-commit | ||||
|     # Don't want the .container rules | ||||
|     - .ci-run-policy | ||||
|  | ||||
| # Debian 11 based x86 build image base | ||||
| debian/x86_build-base: | ||||
|   extends: | ||||
|     - .fdo.container-build@debian | ||||
|     - .container | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-x86_build-base ${DEBIAN_BASE_TAG} | ||||
|  | ||||
| .use-debian/x86_build-base: | ||||
|   extends: | ||||
|     - .fdo.container-build@debian | ||||
|     - .use-base-image | ||||
|   variables: | ||||
|     MESA_BASE_IMAGE: ${DEBIAN_X86_BUILD_BASE_IMAGE} | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_ARTIFACTS_BASE_TAG: *debian-x86_build-base | ||||
|   needs: | ||||
|     - debian/x86_build-base | ||||
|  | ||||
| # Debian 11 based x86 main build image | ||||
| debian/x86_build: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-x86_build ${DEBIAN_BUILD_TAG} | ||||
|  | ||||
| .use-debian/x86_build: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_IMAGE_PATH: ${DEBIAN_X86_BUILD_IMAGE_PATH} | ||||
|     MESA_IMAGE_TAG: *debian-x86_build | ||||
|   needs: | ||||
|     - debian/x86_build | ||||
|  | ||||
| # Debian 11 based i386 cross-build image | ||||
| debian/i386_build: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-i386_build ${DEBIAN_BUILD_TAG} | ||||
|  | ||||
| .use-debian/i386_build: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_IMAGE_PATH: "debian/i386_build" | ||||
|     MESA_IMAGE_TAG: *debian-i386_build | ||||
|   needs: | ||||
|     - debian/i386_build | ||||
|  | ||||
| # Debian 11 based ppc64el cross-build image | ||||
| debian/ppc64el_build: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-ppc64el_build ${DEBIAN_BUILD_TAG} | ||||
|  | ||||
| .use-debian/ppc64el_build: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_IMAGE_PATH: "debian/ppc64el_build" | ||||
|     MESA_IMAGE_TAG: *debian-ppc64el_build | ||||
|   needs: | ||||
|     - debian/ppc64el_build | ||||
|  | ||||
| # Debian 11 based s390x cross-build image | ||||
| debian/s390x_build: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-s390x_build ${DEBIAN_BUILD_TAG} | ||||
|  | ||||
| .use-debian/s390x_build: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_IMAGE_PATH: "debian/s390x_build" | ||||
|     MESA_IMAGE_TAG: *debian-s390x_build | ||||
|   needs: | ||||
|     - debian/s390x_build | ||||
|  | ||||
| # Android NDK cross-build image | ||||
| debian/android_build: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-android_build ${DEBIAN_BUILD_TAG} | ||||
|  | ||||
| .use-debian/android_build: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_build-base | ||||
|     MESA_IMAGE_PATH: "debian/android_build" | ||||
|     MESA_IMAGE_TAG: *debian-android_build | ||||
|   needs: | ||||
|     - debian/android_build | ||||
|  | ||||
| # Debian 11 based x86 test image base | ||||
| debian/x86_test-base: | ||||
|   extends: debian/x86_build-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-x86_test-base ${DEBIAN_BASE_TAG} | ||||
|  | ||||
| .use-debian/x86_test-base: | ||||
|   extends: | ||||
|     - .fdo.container-build@debian | ||||
|     - .use-base-image | ||||
|   variables: | ||||
|     MESA_BASE_IMAGE: ${DEBIAN_X86_TEST_BASE_IMAGE} | ||||
|     MESA_BASE_TAG: *debian-x86_test-base | ||||
|   needs: | ||||
|     - debian/x86_test-base | ||||
|  | ||||
| # Debian 11 based x86 test image for GL | ||||
| debian/x86_test-gl: | ||||
|   extends: .use-debian/x86_test-base | ||||
|   variables: | ||||
|     FDO_DISTRIBUTION_EXEC: 'env KERNEL_URL=${KERNEL_URL} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} bash .gitlab-ci/container/${CI_JOB_NAME}.sh' | ||||
|     KERNEL_URL: &kernel-rootfs-url "https://gitlab.freedesktop.org/gfx-ci/linux/-/archive/v5.16-for-mesa-ci-991fec6622591/linux-v5.16-for-mesa-ci-991fec6622591.tar.bz2" | ||||
|     MESA_IMAGE_TAG: &debian-x86_test-gl ${DEBIAN_X86_TEST_GL_TAG} | ||||
|  | ||||
| .use-debian/x86_test-gl: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_test-base | ||||
|     MESA_IMAGE_PATH: ${DEBIAN_X86_TEST_IMAGE_PATH} | ||||
|     MESA_IMAGE_TAG: *debian-x86_test-gl | ||||
|   needs: | ||||
|     - debian/x86_test-gl | ||||
|  | ||||
| # Debian 11 based x86 test image for VK | ||||
| debian/x86_test-vk: | ||||
|   extends: .use-debian/x86_test-base | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-x86_test-vk ${DEBIAN_X86_TEST_VK_TAG} | ||||
|  | ||||
| .use-debian/x86_test-vk: | ||||
|   extends: | ||||
|     - .set-image-base-tag | ||||
|   variables: | ||||
|     MESA_BASE_TAG: *debian-x86_test-base | ||||
|     MESA_IMAGE_PATH: "debian/x86_test-vk" | ||||
|     MESA_IMAGE_TAG: *debian-x86_test-vk | ||||
|   needs: | ||||
|     - debian/x86_test-vk | ||||
|  | ||||
| # Debian 11 based ARM build image | ||||
| debian/arm_build: | ||||
|   extends: | ||||
|     - .fdo.container-build@debian | ||||
|     - .container | ||||
|   tags: | ||||
|     - aarch64 | ||||
|   variables: | ||||
|     MESA_IMAGE_TAG: &debian-arm_build ${DEBIAN_BASE_TAG} | ||||
|  | ||||
| .use-debian/arm_build: | ||||
|   extends: | ||||
|     - .set-image | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: "debian/arm_build" | ||||
|     MESA_IMAGE_TAG: *debian-arm_build | ||||
|     MESA_ARTIFACTS_TAG: *debian-arm_build | ||||
|   needs: | ||||
|     - debian/arm_build | ||||
|  | ||||
|  | ||||
| # Fedora 34 based x86 build image | ||||
| fedora/x86_build: | ||||
|   extends: | ||||
|     - .fdo.container-build@fedora | ||||
|     - .container | ||||
|   variables: | ||||
|     FDO_DISTRIBUTION_VERSION: 34 | ||||
|     MESA_IMAGE_TAG: &fedora-x86_build ${FEDORA_X86_BUILD_TAG} | ||||
|  | ||||
| .use-fedora/x86_build: | ||||
|   extends: | ||||
|     - .set-image | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: "fedora/x86_build" | ||||
|     MESA_IMAGE_TAG: *fedora-x86_build | ||||
|   needs: | ||||
|     - fedora/x86_build | ||||
|  | ||||
|  | ||||
| .kernel+rootfs: | ||||
|   extends: | ||||
|     - .ci-run-policy | ||||
|   stage: container | ||||
|   variables: | ||||
|     GIT_STRATEGY: fetch | ||||
|     KERNEL_URL: *kernel-rootfs-url | ||||
|     MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG} | ||||
|     DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|   script: | ||||
|     - .gitlab-ci/container/lava_build.sh | ||||
|  | ||||
| kernel+rootfs_amd64: | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base | ||||
|     - .kernel+rootfs | ||||
|   image: "$FDO_BASE_IMAGE" | ||||
|   variables: | ||||
|     DEBIAN_ARCH: "amd64" | ||||
|     DISTRIBUTION_TAG: &distribution-tag-amd64 "${MESA_ROOTFS_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|  | ||||
| kernel+rootfs_arm64: | ||||
|   extends: | ||||
|     - .use-debian/arm_build | ||||
|     - .kernel+rootfs | ||||
|   tags: | ||||
|     - aarch64 | ||||
|   variables: | ||||
|     DEBIAN_ARCH: "arm64" | ||||
|  | ||||
| kernel+rootfs_armhf: | ||||
|   extends: | ||||
|     - kernel+rootfs_arm64 | ||||
|   variables: | ||||
|     DEBIAN_ARCH: "armhf" | ||||
|  | ||||
| # Cannot use anchors defined here from included files, so use extends: instead | ||||
| .use-kernel+rootfs-arm: | ||||
|   variables: | ||||
|     DISTRIBUTION_TAG: *distribution-tag-arm | ||||
|     MESA_ROOTFS_TAG: *kernel-rootfs | ||||
|  | ||||
| .use-kernel+rootfs-amd64: | ||||
|   variables: | ||||
|     DISTRIBUTION_TAG: *distribution-tag-amd64 | ||||
|     MESA_ROOTFS_TAG: *kernel-rootfs | ||||
|  | ||||
| # x86 image with ARM64 & armhf kernel & rootfs for baremetal testing | ||||
| debian/arm_test: | ||||
|   extends: | ||||
|     - .fdo.container-build@debian | ||||
|     - .container | ||||
|     # Don't want the .container rules | ||||
|     - .ci-run-policy | ||||
|   needs: | ||||
|     - kernel+rootfs_arm64 | ||||
|     - kernel+rootfs_armhf | ||||
|   variables: | ||||
|     FDO_DISTRIBUTION_EXEC: 'env ARTIFACTS_PREFIX=https://${MINIO_HOST}/mesa-lava ARTIFACTS_SUFFIX=${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT} CI_PROJECT_PATH=${CI_PROJECT_PATH} FDO_CI_CONCURRENT=${FDO_CI_CONCURRENT} FDO_UPSTREAM_REPO=${FDO_UPSTREAM_REPO} bash .gitlab-ci/container/${CI_JOB_NAME}.sh' | ||||
|     FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|     MESA_ARM_BUILD_TAG: *debian-arm_build | ||||
|     MESA_IMAGE_TAG: &debian-arm_test ${DEBIAN_BASE_TAG} | ||||
|     MESA_ROOTFS_TAG: *kernel-rootfs | ||||
|  | ||||
| .use-debian/arm_test: | ||||
|   image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${MESA_ARM_BUILD_TAG}--${MESA_TEMPLATES_COMMIT}" | ||||
|   variables: | ||||
|     MESA_ARM_BUILD_TAG: *debian-arm_build | ||||
|     MESA_IMAGE_PATH: "debian/arm_test" | ||||
|     MESA_IMAGE_TAG: *debian-arm_test | ||||
|     MESA_ROOTFS_TAG: *kernel-rootfs | ||||
|   needs: | ||||
|     - debian/arm_test | ||||
|  | ||||
| # Native Windows docker builds | ||||
| # | ||||
| # Unlike the above Linux-based builds - including MinGW builds which | ||||
| # cross-compile for Windows - which use the freedesktop ci-templates, we | ||||
| # cannot use the same scheme here. As Windows lacks support for | ||||
| # Docker-in-Docker, and Podman does not run natively on Windows, we have | ||||
| # to open-code much of the same ourselves. | ||||
| # | ||||
| # This is achieved by first running in a native Windows shell instance | ||||
| # (host PowerShell) in the container stage to build and push the image, | ||||
| # then in the build stage by executing inside Docker. | ||||
|  | ||||
| .windows-docker-vs2019: | ||||
|   variables: | ||||
|     MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}" | ||||
|     MESA_UPSTREAM_IMAGE: "$CI_REGISTRY/$FDO_UPSTREAM_REPO/$MESA_IMAGE_PATH:${MESA_IMAGE_TAG}" | ||||
|  | ||||
| .windows_container_build: | ||||
|   inherit: | ||||
|     default: false | ||||
|   extends: | ||||
|     - .container | ||||
|     - .windows-docker-vs2019 | ||||
|   rules: | ||||
|     - if: '$MICROSOFT_FARM == "offline"' | ||||
|       when: never | ||||
|     - !reference [.container-rules, rules] | ||||
|   variables: | ||||
|     GIT_STRATEGY: fetch # we do actually need the full repository though | ||||
|     MESA_BASE_IMAGE: None | ||||
|   tags: | ||||
|     - windows | ||||
|     - shell | ||||
|     - "1809" | ||||
|     - mesa | ||||
|   script: | ||||
|     - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE} | ||||
|  | ||||
| windows_build_vs2019: | ||||
|   inherit: | ||||
|     default: false | ||||
|   extends: | ||||
|     - .windows_container_build | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH} | ||||
|     MESA_IMAGE_TAG: &windows_build_image_tag ${WINDOWS_X64_BUILD_TAG} | ||||
|     DOCKERFILE: Dockerfile_build | ||||
|   timeout: 2h 30m # LLVM takes ages | ||||
|  | ||||
| windows_test_vs2019: | ||||
|   inherit: | ||||
|     default: false | ||||
|   extends: | ||||
|     - .windows_container_build | ||||
|   rules: | ||||
|     - if: '$MICROSOFT_FARM == "offline"' | ||||
|       when: never | ||||
|     - !reference [.ci-run-policy, rules] | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH} | ||||
|     MESA_IMAGE_TAG: &windows_test_image_tag ${WINDOWS_X64_BUILD_TAG}--${WINDOWS_X64_TEST_TAG} | ||||
|     DOCKERFILE: Dockerfile_test | ||||
|     # Right now this only needs the VS install to get DXIL.dll. Maybe see about decoupling this at some point | ||||
|     MESA_BASE_IMAGE_PATH: *windows_build_image_path | ||||
|     MESA_BASE_IMAGE_TAG: *windows_build_image_tag | ||||
|     MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}" | ||||
|   script: | ||||
|     - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE Dockerfile_test ${MESA_BASE_IMAGE} | ||||
|   needs: | ||||
|     - windows_build_vs2019 | ||||
|  | ||||
| .use-windows_build_vs2019: | ||||
|   inherit: | ||||
|     default: false | ||||
|   extends: .windows-docker-vs2019 | ||||
|   image: "$MESA_IMAGE" | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: *windows_build_image_path | ||||
|     MESA_IMAGE_TAG: *windows_build_image_tag | ||||
|   needs: | ||||
|     - windows_build_vs2019 | ||||
|  | ||||
| .use-windows_test_vs2019: | ||||
|   inherit: | ||||
|     default: false | ||||
|   extends: .windows-docker-vs2019 | ||||
|   image: "$MESA_IMAGE" | ||||
|   variables: | ||||
|     MESA_IMAGE_PATH: *windows_test_image_path | ||||
|     MESA_IMAGE_TAG: *windows_test_image_tag | ||||
| @@ -1,255 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -o xtrace | ||||
|  | ||||
| export DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| check_minio() | ||||
| { | ||||
|     MINIO_PATH="${MINIO_HOST}/mesa-lava/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}" | ||||
|     if wget -q --method=HEAD "https://${MINIO_PATH}/done"; then | ||||
|         exit | ||||
|     fi | ||||
| } | ||||
|  | ||||
| # If remote files are up-to-date, skip rebuilding them | ||||
| check_minio "${FDO_UPSTREAM_REPO}" | ||||
| check_minio "${CI_PROJECT_PATH}" | ||||
|  | ||||
| . .gitlab-ci/container/container_pre_build.sh | ||||
|  | ||||
| # Install rust, which we'll be using for deqp-runner.  It will be cleaned up at the end. | ||||
| . .gitlab-ci/container/build-rust.sh | ||||
|  | ||||
| if [[ "$DEBIAN_ARCH" = "arm64" ]]; then | ||||
|     GCC_ARCH="aarch64-linux-gnu" | ||||
|     KERNEL_ARCH="arm64" | ||||
|     DEFCONFIG="arch/arm64/configs/defconfig" | ||||
|     DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dtb" | ||||
|     DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dtb" | ||||
|     KERNEL_IMAGE_NAME="Image" | ||||
| elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then | ||||
|     GCC_ARCH="arm-linux-gnueabihf" | ||||
|     KERNEL_ARCH="arm" | ||||
|     DEFCONFIG="arch/arm/configs/multi_v7_defconfig" | ||||
|     DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb" | ||||
|     DEVICE_TREES+=" arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb" | ||||
|     DEVICE_TREES+=" arch/arm/boot/dts/imx6q-cubox-i.dtb" | ||||
|     KERNEL_IMAGE_NAME="zImage" | ||||
|     . .gitlab-ci/container/create-cross-file.sh armhf | ||||
| else | ||||
|     GCC_ARCH="x86_64-linux-gnu" | ||||
|     KERNEL_ARCH="x86_64" | ||||
|     DEFCONFIG="arch/x86/configs/x86_64_defconfig" | ||||
|     DEVICE_TREES="" | ||||
|     KERNEL_IMAGE_NAME="bzImage" | ||||
|     ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols" | ||||
| fi | ||||
|  | ||||
| # Determine if we're in a cross build. | ||||
| if [[ -e /cross_file-$DEBIAN_ARCH.txt ]]; then | ||||
|     EXTRA_MESON_ARGS="--cross-file /cross_file-$DEBIAN_ARCH.txt" | ||||
|     EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/toolchain-$DEBIAN_ARCH.cmake" | ||||
|  | ||||
|     if [ $DEBIAN_ARCH = arm64 ]; then | ||||
|         RUST_TARGET="aarch64-unknown-linux-gnu" | ||||
|     elif [ $DEBIAN_ARCH = armhf ]; then | ||||
|         RUST_TARGET="armv7-unknown-linux-gnueabihf" | ||||
|     fi | ||||
|     rustup target add $RUST_TARGET | ||||
|     export EXTRA_CARGO_ARGS="--target $RUST_TARGET" | ||||
|  | ||||
|     export ARCH=${KERNEL_ARCH} | ||||
|     export CROSS_COMPILE="${GCC_ARCH}-" | ||||
| fi | ||||
|  | ||||
| apt-get update | ||||
| apt-get install -y --no-remove \ | ||||
|                    ${ARCH_PACKAGES} \ | ||||
|                    automake \ | ||||
|                    bc \ | ||||
|                    clang \ | ||||
|                    cmake \ | ||||
|                    debootstrap \ | ||||
|                    git \ | ||||
|                    glslang-tools \ | ||||
|                    libdrm-dev \ | ||||
|                    libegl1-mesa-dev \ | ||||
|                    libxext-dev \ | ||||
|                    libfontconfig-dev \ | ||||
|                    libgbm-dev \ | ||||
|                    libgl-dev \ | ||||
|                    libgles2-mesa-dev \ | ||||
|                    libglu1-mesa-dev \ | ||||
|                    libglx-dev \ | ||||
|                    libpng-dev \ | ||||
|                    libssl-dev \ | ||||
|                    libudev-dev \ | ||||
|                    libvulkan-dev \ | ||||
|                    libwaffle-dev \ | ||||
|                    libwayland-dev \ | ||||
|                    libx11-xcb-dev \ | ||||
|                    libxcb-dri2-0-dev \ | ||||
|                    libxkbcommon-dev \ | ||||
|                    ninja-build \ | ||||
|                    patch \ | ||||
|                    python-is-python3 \ | ||||
|                    python3-distutils \ | ||||
|                    python3-mako \ | ||||
|                    python3-numpy \ | ||||
|                    python3-serial \ | ||||
|                    unzip \ | ||||
|                    wget | ||||
|  | ||||
|  | ||||
| if [[ "$DEBIAN_ARCH" = "armhf" ]]; then | ||||
|     apt-get install -y --no-remove \ | ||||
|                        libegl1-mesa-dev:armhf \ | ||||
|                        libelf-dev:armhf \ | ||||
|                        libgbm-dev:armhf \ | ||||
|                        libgles2-mesa-dev:armhf \ | ||||
|                        libpng-dev:armhf \ | ||||
|                        libudev-dev:armhf \ | ||||
|                        libvulkan-dev:armhf \ | ||||
|                        libwaffle-dev:armhf \ | ||||
|                        libwayland-dev:armhf \ | ||||
|                        libx11-xcb-dev:armhf \ | ||||
|                        libxkbcommon-dev:armhf | ||||
| fi | ||||
|  | ||||
|  | ||||
| ############### Building | ||||
| STRIP_CMD="${GCC_ARCH}-strip" | ||||
| mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH | ||||
|  | ||||
|  | ||||
| ############### Build apitrace | ||||
| . .gitlab-ci/container/build-apitrace.sh | ||||
| mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/apitrace | ||||
| mv /apitrace/build /lava-files/rootfs-${DEBIAN_ARCH}/apitrace | ||||
| rm -rf /apitrace | ||||
|  | ||||
|  | ||||
| ############### Build dEQP runner | ||||
| . .gitlab-ci/container/build-deqp-runner.sh | ||||
| mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin | ||||
| mv /usr/local/bin/*-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/. | ||||
|  | ||||
|  | ||||
| ############### Build dEQP | ||||
| DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh | ||||
|  | ||||
| mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/. | ||||
|  | ||||
|  | ||||
| ############### Build SKQP | ||||
| if [[ "$DEBIAN_ARCH" = "arm64" ]]; then | ||||
|     SKQP_ARCH="arm64" . .gitlab-ci/container/build-skqp.sh | ||||
|     mv /skqp /lava-files/rootfs-${DEBIAN_ARCH}/. | ||||
| fi | ||||
|  | ||||
|  | ||||
| ############### Build piglit | ||||
| PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh | ||||
| mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/. | ||||
|  | ||||
| ############### Build libva tests | ||||
| if [[ "$DEBIAN_ARCH" = "amd64" ]]; then | ||||
|     . .gitlab-ci/container/build-va-tools.sh | ||||
|     mv /va/bin/* /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/ | ||||
| fi | ||||
|  | ||||
| ############### Build Crosvm | ||||
| if [[ ${DEBIAN_ARCH} = "amd64" ]]; then | ||||
|     . .gitlab-ci/container/build-crosvm.sh | ||||
|     mv /usr/local/bin/crosvm /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/ | ||||
|     mv /usr/local/lib/$GCC_ARCH/libvirglrenderer.* /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/ | ||||
| fi | ||||
|  | ||||
| ############### Build libdrm | ||||
| EXTRA_MESON_ARGS+=" -D prefix=/libdrm" | ||||
| . .gitlab-ci/container/build-libdrm.sh | ||||
|  | ||||
|  | ||||
| ############### Build local stuff for use by igt and kernel testing, which | ||||
| ############### will reuse most of our container build process from a specific | ||||
| ############### hash of the Mesa tree. | ||||
| if [[ -e ".gitlab-ci/local/build-rootfs.sh" ]]; then | ||||
|     . .gitlab-ci/local/build-rootfs.sh | ||||
| fi | ||||
|  | ||||
|  | ||||
| ############### Build kernel | ||||
| . .gitlab-ci/container/build-kernel.sh | ||||
|  | ||||
| ############### Delete rust, since the tests won't be compiling anything. | ||||
| rm -rf /root/.cargo | ||||
| rm -rf /root/.rustup | ||||
|  | ||||
| ############### Create rootfs | ||||
| set +e | ||||
| if ! debootstrap \ | ||||
|      --variant=minbase \ | ||||
|      --arch=${DEBIAN_ARCH} \ | ||||
|      --components main,contrib,non-free \ | ||||
|      bullseye \ | ||||
|      /lava-files/rootfs-${DEBIAN_ARCH}/ \ | ||||
|      http://deb.debian.org/debian; then | ||||
|     cat /lava-files/rootfs-${DEBIAN_ARCH}/debootstrap/debootstrap.log | ||||
|     exit 1 | ||||
| fi | ||||
| set -e | ||||
|  | ||||
| cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/. | ||||
| chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh | ||||
| rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh | ||||
|  | ||||
|  | ||||
| ############### Install the built libdrm | ||||
| # Dependencies pulled during the creation of the rootfs may overwrite | ||||
| # the built libdrm. Hence, we add it after the rootfs has been already | ||||
| # created. | ||||
| find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/. | ||||
| mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/ | ||||
| cp -Rp /libdrm/share /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/share | ||||
| rm -rf /libdrm | ||||
|  | ||||
|  | ||||
| if [ ${DEBIAN_ARCH} = arm64 ]; then | ||||
|     # Make a gzipped copy of the Image for db410c. | ||||
|     gzip -k /lava-files/Image | ||||
|     KERNEL_IMAGE_NAME+=" Image.gz" | ||||
| fi | ||||
|  | ||||
| du -ah /lava-files/rootfs-${DEBIAN_ARCH} | sort -h | tail -100 | ||||
| pushd /lava-files/rootfs-${DEBIAN_ARCH} | ||||
|   tar czf /lava-files/lava-rootfs.tgz . | ||||
| popd | ||||
|  | ||||
| . .gitlab-ci/container/container_post_build.sh | ||||
|  | ||||
| ############### Upload the files! | ||||
| ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}" | ||||
| FILES_TO_UPLOAD="lava-rootfs.tgz \ | ||||
|                  $KERNEL_IMAGE_NAME" | ||||
|  | ||||
| if [[ -n $DEVICE_TREES ]]; then | ||||
|     FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)" | ||||
| fi | ||||
|  | ||||
| for f in $FILES_TO_UPLOAD; do | ||||
|     ci-fairy minio cp /lava-files/$f \ | ||||
|              minio://${MINIO_PATH}/$f | ||||
| done | ||||
|  | ||||
| touch /lava-files/done | ||||
| ci-fairy minio cp /lava-files/done minio://${MINIO_PATH}/done | ||||
| @@ -1,105 +0,0 @@ | ||||
| CONFIG_LOCALVERSION_AUTO=y | ||||
| CONFIG_DEBUG_KERNEL=y | ||||
|  | ||||
| CONFIG_PWM=y | ||||
| CONFIG_PM_DEVFREQ=y | ||||
| CONFIG_OF=y | ||||
| CONFIG_CROS_EC=y | ||||
|  | ||||
| # abootimg with a 'dummy' rootfs fails with root=/dev/nfs | ||||
| CONFIG_BLK_DEV_INITRD=n | ||||
|  | ||||
| CONFIG_DEVFREQ_GOV_PERFORMANCE=y | ||||
| CONFIG_DEVFREQ_GOV_POWERSAVE=y | ||||
| CONFIG_DEVFREQ_GOV_USERSPACE=y | ||||
| CONFIG_DEVFREQ_GOV_PASSIVE=y | ||||
|  | ||||
| CONFIG_DRM=y | ||||
| CONFIG_DRM_PANEL_SIMPLE=y | ||||
| CONFIG_PWM_CROS_EC=y | ||||
| CONFIG_BACKLIGHT_PWM=y | ||||
|  | ||||
| # Strip out some stuff we don't need for graphics testing, to reduce | ||||
| # the build. | ||||
| CONFIG_CAN=n | ||||
| CONFIG_WIRELESS=n | ||||
| CONFIG_RFKILL=n | ||||
| CONFIG_WLAN=n | ||||
|  | ||||
| CONFIG_REGULATOR_FAN53555=y | ||||
| CONFIG_REGULATOR=y | ||||
|  | ||||
| CONFIG_REGULATOR_VCTRL=y | ||||
|  | ||||
| CONFIG_KASAN=n | ||||
| CONFIG_KASAN_INLINE=n | ||||
| CONFIG_STACKTRACE=n | ||||
|  | ||||
| CONFIG_TMPFS=y | ||||
|  | ||||
| CONFIG_PROVE_LOCKING=n | ||||
| CONFIG_DEBUG_LOCKDEP=n | ||||
| CONFIG_SOFTLOCKUP_DETECTOR=y | ||||
| CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y | ||||
|  | ||||
| CONFIG_DETECT_HUNG_TASK=y | ||||
|  | ||||
| CONFIG_USB_USBNET=y | ||||
| CONFIG_NETDEVICES=y | ||||
| CONFIG_USB_NET_DRIVERS=y | ||||
| CONFIG_USB_RTL8152=y | ||||
| CONFIG_USB_NET_AX8817X=y | ||||
| CONFIG_USB_NET_SMSC95XX=y | ||||
| CONFIG_USB_GADGET=y | ||||
| CONFIG_USB_ETH=y | ||||
|  | ||||
| CONFIG_FW_LOADER_COMPRESS=y | ||||
|  | ||||
| # options for AMD devices | ||||
| CONFIG_X86_AMD_PLATFORM_DEVICE=y | ||||
| CONFIG_ACPI_VIDEO=y | ||||
| CONFIG_X86_AMD_FREQ_SENSITIVITY=y | ||||
| CONFIG_PINCTRL=y | ||||
| CONFIG_PINCTRL_AMD=y | ||||
| CONFIG_DRM_AMDGPU=m | ||||
| CONFIG_DRM_AMDGPU_SI=y | ||||
| CONFIG_DRM_AMDGPU_USERPTR=y | ||||
| CONFIG_DRM_AMD_ACP=n | ||||
| CONFIG_ACPI_WMI=y | ||||
| CONFIG_MXM_WMI=y | ||||
| CONFIG_PARPORT=y | ||||
| CONFIG_PARPORT_PC=y | ||||
| CONFIG_PARPORT_SERIAL=y | ||||
| CONFIG_SERIAL_8250_DW=y | ||||
| CONFIG_CHROME_PLATFORMS=y | ||||
| CONFIG_KVM_AMD=m | ||||
|  | ||||
| #options for Intel devices | ||||
| CONFIG_MFD_INTEL_LPSS_PCI=y | ||||
| CONFIG_KVM_INTEL=m | ||||
|  | ||||
| #options for KVM guests | ||||
| CONFIG_FUSE_FS=y | ||||
| CONFIG_HYPERVISOR_GUEST=y | ||||
| CONFIG_KVM=y | ||||
| CONFIG_KVM_GUEST=y | ||||
| CONFIG_VIRT_DRIVERS=y | ||||
| CONFIG_VIRTIO_FS=y | ||||
| CONFIG_DRM_VIRTIO_GPU=y | ||||
| CONFIG_SERIAL_8250_CONSOLE=y | ||||
| CONFIG_VIRTIO_NET=y | ||||
| CONFIG_VIRTIO_CONSOLE=y | ||||
| CONFIG_PARAVIRT=y | ||||
| CONFIG_VIRTIO_BLK=y | ||||
| CONFIG_VIRTUALIZATION=y | ||||
| CONFIG_VIRTIO=y | ||||
| CONFIG_VIRTIO_PCI=y | ||||
| CONFIG_VIRTIO_MMIO=y | ||||
| CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y | ||||
| CONFIG_CRYPTO_DEV_VIRTIO=y | ||||
| CONFIG_HW_RANDOM_VIRTIO=y | ||||
| CONFIG_BLK_MQ_VIRTIO=y | ||||
| CONFIG_TUN=y | ||||
| CONFIG_VSOCKETS=y | ||||
| CONFIG_VIRTIO_VSOCKETS=y | ||||
| CONFIG_VHOST_VSOCK=m | ||||
| @@ -1 +0,0 @@ | ||||
| lp_test_arit | ||||
| @@ -1 +0,0 @@ | ||||
| lp_test_format | ||||
| @@ -1,42 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -e | ||||
|  | ||||
| VSOCK_STDOUT=$1 | ||||
| VSOCK_STDERR=$2 | ||||
| VSOCK_TEMP_DIR=$3 | ||||
|  | ||||
| mount -t proc none /proc | ||||
| mount -t sysfs none /sys | ||||
| mkdir -p /dev/pts | ||||
| mount -t devpts devpts /dev/pts | ||||
| mount -t tmpfs tmpfs /tmp | ||||
|  | ||||
| . ${VSOCK_TEMP_DIR}/crosvm-env.sh | ||||
|  | ||||
| # .gitlab-ci.yml script variable is using relative paths to install directory, | ||||
| # so change to that dir before running `crosvm-script` | ||||
| cd "${CI_PROJECT_DIR}" | ||||
|  | ||||
| # The exception is the dEQP binary, as it needs to run from its own directory | ||||
| [ -z "${DEQP_BIN_DIR}" ] || cd "${DEQP_BIN_DIR}" | ||||
|  | ||||
| # Use a FIFO to collect relevant error messages | ||||
| STDERR_FIFO=/tmp/crosvm-stderr.fifo | ||||
| mkfifo -m 600 ${STDERR_FIFO} | ||||
|  | ||||
| dmesg --level crit,err,warn -w > ${STDERR_FIFO} & | ||||
| DMESG_PID=$! | ||||
|  | ||||
| # Transfer the errors and crosvm-script output via a pair of virtio-vsocks | ||||
| socat -d -u pipe:${STDERR_FIFO} vsock-listen:${VSOCK_STDERR} & | ||||
| socat -d -U vsock-listen:${VSOCK_STDOUT} \ | ||||
|     system:"stdbuf -eL sh ${VSOCK_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VSOCK_TEMP_DIR}/exit_code",nofork | ||||
|  | ||||
| kill ${DMESG_PID} | ||||
| wait | ||||
|  | ||||
| sync | ||||
| poweroff -d -n -f || true | ||||
|  | ||||
| sleep 1   # Just in case init would exit before the kernel shuts down the VM | ||||
| @@ -1,125 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| set -e | ||||
|  | ||||
| # | ||||
| # Helper to generate CIDs for virtio-vsock based communication with processes | ||||
| # running inside crosvm guests. | ||||
| # | ||||
| # A CID is a 32-bit Context Identifier to be assigned to a crosvm instance | ||||
| # and must be unique across the host system. For this purpose, let's take | ||||
| # the least significant 25 bits from CI_JOB_ID as a base and generate a 7-bit | ||||
| # prefix number to handle up to 128 concurrent crosvm instances per job runner. | ||||
| # | ||||
| # As a result, the following variables are set: | ||||
| #  - VSOCK_CID: the crosvm unique CID to be passed as a run argument | ||||
| # | ||||
| #  - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept | ||||
| #    vsock connections on in order to transfer output messages | ||||
| # | ||||
| #  - VSOCK_TEMP_DIR: the temporary directory path used to pass additional | ||||
| #    context data towards the guest | ||||
| # | ||||
| set_vsock_context() { | ||||
|     [ -n "${CI_JOB_ID}" ] || { | ||||
|         echo "Missing or unset CI_JOB_ID env variable" >&2 | ||||
|         exit 1 | ||||
|     } | ||||
|  | ||||
|     local dir_prefix="/tmp-vsock." | ||||
|     local cid_prefix=0 | ||||
|     unset VSOCK_TEMP_DIR | ||||
|  | ||||
|     while [ ${cid_prefix} -lt 128 ]; do | ||||
|         VSOCK_TEMP_DIR=${dir_prefix}${cid_prefix} | ||||
|         mkdir "${VSOCK_TEMP_DIR}" >/dev/null 2>&1 && break || unset VSOCK_TEMP_DIR | ||||
|         cid_prefix=$((cid_prefix + 1)) | ||||
|     done | ||||
|  | ||||
|     [ -n "${VSOCK_TEMP_DIR}" ] || return 1 | ||||
|  | ||||
|     VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((cid_prefix & 0x7f) << 25))) | ||||
|     VSOCK_STDOUT=5001 | ||||
|     VSOCK_STDERR=5002 | ||||
|  | ||||
|     return 0 | ||||
| } | ||||
|  | ||||
| # The dEQP binary needs to run from the directory it's in | ||||
| if [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then | ||||
|     DEQP_BIN_DIR=$(dirname "$1") | ||||
|     export DEQP_BIN_DIR | ||||
| fi | ||||
|  | ||||
| set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; } | ||||
|  | ||||
| # Ensure cleanup on script exit | ||||
| trap 'exit ${exit_code}' INT TERM | ||||
| trap 'exit_code=$?; [ -z "${CROSVM_PID}${SOCAT_PIDS}" ] || kill ${CROSVM_PID} ${SOCAT_PIDS} >/dev/null 2>&1 || true; rm -rf ${VSOCK_TEMP_DIR}' EXIT | ||||
|  | ||||
| # Securely pass the current variables to the crosvm environment | ||||
| echo "Variables passed through:" | ||||
| SCRIPT_DIR=$(readlink -en "${0%/*}") | ||||
| ${SCRIPT_DIR}/common/generate-env.sh | tee ${VSOCK_TEMP_DIR}/crosvm-env.sh | ||||
|  | ||||
| # Set the crosvm-script as the arguments of the current script | ||||
| echo "$@" > ${VSOCK_TEMP_DIR}/crosvm-script.sh | ||||
|  | ||||
| # Setup networking | ||||
| /usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE | ||||
| echo 1 > /proc/sys/net/ipv4/ip_forward | ||||
|  | ||||
| # Start background processes to receive output from guest | ||||
| socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr & | ||||
| SOCAT_PIDS=$! | ||||
| socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout & | ||||
| SOCAT_PIDS="${SOCAT_PIDS} $!" | ||||
|  | ||||
| # Prepare to start crosvm | ||||
| unset DISPLAY | ||||
| unset XDG_RUNTIME_DIR | ||||
|  | ||||
| CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0" | ||||
| CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VSOCK_TEMP_DIR}" | ||||
|  | ||||
| [ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \ | ||||
|     CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false | ||||
|  | ||||
| set +e -x | ||||
|  | ||||
| # We aren't testing the host driver here, so we don't need to validate NIR on the host | ||||
| NIR_DEBUG="novalidate" \ | ||||
| LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \ | ||||
| GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \ | ||||
| crosvm run \ | ||||
|     --gpu "${CROSVM_GPU_ARGS}" -m 4096 -c 2 --disable-sandbox \ | ||||
|     --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \ | ||||
|     --host_ip "192.168.30.1" --netmask "255.255.255.0" --mac "AA:BB:CC:00:00:12" \ | ||||
|     --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \ | ||||
|     /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VSOCK_TEMP_DIR}/crosvm 2>&1 & | ||||
|  | ||||
| # Wait for crosvm process to terminate | ||||
| CROSVM_PID=$! | ||||
| wait ${CROSVM_PID} | ||||
| CROSVM_RET=$? | ||||
| unset CROSVM_PID | ||||
|  | ||||
| [ ${CROSVM_RET} -eq 0 ] && { | ||||
|     # socat background processes terminate gracefully on remote peers exit | ||||
|     wait | ||||
|     unset SOCAT_PIDS | ||||
|     # The actual return code is the crosvm guest script's exit code | ||||
|     CROSVM_RET=$(cat ${VSOCK_TEMP_DIR}/exit_code 2>/dev/null) | ||||
|     # Force error when the guest script's exit code is not available | ||||
|     CROSVM_RET=${CROSVM_RET:-1} | ||||
| } | ||||
|  | ||||
| # Show crosvm output on error to help with debugging | ||||
| [ ${CROSVM_RET} -eq 0 ] || { | ||||
|     set +x | ||||
|     echo "Dumping crosvm output.." >&2 | ||||
|     cat ${VSOCK_TEMP_DIR}/crosvm >&2 | ||||
|     set -x | ||||
| } | ||||
|  | ||||
| exit ${CROSVM_RET} | ||||
| @@ -1,242 +0,0 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| echo -e "\e[0Ksection_start:$(date +%s):test_setup[collapsed=true]\r\e[0Kpreparing test setup" | ||||
|  | ||||
| set -ex | ||||
|  | ||||
| # Needed so configuration files can contain paths to files in /install | ||||
| ln -sf $CI_PROJECT_DIR/install /install | ||||
|  | ||||
| if [ -z "$GPU_VERSION" ]; then | ||||
|    echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/gpu-version-*.txt)' | ||||
|    exit 1 | ||||
| fi | ||||
|  | ||||
| INSTALL=`pwd`/install | ||||
|  | ||||
| # Set up the driver environment. | ||||
| export LD_LIBRARY_PATH=`pwd`/install/lib/ | ||||
| export EGL_PLATFORM=surfaceless | ||||
| export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json | ||||
|  | ||||
| RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results} | ||||
| mkdir -p $RESULTS | ||||
|  | ||||
| # Ensure Mesa Shader Cache resides on tmpfs. | ||||
| SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache} | ||||
| SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache} | ||||
|  | ||||
| findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || { | ||||
|     mkdir -p ${SHADER_CACHE_DIR} | ||||
|     mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR} | ||||
| } | ||||
|  | ||||
| HANG_DETECTION_CMD="" | ||||
|  | ||||
| if [ -z "$DEQP_SUITE" ]; then | ||||
|     if [ -z "$DEQP_VER" ]; then | ||||
|         echo 'DEQP_SUITE must be set to the name of your deqp-gpu_version.toml, or DEQP_VER must be set to something like "gles2", "gles31-khr" or "vk" for the test run' | ||||
|         exit 1 | ||||
|     fi | ||||
|  | ||||
|     DEQP_WIDTH=${DEQP_WIDTH:-256} | ||||
|     DEQP_HEIGHT=${DEQP_HEIGHT:-256} | ||||
|     DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0} | ||||
|     DEQP_VARIANT=${DEQP_VARIANT:-master} | ||||
|  | ||||
|     DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT" | ||||
|     DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=${DEQP_SURFACE_TYPE:-pbuffer}" | ||||
|     DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG" | ||||
|     DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden" | ||||
|  | ||||
|     if [ "$DEQP_VER" = "vk" -a -z "$VK_DRIVER" ]; then | ||||
|         echo 'VK_DRIVER must be to something like "radeon" or "intel" for the test run' | ||||
|         exit 1 | ||||
|     fi | ||||
|  | ||||
|     # Generate test case list file. | ||||
|     if [ "$DEQP_VER" = "vk" ]; then | ||||
|        MUSTPASS=/deqp/mustpass/vk-$DEQP_VARIANT.txt | ||||
|        DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk | ||||
|        HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection" | ||||
|     elif [ "$DEQP_VER" = "gles2" -o "$DEQP_VER" = "gles3" -o "$DEQP_VER" = "gles31" -o "$DEQP_VER" = "egl" ]; then | ||||
|        MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt | ||||
|        DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER | ||||
|     elif [ "$DEQP_VER" = "gles2-khr" -o "$DEQP_VER" = "gles3-khr" -o "$DEQP_VER" = "gles31-khr" -o "$DEQP_VER" = "gles32-khr" ]; then | ||||
|        MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt | ||||
|        DEQP=/deqp/external/openglcts/modules/glcts | ||||
|     else | ||||
|        MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt | ||||
|        DEQP=/deqp/external/openglcts/modules/glcts | ||||
|     fi | ||||
|  | ||||
|     cp $MUSTPASS /tmp/case-list.txt | ||||
|  | ||||
|     # If the caselist is too long to run in a reasonable amount of time, let the job | ||||
|     # specify what fraction (1/n) of the caselist we should run.  Note: N~M is a gnu | ||||
|     # sed extension to match every nth line (first line is #1). | ||||
|     if [ -n "$DEQP_FRACTION" ]; then | ||||
|        sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt | ||||
|     fi | ||||
|  | ||||
    # If the job is parallel at the GitLab job level, take the corresponding fraction
|     # of the caselist. | ||||
|     if [ -n "$CI_NODE_INDEX" ]; then | ||||
|        sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt | ||||
|     fi | ||||
|  | ||||
|     if [ -n "$DEQP_CASELIST_FILTER" ]; then | ||||
|         sed -ni "/$DEQP_CASELIST_FILTER/p" /tmp/case-list.txt | ||||
|     fi | ||||
|  | ||||
|     if [ -n "$DEQP_CASELIST_INV_FILTER" ]; then | ||||
|         sed -ni "/$DEQP_CASELIST_INV_FILTER/!p" /tmp/case-list.txt | ||||
|     fi | ||||
|  | ||||
|     if [ ! -s /tmp/case-list.txt ]; then | ||||
|         echo "Caselist generation failed" | ||||
|         exit 1 | ||||
|     fi | ||||
| fi | ||||
|  | ||||
| if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then | ||||
|     DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt" | ||||
| fi | ||||
|  | ||||
| # Default to an empty known flakes file if it doesn't exist. | ||||
| touch $INSTALL/$GPU_VERSION-flakes.txt | ||||
|  | ||||
|  | ||||
| if [ -n "$VK_DRIVER" ] && [ -e "$INSTALL/$VK_DRIVER-skips.txt" ]; then | ||||
|     DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$VK_DRIVER-skips.txt" | ||||
| fi | ||||
|  | ||||
| if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then | ||||
|     DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GALLIUM_DRIVER-skips.txt" | ||||
| fi | ||||
|  | ||||
| if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then | ||||
|     DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$DRIVER_NAME-skips.txt" | ||||
| fi | ||||
|  | ||||
| if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then | ||||
|     DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt" | ||||
| fi | ||||
|  | ||||
report_load() {
    # Log the 1/5/15-minute load averages and the CPU core count, to help
    # correlate flaky test results with machine load.
    echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
    # grep -c replaces the original `cat | grep | wc -l` pipeline (useless
    # use of cat); the printed count is identical.
    echo "# of CPU cores: $(grep -c processor /proc/cpuinfo)"
}
|  | ||||
# wrapper to suppress xtrace (+x) output, to avoid spamming the log
quiet() {
    # Run a command with xtrace temporarily disabled, then restore it.
    set +x
    "$@"
    # Preserve the wrapped command's exit status: without this, the final
    # `set -x` would make quiet always return 0 to callers.
    local ret=$?
    set -x
    return $ret
}
|  | ||||
| if [ "$GALLIUM_DRIVER" = "virpipe" ]; then | ||||
    # deqp is set to use virpipe, while virgl_test_server uses llvmpipe
|     export GALLIUM_DRIVER="$GALLIUM_DRIVER" | ||||
|  | ||||
|     VTEST_ARGS="--use-egl-surfaceless" | ||||
|     if [ "$VIRGL_HOST_API" = "GLES" ]; then | ||||
|         VTEST_ARGS="$VTEST_ARGS --use-gles" | ||||
|     fi | ||||
|  | ||||
|     GALLIUM_DRIVER=llvmpipe \ | ||||
|     virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 & | ||||
|  | ||||
|     sleep 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$DEQP_SUITE" ]; then | ||||
|     if [ -n "$DEQP_EXPECTED_RENDERER" ]; then | ||||
|         export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --renderer-check "$DEQP_EXPECTED_RENDERER"" | ||||
|     fi | ||||
|     if [ $DEQP_VER != vk -a $DEQP_VER != egl ]; then | ||||
|         export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check `cat $INSTALL/VERSION | sed 's/[() ]/./g'`" | ||||
|     fi | ||||
| fi | ||||
|  | ||||
| set +x | ||||
| echo -e "\e[0Ksection_end:$(date +%s):test_setup\r\e[0K" | ||||
|  | ||||
| echo -e "\e[0Ksection_start:$(date +%s):deqp[collapsed=false]\r\e[0Kdeqp-runner" | ||||
| set -x | ||||
|  | ||||
| set +e | ||||
| if [ -z "$DEQP_SUITE" ]; then | ||||
|     deqp-runner \ | ||||
|         run \ | ||||
|         --deqp $DEQP \ | ||||
|         --output $RESULTS \ | ||||
|         --caselist /tmp/case-list.txt \ | ||||
|         --skips $INSTALL/all-skips.txt $DEQP_SKIPS \ | ||||
|         --flakes $INSTALL/$GPU_VERSION-flakes.txt \ | ||||
|         --testlog-to-xml /deqp/executor/testlog-to-xml \ | ||||
|         --jobs ${FDO_CI_CONCURRENT:-4} \ | ||||
| 	$DEQP_RUNNER_OPTIONS \ | ||||
|         -- \ | ||||
|         $DEQP_OPTIONS | ||||
| else | ||||
|     deqp-runner \ | ||||
|         suite \ | ||||
|         --suite $INSTALL/deqp-$DEQP_SUITE.toml \ | ||||
|         --output $RESULTS \ | ||||
|         --skips $INSTALL/all-skips.txt $DEQP_SKIPS \ | ||||
|         --flakes $INSTALL/$GPU_VERSION-flakes.txt \ | ||||
|         --testlog-to-xml /deqp/executor/testlog-to-xml \ | ||||
|         --fraction-start $CI_NODE_INDEX \ | ||||
|         --fraction `expr $CI_NODE_TOTAL \* ${DEQP_FRACTION:-1}` \ | ||||
|         --jobs ${FDO_CI_CONCURRENT:-4} \ | ||||
| 	$DEQP_RUNNER_OPTIONS | ||||
| fi | ||||
|  | ||||
| DEQP_EXITCODE=$? | ||||
|  | ||||
| set +x | ||||
| echo -e "\e[0Ksection_end:$(date +%s):deqp\r\e[0K" | ||||
|  | ||||
| report_load | ||||
|  | ||||
| echo -e "\e[0Ksection_start:$(date +%s):test_post_process[collapsed=true]\r\e[0Kpost-processing test results" | ||||
| set -x | ||||
|  | ||||
| # Remove all but the first 50 individual XML files uploaded as artifacts, to | ||||
| # save fd.o space when you break everything. | ||||
| find $RESULTS -name \*.xml | \ | ||||
|     sort -n | | ||||
|     sed -n '1,+49!p' | \ | ||||
|     xargs rm -f | ||||
|  | ||||
| # If any QPA XMLs are there, then include the XSL/CSS in our artifacts. | ||||
| find $RESULTS -name \*.xml \ | ||||
|     -exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \ | ||||
|     -quit | ||||
|  | ||||
| deqp-runner junit \ | ||||
|    --testsuite dEQP \ | ||||
|    --results $RESULTS/failures.csv \ | ||||
|    --output $RESULTS/junit.xml \ | ||||
|    --limit 50 \ | ||||
|    --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml" | ||||
|  | ||||
| # Report the flakes to the IRC channel for monitoring (if configured): | ||||
| if [ -n "$FLAKES_CHANNEL" ]; then | ||||
|   python3 $INSTALL/report-flakes.py \ | ||||
|          --host irc.oftc.net \ | ||||
|          --port 6667 \ | ||||
|          --results $RESULTS/results.csv \ | ||||
|          --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \ | ||||
|          --channel "$FLAKES_CHANNEL" \ | ||||
|          --runner "$CI_RUNNER_DESCRIPTION" \ | ||||
|          --job "$CI_JOB_ID" \ | ||||
|          --url "$CI_JOB_URL" \ | ||||
|          --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \ | ||||
|          --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" | ||||
| fi | ||||
|  | ||||
| echo -e "\e[0Ksection_end:$(date +%s):test_post_process\r\e[0K" | ||||
|  | ||||
| exit $DEQP_EXITCODE | ||||
| @@ -1 +0,0 @@ | ||||
| ../docs/ci | ||||
| @@ -1,36 +0,0 @@ | ||||
#!/bin/bash

# Download a cached tarball of the project's git repository (from MinIO)
# to speed up checkouts on fresh runners.  Exits successfully without
# doing anything when the repo is already present or no cache exists.

set +e
set -o xtrace

# if we run this script outside of gitlab-ci for testing, ensure
# we got meaningful variables
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/$CI_PROJECT_NAME}

if [[ -e $CI_PROJECT_DIR/.git ]]
then
    echo "Repository already present, skip cache download"
    exit
fi

TMP_DIR=$(mktemp -d)

echo "Downloading archived master..."
# Errors are tolerated here (set +e above); the $? check right below
# decides whether a cache tarball was actually fetched.
/usr/bin/wget -O $TMP_DIR/$CI_PROJECT_NAME.tar.gz \
              https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz

# check wget error code
if [[ $? -ne 0 ]]
then
    echo "Repository cache not available"
    exit
fi

# From here on any failure is fatal: we are about to replace the existing
# project directory with the extracted cache.
set -e

rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"
| @@ -1,20 +0,0 @@ | ||||
#!/bin/sh

# Replay Fossilize pipeline databases against the built Vulkan driver
# (via fossils.sh) to catch shader/pipeline compilation regressions.

set -ex

if [ -z "$VK_DRIVER" ]; then
   # Fixed message: was missing the word "set" ("must be to something").
   echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
   exit 1
fi

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.x86_64.json

# To store Fossilize logs on failure.
RESULTS=`pwd`/results
# Create the directory via $RESULTS so the path stays consistent even if
# the variable definition above ever changes.
mkdir -p "$RESULTS"

"$INSTALL/fossils/fossils.sh" "$INSTALL/fossils.yml" "$RESULTS"
| @@ -1,10 +0,0 @@ | ||||
| fossils-db: | ||||
|   repo: "https://gitlab.freedesktop.org/hakzsam/fossils-db" | ||||
|   commit: "5626cedcb58bd95a7b79a9664651818aea92b21c" | ||||
|  | ||||
| fossils: | ||||
|   - path: sascha-willems/database.foz | ||||
|   - path: parallel-rdp/small_subgroup.foz | ||||
|   - path: parallel-rdp/small_uber_subgroup.foz | ||||
|   - path: parallel-rdp/subgroup.foz | ||||
|   - path: parallel-rdp/uber_subgroup.foz | ||||
| @@ -1,77 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| FOSSILS_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" | ||||
| FOSSILS_YAML="$(readlink -f "$1")" | ||||
| FOSSILS_RESULTS="$2" | ||||
|  | ||||
clone_fossils_db()
{
    # Clone the fossils database repo at a pinned commit.  --no-checkout
    # avoids materializing the (large, LFS-tracked) fossil files; the
    # `git reset` only moves HEAD/index so `git lfs pull` can later fetch
    # fossils one at a time (see fetch_fossil).
    local repo="$1"
    local commit="$2"
    rm -rf fossils-db
    git clone --no-checkout "$repo" fossils-db
    # Try the commit as given first; fall back to resolving it as a
    # remote ref (e.g. a branch name -> "origin/<name>").
    (cd fossils-db; git reset "$commit" || git reset "origin/$commit")
}
|  | ||||
query_fossils_yaml()
{
    # Forward a query sub-command (fossils_db_repo, fossils_db_commit,
    # fossils) to the Python YAML helper next to this script.
    python3 "${FOSSILS_SCRIPT_DIR}/query_fossils_yaml.py" --file "${FOSSILS_YAML}" "$@"
}
|  | ||||
create_clean_git()
{
    # Snapshot the pristine .git directory so later LFS fetches can be
    # rolled back cheaply (see restore_clean_git).
    rm -rf .clean_git
    cp -R .git .clean_git
}
|  | ||||
restore_clean_git()
{
    # Throw away git objects accumulated since create_clean_git ran and
    # restore the saved pristine .git, keeping disk usage bounded.
    rm -rf .git
    cp -R .clean_git .git
}
|  | ||||
fetch_fossil()
{
    # Fetch a single fossil file via git-lfs.  Prints OK/ERROR (plus the
    # captured lfs output on error) and returns non-zero if the pull
    # failed or the file did not appear on disk.
    local fossil="${1//,/?}"
    echo -n "[fetch_fossil] Fetching $1... "
    # Declare and assign separately: `local output=$(cmd)` would make $?
    # reflect the `local` builtin (always 0) instead of the git-lfs pull,
    # and the original `local ret=0` reset $? again before the test below.
    local output
    output=$(git lfs pull -I "$fossil" 2>&1)
    local ret=$?
    if [[ $ret -ne 0 || ! -f "$1" ]]; then
        echo "ERROR"
        echo "$output"
        ret=1
    else
        echo "OK"
        ret=0
    fi
    # Drop git objects created by the pull to keep storage consumption low.
    restore_clean_git
    return $ret
}
|  | ||||
# Clone the pinned fossils-db repo if the YAML names one; otherwise assume
# the current directory already is a fossils-db checkout.
if [[ -n "$(query_fossils_yaml fossils_db_repo)" ]]; then
    clone_fossils_db "$(query_fossils_yaml fossils_db_repo)" \
                     "$(query_fossils_yaml fossils_db_commit)"
    cd fossils-db
else
    echo "Warning: No fossils-db entry in $FOSSILS_YAML, assuming fossils-db is current directory"
fi

# During git operations various git objects get created which
# may take up significant space. Store a clean .git instance,
# which we restore after various git operations to keep our
# storage consumption low.
create_clean_git

for fossil in $(query_fossils_yaml fossils)
do
    fetch_fossil "$fossil" || exit $?
    # stdout is routed to stderr; fossilize-replay's stderr is captured to
    # a file so the failure pattern can be grepped below.
    fossilize-replay --num-threads 4 $fossil 1>&2 2> $FOSSILS_RESULTS/fossil_replay.txt
    if [ $? != 0 ]; then
        echo "Replay of $fossil failed"
        grep "pipeline crashed or hung" $FOSSILS_RESULTS/fossil_replay.txt
        exit 1
    fi
    # Remove each fossil after a successful replay to bound disk usage.
    rm $fossil
done

# All fossils replayed successfully.  (Was `exit $ret`, but $ret was only
# ever set as a local inside fetch_fossil and is unset at this scope.)
exit 0
| @@ -1,69 +0,0 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| # Copyright (c) 2019 Collabora Ltd | ||||
| # Copyright (c) 2020 Valve Corporation | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice shall be included | ||||
| # in all copies or substantial portions of the Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||||
| # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||||
| # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||||
| # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||||
| # OTHER DEALINGS IN THE SOFTWARE. | ||||
| # | ||||
| # SPDX-License-Identifier: MIT | ||||
|  | ||||
| import argparse | ||||
| import yaml | ||||
|  | ||||
def cmd_fossils_db_repo(args):
    """Print the fossils-db repository URL from the YAML file."""
    with open(args.file, 'r') as config:
        data = yaml.safe_load(config)
    print(data['fossils-db']['repo'])
|  | ||||
def cmd_fossils_db_commit(args):
    """Print the pinned fossils-db commit (or ref) from the YAML file."""
    with open(args.file, 'r') as config:
        data = yaml.safe_load(config)
    print(data['fossils-db']['commit'])
|  | ||||
def cmd_fossils(args):
    """Print the path of every fossil listed in the YAML file, one per line.

    Prints nothing (not even a blank line) when the list is empty.
    """
    with open(args.file, 'r') as config:
        data = yaml.safe_load(config)

    fossils = list(data['fossils'])
    if not fossils:
        return

    print('\n'.join(entry['path'] for entry in fossils))
|  | ||||
def main():
    """Parse command-line arguments and dispatch to the selected sub-command.

    Each sub-command maps to one ``cmd_*`` handler via ``set_defaults(func=...)``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', required=True,
                        help='the name of the yaml file')

    subparsers = parser.add_subparsers(dest='command', help='sub-command help')
    # Without this, invoking the script with no sub-command would crash
    # below with AttributeError on args.func; make argparse emit a proper
    # usage error instead.
    subparsers.required = True

    parser_fossils_db_repo = subparsers.add_parser('fossils_db_repo')
    parser_fossils_db_repo.set_defaults(func=cmd_fossils_db_repo)

    parser_fossils_db_commit = subparsers.add_parser('fossils_db_commit')
    parser_fossils_db_commit.set_defaults(func=cmd_fossils_db_commit)

    parser_fossils = subparsers.add_parser('fossils')
    parser_fossils.set_defaults(func=cmd_fossils)

    args = parser.parse_args()
    args.func(args)
|  | ||||
| if __name__ == "__main__": | ||||
|     main() | ||||
| @@ -1,70 +0,0 @@ | ||||
#!/bin/sh

# Run the driver's gtest suite through gtest-runner, then post-process
# results (junit report, optional IRC flake reporting).

set -ex

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export LIBVA_DRIVERS_PATH=`pwd`/install/lib/dri/
# libva spams driver open info by default, and that happens per testcase.
export LIBVA_MESSAGING_LEVEL=1

# Results directory shared by gtest-runner and the junit/flake reporting
# below.  (Previously $RESULTS was referenced but never assigned, so the
# junit and report-flakes paths expanded to e.g. "/failures.csv".)
RESULTS=`pwd`/${GTEST_RESULTS_DIR:-results}
mkdir -p $RESULTS

if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
    GTEST_RUNNER_OPTIONS="$GTEST_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
fi

# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/$GPU_VERSION-flakes.txt

if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GALLIUM_DRIVER-skips.txt"
fi

if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$DRIVER_NAME-skips.txt"
fi

if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GPU_VERSION-skips.txt"
fi

# Let the runner finish even on test failures; its exit code is captured
# and re-raised at the end after post-processing.
set +e

gtest-runner \
    run \
    --gtest $GTEST \
    --output $RESULTS \
    --jobs ${FDO_CI_CONCURRENT:-4} \
    $GTEST_SKIPS \
    --flakes $INSTALL/$GPU_VERSION-flakes.txt \
    --fraction-start ${CI_NODE_INDEX:-1} \
    --fraction $((${CI_NODE_TOTAL:-1} * ${GTEST_FRACTION:-1})) \
    --env "LD_PRELOAD=$TEST_LD_PRELOAD" \
    $GTEST_RUNNER_OPTIONS

GTEST_EXITCODE=$?

deqp-runner junit \
   --testsuite gtest \
   --results $RESULTS/failures.csv \
   --output $RESULTS/junit.xml \
   --limit 50 \
   --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"

# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
  python3 $INSTALL/report-flakes.py \
         --host irc.oftc.net \
         --port 6667 \
         --results $RESULTS/results.csv \
         --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
         --channel "$FLAKES_CHANNEL" \
         --runner "$CI_RUNNER_DESCRIPTION" \
         --job "$CI_JOB_ID" \
         --url "$CI_JOB_URL" \
         --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
         --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi

exit $GTEST_EXITCODE
| @@ -1,21 +0,0 @@ | ||||
| variables: | ||||
|    DEBIAN_X86_BUILD_BASE_IMAGE: "debian/x86_build-base" | ||||
|    DEBIAN_BASE_TAG: "2022-02-21-libdrm" | ||||
|  | ||||
|    DEBIAN_X86_BUILD_IMAGE_PATH: "debian/x86_build" | ||||
|    DEBIAN_BUILD_TAG: "2022-02-21-libdrm" | ||||
|  | ||||
|    DEBIAN_X86_TEST_BASE_IMAGE: "debian/x86_test-base" | ||||
|  | ||||
|    DEBIAN_X86_TEST_IMAGE_PATH: "debian/x86_test-gl" | ||||
|    DEBIAN_X86_TEST_GL_TAG: "2022-04-07-virgl-crosvm" | ||||
|    DEBIAN_X86_TEST_VK_TAG: "2022-04-05-deqp-runner" | ||||
|  | ||||
|    FEDORA_X86_BUILD_TAG: "2022-03-18-spirv-tools-5" | ||||
|    KERNEL_ROOTFS_TAG: "2022-04-07-prefix-skqp" | ||||
|  | ||||
|    WINDOWS_X64_BUILD_PATH: "windows/x64_build" | ||||
|    WINDOWS_X64_BUILD_TAG: "2022-20-02-base_split" | ||||
|  | ||||
|    WINDOWS_X64_TEST_PATH: "windows/x64_test" | ||||
|    WINDOWS_X64_TEST_TAG: "2022-04-13-dozen_ci" | ||||
| @@ -1,123 +0,0 @@ | ||||
| .lava-test: | ||||
|   extends: | ||||
|     - .ci-run-policy | ||||
|   # Cancel job if a newer commit is pushed to the same branch | ||||
|   interruptible: true | ||||
|   variables: | ||||
|     GIT_STRATEGY: none # testing doesn't build anything from source | ||||
|     FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions | ||||
|     DEQP_VER: gles2 | ||||
|     # proxy used to cache data locally | ||||
|     FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri=" | ||||
|     # base system generated by the container build job, shared between many pipelines | ||||
|     BASE_SYSTEM_HOST_PREFIX: "${MINIO_HOST}/mesa-lava" | ||||
|     BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}" | ||||
|     BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}" | ||||
|     # per-job build artifacts | ||||
|     BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/${CI_PROJECT_NAME}-${ARCH}.tar.gz" | ||||
|     JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz" | ||||
|     JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz" | ||||
|     MINIO_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}" | ||||
|     PIGLIT_NO_WINDOW: 1 | ||||
|     VISIBILITY_GROUP: "Collabora+fdo" | ||||
|   script: | ||||
|     - ./artifacts/lava/lava-submit.sh | ||||
|   artifacts: | ||||
|     name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}" | ||||
|     when: always | ||||
|     paths: | ||||
|       - results/ | ||||
|     exclude: | ||||
|       - results/*.shader_cache | ||||
|   tags: | ||||
|     - $RUNNER_TAG | ||||
|   after_script: | ||||
|     - wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz | ||||
|  | ||||
| .lava-test:armhf: | ||||
|   variables: | ||||
|     ARCH: armhf | ||||
|     KERNEL_IMAGE_NAME: zImage | ||||
|     KERNEL_IMAGE_TYPE: "zimage" | ||||
|     BOOT_METHOD: u-boot | ||||
|     HWCI_TEST_SCRIPT: "/install/deqp-runner.sh" | ||||
|   extends: | ||||
|     - .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_armhf | ||||
|     - .use-debian/x86_build | ||||
|     - .lava-test | ||||
|     - .use-kernel+rootfs-arm | ||||
|   needs: | ||||
|     - kernel+rootfs_armhf | ||||
|     - debian/x86_build | ||||
|     - debian-armhf | ||||
|  | ||||
| .lava-test:arm64: | ||||
|   variables: | ||||
|     ARCH: arm64 | ||||
|     KERNEL_IMAGE_NAME: Image | ||||
|     KERNEL_IMAGE_TYPE: "image" | ||||
|     BOOT_METHOD: u-boot | ||||
|     HWCI_TEST_SCRIPT: "/install/deqp-runner.sh" | ||||
|   extends: | ||||
|     - .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64 | ||||
|     - .use-debian/x86_build | ||||
|     - .lava-test | ||||
|     - .use-kernel+rootfs-arm | ||||
|   dependencies: | ||||
|     - debian-arm64 | ||||
|   needs: | ||||
|     - kernel+rootfs_arm64 | ||||
|     - debian/x86_build | ||||
|     - debian-arm64 | ||||
|  | ||||
| .lava-test:amd64: | ||||
|   variables: | ||||
|     ARCH: amd64 | ||||
|     KERNEL_IMAGE_NAME: bzImage | ||||
|     KERNEL_IMAGE_TYPE: "zimage" | ||||
|     BOOT_METHOD: u-boot | ||||
|     HWCI_TEST_SCRIPT: "/install/deqp-runner.sh" | ||||
|   extends: | ||||
|     - .use-debian/x86_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_amd64 | ||||
|     - .use-debian/x86_build | ||||
|     - .lava-test | ||||
|     - .use-kernel+rootfs-amd64 | ||||
|   needs: | ||||
|     - kernel+rootfs_amd64 | ||||
|     - debian-testing | ||||
|  | ||||
| .lava-traces-base: | ||||
|   variables: | ||||
|     HWCI_TEST_SCRIPT: "/install/piglit/piglit-traces.sh" | ||||
|   artifacts: | ||||
|     reports: | ||||
|       junit: results/junit.xml | ||||
|  | ||||
| .lava-piglit: | ||||
|   variables: | ||||
|     PIGLIT_REPLAY_DEVICE_NAME: "gl-${GPU_VERSION}" | ||||
|     PIGLIT_RESULTS: "${GPU_VERSION}-${PIGLIT_PROFILES}" | ||||
|     HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh" | ||||
|  | ||||
| .lava-piglit-traces:amd64: | ||||
|   extends: | ||||
|     - .lava-test:amd64 | ||||
|     - .lava-piglit | ||||
|     - .lava-traces-base | ||||
|  | ||||
| .lava-piglit-traces:armhf: | ||||
|   extends: | ||||
|     - .lava-test:armhf | ||||
|     - .lava-piglit | ||||
|     - .lava-traces-base | ||||
|  | ||||
| .lava-piglit-traces:arm64: | ||||
|   extends: | ||||
|     - .lava-test:arm64 | ||||
|     - .lava-piglit | ||||
|     - .lava-traces-base | ||||
|  | ||||
| .lava-piglit:amd64: | ||||
|   extends: | ||||
|     - .lava-test:amd64 | ||||
|     - .lava-piglit | ||||
| @@ -1,34 +0,0 @@ | ||||
| #!/bin/sh | ||||
| # | ||||
| # Copyright (C) 2022 Collabora Limited | ||||
| # Author: Guilherme Gallo <guilherme.gallo@collabora.com> | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| # SOFTWARE. | ||||
|  | ||||
# This script runs unit/integration tests related to the LAVA CI tools
|  | ||||
| set -ex | ||||
|  | ||||
| TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests | ||||
|  | ||||
| PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \ | ||||
|     pytest "${TEST_DIR}" \ | ||||
|             -W ignore::DeprecationWarning \ | ||||
|             --junitxml=artifacts/ci_scripts_report.xml | ||||
| @@ -1,48 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
| set -x | ||||
|  | ||||
| # Try to use the kernel and rootfs built in mainline first, so we're more | ||||
| # likely to hit cache | ||||
| if wget -q --method=HEAD "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then | ||||
| 	BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}" | ||||
| else | ||||
| 	BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}" | ||||
| fi | ||||
|  | ||||
| rm -rf results | ||||
| mkdir -p results/job-rootfs-overlay/ | ||||
|  | ||||
| cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/ | ||||
| cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/ | ||||
| cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/ | ||||
|  | ||||
| # Prepare env vars for upload. | ||||
| KERNEL_IMAGE_BASE_URL="https://${BASE_SYSTEM_HOST_PATH}" \ | ||||
| 	artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh | ||||
|  | ||||
| tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ . | ||||
| ci-fairy minio login --token-file "${CI_JOB_JWT_FILE}" | ||||
| ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}" | ||||
|  | ||||
| touch results/lava.log | ||||
| tail -f results/lava.log & | ||||
| artifacts/lava/lava_job_submitter.py \ | ||||
| 	--dump-yaml \ | ||||
| 	--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \ | ||||
| 	--rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \ | ||||
| 	--kernel-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \ | ||||
| 	--build-url "${FDO_HTTP_CACHE_URI:-}https://${BUILD_PATH}" \ | ||||
| 	--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \ | ||||
| 	--job-timeout ${JOB_TIMEOUT:-30} \ | ||||
| 	--first-stage-init artifacts/ci-common/init-stage1.sh \ | ||||
| 	--ci-project-dir ${CI_PROJECT_DIR} \ | ||||
| 	--device-type ${DEVICE_TYPE} \ | ||||
| 	--dtb ${DTB} \ | ||||
| 	--jwt-file "${CI_JOB_JWT_FILE}" \ | ||||
| 	--kernel-image-name ${KERNEL_IMAGE_NAME} \ | ||||
| 	--kernel-image-type "${KERNEL_IMAGE_TYPE}" \ | ||||
| 	--boot-method ${BOOT_METHOD} \ | ||||
| 	--visibility-group ${VISIBILITY_GROUP} \ | ||||
| 	--lava-tags "${LAVA_TAGS}" >> results/lava.log | ||||
| @@ -1,380 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # | ||||
| # Copyright (C) 2020, 2021 Collabora Limited | ||||
| # Author: Gustavo Padovan <gustavo.padovan@collabora.com> | ||||
| # | ||||
| # Permission is hereby granted, free of charge, to any person obtaining a | ||||
| # copy of this software and associated documentation files (the "Software"), | ||||
| # to deal in the Software without restriction, including without limitation | ||||
| # the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
| # and/or sell copies of the Software, and to permit persons to whom the | ||||
| # Software is furnished to do so, subject to the following conditions: | ||||
| # | ||||
| # The above copyright notice and this permission notice (including the next | ||||
| # paragraph) shall be included in all copies or substantial portions of the | ||||
| # Software. | ||||
| # | ||||
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL | ||||
| # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| # SOFTWARE. | ||||
|  | ||||
| """Send a job to LAVA, track it and collect log back""" | ||||
|  | ||||
| import argparse | ||||
| import pathlib | ||||
| import sys | ||||
| import time | ||||
| import traceback | ||||
| import urllib.parse | ||||
| import xmlrpc | ||||
|  | ||||
| from datetime import datetime, timedelta | ||||
| from os import getenv | ||||
|  | ||||
| import lavacli | ||||
| import yaml | ||||
| from lavacli.utils import loader | ||||
|  | ||||
| # Timeout in seconds to decide if the device from the dispatched LAVA job has | ||||
| # hung or not due to the lack of new log output. | ||||
| DEVICE_HANGING_TIMEOUT_SEC = int(getenv("LAVA_DEVICE_HANGING_TIMEOUT_SEC",  5*60)) | ||||
|  | ||||
| # How many seconds the script should wait before try a new polling iteration to | ||||
| # check if the dispatched LAVA job is running or waiting in the job queue. | ||||
| WAIT_FOR_DEVICE_POLLING_TIME_SEC = int(getenv("LAVA_WAIT_FOR_DEVICE_POLLING_TIME_SEC", 10)) | ||||
|  | ||||
| # How many seconds to wait between log output LAVA RPC calls. | ||||
| LOG_POLLING_TIME_SEC = int(getenv("LAVA_LOG_POLLING_TIME_SEC", 5)) | ||||
|  | ||||
| # How many retries should be made when a timeout happen. | ||||
| NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT_DETECTION", 2)) | ||||
|  | ||||
|  | ||||
def print_log(msg):
    """Print *msg* prefixed with the current wall-clock timestamp."""
    print(f"{datetime.now()}: {msg}")
|  | ||||
def fatal_err(msg):
    """Log *msg* with a timestamp, then terminate the process with status 1."""
    print_log(msg)
    # Equivalent to sys.exit(1): raises SystemExit(1).
    raise SystemExit(1)
|  | ||||
|  | ||||
def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
    """Return *yaml_data* with every line containing *hide_tag* removed.

    Line endings of the kept lines are preserved; intended for scrubbing
    tagged lines from YAML dumps before they are logged.
    """
    kept = []
    for line in yaml_data.splitlines(True):
        if hide_tag not in line:
            kept.append(line)
    return "".join(kept)
|  | ||||
|  | ||||
def generate_lava_yaml(args) -> str:
    """Build the complete LAVA job definition and return it as a YAML string.

    The job boots the DUT over TFTP/NFS and then runs a single inline
    "test" whose steps are a shell script assembled from the first-stage
    init script, the CI job's JWT secret, and artifact-download commands.
    """
    # General metadata and permissions, plus also inexplicably kernel arguments
    values = {
        'job_name': 'mesa: {}'.format(args.pipeline_info),
        'device_type': args.device_type,
        'visibility': { 'group': [ args.visibility_group ] },
        'priority': 75,
        'context': {
            'extra_nfsroot_args': ' init=/init rootwait usbcore.quirks=0bda:8153:k'
        },
        'timeouts': {
            'job': {
                'minutes': args.job_timeout
            }
        },
    }

    if args.lava_tags:
        # Comma-separated list on the command line -> YAML list.
        values['tags'] = args.lava_tags.split(',')

    # URLs to our kernel rootfs to boot from, both generated by the base
    # container build
    deploy = {
      'timeout': { 'minutes': 10 },
      'to': 'tftp',
      'os': 'oe',
      'kernel': {
        'url': '{}/{}'.format(args.kernel_url_prefix, args.kernel_image_name),
      },
      'nfsrootfs': {
        'url': '{}/lava-rootfs.tgz'.format(args.rootfs_url_prefix),
        'compression': 'gz',
      }
    }
    if args.kernel_image_type:
        deploy['kernel']['type'] = args.kernel_image_type
    if args.dtb:
        deploy['dtb'] = {
          'url': '{}/{}.dtb'.format(args.kernel_url_prefix, args.dtb)
        }

    # always boot over NFS
    boot = {
      'timeout': { 'minutes': 25 },
      'method': args.boot_method,
      'commands': 'nfs',
      'prompts': ['lava-shell:'],
    }

    # skeleton test definition: only declaring each job as a single 'test'
    # since LAVA's test parsing is not useful to us
    test = {
      'timeout': { 'minutes': args.job_timeout },
      'failure_retry': 1,
      'definitions': [ {
        'name': 'mesa',
        'from': 'inline',
        'path': 'inline/mesa.yaml',
        'repository': {
          'metadata': {
            'name': 'mesa',
            'description': 'Mesa test plan',
            'os': [ 'oe' ],
            'scope': [ 'functional' ],
            'format': 'Lava-Test Test Definition 1.0',
          },
          'parse': {
            'pattern': r'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
          },
          # 'steps' is filled in below once the shell script is assembled.
          'run': {
          },
        },
      } ],
    }

    # job execution script:
    #   - inline .gitlab-ci/common/init-stage1.sh
    #   - fetch and unpack per-pipeline build artifacts from build job
    #   - fetch and unpack per-job environment from lava-submit.sh
    #   - exec .gitlab-ci/common/init-stage2.sh
    init_lines = []

    # Inline the first-stage init script, skipping comments and blank lines.
    with open(args.first_stage_init, 'r') as init_sh:
      init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]

    # The JWT is echoed onto the DUT's filesystem; the trailing HIDEME marker
    # lets hide_sensitive_data() strip this line from any dumped YAML.
    with open(args.jwt_file) as jwt_file:
        init_lines += [
            "set +x",
            f'echo -n "{jwt_file.read()}" > "{args.jwt_file}"  # HIDEME',
            "set -x",
        ]

    init_lines += [
      'mkdir -p {}'.format(args.ci_project_dir),
      'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.build_url, args.ci_project_dir),
      'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),
      f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
      'exec /init-stage2.sh',
    ]
    test['definitions'][0]['repository']['run']['steps'] = init_lines

    values['actions'] = [
      { 'deploy': deploy },
      { 'boot': boot },
      { 'test': test },
    ]

    # The huge width stops yaml.dump from line-wrapping the long shell commands.
    return yaml.dump(values, width=10000000)
|  | ||||
|  | ||||
def setup_lava_proxy():
    """Create an authenticated XML-RPC proxy from the lavacli "default" identity."""
    config = lavacli.load_config("default")
    uri, usr, tok = (config.get(key) for key in ("uri", "username", "token"))
    parsed = urllib.parse.urlparse(uri)
    # Embed the credentials directly in the URL, as lavacli itself does.
    auth_uri = f"{parsed.scheme}://{usr}:{tok}@{parsed.netloc}{parsed.path}"
    transport = lavacli.RequestsTransport(
        parsed.scheme,
        config.get("proxy"),
        config.get("timeout", 120.0),
        config.get("verify_ssl_cert", True),
    )
    proxy = xmlrpc.client.ServerProxy(auth_uri, allow_none=True, transport=transport)

    print_log(f"Proxy for {config['uri']} created.")

    return proxy
|  | ||||
|  | ||||
| def _call_proxy(fn, *args): | ||||
|     retries = 60 | ||||
|     for n in range(1, retries + 1): | ||||
|         try: | ||||
|             return fn(*args) | ||||
|         except xmlrpc.client.ProtocolError as err: | ||||
|             if n == retries: | ||||
|                 traceback.print_exc() | ||||
|                 fatal_err("A protocol error occurred (Err {} {})".format(err.errcode, err.errmsg)) | ||||
|             else: | ||||
|                 time.sleep(15) | ||||
|         except xmlrpc.client.Fault as err: | ||||
|             traceback.print_exc() | ||||
|             fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode)) | ||||
|  | ||||
|  | ||||
def get_job_results(proxy, job_id, test_suite, test_case):
    """Inspect the finished job's results.

    Returns False when the failure looks retriable (infrastructure error
    or deploy validation failure); aborts the process when the test case
    itself failed; returns True on success.
    """
    # First pass: scan the whole job for infrastructure-level failures.
    suite_results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
    for entry in yaml.load(suite_results_yaml, Loader=loader(False)):
        meta = entry["metadata"]
        if meta.get("result") != "fail":
            continue
        if meta.get("error_type") == "Infrastructure":
            print_log(f"LAVA job {job_id} failed with Infrastructure Error. Retry.")
            return False
        if meta.get("case") == "validate":
            print_log(f"LAVA job {job_id} failed validation (possible download error). Retry.")
            return False

    # Second pass: the verdict of the single test case we care about.
    case_results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case)
    case_results = yaml.load(case_results_yaml, Loader=loader(False))
    if not case_results:
        fatal_err(f"LAVA: no result for test_suite '{test_suite}', test_case '{test_case}'")

    verdict = case_results[0]['result']
    print_log(f"LAVA: result for test_suite '{test_suite}', test_case '{test_case}': {verdict}")
    if verdict != 'pass':
        fatal_err("FAIL")

    return True
|  | ||||
def wait_until_job_is_started(proxy, job_id):
    """Poll the scheduler until *job_id* leaves the submission queue."""
    print_log(f"Waiting for job {job_id} to start.")
    queued_states = {"Submitted", "Scheduling", "Scheduled"}
    state = "Submitted"
    while state in queued_states:
        state = _call_proxy(proxy.scheduler.job_state, job_id)["job_state"]
        time.sleep(WAIT_FOR_DEVICE_POLLING_TIME_SEC)
    print_log(f"Job {job_id} started.")
|  | ||||
def follow_job_execution(proxy, job_id):
    """Stream the job's log output until it finishes.

    Returns True when the job completed, False when no new output
    arrived for DEVICE_HANGING_TIMEOUT_SEC (device presumed hung).
    """
    fetched_lines = 0
    job_done = False
    last_output_at = datetime.now()
    hang_limit = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
    while not job_done:
        job_done, data = _call_proxy(proxy.scheduler.jobs.logs, job_id, fetched_lines)
        new_entries = yaml.load(str(data), Loader=loader(False))
        if new_entries:
            # Fresh output: reset the hang-detection clock and echo it.
            last_output_at = datetime.now()
            for entry in new_entries:
                print(f'{entry["dt"]} {entry["msg"]}')
            fetched_lines += len(new_entries)
        elif datetime.now() - last_output_at > hang_limit:
            print_log(f"LAVA job {job_id} doesn't advance (machine got hung?). Retry.")
            return False

        # `proxy.scheduler.jobs.logs` does not block, even when there is no
        # new log to be fetched. To avoid dosing the LAVA dispatcher
        # machine, let's add a sleep to save them some stamina.
        time.sleep(LOG_POLLING_TIME_SEC)

    return True
|  | ||||
def show_job_data(proxy, job_id):
    """Print every field the scheduler reports for *job_id*, one per line."""
    job_info = _call_proxy(proxy.scheduler.jobs.show, job_id)
    for key, val in job_info.items():
        print(f"{key}\t: {val}")
|  | ||||
|  | ||||
def validate_job(proxy, job_file):
    """Ask the LAVA server whether *job_file* is a valid job definition.

    Returns the server's validation result, or False when the validation
    call failed for any reason.
    """
    try:
        return _call_proxy(proxy.scheduler.jobs.validate, job_file, True)
    # BaseException (rather than a bare ``except:``) is deliberate:
    # _call_proxy() reacts to an XML-RPC Fault by calling fatal_err(),
    # which raises SystemExit; we must swallow that too so the caller can
    # report "Error in LAVA job definition" instead of dying here.
    except BaseException:
        return False
|  | ||||
def submit_job(proxy, job_file):
    """Queue *job_file* on the LAVA server; return the new job's id."""
    return _call_proxy(proxy.scheduler.jobs.submit, job_file)
|  | ||||
|  | ||||
def retriable_follow_job(proxy, yaml_file):
    """Submit *yaml_file* and follow the job, retrying on failures.

    A retry is consumed both when the device stops producing output
    (the job is cancelled first) and when get_job_results() reports a
    retriable infrastructure/validation failure.  Previously the latter
    path resubmitted forever without consuming a retry, so a persistent
    infrastructure error could loop unboundedly.

    Returns True when a job run succeeded, False once all
    NUMBER_OF_RETRIES_TIMEOUT_DETECTION retries are exhausted.
    """
    # First attempt + the configured number of retries.
    for _attempt in range(NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1):
        job_id = submit_job(proxy, yaml_file)

        print_log(f"LAVA job id: {job_id}")

        wait_until_job_is_started(proxy, job_id)

        if not follow_job_execution(proxy, job_id):
            print_log(f"Job {job_id} has timed out. Cancelling it.")
            # Cancel the job as it is considered unreachable by Mesa CI.
            proxy.scheduler.jobs.cancel(job_id)
            continue

        show_job_data(proxy, job_id)

        if get_job_results(proxy, job_id, "0_mesa", "mesa"):
            return True
        # get_job_results() returned False: retriable infrastructure or
        # validation failure — fall through and consume a retry.

    # All retries exhausted without a successful run.
    return False
|  | ||||
|  | ||||
def main(args):
    """Entry point: build the LAVA job definition, then validate or run it.

    Fixes two defects of the original: the YAML was generated twice
    (generate_lava_yaml re-reads the JWT and init script from disk each
    call), and the failure message was missing a space, rendering as
    "...the number of2 retries.".
    """
    proxy = setup_lava_proxy()

    yaml_file = generate_lava_yaml(args)

    if args.dump_yaml:
        # Reuse the already-generated definition instead of rebuilding it.
        print(hide_sensitive_data(yaml_file))

    if args.validate_only:
        if not validate_job(proxy, yaml_file):
            fatal_err("Error in LAVA job definition")
        print("LAVA job definition validated successfully")
        return

    if not retriable_follow_job(proxy, yaml_file):
        fatal_err(
            "Job failed after it exceeded the number of "
            f"{NUMBER_OF_RETRIES_TIMEOUT_DETECTION} retries."
        )
|  | ||||
|  | ||||
def create_parser():
    """Declare every command-line option understood by the submitter."""
    parser = argparse.ArgumentParser("LAVA job submitter")
    add = parser.add_argument

    # Pipeline / artifact locations.
    add("--pipeline-info")
    add("--rootfs-url-prefix")
    add("--kernel-url-prefix")
    add("--build-url")
    add("--job-rootfs-overlay-url")

    # Job shape and boot configuration.
    add("--job-timeout", type=int)
    add("--first-stage-init")
    add("--ci-project-dir")
    add("--device-type")
    add("--dtb", nargs='?', default="")
    add("--kernel-image-name")
    add("--kernel-image-type", nargs='?', default="")
    add("--boot-method")
    add("--lava-tags", nargs='?', default="")
    add("--jwt-file", type=pathlib.Path)

    # Behaviour switches.
    add("--validate-only", action='store_true')
    add("--dump-yaml", action='store_true')
    add("--visibility-group")

    return parser
|  | ||||
if __name__ == "__main__":
    # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
    # GitLab runner -> GitLab primary -> user, safe to say we don't need any
    # more buffering
    sys.stdout.reconfigure(line_buffering=True)
    sys.stderr.reconfigure(line_buffering=True)

    arg_parser = create_parser()
    arg_parser.set_defaults(func=main)

    parsed_args = arg_parser.parse_args()
    parsed_args.func(parsed_args)
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user