Compare commits
53 Commits
explicit-s
...
mesa-21.0.
Author | SHA1 | Date | |
---|---|---|---|
|
8dc038e80e | ||
|
b98745871b | ||
|
24b733aebd | ||
|
3994f5db48 | ||
|
1d2827379d | ||
|
d64870e874 | ||
|
98c1bf6d18 | ||
|
a6b63210e0 | ||
|
080c433715 | ||
|
be774d1825 | ||
|
4dee39f04d | ||
|
9fc330f27b | ||
|
5e436d9f2d | ||
|
380c7edbc2 | ||
|
be4b618606 | ||
|
7d2ccf574c | ||
|
98706b7754 | ||
|
b024d311b0 | ||
|
c7a7e64ffa | ||
|
bea422f2f4 | ||
|
6051931415 | ||
|
52376c9682 | ||
|
e6a0397be8 | ||
|
22fff81b4b | ||
|
c077276c15 | ||
|
ba3d2c3a4c | ||
|
73ea60139d | ||
|
a6ace60672 | ||
|
99ad50617a | ||
|
9a81b1820c | ||
|
0b66e5b6a1 | ||
|
074631dd67 | ||
|
8d2bc1e60b | ||
|
e1465cd5ec | ||
|
d14f279cfd | ||
|
82e845ba67 | ||
|
3aaf5188fe | ||
|
0d63d9463e | ||
|
71a58f02e5 | ||
|
4562f9f894 | ||
|
3760fdf7e6 | ||
|
28df87bbe2 | ||
|
7f40dc9760 | ||
|
12c0bfaaf9 | ||
|
6a7552aa10 | ||
|
01a7662154 | ||
|
65288fe496 | ||
|
0319695645 | ||
|
126cc30440 | ||
|
3714b68ec1 | ||
|
ac4caa502f | ||
|
b10e32f840 | ||
|
fe1a419e42 |
66
.appveyor/appveyor_msvc.bat
Normal file
66
.appveyor/appveyor_msvc.bat
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
goto %1
|
||||||
|
|
||||||
|
:install
|
||||||
|
rem Check pip
|
||||||
|
python --version
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
python -m pip --version
|
||||||
|
if "%buildsystem%" == "scons" (
|
||||||
|
rem Install Mako
|
||||||
|
python -m pip install Mako==1.1.3
|
||||||
|
rem Install pywin32 extensions, needed by SCons
|
||||||
|
python -m pip install pypiwin32
|
||||||
|
rem Install python wheels, necessary to install SCons via pip
|
||||||
|
python -m pip install wheel
|
||||||
|
rem Install SCons
|
||||||
|
python -m pip install scons==3.1.2
|
||||||
|
call scons --version
|
||||||
|
) else (
|
||||||
|
python -m pip install Mako meson
|
||||||
|
meson --version
|
||||||
|
|
||||||
|
rem Install pkg-config, which meson requires even on windows
|
||||||
|
cinst -y pkgconfiglite
|
||||||
|
)
|
||||||
|
|
||||||
|
rem Install flex/bison
|
||||||
|
set WINFLEXBISON_ARCHIVE=win_flex_bison-%WINFLEXBISON_VERSION%.zip
|
||||||
|
if not exist "%WINFLEXBISON_ARCHIVE%" appveyor DownloadFile "https://github.com/lexxmark/winflexbison/releases/download/v%WINFLEXBISON_VERSION%/%WINFLEXBISON_ARCHIVE%"
|
||||||
|
7z x -y -owinflexbison\ "%WINFLEXBISON_ARCHIVE%" > nul
|
||||||
|
set Path=%CD%\winflexbison;%Path%
|
||||||
|
win_flex --version
|
||||||
|
win_bison --version
|
||||||
|
rem Download and extract LLVM
|
||||||
|
if not exist "%LLVM_ARCHIVE%" appveyor DownloadFile "https://people.freedesktop.org/~jrfonseca/llvm/%LLVM_ARCHIVE%"
|
||||||
|
7z x -y "%LLVM_ARCHIVE%" > nul
|
||||||
|
if "%buildsystem%" == "scons" (
|
||||||
|
mkdir llvm\bin
|
||||||
|
set LLVM=%CD%\llvm
|
||||||
|
) else (
|
||||||
|
move llvm subprojects\
|
||||||
|
copy .appveyor\llvm-wrap.meson subprojects\llvm\meson.build
|
||||||
|
)
|
||||||
|
goto :eof
|
||||||
|
|
||||||
|
:build_script
|
||||||
|
if "%buildsystem%" == "scons" (
|
||||||
|
call scons -j%NUMBER_OF_PROCESSORS% MSVC_VERSION=14.2 machine=x86 llvm=1
|
||||||
|
) else (
|
||||||
|
call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" -arch=x86
|
||||||
|
rem We use default-library as static to affect any wraps (such as expat and zlib)
|
||||||
|
rem it would be better if we could set subprojects buildtype independently,
|
||||||
|
rem but I haven't written that patch yet :)
|
||||||
|
call meson builddir --backend=vs2017 --default-library=static -Dbuild-tests=true -Db_vscrt=mtd --buildtype=release -Dllvm=true -Dgallium-drivers=swrast -Dosmesa=gallium
|
||||||
|
pushd builddir
|
||||||
|
call msbuild mesa.sln /m
|
||||||
|
popd
|
||||||
|
)
|
||||||
|
goto :eof
|
||||||
|
|
||||||
|
:test_script
|
||||||
|
if "%buildsystem%" == "scons" (
|
||||||
|
call scons -j%NUMBER_OF_PROCESSORS% MSVC_VERSION=14.2 machine=x86 llvm=1 check
|
||||||
|
) else (
|
||||||
|
call meson test -C builddir
|
||||||
|
)
|
||||||
|
goto :eof
|
36
.appveyor/llvm-wrap.meson
Normal file
36
.appveyor/llvm-wrap.meson
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# A meson.build file for binary wrapping the LLVM used in the appvyeor CI
|
||||||
|
project('llvm', ['cpp'])
|
||||||
|
|
||||||
|
cpp = meson.get_compiler('cpp')
|
||||||
|
|
||||||
|
_deps = []
|
||||||
|
_search = join_paths(meson.current_source_dir(), 'lib')
|
||||||
|
foreach d : ['LLVMAnalysis', 'LLVMAsmParser', 'LLVMAsmPrinter',
|
||||||
|
'LLVMBinaryFormat', 'LLVMBitReader', 'LLVMBitWriter',
|
||||||
|
'LLVMCodeGen', 'LLVMCore', 'LLVMCoroutines', 'LLVMCoverage',
|
||||||
|
'LLVMDebugInfoCodeView', 'LLVMDebugInfoDWARF',
|
||||||
|
'LLVMDebugInfoMSF', 'LLVMDebugInfoPDB', 'LLVMDemangle',
|
||||||
|
'LLVMDlltoolDriver', 'LLVMExecutionEngine', 'LLVMGlobalISel',
|
||||||
|
'LLVMInstCombine', 'LLVMInstrumentation', 'LLVMInterpreter',
|
||||||
|
'LLVMipo', 'LLVMIRReader', 'LLVMLibDriver', 'LLVMLineEditor',
|
||||||
|
'LLVMLinker', 'LLVMLTO', 'LLVMMCDisassembler', 'LLVMMCJIT',
|
||||||
|
'LLVMMC', 'LLVMMCParser', 'LLVMMIRParser', 'LLVMObjCARCOpts',
|
||||||
|
'LLVMObject', 'LLVMObjectYAML', 'LLVMOption', 'LLVMOrcJIT',
|
||||||
|
'LLVMPasses', 'LLVMProfileData', 'LLVMRuntimeDyld',
|
||||||
|
'LLVMScalarOpts', 'LLVMSelectionDAG', 'LLVMSupport',
|
||||||
|
'LLVMSymbolize', 'LLVMTableGen', 'LLVMTarget',
|
||||||
|
'LLVMTransformUtils', 'LLVMVectorize', 'LLVMX86AsmParser',
|
||||||
|
'LLVMX86AsmPrinter', 'LLVMX86CodeGen', 'LLVMX86Desc',
|
||||||
|
'LLVMX86Disassembler', 'LLVMX86Info', 'LLVMX86Utils',
|
||||||
|
'LLVMXRay']
|
||||||
|
_deps += cpp.find_library(d, dirs : _search)
|
||||||
|
endforeach
|
||||||
|
|
||||||
|
dep_llvm = declare_dependency(
|
||||||
|
include_directories : include_directories('include'),
|
||||||
|
dependencies : _deps,
|
||||||
|
version : '5.0.1',
|
||||||
|
)
|
||||||
|
|
||||||
|
has_rtti = false
|
||||||
|
irbuilder_h = files('include/llvm/IR/IRBuilder.h')
|
@@ -1,2 +0,0 @@
|
|||||||
# Vendored code
|
|
||||||
src/amd/vulkan/radix_sort/*
|
|
@@ -1,9 +0,0 @@
|
|||||||
# The following files are opted into `ninja clang-format` and
|
|
||||||
# enforcement in the CI.
|
|
||||||
|
|
||||||
src/gallium/drivers/i915
|
|
||||||
src/gallium/targets/teflon/**/*
|
|
||||||
src/amd/vulkan/**/*
|
|
||||||
src/amd/compiler/**/*
|
|
||||||
src/egl/**/*
|
|
||||||
src/etnaviv/isa/**/*
|
|
@@ -8,7 +8,7 @@ charset = utf-8
|
|||||||
insert_final_newline = true
|
insert_final_newline = true
|
||||||
tab_width = 8
|
tab_width = 8
|
||||||
|
|
||||||
[*.{c,h,cpp,hpp,cc,hh,y,yy}]
|
[*.{c,h,cpp,hpp,cc,hh}]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 3
|
indent_size = 3
|
||||||
max_line_length = 78
|
max_line_length = 78
|
||||||
@@ -16,17 +16,25 @@ max_line_length = 78
|
|||||||
[{Makefile*,*.mk}]
|
[{Makefile*,*.mk}]
|
||||||
indent_style = tab
|
indent_style = tab
|
||||||
|
|
||||||
[*.py]
|
[{*.py,SCons*}]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 4
|
indent_size = 4
|
||||||
|
|
||||||
|
[*.pl]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 4
|
||||||
|
|
||||||
|
[*.m4]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
||||||
|
|
||||||
[*.yml]
|
[*.yml]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 2
|
indent_size = 2
|
||||||
|
|
||||||
[*.rst]
|
[*.html]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 3
|
indent_size = 2
|
||||||
|
|
||||||
[*.patch]
|
[*.patch]
|
||||||
trim_trailing_whitespace = false
|
trim_trailing_whitespace = false
|
||||||
@@ -34,11 +42,3 @@ trim_trailing_whitespace = false
|
|||||||
[{meson.build,meson_options.txt}]
|
[{meson.build,meson_options.txt}]
|
||||||
indent_style = space
|
indent_style = space
|
||||||
indent_size = 2
|
indent_size = 2
|
||||||
|
|
||||||
[*.ps1]
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 2
|
|
||||||
|
|
||||||
[*.rs]
|
|
||||||
indent_style = space
|
|
||||||
indent_size = 4
|
|
||||||
|
@@ -1,67 +0,0 @@
|
|||||||
# List of commits to ignore when using `git blame`.
|
|
||||||
# Enable with:
|
|
||||||
# git config blame.ignoreRevsFile .git-blame-ignore-revs
|
|
||||||
#
|
|
||||||
# Per git-blame(1):
|
|
||||||
# Ignore revisions listed in the file, one unabbreviated object name
|
|
||||||
# per line, in git-blame. Whitespace and comments beginning with # are
|
|
||||||
# ignored.
|
|
||||||
#
|
|
||||||
# Please keep these in chronological order :)
|
|
||||||
#
|
|
||||||
# You can add a new commit with the following command:
|
|
||||||
# git log -1 --pretty=format:'%n# %s%n%H%n' >> .git-blame-ignore-revs $COMMIT
|
|
||||||
|
|
||||||
# pvr: Fix clang-format error.
|
|
||||||
0ad5b0a74ef73f5fcbe1406ad9d57fe5dc00a5b1
|
|
||||||
|
|
||||||
# panfrost: Fix up some formatting for clang-format
|
|
||||||
a4705afe63412498d13ded73cba969c66be67907
|
|
||||||
|
|
||||||
# asahi: clang-format the world again
|
|
||||||
26c51bb8d8a33098b1990425a391f56ffba5728c
|
|
||||||
|
|
||||||
# perfetto: Add a .clang-format for the directory.
|
|
||||||
da78d5d729b1800136dd713b68492cb339993f4a
|
|
||||||
|
|
||||||
# panfrost/winsys: Clang-format
|
|
||||||
c90f036516a5376002be6550a917e8bad6a8a3b8
|
|
||||||
|
|
||||||
# panfrost: Re-run clang-format
|
|
||||||
4ccf174009af6732cbffa5d8ebb4687da7517505
|
|
||||||
|
|
||||||
# panvk: Clang-format
|
|
||||||
c7bf3b69ebc8f2252dbf724a4de638e6bb2ac402
|
|
||||||
|
|
||||||
# pan/mdg: Fix icky formatting
|
|
||||||
133af0d6c945d3aaca8989edd15283a2b7dcc6c7
|
|
||||||
|
|
||||||
# mapi: clang-format _glapi_add_dispatch()
|
|
||||||
30332529663268a6406e910848e906e725e6fda7
|
|
||||||
|
|
||||||
# radv: reformat according to its .clang-format
|
|
||||||
8b319c6db8bd93603b18bd783eb75225fcfd51b7
|
|
||||||
|
|
||||||
# aco: reformat according to its .clang-format
|
|
||||||
6b21653ab4d3a67e711fe10e3d403128b6d26eb2
|
|
||||||
|
|
||||||
# egl: re-format using clang-format
|
|
||||||
2f670d89db038d5a29f6b72732fd7ad63dfaf4c6
|
|
||||||
|
|
||||||
# panfrost: clang-format the tree
|
|
||||||
0afd691f29683f6e9dde60f79eca094373521806
|
|
||||||
|
|
||||||
# aco: Format.
|
|
||||||
1e2639026fec7069806449f9ba2a124ce4eb5569
|
|
||||||
|
|
||||||
# radv: Format.
|
|
||||||
59c501ca353f8ec9d2717c98af2bfa1a1dbf4d75
|
|
||||||
|
|
||||||
# pvr: clang-format fixes
|
|
||||||
953c04ebd39c52d457301bdd8ac803949001da2d
|
|
||||||
|
|
||||||
# freedreno: Re-indent
|
|
||||||
2d439343ea1aee146d4ce32800992cd389bd505d
|
|
||||||
|
|
||||||
# ir3: Reformat source with clang-format
|
|
||||||
177138d8cb0b4f6a42ef0a1f8593e14d79f17c54
|
|
7
.gitattributes
vendored
7
.gitattributes
vendored
@@ -1,7 +0,0 @@
|
|||||||
*.csv eol=crlf
|
|
||||||
* text=auto
|
|
||||||
*.jpg binary
|
|
||||||
*.png binary
|
|
||||||
*.gif binary
|
|
||||||
*.ico binary
|
|
||||||
*.cl gitlab-language=c
|
|
60
.github/workflows/macos.yml
vendored
60
.github/workflows/macos.yml
vendored
@@ -1,60 +0,0 @@
|
|||||||
name: macOS-CI
|
|
||||||
on: push
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
macOS-CI:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
glx_option: ['dri', 'xlib']
|
|
||||||
runs-on: macos-11
|
|
||||||
env:
|
|
||||||
GALLIUM_DUMP_CPU: true
|
|
||||||
MESON_EXEC: /Users/runner/Library/Python/3.11/bin/meson
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: Install Dependencies
|
|
||||||
run: |
|
|
||||||
cat > Brewfile <<EOL
|
|
||||||
brew "bison"
|
|
||||||
brew "expat"
|
|
||||||
brew "gettext"
|
|
||||||
brew "libx11"
|
|
||||||
brew "libxcb"
|
|
||||||
brew "libxdamage"
|
|
||||||
brew "libxext"
|
|
||||||
brew "molten-vk"
|
|
||||||
brew "ninja"
|
|
||||||
brew "pkg-config"
|
|
||||||
brew "python@3.10"
|
|
||||||
EOL
|
|
||||||
|
|
||||||
brew update
|
|
||||||
brew bundle --verbose
|
|
||||||
- name: Install Mako and meson
|
|
||||||
run: pip3 install --user mako meson
|
|
||||||
- name: Configure
|
|
||||||
run: |
|
|
||||||
cat > native_config <<EOL
|
|
||||||
[binaries]
|
|
||||||
llvm-config = '/usr/local/opt/llvm/bin/llvm-config'
|
|
||||||
EOL
|
|
||||||
$MESON_EXEC . build --native-file=native_config -Dmoltenvk-dir=$(brew --prefix molten-vk) -Dbuild-tests=true -Dosmesa=true -Dgallium-drivers=swrast,zink -Dglx=${{ matrix.glx_option }}
|
|
||||||
- name: Build
|
|
||||||
run: $MESON_EXEC compile -C build
|
|
||||||
- name: Test
|
|
||||||
run: $MESON_EXEC test -C build --print-errorlogs
|
|
||||||
- name: Install
|
|
||||||
run: $MESON_EXEC install -C build --destdir $PWD/install
|
|
||||||
- name: 'Upload Artifact'
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: macos-${{ matrix.glx_option }}-result
|
|
||||||
path: |
|
|
||||||
build/meson-logs/
|
|
||||||
install/
|
|
||||||
retention-days: 5
|
|
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,6 +1,4 @@
|
|||||||
.vscode*
|
|
||||||
*.pyc
|
*.pyc
|
||||||
*.pyo
|
*.pyo
|
||||||
*.out
|
*.out
|
||||||
/build
|
build
|
||||||
.venv/
|
|
||||||
|
1935
.gitlab-ci.yml
1935
.gitlab-ci.yml
File diff suppressed because it is too large
Load Diff
@@ -1,82 +0,0 @@
|
|||||||
# Note: skips lists for CI are just a list of lines that, when
|
|
||||||
# non-zero-length and not starting with '#', will regex match to
|
|
||||||
# delete lines from the test list. Be careful.
|
|
||||||
|
|
||||||
# This test checks the driver's reported conformance version against the
|
|
||||||
# version of the CTS we're running. This check fails every few months
|
|
||||||
# and everyone has to go and bump the number in every driver.
|
|
||||||
# Running this check only makes sense while preparing a conformance
|
|
||||||
# submission, so skip it in the regular CI.
|
|
||||||
dEQP-VK.api.driver_properties.conformance_version
|
|
||||||
|
|
||||||
# Exclude this test which might fail when a new extension is implemented.
|
|
||||||
dEQP-VK.info.device_extensions
|
|
||||||
|
|
||||||
# These are tremendously slow (pushing toward a minute), and aren't
|
|
||||||
# reliable to be run in parallel with other tests due to CPU-side timing.
|
|
||||||
dEQP-GLES[0-9]*.functional.flush_finish.*
|
|
||||||
|
|
||||||
# piglit: WGL is Windows-only
|
|
||||||
wgl@.*
|
|
||||||
|
|
||||||
# These are sensitive to CPU timing, and would need to be run in isolation
|
|
||||||
# on the system rather than in parallel with other tests.
|
|
||||||
glx@glx_arb_sync_control@timing.*
|
|
||||||
|
|
||||||
# This test is not built with waffle, while we do build tests with waffle
|
|
||||||
spec@!opengl 1.1@windowoverlap
|
|
||||||
|
|
||||||
# These tests all read from the front buffer after a swap. Given that we
|
|
||||||
# run piglit tests in parallel in Mesa CI, and don't have a compositor
|
|
||||||
# running, the frontbuffer reads may end up with undefined results from
|
|
||||||
# windows overlapping us.
|
|
||||||
#
|
|
||||||
# Piglit does mark these tests as not to be run in parallel, but deqp-runner
|
|
||||||
# doesn't respect that. We need to extend deqp-runner to allow some tests to be
|
|
||||||
# marked as single-threaded and run after the rayon loop if we want to support
|
|
||||||
# them.
|
|
||||||
#
|
|
||||||
# Note that "glx-" tests don't appear in x11-skips.txt because they can be
|
|
||||||
# run even if PIGLIT_PLATFORM=gbm (for example)
|
|
||||||
glx@glx-copy-sub-buffer.*
|
|
||||||
|
|
||||||
# A majority of the tests introduced in CTS 1.3.7.0 are experiencing failures and flakes.
|
|
||||||
# Disable these tests until someone with a more deeper understanding of EGL examines them.
|
|
||||||
#
|
|
||||||
# Note: on sc8280xp/a690 I get identical results (same passes and fails)
|
|
||||||
# between freedreno, zink, and llvmpipe, so I believe this is either a
|
|
||||||
# deqp bug or egl/wayland bug, rather than driver issue.
|
|
||||||
#
|
|
||||||
# With llvmpipe, the failing tests have the error message:
|
|
||||||
#
|
|
||||||
# "Illegal sampler view creation without bind flag"
|
|
||||||
#
|
|
||||||
# which might be a hint. (But some passing tests also have the same
|
|
||||||
# error message.)
|
|
||||||
#
|
|
||||||
# more context from David Heidelberg on IRC: the deqp commit where these
|
|
||||||
# started failing is: https://github.com/KhronosGroup/VK-GL-CTS/commit/79b25659bcbced0cfc2c3fe318951c585f682abe
|
|
||||||
# prior to that they were skipping.
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.single_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_context.gles1_gles2_gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.color_clears.multi_thread.gles1_gles2_gles3.other
|
|
||||||
|
|
||||||
# Seems to be the same is as wayland-dEQP-EGL.functional.color_clears.*
|
|
||||||
wayland-dEQP-EGL.functional.render.single_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.single_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_context.gles2_gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles2.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles3.other
|
|
||||||
wayland-dEQP-EGL.functional.render.multi_thread.gles2_gles3.other
|
|
54
.gitlab-ci/arm.config
Normal file
54
.gitlab-ci/arm.config
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
CONFIG_LOCALVERSION="ccu"
|
||||||
|
|
||||||
|
CONFIG_DEBUG_KERNEL=y
|
||||||
|
|
||||||
|
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_POWERSAVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_USERSPACE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_PASSIVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
|
||||||
|
|
||||||
|
CONFIG_DRM=y
|
||||||
|
CONFIG_DRM_ROCKCHIP=y
|
||||||
|
CONFIG_DRM_PANFROST=y
|
||||||
|
CONFIG_DRM_LIMA=y
|
||||||
|
CONFIG_DRM_PANEL_SIMPLE=y
|
||||||
|
CONFIG_PWM_CROS_EC=y
|
||||||
|
CONFIG_BACKLIGHT_PWM=y
|
||||||
|
|
||||||
|
CONFIG_ROCKCHIP_CDN_DP=n
|
||||||
|
|
||||||
|
CONFIG_SPI_ROCKCHIP=y
|
||||||
|
CONFIG_PWM_ROCKCHIP=y
|
||||||
|
CONFIG_PHY_ROCKCHIP_DP=y
|
||||||
|
CONFIG_DWMAC_ROCKCHIP=y
|
||||||
|
|
||||||
|
CONFIG_MFD_RK808=y
|
||||||
|
CONFIG_REGULATOR_RK808=y
|
||||||
|
CONFIG_RTC_DRV_RK808=y
|
||||||
|
CONFIG_COMMON_CLK_RK808=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_FAN53555=y
|
||||||
|
CONFIG_REGULATOR=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_VCTRL=y
|
||||||
|
|
||||||
|
CONFIG_KASAN=n
|
||||||
|
CONFIG_KASAN_INLINE=n
|
||||||
|
CONFIG_STACKTRACE=n
|
||||||
|
|
||||||
|
CONFIG_TMPFS=y
|
||||||
|
|
||||||
|
CONFIG_PROVE_LOCKING=n
|
||||||
|
CONFIG_DEBUG_LOCKDEP=n
|
||||||
|
CONFIG_SOFTLOCKUP_DETECTOR=n
|
||||||
|
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
|
||||||
|
|
||||||
|
CONFIG_FW_LOADER_COMPRESS=y
|
||||||
|
|
||||||
|
CONFIG_USB_USBNET=y
|
||||||
|
CONFIG_NETDEVICES=y
|
||||||
|
CONFIG_USB_NET_DRIVERS=y
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
CONFIG_USB_NET_AX8817X=y
|
||||||
|
CONFIG_USB_NET_SMSC95XX=y
|
136
.gitlab-ci/arm64.config
Normal file
136
.gitlab-ci/arm64.config
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
CONFIG_LOCALVERSION="ccu"
|
||||||
|
|
||||||
|
CONFIG_DEBUG_KERNEL=y
|
||||||
|
|
||||||
|
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_POWERSAVE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_USERSPACE=y
|
||||||
|
CONFIG_DEVFREQ_GOV_PASSIVE=y
|
||||||
|
|
||||||
|
CONFIG_DRM=y
|
||||||
|
CONFIG_DRM_ROCKCHIP=y
|
||||||
|
CONFIG_DRM_PANFROST=y
|
||||||
|
CONFIG_DRM_LIMA=y
|
||||||
|
CONFIG_DRM_PANEL_SIMPLE=y
|
||||||
|
CONFIG_DRM_MSM=y
|
||||||
|
CONFIG_DRM_I2C_ADV7511=y
|
||||||
|
CONFIG_DRM_I2C_ADV7533=y
|
||||||
|
CONFIG_PWM_CROS_EC=y
|
||||||
|
CONFIG_BACKLIGHT_PWM=y
|
||||||
|
|
||||||
|
CONFIG_ROCKCHIP_CDN_DP=n
|
||||||
|
|
||||||
|
CONFIG_SPI_ROCKCHIP=y
|
||||||
|
CONFIG_PWM_ROCKCHIP=y
|
||||||
|
CONFIG_PHY_ROCKCHIP_DP=y
|
||||||
|
CONFIG_DWMAC_ROCKCHIP=y
|
||||||
|
CONFIG_STMMAC_ETH=y
|
||||||
|
CONFIG_TYPEC_FUSB302=y
|
||||||
|
CONFIG_TYPEC=y
|
||||||
|
CONFIG_TYPEC_TCPM=y
|
||||||
|
|
||||||
|
# MSM platform bits
|
||||||
|
CONFIG_QCOM_RPMHPD=y
|
||||||
|
CONFIG_QCOM_RPMPD=y
|
||||||
|
CONFIG_SDM_GPUCC_845=y
|
||||||
|
CONFIG_SDM_VIDEOCC_845=y
|
||||||
|
CONFIG_SDM_DISPCC_845=y
|
||||||
|
CONFIG_SDM_LPASSCC_845=y
|
||||||
|
CONFIG_SDM_CAMCC_845=y
|
||||||
|
CONFIG_RESET_QCOM_PDC=y
|
||||||
|
CONFIG_DRM_TI_SN65DSI86=y
|
||||||
|
CONFIG_I2C_QCOM_GENI=y
|
||||||
|
CONFIG_SPI_QCOM_GENI=y
|
||||||
|
CONFIG_PHY_QCOM_QUSB2=y
|
||||||
|
CONFIG_PHY_QCOM_QMP=y
|
||||||
|
CONFIG_QCOM_LLCC=y
|
||||||
|
CONFIG_QCOM_SPMI_TEMP_ALARM=y
|
||||||
|
CONFIG_POWER_RESET_QCOM_PON=y
|
||||||
|
CONFIG_RTC_DRV_PM8XXX=y
|
||||||
|
CONFIG_INTERCONNECT=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_SDM845=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_MSM8916=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
|
||||||
|
CONFIG_INTERCONNECT_QCOM_SC7180=y
|
||||||
|
CONFIG_QCOM_WDT=y
|
||||||
|
CONFIG_CRYPTO_DEV_QCOM_RNG=y
|
||||||
|
|
||||||
|
# db410c ethernet
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
# db820c ethernet
|
||||||
|
CONFIG_ATL1C=y
|
||||||
|
|
||||||
|
CONFIG_ARCH_ALPINE=n
|
||||||
|
CONFIG_ARCH_BCM2835=n
|
||||||
|
CONFIG_ARCH_BCM_IPROC=n
|
||||||
|
CONFIG_ARCH_BERLIN=n
|
||||||
|
CONFIG_ARCH_BRCMSTB=n
|
||||||
|
CONFIG_ARCH_EXYNOS=n
|
||||||
|
CONFIG_ARCH_K3=n
|
||||||
|
CONFIG_ARCH_LAYERSCAPE=n
|
||||||
|
CONFIG_ARCH_LG1K=n
|
||||||
|
CONFIG_ARCH_HISI=n
|
||||||
|
CONFIG_ARCH_MEDIATEK=n
|
||||||
|
CONFIG_ARCH_MVEBU=n
|
||||||
|
CONFIG_ARCH_SEATTLE=n
|
||||||
|
CONFIG_ARCH_SYNQUACER=n
|
||||||
|
CONFIG_ARCH_RENESAS=n
|
||||||
|
CONFIG_ARCH_R8A774A1=n
|
||||||
|
CONFIG_ARCH_R8A774C0=n
|
||||||
|
CONFIG_ARCH_R8A7795=n
|
||||||
|
CONFIG_ARCH_R8A7796=n
|
||||||
|
CONFIG_ARCH_R8A77965=n
|
||||||
|
CONFIG_ARCH_R8A77970=n
|
||||||
|
CONFIG_ARCH_R8A77980=n
|
||||||
|
CONFIG_ARCH_R8A77990=n
|
||||||
|
CONFIG_ARCH_R8A77995=n
|
||||||
|
CONFIG_ARCH_STRATIX10=n
|
||||||
|
CONFIG_ARCH_TEGRA=n
|
||||||
|
CONFIG_ARCH_SPRD=n
|
||||||
|
CONFIG_ARCH_THUNDER=n
|
||||||
|
CONFIG_ARCH_THUNDER2=n
|
||||||
|
CONFIG_ARCH_UNIPHIER=n
|
||||||
|
CONFIG_ARCH_VEXPRESS=n
|
||||||
|
CONFIG_ARCH_XGENE=n
|
||||||
|
CONFIG_ARCH_ZX=n
|
||||||
|
CONFIG_ARCH_ZYNQMP=n
|
||||||
|
|
||||||
|
# Strip out some stuff we don't need for graphics testing, to reduce
|
||||||
|
# the build.
|
||||||
|
CONFIG_CAN=n
|
||||||
|
CONFIG_WIRELESS=n
|
||||||
|
CONFIG_RFKILL=n
|
||||||
|
CONFIG_WLAN=n
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_FAN53555=y
|
||||||
|
CONFIG_REGULATOR=y
|
||||||
|
|
||||||
|
CONFIG_REGULATOR_VCTRL=y
|
||||||
|
|
||||||
|
CONFIG_KASAN=n
|
||||||
|
CONFIG_KASAN_INLINE=n
|
||||||
|
CONFIG_STACKTRACE=n
|
||||||
|
|
||||||
|
CONFIG_TMPFS=y
|
||||||
|
|
||||||
|
CONFIG_PROVE_LOCKING=n
|
||||||
|
CONFIG_DEBUG_LOCKDEP=n
|
||||||
|
CONFIG_SOFTLOCKUP_DETECTOR=y
|
||||||
|
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
|
||||||
|
|
||||||
|
CONFIG_DETECT_HUNG_TASK=y
|
||||||
|
|
||||||
|
CONFIG_FW_LOADER_COMPRESS=y
|
||||||
|
CONFIG_FW_LOADER_USER_HELPER=n
|
||||||
|
|
||||||
|
CONFIG_USB_USBNET=y
|
||||||
|
CONFIG_NETDEVICES=y
|
||||||
|
CONFIG_USB_NET_DRIVERS=y
|
||||||
|
CONFIG_USB_RTL8152=y
|
||||||
|
CONFIG_USB_NET_AX8817X=y
|
||||||
|
CONFIG_USB_NET_SMSC95XX=y
|
||||||
|
|
||||||
|
# For amlogic
|
||||||
|
CONFIG_MESON_GXL_PHY=y
|
||||||
|
CONFIG_MDIO_BUS_MUX_MESON_G12A=y
|
@@ -1,63 +0,0 @@
|
|||||||
version: 1
|
|
||||||
|
|
||||||
# Rules to match for a machine to qualify
|
|
||||||
target:
|
|
||||||
id: '{{ ci_runner_id }}'
|
|
||||||
|
|
||||||
timeouts:
|
|
||||||
first_console_activity: # This limits the time it can take to receive the first console log
|
|
||||||
minutes: {{ timeout_first_minutes }}
|
|
||||||
retries: {{ timeout_first_retries }}
|
|
||||||
console_activity: # Reset every time we receive a message from the logs
|
|
||||||
minutes: {{ timeout_minutes }}
|
|
||||||
retries: {{ timeout_retries }}
|
|
||||||
boot_cycle:
|
|
||||||
minutes: {{ timeout_boot_minutes }}
|
|
||||||
retries: {{ timeout_boot_retries }}
|
|
||||||
overall: # Maximum time the job can take, not overrideable by the "continue" deployment
|
|
||||||
minutes: {{ timeout_overall_minutes }}
|
|
||||||
retries: 0
|
|
||||||
# no retries possible here
|
|
||||||
|
|
||||||
console_patterns:
|
|
||||||
session_end:
|
|
||||||
regex: >-
|
|
||||||
{{ session_end_regex }}
|
|
||||||
{% if session_reboot_regex %}
|
|
||||||
session_reboot:
|
|
||||||
regex: >-
|
|
||||||
{{ session_reboot_regex }}
|
|
||||||
{% endif %}
|
|
||||||
job_success:
|
|
||||||
regex: >-
|
|
||||||
{{ job_success_regex }}
|
|
||||||
job_warn:
|
|
||||||
regex: >-
|
|
||||||
{{ job_warn_regex }}
|
|
||||||
|
|
||||||
# Environment to deploy
|
|
||||||
deployment:
|
|
||||||
# Initial boot
|
|
||||||
start:
|
|
||||||
kernel:
|
|
||||||
url: '{{ kernel_url }}'
|
|
||||||
cmdline: >
|
|
||||||
SALAD.machine_id={{ '{{' }} machine_id }}
|
|
||||||
console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
|
|
||||||
loglevel={{ log_level }} no_hash_pointers
|
|
||||||
b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
|
|
||||||
b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
|
|
||||||
b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
|
|
||||||
b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
|
|
||||||
b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
|
|
||||||
{% for volume in volumes %}
|
|
||||||
b2c.volume={{ volume }}
|
|
||||||
{% endfor %}
|
|
||||||
b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
|
|
||||||
{% if kernel_cmdline_extras is defined %}
|
|
||||||
{{ kernel_cmdline_extras }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
initramfs:
|
|
||||||
url: '{{ initramfs_url }}'
|
|
||||||
|
|
@@ -1,55 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
# Copyright © 2022 Valve Corporation
|
|
||||||
#
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
# copy of this software and associated documentation files (the "Software"),
|
|
||||||
# to deal in the Software without restriction, including without limitation
|
|
||||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
# and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
# Software is furnished to do so, subject to the following conditions:
|
|
||||||
#
|
|
||||||
# The above copyright notice and this permission notice (including the next
|
|
||||||
# paragraph) shall be included in all copies or substantial portions of the
|
|
||||||
# Software.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
||||||
# IN THE SOFTWARE.
|
|
||||||
|
|
||||||
from jinja2 import Environment, FileSystemLoader
|
|
||||||
from os import environ, path
|
|
||||||
|
|
||||||
|
|
||||||
# Pass all the environment variables prefixed by B2C_
|
|
||||||
values = {
|
|
||||||
key.removeprefix("B2C_").lower(): environ[key]
|
|
||||||
for key in environ if key.startswith("B2C_")
|
|
||||||
}
|
|
||||||
|
|
||||||
env = Environment(loader=FileSystemLoader(path.dirname(values['job_template'])),
|
|
||||||
trim_blocks=True, lstrip_blocks=True)
|
|
||||||
|
|
||||||
template = env.get_template(path.basename(values['job_template']))
|
|
||||||
|
|
||||||
values['ci_job_id'] = environ['CI_JOB_ID']
|
|
||||||
values['ci_runner_id'] = environ['CI_RUNNER_ID']
|
|
||||||
values['job_volume_exclusions'] = [excl for excl in values['job_volume_exclusions'].split(",") if excl]
|
|
||||||
values['working_dir'] = environ['CI_PROJECT_DIR']
|
|
||||||
|
|
||||||
# Use the gateway's pull-through registry caches to reduce load on fd.o.
|
|
||||||
values['local_container'] = environ['IMAGE_UNDER_TEST']
|
|
||||||
values['local_container'] = values['local_container'].replace(
|
|
||||||
'registry.freedesktop.org',
|
|
||||||
'{{ fdo_proxy_registry }}'
|
|
||||||
)
|
|
||||||
|
|
||||||
if 'kernel_cmdline_extras' not in values:
|
|
||||||
values['kernel_cmdline_extras'] = ''
|
|
||||||
|
|
||||||
with open(path.splitext(path.basename(values['job_template']))[0], "w") as f:
|
|
||||||
f.write(template.render(values))
|
|
81
.gitlab-ci/bare-metal/arm64_a630_gles_others.sh
Executable file
81
.gitlab-ci/bare-metal/arm64_a630_gles_others.sh
Executable file
@@ -0,0 +1,81 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# This test script groups together a bunch of fast dEQP variant runs
|
||||||
|
# to amortize the cost of rebooting the board.
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
EXIT=0
|
||||||
|
|
||||||
|
# Test rendering with the gmem path forced when possible (~1 minute)
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/gmem \
|
||||||
|
DEQP_VER=gles31 \
|
||||||
|
DEQP_FRACTION=5 \
|
||||||
|
FD_MESA_DEBUG=nobypass \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test rendering with the bypass path forced when possible (~1 minute)
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/bypass \
|
||||||
|
DEQP_VER=gles31 \
|
||||||
|
DEQP_FRACTION=5 \
|
||||||
|
FD_MESA_DEBUG=nogmem \
|
||||||
|
GPU_VERSION=freedreno-a630-bypass \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test rendering with the UBO-to-constants optimization disabled (~1 minute)
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/nouboopt \
|
||||||
|
DEQP_VER=gles31 \
|
||||||
|
IR3_SHADER_DEBUG=nouboopt \
|
||||||
|
DEQP_CASELIST_FILTER="functional.*ubo" \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# gles3-565nozs mustpass (~20s)
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/gles3-565nozs \
|
||||||
|
DEQP_VER=gles3 \
|
||||||
|
DEQP_CONFIG="rgb565d0s0ms0" \
|
||||||
|
DEQP_VARIANT="565-no-depth-no-stencil" \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# gles31-565nozs mustpass (~1s)
|
||||||
|
if ! env \
|
||||||
|
DEQP_RESULTS_DIR=results/gles31-565nozs \
|
||||||
|
DEQP_VER=gles31 \
|
||||||
|
DEQP_CONFIG="rgb565d0s0ms0" \
|
||||||
|
DEQP_VARIANT="565-no-depth-no-stencil" \
|
||||||
|
/install/deqp-runner.sh; then
|
||||||
|
EXIT=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# gles3-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
|
||||||
|
# if ! env \
|
||||||
|
# DEQP_RESULTS_DIR=results/gles3-multisample \
|
||||||
|
# DEQP_VER=gles3 \
|
||||||
|
# DEQP_CONFIG="rgba8888d24s8ms4" \
|
||||||
|
# DEQP_VARIANT="multisample" \
|
||||||
|
# /install/deqp-runner.sh; then
|
||||||
|
# EXIT=1
|
||||||
|
# fi
|
||||||
|
|
||||||
|
# gles31-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
|
||||||
|
# if ! env \
|
||||||
|
# DEQP_RESULTS_DIR=results/gles31-multisample \
|
||||||
|
# DEQP_VER=gles31 \
|
||||||
|
# DEQP_CONFIG="rgba8888d24s8ms4" \
|
||||||
|
# DEQP_VARIANT="multisample" \
|
||||||
|
# /install/deqp-runner.sh; then
|
||||||
|
# EXIT=1
|
||||||
|
# fi
|
||||||
|
|
||||||
|
exit $EXIT
|
@@ -1,13 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# Init entrypoint for bare-metal devices; calls common init code.
|
|
||||||
|
|
||||||
# First stage: very basic setup to bring up network and /dev etc
|
|
||||||
/init-stage1.sh
|
|
||||||
|
|
||||||
# Second stage: run jobs
|
|
||||||
test $? -eq 0 && /init-stage2.sh
|
|
||||||
|
|
||||||
# Wait until the job would have timed out anyway, so we don't spew a "init
|
|
||||||
# exited" panic.
|
|
||||||
sleep 6000
|
|
14
.gitlab-ci/bare-metal/capture-devcoredump.sh
Executable file
14
.gitlab-ci/bare-metal/capture-devcoredump.sh
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
while true; do
|
||||||
|
devcds=`find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null`
|
||||||
|
for i in $devcds; do
|
||||||
|
echo "Found a devcoredump at $i."
|
||||||
|
if cp $i /results/first.devcore; then
|
||||||
|
echo 1 > $i
|
||||||
|
echo "Saved to the job artifacts at /first.devcore"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
sleep 10
|
||||||
|
done
|
@@ -1,17 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
|
||||||
echo "Must supply the PoE Interface to power down"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_ADDRESS" ]; then
|
|
||||||
echo "Must supply the PoE Switch host"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
|
||||||
SNMP_OFF="i 4"
|
|
||||||
|
|
||||||
snmpset -v2c -r 3 -t 30 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
|
@@ -1,22 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
|
||||||
echo "Must supply the PoE Interface to power up"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "$BM_POE_ADDRESS" ]; then
|
|
||||||
echo "Must supply the PoE Switch host"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
|
|
||||||
SNMP_ON="i 1"
|
|
||||||
SNMP_OFF="i 4"
|
|
||||||
|
|
||||||
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
|
|
||||||
sleep 3s
|
|
||||||
snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_ON
|
|
@@ -1,15 +1,10 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# Boot script for Chrome OS devices attached to a servo debug connector, using
|
# Boot script for Chrome OS devices attached to a servo debug connector, using
|
||||||
# NFS and TFTP to boot.
|
# NFS and TFTP to boot.
|
||||||
|
|
||||||
# We're run from the root of the repo, make a helper var for our paths
|
# We're run from the root of the repo, make a helper var for our paths
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
|
||||||
CI_INSTALL=$CI_PROJECT_DIR/install
|
|
||||||
|
|
||||||
# Runner config checks
|
# Runner config checks
|
||||||
if [ -z "$BM_SERIAL" ]; then
|
if [ -z "$BM_SERIAL" ]; then
|
||||||
@@ -84,41 +79,22 @@ mkdir -p /nfs/results
|
|||||||
|
|
||||||
rm -rf /tftp/*
|
rm -rf /tftp/*
|
||||||
if echo "$BM_KERNEL" | grep -q http; then
|
if echo "$BM_KERNEL" | grep -q http; then
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt install -y wget
|
||||||
$BM_KERNEL -o /tftp/vmlinuz
|
wget $BM_KERNEL -O /tftp/vmlinuz
|
||||||
elif [ -n "${FORCE_KERNEL_TAG}" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/nfs/"
|
|
||||||
rm modules.tar.zst &
|
|
||||||
else
|
else
|
||||||
cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
|
cp $BM_KERNEL /tftp/vmlinuz
|
||||||
fi
|
fi
|
||||||
echo "$BM_CMDLINE" > /tftp/cmdline
|
echo "$BM_CMDLINE" > /tftp/cmdline
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
STRUCTURED_LOG_FILE=job_detail.json
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
|
|
||||||
python3 $BM/cros_servo_run.py \
|
python3 $BM/cros_servo_run.py \
|
||||||
--cpu $BM_SERIAL \
|
--cpu $BM_SERIAL \
|
||||||
--ec $BM_SERIAL_EC \
|
--ec $BM_SERIAL_EC
|
||||||
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
|
|
||||||
ret=$?
|
ret=$?
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
# will look for them.
|
# will look for them.
|
||||||
cp -Rp /nfs/results/. results/
|
cp -Rp /nfs/results/. results/
|
||||||
if [ -f "${STRUCTURED_LOG_FILE}" ]; then
|
|
||||||
cp -p ${STRUCTURED_LOG_FILE} results/
|
|
||||||
echo "Structured log file is available at https://${CI_PROJECT_ROOT_NAMESPACE}.pages.freedesktop.org/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts/results/${STRUCTURED_LOG_FILE}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit $ret
|
exit $ret
|
||||||
|
@@ -1,30 +1,75 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
#
|
#
|
||||||
# Copyright © 2020 Google LLC
|
# Copyright © 2020 Google LLC
|
||||||
# SPDX-License-Identifier: MIT
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice (including the next
|
||||||
|
# paragraph) shall be included in all copies or substantial portions of the
|
||||||
|
# Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||||
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import queue
|
||||||
import re
|
import re
|
||||||
import sys
|
|
||||||
|
|
||||||
from custom_logger import CustomLogger
|
|
||||||
from serial_buffer import SerialBuffer
|
from serial_buffer import SerialBuffer
|
||||||
|
import sys
|
||||||
|
import threading
|
||||||
|
|
||||||
|
|
||||||
class CrosServoRun:
|
class CrosServoRun:
|
||||||
def __init__(self, cpu, ec, test_timeout, logger):
|
def __init__(self, cpu, ec):
|
||||||
|
# Merged FIFO for the two serial buffers, fed by threads.
|
||||||
|
self.serial_queue = queue.Queue()
|
||||||
|
self.sentinel = object()
|
||||||
|
self.threads_done = 0
|
||||||
|
|
||||||
|
self.ec_ser = SerialBuffer(
|
||||||
|
ec, "results/serial-ec.txt", "R SERIAL-EC> ")
|
||||||
self.cpu_ser = SerialBuffer(
|
self.cpu_ser = SerialBuffer(
|
||||||
cpu, "results/serial.txt", "R SERIAL-CPU> ")
|
cpu, "results/serial.txt", "R SERIAL-CPU> ")
|
||||||
# Merge the EC serial into the cpu_ser's line stream so that we can
|
|
||||||
# effectively poll on both at the same time and not have to worry about
|
|
||||||
self.ec_ser = SerialBuffer(
|
|
||||||
ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
|
|
||||||
self.test_timeout = test_timeout
|
|
||||||
self.logger = logger
|
|
||||||
|
|
||||||
def close(self):
|
self.iter_feed_ec = threading.Thread(
|
||||||
self.ec_ser.close()
|
target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
|
||||||
self.cpu_ser.close()
|
self.iter_feed_ec.start()
|
||||||
|
|
||||||
|
self.iter_feed_cpu = threading.Thread(
|
||||||
|
target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
|
||||||
|
self.iter_feed_cpu.start()
|
||||||
|
|
||||||
|
# Feed lines from our serial queues into the merged queue, marking when our
|
||||||
|
# input is done.
|
||||||
|
def iter_feed_queue(self, it):
|
||||||
|
for i in it:
|
||||||
|
self.serial_queue.put(i)
|
||||||
|
self.serial_queue.put(sentinel)
|
||||||
|
|
||||||
|
# Return the next line from the queue, counting how many threads have
|
||||||
|
# terminated and joining when done
|
||||||
|
def get_serial_queue_line(self):
|
||||||
|
line = self.serial_queue.get()
|
||||||
|
if line == self.sentinel:
|
||||||
|
self.threads_done = self.threads_done + 1
|
||||||
|
if self.threads_done == 2:
|
||||||
|
self.iter_feed_cpu.join()
|
||||||
|
self.iter_feed_ec.join()
|
||||||
|
return line
|
||||||
|
|
||||||
|
# Returns an iterator for getting the next line.
|
||||||
|
def serial_queue_lines(self):
|
||||||
|
return iter(self.get_serial_queue_line, self.sentinel)
|
||||||
|
|
||||||
def ec_write(self, s):
|
def ec_write(self, s):
|
||||||
print("W SERIAL-EC> %s" % s)
|
print("W SERIAL-EC> %s" % s)
|
||||||
@@ -34,110 +79,55 @@ class CrosServoRun:
|
|||||||
print("W SERIAL-CPU> %s" % s)
|
print("W SERIAL-CPU> %s" % s)
|
||||||
self.cpu_ser.serial.write(s.encode())
|
self.cpu_ser.serial.write(s.encode())
|
||||||
|
|
||||||
def print_error(self, message):
|
|
||||||
RED = '\033[0;31m'
|
|
||||||
NO_COLOR = '\033[0m'
|
|
||||||
print(RED + message + NO_COLOR)
|
|
||||||
self.logger.update_status_fail(message)
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
# Flush any partial commands in the EC's prompt, then ask for a reboot.
|
# Flush any partial commands in the EC's prompt, then ask for a reboot.
|
||||||
self.ec_write("\n")
|
self.ec_write("\n")
|
||||||
self.ec_write("reboot\n")
|
self.ec_write("reboot\n")
|
||||||
|
|
||||||
bootloader_done = False
|
|
||||||
self.logger.create_job_phase("boot")
|
|
||||||
tftp_failures = 0
|
|
||||||
# This is emitted right when the bootloader pauses to check for input.
|
# This is emitted right when the bootloader pauses to check for input.
|
||||||
# Emit a ^N character to request network boot, because we don't have a
|
# Emit a ^N character to request network boot, because we don't have a
|
||||||
# direct-to-netboot firmware on cheza.
|
# direct-to-netboot firmware on cheza.
|
||||||
for line in self.cpu_ser.lines(timeout=120, phase="bootloader"):
|
for line in self.serial_queue_lines():
|
||||||
if re.search("load_archive: loading locale_en.bin", line):
|
if re.search("load_archive: loading locale_en.bin", line):
|
||||||
self.cpu_write("\016")
|
self.cpu_write("\016")
|
||||||
bootloader_done = True
|
|
||||||
break
|
|
||||||
|
|
||||||
# The Cheza firmware seems to occasionally get stuck looping in
|
|
||||||
# this error state during TFTP booting, possibly based on amount of
|
|
||||||
# network traffic around it, but it'll usually recover after a
|
|
||||||
# reboot. Currently mostly visible on google-freedreno-cheza-14.
|
|
||||||
if re.search("R8152: Bulk read error 0xffffffbf", line):
|
|
||||||
tftp_failures += 1
|
|
||||||
if tftp_failures >= 10:
|
|
||||||
self.print_error(
|
|
||||||
"Detected intermittent tftp failure, restarting run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# If the board has a netboot firmware and we made it to booting the
|
|
||||||
# kernel, proceed to processing of the test run.
|
|
||||||
if re.search("Booting Linux", line):
|
|
||||||
bootloader_done = True
|
|
||||||
break
|
break
|
||||||
|
|
||||||
# The Cheza boards have issues with failing to bring up power to
|
# The Cheza boards have issues with failing to bring up power to
|
||||||
# the system sometimes, possibly dependent on ambient temperature
|
# the system sometimes, possibly dependent on ambient temperature
|
||||||
# in the farm.
|
# in the farm.
|
||||||
if re.search("POWER_GOOD not seen in time", line):
|
if re.search("POWER_GOOD not seen in time", line):
|
||||||
self.print_error(
|
print("Detected intermittent poweron failure, restarting run...")
|
||||||
"Detected intermittent poweron failure, abandoning run.")
|
return 2
|
||||||
return 1
|
|
||||||
|
|
||||||
if not bootloader_done:
|
tftp_failures = 0
|
||||||
self.print_error("Failed to make it through bootloader, abandoning run.")
|
for line in self.serial_queue_lines():
|
||||||
return 1
|
|
||||||
|
|
||||||
self.logger.create_job_phase("test")
|
|
||||||
for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
|
|
||||||
if re.search("---. end Kernel panic", line):
|
if re.search("---. end Kernel panic", line):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
# The Cheza firmware seems to occasionally get stuck looping in
|
||||||
|
# this error state during TFTP booting, possibly based on amount of
|
||||||
|
# network traffic around it, but it'll usually recover after a
|
||||||
|
# reboot.
|
||||||
|
if re.search("R8152: Bulk read error 0xffffffbf", line):
|
||||||
|
tftp_failures += 1
|
||||||
|
if tftp_failures >= 100:
|
||||||
|
print("Detected intermittent tftp failure, restarting run...")
|
||||||
|
return 2
|
||||||
|
|
||||||
# There are very infrequent bus errors during power management transitions
|
# There are very infrequent bus errors during power management transitions
|
||||||
# on cheza, which we don't expect to be the case on future boards.
|
# on cheza, which we don't expect to be the case on future boards.
|
||||||
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
|
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
|
||||||
self.print_error(
|
print("Detected cheza power management bus error, restarting run...")
|
||||||
"Detected cheza power management bus error, abandoning run.")
|
return 2
|
||||||
return 1
|
|
||||||
|
|
||||||
# If the network device dies, it's probably not graphics's fault, just try again.
|
result = re.search("bare-metal result: (\S*)", line)
|
||||||
if re.search("NETDEV WATCHDOG", line):
|
|
||||||
self.print_error(
|
|
||||||
"Detected network device failure, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# These HFI response errors started appearing with the introduction
|
|
||||||
# of piglit runs. CosmicPenguin says:
|
|
||||||
#
|
|
||||||
# "message ID 106 isn't a thing, so likely what happened is that we
|
|
||||||
# got confused when parsing the HFI queue. If it happened on only
|
|
||||||
# one run, then memory corruption could be a possible clue"
|
|
||||||
#
|
|
||||||
# Given that it seems to trigger randomly near a GPU fault and then
|
|
||||||
# break many tests after that, just restart the whole run.
|
|
||||||
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
|
|
||||||
self.print_error(
|
|
||||||
"Detected cheza power management bus error, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
if re.search("coreboot.*bootblock starting", line):
|
|
||||||
self.print_error(
|
|
||||||
"Detected spontaneous reboot, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
|
|
||||||
self.print_error("Detected cheza MMU fail, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
result = re.search("hwci: mesa: (\S*)", line)
|
|
||||||
if result:
|
if result:
|
||||||
if result.group(1) == "pass":
|
if result.group(1) == "pass":
|
||||||
self.logger.update_dut_job("status", "pass")
|
|
||||||
return 0
|
return 0
|
||||||
else:
|
else:
|
||||||
self.logger.update_status_fail("test fail")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
self.print_error(
|
print("Reached the end of the CPU serial log without finding a result")
|
||||||
"Reached the end of the CPU serial log without finding a result")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
|
||||||
@@ -147,19 +137,17 @@ def main():
|
|||||||
help='CPU Serial device', required=True)
|
help='CPU Serial device', required=True)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--ec', type=str, help='EC Serial device', required=True)
|
'--ec', type=str, help='EC Serial device', required=True)
|
||||||
parser.add_argument(
|
|
||||||
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
logger = CustomLogger("job_detail.json")
|
servo = CrosServoRun(args.cpu, args.ec)
|
||||||
logger.update_dut_time("start", None)
|
|
||||||
servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60, logger)
|
while True:
|
||||||
retval = servo.run()
|
retval = servo.run()
|
||||||
|
if retval != 2:
|
||||||
|
break
|
||||||
|
|
||||||
# power down the CPU on the device
|
# power down the CPU on the device
|
||||||
servo.ec_write("power off\n")
|
servo.ec_write("power off\n")
|
||||||
logger.update_dut_time("end", None)
|
|
||||||
servo.close()
|
|
||||||
|
|
||||||
sys.exit(retval)
|
sys.exit(retval)
|
||||||
|
|
||||||
|
@@ -1,10 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
relay=$1
|
|
||||||
|
|
||||||
if [ -z "$relay" ]; then
|
|
||||||
echo "Must supply a relay arg"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
|
@@ -1,28 +0,0 @@
|
|||||||
#!/usr/bin/python3
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import socket
|
|
||||||
|
|
||||||
host = sys.argv[1]
|
|
||||||
port = sys.argv[2]
|
|
||||||
mode = sys.argv[3]
|
|
||||||
relay = sys.argv[4]
|
|
||||||
msg = None
|
|
||||||
|
|
||||||
if mode == "on":
|
|
||||||
msg = b'\x20'
|
|
||||||
else:
|
|
||||||
msg = b'\x21'
|
|
||||||
|
|
||||||
msg += int(relay).to_bytes(1, 'big')
|
|
||||||
msg += b'\x00'
|
|
||||||
|
|
||||||
c = socket.create_connection((host, int(port)))
|
|
||||||
c.sendall(msg)
|
|
||||||
|
|
||||||
data = c.recv(1)
|
|
||||||
c.close()
|
|
||||||
|
|
||||||
if data[0] == b'\x01':
|
|
||||||
print('Command failed')
|
|
||||||
sys.exit(1)
|
|
@@ -1,12 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
relay=$1
|
|
||||||
|
|
||||||
if [ -z "$relay" ]; then
|
|
||||||
echo "Must supply a relay arg"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
|
|
||||||
sleep 5
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" on "$relay"
|
|
@@ -5,27 +5,26 @@ set -e
|
|||||||
STRINGS=$(mktemp)
|
STRINGS=$(mktemp)
|
||||||
ERRORS=$(mktemp)
|
ERRORS=$(mktemp)
|
||||||
|
|
||||||
trap 'rm $STRINGS; rm $ERRORS;' EXIT
|
trap "rm $STRINGS; rm $ERRORS;" EXIT
|
||||||
|
|
||||||
FILE=$1
|
FILE=$1
|
||||||
shift 1
|
shift 1
|
||||||
|
|
||||||
while getopts "f:e:" opt; do
|
while getopts "f:e:" opt; do
|
||||||
case $opt in
|
case $opt in
|
||||||
f) echo "$OPTARG" >> "$STRINGS";;
|
f) echo "$OPTARG" >> $STRINGS;;
|
||||||
e) echo "$OPTARG" >> "$STRINGS" ; echo "$OPTARG" >> "$ERRORS";;
|
e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
|
||||||
*) exit
|
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
shift $((OPTIND -1))
|
shift $((OPTIND -1))
|
||||||
|
|
||||||
echo "Waiting for $FILE to say one of following strings"
|
echo "Waiting for $FILE to say one of following strings"
|
||||||
cat "$STRINGS"
|
cat $STRINGS
|
||||||
|
|
||||||
while ! grep -E -wf "$STRINGS" "$FILE"; do
|
while ! egrep -wf $STRINGS $FILE; do
|
||||||
sleep 2
|
sleep 2
|
||||||
done
|
done
|
||||||
|
|
||||||
if grep -E -wf "$ERRORS" "$FILE"; then
|
if egrep -wf $ERRORS $FILE; then
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
@@ -1,14 +1,8 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
|
||||||
|
|
||||||
if [ -z "$BM_SERIAL" ] && [ -z "$BM_SERIAL_SCRIPT" ]; then
|
if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
|
||||||
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
|
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
|
||||||
echo "BM_SERIAL:"
|
echo "BM_SERIAL:"
|
||||||
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
|
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
|
||||||
@@ -51,117 +45,89 @@ if [ -z "$BM_ROOTFS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
|
if [ -z "$BM_WEBDAV_IP" -o -z "$BM_WEBDAV_PORT" ]; then
|
||||||
BM_FASTBOOT_NFSROOT=1
|
echo "BM_WEBDAV_IP and/or BM_WEBDAV_PORT is not set - no results will be uploaded from DUT!"
|
||||||
|
WEBDAV_CMDLINE=""
|
||||||
|
else
|
||||||
|
WEBDAV_CMDLINE="webdav=http://$BM_WEBDAV_IP:$BM_WEBDAV_PORT"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
# Clear out any previous run's artifacts.
|
# Clear out any previous run's artifacts.
|
||||||
rm -rf results/
|
rm -rf results/
|
||||||
mkdir -p results/
|
mkdir -p results
|
||||||
|
|
||||||
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
|
# Create the rootfs in a temp dir
|
||||||
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
|
rsync -a --delete $BM_ROOTFS/ rootfs/
|
||||||
# state, since it's volume-mounted on the host.
|
. $BM/rootfs-setup.sh rootfs
|
||||||
rsync -a --delete $BM_ROOTFS/ /nfs/
|
|
||||||
mkdir -p /nfs/results
|
|
||||||
. $BM/rootfs-setup.sh /nfs
|
|
||||||
|
|
||||||
# Root on NFS, no need for an inintramfs.
|
# Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
|
||||||
rm -f rootfs.cpio.gz
|
# these devices use it and it would take up space in the initrd.
|
||||||
touch rootfs.cpio
|
|
||||||
gzip rootfs.cpio
|
if [ -n "$PIGLIT_PROFILES" ]; then
|
||||||
|
EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
|
||||||
else
|
else
|
||||||
# Create the rootfs in a temp dir
|
EXCLUDE_FILTER="piglit|python"
|
||||||
rsync -a --delete $BM_ROOTFS/ rootfs/
|
|
||||||
. $BM/rootfs-setup.sh rootfs
|
|
||||||
|
|
||||||
# Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
|
|
||||||
# these devices use it and it would take up space in the initrd.
|
|
||||||
|
|
||||||
if [ -n "$PIGLIT_PROFILES" ]; then
|
|
||||||
EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
|
|
||||||
else
|
|
||||||
EXCLUDE_FILTER="piglit|python"
|
|
||||||
fi
|
|
||||||
|
|
||||||
pushd rootfs
|
|
||||||
find -H . | \
|
|
||||||
grep -E -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
|
|
||||||
grep -E -v "traces-db|apitrace|renderdoc" | \
|
|
||||||
grep -E -v $EXCLUDE_FILTER | \
|
|
||||||
cpio -H newc -o | \
|
|
||||||
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
|
|
||||||
popd
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
pushd rootfs
|
||||||
|
find -H | \
|
||||||
|
egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
|
||||||
|
egrep -v "traces-db|apitrace|renderdoc" | \
|
||||||
|
egrep -v $EXCLUDE_FILTER | \
|
||||||
|
cpio -H newc -o | \
|
||||||
|
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
|
||||||
|
popd
|
||||||
|
|
||||||
|
# Make the combined kernel image and dtb for passing to fastboot. For normal
|
||||||
|
# Mesa development, we build the kernel and store it in the docker container
|
||||||
|
# that this script is running in.
|
||||||
|
#
|
||||||
|
# However, container builds are expensive, so when you're hacking on the
|
||||||
|
# kernel, it's nice to be able to skip the half hour container build and plus
|
||||||
|
# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
|
||||||
|
# fetch them instead of looking in the container.
|
||||||
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
|
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt install -y wget
|
||||||
"$BM_KERNEL" -o kernel
|
|
||||||
# FIXME: modules should be supplied too
|
wget $BM_KERNEL -O kernel
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
wget $BM_DTB -O dtb
|
||||||
"$BM_DTB" -o dtb
|
|
||||||
|
|
||||||
cat kernel dtb > Image.gz-dtb
|
cat kernel dtb > Image.gz-dtb
|
||||||
|
rm kernel dtb
|
||||||
elif [ -n "${FORCE_KERNEL_TAG}" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
|
|
||||||
if [ -n "$BM_DTB" ]; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o dtb
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat kernel dtb > Image.gz-dtb || echo "No DTB available, using pure kernel."
|
|
||||||
rm kernel
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "$BM_ROOTFS/"
|
|
||||||
rm modules.tar.zst &
|
|
||||||
else
|
else
|
||||||
cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
|
cat $BM_KERNEL $BM_DTB > Image.gz-dtb
|
||||||
cp /baremetal-files/"$BM_DTB".dtb dtb
|
fi
|
||||||
|
|
||||||
|
abootimg \
|
||||||
|
--create artifacts/fastboot.img \
|
||||||
|
-k Image.gz-dtb \
|
||||||
|
-r rootfs.cpio.gz \
|
||||||
|
-c cmdline="$BM_CMDLINE $WEBDAV_CMDLINE"
|
||||||
|
rm Image.gz-dtb
|
||||||
|
|
||||||
|
# Start nginx to get results from DUT
|
||||||
|
if [ -n "$WEBDAV_CMDLINE" ]; then
|
||||||
|
ln -s `pwd`/results /results
|
||||||
|
sed -i s/80/$BM_WEBDAV_PORT/g /etc/nginx/sites-enabled/default
|
||||||
|
sed -i s/www-data/root/g /etc/nginx/nginx.conf
|
||||||
|
nginx
|
||||||
fi
|
fi
|
||||||
|
|
||||||
export PATH=$BM:$PATH
|
export PATH=$BM:$PATH
|
||||||
|
|
||||||
mkdir -p artifacts
|
|
||||||
mkbootimg.py \
|
|
||||||
--kernel Image.gz-dtb \
|
|
||||||
--ramdisk rootfs.cpio.gz \
|
|
||||||
--dtb dtb \
|
|
||||||
--cmdline "$BM_CMDLINE" \
|
|
||||||
$BM_MKBOOT_PARAMS \
|
|
||||||
--header_version 2 \
|
|
||||||
-o artifacts/fastboot.img
|
|
||||||
|
|
||||||
rm Image.gz-dtb dtb
|
|
||||||
|
|
||||||
# Start background command for talking to serial if we have one.
|
# Start background command for talking to serial if we have one.
|
||||||
if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
if [ -n "$BM_SERIAL_SCRIPT" ]; then
|
||||||
$BM_SERIAL_SCRIPT > results/serial-output.txt &
|
$BM_SERIAL_SCRIPT | tee results/serial-output.txt &
|
||||||
|
|
||||||
while [ ! -e results/serial-output.txt ]; do
|
while [ ! -e results/serial-output.txt ]; do
|
||||||
sleep 1
|
sleep 1
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
set +e
|
|
||||||
$BM/fastboot_run.py \
|
$BM/fastboot_run.py \
|
||||||
--dev="$BM_SERIAL" \
|
--dev="$BM_SERIAL" \
|
||||||
--test-timeout ${TEST_PHASE_TIMEOUT:-20} \
|
|
||||||
--fbserial="$BM_FASTBOOT_SERIAL" \
|
--fbserial="$BM_FASTBOOT_SERIAL" \
|
||||||
--powerup="$BM_POWERUP" \
|
--powerup="$BM_POWERUP" \
|
||||||
--powerdown="$BM_POWERDOWN"
|
--powerdown="$BM_POWERDOWN"
|
||||||
ret=$?
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
|
|
||||||
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
|
||||||
# will look for them.
|
|
||||||
cp -Rp /nfs/results/. results/
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit $ret
|
|
||||||
|
@@ -22,138 +22,81 @@
|
|||||||
# IN THE SOFTWARE.
|
# IN THE SOFTWARE.
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import subprocess
|
import os
|
||||||
import re
|
import re
|
||||||
from serial_buffer import SerialBuffer
|
from serial_buffer import SerialBuffer
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
|
|
||||||
class FastbootRun:
|
class FastbootRun:
|
||||||
def __init__(self, args, test_timeout):
|
def __init__(self, args):
|
||||||
self.powerup = args.powerup
|
self.powerup = args.powerup
|
||||||
self.ser = SerialBuffer(
|
self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ")
|
||||||
args.dev, "results/serial-output.txt", "R SERIAL> ")
|
self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)
|
||||||
self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(
|
|
||||||
ser=args.fbserial)
|
|
||||||
self.test_timeout = test_timeout
|
|
||||||
|
|
||||||
def close(self):
|
def logged_system(self, cmd):
|
||||||
self.ser.close()
|
|
||||||
|
|
||||||
def print_error(self, message):
|
|
||||||
RED = '\033[0;31m'
|
|
||||||
NO_COLOR = '\033[0m'
|
|
||||||
print(RED + message + NO_COLOR)
|
|
||||||
|
|
||||||
def logged_system(self, cmd, timeout=60):
|
|
||||||
print("Running '{}'".format(cmd))
|
print("Running '{}'".format(cmd))
|
||||||
try:
|
return os.system(cmd)
|
||||||
return subprocess.call(cmd, shell=True, timeout=timeout)
|
|
||||||
except subprocess.TimeoutExpired:
|
|
||||||
self.print_error("timeout, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
if ret := self.logged_system(self.powerup):
|
if self.logged_system(self.powerup) != 0:
|
||||||
return ret
|
return 1
|
||||||
|
|
||||||
fastboot_ready = False
|
fastboot_ready = False
|
||||||
for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
|
for line in self.ser.lines():
|
||||||
if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \
|
if re.search("fastboot: processing commands", line) or \
|
||||||
re.search("Listening for fastboot command on", line):
|
re.search("Listening for fastboot command on", line):
|
||||||
fastboot_ready = True
|
fastboot_ready = True
|
||||||
break
|
break
|
||||||
|
|
||||||
if re.search("data abort", line):
|
if re.search("data abort", line):
|
||||||
self.print_error(
|
|
||||||
"Detected crash during boot, abandoning run.")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if not fastboot_ready:
|
if not fastboot_ready:
|
||||||
self.print_error(
|
print("Failed to get to fastboot prompt")
|
||||||
"Failed to get to fastboot prompt, abandoning run.")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if ret := self.logged_system(self.fastboot):
|
if self.logged_system(self.fastboot) != 0:
|
||||||
return ret
|
return 1
|
||||||
|
|
||||||
print_more_lines = -1
|
|
||||||
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
|
||||||
if print_more_lines == 0:
|
|
||||||
return 1
|
|
||||||
if print_more_lines > 0:
|
|
||||||
print_more_lines -= 1
|
|
||||||
|
|
||||||
|
for line in self.ser.lines():
|
||||||
if re.search("---. end Kernel panic", line):
|
if re.search("---. end Kernel panic", line):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
# The db820c boards intermittently reboot. Just restart the run
|
# The db820c boards intermittently reboot. Just restart the run
|
||||||
# when if we see a reboot after we got past fastboot.
|
# when if we see a reboot after we got past fastboot.
|
||||||
if re.search("PON REASON", line):
|
if re.search("PON REASON", line):
|
||||||
self.print_error(
|
print("Detected spontaneous reboot, restarting run...")
|
||||||
"Detected spontaneous reboot, abandoning run.")
|
return 2
|
||||||
return 1
|
|
||||||
|
|
||||||
# db820c sometimes wedges around iommu fault recovery
|
result = re.search("bare-metal result: (\S*)", line)
|
||||||
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
|
|
||||||
self.print_error(
|
|
||||||
"Detected kernel soft lockup, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# If the network device dies, it's probably not graphics's fault, just try again.
|
|
||||||
if re.search("NETDEV WATCHDOG", line):
|
|
||||||
self.print_error(
|
|
||||||
"Detected network device failure, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# A3xx recovery doesn't quite work. Sometimes the GPU will get
|
|
||||||
# wedged and recovery will fail (because power can't be reset?)
|
|
||||||
# This assumes that the jobs are sufficiently well-tested that GPU
|
|
||||||
# hangs aren't always triggered, so just try again. But print some
|
|
||||||
# more lines first so that we get better information on the cause
|
|
||||||
# of the hang. Once a hang happens, it's pretty chatty.
|
|
||||||
if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
|
|
||||||
self.print_error(
|
|
||||||
"Detected GPU hang, abandoning run.")
|
|
||||||
if print_more_lines == -1:
|
|
||||||
print_more_lines = 30
|
|
||||||
|
|
||||||
result = re.search("hwci: mesa: (\S*)", line)
|
|
||||||
if result:
|
if result:
|
||||||
if result.group(1) == "pass":
|
if result.group(1) == "pass":
|
||||||
return 0
|
return 0
|
||||||
else:
|
else:
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
self.print_error(
|
print("Reached the end of the CPU serial log without finding a result")
|
||||||
"Reached the end of the CPU serial log without finding a result, abandoning run.")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument(
|
parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
|
||||||
'--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
|
parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
|
||||||
parser.add_argument('--powerup', type=str,
|
parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
|
||||||
help='shell command for rebooting', required=True)
|
parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
|
||||||
parser.add_argument('--powerdown', type=str,
|
|
||||||
help='shell command for powering off', required=True)
|
|
||||||
parser.add_argument('--fbserial', type=str,
|
|
||||||
help='fastboot serial number of the board', required=True)
|
|
||||||
parser.add_argument('--test-timeout', type=int,
|
|
||||||
help='Test phase timeout (minutes)', required=True)
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
fastboot = FastbootRun(args, args.test_timeout * 60)
|
fastboot = FastbootRun(args)
|
||||||
|
|
||||||
retval = fastboot.run()
|
while True:
|
||||||
fastboot.close()
|
retval = fastboot.run()
|
||||||
|
if retval != 2:
|
||||||
|
break
|
||||||
|
|
||||||
fastboot.logged_system(args.powerdown)
|
fastboot.logged_system(args.powerdown)
|
||||||
|
|
||||||
sys.exit(retval)
|
sys.exit(retval)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
@@ -7,4 +7,4 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
|
||||||
|
@@ -8,8 +8,8 @@ relay = sys.argv[2]
|
|||||||
|
|
||||||
# our relays are "off" means "board is powered".
|
# our relays are "off" means "board is powered".
|
||||||
mode_swap = {
|
mode_swap = {
|
||||||
"on": "off",
|
"on" : "off",
|
||||||
"off": "on",
|
"off" : "on",
|
||||||
}
|
}
|
||||||
mode = mode_swap[mode]
|
mode = mode_swap[mode]
|
||||||
|
|
||||||
|
@@ -7,6 +7,6 @@ if [ -z "$relay" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
|
||||||
sleep 5
|
sleep 5
|
||||||
"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py on "$relay"
|
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay
|
||||||
|
48
.gitlab-ci/bare-metal/init.sh
Executable file
48
.gitlab-ci/bare-metal/init.sh
Executable file
@@ -0,0 +1,48 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
mount -t proc none /proc
|
||||||
|
mount -t sysfs none /sys
|
||||||
|
mount -t devtmpfs none /dev || echo possibly already mounted
|
||||||
|
mkdir -p /dev/pts
|
||||||
|
mount -t devpts devpts /dev/pts
|
||||||
|
mount -t tmpfs tmpfs /tmp
|
||||||
|
|
||||||
|
. /set-job-env-vars.sh
|
||||||
|
|
||||||
|
[ -z "$BM_KERNEL_MODULES" ] || modprobe "$BM_KERNEL_MODULES"
|
||||||
|
|
||||||
|
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
||||||
|
export XDG_CACHE_HOME=/tmp
|
||||||
|
|
||||||
|
echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
||||||
|
|
||||||
|
# Not all DUTs have network
|
||||||
|
sntp -sS pool.ntp.org || true
|
||||||
|
|
||||||
|
# Start a little daemon to capture the first devcoredump we encounter. (They
|
||||||
|
# expire after 5 minutes, so we poll for them).
|
||||||
|
./capture-devcoredump.sh &
|
||||||
|
|
||||||
|
if sh $BARE_METAL_TEST_SCRIPT; then
|
||||||
|
OK=1
|
||||||
|
else
|
||||||
|
OK=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# upload artifacts via webdav
|
||||||
|
WEBDAV=$(cat /proc/cmdline | tr " " "\n" | grep webdav | cut -d '=' -f 2 || true)
|
||||||
|
if [ -n "$WEBDAV" ]; then
|
||||||
|
find /results -type f -exec curl -T {} $WEBDAV/{} \;
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $OK -eq 1 ]; then
|
||||||
|
echo "bare-metal result: pass"
|
||||||
|
else
|
||||||
|
echo "bare-metal result: fail"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Wait until the job would have timed out anyway, so we don't spew a "init
|
||||||
|
# exited" panic.
|
||||||
|
sleep 6000
|
@@ -1,569 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
#
|
|
||||||
# Copyright 2015, The Android Open Source Project
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
"""Creates the boot image."""
|
|
||||||
from argparse import (ArgumentParser, ArgumentTypeError,
|
|
||||||
FileType, RawDescriptionHelpFormatter)
|
|
||||||
from hashlib import sha1
|
|
||||||
from os import fstat
|
|
||||||
from struct import pack
|
|
||||||
import array
|
|
||||||
import collections
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import tempfile
|
|
||||||
# Constant and structure definition is in
|
|
||||||
# system/tools/mkbootimg/include/bootimg/bootimg.h
|
|
||||||
BOOT_MAGIC = 'ANDROID!'
|
|
||||||
BOOT_MAGIC_SIZE = 8
|
|
||||||
BOOT_NAME_SIZE = 16
|
|
||||||
BOOT_ARGS_SIZE = 512
|
|
||||||
BOOT_EXTRA_ARGS_SIZE = 1024
|
|
||||||
BOOT_IMAGE_HEADER_V1_SIZE = 1648
|
|
||||||
BOOT_IMAGE_HEADER_V2_SIZE = 1660
|
|
||||||
BOOT_IMAGE_HEADER_V3_SIZE = 1580
|
|
||||||
BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
|
|
||||||
BOOT_IMAGE_HEADER_V4_SIZE = 1584
|
|
||||||
BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096
|
|
||||||
VENDOR_BOOT_MAGIC = 'VNDRBOOT'
|
|
||||||
VENDOR_BOOT_MAGIC_SIZE = 8
|
|
||||||
VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
|
|
||||||
VENDOR_BOOT_ARGS_SIZE = 2048
|
|
||||||
VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
|
|
||||||
VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128
|
|
||||||
VENDOR_RAMDISK_TYPE_NONE = 0
|
|
||||||
VENDOR_RAMDISK_TYPE_PLATFORM = 1
|
|
||||||
VENDOR_RAMDISK_TYPE_RECOVERY = 2
|
|
||||||
VENDOR_RAMDISK_TYPE_DLKM = 3
|
|
||||||
VENDOR_RAMDISK_NAME_SIZE = 32
|
|
||||||
VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
|
|
||||||
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108
|
|
||||||
# Names with special meaning, mustn't be specified in --ramdisk_name.
|
|
||||||
VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}
|
|
||||||
PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
|
|
||||||
def filesize(f):
|
|
||||||
if f is None:
|
|
||||||
return 0
|
|
||||||
try:
|
|
||||||
return fstat(f.fileno()).st_size
|
|
||||||
except OSError:
|
|
||||||
return 0
|
|
||||||
def update_sha(sha, f):
|
|
||||||
if f:
|
|
||||||
sha.update(f.read())
|
|
||||||
f.seek(0)
|
|
||||||
sha.update(pack('I', filesize(f)))
|
|
||||||
else:
|
|
||||||
sha.update(pack('I', 0))
|
|
||||||
def pad_file(f, padding):
|
|
||||||
pad = (padding - (f.tell() & (padding - 1))) & (padding - 1)
|
|
||||||
f.write(pack(str(pad) + 'x'))
|
|
||||||
def get_number_of_pages(image_size, page_size):
|
|
||||||
"""calculates the number of pages required for the image"""
|
|
||||||
return (image_size + page_size - 1) // page_size
|
|
||||||
def get_recovery_dtbo_offset(args):
|
|
||||||
"""calculates the offset of recovery_dtbo image in the boot image"""
|
|
||||||
num_header_pages = 1 # header occupies a page
|
|
||||||
num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize)
|
|
||||||
num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk),
|
|
||||||
args.pagesize)
|
|
||||||
num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize)
|
|
||||||
dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages +
|
|
||||||
num_ramdisk_pages + num_second_pages)
|
|
||||||
return dtbo_offset
|
|
||||||
def write_header_v3_and_above(args):
|
|
||||||
if args.header_version > 3:
|
|
||||||
boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
|
|
||||||
else:
|
|
||||||
boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE
|
|
||||||
args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
|
|
||||||
# kernel size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.kernel)))
|
|
||||||
# ramdisk size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.ramdisk)))
|
|
||||||
# os version and patch level
|
|
||||||
args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
|
|
||||||
args.output.write(pack('I', boot_header_size))
|
|
||||||
# reserved
|
|
||||||
args.output.write(pack('4I', 0, 0, 0, 0))
|
|
||||||
# version of boot image header
|
|
||||||
args.output.write(pack('I', args.header_version))
|
|
||||||
args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
|
|
||||||
args.cmdline))
|
|
||||||
if args.header_version >= 4:
|
|
||||||
# The signature used to verify boot image v4.
|
|
||||||
args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
|
|
||||||
pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
|
|
||||||
def write_vendor_boot_header(args):
|
|
||||||
if filesize(args.dtb) == 0:
|
|
||||||
raise ValueError('DTB image must not be empty.')
|
|
||||||
if args.header_version > 3:
|
|
||||||
vendor_ramdisk_size = args.vendor_ramdisk_total_size
|
|
||||||
vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
|
|
||||||
else:
|
|
||||||
vendor_ramdisk_size = filesize(args.vendor_ramdisk)
|
|
||||||
vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE
|
|
||||||
args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
|
|
||||||
VENDOR_BOOT_MAGIC.encode()))
|
|
||||||
# version of boot image header
|
|
||||||
args.vendor_boot.write(pack('I', args.header_version))
|
|
||||||
# flash page size
|
|
||||||
args.vendor_boot.write(pack('I', args.pagesize))
|
|
||||||
# kernel physical load address
|
|
||||||
args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
|
|
||||||
# ramdisk physical load address
|
|
||||||
args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
|
|
||||||
# ramdisk size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', vendor_ramdisk_size))
|
|
||||||
args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
|
|
||||||
args.vendor_cmdline))
|
|
||||||
# kernel tags physical load address
|
|
||||||
args.vendor_boot.write(pack('I', args.base + args.tags_offset))
|
|
||||||
# asciiz product name
|
|
||||||
args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
|
|
||||||
# header size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', vendor_boot_header_size))
|
|
||||||
# dtb size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', filesize(args.dtb)))
|
|
||||||
# dtb physical load address
|
|
||||||
args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))
|
|
||||||
if args.header_version > 3:
|
|
||||||
vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
|
|
||||||
VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
|
|
||||||
# vendor ramdisk table size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
|
|
||||||
# number of vendor ramdisk table entries
|
|
||||||
args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
|
|
||||||
# vendor ramdisk table entry size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
|
|
||||||
# bootconfig section size in bytes
|
|
||||||
args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
|
|
||||||
pad_file(args.vendor_boot, args.pagesize)
|
|
||||||
def write_header(args):
|
|
||||||
if args.header_version > 4:
|
|
||||||
raise ValueError(
|
|
||||||
f'Boot header version {args.header_version} not supported')
|
|
||||||
if args.header_version in {3, 4}:
|
|
||||||
return write_header_v3_and_above(args)
|
|
||||||
ramdisk_load_address = ((args.base + args.ramdisk_offset)
|
|
||||||
if filesize(args.ramdisk) > 0 else 0)
|
|
||||||
second_load_address = ((args.base + args.second_offset)
|
|
||||||
if filesize(args.second) > 0 else 0)
|
|
||||||
args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
|
|
||||||
# kernel size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.kernel)))
|
|
||||||
# kernel physical load address
|
|
||||||
args.output.write(pack('I', args.base + args.kernel_offset))
|
|
||||||
# ramdisk size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.ramdisk)))
|
|
||||||
# ramdisk physical load address
|
|
||||||
args.output.write(pack('I', ramdisk_load_address))
|
|
||||||
# second bootloader size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.second)))
|
|
||||||
# second bootloader physical load address
|
|
||||||
args.output.write(pack('I', second_load_address))
|
|
||||||
# kernel tags physical load address
|
|
||||||
args.output.write(pack('I', args.base + args.tags_offset))
|
|
||||||
# flash page size
|
|
||||||
args.output.write(pack('I', args.pagesize))
|
|
||||||
# version of boot image header
|
|
||||||
args.output.write(pack('I', args.header_version))
|
|
||||||
# os version and patch level
|
|
||||||
args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
|
|
||||||
# asciiz product name
|
|
||||||
args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
|
|
||||||
args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))
|
|
||||||
sha = sha1()
|
|
||||||
update_sha(sha, args.kernel)
|
|
||||||
update_sha(sha, args.ramdisk)
|
|
||||||
update_sha(sha, args.second)
|
|
||||||
if args.header_version > 0:
|
|
||||||
update_sha(sha, args.recovery_dtbo)
|
|
||||||
if args.header_version > 1:
|
|
||||||
update_sha(sha, args.dtb)
|
|
||||||
img_id = pack('32s', sha.digest())
|
|
||||||
args.output.write(img_id)
|
|
||||||
args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))
|
|
||||||
if args.header_version > 0:
|
|
||||||
if args.recovery_dtbo:
|
|
||||||
# recovery dtbo size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.recovery_dtbo)))
|
|
||||||
# recovert dtbo offset in the boot image
|
|
||||||
args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
|
|
||||||
else:
|
|
||||||
# Set to zero if no recovery dtbo
|
|
||||||
args.output.write(pack('I', 0))
|
|
||||||
args.output.write(pack('Q', 0))
|
|
||||||
# Populate boot image header size for header versions 1 and 2.
|
|
||||||
if args.header_version == 1:
|
|
||||||
args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
|
|
||||||
elif args.header_version == 2:
|
|
||||||
args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))
|
|
||||||
if args.header_version > 1:
|
|
||||||
if filesize(args.dtb) == 0:
|
|
||||||
raise ValueError('DTB image must not be empty.')
|
|
||||||
# dtb size in bytes
|
|
||||||
args.output.write(pack('I', filesize(args.dtb)))
|
|
||||||
# dtb physical load address
|
|
||||||
args.output.write(pack('Q', args.base + args.dtb_offset))
|
|
||||||
pad_file(args.output, args.pagesize)
|
|
||||||
return img_id
|
|
||||||
class AsciizBytes:
|
|
||||||
"""Parses a string and encodes it as an asciiz bytes object.
|
|
||||||
>>> AsciizBytes(bufsize=4)('foo')
|
|
||||||
b'foo\\x00'
|
|
||||||
>>> AsciizBytes(bufsize=4)('foob')
|
|
||||||
Traceback (most recent call last):
|
|
||||||
...
|
|
||||||
argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
|
|
||||||
"""
|
|
||||||
def __init__(self, bufsize):
|
|
||||||
self.bufsize = bufsize
|
|
||||||
def __call__(self, arg):
|
|
||||||
arg_bytes = arg.encode() + b'\x00'
|
|
||||||
if len(arg_bytes) > self.bufsize:
|
|
||||||
raise ArgumentTypeError(
|
|
||||||
'Encoded asciiz length exceeded: '
|
|
||||||
f'max {self.bufsize}, got {len(arg_bytes)}')
|
|
||||||
return arg_bytes
|
|
||||||
class VendorRamdiskTableBuilder:
|
|
||||||
"""Vendor ramdisk table builder.
|
|
||||||
Attributes:
|
|
||||||
entries: A list of VendorRamdiskTableEntry namedtuple.
|
|
||||||
ramdisk_total_size: Total size in bytes of all ramdisks in the table.
|
|
||||||
"""
|
|
||||||
VendorRamdiskTableEntry = collections.namedtuple( # pylint: disable=invalid-name
|
|
||||||
'VendorRamdiskTableEntry',
|
|
||||||
['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
|
|
||||||
'ramdisk_name', 'board_id'])
|
|
||||||
def __init__(self):
|
|
||||||
self.entries = []
|
|
||||||
self.ramdisk_total_size = 0
|
|
||||||
self.ramdisk_names = set()
|
|
||||||
def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
|
|
||||||
# Strip any trailing null for simple comparison.
|
|
||||||
stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
|
|
||||||
if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
|
|
||||||
raise ValueError(
|
|
||||||
f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
|
|
||||||
if stripped_ramdisk_name in self.ramdisk_names:
|
|
||||||
raise ValueError(
|
|
||||||
f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
|
|
||||||
self.ramdisk_names.add(stripped_ramdisk_name)
|
|
||||||
if board_id is None:
|
|
||||||
board_id = array.array(
|
|
||||||
'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
|
|
||||||
else:
|
|
||||||
board_id = array.array('I', board_id)
|
|
||||||
if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
|
|
||||||
raise ValueError('board_id size must be '
|
|
||||||
f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')
|
|
||||||
with open(ramdisk_path, 'rb') as f:
|
|
||||||
ramdisk_size = filesize(f)
|
|
||||||
self.entries.append(self.VendorRamdiskTableEntry(
|
|
||||||
ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
|
|
||||||
ramdisk_name, board_id))
|
|
||||||
self.ramdisk_total_size += ramdisk_size
|
|
||||||
def write_ramdisks_padded(self, fout, alignment):
|
|
||||||
for entry in self.entries:
|
|
||||||
with open(entry.ramdisk_path, 'rb') as f:
|
|
||||||
fout.write(f.read())
|
|
||||||
pad_file(fout, alignment)
|
|
||||||
def write_entries_padded(self, fout, alignment):
|
|
||||||
for entry in self.entries:
|
|
||||||
fout.write(pack('I', entry.ramdisk_size))
|
|
||||||
fout.write(pack('I', entry.ramdisk_offset))
|
|
||||||
fout.write(pack('I', entry.ramdisk_type))
|
|
||||||
fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
|
|
||||||
entry.ramdisk_name))
|
|
||||||
fout.write(entry.board_id)
|
|
||||||
pad_file(fout, alignment)
|
|
||||||
def write_padded_file(f_out, f_in, padding):
|
|
||||||
if f_in is None:
|
|
||||||
return
|
|
||||||
f_out.write(f_in.read())
|
|
||||||
pad_file(f_out, padding)
|
|
||||||
def parse_int(x):
|
|
||||||
return int(x, 0)
|
|
||||||
def parse_os_version(x):
|
|
||||||
match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
|
|
||||||
if match:
|
|
||||||
a = int(match.group(1))
|
|
||||||
b = c = 0
|
|
||||||
if match.lastindex >= 2:
|
|
||||||
b = int(match.group(2))
|
|
||||||
if match.lastindex == 3:
|
|
||||||
c = int(match.group(3))
|
|
||||||
# 7 bits allocated for each field
|
|
||||||
assert a < 128
|
|
||||||
assert b < 128
|
|
||||||
assert c < 128
|
|
||||||
return (a << 14) | (b << 7) | c
|
|
||||||
return 0
|
|
||||||
def parse_os_patch_level(x):
|
|
||||||
match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
|
|
||||||
if match:
|
|
||||||
y = int(match.group(1)) - 2000
|
|
||||||
m = int(match.group(2))
|
|
||||||
# 7 bits allocated for the year, 4 bits for the month
|
|
||||||
assert 0 <= y < 128
|
|
||||||
assert 0 < m <= 12
|
|
||||||
return (y << 4) | m
|
|
||||||
return 0
|
|
||||||
def parse_vendor_ramdisk_type(x):
|
|
||||||
type_dict = {
|
|
||||||
'none': VENDOR_RAMDISK_TYPE_NONE,
|
|
||||||
'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
|
|
||||||
'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
|
|
||||||
'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
|
|
||||||
}
|
|
||||||
if x.lower() in type_dict:
|
|
||||||
return type_dict[x.lower()]
|
|
||||||
return parse_int(x)
|
|
||||||
def get_vendor_boot_v4_usage():
|
|
||||||
return """vendor boot version 4 arguments:
|
|
||||||
--ramdisk_type {none,platform,recovery,dlkm}
|
|
||||||
specify the type of the ramdisk
|
|
||||||
--ramdisk_name NAME
|
|
||||||
specify the name of the ramdisk
|
|
||||||
--board_id{0..15} NUMBER
|
|
||||||
specify the value of the board_id vector, defaults to 0
|
|
||||||
--vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
|
|
||||||
path to the vendor ramdisk file
|
|
||||||
These options can be specified multiple times, where each vendor ramdisk
|
|
||||||
option group ends with a --vendor_ramdisk_fragment option.
|
|
||||||
Each option group appends an additional ramdisk to the vendor boot image.
|
|
||||||
"""
|
|
||||||
def parse_vendor_ramdisk_args(args, args_list):
|
|
||||||
"""Parses vendor ramdisk specific arguments.
|
|
||||||
Args:
|
|
||||||
args: An argparse.Namespace object. Parsed results are stored into this
|
|
||||||
object.
|
|
||||||
args_list: A list of argument strings to be parsed.
|
|
||||||
Returns:
|
|
||||||
A list argument strings that are not parsed by this method.
|
|
||||||
"""
|
|
||||||
parser = ArgumentParser(add_help=False)
|
|
||||||
parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
|
|
||||||
default=VENDOR_RAMDISK_TYPE_NONE)
|
|
||||||
parser.add_argument('--ramdisk_name',
|
|
||||||
type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
|
|
||||||
required=True)
|
|
||||||
for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
|
|
||||||
parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
|
|
||||||
parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)
|
|
||||||
unknown_args = []
|
|
||||||
vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
|
|
||||||
if args.vendor_ramdisk is not None:
|
|
||||||
vendor_ramdisk_table_builder.add_entry(
|
|
||||||
args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)
|
|
||||||
while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
|
|
||||||
idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
|
|
||||||
vendor_ramdisk_args = args_list[:idx]
|
|
||||||
args_list = args_list[idx:]
|
|
||||||
ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
|
|
||||||
ramdisk_args_dict = vars(ramdisk_args)
|
|
||||||
unknown_args.extend(extra_args)
|
|
||||||
ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
|
|
||||||
ramdisk_type = ramdisk_args.ramdisk_type
|
|
||||||
ramdisk_name = ramdisk_args.ramdisk_name
|
|
||||||
board_id = [ramdisk_args_dict[f'board_id{i}']
|
|
||||||
for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
|
|
||||||
vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
|
|
||||||
ramdisk_name, board_id)
|
|
||||||
if len(args_list) > 0:
|
|
||||||
unknown_args.extend(args_list)
|
|
||||||
args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
|
|
||||||
.ramdisk_total_size)
|
|
||||||
args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
|
|
||||||
.entries)
|
|
||||||
args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
|
|
||||||
return unknown_args
|
|
||||||
def parse_cmdline():
|
|
||||||
version_parser = ArgumentParser(add_help=False)
|
|
||||||
version_parser.add_argument('--header_version', type=parse_int, default=0)
|
|
||||||
if version_parser.parse_known_args()[0].header_version < 3:
|
|
||||||
# For boot header v0 to v2, the kernel commandline field is split into
|
|
||||||
# two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
|
|
||||||
# so we minus one here to ensure the encoded string plus the
|
|
||||||
# null-terminator can fit in the buffer size.
|
|
||||||
cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
|
|
||||||
else:
|
|
||||||
cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE
|
|
||||||
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
|
|
||||||
epilog=get_vendor_boot_v4_usage())
|
|
||||||
parser.add_argument('--kernel', type=FileType('rb'),
|
|
||||||
help='path to the kernel')
|
|
||||||
parser.add_argument('--ramdisk', type=FileType('rb'),
|
|
||||||
help='path to the ramdisk')
|
|
||||||
parser.add_argument('--second', type=FileType('rb'),
|
|
||||||
help='path to the second bootloader')
|
|
||||||
parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
|
|
||||||
dtbo_group = parser.add_mutually_exclusive_group()
|
|
||||||
dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
|
|
||||||
help='path to the recovery DTBO')
|
|
||||||
dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
|
|
||||||
metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
|
|
||||||
help='path to the recovery ACPIO')
|
|
||||||
parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
|
|
||||||
default='', help='kernel command line arguments')
|
|
||||||
parser.add_argument('--vendor_cmdline',
|
|
||||||
type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
|
|
||||||
default='',
|
|
||||||
help='vendor boot kernel command line arguments')
|
|
||||||
parser.add_argument('--base', type=parse_int, default=0x10000000,
|
|
||||||
help='base address')
|
|
||||||
parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
|
|
||||||
help='kernel offset')
|
|
||||||
parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
|
|
||||||
help='ramdisk offset')
|
|
||||||
parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
|
|
||||||
help='second bootloader offset')
|
|
||||||
parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
|
|
||||||
help='dtb offset')
|
|
||||||
parser.add_argument('--os_version', type=parse_os_version, default=0,
|
|
||||||
help='operating system version')
|
|
||||||
parser.add_argument('--os_patch_level', type=parse_os_patch_level,
|
|
||||||
default=0, help='operating system patch level')
|
|
||||||
parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
|
|
||||||
help='tags offset')
|
|
||||||
parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
|
|
||||||
default='', help='board name')
|
|
||||||
parser.add_argument('--pagesize', type=parse_int,
|
|
||||||
choices=[2**i for i in range(11, 15)], default=2048,
|
|
||||||
help='page size')
|
|
||||||
parser.add_argument('--id', action='store_true',
|
|
||||||
help='print the image ID on standard output')
|
|
||||||
parser.add_argument('--header_version', type=parse_int, default=0,
|
|
||||||
help='boot image header version')
|
|
||||||
parser.add_argument('-o', '--output', type=FileType('wb'),
|
|
||||||
help='output file name')
|
|
||||||
parser.add_argument('--gki_signing_algorithm',
|
|
||||||
help='GKI signing algorithm to use')
|
|
||||||
parser.add_argument('--gki_signing_key',
|
|
||||||
help='path to RSA private key file')
|
|
||||||
parser.add_argument('--gki_signing_signature_args',
|
|
||||||
help='other hash arguments passed to avbtool')
|
|
||||||
parser.add_argument('--gki_signing_avbtool_path',
|
|
||||||
help='path to avbtool for boot signature generation')
|
|
||||||
parser.add_argument('--vendor_boot', type=FileType('wb'),
|
|
||||||
help='vendor boot output file name')
|
|
||||||
parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
|
|
||||||
help='path to the vendor ramdisk')
|
|
||||||
parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
|
|
||||||
help='path to the vendor bootconfig file')
|
|
||||||
args, extra_args = parser.parse_known_args()
|
|
||||||
if args.vendor_boot is not None and args.header_version > 3:
|
|
||||||
extra_args = parse_vendor_ramdisk_args(args, extra_args)
|
|
||||||
if len(extra_args) > 0:
|
|
||||||
raise ValueError(f'Unrecognized arguments: {extra_args}')
|
|
||||||
if args.header_version < 3:
|
|
||||||
args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
|
|
||||||
args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
|
|
||||||
assert len(args.cmdline) <= BOOT_ARGS_SIZE
|
|
||||||
assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE
|
|
||||||
return args
|
|
||||||
def add_boot_image_signature(args, pagesize):
|
|
||||||
"""Adds the boot image signature.
|
|
||||||
Note that the signature will only be verified in VTS to ensure a
|
|
||||||
generic boot.img is used. It will not be used by the device
|
|
||||||
bootloader at boot time. The bootloader should only verify
|
|
||||||
the boot vbmeta at the end of the boot partition (or in the top-level
|
|
||||||
vbmeta partition) via the Android Verified Boot process, when the
|
|
||||||
device boots.
|
|
||||||
"""
|
|
||||||
args.output.flush() # Flush the buffer for signature calculation.
|
|
||||||
# Appends zeros if the signing key is not specified.
|
|
||||||
if not args.gki_signing_key or not args.gki_signing_algorithm:
|
|
||||||
zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
|
|
||||||
args.output.write(zeros)
|
|
||||||
pad_file(args.output, pagesize)
|
|
||||||
return
|
|
||||||
avbtool = 'avbtool' # Used from otatools.zip or Android build env.
|
|
||||||
# We need to specify the path of avbtool in build/core/Makefile.
|
|
||||||
# Because avbtool is not guaranteed to be in $PATH there.
|
|
||||||
if args.gki_signing_avbtool_path:
|
|
||||||
avbtool = args.gki_signing_avbtool_path
|
|
||||||
# Need to specify a value of --partition_size for avbtool to work.
|
|
||||||
# We use 64 MB below, but avbtool will not resize the boot image to
|
|
||||||
# this size because --do_not_append_vbmeta_image is also specified.
|
|
||||||
avbtool_cmd = [
|
|
||||||
avbtool, 'add_hash_footer',
|
|
||||||
'--partition_name', 'boot',
|
|
||||||
'--partition_size', str(64 * 1024 * 1024),
|
|
||||||
'--image', args.output.name,
|
|
||||||
'--algorithm', args.gki_signing_algorithm,
|
|
||||||
'--key', args.gki_signing_key,
|
|
||||||
'--salt', 'd00df00d'] # TODO: use a hash of kernel/ramdisk as the salt.
|
|
||||||
# Additional arguments passed to avbtool.
|
|
||||||
if args.gki_signing_signature_args:
|
|
||||||
avbtool_cmd += args.gki_signing_signature_args.split()
|
|
||||||
# Outputs the signed vbmeta to a separate file, then append to boot.img
|
|
||||||
# as the boot signature.
|
|
||||||
with tempfile.TemporaryDirectory() as temp_out_dir:
|
|
||||||
boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
|
|
||||||
avbtool_cmd += ['--do_not_append_vbmeta_image',
|
|
||||||
'--output_vbmeta_image', boot_signature_output]
|
|
||||||
subprocess.check_call(avbtool_cmd)
|
|
||||||
with open(boot_signature_output, 'rb') as boot_signature:
|
|
||||||
if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
|
|
||||||
raise ValueError(
|
|
||||||
f'boot sigature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
|
|
||||||
write_padded_file(args.output, boot_signature, pagesize)
|
|
||||||
def write_data(args, pagesize):
|
|
||||||
write_padded_file(args.output, args.kernel, pagesize)
|
|
||||||
write_padded_file(args.output, args.ramdisk, pagesize)
|
|
||||||
write_padded_file(args.output, args.second, pagesize)
|
|
||||||
if args.header_version > 0 and args.header_version < 3:
|
|
||||||
write_padded_file(args.output, args.recovery_dtbo, pagesize)
|
|
||||||
if args.header_version == 2:
|
|
||||||
write_padded_file(args.output, args.dtb, pagesize)
|
|
||||||
if args.header_version >= 4:
|
|
||||||
add_boot_image_signature(args, pagesize)
|
|
||||||
def write_vendor_boot_data(args):
|
|
||||||
if args.header_version > 3:
|
|
||||||
builder = args.vendor_ramdisk_table_builder
|
|
||||||
builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
|
|
||||||
write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
|
|
||||||
builder.write_entries_padded(args.vendor_boot, args.pagesize)
|
|
||||||
write_padded_file(args.vendor_boot, args.vendor_bootconfig,
|
|
||||||
args.pagesize)
|
|
||||||
else:
|
|
||||||
write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
|
|
||||||
write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
|
|
||||||
def main():
|
|
||||||
args = parse_cmdline()
|
|
||||||
if args.vendor_boot is not None:
|
|
||||||
if args.header_version not in {3, 4}:
|
|
||||||
raise ValueError(
|
|
||||||
'--vendor_boot not compatible with given header version')
|
|
||||||
if args.header_version == 3 and args.vendor_ramdisk is None:
|
|
||||||
raise ValueError('--vendor_ramdisk missing or invalid')
|
|
||||||
write_vendor_boot_header(args)
|
|
||||||
write_vendor_boot_data(args)
|
|
||||||
if args.output is not None:
|
|
||||||
if args.second is not None and args.header_version > 2:
|
|
||||||
raise ValueError(
|
|
||||||
'--second not compatible with given header version')
|
|
||||||
img_id = write_header(args)
|
|
||||||
if args.header_version > 2:
|
|
||||||
write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
|
|
||||||
else:
|
|
||||||
write_data(args, args.pagesize)
|
|
||||||
if args.id and img_id is not None:
|
|
||||||
print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
20
.gitlab-ci/bare-metal/nginx-default-site
Normal file
20
.gitlab-ci/bare-metal/nginx-default-site
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
server {
|
||||||
|
listen 80 default_server;
|
||||||
|
listen [::]:80 default_server;
|
||||||
|
|
||||||
|
server_name _;
|
||||||
|
|
||||||
|
location / {
|
||||||
|
dav_methods PUT;
|
||||||
|
dav_ext_methods PROPFIND OPTIONS;
|
||||||
|
dav_access user:rw group:rw all:r;
|
||||||
|
|
||||||
|
client_body_temp_path /tmp;
|
||||||
|
client_max_body_size 0;
|
||||||
|
create_full_put_path on;
|
||||||
|
|
||||||
|
root /results;
|
||||||
|
|
||||||
|
autoindex on;
|
||||||
|
}
|
||||||
|
}
|
@@ -1,16 +1,8 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
echo "Must supply the PoE Interface to power up"
|
echo "Must supply the PoE Interface to power off"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$BM_POE_ADDRESS" ]; then
|
flock /var/run/poe.lock -c "$CI_PROJECT_DIR/install/bare-metal/poe-set $BM_POE_INTERFACE off"
|
||||||
echo "Must supply the PoE Switch host"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
|
|
||||||
SNMP_OFF="i 2"
|
|
||||||
|
|
||||||
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
|
|
||||||
|
@@ -5,15 +5,4 @@ if [ -z "$BM_POE_INTERFACE" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$BM_POE_ADDRESS" ]; then
|
flock /var/run/poe.lock -c "$CI_PROJECT_DIR/install/bare-metal/poe-set $BM_POE_INTERFACE reset"
|
||||||
echo "Must supply the PoE Switch host"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
|
|
||||||
SNMP_ON="i 1"
|
|
||||||
SNMP_OFF="i 2"
|
|
||||||
|
|
||||||
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
|
|
||||||
sleep 3s
|
|
||||||
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"
|
|
||||||
|
@@ -1,18 +1,10 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC1091
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2059
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
# Boot script for devices attached to a PoE switch, using NFS for the root
|
# Boot script for devices attached to a PoE switch, using NFS for the root
|
||||||
# filesystem.
|
# filesystem.
|
||||||
|
|
||||||
# We're run from the root of the repo, make a helper var for our paths
|
# We're run from the root of the repo, make a helper var for our paths
|
||||||
BM=$CI_PROJECT_DIR/install/bare-metal
|
BM=$CI_PROJECT_DIR/install/bare-metal
|
||||||
CI_COMMON=$CI_PROJECT_DIR/install/common
|
|
||||||
CI_INSTALL=$CI_PROJECT_DIR/install
|
|
||||||
|
|
||||||
# Runner config checks
|
# Runner config checks
|
||||||
if [ -z "$BM_SERIAL" ]; then
|
if [ -z "$BM_SERIAL" ]; then
|
||||||
@@ -27,6 +19,18 @@ if [ -z "$BM_POE_ADDRESS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_USERNAME" ]; then
|
||||||
|
echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is the PoE switch username."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$BM_POE_PASSWORD" ]; then
|
||||||
|
echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
|
||||||
|
echo "This is the PoE switch password."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -z "$BM_POE_INTERFACE" ]; then
|
if [ -z "$BM_POE_INTERFACE" ]; then
|
||||||
echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
|
echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
|
||||||
echo "This is the PoE switch interface where the device is connected."
|
echo "This is the PoE switch interface where the device is connected."
|
||||||
@@ -61,8 +65,8 @@ if [ -z "$BM_ROOTFS" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$BM_BOOTFS" ] && { [ -z "$BM_KERNEL" ] || [ -z "$BM_DTB" ]; } ; then
|
if [ -z "$BM_BOOTFS" ]; then
|
||||||
echo "Must set /boot files for the TFTP boot in the job's variables or set kernel and dtb"
|
echo "Must set /boot files for the TFTP boot in the job's variables"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -73,8 +77,6 @@ fi
|
|||||||
|
|
||||||
set -ex
|
set -ex
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Clear out any previous run's artifacts.
|
# Clear out any previous run's artifacts.
|
||||||
rm -rf results/
|
rm -rf results/
|
||||||
mkdir -p results
|
mkdir -p results
|
||||||
@@ -83,147 +85,34 @@ mkdir -p results
|
|||||||
# state, since it's volume-mounted on the host.
|
# state, since it's volume-mounted on the host.
|
||||||
rsync -a --delete $BM_ROOTFS/ /nfs/
|
rsync -a --delete $BM_ROOTFS/ /nfs/
|
||||||
|
|
||||||
date +'%F %T'
|
[ -z $BM_ROOTFS_EXTRA ] || rsync -a $BM_ROOTFS_EXTRA/ /nfs/
|
||||||
|
|
||||||
# If BM_BOOTFS is an URL, download it
|
mkdir -p /nfs/results
|
||||||
if echo $BM_BOOTFS | grep -q http; then
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS" -o /tmp/bootfs.tar
|
|
||||||
BM_BOOTFS=/tmp/bootfs.tar
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
|
|
||||||
if [ -f "${BM_BOOTFS}" ]; then
|
|
||||||
mkdir -p /tmp/bootfs
|
|
||||||
tar xf $BM_BOOTFS -C /tmp/bootfs
|
|
||||||
BM_BOOTFS=/tmp/bootfs
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If BM_KERNEL and BM_DTS is present
|
|
||||||
if [ -n "${FORCE_KERNEL_TAG}" ]; then
|
|
||||||
if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then
|
|
||||||
echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o "${BM_KERNEL}"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o "${BM_DTB}.dtb"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
"${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Install kernel modules (it could be either in /lib/modules or
|
|
||||||
# /usr/lib/modules, but we want to install in the latter)
|
|
||||||
if [ -n "${FORCE_KERNEL_TAG}" ]; then
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/
|
|
||||||
rm modules.tar.zst &
|
|
||||||
elif [ -n "${BM_BOOTFS}" ]; then
|
|
||||||
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
|
|
||||||
[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
|
|
||||||
else
|
|
||||||
echo "No modules!"
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Install kernel image + bootloader files
|
|
||||||
if [ -n "${FORCE_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
|
|
||||||
mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
|
|
||||||
else # BM_BOOTFS
|
|
||||||
rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Set up the pxelinux config for Jetson Nano
|
|
||||||
mkdir -p /tftp/pxelinux.cfg
|
|
||||||
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
|
|
||||||
PROMPT 0
|
|
||||||
TIMEOUT 30
|
|
||||||
DEFAULT primary
|
|
||||||
MENU TITLE jetson nano boot options
|
|
||||||
LABEL primary
|
|
||||||
MENU LABEL CI kernel on TFTP
|
|
||||||
LINUX Image
|
|
||||||
FDT tegra210-p3450-0000.dtb
|
|
||||||
APPEND \${cbootargs} $BM_CMDLINE
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Set up the pxelinux config for Jetson TK1
|
|
||||||
cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1
|
|
||||||
PROMPT 0
|
|
||||||
TIMEOUT 30
|
|
||||||
DEFAULT primary
|
|
||||||
MENU TITLE jetson TK1 boot options
|
|
||||||
LABEL primary
|
|
||||||
MENU LABEL CI kernel on TFTP
|
|
||||||
LINUX zImage
|
|
||||||
FDT tegra124-jetson-tk1.dtb
|
|
||||||
APPEND \${cbootargs} $BM_CMDLINE
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Create the rootfs in the NFS directory
|
|
||||||
. $BM/rootfs-setup.sh /nfs
|
. $BM/rootfs-setup.sh /nfs
|
||||||
|
|
||||||
date +'%F %T'
|
rsync -a --delete $BM_BOOTFS/ /tftp/
|
||||||
|
|
||||||
echo "$BM_CMDLINE" > /tftp/cmdline.txt
|
echo "$BM_CMDLINE" > /tftp/cmdline.txt
|
||||||
|
|
||||||
# Add some options in config.txt, if defined
|
|
||||||
if [ -n "$BM_BOOTCONFIG" ]; then
|
|
||||||
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
|
|
||||||
fi
|
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
STRUCTURED_LOG_FILE=job_detail.json
|
ATTEMPTS=2
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update dut_job_type "${DEVICE_TYPE}"
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update farm "${FARM}"
|
|
||||||
ATTEMPTS=3
|
|
||||||
first_attempt=True
|
|
||||||
while [ $((ATTEMPTS--)) -gt 0 ]; do
|
while [ $((ATTEMPTS--)) -gt 0 ]; do
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --create-dut-job dut_name "${CI_RUNNER_DESCRIPTION}"
|
|
||||||
# Update subtime time to CI_JOB_STARTED_AT only for the first run
|
|
||||||
if [ "$first_attempt" = "True" ]; then
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit "${CI_JOB_STARTED_AT}"
|
|
||||||
else
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --update-dut-time submit
|
|
||||||
fi
|
|
||||||
python3 $BM/poe_run.py \
|
python3 $BM/poe_run.py \
|
||||||
--dev="$BM_SERIAL" \
|
--dev="$BM_SERIAL" \
|
||||||
--powerup="$BM_POWERUP" \
|
--powerup="$BM_POWERUP" \
|
||||||
--powerdown="$BM_POWERDOWN" \
|
--powerdown="$BM_POWERDOWN"
|
||||||
--test-timeout ${TEST_PHASE_TIMEOUT:-20}
|
|
||||||
ret=$?
|
ret=$?
|
||||||
|
|
||||||
if [ $ret -eq 2 ]; then
|
if [ $ret -eq 2 ]; then
|
||||||
echo "Did not detect boot sequence, retrying..."
|
echo "Did not detect boot sequence, retrying..."
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
|
||||||
first_attempt=False
|
|
||||||
else
|
else
|
||||||
ATTEMPTS=0
|
ATTEMPTS=0
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close-dut-job
|
|
||||||
python3 $CI_INSTALL/custom_logger.py ${STRUCTURED_LOG_FILE} --close
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
|
||||||
# will look for them.
|
# will look for them.
|
||||||
cp -Rp /nfs/results/. results/
|
cp -Rp /nfs/results/. results/
|
||||||
if [ -f "${STRUCTURED_LOG_FILE}" ]; then
|
|
||||||
cp -p ${STRUCTURED_LOG_FILE} results/
|
|
||||||
echo "Structured log file is available at ${ARTIFACTS_BASE_URL}/results/${STRUCTURED_LOG_FILE}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
exit $ret
|
exit $ret
|
||||||
|
42
.gitlab-ci/bare-metal/poe-set
Executable file
42
.gitlab-ci/bare-metal/poe-set
Executable file
@@ -0,0 +1,42 @@
|
|||||||
|
#!/usr/bin/expect -f
|
||||||
|
set SWITCHSERVER $env(BM_POE_ADDRESS)
|
||||||
|
set USERNAME $env(BM_POE_USERNAME)
|
||||||
|
set PASSWORD $env(BM_POE_PASSWORD)
|
||||||
|
|
||||||
|
set PORTNUMBER [lindex $argv 0]
|
||||||
|
set POESTATUS [lindex $argv 1]
|
||||||
|
|
||||||
|
log_user 0
|
||||||
|
|
||||||
|
spawn telnet $SWITCHSERVER
|
||||||
|
expect "Login"
|
||||||
|
sleep 1
|
||||||
|
send "$USERNAME\t$PASSWORD\r"
|
||||||
|
expect "Menu"
|
||||||
|
send "\x01"
|
||||||
|
expect ">"
|
||||||
|
send "lcli\r"
|
||||||
|
expect "Name:"
|
||||||
|
send "$USERNAME\r"
|
||||||
|
expect "Password:"
|
||||||
|
send "$PASSWORD\r"
|
||||||
|
expect "#"
|
||||||
|
send "configure\r"
|
||||||
|
expect "(config)#"
|
||||||
|
send "interface GE $PORTNUMBER\r"
|
||||||
|
expect "(config-if)#"
|
||||||
|
if { "$POESTATUS" == "off" } {
|
||||||
|
send "power inline never\r"
|
||||||
|
} elseif { "$POESTATUS" == "on" } {
|
||||||
|
send "power inline auto\r"
|
||||||
|
} elseif { "$POESTATUS" == "reset" } {
|
||||||
|
send "power inline never\r"
|
||||||
|
send "power inline auto\r"
|
||||||
|
}
|
||||||
|
expect "(config-if)#"
|
||||||
|
send "exit\r"
|
||||||
|
expect "(config)#"
|
||||||
|
send "exit\r"
|
||||||
|
expect "$#"
|
||||||
|
send "exit\r"
|
||||||
|
expect eof
|
@@ -24,26 +24,15 @@
|
|||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
from serial_buffer import SerialBuffer
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
|
|
||||||
from custom_logger import CustomLogger
|
|
||||||
from serial_buffer import SerialBuffer
|
|
||||||
|
|
||||||
class PoERun:
|
class PoERun:
|
||||||
def __init__(self, args, test_timeout, logger):
|
def __init__(self, args):
|
||||||
self.powerup = args.powerup
|
self.powerup = args.powerup
|
||||||
self.powerdown = args.powerdown
|
self.powerdown = args.powerdown
|
||||||
self.ser = SerialBuffer(
|
self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", 60)
|
||||||
args.dev, "results/serial-output.txt", "")
|
|
||||||
self.test_timeout = test_timeout
|
|
||||||
self.logger = logger
|
|
||||||
|
|
||||||
def print_error(self, message):
|
|
||||||
RED = '\033[0;31m'
|
|
||||||
NO_COLOR = '\033[0m'
|
|
||||||
print(RED + message + NO_COLOR)
|
|
||||||
self.logger.update_status_fail(message)
|
|
||||||
|
|
||||||
def logged_system(self, cmd):
|
def logged_system(self, cmd):
|
||||||
print("Running '{}'".format(cmd))
|
print("Running '{}'".format(cmd))
|
||||||
@@ -51,77 +40,51 @@ class PoERun:
|
|||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
if self.logged_system(self.powerup) != 0:
|
if self.logged_system(self.powerup) != 0:
|
||||||
self.logger.update_status_fail("powerup failed")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
boot_detected = False
|
boot_detected = False
|
||||||
self.logger.create_job_phase("boot")
|
for line in self.ser.lines():
|
||||||
for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
|
|
||||||
if re.search("Booting Linux", line):
|
if re.search("Booting Linux", line):
|
||||||
boot_detected = True
|
boot_detected = True
|
||||||
break
|
break
|
||||||
|
|
||||||
if not boot_detected:
|
if not boot_detected:
|
||||||
self.print_error(
|
print("Something wrong; couldn't detect the boot start up sequence")
|
||||||
"Something wrong; couldn't detect the boot start up sequence")
|
self.logged_system(self.powerdown)
|
||||||
return 1
|
return 2
|
||||||
|
|
||||||
self.logger.create_job_phase("test")
|
for line in self.ser.lines():
|
||||||
for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
|
|
||||||
if re.search("---. end Kernel panic", line):
|
if re.search("---. end Kernel panic", line):
|
||||||
self.logger.update_status_fail("kernel panic")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
# Binning memory problems
|
# Binning memory problems
|
||||||
if re.search("binner overflow mem", line):
|
if re.search("binner overflow mem", line):
|
||||||
self.print_error("Memory overflow in the binner; GPU hang")
|
print("Memory overflow in the binner; GPU hang")
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
|
result = re.search("bare-metal result: (\S*)", line)
|
||||||
self.print_error("nouveau jetson boot bug, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
# network fail on tk1
|
|
||||||
if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line):
|
|
||||||
self.print_error("nouveau jetson tk1 network fail, abandoning run.")
|
|
||||||
return 1
|
|
||||||
|
|
||||||
result = re.search("hwci: mesa: (\S*)", line)
|
|
||||||
if result:
|
if result:
|
||||||
if result.group(1) == "pass":
|
if result.group(1) == "pass":
|
||||||
self.logger.update_dut_job("status", "pass")
|
|
||||||
return 0
|
return 0
|
||||||
else:
|
else:
|
||||||
self.logger.update_status_fail("test fail")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
self.print_error(
|
print("Reached the end of the CPU serial log without finding a result")
|
||||||
"Reached the end of the CPU serial log without finding a result")
|
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
parser = argparse.ArgumentParser()
|
parser = argparse.ArgumentParser()
|
||||||
parser.add_argument('--dev', type=str,
|
parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
|
||||||
help='Serial device to monitor', required=True)
|
parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
|
||||||
parser.add_argument('--powerup', type=str,
|
parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
|
||||||
help='shell command for rebooting', required=True)
|
|
||||||
parser.add_argument('--powerdown', type=str,
|
|
||||||
help='shell command for powering off', required=True)
|
|
||||||
parser.add_argument(
|
|
||||||
'--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
logger = CustomLogger("job_detail.json")
|
poe = PoERun(args)
|
||||||
logger.update_dut_time("start", None)
|
|
||||||
poe = PoERun(args, args.test_timeout * 60, logger)
|
|
||||||
retval = poe.run()
|
retval = poe.run()
|
||||||
|
|
||||||
poe.logged_system(args.powerdown)
|
poe.logged_system(args.powerdown)
|
||||||
logger.update_dut_time("end", None)
|
|
||||||
|
|
||||||
sys.exit(retval)
|
sys.exit(retval)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
@@ -1,37 +1,83 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
rootfs_dst=$1
|
rootfs_dst=$1
|
||||||
|
|
||||||
mkdir -p $rootfs_dst/results
|
mkdir -p $rootfs_dst/results
|
||||||
|
|
||||||
# Set up the init script that brings up the system.
|
# Set up the init script that brings up the system.
|
||||||
cp $BM/bm-init.sh $rootfs_dst/init
|
cp $BM/init.sh $rootfs_dst/init
|
||||||
cp $CI_COMMON/init*.sh $rootfs_dst/
|
|
||||||
|
|
||||||
date +'%F %T'
|
cp $BM/capture-devcoredump.sh $rootfs_dst/
|
||||||
|
|
||||||
# Make JWT token available as file in the bare-metal storage to enable access
|
|
||||||
# to MinIO
|
|
||||||
cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"
|
|
||||||
|
|
||||||
date +'%F %T'
|
|
||||||
|
|
||||||
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
|
|
||||||
cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
|
|
||||||
cp $CI_COMMON/kdl.sh $rootfs_dst/
|
|
||||||
cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
|
|
||||||
|
|
||||||
set +x
|
set +x
|
||||||
|
|
||||||
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
# Pass through relevant env vars from the gitlab job to the baremetal init script
|
||||||
|
touch $rootfs_dst/set-job-env-vars.sh
|
||||||
|
chmod +x $rootfs_dst/set-job-env-vars.sh
|
||||||
|
for var in \
|
||||||
|
BARE_METAL_TEST_SCRIPT \
|
||||||
|
BM_KERNEL_MODULES \
|
||||||
|
CI_COMMIT_BRANCH \
|
||||||
|
CI_COMMIT_TITLE \
|
||||||
|
CI_JOB_JWT \
|
||||||
|
CI_JOB_ID \
|
||||||
|
CI_JOB_URL \
|
||||||
|
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
|
||||||
|
CI_MERGE_REQUEST_TITLE \
|
||||||
|
CI_NODE_INDEX \
|
||||||
|
CI_NODE_TOTAL \
|
||||||
|
CI_PIPELINE_ID \
|
||||||
|
CI_PROJECT_PATH \
|
||||||
|
CI_RUNNER_DESCRIPTION \
|
||||||
|
CI_SERVER_URL \
|
||||||
|
DEQP_CASELIST_FILTER \
|
||||||
|
DEQP_CONFIG \
|
||||||
|
DEQP_EXPECTED_RENDERER \
|
||||||
|
DEQP_FRACTION \
|
||||||
|
DEQP_HEIGHT \
|
||||||
|
DEQP_NO_SAVE_RESULTS \
|
||||||
|
DEQP_PARALLEL \
|
||||||
|
DEQP_RESULTS_DIR \
|
||||||
|
DEQP_VARIANT \
|
||||||
|
DEQP_VER \
|
||||||
|
DEQP_WIDTH \
|
||||||
|
DEVICE_NAME \
|
||||||
|
DRIVER_NAME \
|
||||||
|
EGL_PLATFORM \
|
||||||
|
FDO_CI_CONCURRENT \
|
||||||
|
FDO_UPSTREAM_REPO \
|
||||||
|
FD_MESA_DEBUG \
|
||||||
|
FLAKES_CHANNEL \
|
||||||
|
GPU_VERSION \
|
||||||
|
IR3_SHADER_DEBUG \
|
||||||
|
MESA_GL_VERSION_OVERRIDE \
|
||||||
|
MESA_GLSL_VERSION_OVERRIDE \
|
||||||
|
MESA_GLES_VERSION_OVERRIDE \
|
||||||
|
MINIO_HOST \
|
||||||
|
NIR_VALIDATE \
|
||||||
|
PIGLIT_HTML_SUMMARY \
|
||||||
|
PIGLIT_JUNIT_RESULTS \
|
||||||
|
PIGLIT_OPTIONS \
|
||||||
|
PIGLIT_PLATFORM \
|
||||||
|
PIGLIT_PROFILES \
|
||||||
|
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
|
||||||
|
PIGLIT_REPLAY_DESCRIPTION_FILE \
|
||||||
|
PIGLIT_REPLAY_DEVICE_NAME \
|
||||||
|
PIGLIT_REPLAY_EXTRA_ARGS \
|
||||||
|
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL \
|
||||||
|
PIGLIT_REPLAY_UPLOAD_TO_MINIO \
|
||||||
|
PIGLIT_RESULTS \
|
||||||
|
TU_DEBUG \
|
||||||
|
VK_DRIVER \
|
||||||
|
; do
|
||||||
|
if [ -n "${!var+x}" ]; then
|
||||||
|
echo "export $var=${!var@Q}" >> $rootfs_dst/set-job-env-vars.sh
|
||||||
|
fi
|
||||||
|
done
|
||||||
echo "Variables passed through:"
|
echo "Variables passed through:"
|
||||||
"$CI_COMMON"/generate-env.sh | tee $rootfs_dst/set-job-env-vars.sh
|
cat $rootfs_dst/set-job-env-vars.sh
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
# Add the Mesa drivers we built, and make a consistent symlink to them.
|
# Add the Mesa drivers we built, and make a consistent symlink to them.
|
||||||
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
|
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
|
||||||
rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
|
tar -C $rootfs_dst/$CI_PROJECT_DIR/ -xf $CI_PROJECT_DIR/artifacts/install.tar
|
||||||
|
ln -sf $CI_PROJECT_DIR/install $rootfs_dst/install
|
||||||
date +'%F %T'
|
|
||||||
|
@@ -30,29 +30,21 @@ import time
|
|||||||
|
|
||||||
|
|
||||||
class SerialBuffer:
|
class SerialBuffer:
|
||||||
def __init__(self, dev, filename, prefix, timeout=None, line_queue=None):
|
def __init__(self, dev, filename, prefix, timeout = None):
|
||||||
self.filename = filename
|
self.filename = filename
|
||||||
self.dev = dev
|
self.dev = dev
|
||||||
|
|
||||||
if dev:
|
if dev:
|
||||||
self.f = open(filename, "wb+")
|
self.f = open(filename, "wb+")
|
||||||
self.serial = serial.Serial(dev, 115200, timeout=timeout)
|
self.serial = serial.Serial(dev, 115200, timeout=timeout if timeout else 10)
|
||||||
else:
|
else:
|
||||||
self.f = open(filename, "rb")
|
self.f = open(filename, "rb")
|
||||||
self.serial = None
|
|
||||||
|
|
||||||
self.byte_queue = queue.Queue()
|
self.byte_queue = queue.Queue()
|
||||||
# allow multiple SerialBuffers to share a line queue so you can merge
|
self.line_queue = queue.Queue()
|
||||||
# servo's CPU and EC streams into one thing to watch the boot/test
|
|
||||||
# progress on.
|
|
||||||
if line_queue:
|
|
||||||
self.line_queue = line_queue
|
|
||||||
else:
|
|
||||||
self.line_queue = queue.Queue()
|
|
||||||
self.prefix = prefix
|
self.prefix = prefix
|
||||||
self.timeout = timeout
|
self.timeout = timeout
|
||||||
self.sentinel = object()
|
self.sentinel = object()
|
||||||
self.closing = False
|
|
||||||
|
|
||||||
if self.dev:
|
if self.dev:
|
||||||
self.read_thread = threading.Thread(
|
self.read_thread = threading.Thread(
|
||||||
@@ -66,31 +58,24 @@ class SerialBuffer:
|
|||||||
target=self.serial_lines_thread_loop, daemon=True)
|
target=self.serial_lines_thread_loop, daemon=True)
|
||||||
self.lines_thread.start()
|
self.lines_thread.start()
|
||||||
|
|
||||||
def close(self):
|
|
||||||
self.closing = True
|
|
||||||
if self.serial:
|
|
||||||
self.serial.cancel_read()
|
|
||||||
self.read_thread.join()
|
|
||||||
self.lines_thread.join()
|
|
||||||
if self.serial:
|
|
||||||
self.serial.close()
|
|
||||||
|
|
||||||
# Thread that just reads the bytes from the serial device to try to keep from
|
# Thread that just reads the bytes from the serial device to try to keep from
|
||||||
# buffer overflowing it. If nothing is received in 1 minute, it finalizes.
|
# buffer overflowing it. If nothing is received in 1 minute, it finalizes.
|
||||||
def serial_read_thread_loop(self):
|
def serial_read_thread_loop(self):
|
||||||
greet = "Serial thread reading from %s\n" % self.dev
|
greet = "Serial thread reading from %s\n" % self.dev
|
||||||
self.byte_queue.put(greet.encode())
|
self.byte_queue.put(greet.encode())
|
||||||
|
|
||||||
while not self.closing:
|
while True:
|
||||||
try:
|
try:
|
||||||
b = self.serial.read()
|
b = self.serial.read()
|
||||||
if len(b) == 0:
|
if len(b) > 0:
|
||||||
|
self.byte_queue.put(b)
|
||||||
|
elif self.timeout:
|
||||||
|
self.byte_queue.put(self.sentinel)
|
||||||
break
|
break
|
||||||
self.byte_queue.put(b)
|
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(self.prefix + str(err))
|
print(self.prefix + str(err))
|
||||||
|
self.byte_queue.put(self.sentinel)
|
||||||
break
|
break
|
||||||
self.byte_queue.put(self.sentinel)
|
|
||||||
|
|
||||||
# Thread that just reads the bytes from the file of serial output that some
|
# Thread that just reads the bytes from the file of serial output that some
|
||||||
# other process is appending to.
|
# other process is appending to.
|
||||||
@@ -98,13 +83,12 @@ class SerialBuffer:
|
|||||||
greet = "Serial thread reading from %s\n" % self.filename
|
greet = "Serial thread reading from %s\n" % self.filename
|
||||||
self.byte_queue.put(greet.encode())
|
self.byte_queue.put(greet.encode())
|
||||||
|
|
||||||
while not self.closing:
|
while True:
|
||||||
line = self.f.readline()
|
line = self.f.readline()
|
||||||
if line:
|
if line:
|
||||||
self.byte_queue.put(line)
|
self.byte_queue.put(line)
|
||||||
else:
|
else:
|
||||||
time.sleep(0.1)
|
time.sleep(0.1)
|
||||||
self.byte_queue.put(self.sentinel)
|
|
||||||
|
|
||||||
# Thread that processes the stream of bytes to 1) log to stdout, 2) log to
|
# Thread that processes the stream of bytes to 1) log to stdout, 2) log to
|
||||||
# file, 3) add to the queue of lines to be read by program logic
|
# file, 3) add to the queue of lines to be read by program logic
|
||||||
@@ -137,30 +121,14 @@ class SerialBuffer:
|
|||||||
self.line_queue.put(line)
|
self.line_queue.put(line)
|
||||||
line = bytearray()
|
line = bytearray()
|
||||||
|
|
||||||
def lines(self, timeout=None, phase=None):
|
def get_line(self):
|
||||||
start_time = time.monotonic()
|
line = self.line_queue.get()
|
||||||
while True:
|
if line == self.sentinel:
|
||||||
read_timeout = None
|
self.lines_thread.join()
|
||||||
if timeout:
|
return line
|
||||||
read_timeout = timeout - (time.monotonic() - start_time)
|
|
||||||
if read_timeout <= 0:
|
|
||||||
print("read timeout waiting for serial during {}".format(phase))
|
|
||||||
self.close()
|
|
||||||
break
|
|
||||||
|
|
||||||
try:
|
def lines(self):
|
||||||
line = self.line_queue.get(timeout=read_timeout)
|
return iter(self.get_line, self.sentinel)
|
||||||
except queue.Empty:
|
|
||||||
print("read timeout waiting for serial during {}".format(phase))
|
|
||||||
self.close()
|
|
||||||
break
|
|
||||||
|
|
||||||
if line == self.sentinel:
|
|
||||||
print("End of serial output")
|
|
||||||
self.lines_thread.join()
|
|
||||||
break
|
|
||||||
|
|
||||||
yield line
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
@@ -1,41 +0,0 @@
|
|||||||
#!/usr/bin/python3
|
|
||||||
|
|
||||||
# Copyright © 2020 Christian Gmeiner
|
|
||||||
#
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
|
||||||
# copy of this software and associated documentation files (the "Software"),
|
|
||||||
# to deal in the Software without restriction, including without limitation
|
|
||||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
||||||
# and/or sell copies of the Software, and to permit persons to whom the
|
|
||||||
# Software is furnished to do so, subject to the following conditions:
|
|
||||||
#
|
|
||||||
# The above copyright notice and this permission notice (including the next
|
|
||||||
# paragraph) shall be included in all copies or substantial portions of the
|
|
||||||
# Software.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
||||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
||||||
# IN THE SOFTWARE.
|
|
||||||
#
|
|
||||||
# Tiny script to read bytes from telnet, and write the output to stdout, with a
|
|
||||||
# buffer in between so we don't lose serial output from its buffer.
|
|
||||||
#
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import telnetlib
|
|
||||||
|
|
||||||
host = sys.argv[1]
|
|
||||||
port = sys.argv[2]
|
|
||||||
|
|
||||||
tn = telnetlib.Telnet(host, port, 1000000)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
bytes = tn.read_some()
|
|
||||||
sys.stdout.buffer.write(bytes)
|
|
||||||
sys.stdout.flush()
|
|
||||||
|
|
||||||
tn.close()
|
|
@@ -1 +0,0 @@
|
|||||||
../bin/ci
|
|
36
.gitlab-ci/build-apitrace.sh
Normal file
36
.gitlab-ci/build-apitrace.sh
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
# Need an unreleased version of Waffle for surfaceless support in apitrace
|
||||||
|
# Replace this build with the Debian package once that's possible
|
||||||
|
|
||||||
|
WAFFLE_VERSION="e3c995d9a2693b687501715b6550619922346089"
|
||||||
|
git clone https://gitlab.freedesktop.org/mesa/waffle.git --single-branch --no-checkout /waffle
|
||||||
|
pushd /waffle
|
||||||
|
git checkout "$WAFFLE_VERSION"
|
||||||
|
cmake -S . -B _build -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_BUILD_TYPE=Release $EXTRA_CMAKE_ARGS
|
||||||
|
make -C _build install
|
||||||
|
mkdir -p build/lib build/bin
|
||||||
|
cp _build/lib/libwaffle-1.so build/lib/libwaffle-1.so.0
|
||||||
|
cp _build/bin/wflinfo build/bin/wflinfo
|
||||||
|
${STRIP_CMD:-strip} build/lib/* build/bin/*
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
||||||
|
|
||||||
|
APITRACE_VERSION="9.0"
|
||||||
|
|
||||||
|
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
||||||
|
pushd /apitrace
|
||||||
|
git checkout "$APITRACE_VERSION"
|
||||||
|
# Note: The cmake stuff for waffle in apitrace fails to use waffle's library
|
||||||
|
# directory. Just force the issue here.
|
||||||
|
env LDFLAGS="-L/usr/local/lib" \
|
||||||
|
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on -DWaffle_DIR=/usr/local/lib/cmake/Waffle/ $EXTRA_CMAKE_ARGS
|
||||||
|
ninja -C _build
|
||||||
|
mkdir build
|
||||||
|
cp _build/apitrace build
|
||||||
|
cp _build/eglretrace build
|
||||||
|
${STRIP_CMD:-strip} build/*
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
9
.gitlab-ci/build-deqp-runner.sh
Normal file
9
.gitlab-ci/build-deqp-runner.sh
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
cargo install deqp-runner \
|
||||||
|
-j ${FDO_CI_CONCURRENT:-4} \
|
||||||
|
--version 0.4.0 \
|
||||||
|
--root /usr/local \
|
||||||
|
$EXTRA_CARGO_ARGS
|
63
.gitlab-ci/build-deqp.sh
Normal file
63
.gitlab-ci/build-deqp.sh
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
git config --global user.email "mesa@example.com"
|
||||||
|
git config --global user.name "Mesa CI"
|
||||||
|
git clone \
|
||||||
|
https://github.com/KhronosGroup/VK-GL-CTS.git \
|
||||||
|
-b vulkan-cts-1.2.5.0 \
|
||||||
|
--depth 1 \
|
||||||
|
/VK-GL-CTS
|
||||||
|
pushd /VK-GL-CTS
|
||||||
|
|
||||||
|
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
||||||
|
# libpng (sigh). The archives get their checksums checked anyway, and git
|
||||||
|
# always goes through ssh or https.
|
||||||
|
python3 external/fetch_sources.py --insecure
|
||||||
|
|
||||||
|
mkdir -p /deqp
|
||||||
|
|
||||||
|
# Save the testlog stylesheets:
|
||||||
|
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
|
||||||
|
popd
|
||||||
|
|
||||||
|
pushd /deqp
|
||||||
|
cmake -S /VK-GL-CTS -B . -G Ninja \
|
||||||
|
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
$EXTRA_CMAKE_ARGS
|
||||||
|
ninja
|
||||||
|
|
||||||
|
# Copy out the mustpass lists we want.
|
||||||
|
mkdir /deqp/mustpass
|
||||||
|
cp /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt \
|
||||||
|
/deqp/mustpass/vk-master.txt
|
||||||
|
|
||||||
|
cp \
|
||||||
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
|
||||||
|
/deqp/mustpass/.
|
||||||
|
cp \
|
||||||
|
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
|
||||||
|
/deqp/mustpass/.
|
||||||
|
|
||||||
|
# Save *some* executor utils, but otherwise strip things down
|
||||||
|
# to reduct deqp build size:
|
||||||
|
mkdir /deqp/executor.save
|
||||||
|
cp /deqp/executor/testlog-to-* /deqp/executor.save
|
||||||
|
rm -rf /deqp/executor
|
||||||
|
mv /deqp/executor.save /deqp/executor
|
||||||
|
|
||||||
|
rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
|
||||||
|
rm -rf /deqp/external/openglcts/modules/cts-runner
|
||||||
|
rm -rf /deqp/modules/internal
|
||||||
|
rm -rf /deqp/execserver
|
||||||
|
rm -rf /deqp/modules/egl
|
||||||
|
rm -rf /deqp/framework
|
||||||
|
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
|
||||||
|
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
||||||
|
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
||||||
|
${STRIP_CMD:-strip} modules/*/deqp-*
|
||||||
|
du -sh *
|
||||||
|
rm -rf /VK-GL-CTS
|
||||||
|
popd
|
14
.gitlab-ci/build-fossilize.sh
Normal file
14
.gitlab-ci/build-fossilize.sh
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
git clone https://github.com/ValveSoftware/Fossilize.git
|
||||||
|
cd Fossilize
|
||||||
|
git checkout 6b5b570008c9ab5269e341f04c811fe49a1bb72c
|
||||||
|
git submodule update --init
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
|
||||||
|
ninja -C . install
|
||||||
|
cd ../..
|
||||||
|
rm -rf Fossilize
|
21
.gitlab-ci/build-gfxreconstruct.sh
Normal file
21
.gitlab-ci/build-gfxreconstruct.sh
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
GFXRECONSTRUCT_VERSION=57c588c04af631d1d6d381a48e2b9283f9d9d528
|
||||||
|
|
||||||
|
# Using the "dev" branch by now because it solves a crash and will allow us to
|
||||||
|
# use the gfxreconstruct-info tool
|
||||||
|
git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b dev --no-checkout /gfxreconstruct
|
||||||
|
pushd /gfxreconstruct
|
||||||
|
git checkout "$GFXRECONSTRUCT_VERSION"
|
||||||
|
git submodule update --init
|
||||||
|
git submodule update
|
||||||
|
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
|
||||||
|
ninja -C _build gfxrecon-replay gfxrecon-info
|
||||||
|
mkdir -p build/bin
|
||||||
|
install _build/tools/replay/gfxrecon-replay build/bin
|
||||||
|
install _build/tools/info/gfxrecon-info build/bin
|
||||||
|
strip build/bin/*
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
37
.gitlab-ci/build-libclc.sh
Normal file
37
.gitlab-ci/build-libclc.sh
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
export LLVM_CONFIG="llvm-config-10"
|
||||||
|
|
||||||
|
$LLVM_CONFIG --version
|
||||||
|
|
||||||
|
git clone https://github.com/KhronosGroup/SPIRV-LLVM-Translator -b llvm_release_100 --depth 1 /SPIRV-LLVM-Translator
|
||||||
|
pushd /SPIRV-LLVM-Translator
|
||||||
|
cmake -S . -B . -G Ninja -DLLVM_BUILD_TOOLS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS=-fPIC -DCMAKE_CXX_FLAGS=-fPIC -DCMAKE_INSTALL_PREFIX=`$LLVM_CONFIG --prefix`
|
||||||
|
ninja
|
||||||
|
ninja install
|
||||||
|
popd
|
||||||
|
|
||||||
|
|
||||||
|
git config --global user.email "mesa@example.com"
|
||||||
|
git config --global user.name "Mesa CI"
|
||||||
|
git clone \
|
||||||
|
https://github.com/llvm/llvm-project \
|
||||||
|
--depth 1 \
|
||||||
|
/llvm-project
|
||||||
|
|
||||||
|
mkdir /libclc
|
||||||
|
pushd /libclc
|
||||||
|
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
|
||||||
|
ninja
|
||||||
|
ninja install
|
||||||
|
popd
|
||||||
|
|
||||||
|
# workaroud cmake vs debian packaging.
|
||||||
|
mkdir -p /usr/lib/clc
|
||||||
|
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
|
||||||
|
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
|
||||||
|
|
||||||
|
du -sh *
|
||||||
|
rm -rf /libclc /llvm-project /SPIRV-LLVM-Translator
|
14
.gitlab-ci/build-libdrm.sh
Normal file
14
.gitlab-ci/build-libdrm.sh
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
export LIBDRM_VERSION=libdrm-2.4.102
|
||||||
|
|
||||||
|
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
|
||||||
|
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
|
||||||
|
cd $LIBDRM_VERSION
|
||||||
|
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
|
||||||
|
ninja -C build install
|
||||||
|
cd ..
|
||||||
|
rm -rf $LIBDRM_VERSION
|
||||||
|
|
27
.gitlab-ci/build-piglit.sh
Normal file
27
.gitlab-ci/build-piglit.sh
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
if [ -n "$INCLUDE_OPENCL_TESTS" ]; then
|
||||||
|
PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON"
|
||||||
|
fi
|
||||||
|
|
||||||
|
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
|
||||||
|
pushd /piglit
|
||||||
|
git checkout c702d2bbf28b01a18ce613f386a4ffef03f6c0c9
|
||||||
|
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
|
||||||
|
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
|
||||||
|
ninja $PIGLIT_BUILD_TARGETS
|
||||||
|
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
|
||||||
|
rm -rf target_api
|
||||||
|
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
|
||||||
|
find ! -regex "^\.$" \
|
||||||
|
! -regex "^\.\/piglit.*" \
|
||||||
|
! -regex "^\.\/framework.*" \
|
||||||
|
! -regex "^\.\/bin$" \
|
||||||
|
! -regex "^\.\/bin\/replayer\.py" \
|
||||||
|
! -regex "^\.\/templates.*" \
|
||||||
|
! -regex "^\.\/tests$" \
|
||||||
|
! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
|
||||||
|
fi
|
||||||
|
popd
|
17
.gitlab-ci/build-renderdoc.sh
Normal file
17
.gitlab-ci/build-renderdoc.sh
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
RENDERDOC_VERSION=da02e88201dc3b64316fc33ce6ff69cc729689aa
|
||||||
|
|
||||||
|
git clone https://github.com/baldurk/renderdoc.git --single-branch --no-checkout /renderdoc
|
||||||
|
pushd /renderdoc
|
||||||
|
git checkout "$RENDERDOC_VERSION"
|
||||||
|
cmake -S . -B _build -G Ninja -DENABLE_QRENDERDOC=false -DCMAKE_BUILD_TYPE=Release $EXTRA_CMAKE_ARGS
|
||||||
|
ninja -C _build
|
||||||
|
mkdir -p build/lib
|
||||||
|
${STRIP_CMD:-strip} _build/lib/*.so
|
||||||
|
cp _build/lib/renderdoc.so build/lib
|
||||||
|
cp _build/lib/librenderdoc.so build/lib
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
31
.gitlab-ci/build-rust.sh
Normal file
31
.gitlab-ci/build-rust.sh
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Note that this script is not actually "building" rust, but build- is the
|
||||||
|
# convention for the shared helpers for putting stuff in our containers.
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
|
||||||
|
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
|
||||||
|
# are just available to all build jobs.
|
||||||
|
mkdir -p $HOME/.cargo
|
||||||
|
ln -s /usr/local/bin $HOME/.cargo/bin
|
||||||
|
|
||||||
|
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
|
||||||
|
# version of the compiler, rather than whatever the container's Debian comes
|
||||||
|
# with.
|
||||||
|
#
|
||||||
|
# Pick the rust compiler (1.41) available in Debian stable, and pick a specific
|
||||||
|
# snapshot from rustup so the compiler doesn't drift on us.
|
||||||
|
wget https://sh.rustup.rs -O - | \
|
||||||
|
sh -s -- -y --default-toolchain 1.41.1-2020-02-27
|
||||||
|
|
||||||
|
# Set up a config script for cross compiling -- cargo needs your system cc for
|
||||||
|
# linking in cross builds, but doesn't know what you want to use for system cc.
|
||||||
|
cat > /root/.cargo/config <<EOF
|
||||||
|
[target.armv7-unknown-linux-gnueabihf]
|
||||||
|
linker = "arm-linux-gnueabihf-gcc"
|
||||||
|
|
||||||
|
[target.aarch64-unknown-linux-gnu]
|
||||||
|
linker = "aarch64-linux-gnu-gcc"
|
||||||
|
EOF
|
14
.gitlab-ci/build-spirv-tools.sh
Normal file
14
.gitlab-ci/build-spirv-tools.sh
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
git clone --depth 1 https://github.com/KhronosGroup/SPIRV-Tools SPIRV-Tools
|
||||||
|
pushd SPIRV-Tools
|
||||||
|
pushd external
|
||||||
|
git clone --depth 1 https://github.com/KhronosGroup/SPIRV-Headers
|
||||||
|
popd
|
||||||
|
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
|
||||||
|
ninja -C _build
|
||||||
|
ninja -C _build install
|
||||||
|
popd
|
||||||
|
rm -rf SPIRV-Tools
|
20
.gitlab-ci/build-virglrenderer.sh
Normal file
20
.gitlab-ci/build-virglrenderer.sh
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
mkdir -p /epoxy
|
||||||
|
pushd /epoxy
|
||||||
|
wget -qO- https://github.com/anholt/libepoxy/releases/download/1.5.4/libepoxy-1.5.4.tar.xz | tar -xJ --strip-components=1
|
||||||
|
meson build/ $EXTRA_MESON_ARGS
|
||||||
|
ninja -C build install
|
||||||
|
popd
|
||||||
|
rm -rf /epoxy
|
||||||
|
|
||||||
|
VIRGLRENDERER_VERSION=43148d1115a12219a0560a538c9872d07c28c558
|
||||||
|
git clone https://gitlab.freedesktop.org/virgl/virglrenderer.git --single-branch --no-checkout /virglrenderer
|
||||||
|
pushd /virglrenderer
|
||||||
|
git checkout "$VIRGLRENDERER_VERSION"
|
||||||
|
meson build/ $EXTRA_MESON_ARGS
|
||||||
|
ninja -C build install
|
||||||
|
popd
|
||||||
|
rm -rf /virglrenderer
|
29
.gitlab-ci/build-vulkantools.sh
Normal file
29
.gitlab-ci/build-vulkantools.sh
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
VULKANTOOLS_VERSION=1862c6a47b64cd09156205d7f7e6b3bfcea76390
|
||||||
|
|
||||||
|
git clone https://github.com/LunarG/VulkanTools.git --single-branch --no-checkout /VulkanTools
|
||||||
|
pushd /VulkanTools
|
||||||
|
git checkout "$VULKANTOOLS_VERSION"
|
||||||
|
./update_external_sources.sh
|
||||||
|
mkdir _build
|
||||||
|
./scripts/update_deps.py --dir=_build --config=release --generator=Ninja
|
||||||
|
cmake -S . -B _build -G Ninja \
|
||||||
|
-DCMAKE_BUILD_TYPE=Release \
|
||||||
|
-DCMAKE_INSTALL_PREFIX=/VulkanTools/build \
|
||||||
|
-DBUILD_TESTS=OFF \
|
||||||
|
-DBUILD_VLF=OFF \
|
||||||
|
-DBUILD_VKTRACE=OFF \
|
||||||
|
-DBUILD_VIA=OFF \
|
||||||
|
-DBUILD_VKTRACE_REPLAY=OFF \
|
||||||
|
-C_build/helper.cmake
|
||||||
|
ninja -C _build VkLayer_screenshot VkLayer_screenshot-staging-json
|
||||||
|
mkdir -p build/etc/vulkan/explicit_layer.d
|
||||||
|
mkdir build/lib
|
||||||
|
install _build/layersvt/staging-json/VkLayer_screenshot.json build/etc/vulkan/explicit_layer.d
|
||||||
|
install _build/layersvt/libVkLayer_screenshot.so build/lib
|
||||||
|
strip build/lib/*
|
||||||
|
find . -not -path './build' -not -path './build/*' -delete
|
||||||
|
popd
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=clang++-15
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=clang++
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=clang-15
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=clang
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=g++
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,7 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
_COMPILER=gcc
|
|
||||||
. compiler-wrapper.sh
|
|
@@ -1,21 +0,0 @@
|
|||||||
# shellcheck disable=SC1091
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
if command -V ccache >/dev/null 2>/dev/null; then
|
|
||||||
CCACHE=ccache
|
|
||||||
else
|
|
||||||
CCACHE=
|
|
||||||
fi
|
|
||||||
|
|
||||||
if echo "$@" | grep -E 'meson-private/tmp[^ /]*/testfile.c' >/dev/null; then
|
|
||||||
# Invoked for meson feature check
|
|
||||||
exec $CCACHE $_COMPILER "$@"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$(eval printf "'%s'" "\"\${$(($#-1))}\"")" = "-c" ]; then
|
|
||||||
# Not invoked for linking
|
|
||||||
exec $CCACHE $_COMPILER "$@"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Compiler invoked by ninja for linking. Add -Werror to turn compiler warnings into errors
|
|
||||||
# with LTO. (meson's werror should arguably do this, but meanwhile we need to)
|
|
||||||
exec $CCACHE $_COMPILER "$@" -Werror
|
|
@@ -1,675 +0,0 @@
|
|||||||
# Shared between windows and Linux
|
|
||||||
.build-common:
|
|
||||||
extends: .container+build-rules
|
|
||||||
# Cancel job if a newer commit is pushed to the same branch
|
|
||||||
interruptible: true
|
|
||||||
# Build jobs don't take more than 1-3 minutes. 5-8 min max on a fresh runner
|
|
||||||
# without a populated ccache.
|
|
||||||
# These jobs are never slow, either they finish within reasonable time or
|
|
||||||
# something has gone wrong and the job will never terminate, so we should
|
|
||||||
# instead timeout so that the retry mechanism can kick in.
|
|
||||||
# A few exception are made, see `timeout:` overrides in the rest of this
|
|
||||||
# file.
|
|
||||||
timeout: 30m
|
|
||||||
artifacts:
|
|
||||||
name: "mesa_${CI_JOB_NAME}"
|
|
||||||
when: always
|
|
||||||
paths:
|
|
||||||
- _build/meson-logs/*.txt
|
|
||||||
- _build/meson-logs/strace
|
|
||||||
- shader-db
|
|
||||||
- artifacts
|
|
||||||
|
|
||||||
# Just Linux
|
|
||||||
.build-linux:
|
|
||||||
extends: .build-common
|
|
||||||
variables:
|
|
||||||
CCACHE_COMPILERCHECK: "content"
|
|
||||||
CCACHE_COMPRESS: "true"
|
|
||||||
CCACHE_DIR: /cache/mesa/ccache
|
|
||||||
# Use ccache transparently, and print stats before/after
|
|
||||||
before_script:
|
|
||||||
- !reference [default, before_script]
|
|
||||||
- |
|
|
||||||
export PATH="/usr/lib/ccache:$PATH"
|
|
||||||
export CCACHE_BASEDIR="$PWD"
|
|
||||||
if test -x /usr/bin/ccache; then
|
|
||||||
section_start ccache_before "ccache stats before build"
|
|
||||||
ccache --show-stats
|
|
||||||
section_end ccache_before
|
|
||||||
fi
|
|
||||||
after_script:
|
|
||||||
- if test -x /usr/bin/ccache; then ccache --show-stats | grep "Hits:"; fi
|
|
||||||
- !reference [default, after_script]
|
|
||||||
|
|
||||||
.build-windows:
|
|
||||||
extends:
|
|
||||||
- .build-common
|
|
||||||
- .windows-docker-tags
|
|
||||||
cache:
|
|
||||||
key: ${CI_JOB_NAME}
|
|
||||||
paths:
|
|
||||||
- subprojects/packagecache
|
|
||||||
|
|
||||||
.meson-build:
|
|
||||||
extends:
|
|
||||||
- .build-linux
|
|
||||||
- .use-debian/x86_64_build
|
|
||||||
stage: build-x86_64
|
|
||||||
variables:
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
|
|
||||||
debian-testing:
|
|
||||||
extends:
|
|
||||||
- .meson-build
|
|
||||||
- .ci-deqp-artifacts
|
|
||||||
variables:
|
|
||||||
UNWIND: "enabled"
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-nine=true
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-rusticl=true
|
|
||||||
GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915,r300"
|
|
||||||
VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio,nouveau"
|
|
||||||
BUILDTYPE: "debugoptimized"
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D spirv-to-dxil=true
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D perfetto=true
|
|
||||||
-D tools=drm-shim
|
|
||||||
S3_ARTIFACT_NAME: mesa-x86_64-default-${BUILDTYPE}
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
- .gitlab-ci/prepare-artifacts.sh
|
|
||||||
artifacts:
|
|
||||||
reports:
|
|
||||||
junit: artifacts/ci_scripts_report.xml
|
|
||||||
|
|
||||||
debian-testing-asan:
|
|
||||||
extends:
|
|
||||||
- debian-testing
|
|
||||||
variables:
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=stringop-truncation
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D b_sanitize=address
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D tools=dlclose-skip
|
|
||||||
-D intel-clc=system
|
|
||||||
S3_ARTIFACT_NAME: ""
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
# Do a host build for intel-clc (asan complains not being loaded
|
|
||||||
# as the first library)
|
|
||||||
HOST_BUILD_OPTIONS: >
|
|
||||||
-D build-tests=false
|
|
||||||
-D enable-glcpp-tests=false
|
|
||||||
-D gallium-opencl=disabled
|
|
||||||
-D gallium-drivers=
|
|
||||||
-D vulkan-drivers=
|
|
||||||
-D video-codecs=
|
|
||||||
-D glx=disabled
|
|
||||||
-D platforms=
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D install-intel-clc=true
|
|
||||||
|
|
||||||
debian-testing-msan:
|
|
||||||
# https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo
|
|
||||||
# msan cannot fully work until it's used together with msan libc
|
|
||||||
extends:
|
|
||||||
- debian-clang
|
|
||||||
variables:
|
|
||||||
# l_undef is incompatible with msan
|
|
||||||
EXTRA_OPTION:
|
|
||||||
-D b_sanitize=memory
|
|
||||||
-D b_lundef=false
|
|
||||||
-D intel-clc=system
|
|
||||||
S3_ARTIFACT_NAME: ""
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
# Don't run all the tests yet:
|
|
||||||
# GLSL has some issues in sexpression reading.
|
|
||||||
# gtest has issues in its test initialization.
|
|
||||||
MESON_TEST_ARGS: "--suite glcpp --suite format"
|
|
||||||
GALLIUM_DRIVERS: "freedreno,iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
|
|
||||||
VULKAN_DRIVERS: intel,amd,broadcom,virtio
|
|
||||||
# Do a host build for intel-clc (msan complains about
|
|
||||||
# uninitialized values in the LLVM libs)
|
|
||||||
HOST_BUILD_OPTIONS: >
|
|
||||||
-D build-tests=false
|
|
||||||
-D enable-glcpp-tests=false
|
|
||||||
-D gallium-opencl=disabled
|
|
||||||
-D gallium-drivers=
|
|
||||||
-D vulkan-drivers=
|
|
||||||
-D video-codecs=
|
|
||||||
-D glx=disabled
|
|
||||||
-D platforms=
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D install-intel-clc=true
|
|
||||||
|
|
||||||
debian-build-testing:
|
|
||||||
extends: .meson-build
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
UNWIND: "enabled"
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=enabled
|
|
||||||
-D gallium-omx=bellagio
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=enabled
|
|
||||||
-D gallium-nine=true
|
|
||||||
-D gallium-rusticl=false
|
|
||||||
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
|
|
||||||
VULKAN_DRIVERS: swrast
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D spirv-to-dxil=true
|
|
||||||
-D osmesa=true
|
|
||||||
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi
|
|
||||||
-D b_lto=true
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
script: |
|
|
||||||
section_start lava-pytest "lava-pytest"
|
|
||||||
.gitlab-ci/lava/lava-pytest.sh
|
|
||||||
section_switch shellcheck "shellcheck"
|
|
||||||
.gitlab-ci/run-shellcheck.sh
|
|
||||||
section_switch yamllint "yamllint"
|
|
||||||
.gitlab-ci/run-yamllint.sh
|
|
||||||
section_switch meson "meson"
|
|
||||||
.gitlab-ci/meson/build.sh
|
|
||||||
section_switch shader-db "shader-db"
|
|
||||||
.gitlab-ci/run-shader-db.sh
|
|
||||||
timeout: 30m
|
|
||||||
|
|
||||||
# Test a release build with -Werror so new warnings don't sneak in.
|
|
||||||
debian-release:
|
|
||||||
extends: .meson-build
|
|
||||||
variables:
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
UNWIND: "enabled"
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=stringop-overread
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=enabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=enabled
|
|
||||||
-D gallium-nine=false
|
|
||||||
-D gallium-rusticl=false
|
|
||||||
-D llvm=enabled
|
|
||||||
GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
|
|
||||||
VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental"
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D spirv-to-dxil=true
|
|
||||||
-D osmesa=true
|
|
||||||
-D tools=all
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D intel-rt=enabled
|
|
||||||
-D imagination-srv=true
|
|
||||||
BUILDTYPE: "release"
|
|
||||||
S3_ARTIFACT_NAME: "mesa-x86_64-default-${BUILDTYPE}"
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
- 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
|
|
||||||
|
|
||||||
alpine-build-testing:
|
|
||||||
extends:
|
|
||||||
- .meson-build
|
|
||||||
- .use-alpine/x86_64_build
|
|
||||||
stage: build-x86_64
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: "release"
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=cpp
|
|
||||||
-Wno-error=array-bounds
|
|
||||||
-Wno-error=stringop-overread
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=disabled
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D glvnd=false
|
|
||||||
-D platforms=wayland
|
|
||||||
LLVM_VERSION: "16"
|
|
||||||
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=disabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=disabled
|
|
||||||
-D gallium-nine=true
|
|
||||||
-D gallium-rusticl=false
|
|
||||||
-D gles1=disabled
|
|
||||||
-D gles2=enabled
|
|
||||||
-D llvm=enabled
|
|
||||||
-D microsoft-clc=disabled
|
|
||||||
-D shared-llvm=enabled
|
|
||||||
UNWIND: "disabled"
|
|
||||||
VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
|
|
||||||
|
|
||||||
fedora-release:
|
|
||||||
extends:
|
|
||||||
- .meson-build
|
|
||||||
- .use-fedora/x86_64_build
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: "release"
|
|
||||||
C_LINK_ARGS: >
|
|
||||||
-Wno-error=stringop-overflow
|
|
||||||
-Wno-error=stringop-overread
|
|
||||||
CPP_ARGS: >
|
|
||||||
-Wno-error=dangling-reference
|
|
||||||
-Wno-error=overloaded-virtual
|
|
||||||
CPP_LINK_ARGS: >
|
|
||||||
-Wno-error=stringop-overflow
|
|
||||||
-Wno-error=stringop-overread
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D glvnd=true
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D b_lto=true
|
|
||||||
-D osmesa=true
|
|
||||||
-D selinux=true
|
|
||||||
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
|
|
||||||
-D vulkan-layers=device-select,overlay
|
|
||||||
-D intel-rt=enabled
|
|
||||||
-D imagination-srv=true
|
|
||||||
-D teflon=true
|
|
||||||
GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=enabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=enabled
|
|
||||||
-D gallium-nine=false
|
|
||||||
-D gallium-rusticl=true
|
|
||||||
-D gles1=disabled
|
|
||||||
-D gles2=enabled
|
|
||||||
-D llvm=enabled
|
|
||||||
-D microsoft-clc=disabled
|
|
||||||
-D shared-llvm=enabled
|
|
||||||
LLVM_VERSION: ""
|
|
||||||
UNWIND: "disabled"
|
|
||||||
VULKAN_DRIVERS: "amd,broadcom,freedreno,imagination-experimental,intel,intel_hasvk"
|
|
||||||
|
|
||||||
debian-android:
|
|
||||||
extends:
|
|
||||||
- .meson-cross
|
|
||||||
- .use-debian/android_build
|
|
||||||
- .ci-deqp-artifacts
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
UNWIND: "disabled"
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=asm-operand-widths
|
|
||||||
-Wno-error=constant-conversion
|
|
||||||
-Wno-error=enum-conversion
|
|
||||||
-Wno-error=initializer-overrides
|
|
||||||
-Wno-error=sometimes-uninitialized
|
|
||||||
CPP_ARGS: >
|
|
||||||
-Wno-error=c99-designator
|
|
||||||
-Wno-error=unused-variable
|
|
||||||
-Wno-error=unused-but-set-variable
|
|
||||||
-Wno-error=self-assign
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=disabled
|
|
||||||
-D gbm=disabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D platforms=android
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D android-stub=true
|
|
||||||
-D llvm=disabled
|
|
||||||
-D platform-sdk-version=33
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D android-libbacktrace=disabled
|
|
||||||
-D intel-clc=system
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=disabled
|
|
||||||
-D gallium-vdpau=disabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=disabled
|
|
||||||
-D gallium-xa=disabled
|
|
||||||
-D gallium-nine=false
|
|
||||||
-D gallium-rusticl=false
|
|
||||||
LLVM_VERSION: "15"
|
|
||||||
PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files"
|
|
||||||
HOST_BUILD_OPTIONS: >
|
|
||||||
-D build-tests=false
|
|
||||||
-D enable-glcpp-tests=false
|
|
||||||
-D gallium-opencl=disabled
|
|
||||||
-D gallium-drivers=
|
|
||||||
-D vulkan-drivers=
|
|
||||||
-D video-codecs=
|
|
||||||
-D glx=disabled
|
|
||||||
-D platforms=
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D install-intel-clc=true
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
S3_ARTIFACT_NAME: mesa-x86_64-android-${BUILDTYPE}
|
|
||||||
script:
|
|
||||||
- CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio .gitlab-ci/meson/build.sh
|
|
||||||
# x86_64 build:
|
|
||||||
# Can't do Intel because gen_decoder.c currently requires libexpat, which
|
|
||||||
# is not a dependency that AOSP wants to accept. Can't do Radeon Gallium
|
|
||||||
# drivers because they requires LLVM, which we don't have an Android build
|
|
||||||
# of.
|
|
||||||
- CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris,virgl VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh
|
|
||||||
- .gitlab-ci/prepare-artifacts.sh
|
|
||||||
|
|
||||||
.meson-cross:
|
|
||||||
extends:
|
|
||||||
- .meson-build
|
|
||||||
stage: build-misc
|
|
||||||
variables:
|
|
||||||
UNWIND: "disabled"
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
-D osmesa=false
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-vdpau=disabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=disabled
|
|
||||||
-D gallium-xa=disabled
|
|
||||||
-D gallium-nine=false
|
|
||||||
|
|
||||||
.meson-arm:
|
|
||||||
extends:
|
|
||||||
- .meson-cross
|
|
||||||
- .use-debian/arm64_build
|
|
||||||
needs:
|
|
||||||
- debian/arm64_build
|
|
||||||
variables:
|
|
||||||
VULKAN_DRIVERS: freedreno,broadcom
|
|
||||||
GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink"
|
|
||||||
BUILDTYPE: "debugoptimized"
|
|
||||||
tags:
|
|
||||||
- aarch64
|
|
||||||
|
|
||||||
debian-arm32:
|
|
||||||
extends:
|
|
||||||
- .meson-arm
|
|
||||||
- .ci-deqp-artifacts
|
|
||||||
variables:
|
|
||||||
CROSS: armhf
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D llvm=disabled
|
|
||||||
-D valgrind=disabled
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm32-default-${BUILDTYPE}
|
|
||||||
# The strip command segfaults, failing to strip the binary and leaving
|
|
||||||
# tempfiles in our artifacts.
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
- .gitlab-ci/prepare-artifacts.sh
|
|
||||||
|
|
||||||
debian-arm32-asan:
|
|
||||||
extends:
|
|
||||||
- debian-arm32
|
|
||||||
variables:
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D llvm=disabled
|
|
||||||
-D b_sanitize=address
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D tools=dlclose-skip
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm32-asan-${BUILDTYPE}
|
|
||||||
MESON_TEST_ARGS: "--no-suite mesa:compiler --no-suite mesa:util"
|
|
||||||
|
|
||||||
debian-arm64:
|
|
||||||
extends:
|
|
||||||
- .meson-arm
|
|
||||||
- .ci-deqp-artifacts
|
|
||||||
variables:
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=array-bounds
|
|
||||||
-Wno-error=stringop-truncation
|
|
||||||
VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental"
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D llvm=disabled
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D imagination-srv=true
|
|
||||||
-D perfetto=true
|
|
||||||
-D freedreno-kmds=msm,virtio
|
|
||||||
-D teflon=true
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
- .gitlab-ci/prepare-artifacts.sh
|
|
||||||
|
|
||||||
debian-arm64-asan:
|
|
||||||
extends:
|
|
||||||
- debian-arm64
|
|
||||||
variables:
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D llvm=disabled
|
|
||||||
-D b_sanitize=address
|
|
||||||
-D valgrind=disabled
|
|
||||||
-D tools=dlclose-skip
|
|
||||||
ARTIFACTS_DEBUG_SYMBOLS: 1
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-asan-${BUILDTYPE}
|
|
||||||
MESON_TEST_ARGS: "--no-suite mesa:compiler"
|
|
||||||
|
|
||||||
debian-arm64-build-test:
|
|
||||||
extends:
|
|
||||||
- .meson-arm
|
|
||||||
- .ci-deqp-artifacts
|
|
||||||
variables:
|
|
||||||
VULKAN_DRIVERS: "amd"
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-Dtools=panfrost,imagination
|
|
||||||
|
|
||||||
debian-arm64-release:
|
|
||||||
extends:
|
|
||||||
- debian-arm64
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: release
|
|
||||||
S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=array-bounds
|
|
||||||
-Wno-error=stringop-truncation
|
|
||||||
-Wno-error=stringop-overread
|
|
||||||
script:
|
|
||||||
- .gitlab-ci/meson/build.sh
|
|
||||||
- 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
|
|
||||||
|
|
||||||
debian-clang:
|
|
||||||
extends: .meson-build
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
UNWIND: "enabled"
|
|
||||||
C_ARGS: >
|
|
||||||
-Wno-error=constant-conversion
|
|
||||||
-Wno-error=enum-conversion
|
|
||||||
-Wno-error=initializer-overrides
|
|
||||||
-Wno-error=sometimes-uninitialized
|
|
||||||
-Werror=misleading-indentation
|
|
||||||
CPP_ARGS: >
|
|
||||||
-Wno-error=c99-designator
|
|
||||||
-Wno-error=overloaded-virtual
|
|
||||||
-Wno-error=tautological-constant-out-of-range-compare
|
|
||||||
-Wno-error=unused-private-field
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=dri
|
|
||||||
-D gbm=enabled
|
|
||||||
-D egl=enabled
|
|
||||||
-D glvnd=true
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=enabled
|
|
||||||
-D gallium-omx=bellagio
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=enabled
|
|
||||||
-D gallium-nine=true
|
|
||||||
-D gles1=enabled
|
|
||||||
-D gles2=enabled
|
|
||||||
-D llvm=enabled
|
|
||||||
-D microsoft-clc=disabled
|
|
||||||
-D shared-llvm=enabled
|
|
||||||
-D opencl-spirv=true
|
|
||||||
-D shared-glapi=enabled
|
|
||||||
GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
|
|
||||||
VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,swrast,panfrost,imagination-experimental,microsoft-experimental,nouveau
|
|
||||||
EXTRA_OPTION:
|
|
||||||
-D spirv-to-dxil=true
|
|
||||||
-D osmesa=true
|
|
||||||
-D imagination-srv=true
|
|
||||||
-D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi,imagination
|
|
||||||
-D vulkan-layers=device-select,overlay
|
|
||||||
-D build-aco-tests=true
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D intel-rt=enabled
|
|
||||||
-D imagination-srv=true
|
|
||||||
-D teflon=true
|
|
||||||
CC: clang-${LLVM_VERSION}
|
|
||||||
CXX: clang++-${LLVM_VERSION}
|
|
||||||
|
|
||||||
debian-clang-release:
|
|
||||||
extends: debian-clang
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: "release"
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=xlib
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-extra-hud=true
|
|
||||||
-D gallium-vdpau=enabled
|
|
||||||
-D gallium-omx=bellagio
|
|
||||||
-D gallium-va=enabled
|
|
||||||
-D gallium-xa=enabled
|
|
||||||
-D gallium-nine=true
|
|
||||||
-D gles1=disabled
|
|
||||||
-D gles2=disabled
|
|
||||||
-D llvm=enabled
|
|
||||||
-D microsoft-clc=disabled
|
|
||||||
-D shared-llvm=enabled
|
|
||||||
-D opencl-spirv=true
|
|
||||||
-D shared-glapi=disabled
|
|
||||||
|
|
||||||
windows-msvc:
|
|
||||||
extends:
|
|
||||||
- .build-windows
|
|
||||||
- .use-windows_build_msvc
|
|
||||||
- .windows-build-rules
|
|
||||||
stage: build-misc
|
|
||||||
script:
|
|
||||||
- pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1
|
|
||||||
artifacts:
|
|
||||||
paths:
|
|
||||||
- _build/meson-logs/*.txt
|
|
||||||
- _install/
|
|
||||||
|
|
||||||
debian-vulkan:
|
|
||||||
extends: .meson-build
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
UNWIND: "disabled"
|
|
||||||
DRI_LOADERS: >
|
|
||||||
-D glx=disabled
|
|
||||||
-D gbm=disabled
|
|
||||||
-D egl=disabled
|
|
||||||
-D opengl=false
|
|
||||||
-D gles1=disabled
|
|
||||||
-D gles2=disabled
|
|
||||||
-D platforms=x11,wayland
|
|
||||||
-D osmesa=false
|
|
||||||
GALLIUM_ST: >
|
|
||||||
-D dri3=enabled
|
|
||||||
-D gallium-vdpau=disabled
|
|
||||||
-D gallium-omx=disabled
|
|
||||||
-D gallium-va=disabled
|
|
||||||
-D gallium-xa=disabled
|
|
||||||
-D gallium-nine=false
|
|
||||||
-D gallium-rusticl=false
|
|
||||||
-D b_sanitize=undefined
|
|
||||||
-D c_args=-fno-sanitize-recover=all
|
|
||||||
-D cpp_args=-fno-sanitize-recover=all
|
|
||||||
UBSAN_OPTIONS: "print_stacktrace=1"
|
|
||||||
VULKAN_DRIVERS: amd,broadcom,freedreno,intel,intel_hasvk,panfrost,virtio,imagination-experimental,microsoft-experimental,nouveau
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D vulkan-layers=device-select,overlay
|
|
||||||
-D build-aco-tests=true
|
|
||||||
-D intel-rt=disabled
|
|
||||||
-D imagination-srv=true
|
|
||||||
|
|
||||||
debian-x86_32:
|
|
||||||
extends:
|
|
||||||
- .meson-cross
|
|
||||||
- .use-debian/x86_32_build
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
CROSS: i386
|
|
||||||
VULKAN_DRIVERS: intel,amd,swrast,virtio
|
|
||||||
GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus,d3d12"
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
EXTRA_OPTION: >
|
|
||||||
-D vulkan-layers=device-select,overlay
|
|
||||||
-D intel-clc=system
|
|
||||||
HOST_BUILD_OPTIONS: >
|
|
||||||
-D build-tests=false
|
|
||||||
-D enable-glcpp-tests=false
|
|
||||||
-D gallium-opencl=disabled
|
|
||||||
-D gallium-drivers=
|
|
||||||
-D vulkan-drivers=
|
|
||||||
-D video-codecs=
|
|
||||||
-D glx=disabled
|
|
||||||
-D platforms=
|
|
||||||
-D intel-clc=enabled
|
|
||||||
-D install-intel-clc=true
|
|
||||||
|
|
||||||
debian-s390x:
|
|
||||||
extends:
|
|
||||||
- debian-ppc64el
|
|
||||||
- .use-debian/s390x_build
|
|
||||||
- .s390x-rules
|
|
||||||
tags:
|
|
||||||
- kvm
|
|
||||||
variables:
|
|
||||||
CROSS: s390x
|
|
||||||
GALLIUM_DRIVERS: "swrast,zink"
|
|
||||||
LLVM_VERSION: 15
|
|
||||||
VULKAN_DRIVERS: "swrast"
|
|
||||||
|
|
||||||
debian-ppc64el:
|
|
||||||
extends:
|
|
||||||
- .meson-cross
|
|
||||||
- .use-debian/ppc64el_build
|
|
||||||
- .ppc64el-rules
|
|
||||||
variables:
|
|
||||||
BUILDTYPE: debug
|
|
||||||
CROSS: ppc64el
|
|
||||||
GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
|
|
||||||
VULKAN_DRIVERS: "amd,swrast"
|
|
@@ -1,35 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2035
|
|
||||||
# shellcheck disable=SC2061
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
while true; do
|
|
||||||
devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null)
|
|
||||||
for i in $devcds; do
|
|
||||||
echo "Found a devcoredump at $i."
|
|
||||||
if cp $i /results/first.devcore; then
|
|
||||||
echo 1 > $i
|
|
||||||
echo "Saved to the job artifacts at /first.devcore"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
i915_error_states=$(find /sys/devices/ -path */drm/card*/error)
|
|
||||||
for i in $i915_error_states; do
|
|
||||||
tmpfile=$(mktemp)
|
|
||||||
cp "$i" "$tmpfile"
|
|
||||||
filesize=$(stat --printf="%s" "$tmpfile")
|
|
||||||
# Does the file contain "No error state collected" ?
|
|
||||||
if [ "$filesize" = 25 ]; then
|
|
||||||
rm "$tmpfile"
|
|
||||||
else
|
|
||||||
echo "Found an i915 error state at $i size=$filesize."
|
|
||||||
if cp "$tmpfile" /results/first.i915_error_state; then
|
|
||||||
rm "$tmpfile"
|
|
||||||
echo 1 > "$i"
|
|
||||||
echo "Saved to the job artifacts at /first.i915_error_state"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
sleep 10
|
|
||||||
done
|
|
@@ -1,131 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
for var in \
|
|
||||||
ACO_DEBUG \
|
|
||||||
ARTIFACTS_BASE_URL \
|
|
||||||
ASAN_OPTIONS \
|
|
||||||
BASE_SYSTEM_FORK_HOST_PREFIX \
|
|
||||||
BASE_SYSTEM_MAINLINE_HOST_PREFIX \
|
|
||||||
CI_COMMIT_BRANCH \
|
|
||||||
CI_COMMIT_REF_NAME \
|
|
||||||
CI_COMMIT_TITLE \
|
|
||||||
CI_JOB_ID \
|
|
||||||
CI_JOB_JWT_FILE \
|
|
||||||
CI_JOB_STARTED_AT \
|
|
||||||
CI_JOB_NAME \
|
|
||||||
CI_JOB_URL \
|
|
||||||
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
|
|
||||||
CI_MERGE_REQUEST_TITLE \
|
|
||||||
CI_NODE_INDEX \
|
|
||||||
CI_NODE_TOTAL \
|
|
||||||
CI_PAGES_DOMAIN \
|
|
||||||
CI_PIPELINE_ID \
|
|
||||||
CI_PIPELINE_URL \
|
|
||||||
CI_PROJECT_DIR \
|
|
||||||
CI_PROJECT_NAME \
|
|
||||||
CI_PROJECT_PATH \
|
|
||||||
CI_PROJECT_ROOT_NAMESPACE \
|
|
||||||
CI_RUNNER_DESCRIPTION \
|
|
||||||
CI_SERVER_URL \
|
|
||||||
CROSVM_GALLIUM_DRIVER \
|
|
||||||
CROSVM_GPU_ARGS \
|
|
||||||
CURRENT_SECTION \
|
|
||||||
DEQP_BIN_DIR \
|
|
||||||
DEQP_CONFIG \
|
|
||||||
DEQP_EXPECTED_RENDERER \
|
|
||||||
DEQP_FRACTION \
|
|
||||||
DEQP_HEIGHT \
|
|
||||||
DEQP_RESULTS_DIR \
|
|
||||||
DEQP_RUNNER_OPTIONS \
|
|
||||||
DEQP_SUITE \
|
|
||||||
DEQP_TEMP_DIR \
|
|
||||||
DEQP_VER \
|
|
||||||
DEQP_WIDTH \
|
|
||||||
DEVICE_NAME \
|
|
||||||
DRIVER_NAME \
|
|
||||||
EGL_PLATFORM \
|
|
||||||
ETNA_MESA_DEBUG \
|
|
||||||
FDO_CI_CONCURRENT \
|
|
||||||
FDO_UPSTREAM_REPO \
|
|
||||||
FD_MESA_DEBUG \
|
|
||||||
FLAKES_CHANNEL \
|
|
||||||
FREEDRENO_HANGCHECK_MS \
|
|
||||||
GALLIUM_DRIVER \
|
|
||||||
GALLIVM_PERF \
|
|
||||||
GPU_VERSION \
|
|
||||||
GTEST \
|
|
||||||
GTEST_FAILS \
|
|
||||||
GTEST_FRACTION \
|
|
||||||
GTEST_RESULTS_DIR \
|
|
||||||
GTEST_RUNNER_OPTIONS \
|
|
||||||
GTEST_SKIPS \
|
|
||||||
HWCI_FREQ_MAX \
|
|
||||||
HWCI_KERNEL_MODULES \
|
|
||||||
HWCI_KVM \
|
|
||||||
HWCI_START_WESTON \
|
|
||||||
HWCI_START_XORG \
|
|
||||||
HWCI_TEST_SCRIPT \
|
|
||||||
IR3_SHADER_DEBUG \
|
|
||||||
JOB_ARTIFACTS_BASE \
|
|
||||||
JOB_RESULTS_PATH \
|
|
||||||
JOB_ROOTFS_OVERLAY_PATH \
|
|
||||||
KERNEL_IMAGE_BASE \
|
|
||||||
KERNEL_IMAGE_NAME \
|
|
||||||
LD_LIBRARY_PATH \
|
|
||||||
LP_NUM_THREADS \
|
|
||||||
MESA_BASE_TAG \
|
|
||||||
MESA_BUILD_PATH \
|
|
||||||
MESA_DEBUG \
|
|
||||||
MESA_GLES_VERSION_OVERRIDE \
|
|
||||||
MESA_GLSL_VERSION_OVERRIDE \
|
|
||||||
MESA_GL_VERSION_OVERRIDE \
|
|
||||||
MESA_IMAGE \
|
|
||||||
MESA_IMAGE_PATH \
|
|
||||||
MESA_IMAGE_TAG \
|
|
||||||
MESA_LOADER_DRIVER_OVERRIDE \
|
|
||||||
MESA_TEMPLATES_COMMIT \
|
|
||||||
MESA_VK_IGNORE_CONFORMANCE_WARNING \
|
|
||||||
S3_HOST \
|
|
||||||
S3_RESULTS_UPLOAD \
|
|
||||||
NIR_DEBUG \
|
|
||||||
PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
|
|
||||||
PAN_MESA_DEBUG \
|
|
||||||
PANVK_DEBUG \
|
|
||||||
PIGLIT_FRACTION \
|
|
||||||
PIGLIT_NO_WINDOW \
|
|
||||||
PIGLIT_OPTIONS \
|
|
||||||
PIGLIT_PLATFORM \
|
|
||||||
PIGLIT_PROFILES \
|
|
||||||
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
|
|
||||||
PIGLIT_REPLAY_DEVICE_NAME \
|
|
||||||
PIGLIT_REPLAY_EXTRA_ARGS \
|
|
||||||
PIGLIT_REPLAY_LOOP_TIMES \
|
|
||||||
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
|
|
||||||
PIGLIT_REPLAY_SUBCOMMAND \
|
|
||||||
PIGLIT_RESULTS \
|
|
||||||
PIGLIT_TESTS \
|
|
||||||
PIGLIT_TRACES_FILE \
|
|
||||||
PIPELINE_ARTIFACTS_BASE \
|
|
||||||
RADEON_DEBUG \
|
|
||||||
RADV_DEBUG \
|
|
||||||
RADV_PERFTEST \
|
|
||||||
SKQP_ASSETS_DIR \
|
|
||||||
SKQP_BACKENDS \
|
|
||||||
TU_DEBUG \
|
|
||||||
USE_ANGLE \
|
|
||||||
VIRGL_HOST_API \
|
|
||||||
WAFFLE_PLATFORM \
|
|
||||||
VK_CPU \
|
|
||||||
VK_DRIVER \
|
|
||||||
VK_ICD_FILENAMES \
|
|
||||||
VKD3D_PROTON_RESULTS \
|
|
||||||
VKD3D_CONFIG \
|
|
||||||
VKD3D_TEST_EXCLUDE \
|
|
||||||
ZINK_DESCRIPTORS \
|
|
||||||
ZINK_DEBUG \
|
|
||||||
LVP_POISON_MEMORY \
|
|
||||||
; do
|
|
||||||
if [ -n "${!var+x}" ]; then
|
|
||||||
echo "export $var=${!var@Q}"
|
|
||||||
fi
|
|
||||||
done
|
|
@@ -1,25 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# Very early init, used to make sure devices and network are set up and
|
|
||||||
# reachable.
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
cd /
|
|
||||||
|
|
||||||
findmnt --mountpoint /proc || mount -t proc none /proc
|
|
||||||
findmnt --mountpoint /sys || mount -t sysfs none /sys
|
|
||||||
mount -t debugfs none /sys/kernel/debug
|
|
||||||
findmnt --mountpoint /dev || mount -t devtmpfs none /dev
|
|
||||||
mkdir -p /dev/pts
|
|
||||||
mount -t devpts devpts /dev/pts
|
|
||||||
mkdir /dev/shm
|
|
||||||
mount -t tmpfs -o noexec,nodev,nosuid tmpfs /dev/shm
|
|
||||||
mount -t tmpfs tmpfs /tmp
|
|
||||||
|
|
||||||
echo "nameserver 8.8.8.8" > /etc/resolv.conf
|
|
||||||
[ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
|
|
||||||
|
|
||||||
# Set the time so we can validate certificates before we fetch anything;
|
|
||||||
# however as not all DUTs have network, make this non-fatal.
|
|
||||||
for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
|
|
@@ -1,235 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# shellcheck disable=SC1090
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC2155
|
|
||||||
|
|
||||||
# Second-stage init, used to set up devices and our job environment before
|
|
||||||
# running tests.
|
|
||||||
|
|
||||||
shopt -s extglob
|
|
||||||
|
|
||||||
# Make sure to kill itself and all the children process from this script on
|
|
||||||
# exiting, since any console output may interfere with LAVA signals handling,
|
|
||||||
# which based on the log console.
|
|
||||||
cleanup() {
|
|
||||||
if [ "$BACKGROUND_PIDS" = "" ]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
set +x
|
|
||||||
echo "Killing all child processes"
|
|
||||||
for pid in $BACKGROUND_PIDS
|
|
||||||
do
|
|
||||||
kill "$pid" 2>/dev/null || true
|
|
||||||
done
|
|
||||||
|
|
||||||
# Sleep just a little to give enough time for subprocesses to be gracefully
|
|
||||||
# killed. Then apply a SIGKILL if necessary.
|
|
||||||
sleep 5
|
|
||||||
for pid in $BACKGROUND_PIDS
|
|
||||||
do
|
|
||||||
kill -9 "$pid" 2>/dev/null || true
|
|
||||||
done
|
|
||||||
|
|
||||||
BACKGROUND_PIDS=
|
|
||||||
set -x
|
|
||||||
}
|
|
||||||
trap cleanup INT TERM EXIT
|
|
||||||
|
|
||||||
# Space separated values with the PIDS of the processes started in the
|
|
||||||
# background by this script
|
|
||||||
BACKGROUND_PIDS=
|
|
||||||
|
|
||||||
|
|
||||||
for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do
|
|
||||||
[ -f "$path" ] && source "$path"
|
|
||||||
done
|
|
||||||
. "$SCRIPTS_DIR"/setup-test-env.sh
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
# Set up any devices required by the jobs
|
|
||||||
[ -z "$HWCI_KERNEL_MODULES" ] || {
|
|
||||||
echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
|
|
||||||
}
|
|
||||||
|
|
||||||
# Set up ZRAM
|
|
||||||
HWCI_ZRAM_SIZE=2G
|
|
||||||
if /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
|
|
||||||
mkswap /dev/zram0
|
|
||||||
swapon /dev/zram0
|
|
||||||
echo "zram: $HWCI_ZRAM_SIZE activated"
|
|
||||||
else
|
|
||||||
echo "zram: skipping, not supported"
|
|
||||||
fi
|
|
||||||
|
|
||||||
#
|
|
||||||
# Load the KVM module specific to the detected CPU virtualization extensions:
|
|
||||||
# - vmx for Intel VT
|
|
||||||
# - svm for AMD-V
|
|
||||||
#
|
|
||||||
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
|
|
||||||
#
|
|
||||||
if [ "$HWCI_KVM" = "true" ]; then
|
|
||||||
unset KVM_KERNEL_MODULE
|
|
||||||
{
|
|
||||||
grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
|
|
||||||
} || {
|
|
||||||
grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
[ -z "${KVM_KERNEL_MODULE}" ] && \
|
|
||||||
echo "WARNING: Failed to detect CPU virtualization extensions"
|
|
||||||
} || \
|
|
||||||
modprobe ${KVM_KERNEL_MODULE}
|
|
||||||
|
|
||||||
mkdir -p /lava-files
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "/lava-files/${KERNEL_IMAGE_NAME}" \
|
|
||||||
"${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
|
|
||||||
# it in /install
|
|
||||||
ln -sf $CI_PROJECT_DIR/install /install
|
|
||||||
export LD_LIBRARY_PATH=/install/lib
|
|
||||||
export LIBGL_DRIVERS_PATH=/install/lib/dri
|
|
||||||
|
|
||||||
# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691
|
|
||||||
# The navi21 boards seem to have trouble with ld.so.cache, so try explicitly
|
|
||||||
# telling it to look in /usr/local/lib.
|
|
||||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
|
|
||||||
|
|
||||||
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
|
|
||||||
export XDG_CACHE_HOME=/tmp
|
|
||||||
|
|
||||||
# Make sure Python can find all our imports
|
|
||||||
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
|
|
||||||
|
|
||||||
# If we need to specify a driver, it means several drivers could pick up this gpu;
|
|
||||||
# ensure that the other driver can't accidentally be used
|
|
||||||
if [ -n "$MESA_LOADER_DRIVER_OVERRIDE" ]; then
|
|
||||||
rm /install/lib/dri/!($MESA_LOADER_DRIVER_OVERRIDE)_dri.so
|
|
||||||
fi
|
|
||||||
ls -1 /install/lib/dri/*_dri.so
|
|
||||||
|
|
||||||
if [ "$HWCI_FREQ_MAX" = "true" ]; then
|
|
||||||
# Ensure initialization of the DRM device (needed by MSM)
|
|
||||||
head -0 /dev/dri/renderD128
|
|
||||||
|
|
||||||
# Disable GPU frequency scaling
|
|
||||||
DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
|
|
||||||
test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true
|
|
||||||
|
|
||||||
# Disable CPU frequency scaling
|
|
||||||
echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
|
|
||||||
|
|
||||||
# Disable GPU runtime power management
|
|
||||||
GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
|
|
||||||
test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
|
|
||||||
# Lock Intel GPU frequency to 70% of the maximum allowed by hardware
|
|
||||||
# and enable throttling detection & reporting.
|
|
||||||
# Additionally, set the upper limit for CPU scaling frequency to 65% of the
|
|
||||||
# maximum permitted, as an additional measure to mitigate thermal throttling.
|
|
||||||
/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Start a little daemon to capture sysfs records and produce a JSON file
|
|
||||||
if [ -x /kdl.sh ]; then
|
|
||||||
echo "launch kdl.sh!"
|
|
||||||
/kdl.sh &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
else
|
|
||||||
echo "kdl.sh not found!"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Increase freedreno hangcheck timer because it's right at the edge of the
|
|
||||||
# spilling tests timing out (and some traces, too)
|
|
||||||
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
|
|
||||||
echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Start a little daemon to capture the first devcoredump we encounter. (They
|
|
||||||
# expire after 5 minutes, so we poll for them).
|
|
||||||
if [ -x /capture-devcoredump.sh ]; then
|
|
||||||
/capture-devcoredump.sh &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# If we want Xorg to be running for the test, then we start it up before the
|
|
||||||
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
|
|
||||||
# without using -displayfd you can race with Xorg's startup), but xinit will eat
|
|
||||||
# your client's return code
|
|
||||||
if [ -n "$HWCI_START_XORG" ]; then
|
|
||||||
echo "touch /xorg-started; sleep 100000" > /xorg-script
|
|
||||||
env \
|
|
||||||
VK_ICD_FILENAMES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
|
|
||||||
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
|
|
||||||
# Wait for xorg to be ready for connections.
|
|
||||||
for _ in 1 2 3 4 5; do
|
|
||||||
if [ -e /xorg-started ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 5
|
|
||||||
done
|
|
||||||
export DISPLAY=:0
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$HWCI_START_WESTON" ]; then
|
|
||||||
WESTON_X11_SOCK="/tmp/.X11-unix/X0"
|
|
||||||
if [ -n "$HWCI_START_XORG" ]; then
|
|
||||||
echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing."
|
|
||||||
WESTON_X11_SOCK="/tmp/.X11-unix/X1"
|
|
||||||
fi
|
|
||||||
export WAYLAND_DISPLAY=wayland-0
|
|
||||||
|
|
||||||
# Display server is Weston Xwayland when HWCI_START_XORG is not set or Xorg when it's
|
|
||||||
export DISPLAY=:0
|
|
||||||
mkdir -p /tmp/.X11-unix
|
|
||||||
|
|
||||||
env \
|
|
||||||
VK_ICD_FILENAMES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
|
|
||||||
weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
|
|
||||||
BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
|
|
||||||
|
|
||||||
while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
|
|
||||||
fi
|
|
||||||
|
|
||||||
set +e
|
|
||||||
bash -c ". $SCRIPTS_DIR/setup-test-env.sh && $HWCI_TEST_SCRIPT"
|
|
||||||
EXIT_CODE=$?
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Let's make sure the results are always stored in current working directory
|
|
||||||
mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
|
|
||||||
|
|
||||||
[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"
|
|
||||||
|
|
||||||
# Make sure that capture-devcoredump is done before we start trying to tar up
|
|
||||||
# artifacts -- if it's writing while tar is reading, tar will throw an error and
|
|
||||||
# kill the job.
|
|
||||||
cleanup
|
|
||||||
|
|
||||||
# upload artifacts
|
|
||||||
if [ -n "$S3_RESULTS_UPLOAD" ]; then
|
|
||||||
tar --zstd -cf results.tar.zst results/;
|
|
||||||
ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
|
|
||||||
fi
|
|
||||||
|
|
||||||
# We still need to echo the hwci: mesa message, as some scripts rely on it, such
|
|
||||||
# as the python ones inside the bare-metal folder
|
|
||||||
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
|
|
||||||
|
|
||||||
set +x
|
|
||||||
|
|
||||||
# Print the final result; both bare-metal and LAVA look for this string to get
|
|
||||||
# the result of our run, so try really hard to get it out rather than losing
|
|
||||||
# the run. The device gets shut down right at this point, and a630 seems to
|
|
||||||
# enjoy corrupting the last line of serial output before shutdown.
|
|
||||||
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done
|
|
||||||
|
|
||||||
exit $EXIT_CODE
|
|
@@ -1,768 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2013
|
|
||||||
# shellcheck disable=SC2015
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# shellcheck disable=SC2046
|
|
||||||
# shellcheck disable=SC2059
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC2154
|
|
||||||
# shellcheck disable=SC2155
|
|
||||||
# shellcheck disable=SC2162
|
|
||||||
# shellcheck disable=SC2229
|
|
||||||
#
|
|
||||||
# This is an utility script to manage Intel GPU frequencies.
|
|
||||||
# It can be used for debugging performance problems or trying to obtain a stable
|
|
||||||
# frequency while benchmarking.
|
|
||||||
#
|
|
||||||
# Note the Intel i915 GPU driver allows to change the minimum, maximum and boost
|
|
||||||
# frequencies in steps of 50 MHz via:
|
|
||||||
#
|
|
||||||
# /sys/class/drm/card<n>/<freq_info>
|
|
||||||
#
|
|
||||||
# Where <n> is the DRM card index and <freq_info> one of the following:
|
|
||||||
#
|
|
||||||
# - gt_max_freq_mhz (enforced maximum freq)
|
|
||||||
# - gt_min_freq_mhz (enforced minimum freq)
|
|
||||||
# - gt_boost_freq_mhz (enforced boost freq)
|
|
||||||
#
|
|
||||||
# The hardware capabilities can be accessed via:
|
|
||||||
#
|
|
||||||
# - gt_RP0_freq_mhz (supported maximum freq)
|
|
||||||
# - gt_RPn_freq_mhz (supported minimum freq)
|
|
||||||
# - gt_RP1_freq_mhz (most efficient freq)
|
|
||||||
#
|
|
||||||
# The current frequency can be read from:
|
|
||||||
# - gt_act_freq_mhz (the actual GPU freq)
|
|
||||||
# - gt_cur_freq_mhz (the last requested freq)
|
|
||||||
#
|
|
||||||
# Also note that in addition to GPU management, the script offers the
|
|
||||||
# possibility to adjust CPU operating frequencies. However, this is currently
|
|
||||||
# limited to just setting the maximum scaling frequency as percentage of the
|
|
||||||
# maximum frequency allowed by the hardware.
|
|
||||||
#
|
|
||||||
# Copyright (C) 2022 Collabora Ltd.
|
|
||||||
# Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
|
|
||||||
#
|
|
||||||
# SPDX-License-Identifier: MIT
|
|
||||||
#
|
|
||||||
|
|
||||||
#
|
|
||||||
# Constants
|
|
||||||
#
|
|
||||||
|
|
||||||
# GPU
|
|
||||||
DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
|
|
||||||
ENF_FREQ_INFO="max min boost"
|
|
||||||
CAP_FREQ_INFO="RP0 RPn RP1"
|
|
||||||
ACT_FREQ_INFO="act cur"
|
|
||||||
THROTT_DETECT_SLEEP_SEC=2
|
|
||||||
THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid
|
|
||||||
|
|
||||||
# CPU
|
|
||||||
CPU_SYSFS_PREFIX=/sys/devices/system/cpu
|
|
||||||
CPU_PSTATE_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/intel_pstate/%s"
|
|
||||||
CPU_FREQ_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/cpu%s/cpufreq/%s_freq"
|
|
||||||
CAP_CPU_FREQ_INFO="cpuinfo_max cpuinfo_min"
|
|
||||||
ENF_CPU_FREQ_INFO="scaling_max scaling_min"
|
|
||||||
ACT_CPU_FREQ_INFO="scaling_cur"
|
|
||||||
|
|
||||||
#
|
|
||||||
# Global variables.
|
|
||||||
#
|
|
||||||
unset INTEL_DRM_CARD_INDEX
|
|
||||||
unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ
|
|
||||||
unset SET_MIN_FREQ SET_MAX_FREQ
|
|
||||||
unset MONITOR_FREQ
|
|
||||||
unset CPU_SET_MAX_FREQ
|
|
||||||
unset DETECT_THROTT
|
|
||||||
unset DRY_RUN
|
|
||||||
|
|
||||||
#
|
|
||||||
# Simple printf based stderr logger.
|
|
||||||
#
|
|
||||||
log() {
|
|
||||||
local msg_type=$1
|
|
||||||
|
|
||||||
shift
|
|
||||||
printf "%s: %s: " "${msg_type}" "${0##*/}" >&2
|
|
||||||
printf "$@" >&2
|
|
||||||
printf "\n" >&2
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper to print sysfs path for the given card index and freq info.
|
|
||||||
#
|
|
||||||
# arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above
|
|
||||||
# arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX
|
|
||||||
#
|
|
||||||
print_freq_sysfs_path() {
|
|
||||||
printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card.
|
|
||||||
#
|
|
||||||
identify_intel_gpu() {
|
|
||||||
local i=0 vendor path
|
|
||||||
|
|
||||||
while [ ${i} -lt 16 ]; do
|
|
||||||
[ -c "/dev/dri/card$i" ] || {
|
|
||||||
i=$((i + 1))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
path=$(print_freq_sysfs_path "" ${i})
|
|
||||||
path=${path%/*}/device/vendor
|
|
||||||
|
|
||||||
[ -r "${path}" ] && read vendor < "${path}" && \
|
|
||||||
[ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0
|
|
||||||
|
|
||||||
i=$((i + 1))
|
|
||||||
done
|
|
||||||
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Read the specified freq info from sysfs.
|
|
||||||
#
|
|
||||||
# arg1: Flag (y/n) to also enable printing the freq info.
|
|
||||||
# arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above
|
|
||||||
# return: Global variable(s) FREQ_${arg} containing the requested information
|
|
||||||
#
|
|
||||||
read_freq_info() {
|
|
||||||
local var val info path print=0 ret=0
|
|
||||||
|
|
||||||
[ "$1" = "y" ] && print=1
|
|
||||||
shift
|
|
||||||
|
|
||||||
while [ $# -gt 0 ]; do
|
|
||||||
info=$1
|
|
||||||
shift
|
|
||||||
var=FREQ_${info}
|
|
||||||
path=$(print_freq_sysfs_path "${info}")
|
|
||||||
|
|
||||||
[ -r ${path} ] && read ${var} < ${path} || {
|
|
||||||
log ERROR "Failed to read freq info from: %s" "${path}"
|
|
||||||
ret=1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -n "${var}" ] || {
|
|
||||||
log ERROR "Got empty freq info from: %s" "${path}"
|
|
||||||
ret=1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
[ ${print} -eq 1 ] && {
|
|
||||||
eval val=\$${var}
|
|
||||||
printf "%6s: %4s MHz\n" "${info}" "${val}"
|
|
||||||
}
|
|
||||||
done
|
|
||||||
|
|
||||||
return ${ret}
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Display requested info.
|
|
||||||
#
|
|
||||||
print_freq_info() {
|
|
||||||
local req_freq
|
|
||||||
|
|
||||||
[ -n "${GET_CAP_FREQ}" ] && {
|
|
||||||
printf "* Hardware capabilities\n"
|
|
||||||
read_freq_info y ${CAP_FREQ_INFO}
|
|
||||||
printf "\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -n "${GET_ENF_FREQ}" ] && {
|
|
||||||
printf "* Enforcements\n"
|
|
||||||
read_freq_info y ${ENF_FREQ_INFO}
|
|
||||||
printf "\n"
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -n "${GET_ACT_FREQ}" ] && {
|
|
||||||
printf "* Actual\n"
|
|
||||||
read_freq_info y ${ACT_FREQ_INFO}
|
|
||||||
printf "\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper to print frequency value as requested by user via '-s, --set' option.
|
|
||||||
# arg1: user requested freq value
|
|
||||||
#
|
|
||||||
compute_freq_set() {
|
|
||||||
local val
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
+)
|
|
||||||
val=${FREQ_RP0}
|
|
||||||
;;
|
|
||||||
-)
|
|
||||||
val=${FREQ_RPn}
|
|
||||||
;;
|
|
||||||
*%)
|
|
||||||
val=$((${1%?} * FREQ_RP0 / 100))
|
|
||||||
# Adjust freq to comply with 50 MHz increments
|
|
||||||
val=$((val / 50 * 50))
|
|
||||||
;;
|
|
||||||
*[!0-9]*)
|
|
||||||
log ERROR "Cannot set freq to invalid value: %s" "$1"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
"")
|
|
||||||
log ERROR "Cannot set freq to unspecified value"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
# Adjust freq to comply with 50 MHz increments
|
|
||||||
val=$(($1 / 50 * 50))
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
printf "%s" "${val}"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper for set_freq().
|
|
||||||
#
|
|
||||||
set_freq_max() {
|
|
||||||
log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}"
|
|
||||||
|
|
||||||
read_freq_info n min || return $?
|
|
||||||
|
|
||||||
[ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
|
|
||||||
"${SET_MAX_FREQ}" "${FREQ_RP0}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
|
|
||||||
"${SET_MIN_FREQ}" "${FREQ_RPn}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \
|
|
||||||
"${SET_MAX_FREQ}" "${FREQ_min}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -z "${DRY_RUN}" ] || return 0
|
|
||||||
|
|
||||||
if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
|
|
||||||
$(print_freq_sysfs_path boost) > /dev/null;
|
|
||||||
then
|
|
||||||
log ERROR "Failed to set GPU max frequency"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper for set_freq().
|
|
||||||
#
|
|
||||||
set_freq_min() {
|
|
||||||
log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}"
|
|
||||||
|
|
||||||
read_freq_info n max || return $?
|
|
||||||
|
|
||||||
[ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && {
|
|
||||||
log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \
|
|
||||||
"${SET_MIN_FREQ}" "${FREQ_max}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && {
|
|
||||||
log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
|
|
||||||
"${SET_MIN_FREQ}" "${FREQ_RPn}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -z "${DRY_RUN}" ] || return 0
|
|
||||||
|
|
||||||
if ! printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min);
|
|
||||||
then
|
|
||||||
log ERROR "Failed to set GPU min frequency"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Set min or max or both GPU frequencies to the user indicated values.
|
|
||||||
#
|
|
||||||
set_freq() {
|
|
||||||
# Get hw max & min frequencies
|
|
||||||
read_freq_info n RP0 RPn || return $?
|
|
||||||
|
|
||||||
[ -z "${SET_MAX_FREQ}" ] || {
|
|
||||||
SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
|
|
||||||
[ -z "${SET_MAX_FREQ}" ] && return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -z "${SET_MIN_FREQ}" ] || {
|
|
||||||
SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}")
|
|
||||||
[ -z "${SET_MIN_FREQ}" ] && return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Ensure correct operation order, to avoid setting min freq
|
|
||||||
# to a value which is larger than max freq.
|
|
||||||
#
|
|
||||||
# E.g.:
|
|
||||||
# crt_min=crt_max=600; new_min=new_max=700
|
|
||||||
# > operation order: max=700; min=700
|
|
||||||
#
|
|
||||||
# crt_min=crt_max=600; new_min=new_max=500
|
|
||||||
# > operation order: min=500; max=500
|
|
||||||
#
|
|
||||||
if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then
|
|
||||||
[ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && {
|
|
||||||
log ERROR "Cannot set GPU max freq to be less than min freq"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
read_freq_info n min || return $?
|
|
||||||
|
|
||||||
if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then
|
|
||||||
set_freq_min || return $?
|
|
||||||
set_freq_max
|
|
||||||
else
|
|
||||||
set_freq_max || return $?
|
|
||||||
set_freq_min
|
|
||||||
fi
|
|
||||||
elif [ -n "${SET_MAX_FREQ}" ]; then
|
|
||||||
set_freq_max
|
|
||||||
elif [ -n "${SET_MIN_FREQ}" ]; then
|
|
||||||
set_freq_min
|
|
||||||
else
|
|
||||||
log "Unexpected call to set_freq()"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper for detect_throttling().
|
|
||||||
#
|
|
||||||
get_thrott_detect_pid() {
|
|
||||||
[ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0
|
|
||||||
|
|
||||||
local pid
|
|
||||||
read pid < ${THROTT_DETECT_PID_FILE_PATH} || {
|
|
||||||
log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}"
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
local proc_path=/proc/${pid:-invalid}/cmdline
|
|
||||||
[ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && {
|
|
||||||
printf "%s" "${pid}"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Remove orphaned PID file
|
|
||||||
rm -rf ${THROTT_DETECT_PID_FILE_PATH}
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Control detection and reporting of GPU throttling events.
|
|
||||||
# arg1: start - run throttle detector in background
|
|
||||||
# stop - stop throttle detector process, if any
|
|
||||||
# status - verify if throttle detector is running
|
|
||||||
#
|
|
||||||
detect_throttling() {
|
|
||||||
local pid
|
|
||||||
pid=$(get_thrott_detect_pid)
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
status)
|
|
||||||
printf "Throttling detector is "
|
|
||||||
[ -z "${pid}" ] && printf "not running\n" && return 0
|
|
||||||
printf "running (pid=%s)\n" ${pid}
|
|
||||||
;;
|
|
||||||
|
|
||||||
stop)
|
|
||||||
[ -z "${pid}" ] && return 0
|
|
||||||
|
|
||||||
log INFO "Stopping throttling detector (pid=%s)" "${pid}"
|
|
||||||
kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid}
|
|
||||||
rm -rf ${THROTT_DETECT_PID_FILE_PATH}
|
|
||||||
;;
|
|
||||||
|
|
||||||
start)
|
|
||||||
[ -n "${pid}" ] && {
|
|
||||||
log WARN "Throttling detector is already running (pid=%s)" ${pid}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
(
|
|
||||||
read_freq_info n RPn || exit $?
|
|
||||||
|
|
||||||
while true; do
|
|
||||||
sleep ${THROTT_DETECT_SLEEP_SEC}
|
|
||||||
read_freq_info n act min cur || exit $?
|
|
||||||
|
|
||||||
#
|
|
||||||
# The throttling seems to occur when act freq goes below min.
|
|
||||||
# However, it's necessary to exclude the idle states, where
|
|
||||||
# act freq normally reaches RPn and cur goes below min.
|
|
||||||
#
|
|
||||||
[ ${FREQ_act} -lt ${FREQ_min} ] && \
|
|
||||||
[ ${FREQ_act} -gt ${FREQ_RPn} ] && \
|
|
||||||
[ ${FREQ_cur} -ge ${FREQ_min} ] && \
|
|
||||||
printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \
|
|
||||||
${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn}
|
|
||||||
done
|
|
||||||
) &
|
|
||||||
|
|
||||||
pid=$!
|
|
||||||
log INFO "Started GPU throttling detector (pid=%s)" ${pid}
|
|
||||||
|
|
||||||
printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \
|
|
||||||
log WARN "Failed to write throttle detector PID file"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Retrieve the list of online CPUs.
|
|
||||||
#
|
|
||||||
get_online_cpus() {
|
|
||||||
local path cpu_index
|
|
||||||
|
|
||||||
printf "0"
|
|
||||||
for path in $(grep 1 ${CPU_SYSFS_PREFIX}/cpu*/online); do
|
|
||||||
cpu_index=${path##*/cpu}
|
|
||||||
printf " %s" ${cpu_index%%/*}
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper to print sysfs path for the given CPU index and freq info.
|
|
||||||
#
|
|
||||||
# arg1: Frequency info sysfs name, one of *_CPU_FREQ_INFO constants above
|
|
||||||
# arg2: CPU index
|
|
||||||
#
|
|
||||||
print_cpu_freq_sysfs_path() {
|
|
||||||
printf ${CPU_FREQ_SYSFS_PATTERN} "$2" "$1"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Read the specified CPU freq info from sysfs.
|
|
||||||
#
|
|
||||||
# arg1: CPU index
|
|
||||||
# arg2: Flag (y/n) to also enable printing the freq info.
|
|
||||||
# arg3...: Frequency info sysfs name(s), see *_CPU_FREQ_INFO constants above
|
|
||||||
# return: Global variable(s) CPU_FREQ_${arg} containing the requested information
|
|
||||||
#
|
|
||||||
read_cpu_freq_info() {
|
|
||||||
local var val info path cpu_index print=0 ret=0
|
|
||||||
|
|
||||||
cpu_index=$1
|
|
||||||
[ "$2" = "y" ] && print=1
|
|
||||||
shift 2
|
|
||||||
|
|
||||||
while [ $# -gt 0 ]; do
|
|
||||||
info=$1
|
|
||||||
shift
|
|
||||||
var=CPU_FREQ_${info}
|
|
||||||
path=$(print_cpu_freq_sysfs_path "${info}" ${cpu_index})
|
|
||||||
|
|
||||||
[ -r ${path} ] && read ${var} < ${path} || {
|
|
||||||
log ERROR "Failed to read CPU freq info from: %s" "${path}"
|
|
||||||
ret=1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -n "${var}" ] || {
|
|
||||||
log ERROR "Got empty CPU freq info from: %s" "${path}"
|
|
||||||
ret=1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
[ ${print} -eq 1 ] && {
|
|
||||||
eval val=\$${var}
|
|
||||||
printf "%6s: %4s Hz\n" "${info}" "${val}"
|
|
||||||
}
|
|
||||||
done
|
|
||||||
|
|
||||||
return ${ret}
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Helper to print freq. value as requested by user via '--cpu-set-max' option.
|
|
||||||
# arg1: user requested freq value
|
|
||||||
#
|
|
||||||
compute_cpu_freq_set() {
|
|
||||||
local val
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
+)
|
|
||||||
val=${CPU_FREQ_cpuinfo_max}
|
|
||||||
;;
|
|
||||||
-)
|
|
||||||
val=${CPU_FREQ_cpuinfo_min}
|
|
||||||
;;
|
|
||||||
*%)
|
|
||||||
val=$((${1%?} * CPU_FREQ_cpuinfo_max / 100))
|
|
||||||
;;
|
|
||||||
*[!0-9]*)
|
|
||||||
log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
"")
|
|
||||||
log ERROR "Cannot set CPU freq to unspecified value"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log ERROR "Cannot set CPU freq to custom value; use +, -, or % instead"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
printf "%s" "${val}"
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Adjust CPU max scaling frequency.
|
|
||||||
#
|
|
||||||
set_cpu_freq_max() {
|
|
||||||
local target_freq res=0
|
|
||||||
case "${CPU_SET_MAX_FREQ}" in
|
|
||||||
+)
|
|
||||||
target_freq=100
|
|
||||||
;;
|
|
||||||
-)
|
|
||||||
target_freq=1
|
|
||||||
;;
|
|
||||||
*%)
|
|
||||||
target_freq=${CPU_SET_MAX_FREQ%?}
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log ERROR "Invalid CPU freq"
|
|
||||||
return 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
|
|
||||||
[ -e "${pstate_info}" ] && {
|
|
||||||
log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
|
|
||||||
if ! printf "%s" "${target_freq}" > "${pstate_info}";
|
|
||||||
then
|
|
||||||
log ERROR "Failed to set intel_pstate max perf"
|
|
||||||
res=1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
local cpu_index
|
|
||||||
for cpu_index in $(get_online_cpus); do
|
|
||||||
read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }
|
|
||||||
|
|
||||||
target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
|
|
||||||
[ -z "${target_freq}" ] && { res=$?; continue; }
|
|
||||||
|
|
||||||
log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
|
|
||||||
[ -n "${DRY_RUN}" ] && continue
|
|
||||||
|
|
||||||
if ! printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index});
|
|
||||||
then
|
|
||||||
res=1
|
|
||||||
log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
return ${res}
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Show help message.
|
|
||||||
#
|
|
||||||
print_usage() {
|
|
||||||
cat <<EOF
|
|
||||||
Usage: ${0##*/} [OPTION]...
|
|
||||||
|
|
||||||
A script to manage Intel GPU frequencies. Can be used for debugging performance
|
|
||||||
problems or trying to obtain a stable frequency while benchmarking.
|
|
||||||
|
|
||||||
Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz.
|
|
||||||
|
|
||||||
Options:
|
|
||||||
-g, --get [act|enf|cap|all]
|
|
||||||
Get frequency information: active (default), enforced,
|
|
||||||
hardware capabilities or all of them.
|
|
||||||
|
|
||||||
-s, --set [{min|max}=]{FREQUENCY[%]|+|-}
|
|
||||||
Set min or max frequency to the given value (MHz).
|
|
||||||
Append '%' to interpret FREQUENCY as % of hw max.
|
|
||||||
Use '+' or '-' to set frequency to hardware max or min.
|
|
||||||
Omit min/max prefix to set both frequencies.
|
|
||||||
|
|
||||||
-r, --reset Reset frequencies to hardware defaults.
|
|
||||||
|
|
||||||
-m, --monitor [act|enf|cap|all]
|
|
||||||
Monitor the indicated frequencies via 'watch' utility.
|
|
||||||
See '-g, --get' option for more details.
|
|
||||||
|
|
||||||
-d|--detect-thrott [start|stop|status]
|
|
||||||
Start (default operation) the throttling detector
|
|
||||||
as a background process. Use 'stop' or 'status' to
|
|
||||||
terminate the detector process or verify its status.
|
|
||||||
|
|
||||||
--cpu-set-max [FREQUENCY%|+|-}
|
|
||||||
Set CPU max scaling frequency as % of hw max.
|
|
||||||
Use '+' or '-' to set frequency to hardware max or min.
|
|
||||||
|
|
||||||
-r, --reset Reset frequencies to hardware defaults.
|
|
||||||
|
|
||||||
--dry-run See what the script will do without applying any
|
|
||||||
frequency changes.
|
|
||||||
|
|
||||||
-h, --help Display this help text and exit.
|
|
||||||
EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Parse user input for '-g, --get' option.
|
|
||||||
# Returns 0 if a value has been provided, otherwise 1.
|
|
||||||
#
|
|
||||||
parse_option_get() {
|
|
||||||
local ret=0
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
act) GET_ACT_FREQ=1;;
|
|
||||||
enf) GET_ENF_FREQ=1;;
|
|
||||||
cap) GET_CAP_FREQ=1;;
|
|
||||||
all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;;
|
|
||||||
-*|"")
|
|
||||||
# No value provided, using default.
|
|
||||||
GET_ACT_FREQ=1
|
|
||||||
ret=1
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
print_usage
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
return ${ret}
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Validate user input for '-s, --set' option.
|
|
||||||
# arg1: input value to be validated
|
|
||||||
# arg2: optional flag indicating input is restricted to %
|
|
||||||
#
|
|
||||||
validate_option_set() {
|
|
||||||
case "$1" in
|
|
||||||
+|-|[0-9]%|[0-9][0-9]%)
|
|
||||||
return 0
|
|
||||||
;;
|
|
||||||
*[!0-9]*|"")
|
|
||||||
print_usage
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
[ -z "$2" ] || { print_usage; exit 1; }
|
|
||||||
}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Parse script arguments.
|
|
||||||
#
|
|
||||||
[ $# -eq 0 ] && { print_usage; exit 1; }
|
|
||||||
|
|
||||||
while [ $# -gt 0 ]; do
|
|
||||||
case "$1" in
|
|
||||||
-g|--get)
|
|
||||||
parse_option_get "$2" && shift
|
|
||||||
;;
|
|
||||||
|
|
||||||
-s|--set)
|
|
||||||
shift
|
|
||||||
case "$1" in
|
|
||||||
min=*)
|
|
||||||
SET_MIN_FREQ=${1#min=}
|
|
||||||
validate_option_set "${SET_MIN_FREQ}"
|
|
||||||
;;
|
|
||||||
max=*)
|
|
||||||
SET_MAX_FREQ=${1#max=}
|
|
||||||
validate_option_set "${SET_MAX_FREQ}"
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
SET_MIN_FREQ=$1
|
|
||||||
validate_option_set "${SET_MIN_FREQ}"
|
|
||||||
SET_MAX_FREQ=${SET_MIN_FREQ}
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
|
|
||||||
-r|--reset)
|
|
||||||
RESET_FREQ=1
|
|
||||||
SET_MIN_FREQ="-"
|
|
||||||
SET_MAX_FREQ="+"
|
|
||||||
;;
|
|
||||||
|
|
||||||
-m|--monitor)
|
|
||||||
MONITOR_FREQ=act
|
|
||||||
parse_option_get "$2" && MONITOR_FREQ=$2 && shift
|
|
||||||
;;
|
|
||||||
|
|
||||||
-d|--detect-thrott)
|
|
||||||
DETECT_THROTT=start
|
|
||||||
case "$2" in
|
|
||||||
start|stop|status)
|
|
||||||
DETECT_THROTT=$2
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
;;
|
|
||||||
|
|
||||||
--cpu-set-max)
|
|
||||||
shift
|
|
||||||
CPU_SET_MAX_FREQ=$1
|
|
||||||
validate_option_set "${CPU_SET_MAX_FREQ}" restricted
|
|
||||||
;;
|
|
||||||
|
|
||||||
--dry-run)
|
|
||||||
DRY_RUN=1
|
|
||||||
;;
|
|
||||||
|
|
||||||
-h|--help)
|
|
||||||
print_usage
|
|
||||||
exit 0
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
print_usage
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
shift
|
|
||||||
done
|
|
||||||
|
|
||||||
#
|
|
||||||
# Main
|
|
||||||
#
|
|
||||||
RET=0
|
|
||||||
|
|
||||||
identify_intel_gpu || {
|
|
||||||
log INFO "No Intel GPU detected"
|
|
||||||
exit 0
|
|
||||||
}
|
|
||||||
|
|
||||||
[ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; }
|
|
||||||
print_freq_info
|
|
||||||
|
|
||||||
[ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT}
|
|
||||||
|
|
||||||
[ -n "${CPU_SET_MAX_FREQ}" ] && { set_cpu_freq_max || RET=$?; }
|
|
||||||
|
|
||||||
[ -n "${MONITOR_FREQ}" ] && {
|
|
||||||
log INFO "Entering frequency monitoring mode"
|
|
||||||
sleep 2
|
|
||||||
exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}"
|
|
||||||
}
|
|
||||||
|
|
||||||
exit ${RET}
|
|
@@ -1,24 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091 # the path is created in build-kdl and
|
|
||||||
# here is check if exist
|
|
||||||
|
|
||||||
terminate() {
|
|
||||||
echo "ci-kdl.sh caught SIGTERM signal! propagating to child processes"
|
|
||||||
for job in $(jobs -p)
|
|
||||||
do
|
|
||||||
kill -15 "$job"
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
trap terminate SIGTERM
|
|
||||||
|
|
||||||
if [ -f /ci-kdl.venv/bin/activate ]; then
|
|
||||||
source /ci-kdl.venv/bin/activate
|
|
||||||
/ci-kdl.venv/bin/python /ci-kdl.venv/bin/ci-kdl | tee -a /results/kdl.log &
|
|
||||||
child=$!
|
|
||||||
wait $child
|
|
||||||
mv kdl_*.json /results/kdl.json
|
|
||||||
else
|
|
||||||
echo -e "Not possible to activate ci-kdl virtual environment"
|
|
||||||
fi
|
|
||||||
|
|
@@ -1,21 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
_XORG_SCRIPT="/xorg-script"
|
|
||||||
_FLAG_FILE="/xorg-started"
|
|
||||||
|
|
||||||
echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
|
|
||||||
if [ "x$1" != "x" ]; then
|
|
||||||
export LD_LIBRARY_PATH="${1}/lib"
|
|
||||||
export LIBGL_DRIVERS_PATH="${1}/lib/dri"
|
|
||||||
fi
|
|
||||||
xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &
|
|
||||||
|
|
||||||
# Wait for xorg to be ready for connections.
|
|
||||||
for _ in 1 2 3 4 5; do
|
|
||||||
if [ -e "${_FLAG_FILE}" ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 5
|
|
||||||
done
|
|
@@ -1,66 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# ALPINE_X86_64_BUILD_TAG
|
|
||||||
|
|
||||||
set -e
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
export LLVM_VERSION="${LLVM_VERSION:=16}"
|
|
||||||
|
|
||||||
EPHEMERAL=(
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
DEPS=(
|
|
||||||
bash
|
|
||||||
bison
|
|
||||||
ccache
|
|
||||||
clang16-dev
|
|
||||||
cmake
|
|
||||||
clang-dev
|
|
||||||
coreutils
|
|
||||||
curl
|
|
||||||
flex
|
|
||||||
gcc
|
|
||||||
g++
|
|
||||||
git
|
|
||||||
gettext
|
|
||||||
glslang
|
|
||||||
linux-headers
|
|
||||||
llvm16-static
|
|
||||||
llvm16-dev
|
|
||||||
meson
|
|
||||||
expat-dev
|
|
||||||
elfutils-dev
|
|
||||||
libdrm-dev
|
|
||||||
libselinux-dev
|
|
||||||
libva-dev
|
|
||||||
libpciaccess-dev
|
|
||||||
zlib-dev
|
|
||||||
python3-dev
|
|
||||||
py3-mako
|
|
||||||
py3-ply
|
|
||||||
vulkan-headers
|
|
||||||
spirv-tools-dev
|
|
||||||
util-macros
|
|
||||||
wayland-dev
|
|
||||||
wayland-protocols
|
|
||||||
)
|
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/build-llvm-spirv.sh
|
|
||||||
|
|
||||||
. .gitlab-ci/container/build-libclc.sh
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
|
||||||
|
|
||||||
apk del "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
@@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# This is a ci-templates build script to generate a container for LAVA SSH client.
|
|
||||||
|
|
||||||
# shellcheck disable=SC1091
|
|
||||||
set -e
|
|
||||||
set -o xtrace
|
|
||||||
|
|
||||||
EPHEMERAL=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# We only need these very basic packages to run the tests.
|
|
||||||
DEPS=(
|
|
||||||
openssh-client # for ssh
|
|
||||||
iputils # for ping
|
|
||||||
bash
|
|
||||||
curl
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
apk --no-cache add "${DEPS[@]}" "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_pre_build.sh
|
|
||||||
|
|
||||||
############### Uninstall the build software
|
|
||||||
|
|
||||||
apk del "${EPHEMERAL[@]}"
|
|
||||||
|
|
||||||
. .gitlab-ci/container/container_post_build.sh
|
|
60
.gitlab-ci/container/android_build.sh
Normal file
60
.gitlab-ci/container/android_build.sh
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
EPHEMERAL="\
|
||||||
|
rdfind \
|
||||||
|
unzip \
|
||||||
|
"
|
||||||
|
|
||||||
|
apt-get install -y --no-remove $EPHEMERAL
|
||||||
|
|
||||||
|
# Fetch the NDK and extract just the toolchain we want.
|
||||||
|
ndk=android-ndk-r21d
|
||||||
|
wget -O $ndk.zip https://dl.google.com/android/repository/$ndk-linux-x86_64.zip
|
||||||
|
unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
|
||||||
|
rm $ndk.zip
|
||||||
|
# Since it was packed as a zip file, symlinks/hardlinks got turned into
|
||||||
|
# duplicate files. Turn them into hardlinks to save on container space.
|
||||||
|
rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/
|
||||||
|
# Drop some large tools we won't use in this build.
|
||||||
|
find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f
|
||||||
|
|
||||||
|
sh .gitlab-ci/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3"
|
||||||
|
|
||||||
|
sh .gitlab-ci/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64
|
||||||
|
sh .gitlab-ci/create-android-cross-file.sh /$ndk i686-linux-android x86 x86
|
||||||
|
sh .gitlab-ci/create-android-cross-file.sh /$ndk aarch64-linux-android arm armv8
|
||||||
|
sh .gitlab-ci/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl armv7a-linux-androideabi
|
||||||
|
|
||||||
|
# Not using build-libdrm.sh because we don't want its cleanup after building
|
||||||
|
# each arch. Fetch and extract now.
|
||||||
|
export LIBDRM_VERSION=libdrm-2.4.102
|
||||||
|
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
|
||||||
|
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
|
||||||
|
|
||||||
|
for arch in \
|
||||||
|
x86_64-linux-android \
|
||||||
|
i686-linux-android \
|
||||||
|
aarch64-linux-android \
|
||||||
|
arm-linux-androideabi ; do
|
||||||
|
|
||||||
|
cd $LIBDRM_VERSION
|
||||||
|
rm -rf build-$arch
|
||||||
|
meson build-$arch \
|
||||||
|
--cross-file=/cross_file-$arch.txt \
|
||||||
|
--libdir=lib/$arch \
|
||||||
|
-Dlibkms=false \
|
||||||
|
-Dnouveau=false \
|
||||||
|
-Dvc4=false \
|
||||||
|
-Detnaviv=false \
|
||||||
|
-Dfreedreno=false \
|
||||||
|
-Dintel=false \
|
||||||
|
-Dcairo-tests=false
|
||||||
|
ninja -C build-$arch install
|
||||||
|
cd ..
|
||||||
|
done
|
||||||
|
|
||||||
|
rm -rf $LIBDRM_VERSION
|
||||||
|
|
||||||
|
apt-get purge -y $EPHEMERAL
|
5
.gitlab-ci/container/arm64_test.sh
Normal file
5
.gitlab-ci/container/arm64_test.sh
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
arch=arm64
|
||||||
|
|
||||||
|
. .gitlab-ci/container/baremetal_build.sh
|
70
.gitlab-ci/container/arm_build.sh
Normal file
70
.gitlab-ci/container/arm_build.sh
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
apt-get -y install ca-certificates
|
||||||
|
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
|
||||||
|
echo 'deb https://deb.debian.org/debian buster-backports main' >/etc/apt/sources.list.d/backports.list
|
||||||
|
apt-get update
|
||||||
|
apt-get -y install \
|
||||||
|
abootimg \
|
||||||
|
android-sdk-ext4-utils \
|
||||||
|
autoconf \
|
||||||
|
automake \
|
||||||
|
bc \
|
||||||
|
bison \
|
||||||
|
ccache \
|
||||||
|
cmake \
|
||||||
|
debootstrap \
|
||||||
|
fastboot \
|
||||||
|
flex \
|
||||||
|
g++ \
|
||||||
|
git \
|
||||||
|
kmod \
|
||||||
|
lavacli \
|
||||||
|
libdrm-dev \
|
||||||
|
libelf-dev \
|
||||||
|
libexpat1-dev \
|
||||||
|
libx11-dev \
|
||||||
|
libx11-xcb-dev \
|
||||||
|
libxcb-dri2-0-dev \
|
||||||
|
libxcb-dri3-dev \
|
||||||
|
libxcb-glx0-dev \
|
||||||
|
libxcb-present-dev \
|
||||||
|
libxcb-randr0-dev \
|
||||||
|
libxcb-shm0-dev \
|
||||||
|
libxcb-xfixes0-dev \
|
||||||
|
libxdamage-dev \
|
||||||
|
libxext-dev \
|
||||||
|
libxrandr-dev \
|
||||||
|
libxshmfence-dev \
|
||||||
|
libxxf86vm-dev \
|
||||||
|
llvm-8-dev \
|
||||||
|
pkg-config \
|
||||||
|
python \
|
||||||
|
python3-mako \
|
||||||
|
python3-pil \
|
||||||
|
python3-pip \
|
||||||
|
python3-requests \
|
||||||
|
python3-setuptools \
|
||||||
|
unzip \
|
||||||
|
wget \
|
||||||
|
xz-utils \
|
||||||
|
zlib1g-dev
|
||||||
|
|
||||||
|
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
|
||||||
|
|
||||||
|
apt install -y --no-remove -t buster-backports \
|
||||||
|
meson
|
||||||
|
|
||||||
|
arch=armhf
|
||||||
|
. .gitlab-ci/container/cross_build.sh
|
||||||
|
|
||||||
|
. .gitlab-ci/container/container_pre_build.sh
|
||||||
|
|
||||||
|
# dependencies where we want a specific version
|
||||||
|
EXTRA_MESON_ARGS=
|
||||||
|
. .gitlab-ci/build-libdrm.sh
|
||||||
|
|
||||||
|
. .gitlab-ci/container/container_post_build.sh
|
47
.gitlab-ci/container/arm_test-base.sh
Normal file
47
.gitlab-ci/container/arm_test-base.sh
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
############### Install packages for building
|
||||||
|
apt-get install -y ca-certificates
|
||||||
|
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
|
||||||
|
echo 'deb https://deb.debian.org/debian buster-backports main' >/etc/apt/sources.list.d/backports.list
|
||||||
|
apt-get update
|
||||||
|
|
||||||
|
apt-get install -y --no-remove \
|
||||||
|
abootimg \
|
||||||
|
android-sdk-ext4-utils \
|
||||||
|
bc \
|
||||||
|
bison \
|
||||||
|
bzip2 \
|
||||||
|
ccache \
|
||||||
|
cmake \
|
||||||
|
cpio \
|
||||||
|
debootstrap \
|
||||||
|
expect \
|
||||||
|
fastboot \
|
||||||
|
flex \
|
||||||
|
g++ \
|
||||||
|
git \
|
||||||
|
netcat \
|
||||||
|
nginx-full \
|
||||||
|
pkg-config \
|
||||||
|
procps \
|
||||||
|
python3-distutils \
|
||||||
|
python3-minimal \
|
||||||
|
python3-serial \
|
||||||
|
python3.7 \
|
||||||
|
rsync \
|
||||||
|
telnet \
|
||||||
|
u-boot-tools \
|
||||||
|
unzip
|
||||||
|
|
||||||
|
apt install -t buster-backports -y --no-remove \
|
||||||
|
meson
|
||||||
|
|
||||||
|
# setup nginx
|
||||||
|
sed -i '/gzip_/ s/#\ //g' /etc/nginx/nginx.conf
|
||||||
|
cp .gitlab-ci/bare-metal/nginx-default-site /etc/nginx/sites-enabled/default
|
||||||
|
|
||||||
|
. .gitlab-ci/container/container_post_build.sh
|
7
.gitlab-ci/container/armhf_test.sh
Normal file
7
.gitlab-ci/container/armhf_test.sh
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
arch=armhf
|
||||||
|
|
||||||
|
INCLUDE_PIGLIT=1
|
||||||
|
|
||||||
|
. .gitlab-ci/container/baremetal_build.sh
|
@@ -1,62 +1,63 @@
|
|||||||
#!/usr/bin/env bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
set -o xtrace
|
set -o xtrace
|
||||||
|
|
||||||
# Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves
|
ROOTFS=/lava-files/rootfs-${arch}
|
||||||
# network transfer, disk usage, and runtime on test jobs)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2154 # arch is assigned in previous scripts
|
INCLUDE_PIGLIT=1
|
||||||
if curl -X HEAD -s "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
|
|
||||||
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
|
|
||||||
else
|
|
||||||
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
dpkg --add-architecture $arch
|
||||||
"${ARTIFACTS_URL}"/lava-rootfs.tar.zst -o rootfs.tar.zst
|
apt-get update
|
||||||
mkdir -p /rootfs-"$arch"
|
|
||||||
tar -C /rootfs-"$arch" '--exclude=./dev/*' --zstd -xf rootfs.tar.zst
|
|
||||||
rm rootfs.tar.zst
|
|
||||||
|
|
||||||
if [[ $arch == "arm64" ]]; then
|
# Cross-build test deps
|
||||||
mkdir -p /baremetal-files
|
BAREMETAL_EPHEMERAL=" \
|
||||||
pushd /baremetal-files
|
autoconf \
|
||||||
|
automake \
|
||||||
|
crossbuild-essential-$arch \
|
||||||
|
git-lfs \
|
||||||
|
libboost-dev:$arch \
|
||||||
|
libdrm-dev:$arch \
|
||||||
|
libegl1-mesa-dev:$arch \
|
||||||
|
libelf-dev:$arch \
|
||||||
|
libexpat1-dev:$arch \
|
||||||
|
libffi-dev:$arch \
|
||||||
|
libgbm-dev:$arch \
|
||||||
|
libgles2-mesa-dev:$arch \
|
||||||
|
libpciaccess-dev:$arch \
|
||||||
|
libpcre3-dev:$arch \
|
||||||
|
libpng-dev:$arch \
|
||||||
|
libpython3-dev:$arch \
|
||||||
|
libstdc++6:$arch \
|
||||||
|
libtinfo-dev:$arch \
|
||||||
|
libudev-dev:$arch \
|
||||||
|
libvulkan-dev:$arch \
|
||||||
|
libwaffle-dev:$arch \
|
||||||
|
libxcb-keysyms1-dev:$arch \
|
||||||
|
libxkbcommon-dev:$arch \
|
||||||
|
python3-dev \
|
||||||
|
qt5-default \
|
||||||
|
qt5-qmake \
|
||||||
|
qtbase5-dev:$arch \
|
||||||
|
"
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
apt-get install -y --no-remove $BAREMETAL_EPHEMERAL
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel
|
|
||||||
|
|
||||||
DEVICE_TREES=""
|
mkdir /var/cache/apt/archives/$arch
|
||||||
DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
|
|
||||||
DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
|
|
||||||
DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
|
|
||||||
DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb"
|
|
||||||
|
|
||||||
for DTB in $DEVICE_TREES; do
|
############### Create cross-files
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}/arm64/$DTB"
|
|
||||||
done
|
|
||||||
|
|
||||||
popd
|
. .gitlab-ci/create-cross-file.sh $arch
|
||||||
elif [[ $arch == "armhf" ]]; then
|
|
||||||
mkdir -p /baremetal-files
|
|
||||||
pushd /baremetal-files
|
|
||||||
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
. .gitlab-ci/container/container_pre_build.sh
|
||||||
-O "${KERNEL_IMAGE_BASE}"/armhf/zImage
|
|
||||||
|
|
||||||
DEVICE_TREES=""
|
############### Create rootfs
|
||||||
DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
|
KERNEL_URL=https://github.com/anholt/linux/archive/cheza-pagetables-2020-09-04.tar.gz
|
||||||
DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb"
|
|
||||||
|
|
||||||
for DTB in $DEVICE_TREES; do
|
DEBIAN_ARCH=$arch INCLUDE_VK_CTS=1 . .gitlab-ci/container/lava_build.sh
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}/armhf/$DTB"
|
|
||||||
done
|
|
||||||
|
|
||||||
popd
|
############### Uninstall the build software
|
||||||
fi
|
|
||||||
|
apt-get purge -y $BAREMETAL_EPHEMERAL
|
||||||
|
|
||||||
|
. .gitlab-ci/container/container_post_build.sh
|
||||||
|
@@ -1,58 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
ANGLE_REV="0518a3ff4d4e7e5b2ce8203358f719613a31c118"
|
|
||||||
|
|
||||||
# DEPOT tools
|
|
||||||
git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git
|
|
||||||
PWD=$(pwd)
|
|
||||||
export PATH=$PWD/depot_tools:$PATH
|
|
||||||
export DEPOT_TOOLS_UPDATE=0
|
|
||||||
|
|
||||||
mkdir /angle-build
|
|
||||||
pushd /angle-build
|
|
||||||
git init
|
|
||||||
git remote add origin https://chromium.googlesource.com/angle/angle.git
|
|
||||||
git fetch --depth 1 origin "$ANGLE_REV"
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
|
|
||||||
# source preparation
|
|
||||||
python3 scripts/bootstrap.py
|
|
||||||
mkdir -p build/config
|
|
||||||
gclient sync
|
|
||||||
|
|
||||||
sed -i "/catapult/d" testing/BUILD.gn
|
|
||||||
|
|
||||||
mkdir -p out/Release
|
|
||||||
echo '
|
|
||||||
is_debug = false
|
|
||||||
angle_enable_swiftshader = false
|
|
||||||
angle_enable_null = false
|
|
||||||
angle_enable_gl = false
|
|
||||||
angle_enable_vulkan = true
|
|
||||||
angle_has_histograms = false
|
|
||||||
build_angle_trace_perf_tests = false
|
|
||||||
build_angle_deqp_tests = false
|
|
||||||
angle_use_custom_libvulkan = false
|
|
||||||
dcheck_always_on=true
|
|
||||||
' > out/Release/args.gn
|
|
||||||
|
|
||||||
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
|
|
||||||
build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
|
|
||||||
fi
|
|
||||||
|
|
||||||
gn gen out/Release
|
|
||||||
# depot_tools overrides ninja with a version that doesn't work. We want
|
|
||||||
# ninja with FDO_CI_CONCURRENT anyway.
|
|
||||||
/usr/local/bin/ninja -C out/Release/
|
|
||||||
|
|
||||||
mkdir /angle
|
|
||||||
cp out/Release/lib*GL*.so /angle/
|
|
||||||
ln -s libEGL.so /angle/libEGL.so.1
|
|
||||||
ln -s libGLESv2.so /angle/libGLESv2.so.2
|
|
||||||
|
|
||||||
rm -rf out
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf ./depot_tools
|
|
@@ -1,25 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_X86_64_TEST_GL_TAG
|
|
||||||
# DEBIAN_X86_64_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a"
|
|
||||||
|
|
||||||
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
|
|
||||||
pushd /apitrace
|
|
||||||
git checkout "$APITRACE_VERSION"
|
|
||||||
git submodule update --init --depth 1 --recursive
|
|
||||||
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
|
|
||||||
cmake --build _build --parallel --target apitrace eglretrace
|
|
||||||
mkdir build
|
|
||||||
cp _build/apitrace build
|
|
||||||
cp _build/eglretrace build
|
|
||||||
${STRIP_CMD:-strip} build/*
|
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
|
||||||
popd
|
|
@@ -1,44 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
|
||||||
git config --global user.name "Mesa CI"
|
|
||||||
|
|
||||||
CROSVM_VERSION=1641c55bcc922588e24de73e9cca7b5e4005bd6d
|
|
||||||
git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
|
|
||||||
pushd /platform/crosvm
|
|
||||||
git checkout "$CROSVM_VERSION"
|
|
||||||
git submodule update --init
|
|
||||||
|
|
||||||
VIRGLRENDERER_VERSION=d9c002fac153b834a2c17731f2b85c36e333e102
|
|
||||||
rm -rf third_party/virglrenderer
|
|
||||||
git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
|
|
||||||
pushd third_party/virglrenderer
|
|
||||||
git checkout "$VIRGLRENDERER_VERSION"
|
|
||||||
meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true $EXTRA_MESON_ARGS
|
|
||||||
meson install -C build
|
|
||||||
popd
|
|
||||||
|
|
||||||
cargo update -p pkg-config@0.3.26 --precise 0.3.27
|
|
||||||
|
|
||||||
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|
||||||
bindgen-cli \
|
|
||||||
--locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local \
|
|
||||||
--version 0.65.1 \
|
|
||||||
$EXTRA_CARGO_ARGS
|
|
||||||
|
|
||||||
CROSVM_USE_SYSTEM_MINIGBM=1 CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--locked \
|
|
||||||
--features 'default-no-sandbox gpu x virgl_renderer' \
|
|
||||||
--path . \
|
|
||||||
--root /usr/local \
|
|
||||||
$EXTRA_CARGO_ARGS
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
rm -rf /platform/crosvm
|
|
@@ -1,70 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_X86_64_TEST_ANDROID_TAG
|
|
||||||
# DEBIAN_X86_64_TEST_GL_TAG
|
|
||||||
# DEBIAN_X86_64_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
DEQP_RUNNER_VERSION=0.18.0
|
|
||||||
|
|
||||||
DEQP_RUNNER_GIT_URL="${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/mesa/deqp-runner.git}"
|
|
||||||
|
|
||||||
if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
|
|
||||||
# Build and install from source
|
|
||||||
DEQP_RUNNER_CARGO_ARGS="--git $DEQP_RUNNER_GIT_URL"
|
|
||||||
|
|
||||||
if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
|
|
||||||
DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_TAG"
|
|
||||||
else
|
|
||||||
DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="$DEQP_RUNNER_GIT_REV"
|
|
||||||
fi
|
|
||||||
|
|
||||||
DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
|
|
||||||
else
|
|
||||||
# Install from package registry
|
|
||||||
DEQP_RUNNER_CARGO_ARGS="--version ${DEQP_RUNNER_VERSION} ${EXTRA_CARGO_ARGS} -- deqp-runner"
|
|
||||||
DEQP_RUNNER_GIT_CHECKOUT="v$DEQP_RUNNER_VERSION"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "$RUST_TARGET" != *-android ]]; then
|
|
||||||
cargo install --locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local \
|
|
||||||
${DEQP_RUNNER_CARGO_ARGS}
|
|
||||||
else
|
|
||||||
mkdir -p /deqp-runner
|
|
||||||
pushd /deqp-runner
|
|
||||||
git clone --branch "$DEQP_RUNNER_GIT_CHECKOUT" --depth 1 "$DEQP_RUNNER_GIT_URL" deqp-runner-git
|
|
||||||
pushd deqp-runner-git
|
|
||||||
|
|
||||||
cargo install --locked \
|
|
||||||
-j ${FDO_CI_CONCURRENT:-4} \
|
|
||||||
--root /usr/local --version 2.10.0 \
|
|
||||||
cargo-ndk
|
|
||||||
|
|
||||||
rustup target add $RUST_TARGET
|
|
||||||
RUSTFLAGS='-C target-feature=+crt-static' cargo ndk --target $RUST_TARGET build --release
|
|
||||||
|
|
||||||
mv target/$RUST_TARGET/release/deqp-runner /deqp-runner
|
|
||||||
|
|
||||||
cargo uninstall --locked \
|
|
||||||
--root /usr/local \
|
|
||||||
cargo-ndk
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf deqp-runner-git
|
|
||||||
popd
|
|
||||||
fi
|
|
||||||
|
|
||||||
# remove unused test runners to shrink images for the Mesa CI build (not kernel,
|
|
||||||
# which chooses its own deqp branch)
|
|
||||||
if [ -z "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
|
|
||||||
rm -f /usr/local/bin/igt-runner
|
|
||||||
fi
|
|
@@ -1,275 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_X86_64_TEST_ANDROID_TAG
|
|
||||||
# DEBIAN_X86_64_TEST_GL_TAG
|
|
||||||
# DEBIAN_X86_64_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ex -o pipefail
|
|
||||||
|
|
||||||
# See `deqp_build_targets` below for which release is used to produce which
|
|
||||||
# binary. Unless this comment has bitrotten:
|
|
||||||
# - the VK release produces `deqp-vk`,
|
|
||||||
# - the GL release produces `glcts`, and
|
|
||||||
# - the GLES release produces `deqp-gles*` and `deqp-egl`
|
|
||||||
|
|
||||||
DEQP_VK_VERSION=1.3.7.0
|
|
||||||
DEQP_GL_VERSION=4.6.4.0
|
|
||||||
DEQP_GLES_VERSION=3.2.10.0
|
|
||||||
|
|
||||||
# Patches to VulkanCTS may come from commits in their repo (listed in
|
|
||||||
# cts_commits_to_backport) or patch files stored in our repo (in the patch
|
|
||||||
# directory `$OLDPWD/.gitlab-ci/container/patches/` listed in cts_patch_files).
|
|
||||||
# Both list variables would have comments explaining the reasons behind the
|
|
||||||
# patches.
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
vk_cts_commits_to_backport=(
|
|
||||||
# Take multiview into account for task shader inv. stats
|
|
||||||
22aa3f4c59f6e1d4daebd5a8c9c05bce6cd3b63b
|
|
||||||
|
|
||||||
# Remove illegal mesh shader query tests
|
|
||||||
2a87f7b25dc27188be0f0a003b2d7aef69d9002e
|
|
||||||
|
|
||||||
# Relax fragment shader invocations result verifications
|
|
||||||
0d8bf6a2715f95907e9cf86a86876ff1f26c66fe
|
|
||||||
|
|
||||||
# Fix several issues in dynamic rendering basic tests
|
|
||||||
c5453824b498c981c6ba42017d119f5de02a3e34
|
|
||||||
|
|
||||||
# Add setVisible for VulkanWindowDirectDrm
|
|
||||||
a8466bf6ea98f6cd6733849ad8081775318a3e3e
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
vk_cts_patch_files=(
|
|
||||||
# Derivate subgroup fix
|
|
||||||
# https://github.com/KhronosGroup/VK-GL-CTS/pull/442
|
|
||||||
build-deqp-vk_Use-subgroups-helper-in-derivate-tests.patch
|
|
||||||
build-deqp-vk_Add-missing-subgroup-support-checks-for-linear-derivate-tests.patch
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
vk_cts_patch_files+=(
|
|
||||||
build-deqp-vk_Allow-running-on-Android-from-the-command-line.patch
|
|
||||||
build-deqp-vk_Android-prints-to-stdout-instead-of-logcat.patch
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gl_cts_commits_to_backport=(
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gl_cts_patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
gl_cts_patch_files+=(
|
|
||||||
build-deqp-gl_Allow-running-on-Android-from-the-command-line.patch
|
|
||||||
build-deqp-gl_Android-prints-to-stdout-instead-of-logcat.patch
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
# GLES builds also EGL
|
|
||||||
gles_cts_commits_to_backport=(
|
|
||||||
# Implement support for the EGL_EXT_config_select_group extension
|
|
||||||
88ba9ac270db5be600b1ecacbc6d9db0c55d5be4
|
|
||||||
)
|
|
||||||
|
|
||||||
# shellcheck disable=SC2034
|
|
||||||
gles_cts_patch_files=(
|
|
||||||
)
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
gles_cts_patch_files+=(
|
|
||||||
build-deqp-gles_Allow-running-on-Android-from-the-command-line.patch
|
|
||||||
build-deqp-gles_Android-prints-to-stdout-instead-of-logcat.patch
|
|
||||||
)
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
### Careful editing anything below this line
|
|
||||||
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
|
||||||
git config --global user.name "Mesa CI"
|
|
||||||
|
|
||||||
# shellcheck disable=SC2153
|
|
||||||
case "${DEQP_API}" in
|
|
||||||
VK) DEQP_VERSION="vulkan-cts-$DEQP_VK_VERSION";;
|
|
||||||
GL) DEQP_VERSION="opengl-cts-$DEQP_GL_VERSION";;
|
|
||||||
GLES) DEQP_VERSION="opengl-es-cts-$DEQP_GLES_VERSION";;
|
|
||||||
esac
|
|
||||||
|
|
||||||
git clone \
|
|
||||||
https://github.com/KhronosGroup/VK-GL-CTS.git \
|
|
||||||
-b $DEQP_VERSION \
|
|
||||||
--depth 1 \
|
|
||||||
/VK-GL-CTS
|
|
||||||
pushd /VK-GL-CTS
|
|
||||||
|
|
||||||
mkdir -p /deqp
|
|
||||||
|
|
||||||
# shellcheck disable=SC2153
|
|
||||||
deqp_api=${DEQP_API,,}
|
|
||||||
|
|
||||||
cts_commits_to_backport="${deqp_api}_cts_commits_to_backport[@]"
|
|
||||||
for commit in "${!cts_commits_to_backport}"
|
|
||||||
do
|
|
||||||
PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
|
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $PATCH_URL"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \
|
|
||||||
git am -
|
|
||||||
done
|
|
||||||
|
|
||||||
cts_patch_files="${deqp_api}_cts_patch_files[@]"
|
|
||||||
for patch in "${!cts_patch_files}"
|
|
||||||
do
|
|
||||||
echo "Apply patch to ${DEQP_API} CTS from $patch"
|
|
||||||
git am < $OLDPWD/.gitlab-ci/container/patches/$patch
|
|
||||||
done
|
|
||||||
|
|
||||||
{
|
|
||||||
echo "dEQP base version $DEQP_VERSION"
|
|
||||||
echo "The following local patches are applied on top:"
|
|
||||||
git log --reverse --oneline $DEQP_VERSION.. --format=%s | sed 's/^/- /'
|
|
||||||
} > /deqp/version-$deqp_api
|
|
||||||
|
|
||||||
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
|
|
||||||
# libpng (sigh). The archives get their checksums checked anyway, and git
|
|
||||||
# always goes through ssh or https.
|
|
||||||
python3 external/fetch_sources.py --insecure
|
|
||||||
|
|
||||||
# Save the testlog stylesheets:
|
|
||||||
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
|
|
||||||
popd
|
|
||||||
|
|
||||||
pushd /deqp
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
if [ "${DEQP_TARGET}" = 'android' ]; then
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=android \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
$EXTRA_CMAKE_ARGS
|
|
||||||
mold --run ninja modules/egl/deqp-egl
|
|
||||||
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android
|
|
||||||
else
|
|
||||||
# When including EGL/X11 testing, do that build first and save off its
|
|
||||||
# deqp-egl binary.
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=x11_egl_glx \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
$EXTRA_CMAKE_ARGS
|
|
||||||
mold --run ninja modules/egl/deqp-egl
|
|
||||||
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
|
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=wayland \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
$EXTRA_CMAKE_ARGS
|
|
||||||
mold --run ninja modules/egl/deqp-egl
|
|
||||||
mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
cmake -S /VK-GL-CTS -B . -G Ninja \
|
|
||||||
-DDEQP_TARGET=${DEQP_TARGET} \
|
|
||||||
-DCMAKE_BUILD_TYPE=Release \
|
|
||||||
$EXTRA_CMAKE_ARGS
|
|
||||||
|
|
||||||
# Make sure `default` doesn't silently stop detecting one of the platforms we care about
|
|
||||||
if [ "${DEQP_TARGET}" = 'default' ]; then
|
|
||||||
grep -q DEQP_SUPPORT_WAYLAND=1 build.ninja
|
|
||||||
grep -q DEQP_SUPPORT_X11=1 build.ninja
|
|
||||||
grep -q DEQP_SUPPORT_XCB=1 build.ninja
|
|
||||||
fi
|
|
||||||
|
|
||||||
deqp_build_targets=()
|
|
||||||
case "${DEQP_API}" in
|
|
||||||
VK)
|
|
||||||
deqp_build_targets+=(deqp-vk)
|
|
||||||
;;
|
|
||||||
GL)
|
|
||||||
deqp_build_targets+=(glcts)
|
|
||||||
;;
|
|
||||||
GLES)
|
|
||||||
deqp_build_targets+=(deqp-gles{2,3,31})
|
|
||||||
# deqp-egl also comes from this build, but it is handled separately above.
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
if [ "${DEQP_TARGET}" != 'android' ]; then
|
|
||||||
deqp_build_targets+=(testlog-to-xml)
|
|
||||||
deqp_build_targets+=(testlog-to-csv)
|
|
||||||
deqp_build_targets+=(testlog-to-junit)
|
|
||||||
fi
|
|
||||||
|
|
||||||
mold --run ninja "${deqp_build_targets[@]}"
|
|
||||||
|
|
||||||
if [ "${DEQP_TARGET}" != 'android' ]; then
|
|
||||||
# Copy out the mustpass lists we want.
|
|
||||||
mkdir -p /deqp/mustpass
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'VK' ]; then
|
|
||||||
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
|
|
||||||
cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
|
|
||||||
>> /deqp/mustpass/vk-master.txt
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GL' ]; then
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-main.txt \
|
|
||||||
/deqp/mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/mustpass/gl/khronos_mustpass_single/4.6.1.x/*-single.txt \
|
|
||||||
/deqp/mustpass/
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
|
|
||||||
/deqp/mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-main.txt \
|
|
||||||
/deqp/mustpass/
|
|
||||||
cp \
|
|
||||||
/VK-GL-CTS/external/openglcts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-main.txt \
|
|
||||||
/deqp/mustpass/
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Save *some* executor utils, but otherwise strip things down
|
|
||||||
# to reduct deqp build size:
|
|
||||||
mkdir /deqp/executor.save
|
|
||||||
cp /deqp/executor/testlog-to-* /deqp/executor.save
|
|
||||||
rm -rf /deqp/executor
|
|
||||||
mv /deqp/executor.save /deqp/executor
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Remove other mustpass files, since we saved off the ones we wanted to conventient locations above.
|
|
||||||
rm -rf /deqp/external/**/mustpass/
|
|
||||||
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-master*
|
|
||||||
rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default
|
|
||||||
|
|
||||||
rm -rf /deqp/external/openglcts/modules/cts-runner
|
|
||||||
rm -rf /deqp/modules/internal
|
|
||||||
rm -rf /deqp/execserver
|
|
||||||
rm -rf /deqp/framework
|
|
||||||
find . -depth \( -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' \) -exec rm -rf {} \;
|
|
||||||
if [ "${DEQP_API}" = 'VK' ]; then
|
|
||||||
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
|
|
||||||
fi
|
|
||||||
if [ "${DEQP_API}" = 'GL' ]; then
|
|
||||||
${STRIP_CMD:-strip} external/openglcts/modules/glcts
|
|
||||||
fi
|
|
||||||
if [ "${DEQP_API}" = 'GLES' ]; then
|
|
||||||
${STRIP_CMD:-strip} modules/*/deqp-*
|
|
||||||
fi
|
|
||||||
du -sh ./*
|
|
||||||
rm -rf /VK-GL-CTS
|
|
||||||
popd
|
|
@@ -1,15 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_BUILD_TAG
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
git clone https://github.com/microsoft/DirectX-Headers -b v1.611.0 --depth 1
|
|
||||||
pushd DirectX-Headers
|
|
||||||
meson setup build --backend=ninja --buildtype=release -Dbuild-test=false $EXTRA_MESON_ARGS
|
|
||||||
meson install -C build
|
|
||||||
popd
|
|
||||||
rm -rf DirectX-Headers
|
|
@@ -1,19 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# When changing this file, you need to bump the following
|
|
||||||
# .gitlab-ci/image-tags.yml tags:
|
|
||||||
# DEBIAN_X86_64_TEST_VK_TAG
|
|
||||||
# KERNEL_ROOTFS_TAG
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
git clone https://github.com/ValveSoftware/Fossilize.git
|
|
||||||
cd Fossilize
|
|
||||||
git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
|
|
||||||
git submodule update --init
|
|
||||||
mkdir build
|
|
||||||
cd build
|
|
||||||
cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
|
|
||||||
ninja -C . install
|
|
||||||
cd ../..
|
|
||||||
rm -rf Fossilize
|
|
@@ -1,19 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae
|
|
||||||
|
|
||||||
git clone https://github.com/LunarG/gfxreconstruct.git \
|
|
||||||
--single-branch \
|
|
||||||
-b master \
|
|
||||||
--no-checkout \
|
|
||||||
/gfxreconstruct
|
|
||||||
pushd /gfxreconstruct
|
|
||||||
git checkout "$GFXRECONSTRUCT_VERSION"
|
|
||||||
git submodule update --init
|
|
||||||
git submodule update
|
|
||||||
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=/gfxreconstruct/build -DBUILD_WERROR=OFF
|
|
||||||
cmake --build _build --parallel --target tools/{replay,info}/install/strip
|
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
|
||||||
popd
|
|
@@ -1,16 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
|
|
||||||
|
|
||||||
git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
|
|
||||||
pushd /parallel-deqp-runner
|
|
||||||
git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
|
|
||||||
meson . _build
|
|
||||||
ninja -C _build hang-detection
|
|
||||||
mkdir -p build/bin
|
|
||||||
install _build/hang-detection build/bin
|
|
||||||
strip build/bin/*
|
|
||||||
find . -not -path './build' -not -path './build/*' -delete
|
|
||||||
popd
|
|
@@ -1,23 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC1091 # the path is created by the script
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
KDL_REVISION="5056f71b100a68b72b285c6fc845a66a2ed25985"
|
|
||||||
|
|
||||||
mkdir ci-kdl.git
|
|
||||||
pushd ci-kdl.git
|
|
||||||
git init
|
|
||||||
git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git
|
|
||||||
git fetch --depth 1 origin ${KDL_REVISION}
|
|
||||||
git checkout FETCH_HEAD
|
|
||||||
popd
|
|
||||||
|
|
||||||
python3 -m venv ci-kdl.venv
|
|
||||||
source ci-kdl.venv/bin/activate
|
|
||||||
pushd ci-kdl.git
|
|
||||||
pip install -r requirements.txt
|
|
||||||
pip install .
|
|
||||||
popd
|
|
||||||
|
|
||||||
rm -rf ci-kdl.git
|
|
@@ -1,31 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
# shellcheck disable=SC2086 # we want word splitting
|
|
||||||
# shellcheck disable=SC2153
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
mkdir -p kernel
|
|
||||||
pushd kernel
|
|
||||||
|
|
||||||
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
|
|
||||||
KERNEL_IMAGE_NAME+=" cheza-kernel"
|
|
||||||
fi
|
|
||||||
|
|
||||||
for image in ${KERNEL_IMAGE_NAME}; do
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "/lava-files/${image}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${image}"
|
|
||||||
done
|
|
||||||
|
|
||||||
for dtb in ${DEVICE_TREES}; do
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-o "/lava-files/${dtb}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${dtb}"
|
|
||||||
done
|
|
||||||
|
|
||||||
mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}"
|
|
||||||
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
|
|
||||||
-O "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst"
|
|
||||||
tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/lava-files/rootfs-${DEBIAN_ARCH}/"
|
|
||||||
|
|
||||||
popd
|
|
||||||
rm -rf kernel
|
|
||||||
|
|
@@ -1,31 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
|
|
||||||
LLVM_TAG="llvmorg-15.0.7"
|
|
||||||
|
|
||||||
$LLVM_CONFIG --version
|
|
||||||
|
|
||||||
git config --global user.email "mesa@example.com"
|
|
||||||
git config --global user.name "Mesa CI"
|
|
||||||
git clone \
|
|
||||||
https://github.com/llvm/llvm-project \
|
|
||||||
--depth 1 \
|
|
||||||
-b "${LLVM_TAG}" \
|
|
||||||
/llvm-project
|
|
||||||
|
|
||||||
mkdir /libclc
|
|
||||||
pushd /libclc
|
|
||||||
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG="$LLVM_CONFIG" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
|
|
||||||
ninja
|
|
||||||
ninja install
|
|
||||||
popd
|
|
||||||
|
|
||||||
# workaroud cmake vs debian packaging.
|
|
||||||
mkdir -p /usr/lib/clc
|
|
||||||
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
|
|
||||||
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
|
|
||||||
|
|
||||||
du -sh ./*
|
|
||||||
rm -rf /libclc /llvm-project
|
|
@@ -1,16 +0,0 @@
|
|||||||
#!/usr/bin/env bash
# Fetch, build and install libdrm; used for the Android and Fedora builds.
# shellcheck disable=SC2086 # we want word splitting

set -ex

export LIBDRM_VERSION=libdrm-2.4.119

# Download the release tarball (with retries for flaky mirrors),
# unpack it, and drop the archive.
curl -O -f -L --retry 4 --retry-all-errors --retry-delay 60 \
    "https://dri.freedesktop.org/libdrm/${LIBDRM_VERSION}.tar.xz"
tar -xvf "${LIBDRM_VERSION}.tar.xz"
rm "${LIBDRM_VERSION}.tar.xz"

# Configure with the unneeded vintage drivers disabled, then install.
# $EXTRA_MESON_ARGS is intentionally unquoted so callers can pass
# several options at once.
pushd "$LIBDRM_VERSION"
meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled $EXTRA_MESON_ARGS
meson install -C build
popd

# Remove the extracted source tree to keep the image small.
rm -rf "$LIBDRM_VERSION"
|
|
@@ -1,22 +0,0 @@
|
|||||||
#!/usr/bin/env bash
# Build the SPIRV-LLVM-Translator release matching the installed LLVM
# major version and install it (including llvm-spirv) under /usr.

set -ex

# Translator releases are tagged v<LLVM major>.0.0.
SPIRV_VER="${LLVM_VERSION:?llvm not set}.0.0"
SRC_DIR="SPIRV-LLVM-Translator-${SPIRV_VER}"

# Grab the tagged release tarball (with retries), unpack, discard it.
curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
    -O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${SPIRV_VER}.tar.gz"
tar -xvf "v${SPIRV_VER}.tar.gz" && rm "v${SPIRV_VER}.tar.gz"

# Out-of-tree Ninja build, installed into /usr.
mkdir "${SRC_DIR}/build"
pushd "${SRC_DIR}/build"
cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
ninja
ninja install
# For some reason llvm-spirv is not installed by default
ninja llvm-spirv
cp tools/llvm-spirv/llvm-spirv /usr/bin/
popd

# Log the size of the tree we are about to delete, then delete it.
du -sh "${SRC_DIR}"
rm -rf "${SRC_DIR}"
|
|
@@ -1,22 +0,0 @@
|
|||||||
#!/usr/bin/env bash
# Build and install the mold linker from the pinned upstream release tag.

set -ex

# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_BASE_TAG
# DEBIAN_BUILD_TAG
# FEDORA_X86_64_BUILD_TAG
# KERNEL_ROOTFS_TAG

MOLD_VERSION="2.4.1"

# Shallow clone of the release tag only.
git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
pushd mold

# Pass the source directory ('.') explicitly: `cmake` with only -D
# options and no source/binary directory is rejected by modern CMake
# ("No source or binary directory provided"). This configures an
# in-source build, which the following --build/--install steps expect.
cmake . -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
cmake --build . --parallel
cmake --install .

popd
# Drop the source/build tree to keep the container image small.
rm -rf mold
|
|
@@ -1,25 +0,0 @@
|
|||||||
#!/usr/bin/env bash
# Build the Xnine NineTests binary (Gallium Nine test suite) at a pinned
# revision and stash it in /NineTests, then remove the source tree.

# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_X86_64_TEST_GL_TAG

set -ex -o pipefail

### Careful editing anything below this line

# A git identity is required for repository operations in CI.
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone https://github.com/axeldavy/Xnine.git /Xnine
mkdir /Xnine/build
pushd /Xnine/build
# Running checkout from the build subdir is fine: git walks up to /Xnine.
git checkout c64753d224c08006bcdcfa7880ada826f27164b1

# Point the build at the d3dadapter9.so that the mesa build installs
# under /install. NOTE(review): that path must exist at *runtime* of
# NineTests, not at build time — confirm against the test job.
cmake .. -DBUILD_TESTS=1 -DWITH_DRI3=1 -DD3DADAPTER9_LOCATION=/install/lib/d3d/d3dadapter9.so
make

# Keep only the test binary; the whole source tree is removed below.
mkdir -p /NineTests/
mv NineTests/NineTests /NineTests/
popd
rm -rf /Xnine
|
|
@@ -1,33 +0,0 @@
|
|||||||
#!/bin/bash
# shellcheck disable=SC2086 # we want word splitting
set -ex

# Build piglit at a pinned revision and prune the tree down to what the
# test jobs need at runtime.
#
# Inputs (env): PIGLIT_OPTS, EXTRA_CMAKE_ARGS — extra configure options
#               (intentionally word-split); PIGLIT_BUILD_TARGETS — ninja
#               targets to build; "piglit_replayer" triggers aggressive
#               pruning below.
#
# When changing this file, you need to bump the following
# .gitlab-ci/image-tags.yml tags:
# DEBIAN_X86_64_TEST_GL_TAG
# DEBIAN_X86_64_TEST_VK_TAG
# KERNEL_ROOTFS_TAG

REV="1e631479c0b477006dd7561c55e06269d2878d8d"

git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout "$REV"
# Apply the local patch shipped next to this script; $OLDPWD is the
# directory we pushd'ed from (the mesa checkout).
# NOTE(review): $OLDPWD is unquoted — fine only while the checkout path
# contains no whitespace.
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
# In-source Ninja build (-S . -B .).
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
# Strip git metadata, build-system files and C sources/objects to shrink
# the container image.
find . -depth \( -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' \) -exec rm -rf {} \;
rm -rf target_api
if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then
    # Replayer-only images keep just the entry points, the python
    # framework, templates, and tests/replay.py; everything else goes.
    # (-depth + keep-list regexes; errors from deleting inside already
    # deleted dirs are silenced.)
    find . -depth \
        ! -regex "^\.$" \
        ! -regex "^\.\/piglit.*" \
        ! -regex "^\.\/framework.*" \
        ! -regex "^\.\/bin$" \
        ! -regex "^\.\/bin\/replayer\.py" \
        ! -regex "^\.\/templates.*" \
        ! -regex "^\.\/tests$" \
        ! -regex "^\.\/tests\/replay\.py" \
        -exec rm -rf {} \; 2>/dev/null
fi
popd
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user