Compare commits

320 Commits

Author SHA1 Message Date
Julien Cristau
34beaefe1c Prepare changelog for upload. 2007-08-09 16:00:58 +02:00
Julien Cristau
10b828f193 New upstream release. 2007-08-09 11:56:00 +02:00
Julien Cristau
43e6260ed0 Merge tag 'mesa_7_0_1' into debian-experimental 2007-08-09 11:25:42 +02:00
Brian
ae16a51e95 added GLU bug fixes 2007-08-03 13:39:09 -06:00
Brian
4398d67546 update 7.0.1 release info/date 2007-08-03 13:32:47 -06:00
Brian
c93738687c remove -rc2 2007-08-03 13:32:28 -06:00
Brian
799492e606 add OSMESA_LIB_NAME 2007-08-03 10:08:16 -06:00
Brian
f33ff071bb added -lm 2007-08-03 07:24:33 -06:00
Brian
99e854743c added bluegene-osmesa 2007-08-02 14:24:05 -06:00
Brian
6f79062d91 OSmesa on BlueGene (Alexander Neundorf) 2007-08-02 14:22:52 -06:00
Brian
2f682c3995 fix stencil value masking bug 11805, and fix sizeof() bug 2007-08-02 09:08:53 -06:00
Brian
067370e68f fix invalid pointer usage in bezierPatchDeleteList(), bug 11807 2007-08-02 08:40:58 -06:00
Brian
2aa439a6a4 fix double free()s (bug 11808) 2007-08-02 08:38:10 -06:00
Brian
b477182dc3 s/MAX_TEXTURE_LEVELS/SIS_MAX_TEXTURE_LEVELS/ and add assertion (bug 11806) 2007-08-02 08:35:27 -06:00
Michel Dänzer
a8964ca89e i915tex: Better attempt to release miptree when overriding texture image.
The previous approach could lead to crashes in FBO code that dereferences the
miptree struct pointer unconditionally.
2007-08-02 12:14:03 +02:00
Brian
4f0e92d07c Merge branch 'mesa_7_0_branch' of git+ssh://brianp@git.freedesktop.org/git/mesa/mesa into mesa_7_0_branch 2007-08-01 17:03:09 -06:00
Brian
577f4e8a5f Obsolete 2007-08-01 11:45:47 -06:00
Brian
0586d9fe56 fix error code test limit (bug 11795) 2007-08-01 11:27:47 -06:00
Michel Dänzer
1f0d9bf05e i915tex: Missing piece of SetTexOffset hook implementation for I830 generation. 2007-08-01 17:46:32 +02:00
Michel Dänzer
8bcae2a527 i915tex: Implement SetTexOffset hook.
Only build tested for I830 generation.
2007-08-01 17:46:31 +02:00
Michel Dänzer
35ca9aae91 r300: Implement SetTexOffset hook. 2007-08-01 17:46:30 +02:00
Michel Dänzer
6cdd2bf8d7 Add interfaces for overriding texture images with driver specific 'offsets'.
To be used by AIGLX for GLX_EXT_texture_from_pixmap without several
additional data copies.
2007-08-01 17:46:29 +02:00
Brian
ce7d175adb fix mem leak (bug 11793) 2007-08-01 08:42:13 -06:00
Brian
1904fd095f fix mem leak (bug 11791) 2007-08-01 08:42:09 -06:00
Brian
897d0ac5cc fix some FreeBSD issues 2007-07-31 14:57:54 -06:00
Brian
e262d0182f fix failure caused by undeclared variable (bug 11783) 2007-07-31 09:43:40 -06:00
Brian
aa5b748c10 glGetAttribLocation always returned 1 (bug 11774) 2007-07-31 09:29:31 -06:00
Julien Cristau
c6728df4a3 Merge tag 'mesa_7_0_1_rc2' into debian-experimental 2007-07-31 04:00:13 +02:00
Brian
3c182c5bc8 Added sunos5-v9-cc-g++ config (Roland Egger) 2007-07-30 09:00:09 -06:00
Brian
1b5677847b _mesa_pack_bitmap() fix from trunk 2007-07-30 08:13:05 -06:00
Brian
1be59a9dbe dlist polygon stipple fix from trunk 2007-07-30 08:12:29 -06:00
Brian
55c2178ad4 fix bug 11754 2007-07-29 18:28:09 -06:00
Jan Dvorak
3e3d392e1c fix glPointParameteriv bug 11754 2007-07-29 18:26:55 -06:00
Julien Cristau
593de57fa2 Prepare changelog for upload. 2007-07-27 20:18:21 +02:00
Julien Cristau
04b0e43ea6 New upstream release candidate. 2007-07-27 19:59:38 +02:00
Julien Cristau
04eee290c8 Merge branch 'mesa_7_0_branch' of git://anongit.freedesktop.org/git/mesa/mesa into debian-experimental 2007-07-27 19:58:30 +02:00
Brian
756a7a4e4b minor tweaks 2007-07-27 11:44:55 -06:00
Brian
121b4ac220 More Mingw32 fixes from Zhang (zxpmyth@yahoo.com.cn) 2007-07-27 11:12:21 -06:00
Brian
cdbd5db3af release candidate 2 2007-07-27 11:10:00 -06:00
Brian
b2240f6ad4 fix bug 11731 2007-07-26 15:33:39 -06:00
Brian
33814a55f8 Fix function call bug 11731. Also, fix up IR_CALL/IR_FUNC confusion. 2007-07-26 15:32:34 -06:00
Brian
a57c5a417b generate error upon writing to varying var in fragment program (bug 11733) 2007-07-26 11:39:37 -06:00
Brian
ff1a28de17 clamp float colors 2007-07-26 08:19:40 -06:00
Brian
772f57f99b call ctx->Driver.NewProgram() instead of _mesa_new_program() 2007-07-24 09:56:54 -06:00
Brian
dacee32b8b fix logic error, typos 2007-07-24 09:19:44 -06:00
Brian
fa1a3eb06d fix GLX_STEREO bug 2007-07-23 16:54:35 -06:00
Brian
bd4817ca54 fix GLX_STEREO handling (bug 11705) 2007-07-23 16:51:03 -06:00
Julien Cristau
d3a160e217 New upstream release candidate. 2007-07-23 16:39:48 +02:00
Julien Cristau
c5837f2791 Merge branch 'mesa_7_0_branch' of git://anongit.freedesktop.org/git/mesa/mesa into debian-experimental 2007-07-23 16:28:56 +02:00
Julien Cristau
490bd43f18 Merge branch 'debian-unstable' of git+ssh://git.debian.org/git/pkg-xorg/lib/mesa into debian-experimental
Conflicts:

	debian/changelog
2007-07-23 16:28:10 +02:00
Brian
005eea249c Fix a number of MINGW32 issues (Zhang <zxpmyth@yahoo.com.cn>) 2007-07-21 11:27:22 -06:00
Brian
9747de8ec6 drop VC6 and VC7 project files until updated 2007-07-21 10:03:30 -06:00
Brian
849a9799d0 fix bug 11588 2007-07-13 16:37:18 -06:00
Jan Dvorak
e32ae4fe33 fix shader/info string length queries (bug 11588) 2007-07-13 16:35:19 -06:00
Brian
4595389c4c fix width/depth mix-up (bug 11577) 2007-07-12 21:43:39 -06:00
Brian
e8ccc7cc49 Merge branch 'mesa_7_0_branch' of git+ssh://brianp@git.freedesktop.org/git/mesa/mesa into mesa_7_0_branch 2007-07-12 14:49:56 -06:00
Brian
673d21047a fix swizzle-related bug 11534 2007-07-12 14:49:24 -06:00
Julien Cristau
6186261589 Prepare changelog for upload. 2007-07-11 05:51:34 +02:00
Julien Cristau
45310e219c Relax libgl1-mesa-dev dependency on libgl1-mesa-glx.
Change the >= ${source:Version} dependency to >= ${source:Upstream-Version}.
This way libgl1-mesa-dev stays installable after a minor revision bump on
architectures where mesa isn't built yet.
2007-07-11 05:45:30 +02:00
Julien Cristau
250f6b2c88 Replace ${Source-Version} with ${source:Version} in one remaining spot. 2007-07-11 05:34:30 +02:00
Michel Dänzer
fdefc2bbda i915tex: Better procedure for dropping batchbuffer on virtual resolution change.
The previous procedure would often result in a GPU lockup.
2007-07-10 11:02:18 +02:00
Michel Dänzer
2079df8527 i915tex: Only wait for vblank when really necessary.
This avoids superfluous waits for vblank timing out under some circumstances.
2007-07-10 10:54:51 +02:00
Michel Dänzer
b53659452c Clear pointers to freed cliprects.
Not doing this could lead to double frees under rare circumstances.
2007-07-10 10:49:28 +02:00
Michel Dänzer
46f1d6653e Remove dubious compile-time test for pre-2.4 Linux kernels.
LINUX_VERSION_CODE shouldn't be used by userspace code; it can be defined empty
these days.

If anybody still cares about 2.2 kernels, they should reinstate this as a
proper runtime test.
2007-07-10 10:34:10 +02:00
Brian
c1938a60f7 Fix zero-size texture checking in _mesa_test_proxy_teximage(), bug 11309 2007-07-08 10:04:14 -06:00
Brian
d65110f352 Check if 'indices' is NULL, bug 11314 2007-07-08 09:21:21 -06:00
Brian
9fa3bbcb5a check depthScale value for optimized ushort->uint case (fixes bug 11474) 2007-07-08 08:53:09 -06:00
Brice Goglin
19e51ecbe6 libgl1-mesa-dev does not need to depend on libgl1-mesa-dri, libgl1-mesa-glx is enough.
libgl1-mesa-glx was included in libgl1-mesa-dri before 6.4.1-0.1,
so libgl1-mesa-dev had to depend on libgl1-mesa-dri at this point.
Since libgl1-mesa-dri only contains the DRI modules now, it is not
required anymore for libgl1-mesa-dev.
2007-07-07 22:17:35 +02:00
Brian
99d62f2922 fix i915 polygon stipple bug 2007-07-06 16:53:28 -06:00
Brian
284743cafe add GL_QUADS to i915_reduced_primitive_state() to fix redbook/polys demo 2007-07-06 16:52:34 -06:00
Xavier Bachelot
08d7307b2a call glutInit(), bug 11486 2007-07-06 12:54:40 -06:00
Brian
ef6a64abe6 fix a syntax error (missing right-paren) 2007-07-05 16:04:10 -06:00
Brian
18bfa52ef7 fix bug 11475 2007-07-05 09:42:07 -06:00
Brian
d62be2652c Copy from Mesa master. Includes fixes for bugs 11475 and 11448. 2007-07-05 09:40:37 -06:00
Julien Cristau
31f3433635 Prepare changelog for upload. 2007-07-05 00:56:56 +02:00
Samuel Thibault
639eaceee7 Fix build on hurd-i386.
Build libgl1-mesa-glx without direct rendering support on that arch, and
don't build any dri driver.
2007-07-05 00:48:30 +02:00
Eric Anholt
ea53ff80af fix LogicOp/bitmap problem, bug 11133 2007-07-04 07:37:28 -06:00
Brian
feeca1bcbc add fbdev/ to DEMO_FILES 2007-07-03 15:46:48 -06:00
Brian
12e7278c08 document GLSL fixes 2007-07-03 12:21:11 -06:00
Brian
45b5c44eb9 bring over Roland's DOT3_RGBA fix from master 2007-07-03 12:20:52 -06:00
Brian
e3456c1028 added vec2(vec4) constructor, bug 11404 2007-07-03 11:42:03 -06:00
Brian
327fb38573 add code for stpq, rgba writemasks in make_writemask(), bug 11404 2007-07-03 11:41:07 -06:00
Brian
1d25d9e15f Bump version to 7.0.1 2007-07-03 10:04:41 -06:00
Brian
cc7cee3f48 Revert the version of libOSMesa.so to 6.5.3 to avoid linking issues that were caused by bumping to 7.0 2007-07-03 10:03:01 -06:00
Brian
d7062710cd use _mesa_unpack_stencil_span() in draw_stencil_pixels(), bug 11457 2007-07-03 08:58:03 -06:00
Julien Cristau
3e21f52796 Make libgl1-mesa-swx11 depend on libosmesa6
libgl1-mesa-swx11 used to contain libOSMesa.so.6, so programs linked against
it don't necessarily depend on libosmesa6; their dependencies were broken
since mesa 6.5.2-1.
2007-07-03 05:34:07 +02:00
Brian
9bfba734d8 add const to wglSetPixelFormat() to match .h declaration 2007-07-02 10:15:39 -06:00
Julien Cristau
0511d9a9d2 Fix merge and delete symlinks which aren't in the tarball. 2007-07-01 02:55:59 +02:00
Brian
aaebf2f47e regenerated 2007-06-29 07:51:49 -06:00
Brian
23f8d77b38 Add a few missing GL 2.0 entrypoints, regenerate related files.
Specifically:
  glVertexAttrib4bv
  glVertexAttrib4iv
  glVertexAttrib4ubv
  glVertexAttrib4uiv
  glVertexAttrib4usv
2007-06-28 16:38:48 -06:00
Samuel Thibault
498b4dbba4 Fix build on hurd-i386.
Build libgl1-mesa-glx without direct rendering support on that arch, and
don't build any dri driver.
2007-06-28 22:33:05 +01:00
Julien Cristau
ecc42e79ae Merge unreleased changelog entries 2007-06-28 22:01:05 +01:00
Julien Cristau
0bf8ed523d * Add a shlibs file for libgl1-mesa-swx11-i686. 2007-06-28 21:58:42 +01:00
Brian
87d22ee0fe initial 7.0.1 relnotes 2007-06-28 08:11:36 -06:00
zhang
52e25f63d4 a variety of fixes for MingW 2007-06-28 08:10:09 -06:00
Brian
f3fb67972b remove obsolete t_save_api.c 2007-06-28 07:43:32 -06:00
Brian
6400756364 remove obsolete sources 2007-06-28 07:21:59 -06:00
David Nusinow
d71d04bf6f Bite the bullet and add myself to uploaders 2007-06-27 22:54:52 -04:00
David Nusinow
af90259e09 Merge remote branch 'origin/debian-experimental' into debian-experimental
Conflicts:

	debian/changelog
	src/mesa/drivers/dri/r300/radeon_lock.c
2007-06-27 22:53:04 -04:00
David Nusinow
7c9762f698 Changelog bump 2007-06-27 21:32:51 -04:00
David Nusinow
b2e67fdb7c Merge branch 'upstream-experimental' into debian-experimental
Conflicts:

	.gitignore
	progs/miniglx/miniglxtest.c
	src/glx/mini/Makefile
	src/glx/mini/miniglx.c
	src/mesa/drivers/dri/r300/.gitignore
2007-06-27 21:29:16 -04:00
Brian
d59f0314bd md5 sums for 7.0 2007-06-22 16:50:24 -06:00
Brian
131baefac3 fix date for 7.0 release 2007-06-22 16:41:36 -06:00
Brian
a450078b67 update version 2007-06-22 16:40:17 -06:00
Brian
f6b041bf6c bring in Eric's FreeBSD fixes for -pthread 2007-06-22 15:40:46 -06:00
Brian
4f340d181a set tentative 7.0 release date 2007-06-20 19:59:15 -06:00
Julien Cristau
74a11701f3 Shorten the one-line synopsis of various packages.
They're cut off in .changes files if they're too long.
Thanks, Marc 'HE' Brockschmidt!
2007-06-13 19:36:49 +02:00
Brian
b58e38e936 fix TEXTREL problem (bug 7459) 2007-06-12 09:52:05 -06:00
Brian
8713cb48a8 Fix TEXREL issues when using GLX_USE_TLS (see bug 7459). 2007-06-12 09:46:12 -06:00
Brian
277c5e57ed fix GLX_USE_TLS breakage 2007-06-12 08:57:39 -06:00
Brian
b3d62d5af5 disable depthBits check in check_compatible(), see bug 11161 2007-06-08 12:36:00 -06:00
Brian
9c0f0c8d81 remove old comment about DEFAULT_SOFTWARE_DEPTH_BITS 2007-06-08 12:35:37 -06:00
Brian
18a0a2a7ac document bug 11049 fix 2007-05-23 16:50:22 -06:00
Brian
8b99d9e33c restore GL_TEXTURE_LOD_BIAS in _mesa_PopAttrib(), bug 11049 2007-05-23 16:49:19 -06:00
Brian
01e7e153e3 document point attenuation fix 2007-05-23 08:59:02 -06:00
Brian
0ad4ca24d2 Fix point attenuation problem (bug 11042)
ctx->Point._Attentuation was computed in wrong place and the VB->Eye coord Z
array wasn't indexed correctly in run_point_stage().
2007-05-23 08:56:45 -06:00
Brian
724a155552 if light position is local, treat it as a homogeneous coord and divide by W (see bug 11009) 2007-05-21 10:55:15 -06:00
Julien Cristau
e1b0715dd6 Prepare changelog for upload. 2007-05-21 11:35:28 +02:00
Julien Cristau
a5c6598966 Merge remote branch 'origin/debian-unstable' into debian-experimental
Conflicts:

	debian/changelog
2007-05-21 11:33:42 +02:00
Roland Scheidegger
d38b74a316 fix miptree layout (i915) for small compressed mipmaps
This seems to work now. Also fix i945 set_level_info, need to match i915
behaviour for storing mip height, as it's assumed to be the mip width
(in texels) elsewhere and the division by 4 is done later (untested).
2007-05-19 04:43:50 +02:00
Roland Scheidegger
54cab4b47b fix copy & paste bug of previous commit, breaking dxt5 formats 2007-05-19 03:56:06 +02:00
Roland Scheidegger
c093666bc5 fix miptree comparison with compressed textures
TexelBytes is always 0 with compressed textures, thus would never match
mt->cpp. This caused constant blitting around of textures, and it fixes at
least the horrible performance of Q3 if compressed textures are enabled.
2007-05-19 01:39:54 +02:00
Roland Scheidegger
04972f6761 fix small s3tc mipmaps (#10968)
make sure that whole blocks are always uploaded.
(May still not work correctly if the top mip map is not at least a full block,
that is 4 pixels wide - not sure, but probably doesn't happen in real world)
2007-05-19 01:39:16 +02:00
Brian
67f8234622 set version to 7.0 2007-05-18 09:41:34 -06:00
Brian
201d6dbd9c fix STATE_HALF_VECTOR value (bug 10987) 2007-05-18 09:35:18 -06:00
Julien Cristau
1865ef6fd4 Prepare changelog for upload. 2007-05-18 13:36:42 +02:00
Ian Romanick
fbcac5aa83 Refactor determining whether a texture target can use compressed format 2007-05-17 14:45:25 -07:00
Brian
2b72ab8f8f document glXDestroyPbuffer fix 2007-05-17 15:30:39 -06:00
Brian
0ea97b9408 In DestroyPbuffer(), use GetReq() instead of GetReqExtra(). See bug 10983. 2007-05-17 15:26:39 -06:00
Brian
28683ac7c0 version 7.0, not 7.1 2007-05-17 09:07:25 -06:00
Brian
c72e3e210f version should be 7.0 2007-05-17 09:04:19 -06:00
Julien Cristau
a8c856d43c Cherry-pick commit 65faf023 by Michel Dänzer to fix r300 crash.
r300: Don't crash in radeonUpdatePageFlipping when !radeon->glCtx->DrawBuffer.

This feels like a kludge, maybe there's a better solution.
2007-05-09 15:09:12 +02:00
David Nusinow
1953cfc954 Update changelog 2007-04-30 00:08:11 -04:00
David Nusinow
8e7bac950e Merge branch 'upstream-experimental' into debian-experimental 2007-04-29 23:23:00 -04:00
Julien Cristau
03cdacfb6a Prepare changelog for upload. 2007-04-25 10:37:10 +02:00
Julien Cristau
30f3c3dfad Update changelog. 2007-04-25 08:42:02 +02:00
Julien Cristau
dee1b0d5bb Merge remote branch 'upstream/master' into debian-experimental 2007-04-25 08:40:39 +02:00
Julien Cristau
6f2b0fe463 New upstream release candidate.
Refresh a couple patches.
2007-04-25 01:48:33 +02:00
Julien Cristau
0c69176498 Merge tag 'mesa_6_5_3_rc3' into debian-experimental 2007-04-25 01:41:05 +02:00
Julien Cristau
4e7a0385b5 Merge tag 'mesa_6_5_3_rc2' into debian-experimental 2007-04-25 01:40:50 +02:00
Julien Cristau
4adbd94b10 Only install stuff in mesa-swx11-source if we're building arch:all packages. 2007-04-23 11:00:09 +02:00
Julien Cristau
4a2905b645 Delete obsolete file. 2007-04-22 18:08:58 +02:00
Julien Cristau
0638c6b530 Add generated files from the tarball so they're included in mesa-swx11-source. 2007-04-22 18:08:35 +02:00
Julien Cristau
7dd35ce013 Delete .gitignore files which do more harm than good in a debian package tree. 2007-04-22 18:07:52 +02:00
Julien Cristau
c8629c3178 Resync debian/scripts/install-source.sh. 2007-04-22 00:51:10 +02:00
Julien Cristau
77ccf51cb3 New upstream release candidate. 2007-04-22 00:50:40 +02:00
Julien Cristau
01197f0c77 Merge branch 'upstream-experimental' into debian-experimental 2007-04-22 00:34:30 +02:00
Julien Cristau
69c86ff4fd Remove leftover file. 2007-04-22 00:33:38 +02:00
Julien Cristau
9a42ffad81 Add build-dependencies on libxdamage-dev and libxfixes-dev. 2007-04-21 21:23:21 +02:00
Julien Cristau
0d8633bf8c Delete autogenerated files, and files not included in the tarball. 2007-04-21 21:20:37 +02:00
Julien Cristau
d79e80a3d0 New upstream release candidate.
Drop patches included upstream.
2007-04-21 20:47:01 +02:00
Julien Cristau
d890ebdd25 Merge branch 'upstream-experimental' into debian-experimental
Conflicts:

	doxygen/Makefile
	doxygen/common.doxy
	doxygen/doxy.bat
	doxygen/glapi.doxy
	doxygen/header.html
	doxygen/main.doxy
	doxygen/math.doxy
	doxygen/shader.doxy
	doxygen/swrast.doxy
	doxygen/swrast_setup.doxy
	doxygen/tnl.doxy
	doxygen/tnl_dd.doxy
	progs/tests/Makefile
	progs/tests/fbotest1.c
	progs/tests/fbotest2.c
	progs/tests/fbotexture.c
	progs/tests/mipmap_limits.c
	progs/tests/sharedtex.c
	progs/tools/trace/gltrace_support.cc
	progs/trivial/Makefile
	src/egl/docs/EGL_MESA_screen_surface
	src/egl/main/egllog.c
	src/glx/mini/Makefile
	src/glx/mini/driver.h
	src/mesa/drivers/allegro/amesa.c
	src/mesa/drivers/windows/gldirect/dx7/gld_driver_dx7.c
	src/mesa/drivers/windows/gldirect/dx7/gld_ext_dx7.c
	src/mesa/drivers/windows/gldirect/dx7/gld_primitive_dx7.c
	src/mesa/drivers/windows/gldirect/dx8/gld_driver_dx8.c
	src/mesa/drivers/windows/gldirect/dx8/gld_ext_dx8.c
	src/mesa/drivers/windows/gldirect/dx8/gld_primitive_dx8.c
	src/mesa/drivers/windows/gldirect/dx9/gld_driver_dx9.c
	src/mesa/drivers/windows/gldirect/dx9/gld_ext_dx9.c
	src/mesa/drivers/windows/gldirect/dx9/gld_primitive_dx9.c
	src/mesa/drivers/windows/gldirect/mesasw/gld_wgl_mesasw.c
2007-04-21 20:27:55 +02:00
Brice Goglin
5ee9b3ce72 Fix conflicts for libGLw packages.
libglw1-mesa does not have to conflict with earlier libgl1-mesa-swx11
(libGLw was only shipped as development libs/headers within swx11-dev).

libglw1-mesa-dev needs to conflict with libglw-dev.
2007-04-21 20:20:03 +02:00
Brice Goglin
031b539a09 Split out libGLw libs and headers from libgl1-mesa-swx11.
Ship both static and shared libraries for libGLw, creating libglw1-mesa and
libglw1-mesa-dev (closes: #374904).
2007-04-21 20:19:38 +02:00
Brice Goglin
47c3c706e8 Add 09_i915_always_enable_pixel_fog.patch 2007-04-21 18:00:12 +02:00
Brice Goglin
0d830a616c Add 08_r300_update_window_state_when_bound_but_stamp_changed.patch 2007-04-20 18:53:47 +02:00
Brice Goglin
e219bf8338 Add 07_call_radeonSetCliprects_from_radeonMakeCurrent.patch 2007-04-20 18:44:59 +02:00
Julien Cristau
6c0fba2480 Prepare changelog for upload. 2007-04-20 05:58:00 +02:00
Julien Cristau
f4cc159f53 Revert " * Split out libGLw libs and headers from libgl1-mesa-swx11 and ship both"
This reverts commit 856ae782c7.

Conflicts:

	debian/changelog
2007-04-20 05:55:04 +02:00
Julien Cristau
04c002abc2 Revert "Fix conflicts for libGLw packages."
This reverts commit 048144074b.
2007-04-20 05:53:59 +02:00
Julien Cristau
78fd17907e Merge branch 'debian-experimental' into debian-unstable 2007-04-20 05:52:20 +02:00
Julien Cristau
47edc83080 Merge branch 'debian-experimental' into debian-unstable
Conflicts:

	configs/debian-osmesa16
	configs/debian-osmesa16-static
	configs/debian-osmesa32
	configs/debian-osmesa32-static
	debian/changelog
	debian/compat
	debian/control
	debian/copyright
	debian/libgl1-mesa-dev.install
	debian/libgl1-mesa-swx11-dev.install
	debian/libgl1-mesa-swx11.shlibs
	debian/libglu1-mesa-dev.install
	debian/mesa-common-dev.docs
	debian/mesa-common-dev.install
	debian/rules
	src/mesa/x86-64/matypes.h
	src/mesa/x86/matypes.h
2007-04-20 05:44:24 +02:00
Julien Cristau
33e34f5bf4 Merge tag 'mesa_6_5_1' into debian-unstable
Conflicts:

	progs/util/README
	progs/util/glstate.c
	progs/util/glstate.h
	progs/util/sampleMakefile
	src/glu/sgi/libnurbs/interface/bezierEval.h
	src/glu/sgi/libnurbs/interface/bezierPatch.cc
	src/glu/sgi/libnurbs/interface/bezierPatch.h
	src/glu/sgi/libnurbs/interface/bezierPatchMesh.cc
	src/glu/sgi/libnurbs/interface/bezierPatchMesh.h
	src/glu/sgi/libnurbs/interface/glcurveval.cc
	src/glu/sgi/libnurbs/interface/glimports.h
	src/glu/sgi/libnurbs/interface/glinterface.cc
	src/glu/sgi/libnurbs/interface/glrenderer.h
	src/glu/sgi/libnurbs/interface/incurveeval.cc
	src/glu/sgi/libnurbs/interface/insurfeval.cc
	src/glu/sgi/libnurbs/interface/mystdio.h
	src/glu/sgi/libnurbs/interface/mystdlib.h
	src/glu/sgi/libnurbs/internals/arc.h
	src/glu/sgi/libnurbs/internals/arcsorter.cc
	src/glu/sgi/libnurbs/internals/arcsorter.h
	src/glu/sgi/libnurbs/internals/arctess.h
	src/glu/sgi/libnurbs/internals/backend.cc
	src/glu/sgi/libnurbs/internals/backend.h
	src/glu/sgi/libnurbs/internals/basiccrveval.h
	src/glu/sgi/libnurbs/internals/basicsurfeval.h
	src/glu/sgi/libnurbs/internals/bezierarc.h
	src/glu/sgi/libnurbs/internals/bin.cc
	src/glu/sgi/libnurbs/internals/bin.h
	src/glu/sgi/libnurbs/internals/bufpool.cc
	src/glu/sgi/libnurbs/internals/bufpool.h
	src/glu/sgi/libnurbs/internals/cachingeval.cc
	src/glu/sgi/libnurbs/internals/cachingeval.h
	src/glu/sgi/libnurbs/internals/ccw.cc
	src/glu/sgi/libnurbs/internals/coveandtiler.h
	src/glu/sgi/libnurbs/internals/curve.cc
	src/glu/sgi/libnurbs/internals/curve.h
	src/glu/sgi/libnurbs/internals/curvelist.cc
	src/glu/sgi/libnurbs/internals/curvelist.h
	src/glu/sgi/libnurbs/internals/curvesub.cc
	src/glu/sgi/libnurbs/internals/dataTransform.cc
	src/glu/sgi/libnurbs/internals/dataTransform.h
	src/glu/sgi/libnurbs/internals/defines.h
	src/glu/sgi/libnurbs/internals/displaylist.cc
	src/glu/sgi/libnurbs/internals/displaylist.h
	src/glu/sgi/libnurbs/internals/displaymode.h
	src/glu/sgi/libnurbs/internals/flist.cc
	src/glu/sgi/libnurbs/internals/flist.h
	src/glu/sgi/libnurbs/internals/flistsorter.cc
	src/glu/sgi/libnurbs/internals/flistsorter.h
	src/glu/sgi/libnurbs/internals/gridline.h
	src/glu/sgi/libnurbs/internals/gridtrimvertex.h
	src/glu/sgi/libnurbs/internals/gridvertex.h
	src/glu/sgi/libnurbs/internals/hull.cc
	src/glu/sgi/libnurbs/internals/hull.h
	src/glu/sgi/libnurbs/internals/intersect.cc
	src/glu/sgi/libnurbs/internals/jarcloc.h
	src/glu/sgi/libnurbs/internals/knotvector.h
	src/glu/sgi/libnurbs/internals/mapdesc.cc
	src/glu/sgi/libnurbs/internals/mapdesc.h
	src/glu/sgi/libnurbs/internals/mapdescv.cc
	src/glu/sgi/libnurbs/internals/maplist.cc
	src/glu/sgi/libnurbs/internals/maplist.h
	src/glu/sgi/libnurbs/internals/mesher.cc
	src/glu/sgi/libnurbs/internals/mesher.h
	src/glu/sgi/libnurbs/internals/monoTriangulationBackend.cc
	src/glu/sgi/libnurbs/internals/monotonizer.cc
	src/glu/sgi/libnurbs/internals/monotonizer.h
	src/glu/sgi/libnurbs/internals/myassert.h
	src/glu/sgi/libnurbs/internals/mycode.cc
	src/glu/sgi/libnurbs/internals/mystring.h
	src/glu/sgi/libnurbs/internals/nurbsconsts.h
	src/glu/sgi/libnurbs/internals/nurbstess.cc
	src/glu/sgi/libnurbs/internals/patch.cc
	src/glu/sgi/libnurbs/internals/patch.h
	src/glu/sgi/libnurbs/internals/patchlist.cc
	src/glu/sgi/libnurbs/internals/patchlist.h
	src/glu/sgi/libnurbs/internals/pwlarc.h
	src/glu/sgi/libnurbs/internals/quilt.cc
	src/glu/sgi/libnurbs/internals/quilt.h
	src/glu/sgi/libnurbs/internals/reader.cc
	src/glu/sgi/libnurbs/internals/reader.h
	src/glu/sgi/libnurbs/internals/renderhints.cc
	src/glu/sgi/libnurbs/internals/renderhints.h
	src/glu/sgi/libnurbs/internals/simplemath.h
	src/glu/sgi/libnurbs/internals/slicer.cc
	src/glu/sgi/libnurbs/internals/slicer.h
	src/glu/sgi/libnurbs/internals/sorter.cc
	src/glu/sgi/libnurbs/internals/sorter.h
	src/glu/sgi/libnurbs/internals/splitarcs.cc
	src/glu/sgi/libnurbs/internals/subdivider.h
	src/glu/sgi/libnurbs/internals/tobezier.cc
	src/glu/sgi/libnurbs/internals/trimline.cc
	src/glu/sgi/libnurbs/internals/trimline.h
	src/glu/sgi/libnurbs/internals/trimregion.cc
	src/glu/sgi/libnurbs/internals/trimregion.h
	src/glu/sgi/libnurbs/internals/trimvertex.h
	src/glu/sgi/libnurbs/internals/trimvertpool.cc
	src/glu/sgi/libnurbs/internals/trimvertpool.h
	src/glu/sgi/libnurbs/internals/types.h
	src/glu/sgi/libnurbs/internals/uarray.cc
	src/glu/sgi/libnurbs/internals/uarray.h
	src/glu/sgi/libnurbs/internals/varray.cc
	src/glu/sgi/libnurbs/internals/varray.h
	src/glu/sgi/libnurbs/nurbtess/definitions.h
	src/glu/sgi/libnurbs/nurbtess/directedLine.h
	src/glu/sgi/libnurbs/nurbtess/glimports.h
	src/glu/sgi/libnurbs/nurbtess/gridWrap.cc
	src/glu/sgi/libnurbs/nurbtess/gridWrap.h
	src/glu/sgi/libnurbs/nurbtess/monoChain.cc
	src/glu/sgi/libnurbs/nurbtess/monoChain.h
	src/glu/sgi/libnurbs/nurbtess/monoTriangulation.cc
	src/glu/sgi/libnurbs/nurbtess/monoTriangulation.h
	src/glu/sgi/libnurbs/nurbtess/mystdio.h
	src/glu/sgi/libnurbs/nurbtess/mystdlib.h
	src/glu/sgi/libnurbs/nurbtess/partitionX.cc
	src/glu/sgi/libnurbs/nurbtess/partitionX.h
	src/glu/sgi/libnurbs/nurbtess/partitionY.cc
	src/glu/sgi/libnurbs/nurbtess/partitionY.h
	src/glu/sgi/libnurbs/nurbtess/polyDBG.h
	src/glu/sgi/libnurbs/nurbtess/polyUtil.cc
	src/glu/sgi/libnurbs/nurbtess/polyUtil.h
	src/glu/sgi/libnurbs/nurbtess/primitiveStream.cc
	src/glu/sgi/libnurbs/nurbtess/primitiveStream.h
	src/glu/sgi/libnurbs/nurbtess/quicksort.cc
	src/glu/sgi/libnurbs/nurbtess/quicksort.h
	src/glu/sgi/libnurbs/nurbtess/rectBlock.cc
	src/glu/sgi/libnurbs/nurbtess/rectBlock.h
	src/glu/sgi/libnurbs/nurbtess/sampleComp.cc
	src/glu/sgi/libnurbs/nurbtess/sampleComp.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompBot.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompBot.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompRight.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompRight.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompTop.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompTop.h
	src/glu/sgi/libnurbs/nurbtess/sampleMonoPoly.cc
	src/glu/sgi/libnurbs/nurbtess/sampleMonoPoly.h
	src/glu/sgi/libnurbs/nurbtess/sampledLine.cc
	src/glu/sgi/libnurbs/nurbtess/sampledLine.h
	src/glu/sgi/libnurbs/nurbtess/searchTree.cc
	src/glu/sgi/libnurbs/nurbtess/searchTree.h
	src/glu/sgi/libnurbs/nurbtess/zlassert.h
	src/glu/sgi/libtess/README
	src/glu/sgi/libtess/alg-outline
	src/glu/sgi/libtess/dict-list.h
	src/glu/sgi/libtess/dict.c
	src/glu/sgi/libtess/dict.h
	src/glu/sgi/libtess/geom.c
	src/glu/sgi/libtess/memalloc.c
	src/glu/sgi/libtess/memalloc.h
	src/glu/sgi/libtess/mesh.c
	src/glu/sgi/libtess/mesh.h
	src/glu/sgi/libtess/normal.h
	src/glu/sgi/libtess/priorityq-heap.c
	src/glu/sgi/libtess/priorityq-heap.h
	src/glu/sgi/libtess/priorityq-sort.h
	src/glu/sgi/libtess/priorityq.c
	src/glu/sgi/libtess/priorityq.h
	src/glu/sgi/libtess/render.c
	src/glu/sgi/libtess/render.h
	src/glu/sgi/libtess/sweep.h
	src/glu/sgi/libtess/tess.h
	src/glu/sgi/libtess/tessmono.c
	src/glu/sgi/libtess/tessmono.h
	src/glu/sgi/libutil/error.c
	src/glu/sgi/libutil/glue.c
	src/glu/sgi/libutil/gluint.h
	src/glu/sgi/libutil/project.c
	src/glu/sgi/libutil/registry.c
	src/glx/x11/glxext.c
	src/mesa/main/imports.h
	src/mesa/tnl/t_vb_cliptmp.h
2007-04-20 05:37:11 +02:00
Brice Goglin
3832ecadae Add 06_fix_texture_data_corruption.patch (closes: #412346) 2007-04-08 18:52:31 +02:00
Brice Goglin
285ed128ab Add a .shlibs file for libglw1-mesa. 2007-04-01 00:44:29 +02:00
Brice Goglin
048144074b Fix conflicts for libGLw packages.
libglw1-mesa does not have to conflict with earlier libgl1-mesa-swx11
(libGLw was only shipped as development libs/headers within swx11-dev).

libglw1-mesa-dev needs to conflict with libglw-dev.
2007-04-01 00:43:20 +02:00
Brice Goglin
856ae782c7 * Split out libGLw libs and headers from libgl1-mesa-swx11 and ship both
static and shared libraries, creating libglw1-mesa and libglw1-mesa-dev
    (closes: #374904).
2007-03-31 03:28:32 +02:00
Julien Cristau
12e3f97541 Fix explanation about libglide3 in libgl1-mesa-dri's description.
Thanks to Michel Dänzer for his help.
2007-03-30 17:20:02 +02:00
Julien Cristau
30f6fe0710 Have libgl1-mesa-dri suggest libglide3.
Add explanation for this in the package description.  Closes: #387339.
2007-03-30 14:10:24 +02:00
Cyril Brulebois
0aa8f1df3c * Non-maintainer upload.
* Backport a fix from mesa 6.5.2 to mesa 6.5.1 which:
   - fixes braces around the ASSERT macro;
   - fixes a clipping problem, which leads to systematic segfaults of the
     X server (Closes: #405803).
2007-02-23 00:48:40 +01:00
Julien Cristau
d39147a8a1 Fix FTBFS on amd64.
Set LIB_DIR and EXTRA_LIB_PATH in configs/debian-default to override
settings in configs/linux-x86-64. This fixes a FTBFS on amd64, thanks to
Marc 'HE' Brockschmidt for the report (closes: #410118).
2007-02-07 23:13:59 +01:00
Julien Cristau
7991f7e936 * Add myself to Uploaders. 2007-02-04 21:17:18 +01:00
Julien Cristau
10011efcd7 Add bug closer for #409638. 2007-02-04 17:21:57 +01:00
Julien Cristau
0058815c8f Re-add src/glut/mini/ which was deleted by mistake. 2007-02-04 15:21:46 +01:00
Julien Cristau
02048984b7 Delete directories not included in the mesa tarballs. 2007-02-04 15:15:43 +01:00
Julien Cristau
544601e9f4 Fix FTBFS on non-i386 archs.
Build architecture-independent debs in binary-indep, and use the debhelper -s
option for commands in binary-arch, because architectures other than i386
don't have to build libgl1-mesa-swx11-i686.
2007-02-04 14:05:23 +01:00
Julien Cristau
2634f06c20 Merge branch 'upstream-experimental' into debian-experimental
Conflicts:

	progs/util/README
	progs/util/glstate.c
	progs/util/glstate.h
	progs/util/sampleMakefile
	src/glu/sgi/libnurbs/interface/bezierEval.h
	src/glu/sgi/libnurbs/interface/bezierPatch.cc
	src/glu/sgi/libnurbs/interface/bezierPatch.h
	src/glu/sgi/libnurbs/interface/bezierPatchMesh.cc
	src/glu/sgi/libnurbs/interface/bezierPatchMesh.h
	src/glu/sgi/libnurbs/interface/glcurveval.cc
	src/glu/sgi/libnurbs/interface/glimports.h
	src/glu/sgi/libnurbs/interface/glinterface.cc
	src/glu/sgi/libnurbs/interface/glrenderer.h
	src/glu/sgi/libnurbs/interface/incurveeval.cc
	src/glu/sgi/libnurbs/interface/insurfeval.cc
	src/glu/sgi/libnurbs/interface/mystdio.h
	src/glu/sgi/libnurbs/interface/mystdlib.h
	src/glu/sgi/libnurbs/internals/arc.h
	src/glu/sgi/libnurbs/internals/arcsorter.cc
	src/glu/sgi/libnurbs/internals/arcsorter.h
	src/glu/sgi/libnurbs/internals/arctess.h
	src/glu/sgi/libnurbs/internals/backend.cc
	src/glu/sgi/libnurbs/internals/backend.h
	src/glu/sgi/libnurbs/internals/basiccrveval.h
	src/glu/sgi/libnurbs/internals/basicsurfeval.h
	src/glu/sgi/libnurbs/internals/bezierarc.h
	src/glu/sgi/libnurbs/internals/bin.cc
	src/glu/sgi/libnurbs/internals/bin.h
	src/glu/sgi/libnurbs/internals/bufpool.cc
	src/glu/sgi/libnurbs/internals/bufpool.h
	src/glu/sgi/libnurbs/internals/cachingeval.cc
	src/glu/sgi/libnurbs/internals/cachingeval.h
	src/glu/sgi/libnurbs/internals/ccw.cc
	src/glu/sgi/libnurbs/internals/coveandtiler.h
	src/glu/sgi/libnurbs/internals/curve.cc
	src/glu/sgi/libnurbs/internals/curve.h
	src/glu/sgi/libnurbs/internals/curvelist.cc
	src/glu/sgi/libnurbs/internals/curvelist.h
	src/glu/sgi/libnurbs/internals/curvesub.cc
	src/glu/sgi/libnurbs/internals/dataTransform.cc
	src/glu/sgi/libnurbs/internals/dataTransform.h
	src/glu/sgi/libnurbs/internals/defines.h
	src/glu/sgi/libnurbs/internals/displaylist.cc
	src/glu/sgi/libnurbs/internals/displaylist.h
	src/glu/sgi/libnurbs/internals/displaymode.h
	src/glu/sgi/libnurbs/internals/flist.cc
	src/glu/sgi/libnurbs/internals/flist.h
	src/glu/sgi/libnurbs/internals/flistsorter.cc
	src/glu/sgi/libnurbs/internals/flistsorter.h
	src/glu/sgi/libnurbs/internals/gridline.h
	src/glu/sgi/libnurbs/internals/gridtrimvertex.h
	src/glu/sgi/libnurbs/internals/gridvertex.h
	src/glu/sgi/libnurbs/internals/hull.cc
	src/glu/sgi/libnurbs/internals/hull.h
	src/glu/sgi/libnurbs/internals/intersect.cc
	src/glu/sgi/libnurbs/internals/jarcloc.h
	src/glu/sgi/libnurbs/internals/knotvector.h
	src/glu/sgi/libnurbs/internals/mapdesc.cc
	src/glu/sgi/libnurbs/internals/mapdesc.h
	src/glu/sgi/libnurbs/internals/mapdescv.cc
	src/glu/sgi/libnurbs/internals/maplist.cc
	src/glu/sgi/libnurbs/internals/maplist.h
	src/glu/sgi/libnurbs/internals/mesher.cc
	src/glu/sgi/libnurbs/internals/mesher.h
	src/glu/sgi/libnurbs/internals/monoTriangulationBackend.cc
	src/glu/sgi/libnurbs/internals/monotonizer.cc
	src/glu/sgi/libnurbs/internals/monotonizer.h
	src/glu/sgi/libnurbs/internals/myassert.h
	src/glu/sgi/libnurbs/internals/mycode.cc
	src/glu/sgi/libnurbs/internals/mystring.h
	src/glu/sgi/libnurbs/internals/nurbsconsts.h
	src/glu/sgi/libnurbs/internals/nurbstess.cc
	src/glu/sgi/libnurbs/internals/patch.cc
	src/glu/sgi/libnurbs/internals/patch.h
	src/glu/sgi/libnurbs/internals/patchlist.cc
	src/glu/sgi/libnurbs/internals/patchlist.h
	src/glu/sgi/libnurbs/internals/pwlarc.h
	src/glu/sgi/libnurbs/internals/quilt.cc
	src/glu/sgi/libnurbs/internals/quilt.h
	src/glu/sgi/libnurbs/internals/reader.cc
	src/glu/sgi/libnurbs/internals/reader.h
	src/glu/sgi/libnurbs/internals/renderhints.cc
	src/glu/sgi/libnurbs/internals/renderhints.h
	src/glu/sgi/libnurbs/internals/simplemath.h
	src/glu/sgi/libnurbs/internals/slicer.cc
	src/glu/sgi/libnurbs/internals/slicer.h
	src/glu/sgi/libnurbs/internals/sorter.cc
	src/glu/sgi/libnurbs/internals/sorter.h
	src/glu/sgi/libnurbs/internals/splitarcs.cc
	src/glu/sgi/libnurbs/internals/subdivider.h
	src/glu/sgi/libnurbs/internals/tobezier.cc
	src/glu/sgi/libnurbs/internals/trimline.cc
	src/glu/sgi/libnurbs/internals/trimline.h
	src/glu/sgi/libnurbs/internals/trimregion.cc
	src/glu/sgi/libnurbs/internals/trimregion.h
	src/glu/sgi/libnurbs/internals/trimvertex.h
	src/glu/sgi/libnurbs/internals/trimvertpool.cc
	src/glu/sgi/libnurbs/internals/trimvertpool.h
	src/glu/sgi/libnurbs/internals/types.h
	src/glu/sgi/libnurbs/internals/uarray.cc
	src/glu/sgi/libnurbs/internals/uarray.h
	src/glu/sgi/libnurbs/internals/varray.cc
	src/glu/sgi/libnurbs/internals/varray.h
	src/glu/sgi/libnurbs/nurbtess/definitions.h
	src/glu/sgi/libnurbs/nurbtess/directedLine.h
	src/glu/sgi/libnurbs/nurbtess/glimports.h
	src/glu/sgi/libnurbs/nurbtess/gridWrap.cc
	src/glu/sgi/libnurbs/nurbtess/gridWrap.h
	src/glu/sgi/libnurbs/nurbtess/monoChain.cc
	src/glu/sgi/libnurbs/nurbtess/monoChain.h
	src/glu/sgi/libnurbs/nurbtess/monoTriangulation.cc
	src/glu/sgi/libnurbs/nurbtess/monoTriangulation.h
	src/glu/sgi/libnurbs/nurbtess/mystdio.h
	src/glu/sgi/libnurbs/nurbtess/mystdlib.h
	src/glu/sgi/libnurbs/nurbtess/partitionX.cc
	src/glu/sgi/libnurbs/nurbtess/partitionX.h
	src/glu/sgi/libnurbs/nurbtess/partitionY.cc
	src/glu/sgi/libnurbs/nurbtess/partitionY.h
	src/glu/sgi/libnurbs/nurbtess/polyDBG.h
	src/glu/sgi/libnurbs/nurbtess/polyUtil.cc
	src/glu/sgi/libnurbs/nurbtess/polyUtil.h
	src/glu/sgi/libnurbs/nurbtess/primitiveStream.cc
	src/glu/sgi/libnurbs/nurbtess/primitiveStream.h
	src/glu/sgi/libnurbs/nurbtess/quicksort.cc
	src/glu/sgi/libnurbs/nurbtess/quicksort.h
	src/glu/sgi/libnurbs/nurbtess/rectBlock.cc
	src/glu/sgi/libnurbs/nurbtess/rectBlock.h
	src/glu/sgi/libnurbs/nurbtess/sampleComp.cc
	src/glu/sgi/libnurbs/nurbtess/sampleComp.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompBot.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompBot.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompRight.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompRight.h
	src/glu/sgi/libnurbs/nurbtess/sampleCompTop.cc
	src/glu/sgi/libnurbs/nurbtess/sampleCompTop.h
	src/glu/sgi/libnurbs/nurbtess/sampleMonoPoly.cc
	src/glu/sgi/libnurbs/nurbtess/sampleMonoPoly.h
	src/glu/sgi/libnurbs/nurbtess/sampledLine.cc
	src/glu/sgi/libnurbs/nurbtess/sampledLine.h
	src/glu/sgi/libnurbs/nurbtess/searchTree.cc
	src/glu/sgi/libnurbs/nurbtess/searchTree.h
	src/glu/sgi/libnurbs/nurbtess/zlassert.h
	src/glu/sgi/libtess/README
	src/glu/sgi/libtess/alg-outline
	src/glu/sgi/libtess/dict-list.h
	src/glu/sgi/libtess/dict.c
	src/glu/sgi/libtess/dict.h
	src/glu/sgi/libtess/geom.c
	src/glu/sgi/libtess/memalloc.c
	src/glu/sgi/libtess/memalloc.h
	src/glu/sgi/libtess/mesh.c
	src/glu/sgi/libtess/mesh.h
	src/glu/sgi/libtess/normal.h
	src/glu/sgi/libtess/priorityq-heap.c
	src/glu/sgi/libtess/priorityq-heap.h
	src/glu/sgi/libtess/priorityq-sort.h
	src/glu/sgi/libtess/priorityq.c
	src/glu/sgi/libtess/priorityq.h
	src/glu/sgi/libtess/render.c
	src/glu/sgi/libtess/render.h
	src/glu/sgi/libtess/sweep.h
	src/glu/sgi/libtess/tess.h
	src/glu/sgi/libtess/tessmono.c
	src/glu/sgi/libtess/tessmono.h
	src/glu/sgi/libutil/error.c
	src/glu/sgi/libutil/glue.c
	src/glu/sgi/libutil/gluint.h
	src/glu/sgi/libutil/project.c
	src/glu/sgi/libutil/registry.c
2007-02-01 11:50:36 +01:00
Julien Cristau
7549426a16 Sync Section and Priority for all packages with the override. 2007-01-28 18:18:38 +01:00
Thierry Reding
6dcb9a3434 Give credit where credit is due. 2007-01-11 15:09:23 +01:00
Thierry Reding
e5a31c00ec libosmesa6{,-dev} replace libgl1-mesa-swx11{,-dev} and mesa-common-dev.
libosmesa6 and libosmesa6-dev are now the only packages to contain OSMesa
libraries and header files so they can be installed independent of which libGL
is installed. They statically link in the software rasterization code.
2007-01-11 15:06:42 +01:00
Thierry Reding
00295242fd mesa-common-dev needs to replace libgl1-mesa-swx11-dev and libgl1-mesa-dev.
The GLX headers were previously shipped in both packages and have been moved
to mesa-common-dev.
2007-01-11 14:56:48 +01:00
Thierry Reding
21b6ed7c29 Build the DRI modules with the default optimization flags. 2007-01-11 14:40:10 +01:00
Thierry Reding
01894cbf10 Revert the previous changes to the libgl1-mesa-{glx,swx11} shlibs. 2007-01-10 15:23:29 +01:00
Thierry Reding
28cad2b9d9 Bump libgl1-mesa-glx, libgl1-mesa-swx11 and libosmesa6 shlibs files. 2007-01-10 13:55:19 +01:00
Thierry Reding
71d7b7dbe7 Drop noop line. 2007-01-08 13:36:25 +01:00
Thierry Reding
4563723db1 Make sure all files in the mesa-swx11-source package have permissions 0644. 2007-01-08 13:36:16 +01:00
Thierry Reding
0bce6a1899 Compile with -O2 optimization instead of -O3 which might lead to problems. 2007-01-08 11:43:25 +01:00
Thierry Reding
e71d6dbd9c Make debugging symbol packages depend on the corresponding binary package and
put them into the libdevel section.
2007-01-08 11:41:02 +01:00
Thierry Reding
132ecd19f3 Do not build any linux*-static configuration with -fPIC. 2007-01-08 10:07:08 +01:00
Thierry Reding
a404143f4c Add patches 04_cleanup-osmesa-configs and 05_static-nonpic.
04_cleanup-osmesa-configs makes the OSMesa configurations behave as expected.
That is, respect the OPT_FLAGS, PIC_FLAGS, ARCH_FLAGS and DEFINES variables.
The patch also makes the files more consistent by inheriting from more general
configurations.

05_static-nonpic fixes the linux-static configuration to not use -fPIC for
compiling code because it would violate Debian policy.
2007-01-08 08:35:42 +01:00
Julien Cristau
44467ca45e * Drop obsolete depends on xlibs. 2007-01-05 15:44:51 +01:00
Julien Cristau
f70ea0747d Import changelog entry from the 6.5.1-0.5 NMU. 2007-01-04 21:30:49 +01:00
Thierry Reding
c619abf18a Hijack the package (set XSF as maintainer, add myself to uploaders). \o/ 2007-01-04 16:35:14 +01:00
Julien Cristau
6da1323437 * Non-maintainer upload.
* Build with -fno-strict-aliasing to fix misbuild of i965_dri.so
  (closes: #394311).  Thanks to Michel Dänzer for the fix, and to Ryan
  Richter for the report and testing.
2007-01-03 16:59:06 +01:00
Julien Cristau
1dd29785cf Import changes from the mesa 6.5.1-0.4 NMU. 2007-01-03 13:43:46 +01:00
Julien Cristau
5681987ee4 Import changes from the mesa 6.5.1-0.3 NMU. 2007-01-03 13:41:57 +01:00
Thierry Reding
6eeb18fc68 Build the i915tex DRI module on the i386 and amd64 architectures. 2006-12-05 00:45:42 +00:00
Thierry Reding
01c90575b4 Add missing changelog entry and bug closer. 2006-12-04 07:23:14 +00:00
Thierry Reding
32e9cb0275 Update to new upstream release (6.5.2). 2006-12-02 23:46:07 +00:00
Thierry Reding
017bade93c Update to latest upstream release candidate (6.5.2 RC3). 2006-12-01 00:16:33 +00:00
Thierry Reding
c9c79e7cae Add manpages to the mesa-utils package. 2006-11-29 18:07:26 +00:00
Thierry Reding
777d41dd07 Mark the latest changelog entries as NMUs. 2006-11-26 02:40:01 +00:00
Thierry Reding
6238c30913 Revert maintainer and uploaders fields. Integrate latest NMUs into the
changelog and be a little more verbose in the debian/rules rewrite
changelog entries.
2006-11-26 02:38:31 +00:00
Thierry Reding
db164ab514 Update to latest upstream release candidate. 2006-11-22 19:50:34 +00:00
Thierry Reding
c72ef17731 Add libOSMesa16 and libOSMesa32 entries to libosmesa6.shlibs so packages
linking against those libraries will get the correct dependencies.
2006-11-22 19:47:56 +00:00
Thierry Reding
8dbee7867f Use the new upstream minstall utility to install files and directories. Using
/usr/bin/install would result in a symlink's target being copied instead of
the symlink.
2006-11-18 21:28:07 +00:00
Thierry Reding
f403108f5f Fix typo in changelog. libdrm2-dev is not a package. 2006-11-18 21:22:52 +00:00
Thierry Reding
223a97f92a Bump build-dependency on libdrm-dev (>= 2.2.0). 2006-11-18 21:21:18 +00:00
Thierry Reding
bb44a8ae1f Update to latest upstream release candidate.
Update patches.
2006-11-18 17:59:25 +00:00
Thierry Reding
b5a5062176 Target the mesa package at experimental until etch is released. 2006-11-18 17:50:44 +00:00
Thierry Reding
50faa32ce3 Move the code to choose which configurations to build to a separate script in
order to keep debian/rules cleaner.
2006-11-10 09:00:00 +00:00
Thierry Reding
b6caed527c Also add release notes of version 6.5.1 and the release notes index. 2006-11-10 08:50:35 +00:00
Thierry Reding
d5ef77353b Build Mesa utility programs (glxdemo, glxgears, glxheads, glxinfo) together
with the DRI/GLX-enabled libGL. Don't build the utilities on CPU-optimized
configurations.
2006-11-10 08:40:03 +00:00
Thierry Reding
336beba81b Update Debian changelog. 2006-11-02 21:20:33 +00:00
Thierry Reding
5d23a7905c Add code to provide a mesa-utils package since those utilities are shipped in
the MesaDemos tarball.
2006-11-02 21:05:29 +00:00
Thierry Reding
143dfcc904 Make mesa-common-dev depend on libx11-dev to provide Xlib.h and Xutil.h needed
by the GLX headers.
2006-11-01 07:54:40 +00:00
Thierry Reding
e076cd42f0 Bug #387706 should also be closed because we move libOSMesa to libosmesa6 and
statically link in the software rasterization code.
2006-10-25 00:17:42 +00:00
Thierry Reding
5bed38974e Close 392715 because we now ship the contents of the three Mesa tarballs in
the original source.
2006-10-24 14:45:18 +00:00
Thierry Reding
35bc4b82ff Since the original tarball is repackaged to include the three upstream
tarballs (MesaDemos, MesaGLUT and MesaLib), remove files that are not supposed
to be there.

Note that when importing initially, some files were not even added to the SVN
repository because SVN ignored them. These were configs/diffs~ and
docs/RELNOTES-6.1~. I removed those from the tarball as well.
2006-10-24 14:33:30 +00:00
Thierry Reding
4e5d870f72 Make libosmesa6-dev binNMU-safe again (mesa-common-dev is Architecture: all). 2006-10-24 14:29:11 +00:00
Thierry Reding
83def94db5 Make libgl1-mesa-dev Architecture: all, because all we ship with it is a
symbolic link. Make it binNMU-safe by making it depend on libgl1-mesa-glx and
libgl1-mesa-dri both >= ${Source-Version} instead of exact versions.

Add packages containing debugging symbols for libgl1-mesa-swx11,
libgl1-mesa-glx and libgl1-mesa-dri.
2006-10-24 12:16:16 +00:00
Thierry Reding
9d74c1ef2a -fno-strict-aliasing actually *does* fix #394311 according to the submitter. 2006-10-24 10:31:28 +00:00
Thierry Reding
6023d4bdb5 Build with -fno-strict-aliasing, which should fix bug #394311. 2006-10-24 10:29:21 +00:00
Thierry Reding
5e38f566f2 Provide libgl-dev as alternative to the mesa-common-dev dependency of the
libosmesa6-dev package.
2006-10-23 16:45:00 +00:00
Thierry Reding
b57341a78b Move the glx*.h headers from libgl1-mesa(-swx11)-dev into mesa-common-dev
because both packages provide identical files.
2006-10-23 16:43:41 +00:00
Thierry Reding
910c32c0e9 Install the osmesa.h header in the libosmesa6-dev package where it belongs,
now that all OSMesa libraries have been split off into libosmesa6{,-dev}.

Don't make libosmesa6-dev depend on libgl1-mesa-swx11-dev anymore because it
no longer needs a libGL, only the gl.h header provided by mesa-common-dev.
2006-10-23 16:02:22 +00:00
Thierry Reding
e49456d8d8 Don't force -fPIC everywhere, because it seems to be unused only on x86, which we
build with assembler optimizations by default, resulting in non-PIC libraries
anyway.
2006-10-23 14:40:10 +00:00
Thierry Reding
c46becc9b3 Move the libOSMesa shlibs entry from libgl1-mesa-swx11 to libosmesa6 because
that's where libOSMesa is now installed.
2006-10-23 08:30:53 +00:00
Thierry Reding
b6e637106f Add the static libGLw and the corresponding headers to the
libgl1-mesa-swx11-dev package again.
2006-10-23 08:23:27 +00:00
Thierry Reding
787cffe3d0 Always force -fPIC. 2006-10-23 08:22:25 +00:00
Thierry Reding
9d914e6809 Make sure the install-source.sh script is executable before trying to run it. 2006-10-20 20:56:41 +00:00
Thierry Reding
4e183c66e9 Add comments to patches and enable the previously disabled patches. 2006-10-20 19:41:00 +00:00
Thierry Reding
b1630fcbe8 Add build-dependency on lesstif2-dev needed to build the GLw libraries. 2006-10-20 19:17:34 +00:00
Thierry Reding
cd44f730cc Enable building the GLw libraries. 2006-10-20 19:07:36 +00:00
Thierry Reding
ce5a39559e Install several documents from the docs subdirectory into the mesa-common-dev
package.
2006-10-20 17:51:36 +00:00
Thierry Reding
ccffaef603 Build architecture-specific static libraries where available. 2006-10-20 12:15:29 +00:00
Thierry Reding
ae41f3a3ee Don't make libgl1-mesa-swx11-dev depend on lesstif2-dev anymore because we
don't ship anything related to LessTif in that package.
2006-10-20 12:06:20 +00:00
Thierry Reding
c7e576d6bf No longer install the default build of libOSMesa in the libgl1-mesa-swx11
package, but in the libosmesa6 package.

Modify the 01_fix-makefile patch to not build libOSMesa on stand-alone builds,
so that it is only created when building OSMesa explicitly.

Build libOSMesa with 8 bits per color channel (the default) in dynamic and
static versions separately from the software rasterization libGL with the
software rasterization code linked in statically so that libOSMesa can run in
parallel with any libGL. This should fix the problem Yann Dirson mentioned a
while ago.

Use the install-source.sh script to install the Mesa sources needed for the
mesa-swx11-source package instead of doing it in the debian/rules file itself.
2006-10-20 07:56:11 +00:00
Thierry Reding
f069b2812e Add a set of files to enable an i686 optimized build of the DRI/GLX-enabled
libGL and the DRI drivers. Disabled for now.
2006-10-20 07:47:37 +00:00
Thierry Reding
f20ff6213e Add a shell script to install the Mesa sources for the mesa-swx11-source
package.
2006-10-20 07:45:39 +00:00
Thierry Reding
24ff74bdcb Add configurations to build static versions of the default OSMesa library.
In all Debian-specific configurations, include the Debian defaults
configuration after the Mesa configurations and drop the 'override' for all
variables because it is no longer needed.
2006-10-20 07:43:50 +00:00
Thierry Reding
230e79916f Merge in the old Debian changelog entries and the old copyright file. 2006-10-17 19:35:35 +00:00
Thierry Reding
9f14b4db35 Use -march in favor of the deprecated -mcpu. 2006-10-17 19:13:27 +00:00
Thierry Reding
ec916398e7 Don't install libOSMesa development files in the libgl1-mesa-swx11 package. 2006-10-16 14:43:43 +00:00
Thierry Reding
6befad112e Actually do rename debian-static to debian-swx11+glu-static. 2006-10-16 13:54:24 +00:00
Thierry Reding
55ce23b8db Beautify IEEE floating point patch. 2006-10-16 13:34:57 +00:00
Thierry Reding
1292ec1ecf Fix typo in the filename of the IEEE FP on s390 and m68k patch. 2006-10-16 13:11:51 +00:00
Thierry Reding
65a324062f Add patches extracted from older Debian diffs but not applied yet because it's
not certain yet that they are needed.
2006-10-16 13:05:42 +00:00
Thierry Reding
c9db617e0b Make the debian-static a subclass of debian-swx11+glu-default, because that's
what it actually is. Rename it accordingly and add it to SWX11_GLU_CONFIGS.

Install the libOSMesa.so symlink and the libOSMesa static library in the
libgl1-mesa-swx11-dev package.

Add .shlibs files for the libgl1-mesa-swx11, libgl1-mesa-glx and libglu1-mesa
packages.
2006-10-16 13:04:29 +00:00
Thierry Reding
b43137d0cc Build libGLU by default for swx11+glu configurations but not in the
configurations targeting specific CPUs because it is unlikely to result in
an increase in performance.
2006-10-16 11:53:06 +00:00
Thierry Reding
eeca642778 Add versioned build-dependency on dpkg-dev (>= 1.13.19) to make sure the
binary:Version and source:Version substitution variables can be used.
2006-10-16 11:49:46 +00:00
Thierry Reding
5cef6ca36a Move the detection of architecture-specific configurations to the debian/rules
file.

Add the configs target to debian/rules for checking whether the correct
configurations will be built.
2006-10-16 10:03:32 +00:00
Thierry Reding
be2b518f7b Also build a static version of libGLU. 2006-10-16 09:53:30 +00:00
Thierry Reding
1b0d29afbc Rename the CPU optimization configurations to make it clear that they only
build swx11 and GLU.

Build architecture-optimized swx11 and GLU where possible.
2006-10-16 09:39:59 +00:00
Thierry Reding
55cf480c0b Drop the libgl1-mesa-swx11-dbg package until there's a decision as to whether
it is really needed.

Allow more than one type of optimization libraries to be built for any given
architecture. Currently optimization configurations are provided for i386
[i686], alpha [ev5], amd64, powerpc [603], ppc64 and sparc [ultrasparc].
However, only i686 and amd64 are enabled for now for safety.

Add the libgl1-mesa-swx11-i686 package for i686 optimized libraries.

Drop the debian-common configuration because it is no longer used.

Don't build libGLU in all configurations, only when needed.
2006-10-16 08:58:05 +00:00
Thierry Reding
84db6912f0 Add missing build-dependencies. 2006-10-15 18:02:33 +00:00
Thierry Reding
7495cc28a0 Only build the i810, i915, i965, sis and unichrome DRI drivers on i386 and
amd64.

Only build the ffb DRI driver on sparc.
2006-10-15 16:17:53 +00:00
Thierry Reding
87744f786b Use STAMP_DIR consistently throughout the debian/rules file.
Make the BUILD_STAMPS targets depend on patch so that when they are built
separately the patches will also be applied. Thanks to Michel Dänzer for
catching this.

Make symlinking the source tree idempotent by adding the -f option to cp.
Thanks again Michel Dänzer.
2006-10-15 14:41:12 +00:00
Thierry Reding
5bcfa4c859 Build only optimized versions of the DRI drivers, only using no optimizations
when not supported.

Add configuration to build the DRI drivers on amd64.
2006-10-15 13:30:22 +00:00
Thierry Reding
8ef731bcd1 Add a patch (00_create-libdir) that fixes the upstream build system to make
sure the LIB_DIR is created or bail out.

Update the 01_fix-makefile patch to only install libGL/libOSMesa if they
exist. Don't make installation dependent on which drivers are built.

Build optimized versions of the swx11 libraries in addition to those that
should work on the least-capable supported processor for the given
architecture.

Have most configurations include debian-default instead of debian-common so
that INSTALL_DIR is correctly defined. debian-common can probably be dropped
anyway.
2006-10-15 13:15:56 +00:00
Thierry Reding
8d02f3c68a Add patch to fix upstream build system to correctly install libOSMesa and
variants. This patch also fixes the build system not to install libGL if only
the OSMesa library is built.

Update *.install files to install more files. Pretty much everything except
debugging and optimized libraries is now built and installed.

Enable building static libraries for inclusion in the -dev packages.
2006-10-15 00:49:41 +00:00
Thierry Reding
b1f8b49e8b Add build-dependency on quilt.
Remove shlibs:Depends substitution variable from -dev packages, which doesn't
make sense.
2006-10-15 00:42:25 +00:00
David Nusinow
75733e4237 Commit diff of 6.5.1-0.2 so we've got a record of it here. 2006-10-14 22:42:57 +00:00
Thierry Reding
6e318e2cd1 Base the Debian OSMesa configurations on linux-osmesa* provided upstream.
Add a configuration for building a static version of OSMesa with 32 bits per
color channel.
2006-10-14 17:04:02 +00:00
Thierry Reding
01e6b84b14 Fix some small inconsistencies involving configuration names. 2006-10-14 16:32:25 +00:00
Thierry Reding
69705d22be Rename the debian-dri-common configuration to debian-dri-default, to make it
consistent with the naming scheme of debian-default.
2006-10-14 16:29:32 +00:00
Thierry Reding
b16d1dcff5 Add a default Debian-specific configuration which overrides settings from the
default Mesa configuration.
2006-10-14 16:28:45 +00:00
Thierry Reding
10c5b5d5ba Comment the Debian-specific configurations. 2006-10-14 16:25:30 +00:00
Thierry Reding
d9c5c170f6 Rename the -default configurations to -any, which is a more appropriate name. 2006-10-14 15:44:15 +00:00
Thierry Reding
32c7138d86 Move the debian/debian-dri configurations to debian-default/debian-dri-default
respectively, and use debian/debian-dri to choose an architecture specific
configuration.
2006-10-14 15:42:59 +00:00
Thierry Reding
d29244d872 Add OSMesa configurations and fixed several issues here and there. 2006-10-14 15:11:31 +00:00
Thierry Reding
0fca6eda1e Initial version of a new build system for Mesa. 2006-10-14 04:33:41 +00:00
Thierry Reding
1ddf606332 Import Mesa 6.5.1 (MesaLib, MesaDemos, MesaGLUT). 2006-10-14 03:46:41 +00:00
David Nusinow
bc8084da74 Fix goofed patch from before 2006-09-26 01:36:37 +00:00
David Nusinow
5eed400862 Prepare changelog for release 2006-09-26 01:21:59 +00:00
David Nusinow
a37a0959ac * Add patch from Cyril Brulebois to allow package to build on HURD, which
lacks DRI and directfb. This includes not using lib-directfb in the
  build-depends for hurd-i386. It also creates a new debian config,
  debian-indirect, which is used when building for HURD. This config is
  invoked in the debian-dri config on hurd-i386. Thanks to Cyril Brulebois
  for the patch, Michael Banck, Michel Dänzer, and Samuel Thibault for
  input on an appropriate fix. (closes: #358065)
2006-09-26 00:46:14 +00:00
David Nusinow
53845c52e5 * Re-add s390 and m68k to the USE_IEEE test in src/mesa/main/imports.h. This
package seriously needs to store patches somewhere that are easy to find
  and re-apply.
2006-09-26 00:32:06 +00:00
David Nusinow
d429f0b14f * Make sure that libGL looks for the dri drivers in the proper location. Do
this by setting the appropriate variables in the debian config
2006-09-25 02:57:24 +00:00
David Nusinow
02339187ad * Stuff not in the upstream tarballs
* Bump libdrm-dev build-dep to >= 2.0.2
* Add libdrm cflags to the debian-dri config. This allows the build system
  to find drm.h
2006-09-24 22:55:41 +00:00
David Nusinow
827f778525 * Stuff not in the upstream tarballs
+ os2 glut stuff
  + docs/gears.png
2006-09-24 21:54:25 +00:00
David Nusinow
d3fc85eea9 Remove os2 glut stuff. It's not in the tarballs 2006-09-24 21:50:01 +00:00
David Nusinow
9a62963129 Add glut 2006-09-24 21:40:02 +00:00
David Nusinow
a0b2543ba1 Add mesa demos 2006-09-24 21:29:21 +00:00
David Nusinow
387acaac69 * New upstream version 2006-09-24 21:21:15 +00:00
David Nusinow
952b775dbe Rename mesa dir 2006-09-24 21:18:17 +00:00
David Nusinow
eadb76b3f8 Bump to latest mesa in Debian 2006-09-24 21:11:46 +00:00
Steve Langasek
b3f7313ae4 Nuke the directory for an obsolete version of mesa -- no reason to merge
it anywhere, it's already superseded
2006-09-18 23:49:49 +00:00
David Nusinow
0c97d48027 * Install the glx sources with the rest in the sources package. This will
allow building of Xgl
2006-04-08 18:28:29 +00:00
David Nusinow
c4a678de9e Update source package install file, change version number to match reality, and note NMU in changelog 2006-04-07 06:36:53 +00:00
David Nusinow
2c834f4d29 * Don't build the now non-existent i830 driver in debian dri configs. Fixes
FTBFS.
2006-04-07 03:11:47 +00:00
David Nusinow
b93d290c9f * Remove set_buffer function from idirectfbgl_mesa.c in directfb driver.
Also remove the line that sets SetBuffer to it. Fixes FTBFS.
2006-04-07 02:22:22 +00:00
David Nusinow
32266bb21a * Re-Add s390 and m68k to the USE_IEEE test in src/mesa/main/imports.h.
I need to submit this upstream.
2006-04-07 01:50:23 +00:00
David Nusinow
cab39103b7 * Increment libdrm-dev build-dep to 2.0.1 2006-04-07 00:38:00 +00:00
David Nusinow
5c56a80ac9 Copy over Debian packaging from 6.4.1 2006-04-07 00:35:30 +00:00
David Nusinow
d0bac31721 Copy vendor drop of Mesa 6.5 over to working branch 2006-04-07 00:30:49 +00:00
David Nusinow
9258e54cfa Move incorrect vendor drop of Mesa 6.5 2006-04-07 00:29:08 +00:00
David Nusinow
ccb68495e0 Vendor drop of Mesa 6.5 2006-04-07 00:24:21 +00:00
David Nusinow
db4236fe0b * Add versioned conflict between libgl1-mesa-dri and xlibmesa-dri so that
the xlibmesa-dri transitional upgrade package works
2006-03-07 02:48:44 +00:00
David Nusinow
a4e43427f8 * Move compatibility packages to the xorg source package. The versioning on
these packages will be too low without an epoch and they definitely fit
  with the xorg package anyway.
2006-02-24 05:43:01 +00:00
David Nusinow
829190abdd * Provide compatibility packages for the Xorg 6.9 mesa packages. This
package will be the only source of mesa packages in Debian for the
  present, and these packages will provide for smooth upgrades and
  compatibility.
2006-02-22 03:22:26 +00:00
David Nusinow
99688cd051 Add m68k to the previous fix to address its FTBFS as well 2006-02-11 23:00:27 +00:00
David Nusinow
1ba0a92702 * Add s390 to the USE_IEEE test in src/mesa/main/imports.h.
(closes: #349437)
2006-02-09 01:24:52 +00:00
David Nusinow
065d5a8d17 * Remove makedepend from build-depends. Now we'll just build-dep on xutils
to get the app, which will translate over to our own xorg 7.0 plans.
2006-02-01 00:22:24 +00:00
David Nusinow
3b53c0ac19 * Re-add dh_installdirs call to binary-indep target so that we get
arch-specific dirs for the mesa-swx11-source package
2006-01-30 04:02:11 +00:00
David Nusinow
ed56fe4e82 * mesa-swrast-src.install: stop looking for the swx11 dir and look for swrast instead 2006-01-22 19:50:19 +00:00
David Nusinow
1fbbe807ed Make libglu1-mesa default to depending on libgl1-mesa-glx. Thanks Michel Dänzer for catching this one. 2006-01-22 00:07:07 +00:00
David Nusinow
c3dccfa05b Use appropriate version number for an NMU. Thanks Julien for catching this one 2006-01-21 23:40:34 +00:00
David Nusinow
7703d9293f Incorporate other NMU for xlibs-dev transition 2006-01-21 23:32:52 +00:00
David Nusinow
bcf560e4b8 * Change libgl1-mesa to be named libgl1-mesa-glx 2006-01-19 23:45:36 +00:00
David Nusinow
9d944b501a * Change libgl1-mesa-swrast to be named libgl1-mesa-swx11 2006-01-16 22:19:03 +00:00
David Nusinow
cf013a5384 Add more closers 2006-01-11 04:10:39 +00:00
David Nusinow
2f367491c4 Add closer 2006-01-11 04:00:15 +00:00
David Nusinow
e7d93b62ec * Change the libgl1-mesa-swrast Depends from libx11-6-dev to libx11-dev.
(closes: #347205)
* Add closer for packaging new upstream
2006-01-11 03:48:31 +00:00
David Nusinow
a5b309c0c9 Document that we'll be NMU'ing this for modular work 2006-01-06 03:09:12 +00:00
David Nusinow
df46b96ab0 Re-add mesa-6.4.1 which was accidentally deleted 2006-01-03 05:15:27 +00:00
David Nusinow
bda1e332ce Update libs to 7.0 release with packaging 2005-12-30 20:12:45 +00:00
David Nusinow
53b2ad5fd0 Rename mesa lib dir to appropriate version 2005-12-16 04:04:38 +00:00
David Nusinow
b44d9c657d Changelog version bump 2005-12-16 04:03:27 +00:00
David Nusinow
92ef7527d1 * New upstream version (6.4.1)
* Merge changes from Ubuntu version 6.4.1-0ubuntu1
  + Add new files required by xorg-server GL build to mesa-swrast-source.
2005-12-14 01:43:13 +00:00
David Nusinow
c37d6b4ca8 Commit Mesa 6.4 sources and packaging 2005-11-09 02:46:07 +00:00
10481 changed files with 885900 additions and 3653677 deletions


@@ -1,18 +0,0 @@
((nil . ((show-trailing-whitespace . t)))
(prog-mode
(indent-tabs-mode . nil)
(tab-width . 8)
(c-basic-offset . 3)
(c-file-style . "stroustrup")
(fill-column . 78)
(eval . (progn
(c-set-offset 'case-label '0)
(c-set-offset 'innamespace '0)
(c-set-offset 'inline-open '0)))
(whitespace-style face indentation)
(whitespace-line-column . 79)
(eval ignore-errors
(require 'whitespace)
(whitespace-mode 1)))
(makefile-mode (indent-tabs-mode . t))
)


@@ -1,48 +0,0 @@
# To use this config in your editor, follow the instructions at:
# http://editorconfig.org
root = true
[*]
charset = utf-8
insert_final_newline = true
tab_width = 8
[*.{c,h,cpp,hpp,cc,hh}]
indent_style = space
indent_size = 3
max_line_length = 78
[{Makefile*,*.mk}]
indent_style = tab
[{*.py,SCons*}]
indent_style = space
indent_size = 4
[*.pl]
indent_style = space
indent_size = 4
[*.m4]
indent_style = space
indent_size = 2
[*.yml]
indent_style = space
indent_size = 2
[*.html]
indent_style = space
indent_size = 2
[*.rst]
indent_style = space
indent_size = 3
[*.patch]
trim_trailing_whitespace = false
[{meson.build,meson_options.txt}]
indent_style = space
indent_size = 2

.gitignore

@@ -1,4 +0,0 @@
*.pyc
*.pyo
*.out
build

File diff suppressed because it is too large

@@ -1,2 +0,0 @@
[*.sh]
indent_size = 2


@@ -1,26 +0,0 @@
#!/bin/sh
# This test script groups together a bunch of fast dEQP variant runs
# to amortize the cost of rebooting the board.
set -ex
EXIT=0
# Run reset tests without parallelism:
if ! env \
DEQP_RESULTS_DIR=results/reset \
DEQP_PARALLEL=1 \
DEQP_CASELIST_FILTER='.*reset.*' \
/install/deqp-runner.sh; then
EXIT=1
fi
# Then run everything else with parallelism:
if ! env \
DEQP_RESULTS_DIR=results/nonrobustness \
DEQP_CASELIST_INV_FILTER='.*reset.*' \
/install/deqp-runner.sh; then
EXIT=1
fi


@@ -1,81 +0,0 @@
#!/bin/sh
# This test script groups together a bunch of fast dEQP variant runs
# to amortize the cost of rebooting the board.
set -ex
EXIT=0
# Test rendering with the gmem path forced when possible (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/gmem \
DEQP_VER=gles31 \
DEQP_FRACTION=5 \
FD_MESA_DEBUG=nobypass \
/install/deqp-runner.sh; then
EXIT=1
fi
# Test rendering with the bypass path forced when possible (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/bypass \
DEQP_VER=gles31 \
DEQP_FRACTION=5 \
FD_MESA_DEBUG=nogmem \
GPU_VERSION=freedreno-a630-bypass \
/install/deqp-runner.sh; then
EXIT=1
fi
# Test rendering with the UBO-to-constants optimization disabled (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/nouboopt \
DEQP_VER=gles31 \
IR3_SHADER_DEBUG=nouboopt \
DEQP_CASELIST_FILTER="functional.*ubo" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles3-565nozs mustpass (~20s)
if ! env \
DEQP_RESULTS_DIR=results/gles3-565nozs \
DEQP_VER=gles3 \
DEQP_CONFIG="rgb565d0s0ms0" \
DEQP_VARIANT="565-no-depth-no-stencil" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles31-565nozs mustpass (~1s)
if ! env \
DEQP_RESULTS_DIR=results/gles31-565nozs \
DEQP_VER=gles31 \
DEQP_CONFIG="rgb565d0s0ms0" \
DEQP_VARIANT="565-no-depth-no-stencil" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles3-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
# if ! env \
# DEQP_RESULTS_DIR=results/gles3-multisample \
# DEQP_VER=gles3 \
# DEQP_CONFIG="rgba8888d24s8ms4" \
# DEQP_VARIANT="multisample" \
# /install/deqp-runner.sh; then
# EXIT=1
# fi
# gles31-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
# if ! env \
# DEQP_RESULTS_DIR=results/gles31-multisample \
# DEQP_VER=gles31 \
# DEQP_CONFIG="rgba8888d24s8ms4" \
# DEQP_VARIANT="multisample" \
# /install/deqp-runner.sh; then
# EXIT=1
# fi
exit $EXIT


@@ -1,13 +0,0 @@
#!/bin/sh
# Init entrypoint for bare-metal devices; calls common init code.
# First stage: very basic setup to bring up network and /dev etc
/init-stage1.sh
# Second stage: run jobs
test $? -eq 0 && /init-stage2.sh
# Wait until the job would have timed out anyway, so we don't spew an "init
# exited" panic.
sleep 6000


@@ -1,101 +0,0 @@
#!/bin/bash
# Boot script for Chrome OS devices attached to a servo debug connector, using
# NFS and TFTP to boot.
# We're run from the root of the repo, make a helper var for our paths
BM=$CI_PROJECT_DIR/install/bare-metal
CI_COMMON=$CI_PROJECT_DIR/install/common
# Runner config checks
if [ -z "$BM_SERIAL" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the CPU serial device."
exit 1
fi
if [ -z "$BM_SERIAL_EC" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the EC serial device for controlling board power"
exit 1
fi
if [ ! -d /nfs ]; then
echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
exit 1
fi
if [ ! -d /tftp ]; then
echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
exit 1
fi
# job config checks
if [ -z "$BM_KERNEL" ]; then
echo "Must set BM_KERNEL to your board's kernel FIT image"
exit 1
fi
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
exit 1
fi
if [ -z "$BM_CMDLINE" ]; then
echo "Must set BM_CMDLINE to your board's kernel command line arguments"
exit 1
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
# Put the kernel/dtb image and the boot command line in the tftp directory for
# the board to find. For normal Mesa development, we build the kernel and
# store it in the docker container that this script is running in.
#
# However, container builds are expensive, so when you're hacking on the
# kernel, it's nice to be able to skip the half-hour container build, plus
# moving that container to the runner. So, if BM_KERNEL is a URL, fetch it
# instead of looking in the container. Note that the kernel build should be
# the output of:
#
# make Image.lzma
#
# mkimage \
# -A arm64 \
# -f auto \
# -C lzma \
# -d arch/arm64/boot/Image.lzma \
# -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
# cheza-image.img
rm -rf /tftp/*
if echo "$BM_KERNEL" | grep -q http; then
apt install -y wget
wget $BM_KERNEL -O /tftp/vmlinuz
else
cp $BM_KERNEL /tftp/vmlinuz
fi
echo "$BM_CMDLINE" > /tftp/cmdline
set +e
python3 $BM/cros_servo_run.py \
--cpu $BM_SERIAL \
--ec $BM_SERIAL_EC
ret=$?
set -e
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
exit $ret


@@ -1,175 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import queue
import re
from serial_buffer import SerialBuffer
import sys
import threading
class CrosServoRun:
def __init__(self, cpu, ec):
# Merged FIFO for the two serial buffers, fed by threads.
self.serial_queue = queue.Queue()
self.sentinel = object()
self.threads_done = 0
self.ec_ser = SerialBuffer(
ec, "results/serial-ec.txt", "R SERIAL-EC> ")
self.cpu_ser = SerialBuffer(
cpu, "results/serial.txt", "R SERIAL-CPU> ")
self.iter_feed_ec = threading.Thread(
target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
self.iter_feed_ec.start()
self.iter_feed_cpu = threading.Thread(
target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
self.iter_feed_cpu.start()
# Feed lines from our serial queues into the merged queue, marking when our
# input is done.
def iter_feed_queue(self, it):
for i in it:
self.serial_queue.put(i)
self.serial_queue.put(self.sentinel)
# Return the next line from the queue, counting how many threads have
# terminated and joining when done
def get_serial_queue_line(self):
line = self.serial_queue.get()
if line == self.sentinel:
self.threads_done = self.threads_done + 1
if self.threads_done == 2:
self.iter_feed_cpu.join()
self.iter_feed_ec.join()
return line
# Returns an iterator for getting the next line.
def serial_queue_lines(self):
return iter(self.get_serial_queue_line, self.sentinel)
def ec_write(self, s):
print("W SERIAL-EC> %s" % s)
self.ec_ser.serial.write(s.encode())
def cpu_write(self, s):
print("W SERIAL-CPU> %s" % s)
self.cpu_ser.serial.write(s.encode())
def print_error(self, message):
RED = '\033[0;31m'
NO_COLOR = '\033[0m'
print(RED + message + NO_COLOR)
def run(self):
# Flush any partial commands in the EC's prompt, then ask for a reboot.
self.ec_write("\n")
self.ec_write("reboot\n")
# This is emitted right when the bootloader pauses to check for input.
# Emit a ^N character to request network boot, because we don't have a
# direct-to-netboot firmware on cheza.
for line in self.serial_queue_lines():
if re.search("load_archive: loading locale_en.bin", line):
self.cpu_write("\016")
break
# The Cheza boards have issues with failing to bring up power to
# the system sometimes, possibly dependent on ambient temperature
# in the farm.
if re.search("POWER_GOOD not seen in time", line):
self.print_error("Detected intermittent poweron failure, restarting run...")
return 2
tftp_failures = 0
for line in self.serial_queue_lines():
if re.search("---. end Kernel panic", line):
return 1
# The Cheza firmware seems to occasionally get stuck looping in
# this error state during TFTP booting, possibly based on amount of
# network traffic around it, but it'll usually recover after a
# reboot.
if re.search("R8152: Bulk read error 0xffffffbf", line):
tftp_failures += 1
if tftp_failures >= 100:
self.print_error("Detected intermittent tftp failure, restarting run...")
return 2
# There are very infrequent bus errors during power management transitions
# on cheza, which we don't expect to be the case on future boards.
if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
self.print_error("Detected cheza power management bus error, restarting run...")
return 2
# These HFI response errors started appearing with the introduction
# of piglit runs. CosmicPenguin says:
#
# "message ID 106 isn't a thing, so likely what happened is that we
# got confused when parsing the HFI queue. If it happened on only
# one run, then memory corruption could be a possible clue"
#
# Given that it seems to trigger randomly near a GPU fault and then
# break many tests after that, just restart the whole run.
if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
self.print_error("Detected cheza power management bus error, restarting run...")
return 2
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
return 0
else:
return 1
self.print_error("Reached the end of the CPU serial log without finding a result")
return 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', type=str,
help='CPU Serial device', required=True)
parser.add_argument(
'--ec', type=str, help='EC Serial device', required=True)
args = parser.parse_args()
servo = CrosServoRun(args.cpu, args.ec)
while True:
retval = servo.run()
if retval != 2:
break
# power down the CPU on the device
servo.ec_write("power off\n")
sys.exit(retval)
if __name__ == '__main__':
main()

View File

@@ -1,30 +0,0 @@
#!/bin/bash
set -e
STRINGS=$(mktemp)
ERRORS=$(mktemp)
trap "rm $STRINGS; rm $ERRORS;" EXIT
FILE=$1
shift 1
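# -f adds a string that ends the wait when it appears in $FILE;
# -e adds a string that ends the wait and also makes the script exit non-zero.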
while getopts "f:e:" opt; do
case $opt in
f) echo "$OPTARG" >> $STRINGS;;
e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
esac
done
shift $((OPTIND -1))
echo "Waiting for $FILE to say one of following strings"
cat $STRINGS
while ! egrep -wf $STRINGS $FILE; do
sleep 2
done
if egrep -wf $ERRORS $FILE; then
exit 1
fi


@@ -1,148 +0,0 @@
#!/bin/bash
BM=$CI_PROJECT_DIR/install/bare-metal
CI_COMMON=$CI_PROJECT_DIR/install/common
if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
echo "BM_SERIAL:"
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
echo "BM_SERIAL_SCRIPT:"
echo " This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
exit 1
fi
if [ -z "$BM_POWERUP" ]; then
echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should reset the device and begin its boot sequence"
echo "such that it pauses at fastboot."
exit 1
fi
if [ -z "$BM_POWERDOWN" ]; then
echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power off the device."
exit 1
fi
if [ -z "$BM_FASTBOOT_SERIAL" ]; then
echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This must be the a stable-across-resets fastboot serial number."
exit 1
fi
if [ -z "$BM_KERNEL" ]; then
echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
exit 1
fi
if [ -z "$BM_DTB" ]; then
echo "Must set BM_DTB to your board's DTB file in the job's variables:"
exit 1
fi
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
exit 1
fi
if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
BM_FASTBOOT_NFSROOT=1
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results/
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
# Root on NFS, no need for an initramfs.
rm -f rootfs.cpio.gz
touch rootfs.cpio
gzip rootfs.cpio
else
# Create the rootfs in a temp dir
rsync -a --delete $BM_ROOTFS/ rootfs/
. $BM/rootfs-setup.sh rootfs
# Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
# these devices use it and it would take up space in the initrd.
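# Piglit jobs also strip dEQP and the heavyweight GL4.x/fp64/int64 tests out of
# the initrd; dEQP jobs strip piglit and python instead.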
if [ -n "$PIGLIT_PROFILES" ]; then
EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
else
EXCLUDE_FILTER="piglit|python"
fi
pushd rootfs
find -H | \
egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
egrep -v "traces-db|apitrace|renderdoc" | \
egrep -v $EXCLUDE_FILTER | \
cpio -H newc -o | \
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
popd
fi
# Make the combined kernel image and dtb for passing to fastboot. For normal
# Mesa development, we build the kernel and store it in the docker container
# that this script is running in.
#
# However, container builds are expensive, so when you're hacking on the
# kernel, it's nice to be able to skip the half-hour container build, plus
# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
# fetch them instead of looking in the container.
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
apt install -y wget
wget $BM_KERNEL -O kernel
wget $BM_DTB -O dtb
cat kernel dtb > Image.gz-dtb
rm kernel dtb
else
cat $BM_KERNEL $BM_DTB > Image.gz-dtb
fi
mkdir -p artifacts
abootimg \
--create artifacts/fastboot.img \
-k Image.gz-dtb \
-r rootfs.cpio.gz \
-c cmdline="$BM_CMDLINE"
rm Image.gz-dtb
export PATH=$BM:$PATH
# Start background command for talking to serial if we have one.
if [ -n "$BM_SERIAL_SCRIPT" ]; then
$BM_SERIAL_SCRIPT > results/serial-output.txt &
while [ ! -e results/serial-output.txt ]; do
sleep 1
done
fi
set +e
$BM/fastboot_run.py \
--dev="$BM_SERIAL" \
--fbserial="$BM_FASTBOOT_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN"
ret=$?
set -e
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
fi
exit $ret


@@ -1,118 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading
class FastbootRun:
def __init__(self, args):
self.powerup = args.powerup
# We would like something like a 1 minute timeout, but the piglit traces
# jobs stall out for long periods of time.
self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)
def print_error(self, message):
RED = '\033[0;31m'
NO_COLOR = '\033[0m'
print(RED + message + NO_COLOR)
def logged_system(self, cmd):
print("Running '{}'".format(cmd))
return os.system(cmd)
def run(self):
if self.logged_system(self.powerup) != 0:
return 1
fastboot_ready = False
for line in self.ser.lines():
if re.search("fastboot: processing commands", line) or \
re.search("Listening for fastboot command on", line):
fastboot_ready = True
break
if re.search("data abort", line):
self.print_error("Detected crash during boot, restarting run...")
return 2
if not fastboot_ready:
self.print_error("Failed to get to fastboot prompt, restarting run...")
return 2
if self.logged_system(self.fastboot) != 0:
return 1
for line in self.ser.lines():
if re.search("---. end Kernel panic", line):
return 1
# The db820c boards intermittently reboot. Just restart the run
# when we see a reboot after we got past fastboot.
if re.search("PON REASON", line):
self.print_error("Detected spontaneous reboot, restarting run...")
return 2
# db820c sometimes wedges around iommu fault recovery
if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
self.print_error(
"Detected kernel soft lockup, restarting run...")
return 2
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
return 0
else:
return 1
self.print_error("Reached the end of the CPU serial log without finding a result, restarting run...")
return 2
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
args = parser.parse_args()
fastboot = FastbootRun(args)
while True:
retval = fastboot.run()
if retval != 2:
break
fastboot = FastbootRun(args)
fastboot.logged_system(args.powerdown)
sys.exit(retval)
if __name__ == '__main__':
main()


@@ -1,10 +0,0 @@
#!/bin/bash
relay=$1
if [ -z "$relay" ]; then
echo "Must supply a relay arg"
exit 1
fi
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay


@@ -1,19 +0,0 @@
#!/usr/bin/python3
import sys
import serial
mode = sys.argv[1]
relay = sys.argv[2]
# our relays are "off" means "board is powered".
mode_swap = {
"on" : "off",
"off" : "on",
}
mode = mode_swap[mode]
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
command = "relay {} {}\n\r".format(mode, relay)
ser.write(command.encode())
ser.close()


@@ -1,12 +0,0 @@
#!/bin/bash
relay=$1
if [ -z "$relay" ]; then
echo "Must supply a relay arg"
exit 1
fi
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
sleep 5
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay


@@ -1,17 +0,0 @@
#!/bin/bash
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must supply the PoE Interface to power up"
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must supply the PoE Switch host"
exit 1
fi
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
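# mib-2.105.1.1.1.3 appears to be pethPsePortAdminEnable from the standard
# POWER-ETHERNET-MIB (1 = on, 2 = off, matching SNMP_ON/SNMP_OFF below); the 48
# offset presumably maps BM_POE_INTERFACE onto this switch's port numbering.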
SNMP_ON="i 1"
SNMP_OFF="i 2"
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"


@@ -1,19 +0,0 @@
#!/bin/bash
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must supply the PoE Interface to power up"
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must supply the PoE Switch host"
exit 1
fi
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
SNMP_ON="i 1"
SNMP_OFF="i 2"
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
sleep 3s
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"


@@ -1,147 +0,0 @@
#!/bin/bash
# Boot script for devices attached to a PoE switch, using NFS for the root
# filesystem.
# We're run from the root of the repo, make a helper var for our paths
BM=$CI_PROJECT_DIR/install/bare-metal
CI_COMMON=$CI_PROJECT_DIR/install/common
# Runner config checks
if [ -z "$BM_SERIAL" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the serial port to listen the device."
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch address to connect for powering up/down devices."
exit 1
fi
if [ -z "$BM_POE_USERNAME" ]; then
echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch username."
exit 1
fi
if [ -z "$BM_POE_PASSWORD" ]; then
echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch password."
exit 1
fi
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch interface where the device is connected."
exit 1
fi
if [ -z "$BM_POWERUP" ]; then
echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power up the device and begin its boot sequence."
exit 1
fi
if [ -z "$BM_POWERDOWN" ]; then
echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power off the device."
exit 1
fi
if [ ! -d /nfs ]; then
echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
exit 1
fi
if [ ! -d /tftp ]; then
echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
exit 1
fi
# job config checks
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
exit 1
fi
if [ -z "$BM_BOOTFS" ]; then
echo "Must set /boot files for the TFTP boot in the job's variables"
exit 1
fi
if [ -z "$BM_CMDLINE" ]; then
echo "Must set BM_CMDLINE to your board's kernel command line arguments"
exit 1
fi
if [ -z "$BM_BOOTCONFIG" ]; then
echo "Must set BM_BOOTCONFIG to your board's required boot configuration arguments"
exit 1
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
# If BM_BOOTFS is an URL, download it
if echo $BM_BOOTFS | grep -q http; then
apt install -y wget
wget ${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS -O /tmp/bootfs.tar
BM_BOOTFS=/tmp/bootfs.tar
fi
# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
if [ -f $BM_BOOTFS ]; then
mkdir -p /tmp/bootfs
tar xf $BM_BOOTFS -C /tmp/bootfs
BM_BOOTFS=/tmp/bootfs
fi
# Install kernel modules (it could be either in /lib/modules or
# /usr/lib/modules, but we want to install in the latter)
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a --delete $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
[ -d $BM_BOOTFS/lib/modules ] && rsync -a --delete $BM_BOOTFS/lib/modules/ /nfs/usr/lib/modules/
# Install kernel image + bootloader files
rsync -a --delete $BM_BOOTFS/boot/ /tftp/
# Create the rootfs in the NFS directory
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
echo "$BM_CMDLINE" > /tftp/cmdline.txt
# Add some required options in config.txt
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
set +e
ATTEMPTS=2
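# poe_run.py exits 2 when it never saw the boot sequence; allow one retry in
# that case, otherwise stop after the first attempt.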
while [ $((ATTEMPTS--)) -gt 0 ]; do
python3 $BM/poe_run.py \
--dev="$BM_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN" \
--timeout="${BM_POE_TIMEOUT:-60}"
ret=$?
if [ $ret -eq 2 ]; then
echo "Did not detect boot sequence, retrying..."
else
ATTEMPTS=0
fi
done
set -e
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
exit $ret


@@ -1,96 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Igalia, S.L.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading
class PoERun:
def __init__(self, args):
self.powerup = args.powerup
self.powerdown = args.powerdown
self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)
def print_error(self, message):
RED = '\033[0;31m'
NO_COLOR = '\033[0m'
print(RED + message + NO_COLOR)
def logged_system(self, cmd):
print("Running '{}'".format(cmd))
return os.system(cmd)
def run(self):
if self.logged_system(self.powerup) != 0:
return 1
boot_detected = False
for line in self.ser.lines():
if re.search("Booting Linux", line):
boot_detected = True
break
if not boot_detected:
self.print_error("Something wrong; couldn't detect the boot start up sequence")
return 2
for line in self.ser.lines():
if re.search("---. end Kernel panic", line):
return 1
# Binning memory problems
if re.search("binner overflow mem", line):
self.print_error("Memory overflow in the binner; GPU hang")
return 1
result = re.search("hwci: mesa: (\S*)", line)
if result:
if result.group(1) == "pass":
return 0
else:
return 1
self.print_error("Reached the end of the CPU serial log without finding a result")
return 2
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
parser.add_argument('--timeout', type=int, default=60,
help='time in seconds to wait for activity', required=False)
args = parser.parse_args()
poe = PoERun(args)
retval = poe.run()
poe.logged_system(args.powerdown)
sys.exit(retval)
if __name__ == '__main__':
main()


@@ -1,24 +0,0 @@
#!/bin/bash
rootfs_dst=$1
mkdir -p $rootfs_dst/results
# Set up the init script that brings up the system.
cp $BM/bm-init.sh $rootfs_dst/init
cp $CI_COMMON/init*.sh $rootfs_dst/
cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
set +x
# Pass through relevant env vars from the gitlab job to the baremetal init script
"$CI_COMMON"/generate-env.sh > $rootfs_dst/set-job-env-vars.sh
chmod +x $rootfs_dst/set-job-env-vars.sh
echo "Variables passed through:"
cat $rootfs_dst/set-job-env-vars.sh
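# The JWT is appended after the cat above and inside the set +x region,
# presumably so the secret never shows up in the job log.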
echo "export CI_JOB_JWT=${CI_JOB_JWT@Q}" >> $rootfs_dst/set-job-env-vars.sh
set -x
# Add the Mesa drivers we built, and make a consistent symlink to them.
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/


@@ -1,153 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
from datetime import datetime, timezone
import queue
import serial
import threading
import time
class SerialBuffer:
def __init__(self, dev, filename, prefix, timeout = None):
self.filename = filename
self.dev = dev
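# With a serial device we read from it and log to filename; without one we
# instead tail filename, which some other process is appending to.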
if dev:
self.f = open(filename, "wb+")
self.serial = serial.Serial(dev, 115200, timeout=timeout if timeout else 10)
else:
self.f = open(filename, "rb")
self.byte_queue = queue.Queue()
self.line_queue = queue.Queue()
self.prefix = prefix
self.timeout = timeout
self.sentinel = object()
if self.dev:
self.read_thread = threading.Thread(
target=self.serial_read_thread_loop, daemon=True)
else:
self.read_thread = threading.Thread(
target=self.serial_file_read_thread_loop, daemon=True)
self.read_thread.start()
self.lines_thread = threading.Thread(
target=self.serial_lines_thread_loop, daemon=True)
self.lines_thread.start()
# Thread that just reads the bytes from the serial device to try to keep from
# buffer overflowing it. If nothing is received in 1 minute, it finalizes.
def serial_read_thread_loop(self):
greet = "Serial thread reading from %s\n" % self.dev
self.byte_queue.put(greet.encode())
while True:
try:
b = self.serial.read()
if len(b) > 0:
self.byte_queue.put(b)
elif self.timeout:
self.byte_queue.put(self.sentinel)
break
except Exception as err:
print(self.prefix + str(err))
self.byte_queue.put(self.sentinel)
break
# Thread that just reads the bytes from the file of serial output that some
# other process is appending to.
def serial_file_read_thread_loop(self):
greet = "Serial thread reading from %s\n" % self.filename
self.byte_queue.put(greet.encode())
while True:
line = self.f.readline()
if line:
self.byte_queue.put(line)
else:
time.sleep(0.1)
# Thread that processes the stream of bytes to 1) log to stdout, 2) log to
# file, 3) add to the queue of lines to be read by program logic
def serial_lines_thread_loop(self):
line = bytearray()
while True:
bytes = self.byte_queue.get(block=True)
if bytes == self.sentinel:
self.read_thread.join()
self.line_queue.put(self.sentinel)
break
# Write our data to the output file if we're the ones reading from
# the serial device
if self.dev:
self.f.write(bytes)
self.f.flush()
for b in bytes:
line.append(b)
if b == b'\n'[0]:
line = line.decode(errors="replace")
time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
print("{endc}{time} {prefix}{line}".format(
time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
self.line_queue.put(line)
line = bytearray()
def get_line(self):
line = self.line_queue.get()
if line == self.sentinel:
self.lines_thread.join()
return line
def lines(self):
return iter(self.get_line, self.sentinel)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dev', type=str, help='Serial device')
parser.add_argument('--file', type=str,
help='Filename for serial output', required=True)
parser.add_argument('--prefix', type=str,
help='Prefix for logging serial to stdout', nargs='?')
args = parser.parse_args()
ser = SerialBuffer(args.dev, args.file, args.prefix or "")
for line in ser.lines():
# We're just using this as a logger, so eat the produced lines and drop
# them
pass
if __name__ == '__main__':
main()


@@ -1,14 +0,0 @@
#!/bin/sh
while true; do
devcds=`find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null`
for i in $devcds; do
echo "Found a devcoredump at $i."
if cp $i /results/first.devcore; then
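# Writing to the devcoredump's data node tells the kernel to discard it,
# so it is freed right away instead of lingering until it expires.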
echo 1 > $i
echo "Saved to the job artifacts at /first.devcore"
exit 0
fi
done
sleep 10
done


@@ -1,84 +0,0 @@
#!/bin/bash
for var in \
ASAN_OPTIONS \
BASE_SYSTEM_FORK_HOST_PREFIX \
BASE_SYSTEM_MAINLINE_HOST_PREFIX \
CI_COMMIT_BRANCH \
CI_COMMIT_TITLE \
CI_JOB_ID \
CI_JOB_URL \
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
CI_MERGE_REQUEST_TITLE \
CI_NODE_INDEX \
CI_NODE_TOTAL \
CI_PAGES_DOMAIN \
CI_PIPELINE_ID \
CI_PROJECT_DIR \
CI_PROJECT_NAME \
CI_PROJECT_PATH \
CI_PROJECT_ROOT_NAMESPACE \
CI_RUNNER_DESCRIPTION \
CI_SERVER_URL \
DEQP_CASELIST_FILTER \
DEQP_CASELIST_INV_FILTER \
DEQP_CONFIG \
DEQP_EXPECTED_RENDERER \
DEQP_FRACTION \
DEQP_HEIGHT \
DEQP_NO_SAVE_RESULTS \
DEQP_PARALLEL \
DEQP_RESULTS_DIR \
DEQP_RUNNER_OPTIONS \
DEQP_VARIANT \
DEQP_VER \
DEQP_WIDTH \
DEVICE_NAME \
DRIVER_NAME \
EGL_PLATFORM \
FDO_CI_CONCURRENT \
FDO_UPSTREAM_REPO \
FD_MESA_DEBUG \
FLAKES_CHANNEL \
GPU_VERSION \
HWCI_FREQ_MAX \
HWCI_KERNEL_MODULES \
HWCI_START_XORG \
HWCI_TEST_SCRIPT \
IR3_SHADER_DEBUG \
JOB_ARTIFACTS_BASE \
JOB_RESULTS_PATH \
JOB_ROOTFS_OVERLAY_PATH \
MESA_BUILD_PATH \
MESA_GL_VERSION_OVERRIDE \
MESA_GLSL_VERSION_OVERRIDE \
MESA_GLES_VERSION_OVERRIDE \
MESA_VK_IGNORE_CONFORMANCE_WARNING \
MINIO_HOST \
NIR_VALIDATE \
PAN_MESA_DEBUG \
PIGLIT_FRACTION \
PIGLIT_JUNIT_RESULTS \
PIGLIT_NO_WINDOW \
PIGLIT_OPTIONS \
PIGLIT_PLATFORM \
PIGLIT_PROFILES \
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
PIGLIT_REPLAY_SUBCOMMAND \
PIGLIT_REPLAY_DESCRIPTION_FILE \
PIGLIT_REPLAY_DEVICE_NAME \
PIGLIT_REPLAY_EXTRA_ARGS \
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
PIGLIT_REPLAY_UPLOAD_TO_MINIO \
PIGLIT_RESULTS \
PIGLIT_TESTS \
PIPELINE_ARTIFACTS_BASE \
TEST_LD_PRELOAD \
TU_DEBUG \
VK_CPU \
VK_DRIVER \
; do
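# ${!var+x} expands to "x" only if the variable named by $var is set, and
# ${!var@Q} expands to that variable's value quoted for safe shell re-use.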
if [ -n "${!var+x}" ]; then
echo "export $var=${!var@Q}"
fi
done


@@ -1,22 +0,0 @@
#!/bin/sh
# Very early init, used to make sure devices and network are set up and
# reachable.
set -ex
cd /
mount -t proc none /proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp
echo "nameserver 8.8.8.8" > /etc/resolv.conf
[ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
# Set the time so we can validate certificates before we fetch anything;
# however as not all DUTs have network, make this non-fatal.
for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true


@@ -1,75 +0,0 @@
#!/bin/sh
# Second-stage init, used to set up devices and our job environment before
# running tests.
. /set-job-env-vars.sh
set -ex
# Set up any devices required by the jobs
[ -z "$HWCI_KERNEL_MODULES" ] || (echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe)
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
ln -sf $CI_PROJECT_DIR/install /install
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp
# Make sure Python can find all our imports
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
if [ "$HWCI_FREQ_MAX" = "true" ]; then
# Disable GPU frequency scaling
DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true
# Disable CPU frequency scaling
echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
# Disable GPU runtime power management
GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
fi
# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them).
./capture-devcoredump.sh &
# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
# without using -displayfd you can race with Xorg's startup), but xinit will eat
# your client's return code
if [ -n "$HWCI_START_XORG" ]; then
echo "touch /xorg-started; sleep 100000" > /xorg-script
env \
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
# Wait for xorg to be ready for connections.
for i in 1 2 3 4 5; do
if [ -e /xorg-started ]; then
break
fi
sleep 5
done
export DISPLAY=:0
fi
RESULT=fail
if sh $HWCI_TEST_SCRIPT; then
RESULT=pass
rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME
fi
# upload artifacts
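# minio_results=<host/path> is expected to have been put on the kernel command
# line by the job; recover it here to know where to push the results tarball.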
MINIO=$(cat /proc/cmdline | tr ' ' '\n' | grep minio_results | cut -d '=' -f 2 || true)
if [ -n "$MINIO" ]; then
tar -czf results.tar.gz results/;
ci-fairy minio login "$CI_JOB_JWT";
ci-fairy minio cp results.tar.gz minio://"$MINIO"/results.tar.gz;
fi
echo "hwci: mesa: $RESULT"


@@ -1,21 +0,0 @@
#!/bin/sh
set -ex
_XORG_SCRIPT="/xorg-script"
_FLAG_FILE="/xorg-started"
echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
if [ "x$1" != "x" ]; then
export LD_LIBRARY_PATH="${1}/lib"
export LIBGL_DRIVERS_PATH="${1}/lib/dri"
fi
xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &
# Wait for xorg to be ready for connections.
for i in 1 2 3 4 5; do
if [ -e "${_FLAG_FILE}" ]; then
break
fi
sleep 5
done


@@ -1,56 +0,0 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_MFD_RK808=y
CONFIG_REGULATOR_RK808=y
CONFIG_RTC_DRV_RK808=y
CONFIG_COMMON_CLK_RK808=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=n
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y


@@ -1,148 +0,0 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_MSM=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7533=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_STMMAC_ETH=y
CONFIG_TYPEC_FUSB302=y
CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
# MSM platform bits
CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_RPMPD=y
CONFIG_SDM_GPUCC_845=y
CONFIG_SDM_VIDEOCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SDM_LPASSCC_845=y
CONFIG_SDM_CAMCC_845=y
CONFIG_RESET_QCOM_PDC=y
CONFIG_DRM_TI_SN65DSI86=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI_QCOM_GENI=y
CONFIG_PHY_QCOM_QUSB2=y
CONFIG_PHY_QCOM_QMP=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
CONFIG_POWER_RESET_QCOM_PON=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_MSM8916=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
CONFIG_INTERCONNECT_QCOM_SC7180=y
CONFIG_QCOM_WDT=y
CONFIG_CRYPTO_DEV_QCOM_RNG=y
# db410c ethernet
CONFIG_USB_RTL8152=y
# db820c ethernet
CONFIG_ATL1C=y
CONFIG_ARCH_ALPINE=n
CONFIG_ARCH_BCM2835=n
CONFIG_ARCH_BCM_IPROC=n
CONFIG_ARCH_BERLIN=n
CONFIG_ARCH_BRCMSTB=n
CONFIG_ARCH_EXYNOS=n
CONFIG_ARCH_K3=n
CONFIG_ARCH_LAYERSCAPE=n
CONFIG_ARCH_LG1K=n
CONFIG_ARCH_HISI=n
CONFIG_ARCH_MVEBU=n
CONFIG_ARCH_SEATTLE=n
CONFIG_ARCH_SYNQUACER=n
CONFIG_ARCH_RENESAS=n
CONFIG_ARCH_R8A774A1=n
CONFIG_ARCH_R8A774C0=n
CONFIG_ARCH_R8A7795=n
CONFIG_ARCH_R8A7796=n
CONFIG_ARCH_R8A77965=n
CONFIG_ARCH_R8A77970=n
CONFIG_ARCH_R8A77980=n
CONFIG_ARCH_R8A77990=n
CONFIG_ARCH_R8A77995=n
CONFIG_ARCH_STRATIX10=n
CONFIG_ARCH_TEGRA=n
CONFIG_ARCH_SPRD=n
CONFIG_ARCH_THUNDER=n
CONFIG_ARCH_THUNDER2=n
CONFIG_ARCH_UNIPHIER=n
CONFIG_ARCH_VEXPRESS=n
CONFIG_ARCH_XGENE=n
CONFIG_ARCH_ZX=n
CONFIG_ARCH_ZYNQMP=n
# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_USER_HELPER=n
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
# For amlogic
CONFIG_MESON_GXL_PHY=y
CONFIG_MDIO_BUS_MUX_MESON_G12A=y
# For Mediatek
CONFIG_DRM_MEDIATEK=y
CONFIG_PWM_MEDIATEK=y
CONFIG_DRM_MEDIATEK_HDMI=y
CONFIG_GNSS_MTK_SERIAL=y
CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y


@@ -1,35 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
# Fetch the arm-built rootfs image and unpack it in our x86 container (saves
# network transfer, disk usage, and runtime on test jobs)
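# Prefer the upstream repo's prebuilt artifacts when their "done" marker exists;
# otherwise fall back to artifacts built in this fork's own pipeline.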
if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
else
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
fi
wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz
mkdir -p /rootfs-$arch
tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz
rm rootfs.tgz
if [[ $arch == "arm64" ]]; then
mkdir -p /baremetal-files
pushd /baremetal-files
wget ${ARTIFACTS_URL}/Image
wget ${ARTIFACTS_URL}/Image.gz
wget ${ARTIFACTS_URL}/cheza-kernel
DEVICE_TREES="apq8016-sbc.dtb apq8096-db820c.dtb"
for DTB in $DEVICE_TREES; do
wget ${ARTIFACTS_URL}/$DTB
done
popd
fi


@@ -1,18 +0,0 @@
#!/bin/bash
set -ex
APITRACE_VERSION="170424754bb46002ba706e16ee5404b61988d74a"
git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
pushd /apitrace
git checkout "$APITRACE_VERSION"
git submodule update --init --depth 1 --recursive
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
ninja -C _build
mkdir build
cp _build/apitrace build
cp _build/eglretrace build
${STRIP_CMD:-strip} build/*
find . -not -path './build' -not -path './build/*' -delete
popd


@@ -1,50 +0,0 @@
#!/bin/bash
set -ex
# Pull down repositories that crosvm depends on to cros checkout-like locations.
CROS_ROOT=/
THIRD_PARTY_ROOT=$CROS_ROOT/third_party
mkdir -p $THIRD_PARTY_ROOT
AOSP_EXTERNAL_ROOT=$CROS_ROOT/aosp/external
mkdir -p $AOSP_EXTERNAL_ROOT
PLATFORM2_ROOT=/platform2
PLATFORM2_COMMIT=2079dd5fcd61f1ac39e2fc16595956617f3f1e9e
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform2 $PLATFORM2_ROOT
pushd $PLATFORM2_ROOT
git checkout $PLATFORM2_COMMIT
popd
# minijail does not exist in upstream linux distros.
MINIJAIL_COMMIT=5f9e3001c61626d2863dad91248ba8496c3ef511
git clone --single-branch --no-checkout https://android.googlesource.com/platform/external/minijail $AOSP_EXTERNAL_ROOT/minijail
pushd $AOSP_EXTERNAL_ROOT/minijail
git checkout $MINIJAIL_COMMIT
make
cp libminijail.so /usr/lib/x86_64-linux-gnu/
popd
# Pull the cras library for audio access.
ADHD_COMMIT=5068bdd18b51de8f2d5bcff754cdecda80de8f44
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/third_party/adhd $THIRD_PARTY_ROOT/adhd
pushd $THIRD_PARTY_ROOT/adhd
git checkout $ADHD_COMMIT
popd
CROSVM_VERSION=f70350ba51e9631e3b7fe711c0296e041a61a499
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"
RUSTFLAGS='-L native=/usr/local/lib' cargo install \
-j ${FDO_CI_CONCURRENT:-4} \
--locked \
--features 'default-no-sandbox gpu x virtio-gpu-next' \
--path . \
--root /usr/local \
$EXTRA_CARGO_ARGS
popd
rm -rf $PLATFORM2_ROOT $AOSP_EXTERNAL_ROOT/minijail $THIRD_PARTY_ROOT/adhd /platform/crosvm


@@ -1,9 +0,0 @@
#!/bin/bash
set -ex
cargo install --locked deqp-runner \
-j ${FDO_CI_CONCURRENT:-4} \
--version 0.7.2 \
--root /usr/local \
$EXTRA_CARGO_ARGS


@@ -1,80 +0,0 @@
#!/bin/bash
set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/KhronosGroup/VK-GL-CTS.git \
-b vulkan-cts-1.2.6.2 \
--depth 1 \
/VK-GL-CTS
pushd /VK-GL-CTS
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
# libpng (sigh). The archives get their checksums checked anyway, and git
# always goes through ssh or https.
python3 external/fetch_sources.py --insecure
mkdir -p /deqp
# Save the testlog stylesheets:
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
popd
pushd /deqp
# When including EGL/X11 testing, do that build first and save off its
# deqp-egl binary.
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=x11_egl_glx \
-DCMAKE_BUILD_TYPE=Release \
$EXTRA_CMAKE_ARGS
ninja modules/egl/deqp-egl
cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
-DCMAKE_BUILD_TYPE=Release \
$EXTRA_CMAKE_ARGS
ninja
mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl
# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
cp /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt \
/deqp/mustpass/vk-master.txt
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
/deqp/mustpass/.
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt \
/deqp/mustpass/.
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-master.txt \
/deqp/mustpass/.
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
/deqp/mustpass/.
# Save *some* executor utils, but otherwise strip things down
# to reduce the deqp build size:
mkdir /deqp/executor.save
cp /deqp/executor/testlog-to-* /deqp/executor.save
rm -rf /deqp/executor
mv /deqp/executor.save /deqp/executor
rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
rm -rf /deqp/external/openglcts/modules/cts-runner
rm -rf /deqp/modules/internal
rm -rf /deqp/execserver
rm -rf /deqp/framework
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
${STRIP_CMD:-strip} external/openglcts/modules/glcts
${STRIP_CMD:-strip} modules/*/deqp-*
du -sh *
rm -rf /VK-GL-CTS
popd


@@ -1,14 +0,0 @@
#!/bin/bash
set -ex
git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
git checkout 72088685d90bc814d14aad5505354ffa8a642789
git submodule update --init
mkdir build
cd build
cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C . install
cd ../..
rm -rf Fossilize


@@ -1,19 +0,0 @@
#!/bin/bash
set -ex
GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1
git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct
pushd /gfxreconstruct
git checkout "$GFXRECONSTRUCT_VERSION"
git submodule update --init
git submodule update
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C _build gfxrecon-replay gfxrecon-info
mkdir -p build/bin
install _build/tools/replay/gfxrecon-replay build/bin
install _build/tools/info/gfxrecon-info build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd


@@ -1,16 +0,0 @@
#!/bin/bash
set -ex
PARALLEL_DEQP_RUNNER_VERSION=6596b71cf37a7efb4d54acd48c770ed2d4ad6b7e
git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
pushd /parallel-deqp-runner
git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
meson . _build
ninja -C _build hang-detection
mkdir -p build/bin
install _build/hang-detection build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd


@@ -1,51 +0,0 @@
#!/bin/bash
set -ex
mkdir -p kernel
wget -qO- ${KERNEL_URL} | tar -xj --strip-components=1 -C kernel
pushd kernel
# The kernel doesn't like the gold linker (or the old lld in our debians).
# Sneak in some override symlinks during kernel build until we can update
# debian (they'll get blown away by the rm of the kernel dir at the end).
mkdir -p ld-links
for i in /usr/bin/*-ld /usr/bin/ld; do
i=`basename $i`
ln -sf /usr/bin/$i.bfd ld-links/$i
done
export PATH=`pwd`/ld-links:$PATH
export LOCALVERSION="`basename $KERNEL_URL`"
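# merge_config.sh overlays our per-arch CI fragment on top of the chosen defconfig.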
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make ${KERNEL_IMAGE_NAME}
for image in ${KERNEL_IMAGE_NAME}; do
cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
cp ${DEVICE_TREES} /lava-files/.
fi
if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi
if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
make Image.lzma
mkimage \
-f auto \
-A arm \
-O linux \
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
/lava-files/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
fi
popd
rm -rf kernel


@@ -1,30 +0,0 @@
#!/bin/bash
set -ex
export LLVM_CONFIG="llvm-config-11"
$LLVM_CONFIG --version
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/llvm/llvm-project \
--depth 1 \
-b llvmorg-12.0.0-rc3 \
/llvm-project
mkdir /libclc
pushd /libclc
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
ninja
ninja install
popd
# Work around a cmake vs. debian packaging mismatch.
mkdir -p /usr/lib/clc
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
du -sh *
rm -rf /libclc /llvm-project


@@ -1,14 +0,0 @@
#!/bin/bash
set -ex
export LIBDRM_VERSION=libdrm-2.4.107
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
cd $LIBDRM_VERSION
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
ninja -C build install
cd ..
rm -rf $LIBDRM_VERSION


@@ -1,23 +0,0 @@
#!/bin/bash
set -ex
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout 11025faf96df23debbefd3678fe959eaa35a50f0
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
rm -rf target_api
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
find ! -regex "^\.$" \
! -regex "^\.\/piglit.*" \
! -regex "^\.\/framework.*" \
! -regex "^\.\/bin$" \
! -regex "^\.\/bin\/replayer\.py" \
! -regex "^\.\/templates.*" \
! -regex "^\.\/tests$" \
! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
fi
popd


@@ -1,31 +0,0 @@
#!/bin/bash
# Note that this script is not actually "building" rust, but build- is the
# convention for the shared helpers for putting stuff in our containers.
set -ex
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
# are just available to all build jobs.
mkdir -p $HOME/.cargo
ln -s /usr/local/bin $HOME/.cargo/bin
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes
# with.
#
# Pick the rust compiler (1.41) available in Debian stable, and pick a specific
# snapshot from rustup so the compiler doesn't drift on us.
wget https://sh.rustup.rs -O - | \
sh -s -- -y --default-toolchain 1.41.1-2020-02-27
# Set up a config file for cross compiling -- cargo needs your system cc for
# linking in cross builds, but doesn't know what you want to use for system cc.
cat > /root/.cargo/config <<EOF
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
EOF
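# Illustrative usage only (not part of the original script): with the linkers
# above in place, a cross build for armhf looks roughly like
#   cargo build --release --target armv7-unknown-linux-gnueabihf
# which matches the EXTRA_CARGO_ARGS="--target ..." handling in the rootfs
# build script later in this series.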


@@ -1,20 +0,0 @@
#!/bin/bash
set -ex
mkdir -p /epoxy
pushd /epoxy
wget -qO- https://github.com/anholt/libepoxy/releases/download/1.5.8/libepoxy-1.5.8.tar.xz | tar -xJ --strip-components=1
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /epoxy
VIRGLRENDERER_VERSION=08e11a495429c222f150b6d6f8c4936f2f0e0759
git clone https://gitlab.freedesktop.org/virgl/virglrenderer.git --single-branch --no-checkout /virglrenderer
pushd /virglrenderer
git checkout "$VIRGLRENDERER_VERSION"
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /virglrenderer


@@ -1,43 +0,0 @@
#!/bin/bash
set -ex
VKD3D_PROTON_VERSION="2.3.1"
VKD3D_PROTON_COMMIT="3ed3526332f53d7d35cf1b685fa8096b01f26ff0"
VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION"
function build_arch {
local arch="$1"
shift
meson "$@" \
-Denable_tests=true \
--buildtype release \
--prefix "$VKD3D_PROTON_BUILD_DIR" \
--strip \
--bindir "x${arch}" \
--libdir "x${arch}" \
"$VKD3D_PROTON_BUILD_DIR/build.${arch}"
ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install
install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/"*.exe
}
git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b "v$VKD3D_PROTON_VERSION" --no-checkout "$VKD3D_PROTON_SRC_DIR"
pushd "$VKD3D_PROTON_SRC_DIR"
git checkout "$VKD3D_PROTON_COMMIT"
git submodule update --init --recursive
git submodule update --recursive
build_arch 64 --cross-file build-win64.txt
build_arch 86 --cross-file build-win32.txt
cp "setup_vkd3d_proton.sh" "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
chmod +x "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
popd
"$VKD3D_PROTON_BUILD_DIR"/setup_vkd3d_proton.sh install
rm -rf "$VKD3D_PROTON_BUILD_DIR"
rm -rf "$VKD3D_PROTON_SRC_DIR"


@@ -1,10 +0,0 @@
#!/bin/sh
if test -f /etc/debian_version; then
apt-get autoremove -y --purge
fi
# Clean up any build cache for rust.
rm -rf /.cargo
ccache --show-stats


@@ -1,36 +0,0 @@
#!/bin/sh
if test -f /etc/debian_version; then
CCACHE_PATH=/usr/lib/ccache
else
CCACHE_PATH=/usr/lib64/ccache
fi
# Common setup among container builds before we get to building code.
export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
export CCACHE_DIR=/cache/mesa/ccache
export PATH=$CCACHE_PATH:$PATH
# CMake ignores $PATH, so we have to force CC/CXX to the ccache versions.
export CC="${CCACHE_PATH}/gcc"
export CXX="${CCACHE_PATH}/g++"
# Force linkers to gold, since it's so much faster for building. We can't use
# lld because we're on old debian and it's buggy. mingw fails meson builds
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
find /usr/bin -name \*-ld -o -name ld | \
grep -v mingw | \
xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'
ccache --show-stats
# Make a wrapper script for ninja to always include the -j flags
echo '#!/bin/sh -x' > /usr/local/bin/ninja
echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' >> /usr/local/bin/ninja
chmod +x /usr/local/bin/ninja
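# Illustrative only: with the wrapper in place, a plain call such as
#   FDO_CI_CONCURRENT=8 ninja -C _build install
# ends up running /usr/bin/ninja -j8 -C _build install, so individual build
# scripts never have to pass -j themselves.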
# Set MAKEFLAGS so that all make invocations in container builds include the
# flags (doesn't apply to non-container builds, but we don't run make there)
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"


@@ -1,35 +0,0 @@
#!/bin/bash
ndk=$1
arch=$2
cpu_family=$3
cpu=$4
cross_file="/cross_file-$arch.txt"
# armv7 has the toolchain split between two names.
arch2=${5:-$2}
# Note that we disable C++ exceptions, because Mesa doesn't use exceptions,
# and allowing it in code generation means we get unwind symbols that break
# the libEGL and driver symbol tests.
cat >$cross_file <<EOF
[binaries]
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-ar'
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '-static-libstdc++']
c_ld = 'lld'
cpp_ld = 'lld'
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-strip'
pkgconfig = ['/usr/bin/pkg-config']
[host_machine]
system = 'linux'
cpu_family = '$cpu_family'
cpu = '$cpu'
endian = 'little'
[properties]
needs_exe_wrapper = true
EOF
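# Illustrative usage only, mirroring the create-android-cross-file.sh calls in
# the debian-android build script later in this series (the build directory
# name is hypothetical):
#   sh create-android-cross-file.sh /android-ndk-r21d aarch64-linux-android arm armv8
#   meson build-aarch64 --cross-file /cross_file-aarch64-linux-android.txt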


@@ -1,38 +0,0 @@
#!/bin/sh
# Makes a .pc file in the Android NDK for meson to find its libraries.
set -ex
ndk="$1"
pc="$2"
cflags="$3"
libs="$4"
version="$5"
sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot
for arch in \
x86_64-linux-android \
i686-linux-android \
aarch64-linux-android \
arm-linux-androideabi; do
pcdir=$sysroot/usr/lib/$arch/pkgconfig
mkdir -p $pcdir
cat >$pcdir/$pc <<EOF
prefix=$sysroot
exec_prefix=$sysroot
libdir=$sysroot/usr/lib/$arch/29
sharedlibdir=$sysroot/usr/lib/$arch
includedir=$sysroot/usr/include
Name: zlib
Description: zlib compression library
Version: $version
Requires:
Libs: -L$sysroot/usr/lib/$arch/29 $libs
Cflags: -I$sysroot/usr/include $cflags
EOF
done
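# Illustrative check (not part of the original script): the per-arch lookup can
# be reproduced by pointing PKG_CONFIG_LIBDIR at one of the directories written
# above, e.g. for the zlib.pc generated elsewhere in this series:
#   PKG_CONFIG_LIBDIR=$sysroot/usr/lib/aarch64-linux-android/pkgconfig \
#     pkg-config --modversion zlib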


@@ -1,51 +0,0 @@
#!/bin/bash
arch=$1
cross_file="/cross_file-$arch.txt"
/usr/share/meson/debcrossgen --arch $arch -o "$cross_file"
# Explicitly set ccache path for cross compilers
sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"
if [ "$arch" = "i386" ]; then
# Work around a bug in debcrossgen that should be fixed in the next release
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file"
fi
# Rely on qemu-user being configured in binfmt_misc on the host
sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"
# Add a line for rustc, which debcrossgen is missing.
cc=`sed -n 's|c = .\(.*\).|\1|p' < $cross_file`
if [[ "$arch" = "arm64" ]]; then
rust_target=aarch64-unknown-linux-gnu
elif [[ "$arch" = "armhf" ]]; then
rust_target=armv7-unknown-linux-gnueabihf
elif [[ "$arch" = "i386" ]]; then
rust_target=i686-unknown-linux-gnu
elif [[ "$arch" = "ppc64el" ]]; then
rust_target=powerpc64le-unknown-linux-gnu
elif [[ "$arch" = "s390x" ]]; then
rust_target=s390x-unknown-linux-gnu
else
echo "Needs rustc target mapping"
fi
sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"
# Set up cmake cross compile toolchain file for dEQP builds
toolchain_file="/toolchain-$arch.cmake"
if [[ "$arch" = "arm64" ]]; then
GCC_ARCH="aarch64-linux-gnu"
DE_CPU="DE_CPU_ARM_64"
CMAKE_ARCH=arm
elif [[ "$arch" = "armhf" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DE_CPU="DE_CPU_ARM"
CMAKE_ARCH=arm
fi
if [[ -n "$GCC_ARCH" ]]; then
echo "set(CMAKE_SYSTEM_NAME Linux)" > "$toolchain_file"
echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> "$toolchain_file"
echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)" >> "$toolchain_file"
echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)" >> "$toolchain_file"
echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")" >> "$toolchain_file"
echo "set(DE_CPU $DE_CPU)" >> "$toolchain_file"
fi
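# Illustrative only: the generated files are consumed later as
#   meson build --cross-file /cross_file-arm64.txt ...
#   cmake -S . -B build -DCMAKE_TOOLCHAIN_FILE=/toolchain-arm64.cmake ...
# matching the EXTRA_MESON_ARGS / EXTRA_CMAKE_ARGS usage in the rootfs build
# script later in this series.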


@@ -1,268 +0,0 @@
#!/bin/bash
set -ex
if [ $DEBIAN_ARCH = arm64 ]; then
ARCH_PACKAGES="firmware-qcom-media"
elif [ $DEBIAN_ARCH = amd64 ]; then
ARCH_PACKAGES="firmware-amd-graphics
libelf1
libllvm11
"
fi
INSTALL_CI_FAIRY_PACKAGES="git
python3-dev
python3-pip
python3-setuptools
python3-wheel
"
apt-get -y install --no-install-recommends \
$ARCH_PACKAGES \
$INSTALL_CI_FAIRY_PACKAGES \
ca-certificates \
firmware-realtek \
initramfs-tools \
libasan6 \
libexpat1 \
libpng16-16 \
libpython3.9 \
libsensors5 \
libvulkan1 \
libwaffle-1-0 \
libx11-6 \
libx11-xcb1 \
libxcb-dri2-0 \
libxcb-dri3-0 \
libxcb-glx0 \
libxcb-present0 \
libxcb-randr0 \
libxcb-shm0 \
libxcb-sync1 \
libxcb-xfixes0 \
libxdamage1 \
libxext6 \
libxfixes3 \
libxkbcommon0 \
libxrender1 \
libxshmfence1 \
libxxf86vm1 \
netcat-openbsd \
python3 \
python3-lxml \
python3-mako \
python3-numpy \
python3-packaging \
python3-pil \
python3-renderdoc \
python3-requests \
python3-simplejson \
python3-yaml \
sntp \
strace \
waffle-utils \
wget \
xinit \
xserver-xorg-core \
xz-utils
# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
apt-get purge -y \
$INSTALL_CI_FAIRY_PACKAGES
passwd root -d
chsh -s /bin/sh
cat > /init <<EOF
#!/bin/sh
export PS1=lava-shell:
exec sh
EOF
chmod +x /init
#######################################################################
# Strip the image to a small minimal system without removing the debian
# toolchain.
# xz compress firmware so it doesn't waste RAM at runtime on ramdisk systems
find /lib/firmware -type f -print0 | \
xargs -0r -P4 -n4 xz -T1 -C crc32
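# Note: CONFIG_FW_LOADER_COMPRESS=y in the kernel config later in this series
# lets the kernel request the .xz-compressed firmware files directly, so no
# decompression step is needed in the rootfs.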
# Copy timezone file and remove tzdata package
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
UNNEEDED_PACKAGES="
libfdisk1
"
export DEBIAN_FRONTEND=noninteractive
# Removing unused packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
echo ${PACKAGE}
if ! apt-get remove --purge --yes "${PACKAGE}"
then
echo "WARNING: ${PACKAGE} isn't installed"
fi
done
apt-get autoremove --yes || true
# Dropping logs
rm -rf /var/log/*
# Dropping documentation, localization, i18n files, etc
rm -rf /usr/share/doc/*
rm -rf /usr/share/locale/*
rm -rf /usr/share/X11/locale/*
rm -rf /usr/share/man
rm -rf /usr/share/i18n/*
rm -rf /usr/share/info/*
rm -rf /usr/share/lintian/*
rm -rf /usr/share/common-licenses/*
rm -rf /usr/share/mime/*
# Dropping reportbug scripts
rm -rf /usr/share/bug
# Drop udev hwdb not required on a stripped system
rm -rf /lib/udev/hwdb.bin /lib/udev/hwdb.d/*
# Drop all gconv conversions && binaries
rm -rf usr/bin/iconv
rm -rf usr/sbin/iconvconfig
rm -rf usr/lib/*/gconv/
# Remove libusb database
rm -rf usr/sbin/update-usbids
rm -rf var/lib/usbutils/usb.ids
rm -rf usr/share/misc/usb.ids
#######################################################################
# Crush into a minimal production image to be deployed via some type of image
# updating system.
# IMPORTANT: The Debian system is no longer functional at this point;
# for example, apt and dpkg will stop working
UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"ncurses-bin ncurses-base libncursesw6 libncurses6 "\
"perl-base "\
"debconf libdebconfclient0 "\
"e2fsprogs e2fslibs libfdisk1 "\
"insserv "\
"udev "\
"init-system-helpers "\
"bash "\
"cpio "\
"xz-utils "\
"passwd "\
"libsemanage1 libsemanage-common "\
"libsepol1 "\
"gpgv "\
"hostname "\
"adduser "\
"debian-archive-keyring "\
"libegl1-mesa-dev "\
"libegl-mesa0 "\
"libgl1-mesa-dev "\
"libgl1-mesa-dri "\
"libglapi-mesa "\
"libgles2-mesa-dev "\
"libglx-mesa0 "\
"mesa-common-dev "\
# Removing unneeded packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
echo "Forcing removal of ${PACKAGE}"
if ! dpkg --purge --force-remove-essential --force-depends "${PACKAGE}"
then
echo "WARNING: ${PACKAGE} isn't installed"
fi
done
# Show what's left package-wise before dropping dpkg itself
COLUMNS=300 dpkg-query -W --showformat='${Installed-Size;10}\t${Package}\n' | sort -k1,1n
# Drop dpkg
dpkg --purge --force-remove-essential --force-depends dpkg
# No apt or dpkg, no need for its configuration archives
rm -rf etc/apt
rm -rf etc/dpkg
# Drop directories not part of ostree
# Note that /var needs to exist as ostree bind mounts the deployment /var over
# it
rm -rf var/* opt srv share
# ca-certificates are in /etc, so drop the source
rm -rf usr/share/ca-certificates
# No bash, no need for completions
rm -rf usr/share/bash-completion
# No zsh, no need for completions
rm -rf usr/share/zsh/vendor-completions
# drop gcc python helpers
rm -rf usr/share/gcc
# Drop sysvinit leftovers
rm -rf etc/init.d
rm -rf etc/rc[0-6S].d
# Drop upstart helpers
rm -rf etc/init
# Various xtables helpers
rm -rf usr/lib/xtables
# Drop all locales
# TODO: the only remaining locale is actually "C". Should we really remove it?
rm -rf usr/lib/locale/*
# partition helpers
rm -rf usr/sbin/*fdisk
# locale compiler
rm -rf usr/bin/localedef
# Systemd dns resolver
find usr etc -name '*systemd-resolve*' -prune -exec rm -r {} \;
# Systemd network configuration
find usr etc -name '*networkd*' -prune -exec rm -r {} \;
# systemd ntp client
find usr etc -name '*timesyncd*' -prune -exec rm -r {} \;
# systemd hw database manager
find usr etc -name '*systemd-hwdb*' -prune -exec rm -r {} \;
# No need for fuse
find usr etc -name '*fuse*' -prune -exec rm -r {} \;
# lsb init function leftovers
rm -rf usr/lib/lsb
# Only needed when adding libraries
rm -rf usr/sbin/ldconfig*
# Games, unused
rmdir usr/games
# Remove pam module to authenticate against a DB
# plus libdb-5.3.so that is only used by this pam module
rm -rf usr/lib/*/security/pam_userdb.so
rm -rf usr/lib/*/libdb-5.3.so
# remove NSS support for nis, nisplus and hesiod
rm -rf usr/lib/*/libnss_hesiod*
rm -rf usr/lib/*/libnss_nis*


@@ -1,79 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
"
dpkg --add-architecture $arch
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
crossbuild-essential-$arch \
libelf-dev:$arch \
libexpat1-dev:$arch \
libpciaccess-dev:$arch \
libstdc++6:$arch \
libvulkan-dev:$arch \
libx11-dev:$arch \
libx11-xcb-dev:$arch \
libxcb-dri2-0-dev:$arch \
libxcb-dri3-dev:$arch \
libxcb-glx0-dev:$arch \
libxcb-present-dev:$arch \
libxcb-randr0-dev:$arch \
libxcb-shm0-dev:$arch \
libxcb-xfixes0-dev:$arch \
libxdamage-dev:$arch \
libxext-dev:$arch \
libxrandr-dev:$arch \
libxshmfence-dev:$arch \
libxxf86vm-dev:$arch \
wget
if [[ $arch != "armhf" ]]; then
if [[ $arch == "s390x" ]]; then
LLVM=9
else
LLVM=11
fi
# llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only
# with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get
# around this.
apt-get install -y --no-remove \
libclang-cpp${LLVM}:$arch \
libffi-dev:$arch \
libgcc-s1:$arch \
libtinfo-dev:$arch \
libz3-dev:$arch \
llvm-${LLVM}:$arch \
zlib1g
fi
. .gitlab-ci/container/create-cross-file.sh $arch
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)"
. .gitlab-ci/container/build-libdrm.sh
apt-get purge -y \
$STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh
# This needs to be done after container_post_build.sh, or apt-get breaks in there
if [[ $arch != "armhf" ]]; then
apt-get download llvm-${LLVM}-{dev,tools}:$arch
dpkg -i --force-depends llvm-${LLVM}-*_${arch}.deb
rm llvm-${LLVM}-*_${arch}.deb
fi


@@ -1,60 +0,0 @@
#!/bin/bash
set -ex
EPHEMERAL="\
rdfind \
unzip \
"
apt-get install -y --no-remove $EPHEMERAL
# Fetch the NDK and extract just the toolchain we want.
ndk=android-ndk-r21d
wget -O $ndk.zip https://dl.google.com/android/repository/$ndk-linux-x86_64.zip
unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
rm $ndk.zip
# Since it was packed as a zip file, symlinks/hardlinks got turned into
# duplicate files. Turn them into hardlinks to save on container space.
rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/
# Drop some large tools we won't use in this build.
find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f
sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3"
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x86 x86
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android arm armv8
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl armv7a-linux-androideabi
# Not using build-libdrm.sh because we don't want its cleanup after building
# each arch. Fetch and extract now.
export LIBDRM_VERSION=libdrm-2.4.102
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
for arch in \
x86_64-linux-android \
i686-linux-android \
aarch64-linux-android \
arm-linux-androideabi ; do
cd $LIBDRM_VERSION
rm -rf build-$arch
meson build-$arch \
--cross-file=/cross_file-$arch.txt \
--libdir=lib/$arch \
-Dlibkms=false \
-Dnouveau=false \
-Dvc4=false \
-Detnaviv=false \
-Dfreedreno=false \
-Dintel=false \
-Dcairo-tests=false
ninja -C build-$arch install
cd ..
done
rm -rf $LIBDRM_VERSION
apt-get purge -y $EPHEMERAL


@@ -1,72 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
apt-get -y install ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/buster.list
apt-get update
apt-get -y install \
abootimg \
autoconf \
automake \
bc \
bison \
ccache \
cmake \
debootstrap \
fastboot \
flex \
g++ \
git \
kmod \
libasan6 \
libdrm-dev \
libelf-dev \
libexpat1-dev \
libx11-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxcb-dri3-dev \
libxcb-glx0-dev \
libxcb-present-dev \
libxcb-randr0-dev \
libxcb-shm0-dev \
libxcb-xfixes0-dev \
libxdamage-dev \
libxext-dev \
libxrandr-dev \
libxshmfence-dev \
libxxf86vm-dev \
llvm-11-dev \
meson \
pkg-config \
python-is-python3 \
python3-mako \
python3-pil \
python3-pip \
python3-requests \
python3-setuptools \
u-boot-tools \
wget \
xz-utils \
zlib1g-dev
# Not available anymore in bullseye
apt-get install -y --no-remove -t buster \
android-sdk-ext4-utils
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
arch=armhf
. .gitlab-ci/container/cross_build.sh
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
EXTRA_MESON_ARGS=
. .gitlab-ci/container/build-libdrm.sh
. .gitlab-ci/container/container_post_build.sh


@@ -1,34 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
############### Install packages for baremetal testing
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
apt-get update
apt-get install -y --no-remove \
abootimg \
cpio \
fastboot \
netcat \
procps \
python-is-python3 \
python3-distutils \
python3-minimal \
python3-serial \
rsync \
snmp \
wget
# setup SNMPv2 SMI MIB
wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
-O /usr/share/snmp/mibs/SNMPv2-SMI.txt
arch=arm64 . .gitlab-ci/container/baremetal_build.sh
arch=armhf . .gitlab-ci/container/baremetal_build.sh
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
-O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw


@@ -1,5 +0,0 @@
#!/bin/bash
arch=i386
. .gitlab-ci/container/cross_build.sh


@@ -1,5 +0,0 @@
#!/bin/bash
arch=ppc64el
. .gitlab-ci/container/cross_build.sh


@@ -1,5 +0,0 @@
#!/bin/bash
arch=s390x
. .gitlab-ci/container/cross_build.sh


@@ -1,83 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
python3-pip \
python3-setuptools \
"
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
bison \
ccache \
dpkg-cross \
flex \
g++ \
g++-mingw-w64-x86-64 \
gcc \
git \
glslang-tools \
kmod \
libclang-11-dev \
libclang-9-dev \
libclc-dev \
libelf-dev \
libepoxy-dev \
libexpat1-dev \
libgtk-3-dev \
libllvm11 \
libllvm9 \
libomxil-bellagio-dev \
libpciaccess-dev \
libunwind-dev \
libva-dev \
libvdpau-dev \
libvulkan-dev \
libx11-dev \
libx11-xcb-dev \
libxext-dev \
libxml2-utils \
libxrandr-dev \
libxrender-dev \
libxshmfence-dev \
libxvmc-dev \
libxxf86vm-dev \
libz-mingw-w64-dev \
make \
meson \
pkg-config \
python-is-python3 \
python3-mako \
python3-pil \
python3-requests \
qemu-user \
valgrind \
wayland-protocols \
wget \
wine64 \
x11proto-dri2-dev \
x11proto-gl-dev \
x11proto-randr-dev \
xz-utils \
zlib1g-dev
# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
############### Uninstall ephemeral packages
apt-get purge -y $STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh


@@ -1,112 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
autoconf \
automake \
autotools-dev \
bzip2 \
cmake \
libgbm-dev \
libtool \
python3-pip \
"
# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
clang \
libasan6 \
libarchive-dev \
libclang-cpp11-dev \
libglvnd-dev \
libllvmspirvlib-dev \
liblua5.3-dev \
libxcb-dri2-0-dev \
libxcb-dri3-dev \
libxcb-glx0-dev \
libxcb-present-dev \
libxcb-randr0-dev \
libxcb-shm0-dev \
libxcb-sync-dev \
libxcb-xfixes0-dev \
libxcb1-dev \
libxml2-dev \
llvm-11-dev \
llvm-9-dev \
ocl-icd-opencl-dev \
procps \
spirv-tools \
strace \
time \
wine \
wine32
. .gitlab-ci/container/container_pre_build.sh
# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix it ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF
#!/bin/sh
PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@
EOF
chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config
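# Illustrative only: cross builds can then query mingw packages the usual way,
# e.g. x86_64-w64-mingw32-pkg-config --cflags --libs zlib (assuming a zlib.pc
# is present in /usr/x86_64-w64-mingw32/lib/pkgconfig).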
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
export LIBWAYLAND_VERSION=wayland-1.18.0
wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-libdrm.sh
wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
rm -rf $LIBWAYLAND_VERSION
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1
pushd DirectX-Headers
mkdir build
cd build
meson .. --backend=ninja --buildtype=release -Dbuild-test=false
ninja
ninja install
popd
rm -rf DirectX-Headers
pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7
############### Uninstall the build software
apt-get purge -y \
$STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh


@@ -1,71 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
cargo \
python3-dev \
python3-pip \
python3-setuptools \
python3-wheel \
"
apt-get update
apt-get dist-upgrade -y
apt-get install -y --no-remove \
git \
git-lfs \
libasan6 \
libexpat1 \
libllvm11 \
libllvm9 \
liblz4-1 \
libpng16-16 \
libpython3.9 \
libvulkan1 \
libwayland-client0 \
libwayland-server0 \
libxcb-ewmh2 \
libxcb-randr0 \
libxcb-xfixes0 \
libxkbcommon0 \
libxrandr2 \
libxrender1 \
python-is-python3 \
python3-mako \
python3-numpy \
python3-packaging \
python3-pil \
python3-requests \
python3-six \
python3-yaml \
vulkan-tools \
waffle-utils \
xauth \
xvfb \
zlib1g
apt-get install -y --no-install-recommends \
$STABLE_EPHEMERAL
# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
rm -rf ~/.cargo
apt-get purge -y $STABLE_EPHEMERAL
apt-get autoremove -y --purge


@@ -1,122 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
autoconf \
automake \
bc \
bison \
bzip2 \
cargo \
ccache \
clang-11 \
cmake \
flex \
g++ \
glslang-tools \
libasound2-dev \
libcap-dev \
libclang-cpp11-dev \
libelf-dev \
libfdt-dev \
libgbm-dev \
libgles2-mesa-dev \
libllvmspirvlib-dev \
libpciaccess-dev \
libpng-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxext-dev \
libxkbcommon-dev \
libxrender-dev \
llvm-11-dev \
llvm-spirv \
make \
meson \
ocl-icd-opencl-dev \
patch \
pkg-config \
python3-distutils \
wayland-protocols \
wget \
xz-utils \
"
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
clinfo \
inetutils-syslogd \
iptables \
libclang-common-11-dev \
libclang-cpp11 \
libcap2 \
libegl1 \
libfdt1 \
libllvmspirvlib11 \
libxcb-shm0 \
ocl-icd-libopencl1 \
python3-lxml \
python3-renderdoc \
python3-simplejson \
spirv-tools \
sysvinit-core
. .gitlab-ci/container/container_pre_build.sh
############### Build kernel
export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
export KERNEL_IMAGE_NAME=bzImage
export KERNEL_ARCH=x86_64
export DEBIAN_ARCH=amd64
mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh
############### Build libdrm
. .gitlab-ci/container/build-libdrm.sh
############### Build libclc
. .gitlab-ci/container/build-libclc.sh
############### Build virglrenderer
. .gitlab-ci/container/build-virglrenderer.sh
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
############### Build Crosvm
. .gitlab-ci/container/build-crosvm.sh
rm -rf /root/.cargo
############### Build dEQP GL
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
############### Uninstall the build software
ccache --show-stats
apt-get purge -y \
$STABLE_EPHEMERAL
apt-get autoremove -y --purge


@@ -1,160 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
ccache \
cmake \
g++ \
g++-mingw-w64-i686-posix \
g++-mingw-w64-x86-64-posix \
glslang-tools \
libgbm-dev \
libgles2-mesa-dev \
liblz4-dev \
libpciaccess-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxcb-ewmh-dev \
libxcb-keysyms1-dev \
libxkbcommon-dev \
libxrandr-dev \
libxrender-dev \
libzstd-dev \
meson \
mingw-w64-i686-dev \
mingw-w64-tools \
mingw-w64-x86-64-dev \
p7zip \
patch \
pkg-config \
python3-distutils \
wget \
xz-utils \
"
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
libxcb-shm0 \
python3-lxml \
python3-simplejson \
xinit \
xserver-xorg-video-amdgpu \
xserver-xorg-video-ati
# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update
apt-get install -y --no-remove \
wine \
wine32 \
wine64
function setup_wine() {
export WINEDEBUG="-all"
export WINEPREFIX="$1"
# We don't want crash dialogs
cat >crashdialog.reg <<EOF
Windows Registry Editor Version 5.00
[HKEY_CURRENT_USER\Software\Wine\WineDbg]
"ShowCrashDialog"=dword:00000000
EOF
# Set the wine prefix and disable the crash dialog
wine regedit crashdialog.reg
rm crashdialog.reg
# An immediate wine command may fail with: "${WINEPREFIX}: Not a
# valid wine prefix." That is just noise from checking for the
# existence of the system.reg file, which has not been created yet.
# Giving it a bit more time to appear solves the problem.
while ! test -f "${WINEPREFIX}/system.reg"; do sleep 1; done
}
############### Install DXVK
DXVK_VERSION="1.8.1"
setup_wine "/dxvk-wine64"
wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install
rm -rf dxvk-"${DXVK_VERSION}"
rm dxvk-"${DXVK_VERSION}".tar.gz
############### Install Windows' apitrace binaries
APITRACE_VERSION="10.0"
APITRACE_VERSION_DATE=""
wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
"apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
"apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
# Add the apitrace path to the registry
wine \
reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
/v Path \
/t REG_EXPAND_SZ \
/d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \
/f
############### Building ...
. .gitlab-ci/container/container_pre_build.sh
############### Build parallel-deqp-runner's hang-detection tool
. .gitlab-ci/container/build-hang-detection.sh
############### Build piglit
PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh
############### Build Fossilize
. .gitlab-ci/container/build-fossilize.sh
############### Build dEQP VK
. .gitlab-ci/container/build-deqp.sh
############### Build gfxreconstruct
. .gitlab-ci/container/build-gfxreconstruct.sh
############### Build VKD3D-Proton
setup_wine "/vkd3d-proton-wine64"
. .gitlab-ci/container/build-vkd3d-proton.sh
############### Build libdrm
. .gitlab-ci/container/build-libdrm.sh
############### Uninstall the build software
ccache --show-stats
apt-get purge -y \
$STABLE_EPHEMERAL
apt-get autoremove -y --purge


@@ -1,107 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
EPHEMERAL="
autoconf
automake
bzip2
git
libtool
pkgconfig(epoxy)
pkgconfig(gbm)
unzip
wget
xz
"
dnf install -y --setopt=install_weak_deps=False \
bison \
ccache \
clang-devel \
flex \
gcc \
gcc-c++ \
gettext \
kernel-headers \
llvm-devel \
meson \
"pkgconfig(dri2proto)" \
"pkgconfig(expat)" \
"pkgconfig(glproto)" \
"pkgconfig(libclc)" \
"pkgconfig(libelf)" \
"pkgconfig(libglvnd)" \
"pkgconfig(libomxil-bellagio)" \
"pkgconfig(libselinux)" \
"pkgconfig(libva)" \
"pkgconfig(pciaccess)" \
"pkgconfig(vdpau)" \
"pkgconfig(vulkan)" \
"pkgconfig(wayland-egl-backend)" \
"pkgconfig(wayland-protocols)" \
"pkgconfig(wayland-scanner)" \
"pkgconfig(x11)" \
"pkgconfig(x11-xcb)" \
"pkgconfig(xcb)" \
"pkgconfig(xcb-dri2)" \
"pkgconfig(xcb-dri3)" \
"pkgconfig(xcb-glx)" \
"pkgconfig(xcb-present)" \
"pkgconfig(xcb-randr)" \
"pkgconfig(xcb-sync)" \
"pkgconfig(xcb-xfixes)" \
"pkgconfig(xdamage)" \
"pkgconfig(xext)" \
"pkgconfig(xfixes)" \
"pkgconfig(xrandr)" \
"pkgconfig(xshmfence)" \
"pkgconfig(xxf86vm)" \
"pkgconfig(zlib)" \
python-unversioned-command \
python3-devel \
python3-mako \
vulkan-headers \
$EPHEMERAL
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
export LIBWAYLAND_VERSION=wayland-1.18.0
wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-libdrm.sh
wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
rm -rf $LIBWAYLAND_VERSION
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
############### Uninstall the build software
dnf remove -y $EPHEMERAL
. .gitlab-ci/container/container_post_build.sh


@@ -1,211 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
check_minio()
{
MINIO_PATH="${MINIO_HOST}/mesa-lava/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
if wget -q --method=HEAD "https://${MINIO_PATH}/done"; then
exit
fi
}
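# Illustrative example with hypothetical values: for FDO_UPSTREAM_REPO=mesa/mesa,
# DISTRIBUTION_TAG=2021.06 and DEBIAN_ARCH=arm64 the probe above becomes
#   wget -q --method=HEAD "https://${MINIO_HOST}/mesa-lava/mesa/mesa/2021.06/arm64/done"
# and the job exits early if that marker is already uploaded.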
# If remote files are up-to-date, skip rebuilding them
check_minio "${FDO_UPSTREAM_REPO}"
check_minio "${CI_PROJECT_PATH}"
. .gitlab-ci/container/container_pre_build.sh
# Install rust, which we'll be using for deqp-runner. It will be cleaned up at the end.
. .gitlab-ci/container/build-rust.sh
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
GCC_ARCH="aarch64-linux-gnu"
KERNEL_ARCH="arm64"
DEFCONFIG="arch/arm64/configs/defconfig"
DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
KERNEL_ARCH="arm"
DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
KERNEL_IMAGE_NAME="zImage"
. .gitlab-ci/container/create-cross-file.sh armhf
else
GCC_ARCH="x86_64-linux-gnu"
KERNEL_ARCH="x86_64"
DEFCONFIG="arch/x86/configs/x86_64_defconfig"
DEVICE_TREES=""
KERNEL_IMAGE_NAME="bzImage"
fi
# Determine if we're in a cross build.
if [[ -e /cross_file-$DEBIAN_ARCH.txt ]]; then
EXTRA_MESON_ARGS="--cross-file /cross_file-$DEBIAN_ARCH.txt"
EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/toolchain-$DEBIAN_ARCH.cmake"
if [ $DEBIAN_ARCH = arm64 ]; then
RUST_TARGET="aarch64-unknown-linux-gnu"
elif [ $DEBIAN_ARCH = armhf ]; then
RUST_TARGET="armv7-unknown-linux-gnueabihf"
fi
rustup target add $RUST_TARGET
export EXTRA_CARGO_ARGS="--target $RUST_TARGET"
export ARCH=${KERNEL_ARCH}
export CROSS_COMPILE="${GCC_ARCH}-"
fi
apt-get update
apt-get install -y --no-remove \
automake \
bc \
cmake \
debootstrap \
git \
glslang-tools \
libdrm-dev \
libegl1-mesa-dev \
libgbm-dev \
libgles2-mesa-dev \
libpng-dev \
libssl-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxkbcommon-dev \
patch \
python3-distutils \
python3-mako \
python3-numpy \
python3-serial \
wget
if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
apt-get install -y --no-remove \
libegl1-mesa-dev:armhf \
libelf-dev:armhf \
libgbm-dev:armhf \
libgles2-mesa-dev:armhf \
libpng-dev:armhf \
libudev-dev:armhf \
libvulkan-dev:armhf \
libwaffle-dev:armhf \
libwayland-dev:armhf \
libx11-xcb-dev:armhf \
libxkbcommon-dev:armhf
fi
############### Building
STRIP_CMD="${GCC_ARCH}-strip"
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}
############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/apitrace
mv /apitrace/build /lava-files/rootfs-${DEBIAN_ARCH}/apitrace
rm -rf /apitrace
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin
mv /usr/local/bin/deqp-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/.
mv /usr/local/bin/piglit-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/.
############### Build dEQP
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.
############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.
############### Build libdrm
EXTRA_MESON_ARGS+=" -D prefix=/libdrm"
. .gitlab-ci/container/build-libdrm.sh
############### Build kernel
. .gitlab-ci/container/build-kernel.sh
############### Delete rust, since the tests won't be compiling anything.
rm -rf /root/.cargo
############### Create rootfs
set +e
if ! debootstrap \
--variant=minbase \
--arch=${DEBIAN_ARCH} \
--components main,contrib,non-free \
bullseye \
/lava-files/rootfs-${DEBIAN_ARCH}/ \
http://deb.debian.org/debian; then
cat /lava-files/rootfs-${DEBIAN_ARCH}/debootstrap/debootstrap.log
exit 1
fi
set -e
cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/.
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
############### Install the built libdrm
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has been already
# created.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
rm -rf /libdrm
if [ ${DEBIAN_ARCH} = arm64 ]; then
# Make a gzipped copy of the Image for db410c.
gzip -k /lava-files/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
du -ah /lava-files/rootfs-${DEBIAN_ARCH} | sort -h | tail -100
pushd /lava-files/rootfs-${DEBIAN_ARCH}
tar czf /lava-files/lava-rootfs.tgz .
popd
. .gitlab-ci/container/container_post_build.sh
############### Upload the files!
ci-fairy minio login $CI_JOB_JWT
FILES_TO_UPLOAD="lava-rootfs.tgz \
$KERNEL_IMAGE_NAME"
if [[ -n $DEVICE_TREES ]]; then
FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
fi
for f in $FILES_TO_UPLOAD; do
ci-fairy minio cp /lava-files/$f \
minio://${MINIO_PATH}/$f
done
touch /lava-files/done
ci-fairy minio cp /lava-files/done minio://${MINIO_PATH}/done


@@ -1,95 +0,0 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=y
CONFIG_FW_LOADER_COMPRESS=y
# options for AMD devices
CONFIG_X86_AMD_PLATFORM_DEVICE=y
CONFIG_ACPI_VIDEO=y
CONFIG_X86_AMD_FREQ_SENSITIVITY=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_AMD=y
CONFIG_DRM_AMDGPU=m
CONFIG_DRM_AMDGPU_SI=m
CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AMD_ACP=n
CONFIG_ACPI_WMI=y
CONFIG_MXM_WMI=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_SERIAL_8250_DW=y
CONFIG_CHROME_PLATFORMS=y
# options for Intel devices
CONFIG_MFD_INTEL_LPSS_PCI=y
# options for KVM guests
CONFIG_FUSE_FS=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_KVM=y
CONFIG_KVM_GUEST=y
CONFIG_VIRT_DRIVERS=y
CONFIG_VIRTIO_FS=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_PARAVIRT=y
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTUALIZATION=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_BLK_MQ_VIRTIO=y
CONFIG_TUN=y
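# This fragment is merged on top of the arch defconfig by the build-kernel.sh
# script earlier in this series via scripts/kconfig/merge_config.sh; for the
# x86_64 build that amounts roughly to (illustrative):
#   ./scripts/kconfig/merge_config.sh arch/x86/configs/x86_64_defconfig \
#       ../.gitlab-ci/container/x86_64.config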


@@ -1 +0,0 @@
lp_test_arit


@@ -1,2 +0,0 @@
lp_test_arit
lp_test_format


@@ -1,25 +0,0 @@
#!/bin/sh
set -ex
mount -t proc none /proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp
. /crosvm-env.sh
# / is ro
export PIGLIT_REPLAY_EXTRA_ARGS="$PIGLIT_REPLAY_EXTRA_ARGS --db-path /tmp/replayer-db"
if sh $CROSVM_TEST_SCRIPT; then
touch /results/success
fi
poweroff -d -n -f || true
sleep 10 # Just in case init would exit before the kernel shuts down the VM
exit 1


@@ -1,45 +0,0 @@
#!/bin/sh
set -x
ln -sf $CI_PROJECT_DIR/install /install
export LD_LIBRARY_PATH=$CI_PROJECT_DIR/install/lib/
export EGL_PLATFORM=surfaceless
export -p > /crosvm-env.sh
export GALLIUM_DRIVER="$CROSVM_GALLIUM_DRIVER"
export LIBGL_ALWAYS_SOFTWARE="true"
CROSVM_KERNEL_ARGS="root=my_root rw rootfstype=virtiofs loglevel=3 init=$CI_PROJECT_DIR/install/crosvm-init.sh ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
# Temporary results dir because from the guest we cannot write to /
mkdir -p /results
mount -t tmpfs tmpfs /results
mkdir -p /piglit/.gitlab-ci/piglit
mount -t tmpfs tmpfs /piglit/.gitlab-ci/piglit
unset DISPLAY
unset XDG_RUNTIME_DIR
/usr/sbin/iptables-legacy -t nat -A POSTROUTING -o eth0 -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
# Crosvm wants this
syslogd > /dev/null
crosvm run \
--gpu gles=false,backend=3d,egl=true,surfaceless=true \
-m 4096 \
-c 4 \
--disable-sandbox \
--shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
--host_ip=192.168.30.1 --netmask=255.255.255.0 --mac "AA:BB:CC:00:00:12" \
-p "$CROSVM_KERNEL_ARGS" \
/lava-files/bzImage
mkdir -p $CI_PROJECT_DIR/results
mv /results/* $CI_PROJECT_DIR/results/.
test -f $CI_PROJECT_DIR/results/success


@@ -1,10 +0,0 @@
# Note: skip lists for CI are just a list of lines that, when
# non-zero-length and not starting with '#', will regex match to
# delete lines from the test list. Be careful.
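# Illustration only (not an actual skip): a line such as
#   dEQP-GLES2.functional.shaders\..*
# would drop every matching GLES2 shader test from the caselist before the
# run starts.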
# These are tremendously slow (pushing toward a minute), and aren't
# reliable to be run in parallel with other tests due to CPU-side timing.
dEQP-GLES[0-9]*.functional.flush_finish.*
# https://gitlab.freedesktop.org/mesa/mesa/-/issues/4575
dEQP-VK.wsi.display.get_display_plane_capabilities


@@ -1,257 +0,0 @@
#!/bin/sh
set -ex
DEQP_WIDTH=${DEQP_WIDTH:-256}
DEQP_HEIGHT=${DEQP_HEIGHT:-256}
DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0}
DEQP_VARIANT=${DEQP_VARIANT:-master}
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=${DEQP_SURFACE_TYPE:-pbuffer}"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden"
if [ -z "$DEQP_VER" ]; then
echo 'DEQP_VER must be set to something like "gles2", "gles31-khr" or "vk" for the test run'
exit 1
fi
if [ "$DEQP_VER" = "vk" ]; then
if [ -z "$VK_DRIVER" ]; then
echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
exit 1
fi
fi
if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/deqp-gpu-version-*.txt)'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json
# the runner was failing to find libkms in /usr/local/lib for some reason
# I never figured out.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS
HANG_DETECTION_CMD=""
# Generate test case list file.
if [ "$DEQP_VER" = "vk" ]; then
cp /deqp/mustpass/vk-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk
HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"
elif [ "$DEQP_VER" = "gles2" -o "$DEQP_VER" = "gles3" -o "$DEQP_VER" = "gles31" -o "$DEQP_VER" = "egl" ]; then
cp /deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER
SUITE=dEQP
elif [ "$DEQP_VER" = "gles2-khr" -o "$DEQP_VER" = "gles3-khr" -o "$DEQP_VER" = "gles31-khr" -o "$DEQP_VER" = "gles32-khr" ]; then
cp /deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/external/openglcts/modules/glcts
SUITE=dEQP
else
cp /deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/external/openglcts/modules/glcts
SUITE=KHR
fi
# If the caselist is too long to run in a reasonable amount of time, let the job
# specify what fraction (1/n) of the caselist we should run. Note: first~step
# is a GNU sed address extension that matches every step-th line starting at
# line "first" (the first line is #1).
if [ -n "$DEQP_FRACTION" ]; then
sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt
fi
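# Illustration with an assumed value: DEQP_FRACTION=10 keeps caselist lines
# 1, 11, 21, ... so roughly a tenth of the tests run in this job.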
# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
fi
if [ -n "$DEQP_CASELIST_FILTER" ]; then
sed -ni "/$DEQP_CASELIST_FILTER/p" /tmp/case-list.txt
fi
if [ -n "$DEQP_CASELIST_INV_FILTER" ]; then
sed -ni "/$DEQP_CASELIST_INV_FILTER/!p" /tmp/case-list.txt
fi
if [ ! -s /tmp/case-list.txt ]; then
echo "Caselist generation failed"
exit 1
fi
if [ -e "$INSTALL/deqp-$GPU_VERSION-fails.txt" ]; then
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/deqp-$GPU_VERSION-fails.txt"
fi
# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/deqp-$GPU_VERSION-flakes.txt
if [ -e "$INSTALL/deqp-$GPU_VERSION-skips.txt" ]; then
DEQP_SKIPS="$INSTALL/deqp-$GPU_VERSION-skips.txt"
fi
set +e
if [ -n "$DEQP_PARALLEL" ]; then
JOB="--jobs $DEQP_PARALLEL"
elif [ -n "$FDO_CI_CONCURRENT" ]; then
JOB="--jobs $FDO_CI_CONCURRENT"
else
JOB="--jobs 4"
fi
# If this CI lab lacks artifacts support, print the whole list of failures/flakes.
if [ -n "$DEQP_NO_SAVE_RESULTS" ]; then
SUMMARY_LIMIT="--summary-limit 0"
fi
run_cts() {
deqp=$1
caselist=$2
output=$3
deqp-runner \
run \
--deqp $deqp \
--output $RESULTS \
--caselist $caselist \
--skips $INSTALL/deqp-all-skips.txt $DEQP_SKIPS \
--flakes $INSTALL/deqp-$GPU_VERSION-flakes.txt \
--testlog-to-xml /deqp/executor/testlog-to-xml \
$JOB \
$SUMMARY_LIMIT \
$DEQP_RUNNER_OPTIONS \
-- \
$DEQP_OPTIONS
}
parse_renderer() {
RENDERER=`grep -A1 TestCaseResult.\*info.renderer $RESULTS/deqp-info.qpa | grep '<Text' | sed 's|.*<Text>||g' | sed 's|</Text>||g'`
VERSION=`grep -A1 TestCaseResult.\*info.version $RESULTS/deqp-info.qpa | grep '<Text' | sed 's|.*<Text>||g' | sed 's|</Text>||g'`
echo "Renderer: $RENDERER"
echo "Version: $VERSION "
if ! echo $RENDERER | grep -q $DEQP_EXPECTED_RENDERER; then
echo "Expected GL_RENDERER $DEQP_EXPECTED_RENDERER"
exit 1
fi
}
check_renderer() {
if echo $DEQP_VER | grep -q egl; then
return
fi
echo "Capturing renderer info for GLES driver sanity checks"
# If you're having trouble loading your driver, uncommenting this may help
# debug.
# export EGL_LOG_LEVEL=debug
VERSION=`echo $DEQP_VER | cut -d '-' -f1 | tr '[a-z]' '[A-Z]'`
export LD_PRELOAD=$TEST_LD_PRELOAD
$DEQP $DEQP_OPTIONS --deqp-case=$SUITE-$VERSION.info.\* --deqp-log-filename=$RESULTS/deqp-info.qpa
export LD_PRELOAD=
parse_renderer
}
check_vk_device_name() {
echo "Capturing device info for VK driver sanity checks"
export LD_PRELOAD=$TEST_LD_PRELOAD
$DEQP $DEQP_OPTIONS --deqp-case=dEQP-VK.info.device --deqp-log-filename=$RESULTS/deqp-info.qpa
export LD_PRELOAD=
DEVICENAME=`grep deviceName $RESULTS/deqp-info.qpa | sed 's|deviceName: ||g'`
echo "deviceName: $DEVICENAME"
if ! echo $DEVICENAME | grep -q "$DEQP_EXPECTED_RENDERER"; then
echo "Expected deviceName $DEQP_EXPECTED_RENDERER"
exit 1
fi
}
report_load() {
echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
echo "# of CPU cores: $(cat /proc/cpuinfo | grep processor | wc -l)"
}
# wrapper to suppress the xtrace output (set +x) to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
# dEQP uses virpipe, while virgl_test_server runs on llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
VTEST_ARGS="--use-egl-surfaceless"
if [ "$VIRGL_HOST_API" = "GLES" ]; then
VTEST_ARGS="$VTEST_ARGS --use-gles"
fi
GALLIUM_DRIVER=llvmpipe \
GALLIVM_PERF="nopt,no_filter_hacks" \
virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
sleep 1
fi
if [ $DEQP_VER = vk ]; then
quiet check_vk_device_name
else
quiet check_renderer
fi
RESULTS_CSV=$RESULTS/results.csv
FAILURES_CSV=$RESULTS/failures.csv
export LD_PRELOAD=$TEST_LD_PRELOAD
run_cts $DEQP /tmp/case-list.txt $RESULTS_CSV
DEQP_EXITCODE=$?
export LD_PRELOAD=
quiet report_load
# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
find $RESULTS -name \*.xml | \
sort -n |
sed -n '1,+49!p' | \
xargs rm -f
# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
find $RESULTS -name \*.xml \
-exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
-quit
$HANG_DETECTION_CMD deqp-runner junit \
--testsuite $DEQP_VER \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
python3 $INSTALL/report-flakes.py \
--host irc.oftc.net \
--port 6667 \
--results $RESULTS_CSV \
--known-flakes $INSTALL/deqp-$GPU_VERSION-flakes.txt \
--channel "$FLAKES_CHANNEL" \
--runner "$CI_RUNNER_DESCRIPTION" \
--job "$CI_JOB_ID" \
--url "$CI_JOB_URL" \
--branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
exit $DEQP_EXITCODE


@@ -1 +0,0 @@
../docs/ci


@@ -1,36 +0,0 @@
#!/bin/bash
set +e
set -o xtrace
# if we run this script outside of gitlab-ci for testing, ensure
# we got meaningful variables
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/mesa}
if [[ -e $CI_PROJECT_DIR/.git ]]
then
echo "Repository already present, skip cache download"
exit
fi
TMP_DIR=$(mktemp -d)
echo "Downloading archived master..."
/usr/bin/wget -O $TMP_DIR/mesa.tar.gz \
https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/mesa.tar.gz
# check wget error code
if [[ $? -ne 0 ]]
then
echo "Repository cache not available"
exit
fi
set -e
rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/mesa.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"


@@ -1,20 +0,0 @@
#!/bin/sh
set -ex
if [ -z "$VK_DRIVER" ]; then
echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.x86_64.json
# To store Fossilize logs on failure.
RESULTS=`pwd`/results
mkdir -p results
"$INSTALL/fossils/fossils.sh" "$INSTALL/fossils.yml" "$RESULTS"


@@ -1,10 +0,0 @@
fossils-db:
repo: "https://gitlab.freedesktop.org/hakzsam/fossils-db"
commit: "5626cedcb58bd95a7b79a9664651818aea92b21c"
fossils:
- path: sascha-willems/database.foz
- path: parallel-rdp/small_subgroup.foz
- path: parallel-rdp/small_uber_subgroup.foz
- path: parallel-rdp/subgroup.foz
- path: parallel-rdp/uber_subgroup.foz


@@ -1,77 +0,0 @@
#!/usr/bin/env bash
FOSSILS_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
FOSSILS_YAML="$(readlink -f "$1")"
FOSSILS_RESULTS="$2"
clone_fossils_db()
{
local repo="$1"
local commit="$2"
rm -rf fossils-db
git clone --no-checkout "$repo" fossils-db
(cd fossils-db; git reset "$commit" || git reset "origin/$commit")
}
query_fossils_yaml()
{
python3 "$FOSSILS_SCRIPT_DIR/query_fossils_yaml.py" \
--file "$FOSSILS_YAML" "$@"
}
create_clean_git()
{
rm -rf .clean_git
cp -R .git .clean_git
}
restore_clean_git()
{
rm -rf .git
cp -R .clean_git .git
}
fetch_fossil()
{
local fossil="${1//,/?}"
echo -n "[fetch_fossil] Fetching $1... "
local ret=0
local output
if ! output=$(git lfs pull -I "$fossil" 2>&1) || [[ ! -f "$1" ]]; then
echo "ERROR"
echo "$output"
ret=1
else
echo "OK"
fi
restore_clean_git
return $ret
}
if [[ -n "$(query_fossils_yaml fossils_db_repo)" ]]; then
clone_fossils_db "$(query_fossils_yaml fossils_db_repo)" \
"$(query_fossils_yaml fossils_db_commit)"
cd fossils-db
else
echo "Warning: No fossils-db entry in $FOSSILS_YAML, assuming fossils-db is current directory"
fi
# During git operations various git objects get created which
# may take up significant space. Store a clean .git instance,
# which we restore after various git operations to keep our
# storage consumption low.
create_clean_git
for fossil in $(query_fossils_yaml fossils)
do
fetch_fossil "$fossil" || exit $?
fossilize-replay --num-threads 4 $fossil 1>&2 2> $FOSSILS_RESULTS/fossil_replay.txt
if [ $? != 0 ]; then
echo "Replay of $fossil failed"
grep "pipeline crashed or hung" $FOSSILS_RESULTS/fossil_replay.txt
exit 1
fi
rm $fossil
done
exit 0


@@ -1,69 +0,0 @@
#!/usr/bin/python3
# Copyright (c) 2019 Collabora Ltd
# Copyright (c) 2020 Valve Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# SPDX-License-Identifier: MIT
import argparse
import yaml
def cmd_fossils_db_repo(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
print(y['fossils-db']['repo'])
def cmd_fossils_db_commit(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
print(y['fossils-db']['commit'])
def cmd_fossils(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
fossils = list(y['fossils'])
if len(fossils) == 0:
return
print('\n'.join((t['path'] for t in fossils)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True,
help='the name of the yaml file')
subparsers = parser.add_subparsers(help='sub-command help')
parser_fossils_db_repo = subparsers.add_parser('fossils_db_repo')
parser_fossils_db_repo.set_defaults(func=cmd_fossils_db_repo)
parser_fossils_db_commit = subparsers.add_parser('fossils_db_commit')
parser_fossils_db_commit.set_defaults(func=cmd_fossils_db_commit)
parser_fossils = subparsers.add_parser('fossils')
parser_fossils.set_defaults(func=cmd_fossils)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
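# Example invocations (a sketch; assumes the helper sits next to the
# fossils.yml shown earlier):
#   python3 query_fossils_yaml.py --file fossils.yml fossils_db_repo
#   python3 query_fossils_yaml.py --file fossils.yml fossils
# The first prints the fossils-db repository URL, the second prints one fossil
# path per line -- exactly the output fossils.sh iterates over.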

View File

@@ -1,120 +0,0 @@
.lava-test:
extends:
- .ci-run-policy
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
variables:
GIT_STRATEGY: none # testing doesn't build anything from source
DEQP_PARALLEL: 6 # should be replaced by per-machine definitions
DEQP_NO_SAVE_RESULTS: 1 # but why not ... ?
DEQP_VER: gles2
# proxy used to cache data locally
FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
# base system generated by the container build job, shared between many pipelines
BASE_SYSTEM_HOST_PREFIX: "${MINIO_HOST}/mesa-lava"
BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}"
BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}"
# per-job build artifacts
MESA_BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/mesa-${ARCH}.tar.gz"
JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz"
PIGLIT_NO_WINDOW: 1
script:
- ./artifacts/lava/lava-submit.sh
artifacts:
name: "mesa_${CI_JOB_NAME}"
when: always
paths:
- results/
exclude:
- results/*.shader_cache
after_script:
- wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz
.lava-test:armhf:
variables:
ARCH: armhf
KERNEL_IMAGE_NAME: zImage
KERNEL_IMAGE_TYPE: "zimage"
BOOT_METHOD: u-boot
HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_armhf
- .use-debian/x86_build
- .lava-test
- .use-kernel+rootfs-arm
needs:
- kernel+rootfs_armhf
- debian/x86_build
- debian-armhf
.lava-test:arm64:
variables:
ARCH: arm64
KERNEL_IMAGE_NAME: Image
KERNEL_IMAGE_TYPE: "image"
BOOT_METHOD: u-boot
HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64
- .use-debian/x86_build
- .lava-test
- .use-kernel+rootfs-arm
dependencies:
- debian-arm64
needs:
- kernel+rootfs_arm64
- debian/x86_build
- debian-arm64
.lava-test:amd64:
variables:
ARCH: amd64
KERNEL_IMAGE_NAME: bzImage
KERNEL_IMAGE_TYPE: "zimage"
BOOT_METHOD: u-boot
HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .use-debian/x86_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_amd64
- .use-debian/x86_build
- .lava-test
- .use-kernel+rootfs-amd64
needs:
- kernel+rootfs_amd64
- debian-testing
.lava-traces-base:
variables:
HWCI_TEST_SCRIPT: "/install/piglit/run.sh"
artifacts:
reports:
junit: results/junit.xml
.lava-piglit:
variables:
PIGLIT_REPLAY_DEVICE_NAME: "gl-${GPU_VERSION}"
PIGLIT_RESULTS: "${GPU_VERSION}-${PIGLIT_PROFILES}"
HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"
.lava-piglit-traces:amd64:
extends:
- .lava-test:amd64
- .lava-piglit
- .lava-traces-base
.lava-piglit-traces:armhf:
extends:
- .lava-test:armhf
- .lava-piglit
- .lava-traces-base
.lava-piglit-traces:arm64:
extends:
- .lava-test:arm64
- .lava-piglit
- .lava-traces-base
.lava-piglit:amd64:
extends:
- .lava-test:amd64
- .lava-piglit
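# For reference, a concrete device job elsewhere in the CI would build on these
# templates roughly as follows (names and values are illustrative, not taken
# from this file):
#
#   example-board-gles2:
#     extends:
#       - .lava-test:arm64
#     variables:
#       DEVICE_TYPE: example-board
#       DTB: example-board
#       GPU_VERSION: example-gpu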

View File

@@ -1,45 +0,0 @@
#!/bin/bash
set -e
set -x
# Try to use the kernel and rootfs built in mainline first, so we're more
# likely to hit cache
if wget -q --method=HEAD "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
else
BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
fi
rm -rf results
mkdir -p results/job-rootfs-overlay/
# Uploads from LAVA always go through MinIO, as we don't have direct upload
# from the DUT.
export PIGLIT_REPLAY_UPLOAD_TO_MINIO=1
cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy minio login "${CI_JOB_JWT}"
ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}"
touch results/lava.log
tail -f results/lava.log &
artifacts/lava/lava_job_submitter.py \
--dump-yaml \
--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--base-system-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
--mesa-build-url "${FDO_HTTP_CACHE_URI:-}https://${MESA_BUILD_PATH}" \
--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
--job-artifacts-base ${JOB_ARTIFACTS_BASE} \
--first-stage-init artifacts/ci-common/init-stage1.sh \
--ci-project-dir ${CI_PROJECT_DIR} \
--device-type ${DEVICE_TYPE} \
--dtb ${DTB} \
--jwt "${CI_JOB_JWT}" \
--kernel-image-name ${KERNEL_IMAGE_NAME} \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
--boot-method ${BOOT_METHOD} \
--lava-tags "${LAVA_TAGS}" >> results/lava.log

View File

@@ -1,301 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2020, 2021 Collabora Limited
# Author: Gustavo Padovan <gustavo.padovan@collabora.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Send a job to LAVA, track it and collect log back"""
import argparse
import lavacli
import os
import sys
import time
import traceback
import urllib.parse
import xmlrpc
import yaml
from datetime import datetime
from lavacli.utils import loader
def print_log(msg):
print("{}: {}".format(datetime.now(), msg))
def fatal_err(msg):
print_log(msg)
sys.exit(1)
def generate_lava_yaml(args):
# General metadata and permissions, plus also inexplicably kernel arguments
values = {
'job_name': 'mesa: {}'.format(args.pipeline_info),
'device_type': args.device_type,
'visibility': { 'group': [ 'Collabora+fdo'] },
'priority': 75,
'context': {
'extra_nfsroot_args': ' init=/init rootwait minio_results={}'.format(args.job_artifacts_base)
},
'timeouts': {
'job': {
'minutes': 30
}
},
}
if args.lava_tags:
values['tags'] = args.lava_tags.split(',')
# URLs to our kernel rootfs to boot from, both generated by the base
# container build
deploy = {
'timeout': { 'minutes': 10 },
'to': 'tftp',
'os': 'oe',
'kernel': {
'url': '{}/{}'.format(args.base_system_url_prefix, args.kernel_image_name),
},
'nfsrootfs': {
'url': '{}/lava-rootfs.tgz'.format(args.base_system_url_prefix),
'compression': 'gz',
}
}
if args.kernel_image_type:
deploy['kernel']['type'] = args.kernel_image_type
if args.dtb:
deploy['dtb'] = {
'url': '{}/{}.dtb'.format(args.base_system_url_prefix, args.dtb)
}
# always boot over NFS
boot = {
'timeout': { 'minutes': 25 },
'method': args.boot_method,
'commands': 'nfs',
'prompts': ['lava-shell:'],
}
# skeleton test definition: only declaring each job as a single 'test'
# since LAVA's test parsing is not useful to us
test = {
'timeout': { 'minutes': 30 },
'failure_retry': 1,
'definitions': [ {
'name': 'mesa',
'from': 'inline',
'path': 'inline/mesa.yaml',
'repository': {
'metadata': {
'name': 'mesa',
'description': 'Mesa test plan',
'os': [ 'oe' ],
'scope': [ 'functional' ],
'format': 'Lava-Test Test Definition 1.0',
},
'parse': {
'pattern': 'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
},
'run': {
},
},
} ],
}
# job execution script:
# - inline .gitlab-ci/common/init-stage1.sh
# - fetch and unpack per-pipeline build artifacts from build job
# - fetch and unpack per-job environment from lava-submit.sh
# - exec .gitlab-ci/common/init-stage2.sh
init_lines = []
with open(args.first_stage_init, 'r') as init_sh:
init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]
init_lines += [
'mkdir -p {}'.format(args.ci_project_dir),
'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.mesa_build_url, args.ci_project_dir),
'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),
'set +x',
'export CI_JOB_JWT="{}"'.format(args.jwt),
'set -x',
'exec /init-stage2.sh',
]
test['definitions'][0]['repository']['run']['steps'] = init_lines
values['actions'] = [
{ 'deploy': deploy },
{ 'boot': boot },
{ 'test': test },
]
return yaml.dump(values, width=10000000)
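# For orientation, the YAML returned above has roughly this shape (abridged
# sketch; actual URLs and names depend on the arguments passed in):
#
#   job_name: 'mesa: <pipeline info>'
#   device_type: <device type>
#   visibility: { group: [ Collabora+fdo ] }
#   priority: 75
#   timeouts: { job: { minutes: 30 } }
#   actions:
#     - deploy: { to: tftp, os: oe, kernel: {url: ...}, nfsrootfs: {url: ...} }
#     - boot: { method: <boot method>, commands: nfs, prompts: [ 'lava-shell:' ] }
#     - test: { definitions: [ inline 'mesa' job running init-stage1 then init-stage2 ] }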
def setup_lava_proxy():
config = lavacli.load_config("default")
uri, usr, tok = (config.get(key) for key in ("uri", "username", "token"))
uri_obj = urllib.parse.urlparse(uri)
uri_str = "{}://{}:{}@{}{}".format(uri_obj.scheme, usr, tok, uri_obj.netloc, uri_obj.path)
transport = lavacli.RequestsTransport(
uri_obj.scheme,
config.get("proxy"),
config.get("timeout", 120.0),
config.get("verify_ssl_cert", True),
)
proxy = xmlrpc.client.ServerProxy(
uri_str, allow_none=True, transport=transport)
print_log("Proxy for {} created.".format(config['uri']))
return proxy
def _call_proxy(fn, *args):
retries = 60
for n in range(1, retries + 1):
try:
return fn(*args)
except xmlrpc.client.ProtocolError as err:
if n == retries:
traceback.print_exc()
fatal_err("A protocol error occurred (Err {} {})".format(err.errcode, err.errmsg))
else:
time.sleep(15)
pass
except xmlrpc.client.Fault as err:
traceback.print_exc()
fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode))
def get_job_results(proxy, job_id, test_suite, test_case):
# Look for infrastructure errors and retry if we see them.
results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
results = yaml.load(results_yaml, Loader=loader(False))
for res in results:
metadata = res['metadata']
if not 'result' in metadata or metadata['result'] != 'fail':
continue
if 'error_type' in metadata and metadata['error_type'] == "Infrastructure":
print_log("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id))
return False
if 'case' in metadata and metadata['case'] == "validate":
print_log("LAVA job {} failed validation (possible download error). Retry.".format(job_id))
return False
results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case)
results = yaml.load(results_yaml, Loader=loader(False))
if not results:
fatal_err("LAVA: no result for test_suite '{}', test_case '{}'".format(test_suite, test_case))
print_log("LAVA: result for test_suite '{}', test_case '{}': {}".format(test_suite, test_case, results[0]['result']))
if results[0]['result'] != 'pass':
fatal_err("FAIL")
return True
def follow_job_execution(proxy, job_id):
line_count = 0
finished = False
while not finished:
(finished, data) = _call_proxy(proxy.scheduler.jobs.logs, job_id, line_count)
logs = yaml.load(str(data), Loader=loader(False))
if logs:
for line in logs:
print("{} {}".format(line["dt"], line["msg"]))
line_count += len(logs)
def show_job_data(proxy, job_id):
show = _call_proxy(proxy.scheduler.jobs.show, job_id)
for field, value in show.items():
print("{}\t: {}".format(field, value))
def validate_job(proxy, job_file):
try:
return _call_proxy(proxy.scheduler.jobs.validate, job_file, True)
except:
return False
def submit_job(proxy, job_file):
return _call_proxy(proxy.scheduler.jobs.submit, job_file)
def main(args):
proxy = setup_lava_proxy()
yaml_file = generate_lava_yaml(args)
if args.dump_yaml:
censored_args = args
censored_args.jwt = "jwt-hidden"
print(generate_lava_yaml(censored_args))
if args.validate_only:
ret = validate_job(proxy, yaml_file)
if not ret:
fatal_err("Error in LAVA job definition")
print("LAVA job definition validated successfully")
return
while True:
job_id = submit_job(proxy, yaml_file)
print_log("LAVA job id: {}".format(job_id))
follow_job_execution(proxy, job_id)
show_job_data(proxy, job_id)
if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
break
if __name__ == '__main__':
# given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
# GitLab runner -> GitLab primary -> user, safe to say we don't need any
# more buffering
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)
parser = argparse.ArgumentParser("LAVA job submitter")
parser.add_argument("--pipeline-info")
parser.add_argument("--base-system-url-prefix")
parser.add_argument("--mesa-build-url")
parser.add_argument("--job-rootfs-overlay-url")
parser.add_argument("--job-artifacts-base")
parser.add_argument("--first-stage-init")
parser.add_argument("--ci-project-dir")
parser.add_argument("--device-type")
parser.add_argument("--dtb", nargs='?', default="")
parser.add_argument("--kernel-image-name")
parser.add_argument("--kernel-image-type", nargs='?', default="")
parser.add_argument("--boot-method")
parser.add_argument("--lava-tags", nargs='?', default="")
parser.add_argument("--jwt")
parser.add_argument("--validate-only", action='store_true')
parser.add_argument("--dump-yaml", action='store_true')
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)

View File

@@ -1,82 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
CROSS_FILE=/cross_file-"$CROSS".txt
# We need to control the version of llvm-config we're using, so we'll
# tweak the cross file or generate a native file to do so.
if test -n "$LLVM_VERSION"; then
LLVM_CONFIG="llvm-config-${LLVM_VERSION}"
echo -e "[binaries]\nllvm-config = '`which $LLVM_CONFIG`'" > native.file
if [ -n "$CROSS" ]; then
sed -i -e '/\[binaries\]/a\' -e "llvm-config = '`which $LLVM_CONFIG`'" $CROSS_FILE
fi
$LLVM_CONFIG --version
else
rm -f native.file
touch native.file
fi
# cross-xfail-$CROSS, if it exists, contains a list of tests that are expected
# to fail for the $CROSS configuration, one per line. you can then mark those
# tests in their meson.build with:
#
# test(...,
# should_fail: meson.get_cross_property('xfail', '').contains(t),
# )
#
# where t is the name of the test, and the '' is the string to search when
# not cross-compiling (which is empty, because for amd64 everything is
# expected to pass).
if [ -n "$CROSS" ]; then
CROSS_XFAIL=.gitlab-ci/cross-xfail-"$CROSS"
if [ -s "$CROSS_XFAIL" ]; then
sed -i \
-e '/\[properties\]/a\' \
-e "xfail = '$(tr '\n' , < $CROSS_XFAIL)'" \
"$CROSS_FILE"
fi
fi
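# After the sed above, the cross file's [properties] section would contain
# something like (test names made up for illustration):
#   xfail = 'some-unit-test,another-unit-test,'
# (note the trailing comma produced by tr), which the meson.build snippet in
# the comment above reads back via meson.get_cross_property('xfail', '').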
# Only use GNU time if available, not any shell built-in command
case $CI_JOB_NAME in
# strace and wine don't seem to mix well
# ASAN leak detection is incompatible with strace
debian-mingw32-x86_64|*-asan*)
if test -f /usr/bin/time; then
MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
fi
;;
*)
if test -f /usr/bin/time -a -f /usr/bin/strace; then
MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time-strace.sh
fi
;;
esac
rm -rf _build
meson _build --native-file=native.file \
--wrap-mode=nofallback \
${CROSS+--cross "$CROSS_FILE"} \
-D prefix=`pwd`/install \
-D libdir=lib \
-D buildtype=${BUILDTYPE:-debug} \
-D build-tests=true \
-D c_args="$(echo -n $C_ARGS)" \
-D cpp_args="$(echo -n $CPP_ARGS)" \
-D libunwind=${UNWIND} \
${DRI_LOADERS} \
-D dri-drivers=${DRI_DRIVERS:-[]} \
${GALLIUM_ST} \
-D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
-D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
-D werror=true \
${EXTRA_OPTION}
cd _build
meson configure
ninja
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} ${MESON_TEST_ARGS}
ninja install
cd ..

View File

@@ -1,27 +0,0 @@
#!/bin/sh
STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)
mkdir -p $STRACEDIR
# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.
/usr/bin/time -v strace -ff -tt -T -o $STRACEDIR/log "$@" &
TIMEPID=$!
STRACEPID=$(ps --ppid $TIMEPID -o pid=)
TESTPID=$(ps --ppid $STRACEPID -o pid=)
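# Process chain at this point: this shell -> time (TIMEPID) -> strace
# (STRACEPID) -> the test binary (TESTPID); the trap below forwards SIGTERM to
# the innermost process.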
if test "x$TESTPID" != x; then
trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi
wait $TIMEPID
EXITCODE=$?
# Only keep strace logs if the test timed out
rm -rf $STRACEDIR &
exit $EXITCODE

View File

@@ -1,17 +0,0 @@
#!/bin/sh
# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.
/usr/bin/time -v "$@" &
TIMEPID=$!
TESTPID=$(ps --ppid $TIMEPID -o pid=)
if test "x$TESTPID" != x; then
trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi
wait $TIMEPID
exit $?

View File

@@ -1,36 +0,0 @@
diff --git a/generated_tests/CMakeLists.txt b/generated_tests/CMakeLists.txt
index 738526546..6f89048cd 100644
--- a/generated_tests/CMakeLists.txt
+++ b/generated_tests/CMakeLists.txt
@@ -206,11 +206,6 @@ piglit_make_generated_tests(
templates/gen_variable_index_write_tests/vs.shader_test.mako
templates/gen_variable_index_write_tests/fs.shader_test.mako
templates/gen_variable_index_write_tests/helpers.mako)
-piglit_make_generated_tests(
- vs_in_fp64.list
- gen_vs_in_fp64.py
- templates/gen_vs_in_fp64/columns.shader_test.mako
- templates/gen_vs_in_fp64/regular.shader_test.mako)
piglit_make_generated_tests(
shader_framebuffer_fetch_tests.list
gen_shader_framebuffer_fetch_tests.py)
@@ -279,7 +274,6 @@ add_custom_target(gen-gl-tests
gen_extensions_defined.list
vp-tex.list
variable_index_write_tests.list
- vs_in_fp64.list
gpu_shader4_tests.list
)
diff --git a/tests/sanity.py b/tests/sanity.py
index 12f1614c9..9019087e2 100644
--- a/tests/sanity.py
+++ b/tests/sanity.py
@@ -100,7 +100,6 @@ shader_tests = (
'spec/arb_tessellation_shader/execution/barrier-patch.shader_test',
'spec/arb_tessellation_shader/execution/built-in-functions/tcs-any-bvec4-using-if.shader_test',
'spec/arb_tessellation_shader/execution/sanity.shader_test',
- 'spec/arb_vertex_attrib_64bit/execution/vs_in/vs-input-uint_uvec4-double_dmat3x4_array2-position.shader_test',
'spec/glsl-1.50/execution/geometry-basic.shader_test',
'spec/oes_viewport_array/viewport-gs-write-simple.shader_test',
)

View File

@@ -1,6 +0,0 @@
# WGL is Windows-only
wgl@.*
# These are sensitive to CPU timing, and would need to be run in isolation
# on the system rather than in parallel with other tests.
glx@glx_arb_sync_control@timing.*

View File

@@ -1,94 +0,0 @@
#!/bin/sh
set -ex
if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in your ci/piglit-gpu-version-*.txt)'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json
RESULTS=`pwd`/${PIGLIT_RESULTS_DIR:-results}
mkdir -p $RESULTS
if [ -n "$PIGLIT_FRACTION" -o -n "$CI_NODE_INDEX" ]; then
FRACTION=`expr ${PIGLIT_FRACTION:-1} \* ${CI_NODE_TOTAL:-1}`
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction $FRACTION"
fi
# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction-start ${CI_NODE_INDEX}"
fi
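# Worked example, assuming the runner's --fraction/--fraction-start follow the
# usual "run every Nth test starting at index M" convention: with
# PIGLIT_FRACTION=2, CI_NODE_TOTAL=4 and CI_NODE_INDEX=3 this becomes
# "--fraction 8 --fraction-start 3", i.e. this job runs 1/8th of the caselist.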
if [ -e "$INSTALL/piglit-$GPU_VERSION-fails.txt" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --baseline $INSTALL/piglit-$GPU_VERSION-fails.txt"
fi
# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/piglit-$GPU_VERSION-flakes.txt
if [ -e "$INSTALL/piglit-$GPU_VERSION-skips.txt" ]; then
PIGLIT_SKIPS="$INSTALL/piglit-$GPU_VERSION-skips.txt"
fi
set +e
if [ -n "$PIGLIT_PARALLEL" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs $PIGLIT_PARALLEL"
elif [ -n "$FDO_CI_CONCURRENT" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs $FDO_CI_CONCURRENT"
else
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs 4"
fi
RESULTS_CSV=$RESULTS/results.csv
FAILURES_CSV=$RESULTS/failures.csv
export LD_PRELOAD=$TEST_LD_PRELOAD
piglit-runner \
run \
--piglit-folder /piglit \
--output $RESULTS \
--skips $INSTALL/piglit/piglit-all-skips.txt $PIGLIT_SKIPS \
--flakes $INSTALL/piglit-$GPU_VERSION-flakes.txt \
--profile $PIGLIT_PROFILES \
--process-isolation \
$PIGLIT_RUNNER_OPTIONS \
-v -v
PIGLIT_EXITCODE=$?
export LD_PRELOAD=
deqp-runner junit \
--testsuite $PIGLIT_PROFILES \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
python3 $INSTALL/report-flakes.py \
--host irc.oftc.net \
--port 6667 \
--results $RESULTS_CSV \
--known-flakes $INSTALL/piglit-$GPU_VERSION-flakes.txt \
--channel "$FLAKES_CHANNEL" \
--runner "$CI_RUNNER_DESCRIPTION" \
--job "$CI_JOB_ID" \
--url "$CI_JOB_URL" \
--branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
--branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi
exit $PIGLIT_EXITCODE

View File

@@ -1,284 +0,0 @@
#!/bin/sh
set -ex
INSTALL=$(realpath -s "$PWD"/install)
MINIO_ARGS="--credentials=/tmp/.minio_credentials"
RESULTS=$(realpath -s "$PWD"/results)
mkdir -p "$RESULTS"
# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when using a
# command wrapper. Hence, we just set it when running the command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(head -1 "$INSTALL/VERSION" | sed 's/\./\\./g')
print_red() {
RED='\033[0;31m'
NC='\033[0m' # No Color
printf "${RED}"
"$@"
printf "${NC}"
}
# wrapper to suppress +x to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
if [ "$VK_DRIVER" ]; then
### VULKAN ###
# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/dxvk-wine64"
export WINEESYNC=1
# Set environment for DXVK.
export DXVK_LOG_LEVEL="none"
export DXVK_STATE_CACHE=0
# Set environment for gfxreconstruct executables.
export PATH="/gfxreconstruct/build/bin:$PATH"
fi
SANITY_MESA_VERSION_CMD="vulkaninfo"
HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"
# Set up the Window System Interface (WSI)
if [ ${TEST_START_XORG:-0} -eq 1 ]; then
"$INSTALL"/common/start-x.sh "$INSTALL"
export DISPLAY=:0
else
# Run vulkan against the host's running X server (xvfb doesn't
# have DRI3 support).
# Set the DISPLAY env variable in each gitlab-runner's
# configuration file:
# https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
quiet printf "%s%s\n" "Running against the hosts' X server. " \
"DISPLAY is \"$DISPLAY\"."
fi
else
### GL/ES ###
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"
# Our rootfs may not have "less", which apitrace uses during
# apitrace dump
export PAGER=cat
fi
SANITY_MESA_VERSION_CMD="wflinfo"
HANG_DETECTION_CMD=""
# Set up the platform windowing system.
if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
# Use the surfaceless EGL platform.
export DISPLAY=
export WAFFLE_PLATFORM="surfaceless_egl"
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"
if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
# piglit uses virpipe, while virgl_test_server uses llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
GALLIUM_DRIVER=llvmpipe \
GALLIVM_PERF="nopt,no_filter_hacks" \
VTEST_USE_EGL_SURFACELESS=1 \
VTEST_USE_GLES=1 \
virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &
sleep 1
fi
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
# It is assumed that you have already brought up your X server before
# calling this script.
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
else
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
fi
fi
if [ "$ZINK_USE_LAVAPIPE" ]; then
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
fi
# If the job is parallel at the gitlab job level, it will take the
# corresponding fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
FAILURE_MESSAGE=$(printf "%s" "Can't parallelize piglit with multiple profiles")
quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
exit 1
fi
USE_CASELIST=1
fi
replay_minio_upload_images() {
find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
| while read -r line; do
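# Result images are assumed to be named <trace-path>-<tag>-<checksum>.png:
# strip the two trailing '-'-separated fields to recover the trace name, and
# keep only the final field as the file name to upload.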
__TRACE="${line%-*-*}"
if grep -q "^$__PREFIX/$__TRACE: pass$" ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig"; then
if [ "x$CI_PROJECT_PATH" != "x$FDO_UPSTREAM_REPO" ]; then
continue
fi
__MINIO_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE"
__DESTINATION_FILE_PATH="${line##*-}"
if wget -q --method=HEAD "https://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}" 2>/dev/null; then
continue
fi
else
__MINIO_PATH="$JOB_ARTIFACTS_BASE"
__DESTINATION_FILE_PATH="$__MINIO_TRACES_PREFIX/${line##*-}"
# Adding to the JUnit the direct link to the diff page in
# the dashboard
__PIGLIT_TESTCASE_CLASSNAME="piglit\.trace\.$PIGLIT_REPLAY_DEVICE_NAME\.$(dirname $__TRACE | sed 's%/%\\.%g;s@%@\\%@')"
__PIGLIT_TESTCASE_NAME="$(basename $__TRACE | sed 's%\.%_%g;s@%@\\%@')"
__DASHBOARD_URL="https://tracie.freedesktop.org/dashboard/imagediff/${CI_PROJECT_PATH}/${CI_JOB_ID}/${__TRACE}"
__START_TEST_PATTERN='<testcase classname="'"${__PIGLIT_TESTCASE_CLASSNAME}"'" name="'"${__PIGLIT_TESTCASE_NAME}"'" status="fail"'
__REPLACE_TEST_PATTERN='</system-out><failure type="fail"/></testcase>'
# Replace in the range between __START_TEST_PATTERN and
# __REPLACE_TEST_PATTERN leaving __START_TEST_PATTERN out
# from the substitution
sed '\%'"${__START_TEST_PATTERN}"'%,\%'"${__REPLACE_TEST_PATTERN}"'%{\%'"${__START_TEST_PATTERN}"'%b;s%'"${__REPLACE_TEST_PATTERN}"'%</system-out><failure type="fail">To view the image differences visit: '"${__DASHBOARD_URL}"'</failure></testcase>%}' \
-i "$RESULTS"/junit.xml
fi
ci-fairy minio cp $MINIO_ARGS "$RESULTS/$__PREFIX/$line" \
"minio://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
done
}
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
if [ -d results ]; then
cd results && rm -rf ..?* .[!.]* *
fi
cd /piglit
if [ -n "$USE_CASELIST" ]; then
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
PIGLIT_GENTESTS="./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format \"{name}\" > /tmp/case-list.txt"
RUN_GENTESTS="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $PIGLIT_GENTESTS"
eval $RUN_GENTESTS
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi
PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
PIGLIT_CMD="./piglit run --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES "$(/usr/bin/printf "%q" "$RESULTS")
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"
if [ "$RUN_CMD_WRAPPER" ]; then
RUN_CMD="set +e; $RUN_CMD_WRAPPER "$(/usr/bin/printf "%q" "$RUN_CMD")"; set -e"
fi
FAILURE_MESSAGE=$(printf "%s" "Unexpected change in results:")
eval $RUN_CMD
if [ $? -ne 0 ]; then
printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi
ARTIFACTS_BASE_URL="https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts"
if [ ${PIGLIT_JUNIT_RESULTS:-0} -eq 1 ]; then
./piglit summary aggregate "$RESULTS" -o junit.xml
FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the JUnit report for failures at: ${ARTIFACTS_BASE_URL}/results/junit.xml")
fi
PIGLIT_RESULTS="${PIGLIT_RESULTS:-$PIGLIT_PROFILES}"
RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt"
mkdir -p .gitlab-ci/piglit
./piglit summary console "$RESULTS"/results.json.bz2 \
| tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
| head -n -1 | grep -v ": pass" \
| sed '/^summary:/Q' \
> $RESULTSFILE
if [ "x$PIGLIT_PROFILES" = "xreplay" ] \
&& [ ${PIGLIT_REPLAY_UPLOAD_TO_MINIO:-0} -eq 1 ]; then
ci-fairy minio login $MINIO_ARGS $CI_JOB_JWT
__PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
__MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
__MINIO_TRACES_PREFIX="traces"
if [ "x$PIGLIT_REPLAY_SUBCOMMAND" != "xprofile" ]; then
quiet replay_minio_upload_images
fi
fi
if [ -n "$USE_CASELIST" ]; then
# Just filter the expected results based on the tests that were actually
# executed, and switch to the version with no summary
cat ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" | sed '/^summary:/Q' | rev \
| cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
grep -F -f /tmp/executed.txt "$INSTALL/$PIGLIT_RESULTS.txt" \
> ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" || true
elif [ -f "$INSTALL/$PIGLIT_RESULTS.txt" ]; then
cp "$INSTALL/$PIGLIT_RESULTS.txt" \
".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline"
else
touch ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline"
fi
if diff -q ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE; then
exit 0
fi
./piglit summary html --exclude-details=pass \
"$RESULTS"/summary "$RESULTS"/results.json.bz2
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
find "$RESULTS"/summary -type f -name "*.html" -print0 \
| xargs -0 sed -i 's%<img src="file://'"${RESULTS}"'.*-\([0-9a-f]*\)\.png%<img src="https://'"${JOB_ARTIFACTS_BASE}"'/traces/\1.png%g'
find "$RESULTS"/summary -type f -name "*.html" -print0 \
| xargs -0 sed -i 's%<img src="file://%<img src="https://'"${PIGLIT_REPLAY_REFERENCE_IMAGES_BASE}"'/%g'
fi
FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the HTML summary for problems at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html")
quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
quiet diff --color=always -u ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE
exit 1

View File

@@ -1,75 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
VERSION=`head -1 install/VERSION`
ROOTDIR=`pwd`
if [ -d results ]; then
cd results && rm -rf ..?* .[!.]* *
fi
cd /piglit
export OCL_ICD_VENDORS=$ROOTDIR/install/etc/OpenCL/vendors/
set +e
unset DISPLAY
export LD_LIBRARY_PATH=$ROOTDIR/install/lib
clinfo
# If the job is parallel at the gitlab job level, it will take the
# corresponding fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
echo "Can't parallelize piglit with multiple profiles"
exit 1
fi
USE_CASELIST=1
fi
if [ -n "$USE_CASELIST" ]; then
./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format "{name}" > /tmp/case-list.txt
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi
./piglit run -c -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES $ROOTDIR/results
retVal=$?
if [ $retVal -ne 0 ]; then
echo "Found $(cat /tmp/version.txt), expected $VERSION"
fi
set -e
PIGLIT_RESULTS=${PIGLIT_RESULTS:-$PIGLIT_PROFILES}
mkdir -p .gitlab-ci/piglit
./piglit summary console $ROOTDIR/results \
| tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
| head -n -1 \
| grep -v ": pass" \
| sed '/^summary:/Q' \
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt
if [ -n "$USE_CASELIST" ]; then
# Just filter the expected results based on the tests that were actually
# executed, and switch to the version with no summary
cat .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig | sed '/^summary:/Q' | rev \
| cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
grep -F -f /tmp/executed.txt $ROOTDIR/install/$PIGLIT_RESULTS.txt \
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline || true
else
cp $ROOTDIR/install/$PIGLIT_RESULTS.txt .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline
fi
if diff -q .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}; then
exit 0
fi
./piglit summary html --exclude-details=pass $ROOTDIR/results/summary $ROOTDIR/results
echo Unexpected change in results:
diff -u .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}
exit 1

View File

@@ -1,57 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
CROSS_FILE=/cross_file-"$CROSS".txt
# Delete unused bin and includes from artifacts to save space.
rm -rf install/bin install/include
# Strip the drivers in the artifacts to cut 80% of the artifacts size.
if [ -n "$CROSS" ]; then
STRIP=`sed -n -E "s/strip\s*=\s*'(.*)'/\1/p" "$CROSS_FILE"`
if [ -z "$STRIP" ]; then
echo "Failed to find strip command in cross file"
exit 1
fi
else
STRIP="strip"
fi
if [ -z "$ARTIFACTS_DEBUG_SYMBOLS"]; then
find install -name \*.so -exec $STRIP {} \;
fi
# Test runs don't pull down the git tree, so put the dEQP helper
# script and associated bits there.
echo "$(cat VERSION) (git-$(git rev-parse HEAD | cut -b -10))" > install/VERSION
cp -Rp .gitlab-ci/bare-metal install/
cp -Rp .gitlab-ci/common install/
cp -Rp .gitlab-ci/piglit install/
cp -Rp .gitlab-ci/fossils.yml install/
cp -Rp .gitlab-ci/fossils install/
cp -Rp .gitlab-ci/fossilize-runner.sh install/
cp -Rp .gitlab-ci/deqp-runner.sh install/
cp -Rp .gitlab-ci/crosvm-runner.sh install/
cp -Rp .gitlab-ci/crosvm-init.sh install/
cp -Rp .gitlab-ci/deqp-*.txt install/
cp -Rp .gitlab-ci/report-flakes.py install/
cp -Rp .gitlab-ci/vkd3d-proton install/
find . -path \*/ci/\*.txt \
-o -path \*/ci/\*traces\*.yml \
| xargs -I '{}' cp -p '{}' install/
# Tar up the install dir so that symlinks and hardlinks aren't each
# packed separately in the zip file.
mkdir -p artifacts/
tar -cf artifacts/install.tar install
cp -Rp .gitlab-ci/common artifacts/ci-common
cp -Rp .gitlab-ci/lava artifacts/
if [ -n "$MINIO_ARTIFACT_NAME" ]; then
# Pass needed files to the test stage
MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.gz"
gzip -c artifacts/install.tar > ${MINIO_ARTIFACT_NAME}
ci-fairy minio login $CI_JOB_JWT
ci-fairy minio cp ${MINIO_ARTIFACT_NAME} minio://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}
fi

View File

@@ -1,151 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2021 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import io
import re
import socket
import time
class Connection:
def __init__(self, host, port, verbose):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, port))
self.s.setblocking(0)
self.verbose = verbose
def send_line(self, line):
if self.verbose:
print(f"IRC: sending {line}")
self.s.sendall((line + '\n').encode())
def wait(self, secs):
for i in range(secs):
if self.verbose:
while True:
try:
data = self.s.recv(1024)
except io.BlockingIOError:
break
if data == "":
break
for line in data.decode().split('\n'):
print(f"IRC: received {line}")
time.sleep(1)
def quit(self):
self.send_line("QUIT")
self.s.shutdown(socket.SHUT_WR)
self.s.close()
def read_flakes(results):
flakes = []
csv = re.compile("(.*),(.*),(.*)")
for line in open(results, 'r').readlines():
match = csv.match(line)
if match.group(2) == "Flake":
flakes.append(match.group(1))
return flakes
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str,
help='IRC server hostname', required=True)
parser.add_argument('--port', type=int,
help='IRC server port', required=True)
parser.add_argument('--results', type=str,
help='results.csv file from deqp-runner or piglit-runner', required=True)
parser.add_argument('--known-flakes', type=str,
help='*-flakes.txt file passed to deqp-runner or piglit-runner', required=True)
parser.add_argument('--channel', type=str,
help='Known flakes report channel', required=True)
parser.add_argument('--url', type=str,
help='$CI_JOB_URL', required=True)
parser.add_argument('--runner', type=str,
help='$CI_RUNNER_DESCRIPTION', required=True)
parser.add_argument('--branch', type=str,
help='optional branch name')
parser.add_argument('--branch-title', type=str,
help='optional branch title')
parser.add_argument('--job', type=str,
help='$CI_JOB_ID', required=True)
parser.add_argument('--verbose', "-v", action="store_true",
help='log IRC interactions')
args = parser.parse_args()
flakes = read_flakes(args.results)
if not flakes:
exit(0)
known_flakes = []
for line in open(args.known_flakes).readlines():
line = line.strip()
if not line or line.startswith("#"):
continue
known_flakes.append(re.compile(line))
irc = Connection(args.host, args.port, args.verbose)
# The nick needs to be something unique so that multiple runners
# connecting at the same time don't race for one nick and get blocked.
# freenode has a 16-char limit on nicks (9 is the IETF standard, but
# various servers extend that). So, trim off the common prefixes of the
# runner name, and append the job ID so that software runners with more
# than one concurrent job (think swrast) don't collide. For freedreno,
# that gives us a nick as long as db410c-N-JJJJJJJJ, and it'll be a while
# before we make it to 9-digit jobs (we're at 7 so far).
nick = args.runner
nick = nick.replace('mesa-', '')
nick = nick.replace('google-freedreno-', '')
nick += f'-{args.job}'
irc.send_line(f"NICK {nick}")
irc.send_line(f"USER {nick} unused unused: Gitlab CI Notifier")
irc.wait(10)
irc.send_line(f"JOIN {args.channel}")
irc.wait(1)
branchinfo = ""
if args.branch:
branchinfo = f" on branch {args.branch} ({args.branch_title})"
irc.send_line(
f"PRIVMSG {args.channel} :Flakes detected in job {args.url} on {args.runner}{branchinfo}:")
for flake in flakes:
status = "NEW "
for known in known_flakes:
if known.match(flake):
status = ""
break
irc.send_line(f"PRIVMSG {args.channel} :{status}{flake}")
irc.send_line(
f"PRIVMSG {args.channel} :See {args.url}/artifacts/browse/results/")
irc.quit()
if __name__ == '__main__':
main()

View File

@@ -1,18 +0,0 @@
set -e
set -v
ARTIFACTSDIR=`pwd`/shader-db
mkdir -p $ARTIFACTSDIR
export DRM_SHIM_DEBUG=true
LIBDIR=`pwd`/install/lib
export LD_LIBRARY_PATH=$LIBDIR
cd /usr/local/shader-db
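# The *_noop_drm_shim.so libraries preloaded below stand in for the kernel DRM
# device, so each driver's compiler backend can run shader-db on this builder
# without the corresponding GPU being present.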
for driver in freedreno intel v3d; do
echo "Running drm-shim for $driver"
env LD_PRELOAD=$LIBDIR/lib${driver}_noop_drm_shim.so \
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
> $ARTIFACTSDIR/${driver}-shader-db.txt
done

View File

@@ -1,518 +0,0 @@
# This file lists source dependencies to avoid creating/running jobs
# whose outcome cannot be changed by the modifications from a branch.
# Generic rule to not run the job during scheduled pipelines
# ----------------------------------------------------------
.scheduled_pipelines-rules:
rules: &ignore_scheduled_pipelines
if: '$CI_PIPELINE_SOURCE == "schedule"'
when: never
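# The &ignore_scheduled_pipelines anchor above is reused by every job below as
# "- *ignore_scheduled_pipelines", expanding to this same if/when pair so that
# scheduled pipelines never create those jobs.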
# Mesa core source file dependencies
# ----------------------------------
.mesa-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &mesa_core_file_list
- .gitlab-ci.yml
- .gitlab-ci/**/*
- include/**/*
- meson.build
- src/*
- src/compiler/**/*
- src/drm-shim/**/*
- src/egl/**/*
- src/gbm/**/*
- src/glx/**/*
- src/gtest/**/*
- src/hgl/**/*
- src/include/**/*
- src/loader/**/*
- src/mapi/**/*
- src/mesa/*
- src/mesa/drivers/*
- src/mesa/drivers/common/**/*
- src/mesa/drivers/dri/*
- src/mesa/drivers/dri/common/**/*
- src/mesa/main/**/*
- src/mesa/math/**/*
- src/mesa/program/**/*
- src/mesa/sparc/**/*
- src/mesa/state_tracker/**/*
- src/mesa/swrast/**/*
- src/mesa/swrast_setup/**/*
- src/mesa/tnl/**/*
- src/mesa/tnl_dd/**/*
- src/mesa/vbo/**/*
- src/mesa/x86/**/*
- src/mesa/x86-64/**/*
- src/util/**/*
.vulkan-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &vulkan_file_list
- src/vulkan/**/*
when: on_success
- when: never
# Gallium core source file dependencies
# -------------------------------------
.gallium-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &gallium_core_file_list
- src/gallium/*
- src/gallium/auxiliary/**/*
- src/gallium/drivers/*
- src/gallium/include/**/*
- src/gallium/frontends/dri/*
- src/gallium/frontends/glx/**/*
- src/gallium/targets/**/*
- src/gallium/tests/**/*
- src/gallium/winsys/*
.softpipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &softpipe_file_list
- src/gallium/drivers/softpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- when: never
.llvmpipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &llvmpipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- when: never
.lavapipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &lavapipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/frontends/lavapipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- changes:
*vulkan_file_list
when: on_success
- when: never
.llvmpipe-cl-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
- .gitlab-ci.yml
- .gitlab-ci/**/*
- meson.build
- include/**/*
- src/compiler/**/*
- src/include/**/*
- src/util/**/*
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes: &clover_file_list
- src/gallium/frontends/clover/**/*
when: on_success
- when: never
.freedreno-rules:
stage: freedreno
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes:
# Note: when https://gitlab.com/gitlab-org/gitlab/-/issues/198688
# is supported, we can change the src/freedreno/ rule to explicitly
# exclude tools
- src/freedreno/**/*
- src/gallium/drivers/freedreno/**/*
- src/gallium/winsys/freedreno/**/*
when: on_success
- when: never
.panfrost-midgard-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &panfrost_gallium_file_list
- src/gallium/drivers/panfrost/**/*
- src/gallium/winsys/panfrost/**/*
when: on_success
- changes: &panfrost_common_file_list
- src/panfrost/include/*
- src/panfrost/lib/*
- src/panfrost/shared/*
- src/panfrost/util/*
when: on_success
- changes:
- src/panfrost/midgard/**/*
when: on_success
- when: never
.panfrost-bifrost-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*panfrost_common_file_list
when: on_success
- changes:
*panfrost_gallium_file_list
when: on_success
- changes:
- src/panfrost/bifrost/**/*
when: on_success
- when: never
.vc4-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/broadcom/**/*
- src/gallium/drivers/vc4/**/*
- src/gallium/winsys/kmsro/**/*
- src/gallium/winsys/vc4/**/*
when: on_success
- when: never
.v3d-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/broadcom/**/*
- src/gallium/drivers/v3d/**/*
- src/gallium/winsys/kmsro/**/*
- src/gallium/winsys/v3d/**/*
when: on_success
- when: never
.v3dv-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes:
- src/broadcom/**/*
when: on_success
- when: never
.lima-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/lima/**/*
- src/gallium/winsys/lima/**/*
- src/lima/**/*
when: on_success
- when: never
.radv-rules:
stage: amd
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes: &radv_file_list
- src/amd/**/*
- src/vulkan/**/*
when: on_success
- when: never
.virgl-rules:
stage: layered-backends
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes: &virgl_file_list
- src/gallium/drivers/virgl/**/*
- src/gallium/winsys/virgl/**/*
when: on_success
- when: never
.radeonsi-rules:
stage: amd
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &radeonsi_file_list
- src/gallium/drivers/radeonsi/**/*
- src/gallium/winsys/amdgpu/**/*
- src/amd/*
- src/amd/addrlib/**/*
- src/amd/common/**/*
- src/amd/llvm/**/*
- src/amd/registers/**/*
when: on_success
- when: never
.i915g-rules:
stage: intel
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/i915/**/*
- src/gallium/winsys/i915/**/*
- src/intel/**/*
when: on_success
- when: never
.iris-rules:
stage: intel
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/iris/**/*
- src/gallium/winsys/iris/**/*
- src/intel/**/*
when: on_success
- when: never
.anv-rules:
stage: intel
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes:
- src/intel/**/*
when: on_success
- when: never
.zink-rules:
stage: layered-backends
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes: &zink_file_list
- src/gallium/drivers/zink/**/*
when: on_success
- when: never
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.windows-build-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes: &d3d12_file_list
- src/gallium/drivers/d3d12/**/*
- src/microsoft/**/*
- src/gallium/frontends/wgl/*
- src/gallium/winsys/d3d12/wgl/*
- src/gallium/targets/libgl-gdi/*
- src/gallium/targets/libgl-d3d12/*
when: on_success
- changes:
*zink_file_list
when: on_success
- changes:
*radv_file_list
when: on_success
- when: never
.windows-test-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes: *d3d12_file_list
when: on_success
- when: never
# Rules for unusual architectures that only build a subset of drivers
.ppc64el-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*softpipe_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*radv_file_list
when: on_success
- changes:
*radeonsi_file_list
when: on_success
- changes:
*zink_file_list
when: on_success
- changes:
*virgl_file_list
when: on_success
- changes:
- src/gallium/drivers/nouveau/**/*
- src/gallium/winsys/nouveau/**/*
when: on_success
- when: never
.s390x-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*softpipe_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*zink_file_list
when: on_success
- when: never

View File

@@ -1,93 +0,0 @@
#!/bin/sh
set -ex
if [ "x$VK_DRIVER" = "x" ]; then
exit 1
fi
INSTALL=$(realpath -s "$PWD"/install)
RESULTS=$(realpath -s "$PWD"/results)
# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when using a
# command wrapper. Hence, we just set it when running the command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(cat "$INSTALL/VERSION" | sed 's/\./\\./g')
# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/vkd3d-proton-wine64"
export WINEESYNC=1
print_red() {
RED='\033[0;31m'
NC='\033[0m' # No Color
printf "${RED}"
"$@"
printf "${NC}"
}
# wrapper to suppress +x to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
SANITY_MESA_VERSION_CMD="vulkaninfo | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD"
set +e
eval $RUN_CMD
if [ $? -ne 0 ]; then
printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi
set -e
if [ -d "$RESULTS" ]; then
cd "$RESULTS" && rm -rf ..?* .[!.]* * && cd -
else
mkdir "$RESULTS"
fi
VKD3D_PROTON_TESTSUITE_CMD="wine /vkd3d-proton-tests/x64/bin/d3d12.exe >$RESULTS/vkd3d-proton.log 2>&1"
quiet printf "%s\n" "Running vkd3d-proton testsuite..."
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $HANG_DETECTION_CMD $VKD3D_PROTON_TESTSUITE_CMD"
set +e
eval $RUN_CMD
VKD3D_PROTON_RESULTS="vkd3d-proton-${VKD3D_PROTON_RESULTS:-results}"
RESULTSFILE="$RESULTS/$VKD3D_PROTON_RESULTS.txt"
mkdir -p .gitlab-ci/vkd3d-proton
grep "Test failed" "$RESULTS"/vkd3d-proton.log > "$RESULTSFILE"
if [ -f "$INSTALL/$VKD3D_PROTON_RESULTS.txt" ]; then
cp "$INSTALL/$VKD3D_PROTON_RESULTS.txt" \
".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
else
touch ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
fi
if diff -q ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"; then
exit 0
fi
quiet print_red printf "%s\n" "Changes found, see vkd3d-proton.log!"
quiet diff --color=always -u ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"
exit 1

View File

@@ -1,13 +0,0 @@
# escape=`
FROM mcr.microsoft.com/windows:1809
# Make sure any failure in PowerShell scripts is fatal
SHELL ["powershell", "-ExecutionPolicy", "RemoteSigned", "-Command", "$ErrorActionPreference = 'Stop';"]
ENV ErrorActionPreference='Stop'
COPY mesa_deps_vs2019.ps1 C:\
RUN C:\mesa_deps_vs2019.ps1
COPY mesa_deps.ps1 C:\
RUN C:\mesa_deps.ps1

View File

@@ -1,32 +0,0 @@
# Native Windows GitLab CI builds
Unlike Linux, Windows cannot reuse the freedesktop ci-templates as they exist,
because Podman, Skopeo, and Docker-in-Docker builds are not available under
Windows.
We still reuse the same model: build a base container with the core operating
system and infrequently-changed build dependencies, then execute Mesa builds
only inside that base container. This is open-coded in PowerShell scripts.
## Base container build
The base container build job executes the `mesa_container.ps1` script which
reproduces the ci-templates behaviour. It looks for the registry image in
the user's namespace, and exits if found. If not found, it tries to copy
the same image tag from the upstream Mesa repository. If that is not found,
the image is rebuilt inside the user's namespace.
The rebuild executes `docker build` which calls `mesa_deps.ps1` inside the
container to fetch and install all build dependencies. This includes Visual
Studio Community Edition (downloaded from Microsoft, under the license which
allows use by open-source projects), other build tools from Chocolatey, and
finally Meson and Python dependencies from PyPI.
This job is executed inside a Windows shell environment directly inside the
host, without Docker.
## Mesa build
The Mesa build runs inside the base container, executing `mesa_build.ps1`.
This simply compiles Mesa using Meson and Ninja, executing the build and
unit tests. Currently, no build artifacts are captured.

View File

@@ -1,24 +0,0 @@
# force the CA cert cache to be rebuilt, in case Meson tries to access anything
Write-Host "Refreshing Windows TLS CA cache"
(New-Object System.Net.WebClient).DownloadString("https://github.com") >$null
$env:PYTHONUTF8=1
Get-Date
Write-Host "Compiling Mesa"
$builddir = New-Item -ItemType Directory -Name "_build"
$installdir = New-Item -ItemType Directory -Name "_install"
Push-Location $builddir.FullName
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && meson --default-library=shared -Dzlib:default_library=static --buildtype=release -Db_ndebug=false -Dc_std=c17 -Dcpp_std=vc++latest -Db_vscrt=mt --cmake-prefix-path=`"C:\llvm-10`" --pkg-config-path=`"C:\llvm-10\lib\pkgconfig;C:\llvm-10\share\pkgconfig;C:\spirv-tools\lib\pkgconfig`" --prefix=`"$installdir`" -Dllvm=enabled -Dshared-llvm=disabled -Dvulkan-drivers=swrast,amd -Dgallium-drivers=swrast,d3d12,zink -Dmicrosoft-clc=enabled -Dstatic-libclc=all -Dbuild-tests=true -Dwerror=true -Dwarning_level=2 -Dzlib:warning_level=1 -Dlibelf:warning_level=1 && ninja -j32 install && meson test --num-processes 32"
$buildstatus = $?
Pop-Location
Get-Date
if (!$buildstatus) {
Write-Host "Mesa build or test failed"
Exit 1
}
Copy-Item ".\.gitlab-ci\windows\piglit_run.ps1" -Destination $installdir
Copy-Item ".\.gitlab-ci\windows\quick_gl.txt" -Destination $installdir

View File

@@ -1,56 +0,0 @@
# Implements the equivalent of ci-templates container-ifnot-exists, using
# Docker directly as we don't have buildah/podman/skopeo available under
# Windows, nor can we execute Docker-in-Docker
$registry_uri = $args[0]
$registry_username = $args[1]
$registry_password = $args[2]
$registry_user_image = $args[3]
$registry_central_image = $args[4]
Set-Location -Path ".\.gitlab-ci\windows"
docker --config "windows-docker.conf" login -u "$registry_username" -p "$registry_password" "$registry_uri"
if (!$?) {
Write-Host "docker login failed to $registry_uri"
Exit 1
}
# if the image already exists, don't rebuild it
docker --config "windows-docker.conf" pull "$registry_user_image"
if ($?) {
Write-Host "User image $registry_user_image already exists; not rebuilding"
docker --config "windows-docker.conf" logout "$registry_uri"
Exit 0
}
# if the image already exists upstream, copy it
docker --config "windows-docker.conf" pull "$registry_central_image"
if ($?) {
Write-Host "Copying central image $registry_central_image to user image $registry_user_image"
docker --config "windows-docker.conf" tag "$registry_central_image" "$registry_user_image"
docker --config "windows-docker.conf" push "$registry_user_image"
$pushstatus = $?
docker --config "windows-docker.conf" logout "$registry_uri"
if (!$pushstatus) {
Write-Host "Pushing image to $registry_user_image failed"
Exit 1
}
Exit 0
}
Write-Host "No image found at $registry_user_image or $registry_central_image; rebuilding"
docker --config "windows-docker.conf" build --no-cache -t "$registry_user_image" .
if (!$?) {
Write-Host "Container build failed"
docker --config "windows-docker.conf" logout "$registry_uri"
Exit 1
}
Get-Date
docker --config "windows-docker.conf" push "$registry_user_image"
$pushstatus = $?
docker --config "windows-docker.conf" logout "$registry_uri"
if (!$pushstatus) {
Write-Host "Pushing image to $registry_user_image failed"
Exit 1
}


@@ -1,205 +0,0 @@
# Download new TLS certs from Windows Update
Get-Date
Write-Host "Updating TLS certificate store"
$certdir = (New-Item -ItemType Directory -Name "_tlscerts")
certutil -syncwithWU "$certdir"
Foreach ($file in (Get-ChildItem -Path "$certdir\*" -Include "*.crt")) {
Import-Certificate -FilePath $file -CertStoreLocation Cert:\LocalMachine\Root
}
Remove-Item -Recurse -Path $certdir
Get-Date
Write-Host "Installing Chocolatey"
Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
Import-Module "$env:ProgramData\chocolatey\helpers\chocolateyProfile.psm1"
Update-SessionEnvironment
Write-Host "Installing Chocolatey packages"
# Chocolatey tries to download winflexbison from SourceForge, which is not super reliable, and has no retry
# loop of its own - so we give it a helping hand here
For ($i = 0; $i -lt 5; $i++) {
choco install -y python3 --params="/InstallDir:C:\python3"
$python_install = $?
choco install --allow-empty-checksums -y cmake git git-lfs ninja pkgconfiglite winflexbison vulkan-sdk --installargs "ADD_CMAKE_TO_PATH=System"
$other_install = $?
$choco_installed = $other_install -and $python_install
if ($choco_installed) {
Break
}
}
if (!$choco_installed) {
Write-Host "Couldn't install dependencies from Chocolatey"
Exit 1
}
# Add Chocolatey's native install path
Update-SessionEnvironment
# Python and CMake add themselves to the system environment path, which doesn't get refreshed
# until we start a new shell
$env:PATH = "C:\python3;C:\python3\scripts;C:\Program Files\CMake\bin;$env:PATH"
Start-Process -NoNewWindow -Wait git -ArgumentList 'config --global core.autocrlf false'
Get-Date
Write-Host "Installing Meson, Mako and numpy"
pip3 install meson mako numpy
if (!$?) {
Write-Host "Failed to install dependencies from pip"
Exit 1
}
# we want more secure TLS 1.2 for most things, but it breaks SourceForge
# downloads so must be done after Chocolatey use
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -bor [Net.SecurityProtocolType]::Tls13;
Get-Date
Write-Host "Cloning LLVM release/12.x"
git clone -b release/12.x --depth=1 https://github.com/llvm/llvm-project llvm-project
if (!$?) {
Write-Host "Failed to clone LLVM repository"
Exit 1
}
# ideally we want to use a tag here instead of a sha,
# but as of today, SPIRV-LLVM-Translator doesn't have
# a tag matching LLVM 12.0.0
Get-Date
Write-Host "Cloning SPIRV-LLVM-Translator"
git clone https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator
if (!$?) {
Write-Host "Failed to clone SPIRV-LLVM-Translator repository"
Exit 1
}
Push-Location llvm-project/llvm/projects/SPIRV-LLVM-Translator
git checkout 5b641633b3bcc3251a52260eee11db13a79d7258
Pop-Location
Get-Date
# slightly convoluted syntax but avoids the CWD being under the PS filesystem meta-path
$llvm_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build"
Push-Location -Path $llvm_build.FullName
Write-Host "Compiling LLVM and Clang"
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../llvm -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_CRT_RELEASE=MT -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_TARGETS_TO_BUILD=AMDGPU;X86 -DLLVM_OPTIMIZED_TABLEGEN=TRUE -DLLVM_ENABLE_ASSERTIONS=TRUE -DLLVM_INCLUDE_UTILS=OFF -DLLVM_INCLUDE_RUNTIMES=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_INCLUDE_GO_TESTS=OFF -DLLVM_INCLUDE_BENCHMARKS=OFF -DLLVM_BUILD_LLVM_C_DYLIB=OFF -DLLVM_ENABLE_DIA_SDK=OFF -DCLANG_BUILD_TOOLS=ON -DLLVM_SPIRV_INCLUDE_TESTS=OFF && ninja -j32 install'
$buildstatus = $?
Pop-Location
if (!$buildstatus) {
Write-Host "Failed to compile LLVM"
Exit 1
}
Get-Date
$libclc_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build-libclc"
Push-Location -Path $libclc_build.FullName
Write-Host "Compiling libclc"
# libclc can only be built with Ninja, because CMake's VS backend doesn't know how to compile new language types
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../libclc -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS="-m64" -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" && ninja -j32 install'
$buildstatus = $?
Pop-Location
Remove-Item -Recurse -Path $libclc_build
if (!$buildstatus) {
Write-Host "Failed to compile libclc"
Exit 1
}
Remove-Item -Recurse -Path $llvm_build
Get-Date
Write-Host "Cloning SPIRV-Tools"
git clone https://github.com/KhronosGroup/SPIRV-Tools
if (!$?) {
Write-Host "Failed to clone SPIRV-Tools repository"
Exit 1
}
git clone https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers
if (!$?) {
Write-Host "Failed to clone SPIRV-Headers repository"
Exit 1
}
Write-Host "Building SPIRV-Tools"
$spv_build = New-Item -ItemType Directory -Path ".\SPIRV-Tools" -Name "build"
Push-Location -Path $spv_build.FullName
# SPIRV-Tools doesn't use multi-threaded MSVCRT, but we need it to
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\spirv-tools" && ninja -j32 install'
$buildstatus = $?
Pop-Location
Remove-Item -Recurse -Path $spv_build
if (!$buildstatus) {
Write-Host "Failed to compile SPIRV-Tools"
Exit 1
}
Get-Date
Write-Host "Downloading Vulkan-Runtime"
Invoke-WebRequest -Uri 'https://sdk.lunarg.com/sdk/download/latest/windows/vulkan-runtime.exe' -OutFile 'C:\vulkan-runtime.exe' | Out-Null
Write-Host "Installing Vulkan-Runtime"
Start-Process -NoNewWindow -Wait C:\vulkan-runtime.exe -ArgumentList '/S'
if (!$?) {
Write-Host "Failed to install Vulkan-Runtime"
Exit 1
}
Remove-Item C:\vulkan-runtime.exe -Force
Get-Date
Write-Host "Downloading Freeglut"
$freeglut_zip = 'freeglut-MSVC.zip'
$freeglut_url = "https://www.transmissionzero.co.uk/files/software/development/GLUT/$freeglut_zip"
For ($i = 0; $i -lt 5; $i++) {
Invoke-WebRequest -Uri $freeglut_url -OutFile $freeglut_zip
$freeglut_downloaded = $?
if ($freeglut_downloaded) {
Break
}
}
if (!$freeglut_downloaded) {
Write-Host "Failed to download Freeglut"
Exit 1
}
Get-Date
Write-Host "Installing Freeglut"
Expand-Archive $freeglut_zip -DestinationPath C:\
if (!$?) {
Write-Host "Failed to install Freeglut"
Exit 1
}
Get-Date
Write-Host "Downloading glext.h"
New-Item -ItemType Directory -Path ".\glext" -Name "GL"
$ProgressPreference = "SilentlyContinue"
Invoke-WebRequest -Uri 'https://www.khronos.org/registry/OpenGL/api/GL/glext.h' -OutFile '.\glext\GL\glext.h' | Out-Null
Get-Date
Write-Host "Cloning Piglit"
git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\src\piglit'
if (!$?) {
Write-Host "Failed to clone Piglit repository"
Exit 1
}
Push-Location -Path C:\src\piglit
git checkout b0bbeb876a506e0ee689dd7e17cee374c8284058
Pop-Location
Get-Date
$piglit_build = New-Item -ItemType Directory -Path "C:\src\piglit" -Name "build"
Push-Location -Path $piglit_build.FullName
Write-Host "Compiling Piglit"
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="C:\Piglit" -DGLUT_INCLUDE_DIR=C:\freeglut\include -DGLUT_glut_LIBRARY_RELEASE=C:\freeglut\lib\x64\freeglut.lib -DGLEXT_INCLUDE_DIR=.\glext && ninja -j32'
$buildstatus = $?
ninja -j32 install | Out-Null
$installstatus = $?
Pop-Location
Remove-Item -Recurse -Path $piglit_build
if (!$buildstatus -Or !$installstatus) {
Write-Host "Failed to compile or install Piglit"
Exit 1
}
Copy-Item -Path C:\freeglut\bin\x64\freeglut.dll -Destination C:\Piglit\lib\piglit\bin\freeglut.dll
Get-Date
Write-Host "Complete"


@@ -1,18 +0,0 @@
# we want more secure TLS 1.2 for most things
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;
# VS16.x is 2019
$msvc_2019_url = 'https://aka.ms/vs/16/release/vs_buildtools.exe'
Get-Date
Write-Host "Downloading Visual Studio 2019 build tools"
Invoke-WebRequest -Uri $msvc_2019_url -OutFile C:\vs_buildtools.exe
Get-Date
Write-Host "Installing Visual Studio 2019"
Start-Process -NoNewWindow -Wait C:\vs_buildtools.exe -ArgumentList '--wait --quiet --norestart --nocache --installPath C:\BuildTools --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Component.VC.ATL --add Microsoft.VisualStudio.Component.VC.ATLMFC --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 --add Microsoft.VisualStudio.Component.Graphics.Tools --add Microsoft.VisualStudio.Component.Windows10SDK.18362 --includeRecommended'
if (!$?) {
Write-Host "Failed to install Visual Studio tools"
Exit 1
}
Remove-Item C:\vs_buildtools.exe -Force


@@ -1,26 +0,0 @@
$env:PIGLIT_NO_FAST_SKIP = 1
Copy-Item -Path _install\bin\opengl32.dll -Destination C:\Piglit\lib\piglit\bin\opengl32.dll
# Run this using VsDevCmd.bat to ensure DXIL.dll is in %PATH%
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && py -3 C:\Piglit\bin\piglit.py run `"$env:PIGLIT_PROFILE`" $env:PIGLIT_OPTIONS $env:PIGLIT_TESTS .\results"
py -3 C:\Piglit\bin\piglit.py summary console .\results | Select -SkipLast 1 | Select-String -NotMatch -Pattern ': pass' | Set-Content -Path .\result.txt
$reference = Get-Content ".\_install\$env:PIGLIT_PROFILE.txt"
$result = Get-Content .\result.txt
if (-Not ($reference -And $result)) {
Exit 1
}
$diff = Compare-Object -ReferenceObject $reference -DifferenceObject $result
if (-Not $diff) {
Exit 0
}
py -3 C:\Piglit\bin\piglit.py summary html --exclude-details=pass .\summary .\results
Write-Host "Unexpected change in results:"
Write-Output $diff | Format-Table -Property SideIndicator,InputObject -Wrap
Exit 1

Some files were not shown because too many files have changed in this diff.