Compare commits

120 Commits

mesa-21.3. ... mesa_5_0_1
| Author | SHA1 | Date |
|---|---|---|
|  | 6faeeb59ba |  |
|  | 0670bb52e2 |  |
|  | 42ed7b78c3 |  |
|  | c44e5d39a5 |  |
|  | af709ec217 |  |
|  | c6c35a0d7e |  |
|  | 4ccebff103 |  |
|  | 0a89996605 |  |
|  | 3bff7aed0e |  |
|  | 605b7859c2 |  |
|  | bc0359a076 |  |
|  | 6677a919f1 |  |
|  | b099d2137e |  |
|  | b58c0dec59 |  |
|  | 9935d3434d |  |
|  | b28cbbf5a4 |  |
|  | a5213e2cac |  |
|  | e62a6bdb3a |  |
|  | 7709bd06f2 |  |
|  | 4b3f54ecb1 |  |
|  | 3a55d83d21 |  |
|  | 6899598be8 |  |
|  | 88db8b061b |  |
|  | 8b0f7c5ab7 |  |
|  | 683dba1164 |  |
|  | 4965446dbd |  |
|  | 9c3b611b85 |  |
|  | 48f8949cd2 |  |
|  | cd4fc963a4 |  |
|  | c24d9b330e |  |
|  | 62968d677c |  |
|  | 5e4a96980e |  |
|  | 6397f53174 |  |
|  | 3de75f6f68 |  |
|  | d1f324a779 |  |
|  | 3442cd9a9f |  |
|  | 324616ed83 |  |
|  | 6c59c0b9ec |  |
|  | 2ccb06cb4e |  |
|  | 7b1b099af3 |  |
|  | 6accaacbed |  |
|  | 30beb26dbb |  |
|  | 735a8aee5a |  |
|  | 764b1d5455 |  |
|  | b87a9089da |  |
|  | 69a4ff186a |  |
|  | d16d5fb06a |  |
|  | 8fee42cd6f |  |
|  | 34af895a99 |  |
|  | 4c5745b081 |  |
|  | dd2f83fb0e |  |
|  | 379c151989 |  |
|  | 379412994f |  |
|  | c5e6bd00be |  |
|  | bf490e3ae5 |  |
|  | 984f7e5324 |  |
|  | 7a23c68f35 |  |
|  | cb2eaf84a3 |  |
|  | 8829f949f8 |  |
|  | 015c4787d7 |  |
|  | 19c628d279 |  |
|  | 50bfa18670 |  |
|  | 57f9dc808a |  |
|  | ef2d42b144 |  |
|  | e3aa4c1d98 |  |
|  | d7facf95f8 |  |
|  | 3543b12519 |  |
|  | cfdb6711f8 |  |
|  | 283eab087b |  |
|  | 9c04e1c1d9 |  |
|  | 16a832fa89 |  |
|  | 5b8c98abee |  |
|  | 8126d169ec |  |
|  | 27a2f9fdfd |  |
|  | ad6514aac0 |  |
|  | de0c25a481 |  |
|  | a77151fb44 |  |
|  | df27002ac9 |  |
|  | 7e07dfb604 |  |
|  | 76cb735d0c |  |
|  | 530ce7f0ed |  |
|  | d856a056d3 |  |
|  | 7e7415fbab |  |
|  | 789d79e882 |  |
|  | 0e4e3606a3 |  |
|  | b53092916f |  |
|  | 92b132b966 |  |
|  | a5eb3c6a37 |  |
|  | 050268971b |  |
|  | dbdc9e92b5 |  |
|  | ca3d8e4605 |  |
|  | fc16876967 |  |
|  | 83666eb33f |  |
|  | adbf94ca38 |  |
|  | 7b3a50acf1 |  |
|  | 4396dd1a3b |  |
|  | 8f5f0fd6f8 |  |
|  | ce733f495e |  |
|  | b0e246bf9a |  |
|  | 7ffbdc48b6 |  |
|  | bd77908248 |  |
|  | b319c105ea |  |
|  | a987857ca5 |  |
|  | b6af31d4c6 |  |
|  | 3cfadcf097 |  |
|  | 10174c5efd |  |
|  | f24e9c8ad7 |  |
|  | 8f47c7f0df |  |
|  | dce96e2450 |  |
|  | c264409c46 |  |
|  | 83df49f95d |  |
|  | ffb4ef6929 |  |
|  | 1f7aeedf55 |  |
|  | 2e5a88ab6a |  |
|  | d520ff08d0 |  |
|  | ef0e1dcc2e |  |
|  | 6412641b19 |  |
|  | 3ee066b48f |  |
|  | 33c0b3e233 |  |
|  | 658145bd82 |  |
```diff
@@ -1,18 +0,0 @@
-((nil . ((show-trailing-whitespace . t)))
- (prog-mode
-  (indent-tabs-mode . nil)
-  (tab-width . 8)
-  (c-basic-offset . 3)
-  (c-file-style . "stroustrup")
-  (fill-column . 78)
-  (eval . (progn
-          (c-set-offset 'case-label '0)
-          (c-set-offset 'innamespace '0)
-          (c-set-offset 'inline-open '0)))
-  (whitespace-style face indentation)
-  (whitespace-line-column . 79)
-  (eval ignore-errors
-        (require 'whitespace)
-        (whitespace-mode 1)))
- (makefile-mode (indent-tabs-mode . t))
- )
```
```diff
@@ -1,48 +0,0 @@
-# To use this config on you editor, follow the instructions at:
-# http://editorconfig.org
-
-root = true
-
-[*]
-charset = utf-8
-insert_final_newline = true
-tab_width = 8
-
-[*.{c,h,cpp,hpp,cc,hh}]
-indent_style = space
-indent_size = 3
-max_line_length = 78
-
-[{Makefile*,*.mk}]
-indent_style = tab
-
-[{*.py,SCons*}]
-indent_style = space
-indent_size = 4
-
-[*.pl]
-indent_style = space
-indent_size = 4
-
-[*.m4]
-indent_style = space
-indent_size = 2
-
-[*.yml]
-indent_style = space
-indent_size = 2
-
-[*.html]
-indent_style = space
-indent_size = 2
-
-[*.rst]
-indent_style = space
-indent_size = 3
-
-[*.patch]
-trim_trailing_whitespace = false
-
-[{meson.build,meson_options.txt}]
-indent_style = space
-indent_size = 2
```
.gitignore (vendored), 4 lines changed

```diff
@@ -1,4 +0,0 @@
-*.pyc
-*.pyo
-*.out
-build
```
.gitlab-ci.yml, 1353 lines changed

File diff suppressed because it is too large.
```diff
@@ -1,17 +0,0 @@
-# Note: skips lists for CI are just a list of lines that, when
-# non-zero-length and not starting with '#', will regex match to
-# delete lines from the test list.  Be careful.
-
-# These are tremendously slow (pushing toward a minute), and aren't
-# reliable to be run in parallel with other tests due to CPU-side timing.
-dEQP-GLES[0-9]*.functional.flush_finish.*
-
-# https://gitlab.freedesktop.org/mesa/mesa/-/issues/4575
-dEQP-VK.wsi.display.get_display_plane_capabilities
-
-# piglit: WGL is Windows-only
-wgl@.*
-
-# These are sensitive to CPU timing, and would need to be run in isolation
-# on the system rather than in parallel with other tests.
-glx@glx_arb_sync_control@timing.*
```
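The header comment in the hunk above describes how these skip lists are consumed: any non-empty line that does not start with `#` is treated as a regex that removes matching entries from the test list. Purely as an illustration (not part of this diff), filtering a caselist with a skips file in this format could look like the sketch below; the file names `skips.txt` and `caselist.txt` are hypothetical.

```sh
# Illustrative sketch only; file names are hypothetical, not from the CI scripts.
# 1. Keep only the active skip patterns: non-empty lines not starting with '#'.
grep -v -e '^#' -e '^$' skips.txt > active-skips.txt
# 2. Drop every caselist entry that matches one of those regexes.
grep -v -E -f active-skips.txt caselist.txt > caselist.filtered.txt
```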
```diff
@@ -1,2 +0,0 @@
-[*.sh]
-indent_size = 2
```
```diff
@@ -1,26 +0,0 @@
-#!/bin/sh
-
-# This test script groups together a bunch of fast dEQP variant runs
-# to amortize the cost of rebooting the board.
-
-set -ex
-
-EXIT=0
-
-# Run reset tests without parallelism:
-if ! env \
-  DEQP_RESULTS_DIR=results/reset \
-  FDO_CI_CONCURRENT=1 \
-  DEQP_CASELIST_FILTER='.*reset.*' \
-  /install/deqp-runner.sh; then
-    EXIT=1
-fi
-
-# Then run everything else with parallelism:
-if ! env \
-  DEQP_RESULTS_DIR=results/nonrobustness \
-  DEQP_CASELIST_INV_FILTER='.*reset.*' \
-  /install/deqp-runner.sh; then
-    EXIT=1
-fi
-
```
```diff
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Init entrypoint for bare-metal devices; calls common init code.
-
-# First stage: very basic setup to bring up network and /dev etc
-/init-stage1.sh
-
-# Second stage: run jobs
-test $? -eq 0 && /init-stage2.sh
-
-# Wait until the job would have timed out anyway, so we don't spew a "init
-# exited" panic.
-sleep 6000
```
```diff
@@ -1,101 +0,0 @@
-#!/bin/bash
-
-# Boot script for Chrome OS devices attached to a servo debug connector, using
-# NFS and TFTP to boot.
-
-# We're run from the root of the repo, make a helper var for our paths
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-# Runner config checks
-if [ -z "$BM_SERIAL" ]; then
-  echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the CPU serial device."
-  exit 1
-fi
-
-if [ -z "$BM_SERIAL_EC" ]; then
-  echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the EC serial device for controlling board power"
-  exit 1
-fi
-
-if [ ! -d /nfs ]; then
-  echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
-  exit 1
-fi
-
-if [ ! -d /tftp ]; then
-  echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
-  exit 1
-fi
-
-# job config checks
-if [ -z "$BM_KERNEL" ]; then
-  echo "Must set BM_KERNEL to your board's kernel FIT image"
-  exit 1
-fi
-
-if [ -z "$BM_ROOTFS" ]; then
-  echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
-  exit 1
-fi
-
-if [ -z "$BM_CMDLINE" ]; then
-  echo "Must set BM_CMDLINE to your board's kernel command line arguments"
-  exit 1
-fi
-
-set -ex
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results
-
-# Create the rootfs in the NFS directory.  rm to make sure it's in a pristine
-# state, since it's volume-mounted on the host.
-rsync -a --delete $BM_ROOTFS/ /nfs/
-mkdir -p /nfs/results
-. $BM/rootfs-setup.sh /nfs
-
-# Put the kernel/dtb image and the boot command line in the tftp directory for
-# the board to find.  For normal Mesa development, we build the kernel and
-# store it in the docker container that this script is running in.
-#
-# However, container builds are expensive, so when you're hacking on the
-# kernel, it's nice to be able to skip the half hour container build and plus
-# moving that container to the runner.  So, if BM_KERNEL is a URL, fetch it
-# instead of looking in the container.  Note that the kernel build should be
-# the output of:
-#
-# make Image.lzma
-#
-# mkimage \
-#  -A arm64 \
-#  -f auto \
-#  -C lzma \
-#  -d arch/arm64/boot/Image.lzma \
-#  -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
-#  cheza-image.img
-
-rm -rf /tftp/*
-if echo "$BM_KERNEL" | grep -q http; then
-  apt install -y wget
-  wget $BM_KERNEL -O /tftp/vmlinuz
-else
-  cp $BM_KERNEL /tftp/vmlinuz
-fi
-echo "$BM_CMDLINE" > /tftp/cmdline
-
-set +e
-python3 $BM/cros_servo_run.py \
-        --cpu $BM_SERIAL \
-        --ec $BM_SERIAL_EC
-ret=$?
-set -e
-
-# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
-# will look for them.
-cp -Rp /nfs/results/. results/
-
-exit $ret
```
```diff
@@ -1,186 +0,0 @@
-
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Google LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import queue
-import re
-from serial_buffer import SerialBuffer
-import sys
-import threading
-
-
-class CrosServoRun:
-    def __init__(self, cpu, ec):
-        # Merged FIFO for the two serial buffers, fed by threads.
-        self.serial_queue = queue.Queue()
-        self.sentinel = object()
-        self.threads_done = 0
-
-        self.ec_ser = SerialBuffer(
-            ec, "results/serial-ec.txt", "R SERIAL-EC> ")
-        self.cpu_ser = SerialBuffer(
-            cpu, "results/serial.txt", "R SERIAL-CPU> ")
-
-        self.iter_feed_ec = threading.Thread(
-            target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
-        self.iter_feed_ec.start()
-
-        self.iter_feed_cpu = threading.Thread(
-            target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
-        self.iter_feed_cpu.start()
-
-    # Feed lines from our serial queues into the merged queue, marking when our
-    # input is done.
-    def iter_feed_queue(self, it):
-        for i in it:
-            self.serial_queue.put(i)
-        self.serial_queue.put(sentinel)
-
-    # Return the next line from the queue, counting how many threads have
-    # terminated and joining when done
-    def get_serial_queue_line(self):
-        line = self.serial_queue.get()
-        if line == self.sentinel:
-            self.threads_done = self.threads_done + 1
-            if self.threads_done == 2:
-                self.iter_feed_cpu.join()
-                self.iter_feed_ec.join()
-        return line
-
-    # Returns an iterator for getting the next line.
-    def serial_queue_lines(self):
-        return iter(self.get_serial_queue_line, self.sentinel)
-
-    def ec_write(self, s):
-        print("W SERIAL-EC> %s" % s)
-        self.ec_ser.serial.write(s.encode())
-
-    def cpu_write(self, s):
-        print("W SERIAL-CPU> %s" % s)
-        self.cpu_ser.serial.write(s.encode())
-
-    def print_error(self, message):
-        RED = '\033[0;31m'
-        NO_COLOR = '\033[0m'
-        print(RED + message + NO_COLOR)
-
-    def run(self):
-        # Flush any partial commands in the EC's prompt, then ask for a reboot.
-        self.ec_write("\n")
-        self.ec_write("reboot\n")
-
-        # This is emitted right when the bootloader pauses to check for input.
-        # Emit a ^N character to request network boot, because we don't have a
-        # direct-to-netboot firmware on cheza.
-        for line in self.serial_queue_lines():
-            if re.search("load_archive: loading locale_en.bin", line):
-                self.cpu_write("\016")
-                break
-
-            # The Cheza boards have issues with failing to bring up power to
-            # the system sometimes, possibly dependent on ambient temperature
-            # in the farm.
-            if re.search("POWER_GOOD not seen in time", line):
-                self.print_error("Detected intermittent poweron failure, restarting run...")
-                return 2
-
-        tftp_failures = 0
-        for line in self.serial_queue_lines():
-            if re.search("---. end Kernel panic", line):
-                return 1
-
-            # The Cheza firmware seems to occasionally get stuck looping in
-            # this error state during TFTP booting, possibly based on amount of
-            # network traffic around it, but it'll usually recover after a
-            # reboot.
-            if re.search("R8152: Bulk read error 0xffffffbf", line):
-                tftp_failures += 1
-                if tftp_failures >= 100:
-                    self.print_error("Detected intermittent tftp failure, restarting run...")
-                    return 2
-
-            # There are very infrequent bus errors during power management transitions
-            # on cheza, which we don't expect to be the case on future boards.
-            if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
-                self.print_error("Detected cheza power management bus error, restarting run...")
-                return 2
-
-            # If the network device dies, it's probably not graphics's fault, just try again.
-            if re.search("NETDEV WATCHDOG", line):
-                self.print_error(
-                    "Detected network device failure, restarting run...")
-                return 2
-
-            # These HFI response errors started appearing with the introduction
-            # of piglit runs.  CosmicPenguin says:
-            #
-            # "message ID 106 isn't a thing, so likely what happened is that we
-            # got confused when parsing the HFI queue.  If it happened on only
-            # one run, then memory corruption could be a possible clue"
-            #
-            # Given that it seems to trigger randomly near a GPU fault and then
-            # break many tests after that, just restart the whole run.
-            if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
-                self.print_error("Detected cheza power management bus error, restarting run...")
-                return 2
-
-            if re.search("coreboot.*bootblock starting", line):
-                self.print_error(
-                    "Detected spontaneous reboot, restarting run...")
-                return 2
-
-            result = re.search("hwci: mesa: (\S*)", line)
-            if result:
-                if result.group(1) == "pass":
-                    return 0
-                else:
-                    return 1
-
-        self.print_error("Reached the end of the CPU serial log without finding a result")
-        return 1
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--cpu', type=str,
-                        help='CPU Serial device', required=True)
-    parser.add_argument(
-        '--ec', type=str, help='EC Serial device', required=True)
-    args = parser.parse_args()
-
-    servo = CrosServoRun(args.cpu, args.ec)
-
-    while True:
-        retval = servo.run()
-        if retval != 2:
-            break
-
-    # power down the CPU on the device
-    servo.ec_write("power off\n")
-
-    sys.exit(retval)
-
-
-if __name__ == '__main__':
-    main()
```
```diff
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
-    echo "Must supply a relay arg"
-    exit 1
-fi
-
-$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
```
```diff
@@ -1,28 +0,0 @@
-#!/usr/bin/python3
-
-import sys
-import socket
-
-host = sys.argv[1]
-port = sys.argv[2]
-mode = sys.argv[3]
-relay = sys.argv[4]
-msg = None
-
-if mode == "on":
-    msg = b'\x20'
-else:
-    msg = b'\x21'
-
-msg += int(relay).to_bytes(1, 'big')
-msg += b'\x00'
-
-c = socket.create_connection((host, int(port)))
-c.sendall(msg)
-
-data = c.recv(1)
-c.close()
-
-if data[0] == b'\x01':
-    print('Command failed')
-    sys.exit(1)
```
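For context (not part of this diff), the relay helper above opens a TCP connection and writes a fixed three-byte command: 0x20 for "on" or 0x21 for "off", then the relay number, then a trailing 0x00, and reads back a single status byte. A rough manual equivalent, assuming netcat is available on the runner (an assumption; the CI scripts always go through the Python helper), would be:

```sh
# Illustrative sketch only: send the same 3-byte "relay 3 on" command the
# helper above builds (0x20 = on, 0x21 = off, then relay number, then 0x00).
# Using nc here is an assumption, not something these scripts do.
printf '\x20\x03\x00' | nc "$ETH_HOST" "$ETH_PORT"
```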
```diff
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
-    echo "Must supply a relay arg"
-    exit 1
-fi
-
-$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT off $relay
-sleep 5
-$CI_PROJECT_DIR/install/bare-metal/eth008-power-relay.py $ETH_HOST $ETH_PORT on $relay
```
```diff
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-set -e
-
-STRINGS=$(mktemp)
-ERRORS=$(mktemp)
-
-trap "rm $STRINGS; rm $ERRORS;" EXIT
-
-FILE=$1
-shift 1
-
-while getopts "f:e:" opt; do
-  case $opt in
-    f) echo "$OPTARG" >> $STRINGS;;
-    e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
-  esac
-done
-shift $((OPTIND -1))
-
-echo "Waiting for $FILE to say one of following strings"
-cat $STRINGS
-
-while ! egrep -wf $STRINGS $FILE; do
-  sleep 2
-done
-
-if egrep -wf $ERRORS $FILE; then
-  exit 1
-fi
```
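This watcher script (its path is not shown in this hunk) takes the file to monitor as its first argument, then any number of `-f` strings to wait for and `-e` strings that count as errors; it polls with `egrep` until one of the strings appears and exits non-zero if an error string matched. A usage sketch, with a hypothetical script name, log path, and markers, might look like:

```sh
# Illustrative sketch only: the script name, log file, and marker strings
# below are hypothetical.  Wait until the log shows either marker; the
# script exits 1 if the error marker is what appeared.
./wait-for-output.sh results/serial-output.txt \
  -f "fastboot: processing commands" \
  -e "Kernel panic"
```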
```diff
@@ -1,148 +0,0 @@
-#!/bin/bash
-
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
-  echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
-  echo "BM_SERIAL:"
-  echo "  This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
-  echo "BM_SERIAL_SCRIPT:"
-  echo "  This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
-  exit 1
-fi
-
-if [ -z "$BM_POWERUP" ]; then
-  echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is a shell script that should reset the device and begin its boot sequence"
-  echo "such that it pauses at fastboot."
-  exit 1
-fi
-
-if [ -z "$BM_POWERDOWN" ]; then
-  echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is a shell script that should power off the device."
-  exit 1
-fi
-
-if [ -z "$BM_FASTBOOT_SERIAL" ]; then
-  echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
-  echo "This must be the a stable-across-resets fastboot serial number."
-  exit 1
-fi
-
-if [ -z "$BM_KERNEL" ]; then
-  echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
-  exit 1
-fi
-
-if [ -z "$BM_DTB" ]; then
-  echo "Must set BM_DTB to your board's DTB file in the job's variables:"
-  exit 1
-fi
-
-if [ -z "$BM_ROOTFS" ]; then
-  echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
-  exit 1
-fi
-
-if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
-  BM_FASTBOOT_NFSROOT=1
-fi
-
-set -ex
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results/
-
-if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
-  # Create the rootfs in the NFS directory.  rm to make sure it's in a pristine
-  # state, since it's volume-mounted on the host.
-  rsync -a --delete $BM_ROOTFS/ /nfs/
-  mkdir -p /nfs/results
-  . $BM/rootfs-setup.sh /nfs
-
-  # Root on NFS, no need for an inintramfs.
-  rm -f rootfs.cpio.gz
-  touch rootfs.cpio
-  gzip rootfs.cpio
-else
-  # Create the rootfs in a temp dir
-  rsync -a --delete $BM_ROOTFS/ rootfs/
-  . $BM/rootfs-setup.sh rootfs
-
-  # Finally, pack it up into a cpio rootfs.  Skip the vulkan CTS since none of
-  # these devices use it and it would take up space in the initrd.
-
-  if [ -n "$PIGLIT_PROFILES" ]; then
-    EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
-  else
-    EXCLUDE_FILTER="piglit|python"
-  fi
-
-  pushd rootfs
-  find -H | \
-    egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
-    egrep -v "traces-db|apitrace|renderdoc" | \
-    egrep -v $EXCLUDE_FILTER | \
-    cpio -H newc -o | \
-    xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
-  popd
-fi
-
-# Make the combined kernel image and dtb for passing to fastboot.  For normal
-# Mesa development, we build the kernel and store it in the docker container
-# that this script is running in.
-#
-# However, container builds are expensive, so when you're hacking on the
-# kernel, it's nice to be able to skip the half hour container build and plus
-# moving that container to the runner.  So, if BM_KERNEL+BM_DTB are URLs,
-# fetch them instead of looking in the container.
-if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
-  apt install -y wget
-
-  wget $BM_KERNEL -O kernel
-  wget $BM_DTB -O dtb
-
-  cat kernel dtb > Image.gz-dtb
-  rm kernel dtb
-else
-  cat $BM_KERNEL $BM_DTB > Image.gz-dtb
-fi
-
-mkdir -p artifacts
-abootimg \
-  --create artifacts/fastboot.img \
-  -k Image.gz-dtb \
-  -r rootfs.cpio.gz \
-  -c cmdline="$BM_CMDLINE"
-rm Image.gz-dtb
-
-export PATH=$BM:$PATH
-
-# Start background command for talking to serial if we have one.
-if [ -n "$BM_SERIAL_SCRIPT" ]; then
-  $BM_SERIAL_SCRIPT > results/serial-output.txt &
-
-  while [ ! -e results/serial-output.txt ]; do
-    sleep 1
-  done
-fi
-
-set +e
-$BM/fastboot_run.py \
-  --dev="$BM_SERIAL" \
-  --fbserial="$BM_FASTBOOT_SERIAL" \
-  --powerup="$BM_POWERUP" \
-  --powerdown="$BM_POWERDOWN"
-ret=$?
-set -e
-
-if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
-  # Bring artifacts back from the NFS dir to the build dir where gitlab-runner
-  # will look for them.
-  cp -Rp /nfs/results/. results/
-fi
-
-exit $ret
```
```diff
@@ -1,124 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Google LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import os
-import re
-from serial_buffer import SerialBuffer
-import sys
-import threading
-
-class FastbootRun:
-    def __init__(self, args):
-        self.powerup = args.powerup
-        # We would like something like a 1 minute timeout, but the piglit traces
-        # jobs stall out for long periods of time.
-        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ", timeout=600)
-        self.fastboot="fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)
-
-    def print_error(self, message):
-        RED = '\033[0;31m'
-        NO_COLOR = '\033[0m'
-        print(RED + message + NO_COLOR)
-
-    def logged_system(self, cmd):
-        print("Running '{}'".format(cmd))
-        return os.system(cmd)
-
-    def run(self):
-        if self.logged_system(self.powerup) != 0:
-            return 1
-
-        fastboot_ready = False
-        for line in self.ser.lines():
-            if re.search("fastboot: processing commands", line) or \
-                re.search("Listening for fastboot command on", line):
-                fastboot_ready = True
-                break
-
-            if re.search("data abort", line):
-                self.print_error("Detected crash during boot, restarting run...")
-                return 2
-
-        if not fastboot_ready:
-            self.print_error("Failed to get to fastboot prompt, restarting run...")
-            return 2
-
-        if self.logged_system(self.fastboot) != 0:
-            return 1
-
-        for line in self.ser.lines():
-            if re.search("---. end Kernel panic", line):
-                return 1
-
-            # The db820c boards intermittently reboot.  Just restart the run
-            # when if we see a reboot after we got past fastboot.
-            if re.search("PON REASON", line):
-                self.print_error("Detected spontaneous reboot, restarting run...")
-                return 2
-
-            # db820c sometimes wedges around iommu fault recovery
-            if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
-                self.print_error(
-                    "Detected kernel soft lockup, restarting run...")
-                return 2
-
-            # If the network device dies, it's probably not graphics's fault, just try again.
-            if re.search("NETDEV WATCHDOG", line):
-                self.print_error(
-                    "Detected network device failure, restarting run...")
-                return 2
-
-            result = re.search("hwci: mesa: (\S*)", line)
-            if result:
-                if result.group(1) == "pass":
-                    return 0
-                else:
-                    return 1
-
-        self.print_error("Reached the end of the CPU serial log without finding a result, restarting run...")
-        return 2
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
-    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
-    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
-    parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
-    args = parser.parse_args()
-
-    fastboot = FastbootRun(args)
-
-    while True:
-        retval = fastboot.run()
-        if retval != 2:
-            break
-
-        fastboot = FastbootRun(args)
-
-    fastboot.logged_system(args.powerdown)
-
-    sys.exit(retval)
-
-if __name__ == '__main__':
-    main()
```
```diff
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
-    echo "Must supply a relay arg"
-    exit 1
-fi
-
-$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
```
```diff
@@ -1,19 +0,0 @@
-#!/usr/bin/python3
-
-import sys
-import serial
-
-mode = sys.argv[1]
-relay = sys.argv[2]
-
-# our relays are "off" means "board is powered".
-mode_swap = {
-     "on" : "off",
-     "off" : "on",
-}
-mode = mode_swap[mode]
-
-ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
-command = "relay {} {}\n\r".format(mode, relay)
-ser.write(command.encode())
-ser.close()
```
```diff
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
-    echo "Must supply a relay arg"
-    exit 1
-fi
-
-$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
-sleep 5
-$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay
```
```diff
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-if [ -z "$BM_POE_INTERFACE" ]; then
-    echo "Must supply the PoE Interface to power up"
-    exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
-    echo "Must supply the PoE Switch host"
-    exit 1
-fi
-
-SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
-SNMP_ON="i 1"
-SNMP_OFF="i 2"
-
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
```
```diff
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-if [ -z "$BM_POE_INTERFACE" ]; then
-    echo "Must supply the PoE Interface to power up"
-    exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
-    echo "Must supply the PoE Switch host"
-    exit 1
-fi
-
-SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
-SNMP_ON="i 1"
-SNMP_OFF="i 2"
-
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
-sleep 3s
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"
```
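One detail worth spelling out (not part of this diff): both PoE scripts build the SNMP OID by adding 48 to the switch interface number, then set it to integer 2 for "off" and 1 for "on". A worked example of that arithmetic, assuming a hypothetical `BM_POE_INTERFACE=3`:

```sh
# Illustrative sketch only: with BM_POE_INTERFACE=3 the scripts above target
# SNMPv2-SMI::mib-2.105.1.1.1.3.1.51, setting it to "i 2" (off), waiting 3s,
# then "i 1" (on) to power-cycle that switch port.
BM_POE_INTERFACE=3
expr 48 + $BM_POE_INTERFACE    # prints 51
```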
```diff
@@ -1,147 +0,0 @@
-#!/bin/bash
-
-# Boot script for devices attached to a PoE switch, using NFS for the root
-# filesystem.
-
-# We're run from the root of the repo, make a helper var for our paths
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-# Runner config checks
-if [ -z "$BM_SERIAL" ]; then
-  echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the serial port to listen the device."
-  exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
-  echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the PoE switch address to connect for powering up/down devices."
-  exit 1
-fi
-
-if [ -z "$BM_POE_USERNAME" ]; then
-  echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the PoE switch username."
-  exit 1
-fi
-
-if [ -z "$BM_POE_PASSWORD" ]; then
-  echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the PoE switch password."
-  exit 1
-fi
-
-if [ -z "$BM_POE_INTERFACE" ]; then
-  echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is the PoE switch interface where the device is connected."
-  exit 1
-fi
-
-if [ -z "$BM_POWERUP" ]; then
-  echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is a shell script that should power up the device and begin its boot sequence."
-  exit 1
-fi
-
-if [ -z "$BM_POWERDOWN" ]; then
-  echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
-  echo "This is a shell script that should power off the device."
-  exit 1
-fi
-
-if [ ! -d /nfs ]; then
-  echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
-  exit 1
-fi
-
-if [ ! -d /tftp ]; then
-  echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
-  exit 1
-fi
-
-# job config checks
-if [ -z "$BM_ROOTFS" ]; then
-  echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
-  exit 1
-fi
-
-if [ -z "$BM_BOOTFS" ]; then
-  echo "Must set /boot files for the TFTP boot in the job's variables"
-  exit 1
-fi
-
-if [ -z "$BM_CMDLINE" ]; then
-  echo "Must set BM_CMDLINE to your board's kernel command line arguments"
-  exit 1
-fi
-
-if [ -z "$BM_BOOTCONFIG" ]; then
-  echo "Must set BM_BOOTCONFIG to your board's required boot configuration arguments"
-  exit 1
-fi
-
-set -ex
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results
-
-# Create the rootfs in the NFS directory.  rm to make sure it's in a pristine
-# state, since it's volume-mounted on the host.
-rsync -a --delete $BM_ROOTFS/ /nfs/
-
-# If BM_BOOTFS is an URL, download it
-if echo $BM_BOOTFS | grep -q http; then
-  apt install -y wget
-  wget ${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS -O /tmp/bootfs.tar
-  BM_BOOTFS=/tmp/bootfs.tar
-fi
-
-# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
-if [ -f $BM_BOOTFS ]; then
-  mkdir -p /tmp/bootfs
-  tar xf $BM_BOOTFS -C /tmp/bootfs
-  BM_BOOTFS=/tmp/bootfs
-fi
-
-# Install kernel modules (it could be either in /lib/modules or
-# /usr/lib/modules, but we want to install in the latter)
-[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a --delete $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
-[ -d $BM_BOOTFS/lib/modules ] && rsync -a --delete $BM_BOOTFS/lib/modules/ /nfs/usr/lib/modules/
-
-# Install kernel image + bootloader files
-rsync -a --delete $BM_BOOTFS/boot/ /tftp/
-
-# Create the rootfs in the NFS directory
-mkdir -p /nfs/results
-. $BM/rootfs-setup.sh /nfs
-
-echo "$BM_CMDLINE" > /tftp/cmdline.txt
-
-# Add some required options in config.txt
-printf "$BM_BOOTCONFIG" >> /tftp/config.txt
-
-set +e
-ATTEMPTS=2
-while [ $((ATTEMPTS--)) -gt 0 ]; do
-  python3 $BM/poe_run.py \
-          --dev="$BM_SERIAL" \
-          --powerup="$BM_POWERUP" \
-          --powerdown="$BM_POWERDOWN" \
-          --timeout="${BM_POE_TIMEOUT:-60}"
-  ret=$?
-
-  if [ $ret -eq 2 ]; then
-    echo "Did not detect boot sequence, retrying..."
-  else
-    ATTEMPTS=0
-  fi
-done
-set -e
-
-# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
-# will look for them.
-cp -Rp /nfs/results/. results/
-
-exit $ret
```
```diff
@@ -1,96 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Igalia, S.L.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import os
-import re
-from serial_buffer import SerialBuffer
-import sys
-import threading
-
-class PoERun:
-    def __init__(self, args):
-        self.powerup = args.powerup
-        self.powerdown = args.powerdown
-        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)
-
-    def print_error(self, message):
-        RED = '\033[0;31m'
-        NO_COLOR = '\033[0m'
-        print(RED + message + NO_COLOR)
-
-    def logged_system(self, cmd):
-        print("Running '{}'".format(cmd))
-        return os.system(cmd)
-
-    def run(self):
-        if self.logged_system(self.powerup) != 0:
-            return 1
-
-        boot_detected = False
-        for line in self.ser.lines():
-            if re.search("Booting Linux", line):
-                boot_detected = True
-                break
-
-        if not boot_detected:
-            self.print_error("Something wrong; couldn't detect the boot start up sequence")
-            return 2
-
-        for line in self.ser.lines():
-            if re.search("---. end Kernel panic", line):
-                return 1
-
-            # Binning memory problems
-            if re.search("binner overflow mem", line):
-                self.print_error("Memory overflow in the binner; GPU hang")
-                return 1
-
-            result = re.search("hwci: mesa: (\S*)", line)
-            if result:
-                if result.group(1) == "pass":
-                    return 0
-                else:
-                    return 1
-
-        self.print_error("Reached the end of the CPU serial log without finding a result")
-        return 2
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
-    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
-    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
-    parser.add_argument('--timeout', type=int, default=60,
-                        help='time in seconds to wait for activity', required=False)
-    args = parser.parse_args()
-
-    poe = PoERun(args)
-    retval = poe.run()
-
-    poe.logged_system(args.powerdown)
-
-    sys.exit(retval)
-
-if __name__ == '__main__':
-    main()
```
```diff
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-rootfs_dst=$1
-
-mkdir -p $rootfs_dst/results
-
-# Set up the init script that brings up the system.
-cp $BM/bm-init.sh $rootfs_dst/init
-cp $CI_COMMON/init*.sh $rootfs_dst/
-
-cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
-
-set +x
-# Pass through relevant env vars from the gitlab job to the baremetal init script
-"$CI_COMMON"/generate-env.sh > $rootfs_dst/set-job-env-vars.sh
-chmod +x $rootfs_dst/set-job-env-vars.sh
-echo "Variables passed through:"
-cat $rootfs_dst/set-job-env-vars.sh
-echo "export CI_JOB_JWT=${CI_JOB_JWT@Q}" >> $rootfs_dst/set-job-env-vars.sh
-set -x
-
-# Add the Mesa drivers we built, and make a consistent symlink to them.
-mkdir -p $rootfs_dst/$CI_PROJECT_DIR
-rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
```
@@ -1,153 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import argparse
from datetime import datetime, timezone
import queue
import serial
import threading
import time


class SerialBuffer:
    def __init__(self, dev, filename, prefix, timeout = None):
        self.filename = filename
        self.dev = dev

        if dev:
            self.f = open(filename, "wb+")
            self.serial = serial.Serial(dev, 115200, timeout=timeout if timeout else 10)
        else:
            self.f = open(filename, "rb")

        self.byte_queue = queue.Queue()
        self.line_queue = queue.Queue()
        self.prefix = prefix
        self.timeout = timeout
        self.sentinel = object()

        if self.dev:
            self.read_thread = threading.Thread(
                target=self.serial_read_thread_loop, daemon=True)
        else:
            self.read_thread = threading.Thread(
                target=self.serial_file_read_thread_loop, daemon=True)
        self.read_thread.start()

        self.lines_thread = threading.Thread(
            target=self.serial_lines_thread_loop, daemon=True)
        self.lines_thread.start()

    # Thread that just reads the bytes from the serial device to try to keep its
    # buffer from overflowing. If nothing is received in 1 minute, it finalizes.
    def serial_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.dev
        self.byte_queue.put(greet.encode())

        while True:
            try:
                b = self.serial.read()
                if len(b) > 0:
                    self.byte_queue.put(b)
                elif self.timeout:
                    self.byte_queue.put(self.sentinel)
                    break
            except Exception as err:
                print(self.prefix + str(err))
                self.byte_queue.put(self.sentinel)
                break

    # Thread that just reads the bytes from the file of serial output that some
    # other process is appending to.
    def serial_file_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.filename
        self.byte_queue.put(greet.encode())

        while True:
            line = self.f.readline()
            if line:
                self.byte_queue.put(line)
            else:
                time.sleep(0.1)

    # Thread that processes the stream of bytes to 1) log to stdout, 2) log to
    # file, 3) add to the queue of lines to be read by program logic

    def serial_lines_thread_loop(self):
        line = bytearray()
        while True:
            bytes = self.byte_queue.get(block=True)

            if bytes == self.sentinel:
                self.read_thread.join()
                self.line_queue.put(self.sentinel)
                break

            # Write our data to the output file if we're the ones reading from
            # the serial device
            if self.dev:
                self.f.write(bytes)
                self.f.flush()

            for b in bytes:
                line.append(b)
                if b == b'\n'[0]:
                    line = line.decode(errors="replace")

                    time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
                    print("{endc}{time} {prefix}{line}".format(
                        time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')

                    self.line_queue.put(line)
                    line = bytearray()

    def get_line(self):
        line = self.line_queue.get()
        if line == self.sentinel:
            self.lines_thread.join()
        return line

    def lines(self):
        return iter(self.get_line, self.sentinel)


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--dev', type=str, help='Serial device')
    parser.add_argument('--file', type=str,
                        help='Filename for serial output', required=True)
    parser.add_argument('--prefix', type=str,
                        help='Prefix for logging serial to stdout', nargs='?')

    args = parser.parse_args()

    ser = SerialBuffer(args.dev, args.file, args.prefix or "")
    for line in ser.lines():
        # We're just using this as a logger, so eat the produced lines and drop
        # them
        pass


if __name__ == '__main__':
    main()
@@ -1,41 +0,0 @@
#!/usr/bin/python3

# Copyright © 2020 Christian Gmeiner
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Tiny script to read bytes from telnet, and write the output to stdout, with a
# buffer in between so we don't lose serial output from its buffer.
#

import sys
import telnetlib

host=sys.argv[1]
port=sys.argv[2]

tn = telnetlib.Telnet(host, port, 1000000)

while True:
    bytes = tn.read_some()
    sys.stdout.buffer.write(bytes)
    sys.stdout.flush()

tn.close()
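The comment above states the intent: keep draining the telnet side so the remote serial buffer never overflows, and let stdout redirection handle persistence. A minimal usage sketch; the script name and host/port values are hypothetical (the original filename isn't shown in this diff), only the positional host-then-port arguments come from the code above:

# Sketch only: drain a device's telnet console into a log file in the background
# while a job runs. Script name and addresses are placeholders.
python3 telnet_buffer.py 192.168.1.50 5000 > serial-console.log &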
@@ -1,14 +0,0 @@
#!/bin/sh

while true; do
  devcds=`find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null`
  for i in $devcds; do
    echo "Found a devcoredump at $i."
    if cp $i /results/first.devcore; then
      echo 1 > $i
      echo "Saved to the job artifacts at /first.devcore"
      exit 0
    fi
  done
  sleep 10
done
@@ -1,90 +0,0 @@
#!/bin/bash

for var in \
    ASAN_OPTIONS \
    BASE_SYSTEM_FORK_HOST_PREFIX \
    BASE_SYSTEM_MAINLINE_HOST_PREFIX \
    CI_COMMIT_BRANCH \
    CI_COMMIT_TITLE \
    CI_JOB_ID \
    CI_JOB_URL \
    CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
    CI_MERGE_REQUEST_TITLE \
    CI_NODE_INDEX \
    CI_NODE_TOTAL \
    CI_PAGES_DOMAIN \
    CI_PIPELINE_ID \
    CI_PROJECT_DIR \
    CI_PROJECT_NAME \
    CI_PROJECT_PATH \
    CI_PROJECT_ROOT_NAMESPACE \
    CI_RUNNER_DESCRIPTION \
    CI_SERVER_URL \
    DEQP_CASELIST_FILTER \
    DEQP_CASELIST_INV_FILTER \
    DEQP_CONFIG \
    DEQP_EXPECTED_RENDERER \
    DEQP_FRACTION \
    DEQP_HEIGHT \
    DEQP_RESULTS_DIR \
    DEQP_RUNNER_OPTIONS \
    DEQP_SUITE \
    DEQP_VARIANT \
    DEQP_VER \
    DEQP_WIDTH \
    DEVICE_NAME \
    DRIVER_NAME \
    EGL_PLATFORM \
    ETNA_MESA_DEBUG \
    FDO_CI_CONCURRENT \
    FDO_UPSTREAM_REPO \
    FD_MESA_DEBUG \
    FLAKES_CHANNEL \
    GPU_VERSION \
    GTEST \
    GTEST_FAILS \
    GTEST_FRACTION \
    GTEST_RESULTS_DIR \
    GTEST_RUNNER_OPTIONS \
    GTEST_SKIPS \
    HWCI_FREQ_MAX \
    HWCI_KERNEL_MODULES \
    HWCI_START_XORG \
    HWCI_TEST_SCRIPT \
    IR3_SHADER_DEBUG \
    JOB_ARTIFACTS_BASE \
    JOB_RESULTS_PATH \
    JOB_ROOTFS_OVERLAY_PATH \
    MESA_BUILD_PATH \
    MESA_GL_VERSION_OVERRIDE \
    MESA_GLSL_VERSION_OVERRIDE \
    MESA_GLES_VERSION_OVERRIDE \
    MESA_VK_IGNORE_CONFORMANCE_WARNING \
    MINIO_HOST \
    NIR_VALIDATE \
    PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
    PAN_MESA_DEBUG \
    PIGLIT_FRACTION \
    PIGLIT_JUNIT_RESULTS \
    PIGLIT_NO_WINDOW \
    PIGLIT_OPTIONS \
    PIGLIT_PLATFORM \
    PIGLIT_PROFILES \
    PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
    PIGLIT_REPLAY_SUBCOMMAND \
    PIGLIT_REPLAY_DESCRIPTION_FILE \
    PIGLIT_REPLAY_DEVICE_NAME \
    PIGLIT_REPLAY_EXTRA_ARGS \
    PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
    PIGLIT_REPLAY_UPLOAD_TO_MINIO \
    PIGLIT_RESULTS \
    PIGLIT_TESTS \
    PIPELINE_ARTIFACTS_BASE \
    TU_DEBUG \
    VK_CPU \
    VK_DRIVER \
    ; do
  if [ -n "${!var+x}" ]; then
    echo "export $var=${!var@Q}"
  fi
done
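The loop above leans on two bash features: indirect expansion, where ${!var} yields the value of the variable whose name is stored in var (and ${!var+x} is non-empty only when that variable is set), and the @Q parameter transformation (bash 4.4+), which prints a shell-quoted form that survives being written to a file and sourced later. A standalone sketch of what the echo line produces, using a made-up value:

#!/bin/bash
# Standalone sketch, not part of the CI scripts: illustrate ${!var} and ${!var@Q}.
DEQP_RUNNER_OPTIONS='--timeout 120 --jobs 4'   # hypothetical value for illustration
var=DEQP_RUNNER_OPTIONS

echo "${!var}"                  # indirect expansion: --timeout 120 --jobs 4
[ -n "${!var+x}" ] && echo set  # the "+x" form is non-empty only if the variable is set
echo "export $var=${!var@Q}"    # export DEQP_RUNNER_OPTIONS='--timeout 120 --jobs 4'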
@@ -1,22 +0,0 @@
#!/bin/sh

# Very early init, used to make sure devices and network are set up and
# reachable.

set -ex

cd /

mount -t proc none /proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp

echo "nameserver 8.8.8.8" > /etc/resolv.conf
[ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts

# Set the time so we can validate certificates before we fetch anything;
# however as not all DUTs have network, make this non-fatal.
for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
@@ -1,78 +0,0 @@
#!/bin/sh

# Second-stage init, used to set up devices and our job environment before
# running tests.

. /set-job-env-vars.sh

set -ex

# Set up any devices required by the jobs
[ -z "$HWCI_KERNEL_MODULES" ] || (echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe)

# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
ln -sf $CI_PROJECT_DIR/install /install
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri

# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp

# Make sure Python can find all our imports
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")

if [ "$HWCI_FREQ_MAX" = "true" ]; then
  # Ensure initialization of the DRM device (needed by MSM)
  head -0 /dev/dri/renderD128

  # Disable GPU frequency scaling
  DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
  test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true

  # Disable CPU frequency scaling
  echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

  # Disable GPU runtime power management
  GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
  test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
fi

# Start a little daemon to capture the first devcoredump we encounter.  (They
# expire after 5 minutes, so we poll for them).
./capture-devcoredump.sh &

# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT because we need to use xinit to start X (otherwise
# without using -displayfd you can race with Xorg's startup), but xinit will eat
# your client's return code
if [ -n "$HWCI_START_XORG" ]; then
  echo "touch /xorg-started; sleep 100000" > /xorg-script
  env \
    xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &

  # Wait for xorg to be ready for connections.
  for i in 1 2 3 4 5; do
    if [ -e /xorg-started ]; then
      break
    fi
    sleep 5
  done
  export DISPLAY=:0
fi

RESULT=fail
if sh $HWCI_TEST_SCRIPT; then
  RESULT=pass
  rm -rf results/trace/$PIGLIT_REPLAY_DEVICE_NAME
fi

# upload artifacts
MINIO=$(cat /proc/cmdline | tr ' ' '\n' | grep minio_results | cut -d '=' -f 2 || true)
if [ -n "$MINIO" ]; then
  tar -czf results.tar.gz results/;
  ci-fairy minio login "$CI_JOB_JWT";
  ci-fairy minio cp results.tar.gz minio://"$MINIO"/results.tar.gz;
fi

echo "hwci: mesa: $RESULT"
@@ -1,21 +0,0 @@
#!/bin/sh

set -ex

_XORG_SCRIPT="/xorg-script"
_FLAG_FILE="/xorg-started"

echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
if [ "x$1" != "x" ]; then
    export LD_LIBRARY_PATH="${1}/lib"
    export LIBGL_DRIVERS_PATH="${1}/lib/dri"
fi
xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &

# Wait for xorg to be ready for connections.
for i in 1 2 3 4 5; do
    if [ -e "${_FLAG_FILE}" ]; then
        break
    fi
    sleep 5
done
@@ -1,57 +0,0 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y

# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n

CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y

CONFIG_DRM=y
CONFIG_DRM_ETNAVIV=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y

CONFIG_ROCKCHIP_CDN_DP=n

CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y

CONFIG_MFD_RK808=y
CONFIG_REGULATOR_RK808=y
CONFIG_RTC_DRV_RK808=y
CONFIG_COMMON_CLK_RK808=y

CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y

CONFIG_REGULATOR_VCTRL=y

CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n

CONFIG_TMPFS=y

CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=n
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n

CONFIG_FW_LOADER_COMPRESS=y

CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
@@ -1,157 +0,0 @@
CONFIG_LOCALVERSION_AUTO=y
CONFIG_DEBUG_KERNEL=y

# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
CONFIG_BLK_DEV_INITRD=n

CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y

CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_MSM=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y

CONFIG_ROCKCHIP_CDN_DP=n

CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_STMMAC_ETH=y
CONFIG_TYPEC_FUSB302=y
CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y

# MSM platform bits

# For CONFIG_QCOM_LMH
CONFIG_OF=y

CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_RPMPD=y
CONFIG_SDM_GPUCC_845=y
CONFIG_SDM_VIDEOCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SDM_LPASSCC_845=y
CONFIG_SDM_CAMCC_845=y
CONFIG_RESET_QCOM_PDC=y
CONFIG_DRM_TI_SN65DSI86=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI_QCOM_GENI=y
CONFIG_PHY_QCOM_QUSB2=y
CONFIG_PHY_QCOM_QMP=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_LMH=y
CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_QCOM_WDT=y
CONFIG_POWER_RESET_QCOM_PON=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_MSM8916=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
CONFIG_INTERCONNECT_QCOM_SC7180=y
CONFIG_CRYPTO_DEV_QCOM_RNG=y
CONFIG_SC_DISPCC_7180=y
CONFIG_SC_GPUCC_7180=y

# db410c ethernet
CONFIG_USB_RTL8152=y
# db820c ethernet
CONFIG_ATL1C=y

CONFIG_ARCH_ALPINE=n
CONFIG_ARCH_BCM2835=n
CONFIG_ARCH_BCM_IPROC=n
CONFIG_ARCH_BERLIN=n
CONFIG_ARCH_BRCMSTB=n
CONFIG_ARCH_EXYNOS=n
CONFIG_ARCH_K3=n
CONFIG_ARCH_LAYERSCAPE=n
CONFIG_ARCH_LG1K=n
CONFIG_ARCH_HISI=n
CONFIG_ARCH_MVEBU=n
CONFIG_ARCH_SEATTLE=n
CONFIG_ARCH_SYNQUACER=n
CONFIG_ARCH_RENESAS=n
CONFIG_ARCH_R8A774A1=n
CONFIG_ARCH_R8A774C0=n
CONFIG_ARCH_R8A7795=n
CONFIG_ARCH_R8A7796=n
CONFIG_ARCH_R8A77965=n
CONFIG_ARCH_R8A77970=n
CONFIG_ARCH_R8A77980=n
CONFIG_ARCH_R8A77990=n
CONFIG_ARCH_R8A77995=n
CONFIG_ARCH_STRATIX10=n
CONFIG_ARCH_TEGRA=n
CONFIG_ARCH_SPRD=n
CONFIG_ARCH_THUNDER=n
CONFIG_ARCH_THUNDER2=n
CONFIG_ARCH_UNIPHIER=n
CONFIG_ARCH_VEXPRESS=n
CONFIG_ARCH_XGENE=n
CONFIG_ARCH_ZX=n
CONFIG_ARCH_ZYNQMP=n

# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n

CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y

CONFIG_REGULATOR_VCTRL=y

CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n

CONFIG_TMPFS=y

CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y

CONFIG_DETECT_HUNG_TASK=y

CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_USER_HELPER=n

CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y

# For amlogic
CONFIG_MESON_GXL_PHY=y
CONFIG_MDIO_BUS_MUX_MESON_G12A=y
CONFIG_DRM_MESON=y

# For Mediatek
CONFIG_DRM_MEDIATEK=y
CONFIG_PWM_MEDIATEK=y
CONFIG_DRM_MEDIATEK_HDMI=y
CONFIG_GNSS=y
CONFIG_GNSS_MTK_SERIAL=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
@@ -1,48 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

# Fetch the arm-built rootfs image and unpack it in our x86 container (saves
# network transfer, disk usage, and runtime on test jobs)

if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
  ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
else
  ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
fi

wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz
mkdir -p /rootfs-$arch
tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz
rm rootfs.tgz

if [[ $arch == "arm64" ]]; then
    mkdir -p /baremetal-files
    pushd /baremetal-files

    wget ${ARTIFACTS_URL}/Image
    wget ${ARTIFACTS_URL}/Image.gz
    wget ${ARTIFACTS_URL}/cheza-kernel

    DEVICE_TREES="apq8016-sbc.dtb apq8096-db820c.dtb"

    for DTB in $DEVICE_TREES; do
        wget ${ARTIFACTS_URL}/$DTB
    done

    popd
elif [[ $arch == "armhf" ]]; then
    mkdir -p /baremetal-files
    pushd /baremetal-files

    wget ${ARTIFACTS_URL}/zImage

    DEVICE_TREES="imx6q-cubox-i.dtb"

    for DTB in $DEVICE_TREES; do
        wget ${ARTIFACTS_URL}/$DTB
    done

    popd
fi
@@ -1,18 +0,0 @@
#!/bin/bash

set -ex

APITRACE_VERSION="170424754bb46002ba706e16ee5404b61988d74a"

git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
pushd /apitrace
git checkout "$APITRACE_VERSION"
git submodule update --init --depth 1 --recursive
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
ninja -C _build
mkdir build
cp _build/apitrace build
cp _build/eglretrace build
${STRIP_CMD:-strip} build/*
find . -not -path './build' -not -path './build/*' -delete
popd
@@ -1,63 +0,0 @@
#!/bin/bash

set -ex

# Pull down repositories that crosvm depends on to cros checkout-like locations.
CROS_ROOT=/
THIRD_PARTY_ROOT=$CROS_ROOT/third_party
mkdir -p $THIRD_PARTY_ROOT
AOSP_EXTERNAL_ROOT=$CROS_ROOT/aosp/external
mkdir -p $AOSP_EXTERNAL_ROOT
PLATFORM2_ROOT=/platform2

PLATFORM2_COMMIT=72e56e66ccf3d2ea48f5686bd1f772379c43628b
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform2 $PLATFORM2_ROOT
pushd $PLATFORM2_ROOT
git checkout $PLATFORM2_COMMIT
popd

# minijail does not exist in upstream linux distros.
MINIJAIL_COMMIT=debdf5de5a0ae3b667bee2f8fb1f755b0b3f5a6c
git clone --single-branch --no-checkout https://android.googlesource.com/platform/external/minijail $AOSP_EXTERNAL_ROOT/minijail
pushd $AOSP_EXTERNAL_ROOT/minijail
git checkout $MINIJAIL_COMMIT
make
cp libminijail.so /usr/lib/x86_64-linux-gnu/
popd

# Pull the cras library for audio access.
ADHD_COMMIT=a1e0869b95c845c4fe6234a7b92fdfa6acc1e809
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/third_party/adhd $THIRD_PARTY_ROOT/adhd
pushd $THIRD_PARTY_ROOT/adhd
git checkout $ADHD_COMMIT
popd

# Pull vHost (dataplane for virtio backend drivers)
VHOST_COMMIT=3091854e27242d09453004b011f701fa29c0b8e8
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/third_party/rust-vmm/vhost $THIRD_PARTY_ROOT/rust-vmm/vhost
pushd $THIRD_PARTY_ROOT/rust-vmm/vhost
git checkout $VHOST_COMMIT
popd

CROSVM_VERSION=e42a43d880b0364b55559dbeade3af174f929001
git clone --single-branch --no-checkout https://chromium.googlesource.com/chromiumos/platform/crosvm /platform/crosvm
pushd /platform/crosvm
git checkout "$CROSVM_VERSION"

RUSTFLAGS='-L native=/usr/local/lib' cargo install \
  bindgen \
  -j ${FDO_CI_CONCURRENT:-4} \
  --root /usr/local \
  $EXTRA_CARGO_ARGS

RUSTFLAGS='-L native=/usr/local/lib' cargo install \
  -j ${FDO_CI_CONCURRENT:-4} \
  --locked \
  --features 'default-no-sandbox gpu x virgl_renderer virgl_renderer_next' \
  --path . \
  --root /usr/local \
  $EXTRA_CARGO_ARGS

popd

rm -rf $PLATFORM2_ROOT $AOSP_EXTERNAL_ROOT/minijail $THIRD_PARTY_ROOT/adhd $THIRD_PARTY_ROOT/rust-vmm /platform/crosvm
@@ -1,9 +0,0 @@
#!/bin/bash

set -ex

cargo install --locked deqp-runner \
  -j ${FDO_CI_CONCURRENT:-4} \
  --version 0.10.0 \
  --root /usr/local \
  $EXTRA_CARGO_ARGS
@@ -1,82 +0,0 @@
#!/bin/bash

set -ex

git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
    https://github.com/KhronosGroup/VK-GL-CTS.git \
    -b vulkan-cts-1.2.7.1 \
    --depth 1 \
    /VK-GL-CTS
pushd /VK-GL-CTS

# --insecure is due to SSL cert failures hitting sourceforge for zlib and
# libpng (sigh).  The archives get their checksums checked anyway, and git
# always goes through ssh or https.
python3 external/fetch_sources.py --insecure

mkdir -p /deqp

# Save the testlog stylesheets:
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
popd

pushd /deqp
# When including EGL/X11 testing, do that build first and save off its
# deqp-egl binary.
cmake -S /VK-GL-CTS -B . -G Ninja \
      -DDEQP_TARGET=x11_egl_glx \
      -DCMAKE_BUILD_TYPE=Release \
      $EXTRA_CMAKE_ARGS
ninja modules/egl/deqp-egl
cp /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11


cmake -S /VK-GL-CTS -B . -G Ninja \
      -DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
      -DCMAKE_BUILD_TYPE=Release \
      $EXTRA_CMAKE_ARGS
ninja

mv /deqp/modules/egl/deqp-egl-x11 /deqp/modules/egl/deqp-egl

# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt) ; do
    cat /VK-GL-CTS/external/vulkancts/mustpass/master/$mustpass \
        >> /deqp/mustpass/vk-master.txt
done

cp \
    /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
    /deqp/mustpass/.
cp \
    /deqp/external/openglcts/modules/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt \
    /deqp/mustpass/.
cp \
    /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-master.txt \
    /deqp/mustpass/.
cp \
    /deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
    /deqp/mustpass/.

# Save *some* executor utils, but otherwise strip things down
# to reduce deqp build size:
mkdir /deqp/executor.save
cp /deqp/executor/testlog-to-* /deqp/executor.save
rm -rf /deqp/executor
mv /deqp/executor.save /deqp/executor

rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
rm -rf /deqp/external/openglcts/modules/cts-runner
rm -rf /deqp/modules/internal
rm -rf /deqp/execserver
rm -rf /deqp/framework
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
${STRIP_CMD:-strip} external/openglcts/modules/glcts
${STRIP_CMD:-strip} modules/*/deqp-*
du -sh *
rm -rf /VK-GL-CTS
popd
@@ -1,14 +0,0 @@
#!/bin/bash

set -ex

git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
git checkout 72088685d90bc814d14aad5505354ffa8a642789
git submodule update --init
mkdir build
cd build
cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C . install
cd ../..
rm -rf Fossilize
@@ -1,19 +0,0 @@
#!/bin/bash

set -ex

GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1

git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct
pushd /gfxreconstruct
git checkout "$GFXRECONSTRUCT_VERSION"
git submodule update --init
git submodule update
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C _build gfxrecon-replay gfxrecon-info
mkdir -p build/bin
install _build/tools/replay/gfxrecon-replay build/bin
install _build/tools/info/gfxrecon-info build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd
@@ -1,16 +0,0 @@
#!/bin/bash

set -ex

PARALLEL_DEQP_RUNNER_VERSION=6596b71cf37a7efb4d54acd48c770ed2d4ad6b7e

git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
pushd /parallel-deqp-runner
git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
meson . _build
ninja -C _build hang-detection
mkdir -p build/bin
install _build/hang-detection build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd
@@ -1,51 +0,0 @@
#!/bin/bash

set -ex

mkdir -p kernel
wget -qO- ${KERNEL_URL} | tar -xj --strip-components=1 -C kernel
pushd kernel

# The kernel doesn't like the gold linker (or the old lld in our debians).
# Sneak in some override symlinks during kernel build until we can update
# debian (they'll get blown away by the rm of the kernel dir at the end).
mkdir -p ld-links
for i in /usr/bin/*-ld /usr/bin/ld; do
    i=`basename $i`
    ln -sf /usr/bin/$i.bfd ld-links/$i
done
export PATH=`pwd`/ld-links:$PATH

export LOCALVERSION="`basename $KERNEL_URL`"
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make ${KERNEL_IMAGE_NAME}
for image in ${KERNEL_IMAGE_NAME}; do
    cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
done

if [[ -n ${DEVICE_TREES} ]]; then
    make dtbs
    cp ${DEVICE_TREES} /lava-files/.
fi

if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
    make modules
    INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi

if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
    make Image.lzma
    mkimage \
        -f auto \
        -A arm \
        -O linux \
        -d arch/arm64/boot/Image.lzma \
        -C lzma\
        -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
        /lava-files/cheza-kernel
    KERNEL_IMAGE_NAME+=" cheza-kernel"
fi

popd
rm -rf kernel
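merge_config.sh ships with the kernel tree itself: it overlays one or more config fragments (such as the arm.config / arm64.config fragments shown earlier) on top of a base defconfig, and warns when a requested symbol does not end up in the final .config. A hedged sketch of the same call with the job variables spelled out; the concrete values below are assumptions for illustration, not taken from this diff:

# Sketch only: DEFCONFIG, KERNEL_ARCH and the make target are hypothetical example values.
DEFCONFIG=arch/arm64/configs/defconfig
KERNEL_ARCH=arm64

./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make Image.gz    # KERNEL_IMAGE_NAME would typically name a target like this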
@@ -1,30 +0,0 @@
#!/bin/bash

set -ex

export LLVM_CONFIG="llvm-config-11"

$LLVM_CONFIG --version

git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
    https://github.com/llvm/llvm-project \
    --depth 1 \
    -b llvmorg-12.0.0-rc3 \
    /llvm-project

mkdir /libclc
pushd /libclc
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
ninja
ninja install
popd

# workaround cmake vs debian packaging.
mkdir -p /usr/lib/clc
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/

du -sh *
rm -rf /libclc /llvm-project
@@ -1,14 +0,0 @@
#!/bin/bash

set -ex

export LIBDRM_VERSION=libdrm-2.4.107

wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
cd $LIBDRM_VERSION
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
ninja -C build install
cd ..
rm -rf $LIBDRM_VERSION
@@ -1,23 +0,0 @@
#!/bin/bash

set -ex

git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout 7d7dd2688c214e1b3c00f37226500cbec4a58efb
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
rm -rf target_api
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
    find ! -regex "^\.$" \
         ! -regex "^\.\/piglit.*" \
         ! -regex "^\.\/framework.*" \
         ! -regex "^\.\/bin$" \
         ! -regex "^\.\/bin\/replayer\.py" \
         ! -regex "^\.\/templates.*" \
         ! -regex "^\.\/tests$" \
         ! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
fi
popd
@@ -1,31 +0,0 @@
#!/bin/bash

# Note that this script is not actually "building" rust, but build- is the
# convention for the shared helpers for putting stuff in our containers.

set -ex

# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
# $HOME/.cargo/bin.  Make bin a link to a public bin directory so the commands
# are just available to all build jobs.
mkdir -p $HOME/.cargo
ln -s /usr/local/bin $HOME/.cargo/bin

# For rust in Mesa, we use rustup to install.  This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes
# with.
#
# Pick the rust compiler (1.48) available in Debian stable, and pick a specific
# snapshot from rustup so the compiler doesn't drift on us.
wget https://sh.rustup.rs -O - | \
    sh -s -- -y --default-toolchain 1.49.0-2020-12-31

# Set up a config script for cross compiling -- cargo needs your system cc for
# linking in cross builds, but doesn't know what you want to use for system cc.
cat > /root/.cargo/config <<EOF
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"

[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
EOF
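The .cargo/config written above only tells cargo which system linker to use per target; the target itself still has to be requested at build time. A hedged sketch of how a later job might consume it (the crate being built and the concurrency value are hypothetical):

# Sketch only: cross-compile a Rust crate for one of the targets configured above.
# The target's standard library has to be installed via rustup first.
rustup target add armv7-unknown-linux-gnueabihf
cargo build --release --target=armv7-unknown-linux-gnueabihf -j "${FDO_CI_CONCURRENT:-4}"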
@@ -1,17 +0,0 @@
#!/bin/bash

set -ex

git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
    https://github.com/intel/libva-utils.git \
    -b 2.13.0 \
    --depth 1 \
    /va-utils

pushd /va-utils
meson build -D tests=true  -Dprefix=/va $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /va-utils
@@ -1,20 +0,0 @@
#!/bin/bash

set -ex

mkdir -p /epoxy
pushd /epoxy
wget -qO- https://github.com/anholt/libepoxy/releases/download/1.5.8/libepoxy-1.5.8.tar.xz | tar -xJ --strip-components=1
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /epoxy

VIRGLRENDERER_VERSION=f2ab66c6c00065b2944f4cd9d965ee455c535271
git clone https://gitlab.freedesktop.org/virgl/virglrenderer.git --single-branch --no-checkout /virglrenderer
pushd /virglrenderer
git checkout "$VIRGLRENDERER_VERSION"
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /virglrenderer
@@ -1,43 +0,0 @@
#!/bin/bash

set -ex

VKD3D_PROTON_VERSION="2.3.1"
VKD3D_PROTON_COMMIT="3ed3526332f53d7d35cf1b685fa8096b01f26ff0"

VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_VERSION"

function build_arch {
  local arch="$1"
  shift

  meson "$@"                               \
        -Denable_tests=true                \
        --buildtype release                \
        --prefix "$VKD3D_PROTON_BUILD_DIR" \
        --strip                            \
        --bindir "x${arch}"                \
        --libdir "x${arch}"                \
        "$VKD3D_PROTON_BUILD_DIR/build.${arch}"

  ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install

  install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/"*.exe
}

git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b "v$VKD3D_PROTON_VERSION" --no-checkout "$VKD3D_PROTON_SRC_DIR"
pushd "$VKD3D_PROTON_SRC_DIR"
git checkout "$VKD3D_PROTON_COMMIT"
git submodule update --init --recursive
git submodule update --recursive
build_arch 64 --cross-file build-win64.txt
build_arch 86 --cross-file build-win32.txt
cp "setup_vkd3d_proton.sh" "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
chmod +x "$VKD3D_PROTON_BUILD_DIR/setup_vkd3d_proton.sh"
popd

"$VKD3D_PROTON_BUILD_DIR"/setup_vkd3d_proton.sh install
rm -rf "$VKD3D_PROTON_BUILD_DIR"
rm -rf "$VKD3D_PROTON_SRC_DIR"
@@ -1,10 +0,0 @@
#!/bin/sh

if test -f /etc/debian_version; then
    apt-get autoremove -y --purge
fi

# Clean up any build cache for rust.
rm -rf /.cargo

ccache --show-stats
@@ -1,36 +0,0 @@
#!/bin/sh

if test -f /etc/debian_version; then
    CCACHE_PATH=/usr/lib/ccache
else
    CCACHE_PATH=/usr/lib64/ccache
fi

# Common setup among container builds before we get to building code.

export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
export CCACHE_DIR=/cache/mesa/ccache
export PATH=$CCACHE_PATH:$PATH

# CMake ignores $PATH, so we have to force CC/GCC to the ccache versions.
export CC="${CCACHE_PATH}/gcc"
export CXX="${CCACHE_PATH}/g++"

# Force linkers to gold, since it's so much faster for building.  We can't use
# lld because we're on old debian and it's buggy.  mingw fails meson builds
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
find /usr/bin -name \*-ld -o -name ld | \
    grep -v mingw | \
    xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'

ccache --show-stats

# Make a wrapper script for ninja to always include the -j flags
echo '#!/bin/sh -x' > /usr/local/bin/ninja
echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' >> /usr/local/bin/ninja
chmod +x /usr/local/bin/ninja

# Set MAKEFLAGS so that all make invocations in container builds include the
# flags (doesn't apply to non-container builds, but we don't run make there)
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"
@@ -1,35 +0,0 @@
#!/bin/bash

ndk=$1
arch=$2
cpu_family=$3
cpu=$4
cross_file="/cross_file-$arch.txt"

# armv7 has the toolchain split between two names.
arch2=${5:-$2}

# Note that we disable C++ exceptions, because Mesa doesn't use exceptions,
# and allowing it in code generation means we get unwind symbols that break
# the libEGL and driver symbol tests.

cat >$cross_file <<EOF
[binaries]
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-ar'
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
c_ld = 'lld'
cpp_ld = 'lld'
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-strip'
pkgconfig = ['/usr/bin/pkg-config']

[host_machine]
system = 'linux'
cpu_family = '$cpu_family'
cpu = '$cpu'
endian = 'little'

[properties]
needs_exe_wrapper = true

EOF
@@ -1,38 +0,0 @@
#!/bin/sh

# Makes a .pc file in the Android NDK for meson to find its libraries.

set -ex

ndk="$1"
pc="$2"
cflags="$3"
libs="$4"
version="$5"

sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot

for arch in \
        x86_64-linux-android \
        i686-linux-android \
        aarch64-linux-android \
        arm-linux-androideabi; do
    pcdir=$sysroot/usr/lib/$arch/pkgconfig
    mkdir -p $pcdir

    cat >$pcdir/$pc <<EOF
prefix=$sysroot
exec_prefix=$sysroot
libdir=$sysroot/usr/lib/$arch/29
sharedlibdir=$sysroot/usr/lib/$arch
includedir=$sysroot/usr/include

Name: zlib
Description: zlib compression library
Version: $version

Requires:
Libs: -L$sysroot/usr/lib/$arch/29 $libs
Cflags: -I$sysroot/usr/include $cflags
EOF
done
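The generated zlib.pc only matters if pkg-config (and therefore meson) actually searches that per-arch directory. A sketch of how the lookup can be exercised by hand; the NDK location is a hypothetical example and the arch is one of the four listed above:

# Sketch only: point pkg-config at the per-arch directory created above and query it.
ndk=/android-ndk-r21d   # hypothetical NDK location
sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot
export PKG_CONFIG_LIBDIR=$sysroot/usr/lib/aarch64-linux-android/pkgconfig

pkg-config --modversion zlib   # prints the $version baked into the .pc file
pkg-config --cflags --libs zlib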
@@ -1,51 +0,0 @@
#!/bin/bash

arch=$1
cross_file="/cross_file-$arch.txt"
/usr/share/meson/debcrossgen --arch $arch -o "$cross_file"
# Explicitly set ccache path for cross compilers
sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"
if [ "$arch" = "i386" ]; then
    # Work around a bug in debcrossgen that should be fixed in the next release
    sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file"
fi
# Rely on qemu-user being configured in binfmt_misc on the host
sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"

# Add a line for rustc, which debcrossgen is missing.
cc=`sed -n 's|c = .\(.*\).|\1|p' < $cross_file`
if [[ "$arch" = "arm64" ]]; then
    rust_target=aarch64-unknown-linux-gnu
elif [[ "$arch" = "armhf" ]]; then
    rust_target=armv7-unknown-linux-gnueabihf
elif [[ "$arch" = "i386" ]]; then
    rust_target=i686-unknown-linux-gnu
elif [[ "$arch" = "ppc64el" ]]; then
    rust_target=powerpc64le-unknown-linux-gnu
elif [[ "$arch" = "s390x" ]]; then
    rust_target=s390x-unknown-linux-gnu
else
    echo "Needs rustc target mapping"
fi
sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"

# Set up cmake cross compile toolchain file for dEQP builds
toolchain_file="/toolchain-$arch.cmake"
if [[ "$arch" = "arm64" ]]; then
    GCC_ARCH="aarch64-linux-gnu"
    DE_CPU="DE_CPU_ARM_64"
    CMAKE_ARCH=arm
elif [[ "$arch" = "armhf" ]]; then
    GCC_ARCH="arm-linux-gnueabihf"
    DE_CPU="DE_CPU_ARM"
    CMAKE_ARCH=arm
fi

if [[ -n "$GCC_ARCH" ]]; then
    echo "set(CMAKE_SYSTEM_NAME Linux)" > "$toolchain_file"
    echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> "$toolchain_file"
    echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)" >> "$toolchain_file"
    echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)" >> "$toolchain_file"
    echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")" >> "$toolchain_file"
    echo "set(DE_CPU $DE_CPU)" >> "$toolchain_file"
fi
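The two generated files are consumed by the build systems rather than by this script itself: meson takes the cross file, and dEQP's CMake build takes the toolchain file. A hedged usage sketch with hypothetical build directories; only the /cross_file-$arch.txt and /toolchain-$arch.cmake paths come from the script above:

# Sketch only: how later build steps would typically consume the generated files.
meson setup _build-armhf --cross-file /cross_file-armhf.txt
ninja -C _build-armhf

cmake -S . -B _build-armhf-cmake -G Ninja \
      -DCMAKE_TOOLCHAIN_FILE=/toolchain-armhf.cmake \
      -DCMAKE_BUILD_TYPE=Release
ninja -C _build-armhf-cmake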
@@ -1,270 +0,0 @@
#!/bin/bash

set -ex

if [ $DEBIAN_ARCH = arm64 ]; then
    ARCH_PACKAGES="firmware-qcom-media"
elif [ $DEBIAN_ARCH = amd64 ]; then
    ARCH_PACKAGES="firmware-amd-graphics
                   libelf1
                   libllvm11
                   libva2
                   libva-drm2
                  "
fi

INSTALL_CI_FAIRY_PACKAGES="git
                           python3-dev
                           python3-pip
                           python3-setuptools
                           python3-wheel
                           "

apt-get -y install --no-install-recommends \
    $ARCH_PACKAGES \
    $INSTALL_CI_FAIRY_PACKAGES \
    ca-certificates \
    firmware-realtek \
    initramfs-tools \
    libasan6 \
    libexpat1 \
    libpng16-16 \
    libpython3.9 \
    libsensors5 \
    libvulkan1 \
    libwaffle-1-0 \
    libx11-6 \
    libx11-xcb1 \
    libxcb-dri2-0 \
    libxcb-dri3-0 \
    libxcb-glx0 \
    libxcb-present0 \
    libxcb-randr0 \
    libxcb-shm0 \
    libxcb-sync1 \
    libxcb-xfixes0 \
    libxdamage1 \
    libxext6 \
    libxfixes3 \
    libxkbcommon0 \
    libxrender1 \
    libxshmfence1 \
    libxxf86vm1 \
    netcat-openbsd \
    python3 \
    python3-lxml \
    python3-mako \
    python3-numpy \
    python3-packaging \
    python3-pil \
    python3-renderdoc \
    python3-requests \
    python3-simplejson \
    python3-yaml \
    sntp \
    strace \
    waffle-utils \
    wget \
    xinit \
    xserver-xorg-core \
    xz-utils

# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb

apt-get purge -y \
        $INSTALL_CI_FAIRY_PACKAGES

passwd root -d
chsh -s /bin/sh

cat > /init <<EOF
#!/bin/sh
export PS1=lava-shell:
exec sh
EOF
chmod +x /init

#######################################################################
# Strip the image to a small minimal system without removing the debian
# toolchain.

# xz compress firmware so it doesn't waste RAM at runtime on ramdisk systems
find /lib/firmware -type f -print0 | \
    xargs -0r -P4 -n4 xz -T1 -C crc32

# Copy timezone file and remove tzdata package
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Etc/UTC /etc/localtime

UNNEEDED_PACKAGES="
        libfdisk1
        "

export DEBIAN_FRONTEND=noninteractive

# Removing unused packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
	echo ${PACKAGE}
	if ! apt-get remove --purge --yes "${PACKAGE}"
	then
		echo "WARNING: ${PACKAGE} isn't installed"
	fi
done

apt-get autoremove --yes || true

# Dropping logs
rm -rf /var/log/*

# Dropping documentation, localization, i18n files, etc
rm -rf /usr/share/doc/*
rm -rf /usr/share/locale/*
rm -rf /usr/share/X11/locale/*
rm -rf /usr/share/man
rm -rf /usr/share/i18n/*
rm -rf /usr/share/info/*
rm -rf /usr/share/lintian/*
rm -rf /usr/share/common-licenses/*
rm -rf /usr/share/mime/*

# Dropping reportbug scripts
rm -rf /usr/share/bug

# Drop udev hwdb not required on a stripped system
rm -rf /lib/udev/hwdb.bin /lib/udev/hwdb.d/*

# Drop all gconv conversions && binaries
rm -rf usr/bin/iconv
rm -rf usr/sbin/iconvconfig
rm -rf usr/lib/*/gconv/

# Remove libusb database
rm -rf usr/sbin/update-usbids
rm -rf var/lib/usbutils/usb.ids
rm -rf usr/share/misc/usb.ids

#######################################################################
# Crush into a minimal production image to be deployed via some type of image
# updating system.
# IMPORTANT: The Debian system is no longer functional at this point,
# for example, apt and dpkg will stop working

UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"ncurses-bin ncurses-base libncursesw6 libncurses6 "\
"perl-base "\
"debconf libdebconfclient0 "\
"e2fsprogs e2fslibs libfdisk1 "\
"insserv "\
"udev "\
"init-system-helpers "\
"bash "\
"cpio "\
"xz-utils "\
"passwd "\
"libsemanage1 libsemanage-common "\
"libsepol1 "\
"gpgv "\
"hostname "\
"adduser "\
"debian-archive-keyring "\
"libegl1-mesa-dev "\
"libegl-mesa0 "\
"libgl1-mesa-dev "\
"libgl1-mesa-dri "\
"libglapi-mesa "\
"libgles2-mesa-dev "\
"libglx-mesa0 "\
"mesa-common-dev "\

# Removing unneeded packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
	echo "Forcing removal of ${PACKAGE}"
	if ! dpkg --purge --force-remove-essential --force-depends "${PACKAGE}"
	then
		echo "WARNING: ${PACKAGE} isn't installed"
	fi
done

# Show what's left package-wise before dropping dpkg itself
COLUMNS=300 dpkg-query -W --showformat='${Installed-Size;10}\t${Package}\n' | sort -k1,1n

# Drop dpkg
dpkg --purge --force-remove-essential --force-depends  dpkg

# No apt or dpkg, no need for their configuration archives
rm -rf etc/apt
rm -rf etc/dpkg

# Drop directories not part of ostree
# Note that /var needs to exist as ostree bind mounts the deployment /var over
# it
rm -rf var/* opt srv share

# ca-certificates are in /etc, drop the source
rm -rf usr/share/ca-certificates

# No bash, no need for completions
rm -rf usr/share/bash-completion

# No zsh, no need for completions
rm -rf usr/share/zsh/vendor-completions

# drop gcc python helpers
rm -rf usr/share/gcc

# Drop sysvinit leftovers
rm -rf etc/init.d
rm -rf etc/rc[0-6S].d

# Drop upstart helpers
rm -rf etc/init

# Various xtables helpers
rm -rf usr/lib/xtables

# Drop all locales
# TODO: only remaining locale is actually "C". Should we really remove it?
rm -rf usr/lib/locale/*

# partition helpers
rm -rf usr/sbin/*fdisk

# locale compiler
rm -rf usr/bin/localedef

# Systemd dns resolver
find usr etc -name '*systemd-resolve*' -prune -exec rm -r {} \;

# Systemd network configuration
find usr etc -name '*networkd*' -prune -exec rm -r {} \;

# systemd ntp client
find usr etc -name '*timesyncd*' -prune -exec rm -r {} \;

# systemd hw database manager
find usr etc -name '*systemd-hwdb*' -prune -exec rm -r {} \;

# No need for fuse
find usr etc -name '*fuse*' -prune -exec rm -r {} \;

# lsb init function leftovers
rm -rf usr/lib/lsb

# Only needed when adding libraries
rm -rf usr/sbin/ldconfig*

# Games, unused
rmdir usr/games

# Remove pam module to authenticate against a DB
# plus libdb-5.3.so that is only used by this pam module
rm -rf usr/lib/*/security/pam_userdb.so
rm -rf usr/lib/*/libdb-5.3.so

# remove NSS support for nis, nisplus and hesiod
rm -rf usr/lib/*/libnss_hesiod*
rm -rf usr/lib/*/libnss_nis*
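
# Illustrative sketch (not from the original file): the "find ... -prune -exec rm -r"
# pattern above removes whole matching trees; swapping -exec for -print previews
# what a given pattern would delete before committing to it.
find usr etc -name '*systemd-resolve*' -prune -print
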
@@ -1,79 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
        "

dpkg --add-architecture $arch
apt-get update

apt-get install -y --no-remove \
        $STABLE_EPHEMERAL \
        crossbuild-essential-$arch \
        libelf-dev:$arch \
        libexpat1-dev:$arch \
        libpciaccess-dev:$arch \
        libstdc++6:$arch \
        libvulkan-dev:$arch \
        libx11-dev:$arch \
        libx11-xcb-dev:$arch \
        libxcb-dri2-0-dev:$arch \
        libxcb-dri3-dev:$arch \
        libxcb-glx0-dev:$arch \
        libxcb-present-dev:$arch \
        libxcb-randr0-dev:$arch \
        libxcb-shm0-dev:$arch \
        libxcb-xfixes0-dev:$arch \
        libxdamage-dev:$arch \
        libxext-dev:$arch \
        libxrandr-dev:$arch \
        libxshmfence-dev:$arch \
        libxxf86vm-dev:$arch \
        wget

if [[ $arch != "armhf" ]]; then
    if [[ $arch == "s390x" ]]; then
        LLVM=9
    else
        LLVM=11
    fi

    # llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only
    # with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get
    # around this.
    apt-get install -y --no-remove \
            libclang-cpp${LLVM}:$arch \
            libffi-dev:$arch \
            libgcc-s1:$arch \
            libtinfo-dev:$arch \
            libz3-dev:$arch \
            llvm-${LLVM}:$arch \
            zlib1g
fi

. .gitlab-ci/container/create-cross-file.sh $arch


. .gitlab-ci/container/container_pre_build.sh


# dependencies where we want a specific version
EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)"
. .gitlab-ci/container/build-libdrm.sh

apt-get purge -y \
        $STABLE_EPHEMERAL

. .gitlab-ci/container/container_post_build.sh

# This needs to be done after container_post_build.sh, or apt-get breaks in there
if [[ $arch != "armhf" ]]; then
    apt-get download llvm-${LLVM}-{dev,tools}:$arch
    dpkg -i --force-depends llvm-${LLVM}-*_${arch}.deb
    rm llvm-${LLVM}-*_${arch}.deb
fi
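
# Illustrative sketch (not part of the original script): how the multiarch
# libdir above is derived.  The printed triplets are the usual values for
# these Debian architectures, shown here as examples.
dpkg-architecture -A arm64 -qDEB_TARGET_MULTIARCH   # aarch64-linux-gnu
dpkg-architecture -A armhf -qDEB_TARGET_MULTIARCH   # arm-linux-gnueabihf
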
@@ -1,60 +0,0 @@
#!/bin/bash

set -ex

EPHEMERAL="\
         rdfind \
         unzip \
         "

apt-get install -y --no-remove $EPHEMERAL

# Fetch the NDK and extract just the toolchain we want.
ndk=android-ndk-r21d
wget -O $ndk.zip https://dl.google.com/android/repository/$ndk-linux-x86_64.zip
unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
rm $ndk.zip
# Since it was packed as a zip file, symlinks/hardlinks got turned into
# duplicate files.  Turn them into hardlinks to save on container space.
rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/
# Drop some large tools we won't use in this build.
find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f

sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3"

sh .gitlab-ci/container/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x86 x86
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android arm armv8
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl armv7a-linux-androideabi

# Not using build-libdrm.sh because we don't want its cleanup after building
# each arch.  Fetch and extract now.
export LIBDRM_VERSION=libdrm-2.4.102
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz

for arch in \
        x86_64-linux-android \
        i686-linux-android \
        aarch64-linux-android \
        arm-linux-androideabi ; do

    cd $LIBDRM_VERSION
    rm -rf build-$arch
    meson build-$arch \
          --cross-file=/cross_file-$arch.txt \
          --libdir=lib/$arch \
          -Dlibkms=false \
          -Dnouveau=false \
          -Dvc4=false \
          -Detnaviv=false \
          -Dfreedreno=false \
          -Dintel=false \
          -Dcairo-tests=false
    ninja -C build-$arch install
    cd ..
done

rm -rf $LIBDRM_VERSION

apt-get purge -y $EPHEMERAL
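
# Illustrative sketch (not from the original scripts): roughly the kind of Meson
# cross file create-android-cross-file.sh is expected to produce for aarch64.
# The compiler names, the API level (29) and the toolchain layout are assumptions;
# cpu_family/cpu mirror the "arm armv8" arguments passed above.
ndk_bin=/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin
cat <<EOF
[binaries]
c = '$ndk_bin/aarch64-linux-android29-clang'
cpp = '$ndk_bin/aarch64-linux-android29-clang++'
ar = '$ndk_bin/llvm-ar'

[host_machine]
system = 'android'
cpu_family = 'arm'
cpu = 'armv8'
endian = 'little'
EOF
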
@@ -1,71 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

apt-get -y install ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/buster.list
apt-get update

apt-get -y install \
	abootimg \
	autoconf \
	automake \
	bc \
	bison \
	ccache \
	cmake \
	debootstrap \
	fastboot \
	flex \
	g++ \
	git \
	kmod \
	libasan6 \
	libdrm-dev \
	libelf-dev \
	libexpat1-dev \
	libx11-dev \
	libx11-xcb-dev \
	libxcb-dri2-0-dev \
	libxcb-dri3-dev \
	libxcb-glx0-dev \
	libxcb-present-dev \
	libxcb-randr0-dev \
	libxcb-shm0-dev \
	libxcb-xfixes0-dev \
	libxdamage-dev \
	libxext-dev \
	libxrandr-dev \
	libxshmfence-dev \
	libxxf86vm-dev \
	llvm-11-dev \
	meson \
	pkg-config \
	python3-mako \
	python3-pil \
	python3-pip \
	python3-requests \
	python3-setuptools \
	u-boot-tools \
	wget \
	xz-utils \
	zlib1g-dev

# Not available anymore in bullseye
apt-get install -y --no-remove -t buster \
        android-sdk-ext4-utils

pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366

arch=armhf
. .gitlab-ci/container/cross_build.sh

. .gitlab-ci/container/container_pre_build.sh

# dependencies where we want a specific version
EXTRA_MESON_ARGS=
. .gitlab-ci/container/build-libdrm.sh

. .gitlab-ci/container/container_post_build.sh
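
# Illustrative sketch (not part of the original script): with the extra buster
# entry added to sources.list.d above, "-t buster" picks the buster candidate
# for that one package; apt-cache policy shows which releases still carry it.
apt-cache policy android-sdk-ext4-utils
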
@@ -1,33 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

############### Install packages for baremetal testing
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
apt-get update

apt-get install -y --no-remove \
        abootimg \
        cpio \
        fastboot \
        netcat \
        procps \
        python3-distutils \
        python3-minimal \
        python3-serial \
        rsync \
        snmp \
        wget

# setup SNMPv2 SMI MIB
wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
    -O /usr/share/snmp/mibs/SNMPv2-SMI.txt

arch=arm64 . .gitlab-ci/container/baremetal_build.sh
arch=armhf . .gitlab-ci/container/baremetal_build.sh

# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
     -O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw
@@ -1,5 +0,0 @@
#!/bin/bash

arch=i386

. .gitlab-ci/container/cross_build.sh
@@ -1,5 +0,0 @@
#!/bin/bash

arch=ppc64el

. .gitlab-ci/container/cross_build.sh
@@ -1,5 +0,0 @@
#!/bin/bash

arch=s390x

. .gitlab-ci/container/cross_build.sh
@@ -1,82 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
        python3-pip \
        python3-setuptools \
        "

apt-get update

apt-get install -y --no-remove \
        $STABLE_EPHEMERAL \
        bison \
        ccache \
        dpkg-cross \
        flex \
        g++ \
        g++-mingw-w64-x86-64 \
        gcc \
        git \
        glslang-tools \
        kmod \
        libclang-11-dev \
        libclang-9-dev \
        libclc-dev \
        libelf-dev \
        libepoxy-dev \
        libexpat1-dev \
        libgtk-3-dev \
        libllvm11 \
        libllvm9 \
        libomxil-bellagio-dev \
        libpciaccess-dev \
        libunwind-dev \
        libva-dev \
        libvdpau-dev \
        libvulkan-dev \
        libx11-dev \
        libx11-xcb-dev \
        libxext-dev \
        libxml2-utils \
        libxrandr-dev \
        libxrender-dev \
        libxshmfence-dev \
        libxvmc-dev \
        libxxf86vm-dev \
        libz-mingw-w64-dev \
        make \
        meson \
        pkg-config \
        python3-mako \
        python3-pil \
        python3-requests \
        qemu-user \
        valgrind \
        wayland-protocols \
        wget \
        wine64 \
        x11proto-dri2-dev \
        x11proto-gl-dev \
        x11proto-randr-dev \
        xz-utils \
        zlib1g-dev

# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366

############### Uninstall ephemeral packages

apt-get purge -y $STABLE_EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
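
# Illustrative sketch (not from the original script): qemu-user is installed so
# foreign-arch test binaries can run through the host's binfmt_misc handlers,
# which is also why the Meson cross files set needs_exe_wrapper = False.
# The handler name "qemu-aarch64" is the usual one but is an assumption here.
cat /proc/sys/fs/binfmt_misc/qemu-aarch64
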
@@ -1,112 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
      autoconf \
      automake \
      autotools-dev \
      bzip2 \
      cmake \
      libgbm-dev \
      libtool \
      python3-pip \
      "

# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update

apt-get install -y --no-remove \
      $STABLE_EPHEMERAL \
      clang \
      libasan6 \
      libarchive-dev \
      libclang-cpp11-dev \
      libglvnd-dev \
      libllvmspirvlib-dev \
      liblua5.3-dev \
      libxcb-dri2-0-dev \
      libxcb-dri3-dev \
      libxcb-glx0-dev \
      libxcb-present-dev \
      libxcb-randr0-dev \
      libxcb-shm0-dev \
      libxcb-sync-dev \
      libxcb-xfixes0-dev \
      libxcb1-dev \
      libxml2-dev \
      llvm-11-dev \
      llvm-9-dev \
      ocl-icd-opencl-dev \
      procps \
      spirv-tools \
      strace \
      time \
      wine \
      wine32


. .gitlab-ci/container/container_pre_build.sh


# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix them ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF
#!/bin/sh

PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@
EOF
chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config


# dependencies where we want a specific version
export              XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
export           WAYLAND_RELEASES=https://wayland.freedesktop.org/releases

export         XORGMACROS_VERSION=util-macros-1.19.0
export         LIBWAYLAND_VERSION=wayland-1.18.0

wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION

. .gitlab-ci/container/build-libdrm.sh

wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
rm -rf $LIBWAYLAND_VERSION


pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd

git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1
pushd DirectX-Headers
mkdir build
cd build
meson .. --backend=ninja --buildtype=release -Dbuild-test=false
ninja
ninja install
popd
rm -rf DirectX-Headers

pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7

############### Uninstall the build software

apt-get purge -y \
      $STABLE_EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
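
# Illustrative sketch (not part of the original script): what the wrapper above
# changes.  PKG_CONFIG_LIBDIR restricts pkg-config to the mingw sysroot, so a
# cross build sees the Windows .pc files instead of the host's.
x86_64-w64-mingw32-pkg-config --list-all
PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config --list-all   # equivalent, without the wrapper
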
@@ -1,70 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

apt-get install -y ca-certificates

sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list

# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
      cargo \
      python3-dev \
      python3-pip \
      python3-setuptools \
      python3-wheel \
      "

apt-get update
apt-get dist-upgrade -y

apt-get install -y --no-remove \
      git \
      git-lfs \
      libasan6 \
      libexpat1 \
      libllvm11 \
      libllvm9 \
      liblz4-1 \
      libpng16-16 \
      libpython3.9 \
      libvulkan1 \
      libwayland-client0 \
      libwayland-server0 \
      libxcb-ewmh2 \
      libxcb-randr0 \
      libxcb-xfixes0 \
      libxkbcommon0 \
      libxrandr2 \
      libxrender1 \
      python3-mako \
      python3-numpy \
      python3-packaging \
      python3-pil \
      python3-requests \
      python3-six \
      python3-yaml \
      vulkan-tools \
      waffle-utils \
      xauth \
      xvfb \
      zlib1g

apt-get install -y --no-install-recommends \
      $STABLE_EPHEMERAL

# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb

############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
rm -rf ~/.cargo

apt-get purge -y $STABLE_EPHEMERAL

apt-get autoremove -y --purge
@@ -1,122 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
      autoconf \
      automake \
      bc \
      bison \
      bzip2 \
      ccache \
      clang-11 \
      cmake \
      flex \
      g++ \
      glslang-tools \
      libasound2-dev \
      libcap-dev \
      libclang-cpp11-dev \
      libelf-dev \
      libfdt-dev \
      libgbm-dev \
      libgles2-mesa-dev \
      libllvmspirvlib-dev \
      libpciaccess-dev \
      libpng-dev \
      libudev-dev \
      libvulkan-dev \
      libwaffle-dev \
      libwayland-dev \
      libx11-xcb-dev \
      libxcb-dri2-0-dev \
      libxext-dev \
      libxkbcommon-dev \
      libxrender-dev \
      llvm-11-dev \
      llvm-spirv \
      make \
      meson \
      ocl-icd-opencl-dev \
      patch \
      pkg-config \
      python3-distutils \
      wayland-protocols \
      wget \
      xz-utils \
      "

apt-get install -y --no-remove \
      $STABLE_EPHEMERAL \
      clinfo \
      inetutils-syslogd \
      iptables \
      libclang-common-11-dev \
      libclang-cpp11 \
      libcap2 \
      libegl1 \
      libfdt1 \
      libllvmspirvlib11 \
      libxcb-shm0 \
      ocl-icd-libopencl1 \
      python3-lxml \
      python3-renderdoc \
      python3-simplejson \
      spirv-tools \
      sysvinit-core


. .gitlab-ci/container/container_pre_build.sh

############### Build kernel

export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
export KERNEL_IMAGE_NAME=bzImage
export KERNEL_ARCH=x86_64
export DEBIAN_ARCH=amd64

mkdir -p /lava-files/
. .gitlab-ci/container/build-kernel.sh

############### Build libdrm

. .gitlab-ci/container/build-libdrm.sh

############### Build libclc

. .gitlab-ci/container/build-libclc.sh

############### Build virglrenderer

. .gitlab-ci/container/build-virglrenderer.sh

############### Build piglit

PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh

############### Build Crosvm

. .gitlab-ci/container/build-rust.sh
. .gitlab-ci/container/build-crosvm.sh
rm -rf /root/.cargo

############### Build dEQP GL

DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh

############### Build apitrace

. .gitlab-ci/container/build-apitrace.sh

############### Uninstall the build software

ccache --show-stats

apt-get purge -y \
      $STABLE_EPHEMERAL

apt-get autoremove -y --purge
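
# Illustrative sketch (not from the original scripts): "VAR=value . script.sh"
# makes VAR visible to the sourced script, which is how PIGLIT_OPTS and
# DEQP_TARGET above reach build-piglit.sh and build-deqp.sh.  demo.sh is a
# hypothetical file used only for this example.
cat > demo.sh <<'EOF'
echo "DEQP_TARGET is: $DEQP_TARGET"
EOF
DEQP_TARGET=surfaceless . ./demo.sh   # prints "DEQP_TARGET is: surfaceless"
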
@@ -1,160 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
      ccache \
      cmake \
      g++ \
      g++-mingw-w64-i686-posix \
      g++-mingw-w64-x86-64-posix \
      glslang-tools \
      libgbm-dev \
      libgles2-mesa-dev \
      liblz4-dev \
      libpciaccess-dev \
      libudev-dev \
      libvulkan-dev \
      libwaffle-dev \
      libwayland-dev \
      libx11-xcb-dev \
      libxcb-ewmh-dev \
      libxcb-keysyms1-dev \
      libxkbcommon-dev \
      libxrandr-dev \
      libxrender-dev \
      libzstd-dev \
      meson \
      mingw-w64-i686-dev \
      mingw-w64-tools \
      mingw-w64-x86-64-dev \
      p7zip \
      patch \
      pkg-config \
      python3-distutils \
      wget \
      xz-utils \
      "

apt-get install -y --no-remove \
      $STABLE_EPHEMERAL \
      libxcb-shm0 \
      python3-lxml \
      python3-simplejson \
      xinit \
      xserver-xorg-video-amdgpu \
      xserver-xorg-video-ati

# We need multiarch for Wine
dpkg --add-architecture i386

apt-get update

apt-get install -y --no-remove \
      wine \
      wine32 \
      wine64

function setup_wine() {
    export WINEDEBUG="-all"
    export WINEPREFIX="$1"

    # We don't want crash dialogs
    cat >crashdialog.reg <<EOF
Windows Registry Editor Version 5.00

[HKEY_CURRENT_USER\Software\Wine\WineDbg]
"ShowCrashDialog"=dword:00000000

EOF

    # Set the wine prefix and disable the crash dialog
    wine regedit crashdialog.reg
    rm crashdialog.reg

    # An immediate wine command may fail with: "${WINEPREFIX}: Not a
    # valid wine prefix."  That message is spurious: it comes from
    # checking for the existence of the system.reg file, which does not
    # exist yet.  Giving it a bit more time to be created solves the
    # problem ...
    while ! test -f  "${WINEPREFIX}/system.reg"; do sleep 1; done
}

############### Install DXVK

DXVK_VERSION="1.8.1"

setup_wine "/dxvk-wine64"

wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install
rm -rf dxvk-"${DXVK_VERSION}"
rm dxvk-"${DXVK_VERSION}".tar.gz

############### Install Windows' apitrace binaries

APITRACE_VERSION="10.0"
APITRACE_VERSION_DATE=""

wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
      "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
      "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"

# Add the apitrace path to the registry
wine \
    reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
    /v Path \
    /t REG_EXPAND_SZ \
    /d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \
    /f

############### Building ...

. .gitlab-ci/container/container_pre_build.sh

############### Build libdrm

. .gitlab-ci/container/build-libdrm.sh

############### Build parallel-deqp-runner's hang-detection tool

. .gitlab-ci/container/build-hang-detection.sh

############### Build piglit

PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh

############### Build Fossilize

. .gitlab-ci/container/build-fossilize.sh

############### Build dEQP VK

. .gitlab-ci/container/build-deqp.sh

############### Build gfxreconstruct

. .gitlab-ci/container/build-gfxreconstruct.sh

############### Build VKD3D-Proton

setup_wine "/vkd3d-proton-wine64"

. .gitlab-ci/container/build-vkd3d-proton.sh

############### Uninstall the build software

ccache --show-stats

apt-get purge -y \
      $STABLE_EPHEMERAL

apt-get autoremove -y --purge
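
# Illustrative sketch (not part of the original script): double-checking that
# the two registry writes above landed in the dxvk prefix.  "reg query" is the
# read counterpart of "reg add"; the exact output formatting is Wine's own.
WINEPREFIX=/dxvk-wine64 wine reg query 'HKEY_CURRENT_USER\Software\Wine\WineDbg' /v ShowCrashDialog
WINEPREFIX=/dxvk-wine64 wine reg query 'HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment' /v Path
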
@@ -1,107 +0,0 @@
#!/bin/bash

set -e
set -o xtrace


EPHEMERAL="
        autoconf
        automake
        bzip2
        git
        libtool
        pkgconfig(epoxy)
        pkgconfig(gbm)
        unzip
        wget
        xz
        "

dnf install -y --setopt=install_weak_deps=False \
    bison \
    ccache \
    clang-devel \
    flex \
    gcc \
    gcc-c++ \
    gettext \
    kernel-headers \
    llvm-devel \
    meson \
    "pkgconfig(dri2proto)" \
    "pkgconfig(expat)" \
    "pkgconfig(glproto)" \
    "pkgconfig(libclc)" \
    "pkgconfig(libelf)" \
    "pkgconfig(libglvnd)" \
    "pkgconfig(libomxil-bellagio)" \
    "pkgconfig(libselinux)" \
    "pkgconfig(libva)" \
    "pkgconfig(pciaccess)" \
    "pkgconfig(vdpau)" \
    "pkgconfig(vulkan)" \
    "pkgconfig(wayland-egl-backend)" \
    "pkgconfig(wayland-protocols)" \
    "pkgconfig(wayland-scanner)" \
    "pkgconfig(x11)" \
    "pkgconfig(x11-xcb)" \
    "pkgconfig(xcb)" \
    "pkgconfig(xcb-dri2)" \
    "pkgconfig(xcb-dri3)" \
    "pkgconfig(xcb-glx)" \
    "pkgconfig(xcb-present)" \
    "pkgconfig(xcb-randr)" \
    "pkgconfig(xcb-sync)" \
    "pkgconfig(xcb-xfixes)" \
    "pkgconfig(xdamage)" \
    "pkgconfig(xext)" \
    "pkgconfig(xfixes)" \
    "pkgconfig(xrandr)" \
    "pkgconfig(xshmfence)" \
    "pkgconfig(xxf86vm)" \
    "pkgconfig(zlib)" \
    python-unversioned-command \
    python3-devel \
    python3-mako \
    vulkan-headers \
    $EPHEMERAL


. .gitlab-ci/container/container_pre_build.sh


# dependencies where we want a specific version
export              XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
export           WAYLAND_RELEASES=https://wayland.freedesktop.org/releases

export         XORGMACROS_VERSION=util-macros-1.19.0
export         LIBWAYLAND_VERSION=wayland-1.18.0

wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION

. .gitlab-ci/container/build-libdrm.sh

wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
rm -rf $LIBWAYLAND_VERSION


pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd


############### Uninstall the build software

dnf remove -y $EPHEMERAL

. .gitlab-ci/container/container_post_build.sh
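
# Illustrative sketch (not part of the original script): dnf resolves the
# "pkgconfig(foo)" virtual provides used above to concrete packages; repoquery
# can show which package satisfies one of them (x11 taken as the example).
dnf repoquery --whatprovides 'pkgconfig(x11)'
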
@@ -1,222 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

export DEBIAN_FRONTEND=noninteractive

check_minio()
{
    MINIO_PATH="${MINIO_HOST}/mesa-lava/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
    if wget -q --method=HEAD "https://${MINIO_PATH}/done"; then
        exit
    fi
}

# If remote files are up-to-date, skip rebuilding them
check_minio "${FDO_UPSTREAM_REPO}"
check_minio "${CI_PROJECT_PATH}"

. .gitlab-ci/container/container_pre_build.sh

# Install rust, which we'll be using for deqp-runner.  It will be cleaned up at the end.
. .gitlab-ci/container/build-rust.sh

if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
    GCC_ARCH="aarch64-linux-gnu"
    KERNEL_ARCH="arm64"
    DEFCONFIG="arch/arm64/configs/defconfig"
    DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dtb"
    KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
    GCC_ARCH="arm-linux-gnueabihf"
    KERNEL_ARCH="arm"
    DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
    DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/imx6q-cubox-i.dtb"
    KERNEL_IMAGE_NAME="zImage"
    . .gitlab-ci/container/create-cross-file.sh armhf
else
    GCC_ARCH="x86_64-linux-gnu"
    KERNEL_ARCH="x86_64"
    DEFCONFIG="arch/x86/configs/x86_64_defconfig"
    DEVICE_TREES=""
    KERNEL_IMAGE_NAME="bzImage"
    ARCH_PACKAGES="libva-dev"
fi

# Determine if we're in a cross build.
if [[ -e /cross_file-$DEBIAN_ARCH.txt ]]; then
    EXTRA_MESON_ARGS="--cross-file /cross_file-$DEBIAN_ARCH.txt"
    EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/toolchain-$DEBIAN_ARCH.cmake"

    if [ $DEBIAN_ARCH = arm64 ]; then
        RUST_TARGET="aarch64-unknown-linux-gnu"
    elif [ $DEBIAN_ARCH = armhf ]; then
        RUST_TARGET="armv7-unknown-linux-gnueabihf"
    fi
    rustup target add $RUST_TARGET
    export EXTRA_CARGO_ARGS="--target $RUST_TARGET"

    export ARCH=${KERNEL_ARCH}
    export CROSS_COMPILE="${GCC_ARCH}-"
fi

apt-get update
apt-get install -y --no-remove \
                   ${ARCH_PACKAGES} \
                   automake \
                   bc \
                   cmake \
                   debootstrap \
                   git \
                   glslang-tools \
                   libdrm-dev \
                   libegl1-mesa-dev \
                   libgbm-dev \
                   libgles2-mesa-dev \
                   libpng-dev \
                   libssl-dev \
                   libudev-dev \
                   libvulkan-dev \
                   libwaffle-dev \
                   libwayland-dev \
                   libx11-xcb-dev \
                   libxcb-dri2-0-dev \
                   libxkbcommon-dev \
                   patch \
                   python3-distutils \
                   python3-mako \
                   python3-numpy \
                   python3-serial \
                   wget


if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
    apt-get install -y --no-remove \
                       libegl1-mesa-dev:armhf \
                       libelf-dev:armhf \
                       libgbm-dev:armhf \
                       libgles2-mesa-dev:armhf \
                       libpng-dev:armhf \
                       libudev-dev:armhf \
                       libvulkan-dev:armhf \
                       libwaffle-dev:armhf \
                       libwayland-dev:armhf \
                       libx11-xcb-dev:armhf \
                       libxkbcommon-dev:armhf
fi


############### Building
STRIP_CMD="${GCC_ARCH}-strip"
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}


############### Build apitrace
. .gitlab-ci/container/build-apitrace.sh
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/apitrace
mv /apitrace/build /lava-files/rootfs-${DEBIAN_ARCH}/apitrace
rm -rf /apitrace


############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin
mv /usr/local/bin/*-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/.


############### Build dEQP
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh

mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.


############### Build piglit
PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.

############### Build libva tests
if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
    . .gitlab-ci/container/build-va-tools.sh
    mv /va/bin/* /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/
fi

############### Build libdrm
EXTRA_MESON_ARGS+=" -D prefix=/libdrm"
. .gitlab-ci/container/build-libdrm.sh

############### Build kernel
. .gitlab-ci/container/build-kernel.sh

############### Delete rust, since the tests won't be compiling anything.
rm -rf /root/.cargo

############### Create rootfs
set +e
if ! debootstrap \
     --variant=minbase \
     --arch=${DEBIAN_ARCH} \
     --components main,contrib,non-free \
     bullseye \
     /lava-files/rootfs-${DEBIAN_ARCH}/ \
     http://deb.debian.org/debian; then
    cat /lava-files/rootfs-${DEBIAN_ARCH}/debootstrap/debootstrap.log
    exit 1
fi
set -e

cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/.
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh


############### Install the built libdrm
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has already been
# created.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/
cp -Rp /libdrm/share /lava-files/rootfs-${DEBIAN_ARCH}/libdrm/share
rm -rf /libdrm


if [ ${DEBIAN_ARCH} = arm64 ]; then
    # Make a gzipped copy of the Image for db410c.
    gzip -k /lava-files/Image
    KERNEL_IMAGE_NAME+=" Image.gz"
fi

du -ah /lava-files/rootfs-${DEBIAN_ARCH} | sort -h | tail -100
pushd /lava-files/rootfs-${DEBIAN_ARCH}
  tar czf /lava-files/lava-rootfs.tgz .
popd

. .gitlab-ci/container/container_post_build.sh

############### Upload the files!
ci-fairy minio login $CI_JOB_JWT
FILES_TO_UPLOAD="lava-rootfs.tgz \
                 $KERNEL_IMAGE_NAME"

if [[ -n $DEVICE_TREES ]]; then
    FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
fi

for f in $FILES_TO_UPLOAD; do
    ci-fairy minio cp /lava-files/$f \
             minio://${MINIO_PATH}/$f
done

touch /lava-files/done
ci-fairy minio cp /lava-files/done minio://${MINIO_PATH}/done
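
# Illustrative sketch (not from the original script): "basename -a" strips the
# directory part of every device-tree path, so only the .dtb file names are
# added to FILES_TO_UPLOAD and looked up under /lava-files/.
basename -a arch/arm64/boot/dts/qcom/apq8016-sbc.dtb arch/arm64/boot/dts/qcom/apq8096-db820c.dtb
# prints:
#   apq8016-sbc.dtb
#   apq8096-db820c.dtb
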
@@ -1,100 +0,0 @@
 | 
			
		||||
CONFIG_LOCALVERSION_AUTO=y
 | 
			
		||||
CONFIG_DEBUG_KERNEL=y
 | 
			
		||||
 | 
			
		||||
CONFIG_PWM=y
 | 
			
		||||
CONFIG_PM_DEVFREQ=y
 | 
			
		||||
CONFIG_OF=y
 | 
			
		||||
CONFIG_CROS_EC=y
 | 
			
		||||
 | 
			
		||||
# abootimg with a 'dummy' rootfs fails with root=/dev/nfs
 | 
			
		||||
CONFIG_BLK_DEV_INITRD=n
 | 
			
		||||
 | 
			
		||||
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
 | 
			
		||||
CONFIG_DEVFREQ_GOV_POWERSAVE=y
 | 
			
		||||
CONFIG_DEVFREQ_GOV_USERSPACE=y
 | 
			
		||||
CONFIG_DEVFREQ_GOV_PASSIVE=y
 | 
			
		||||
 | 
			
		||||
CONFIG_DRM=y
 | 
			
		||||
CONFIG_DRM_PANEL_SIMPLE=y
 | 
			
		||||
CONFIG_PWM_CROS_EC=y
 | 
			
		||||
CONFIG_BACKLIGHT_PWM=y
 | 
			
		||||
 | 
			
		||||
# Strip out some stuff we don't need for graphics testing, to reduce
 | 
			
		||||
# the build.
 | 
			
		||||
CONFIG_CAN=n
 | 
			
		||||
CONFIG_WIRELESS=n
 | 
			
		||||
CONFIG_RFKILL=n
 | 
			
		||||
CONFIG_WLAN=n
 | 
			
		||||
 | 
			
		||||
CONFIG_REGULATOR_FAN53555=y
 | 
			
		||||
CONFIG_REGULATOR=y
 | 
			
		||||
 | 
			
		||||
CONFIG_REGULATOR_VCTRL=y
 | 
			
		||||
 | 
			
		||||
CONFIG_KASAN=n
 | 
			
		||||
CONFIG_KASAN_INLINE=n
 | 
			
		||||
CONFIG_STACKTRACE=n
 | 
			
		||||
 | 
			
		||||
CONFIG_TMPFS=y
 | 
			
		||||
 | 
			
		||||
CONFIG_PROVE_LOCKING=n
 | 
			
		||||
CONFIG_DEBUG_LOCKDEP=n
 | 
			
		||||
CONFIG_SOFTLOCKUP_DETECTOR=y
 | 
			
		||||
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 | 
			
		||||
 | 
			
		||||
CONFIG_DETECT_HUNG_TASK=y
 | 
			
		||||
 | 
			
		||||
CONFIG_USB_USBNET=y
 | 
			
		||||
CONFIG_NETDEVICES=y
 | 
			
		||||
CONFIG_USB_NET_DRIVERS=y
 | 
			
		||||
CONFIG_USB_RTL8152=y
 | 
			
		||||
CONFIG_USB_NET_AX8817X=y
 | 
			
		||||
CONFIG_USB_NET_SMSC95XX=y
 | 
			
		||||
CONFIG_USB_GADGET=y
 | 
			
		||||
CONFIG_USB_ETH=y
 | 
			
		||||
 | 
			
		||||
CONFIG_FW_LOADER_COMPRESS=y
 | 
			
		||||
 | 
			
		||||
# options for AMD devices
 | 
			
		||||
CONFIG_X86_AMD_PLATFORM_DEVICE=y
 | 
			
		||||
CONFIG_ACPI_VIDEO=y
 | 
			
		||||
CONFIG_X86_AMD_FREQ_SENSITIVITY=y
 | 
			
		||||
CONFIG_PINCTRL=y
 | 
			
		||||
CONFIG_PINCTRL_AMD=y
 | 
			
		||||
CONFIG_DRM_AMDGPU=m
 | 
			
		||||
CONFIG_DRM_AMDGPU_SI=y
 | 
			
		||||
CONFIG_DRM_AMDGPU_USERPTR=y
 | 
			
		||||
CONFIG_DRM_AMD_ACP=n
 | 
			
		||||
CONFIG_ACPI_WMI=y
 | 
			
		||||
CONFIG_MXM_WMI=y
 | 
			
		||||
CONFIG_PARPORT=y
 | 
			
		||||
CONFIG_PARPORT_PC=y
 | 
			
		||||
CONFIG_PARPORT_SERIAL=y
 | 
			
		||||
CONFIG_SERIAL_8250_DW=y
 | 
			
		||||
CONFIG_CHROME_PLATFORMS=y
 | 
			
		||||
 | 
			
		||||
#options for Intel devices
 | 
			
		||||
CONFIG_MFD_INTEL_LPSS_PCI=y
 | 
			
		||||
 | 
			
		||||
#options for KVM guests
 | 
			
		||||
CONFIG_FUSE_FS=y
 | 
			
		||||
CONFIG_HYPERVISOR_GUEST=y
 | 
			
		||||
CONFIG_KVM=y
 | 
			
		||||
CONFIG_KVM_GUEST=y
 | 
			
		||||
CONFIG_VIRT_DRIVERS=y
 | 
			
		||||
CONFIG_VIRTIO_FS=y
 | 
			
		||||
CONFIG_DRM_VIRTIO_GPU=y
 | 
			
		||||
CONFIG_SERIAL_8250_CONSOLE=y
 | 
			
		||||
CONFIG_VIRTIO_NET=y
 | 
			
		||||
CONFIG_VIRTIO_CONSOLE=y
 | 
			
		||||
CONFIG_PARAVIRT=y
 | 
			
		||||
CONFIG_VIRTIO_BLK=y
 | 
			
		||||
CONFIG_VIRTUALIZATION=y
 | 
			
		||||
CONFIG_VIRTIO=y
 | 
			
		||||
CONFIG_VIRTIO_PCI=y
 | 
			
		||||
CONFIG_VIRTIO_MMIO=y
 | 
			
		||||
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
 | 
			
		||||
CONFIG_CRYPTO_DEV_VIRTIO=y
 | 
			
		||||
CONFIG_HW_RANDOM_VIRTIO=y
 | 
			
		||||
CONFIG_BLK_MQ_VIRTIO=y
 | 
			
		||||
CONFIG_TUN=y
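# (the VIRTIO/KVM options above back the crosvm-based jobs: crosvm-runner.sh,
# later in this diff, boots /lava-files/bzImage with a virtio-gpu device and a
# virtiofs root, so those drivers have to be built in)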
@@ -1 +0,0 @@
lp_test_arit
@@ -1,2 +0,0 @@
lp_test_arit
lp_test_format
@@ -1,27 +0,0 @@
#!/bin/sh

set -ex

mount -t proc none /proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp
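# (this script runs as the VM's init -- crosvm-runner.sh below passes
# init=.../crosvm-init.sh on the kernel command line -- so the standard
# pseudo-filesystems have to be mounted by hand)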

. /crosvm-env.sh

# / is ro
export PIGLIT_REPLAY_EXTRA_ARGS="$PIGLIT_REPLAY_EXTRA_ARGS --db-path /tmp/replayer-db"

if sh $CROSVM_TEST_SCRIPT; then
    touch /results/success
fi

sleep 5   # Leave some time to get the last output flushed out

poweroff -d -n -f || true

sleep 10   # Just in case init would exit before the kernel shuts down the VM

exit 1
@@ -1,49 +0,0 @@
#!/bin/sh

set -x

ln -sf $CI_PROJECT_DIR/install /install

export LD_LIBRARY_PATH=$CI_PROJECT_DIR/install/lib/
export EGL_PLATFORM=surfaceless

export -p > /crosvm-env.sh
export GALLIUM_DRIVER="$CROSVM_GALLIUM_DRIVER"
export GALLIVM_PERF="nopt"
export LIBGL_ALWAYS_SOFTWARE="true"

CROSVM_KERNEL_ARGS="root=my_root rw rootfstype=virtiofs loglevel=3 init=$CI_PROJECT_DIR/install/crosvm-init.sh ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"

# Temporary results dir because from the guest we cannot write to /
mkdir -p /results
mount -t tmpfs tmpfs /results

mkdir -p /piglit/.gitlab-ci/piglit
mount -t tmpfs tmpfs /piglit/.gitlab-ci/piglit

unset DISPLAY
unset XDG_RUNTIME_DIR

/usr/sbin/iptables-legacy  -t nat -A POSTROUTING -o eth0 -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward

# Crosvm wants this
syslogd > /dev/null

# We aren't testing LLVMPipe here, so we don't need to validate NIR on the host
export NIR_VALIDATE=0

crosvm run \
  --gpu "$CROSVM_GPU_ARGS" \
  -m 4096 \
  -c $((FDO_CI_CONCURRENT > 1 ? FDO_CI_CONCURRENT - 1 : 1)) \
  --disable-sandbox \
  --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
  --host_ip=192.168.30.1 --netmask=255.255.255.0 --mac "AA:BB:CC:00:00:12" \
  -p "$CROSVM_KERNEL_ARGS" \
  /lava-files/bzImage
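# (-c gives the guest one vCPU fewer than FDO_CI_CONCURRENT, but at least one,
# presumably to leave a host core free for virglrenderer and log handling;
# crosvm's -m is in MiB, so the guest gets 4 GiB of RAM)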

mkdir -p $CI_PROJECT_DIR/results
mv /results/* $CI_PROJECT_DIR/results/.

test -f $CI_PROJECT_DIR/results/success
@@ -1,214 +0,0 @@
#!/bin/sh

set -ex

if [ -z "$GPU_VERSION" ]; then
   echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/gpu-version-*.txt)'
   exit 1
fi

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json

RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS

HANG_DETECTION_CMD=""

if [ -z "$DEQP_SUITE" ]; then
    if [ -z "$DEQP_VER" ]; then
        echo 'DEQP_SUITE must be set to the name of your deqp-gpu_version.toml, or DEQP_VER must be set to something like "gles2", "gles31-khr" or "vk" for the test run'
        exit 1
    fi

    DEQP_WIDTH=${DEQP_WIDTH:-256}
    DEQP_HEIGHT=${DEQP_HEIGHT:-256}
    DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0}
    DEQP_VARIANT=${DEQP_VARIANT:-master}

    DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT"
    DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=${DEQP_SURFACE_TYPE:-pbuffer}"
    DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG"
    DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden"

    if [ "$DEQP_VER" = "vk" -a -z "$VK_DRIVER" ]; then
        echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
        exit 1
    fi

    # Generate test case list file.
    if [ "$DEQP_VER" = "vk" ]; then
       MUSTPASS=/deqp/mustpass/vk-$DEQP_VARIANT.txt
       DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk
       HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"
    elif [ "$DEQP_VER" = "gles2" -o "$DEQP_VER" = "gles3" -o "$DEQP_VER" = "gles31" -o "$DEQP_VER" = "egl" ]; then
       MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
       DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER
    elif [ "$DEQP_VER" = "gles2-khr" -o "$DEQP_VER" = "gles3-khr" -o "$DEQP_VER" = "gles31-khr" -o "$DEQP_VER" = "gles32-khr" ]; then
       MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
       DEQP=/deqp/external/openglcts/modules/glcts
    else
       MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
       DEQP=/deqp/external/openglcts/modules/glcts
    fi

    cp $MUSTPASS /tmp/case-list.txt

    # If the caselist is too long to run in a reasonable amount of time, let the job
    # specify what fraction (1/n) of the caselist we should run.  Note: N~M is a gnu
    # sed extension to match every nth line (first line is #1).
    if [ -n "$DEQP_FRACTION" ]; then
       sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt
    fi
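    # (e.g. DEQP_FRACTION=10 keeps lines 1, 11, 21, ... of the caselist,
    # i.e. one test case in ten)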

    # If the job is parallel at the gitlab job level, take the corresponding fraction
    # of the caselist.
    if [ -n "$CI_NODE_INDEX" ]; then
       sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
    fi

    if [ -n "$DEQP_CASELIST_FILTER" ]; then
        sed -ni "/$DEQP_CASELIST_FILTER/p" /tmp/case-list.txt
    fi

    if [ -n "$DEQP_CASELIST_INV_FILTER" ]; then
        sed -ni "/$DEQP_CASELIST_INV_FILTER/!p" /tmp/case-list.txt
    fi

    if [ ! -s /tmp/case-list.txt ]; then
        echo "Caselist generation failed"
        exit 1
    fi
fi

if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
    DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
fi

# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/$GPU_VERSION-flakes.txt


if [ -n "$VK_DRIVER" ] && [ -e "$INSTALL/$VK_DRIVER-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$VK_DRIVER-skips.txt"
fi

if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GALLIUM_DRIVER-skips.txt"
fi

if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$DRIVER_NAME-skips.txt"
fi

if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
fi

set +e

report_load() {
    echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
    echo "# of CPU cores: $(cat /proc/cpuinfo | grep processor | wc -l)"
}

# wrapper to suppress +x to avoid spamming the log
quiet() {
    set +x
    "$@"
    set -x
}

if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
    # deqp uses virpipe, while virgl_test_server uses llvmpipe
    export GALLIUM_DRIVER="$GALLIUM_DRIVER"

    VTEST_ARGS="--use-egl-surfaceless"
    if [ "$VIRGL_HOST_API" = "GLES" ]; then
        VTEST_ARGS="$VTEST_ARGS --use-gles"
    fi

    GALLIUM_DRIVER=llvmpipe \
    GALLIVM_PERF="nopt" \
    virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &

    sleep 1
fi

if [ -z "$DEQP_SUITE" ]; then
    if [ -n "$DEQP_EXPECTED_RENDERER" ]; then
        export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --renderer-check "$DEQP_EXPECTED_RENDERER""
    fi
    if [ $DEQP_VER != vk -a $DEQP_VER != egl ]; then
        export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check `cat $INSTALL/VERSION | sed 's/[() ]/./g'`"
    fi

    deqp-runner \
        run \
        --deqp $DEQP \
        --output $RESULTS \
        --caselist /tmp/case-list.txt \
        --skips $INSTALL/all-skips.txt $DEQP_SKIPS \
        --flakes $INSTALL/$GPU_VERSION-flakes.txt \
        --testlog-to-xml /deqp/executor/testlog-to-xml \
        --jobs ${FDO_CI_CONCURRENT:-4} \
        $DEQP_RUNNER_OPTIONS \
        -- \
        $DEQP_OPTIONS
else
    deqp-runner \
        suite \
        --suite $INSTALL/deqp-$DEQP_SUITE.toml \
        --output $RESULTS \
        --skips $INSTALL/all-skips.txt $DEQP_SKIPS \
        --flakes $INSTALL/$GPU_VERSION-flakes.txt \
        --testlog-to-xml /deqp/executor/testlog-to-xml \
        --fraction-start $CI_NODE_INDEX \
        --fraction $CI_NODE_TOTAL \
        --jobs ${FDO_CI_CONCURRENT:-4} \
        $DEQP_RUNNER_OPTIONS
fi

DEQP_EXITCODE=$?

quiet report_load

# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
find $RESULTS -name \*.xml | \
    sort -n |
    sed -n '1,+49!p' | \
    xargs rm -f
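# (sed -n '1,+49!p' prints every line except the first 50, so xargs deletes
# all XMLs beyond the first 50 in sorted order)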

# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
find $RESULTS -name \*.xml \
    -exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
    -quit

deqp-runner junit \
   --testsuite dEQP \
   --results $RESULTS/failures.csv \
   --output $RESULTS/junit.xml \
   --limit 50 \
   --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"

# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
  python3 $INSTALL/report-flakes.py \
         --host irc.oftc.net \
         --port 6667 \
         --results $RESULTS/results.csv \
         --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
         --channel "$FLAKES_CHANNEL" \
         --runner "$CI_RUNNER_DESCRIPTION" \
         --job "$CI_JOB_ID" \
         --url "$CI_JOB_URL" \
         --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
         --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi

exit $DEQP_EXITCODE
@@ -1 +0,0 @@
../docs/ci
@@ -1,36 +0,0 @@
#!/bin/bash

set +e
set -o xtrace

# if we run this script outside of gitlab-ci for testing, ensure
# we get meaningful variables
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/mesa}

if [[ -e $CI_PROJECT_DIR/.git ]]
then
    echo "Repository already present, skip cache download"
    exit
fi

TMP_DIR=$(mktemp -d)

echo "Downloading archived master..."
/usr/bin/wget -O $TMP_DIR/mesa.tar.gz \
              https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/mesa.tar.gz

# check wget error code
if [[ $? -ne 0 ]]
then
    echo "Repository cache not available"
    exit
fi

set -e

rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/mesa.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"
@@ -1,20 +0,0 @@
#!/bin/sh

set -ex

if [ -z "$VK_DRIVER" ]; then
   echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
   exit 1
fi

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.x86_64.json

# To store Fossilize logs on failure.
RESULTS=`pwd`/results
mkdir -p results

"$INSTALL/fossils/fossils.sh" "$INSTALL/fossils.yml" "$RESULTS"
@@ -1,10 +0,0 @@
fossils-db:
  repo: "https://gitlab.freedesktop.org/hakzsam/fossils-db"
  commit: "5626cedcb58bd95a7b79a9664651818aea92b21c"

fossils:
  - path: sascha-willems/database.foz
  - path: parallel-rdp/small_subgroup.foz
  - path: parallel-rdp/small_uber_subgroup.foz
  - path: parallel-rdp/subgroup.foz
  - path: parallel-rdp/uber_subgroup.foz
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

FOSSILS_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
FOSSILS_YAML="$(readlink -f "$1")"
FOSSILS_RESULTS="$2"

clone_fossils_db()
{
    local repo="$1"
    local commit="$2"
    rm -rf fossils-db
    git clone --no-checkout "$repo" fossils-db
    (cd fossils-db; git reset "$commit" || git reset "origin/$commit")
}

query_fossils_yaml()
{
    python3 "$FOSSILS_SCRIPT_DIR/query_fossils_yaml.py" \
        --file "$FOSSILS_YAML" "$@"
}

create_clean_git()
{
    rm -rf .clean_git
    cp -R .git .clean_git
}

restore_clean_git()
{
    rm -rf .git
    cp -R .clean_git .git
}

fetch_fossil()
{
    local fossil="${1//,/?}"
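    # (git-lfs' -I/--include takes a comma-separated pattern list, so any comma
    # in the path is swapped for the single-character wildcard '?' to keep the
    # pattern intact)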
    echo -n "[fetch_fossil] Fetching $1... "
    local output=$(git lfs pull -I "$fossil" 2>&1)
    local ret=0
    if [[ $? -ne 0 || ! -f "$1" ]]; then
        echo "ERROR"
        echo "$output"
        ret=1
    else
        echo "OK"
    fi
    restore_clean_git
    return $ret
}

if [[ -n "$(query_fossils_yaml fossils_db_repo)" ]]; then
    clone_fossils_db "$(query_fossils_yaml fossils_db_repo)" \
                     "$(query_fossils_yaml fossils_db_commit)"
    cd fossils-db
else
    echo "Warning: No fossils-db entry in $FOSSILS_YAML, assuming fossils-db is current directory"
fi

# During git operations various git objects get created which
# may take up significant space. Store a clean .git instance,
# which we restore after various git operations to keep our
# storage consumption low.
create_clean_git

for fossil in $(query_fossils_yaml fossils)
do
    fetch_fossil "$fossil" || exit $?
    fossilize-replay --num-threads 4 $fossil 1>&2 2> $FOSSILS_RESULTS/fossil_replay.txt
    if [ $? != 0 ]; then
        echo "Replay of $fossil failed"
        grep "pipeline crashed or hung" $FOSSILS_RESULTS/fossil_replay.txt
        exit 1
    fi
    rm $fossil
done

exit $ret
@@ -1,69 +0,0 @@
#!/usr/bin/python3

# Copyright (c) 2019 Collabora Ltd
# Copyright (c) 2020 Valve Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# SPDX-License-Identifier: MIT

import argparse
import yaml

def cmd_fossils_db_repo(args):
    with open(args.file, 'r') as f:
        y = yaml.safe_load(f)
    print(y['fossils-db']['repo'])

def cmd_fossils_db_commit(args):
    with open(args.file, 'r') as f:
        y = yaml.safe_load(f)
    print(y['fossils-db']['commit'])

def cmd_fossils(args):
    with open(args.file, 'r') as f:
        y = yaml.safe_load(f)

    fossils = list(y['fossils'])
    if len(fossils) == 0:
        return

    print('\n'.join((t['path'] for t in fossils)))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', required=True,
                        help='the name of the yaml file')

    subparsers = parser.add_subparsers(help='sub-command help')

    parser_fossils_db_repo = subparsers.add_parser('fossils_db_repo')
    parser_fossils_db_repo.set_defaults(func=cmd_fossils_db_repo)

    parser_fossils_db_commit = subparsers.add_parser('fossils_db_commit')
    parser_fossils_db_commit.set_defaults(func=cmd_fossils_db_commit)

    parser_fossils = subparsers.add_parser('fossils')
    parser_fossils.set_defaults(func=cmd_fossils)

    args = parser.parse_args()
    args.func(args)

if __name__ == "__main__":
    main()
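# Invoked by fossils.sh above, e.g.:
#   python3 query_fossils_yaml.py --file fossils.yml fossils
# (the other subcommands are fossils_db_repo and fossils_db_commit)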
@@ -1,70 +0,0 @@
#!/bin/sh

set -ex

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export LIBVA_DRIVERS_PATH=`pwd`/install/lib/dri/
# libva spams driver open info by default, and that happens per testcase.
export LIBVA_MESSAGING_LEVEL=1

if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
    GTEST_RUNNER_OPTIONS="$GTEST_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
fi

# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/$GPU_VERSION-flakes.txt

if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GALLIUM_DRIVER-skips.txt"
fi

if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$DRIVER_NAME-skips.txt"
fi

if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GPU_VERSION-skips.txt"
fi

set +e

gtest-runner \
    run \
    --gtest $GTEST \
    --output ${GTEST_RESULTS_DIR:-results} \
    --jobs ${FDO_CI_CONCURRENT:-4} \
    $GTEST_SKIPS \
    --flakes $INSTALL/$GPU_VERSION-flakes.txt \
    --fraction-start ${CI_NODE_INDEX:-1} \
    --fraction $((${CI_NODE_TOTAL:-1} * ${GTEST_FRACTION:-1})) \
    --env "LD_PRELOAD=$TEST_LD_PRELOAD" \
    $GTEST_RUNNER_OPTIONS

GTEST_EXITCODE=$?

deqp-runner junit \
   --testsuite gtest \
   --results $RESULTS/failures.csv \
   --output $RESULTS/junit.xml \
   --limit 50 \
   --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"

# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
  python3 $INSTALL/report-flakes.py \
         --host irc.oftc.net \
         --port 6667 \
         --results $RESULTS/results.csv \
         --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
         --channel "$FLAKES_CHANNEL" \
         --runner "$CI_RUNNER_DESCRIPTION" \
         --job "$CI_JOB_ID" \
         --url "$CI_JOB_URL" \
         --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
         --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi

exit $GTEST_EXITCODE
@@ -1,120 +0,0 @@
.lava-test:
  extends:
    - .ci-run-policy
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  variables:
    GIT_STRATEGY: none # testing doesn't build anything from source
    FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions
    DEQP_VER: gles2
    # proxy used to cache data locally
    FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
    # base system generated by the container build job, shared between many pipelines
    BASE_SYSTEM_HOST_PREFIX: "${MINIO_HOST}/mesa-lava"
    BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${ARCH}"
    BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${ARCH}"
    # per-job build artifacts
    MESA_BUILD_PATH: "${PIPELINE_ARTIFACTS_BASE}/mesa-${ARCH}.tar.gz"
    JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
    JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.gz"
    PIGLIT_NO_WINDOW: 1
    VISIBILITY_GROUP: "Collabora+fdo"
  script:
    - ./artifacts/lava/lava-submit.sh
  artifacts:
    name: "mesa_${CI_JOB_NAME}"
    when: always
    paths:
      - results/
    exclude:
      - results/*.shader_cache
  after_script:
    - wget -q "https://${JOB_RESULTS_PATH}" -O- | tar -xz

.lava-test:armhf:
  variables:
    ARCH: armhf
    KERNEL_IMAGE_NAME: zImage
    KERNEL_IMAGE_TYPE: "zimage"
    BOOT_METHOD: u-boot
    HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
  extends:
    - .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_armhf
    - .use-debian/x86_build
    - .lava-test
    - .use-kernel+rootfs-arm
  needs:
    - kernel+rootfs_armhf
    - debian/x86_build
    - debian-armhf

.lava-test:arm64:
  variables:
    ARCH: arm64
    KERNEL_IMAGE_NAME: Image
    KERNEL_IMAGE_TYPE: "image"
    BOOT_METHOD: u-boot
    HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
  extends:
    - .use-debian/arm_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64
    - .use-debian/x86_build
    - .lava-test
    - .use-kernel+rootfs-arm
  dependencies:
    - debian-arm64
  needs:
    - kernel+rootfs_arm64
    - debian/x86_build
    - debian-arm64

.lava-test:amd64:
  variables:
    ARCH: amd64
    KERNEL_IMAGE_NAME: bzImage
    KERNEL_IMAGE_TYPE: "zimage"
    BOOT_METHOD: u-boot
    HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
  extends:
    - .use-debian/x86_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_amd64
    - .use-debian/x86_build
    - .lava-test
    - .use-kernel+rootfs-amd64
  needs:
    - kernel+rootfs_amd64
    - debian-testing

.lava-traces-base:
  variables:
    HWCI_TEST_SCRIPT: "/install/piglit/run.sh"
  artifacts:
    reports:
      junit: results/junit.xml

.lava-piglit:
  variables:
    PIGLIT_REPLAY_DEVICE_NAME: "gl-${GPU_VERSION}"
    PIGLIT_RESULTS: "${GPU_VERSION}-${PIGLIT_PROFILES}"
    HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"

.lava-piglit-traces:amd64:
  extends:
    - .lava-test:amd64
    - .lava-piglit
    - .lava-traces-base

.lava-piglit-traces:armhf:
  extends:
    - .lava-test:armhf
    - .lava-piglit
    - .lava-traces-base

.lava-piglit-traces:arm64:
  extends:
    - .lava-test:arm64
    - .lava-piglit
    - .lava-traces-base

.lava-piglit:amd64:
  extends:
    - .lava-test:amd64
    - .lava-piglit
@@ -1,47 +0,0 @@
#!/bin/bash

set -e
set -x

# Try to use the kernel and rootfs built in mainline first, so we're more
# likely to hit cache
if wget -q --method=HEAD "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
	BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
else
	BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
fi
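# (the 'done' file checked here is the marker uploaded at the very end of the
# rootfs build job above, so a complete mainline rootfs is preferred over
# rebuilding one in the fork)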

rm -rf results
mkdir -p results/job-rootfs-overlay/

# LAVA always uploads to MinIO when necessary as we don't have direct upload
# from the DUT
export PIGLIT_REPLAY_UPLOAD_TO_MINIO=1
cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh

tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
ci-fairy minio login "${CI_JOB_JWT}"
ci-fairy minio cp job-rootfs-overlay.tar.gz "minio://${JOB_ROOTFS_OVERLAY_PATH}"

touch results/lava.log
tail -f results/lava.log &
artifacts/lava/lava_job_submitter.py \
	--dump-yaml \
	--pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
	--base-system-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
	--mesa-build-url "${FDO_HTTP_CACHE_URI:-}https://${MESA_BUILD_PATH}" \
	--job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
	--job-artifacts-base ${JOB_ARTIFACTS_BASE} \
	--job-timeout ${JOB_TIMEOUT:-30} \
	--first-stage-init artifacts/ci-common/init-stage1.sh \
	--ci-project-dir ${CI_PROJECT_DIR} \
	--device-type ${DEVICE_TYPE} \
	--dtb ${DTB} \
	--jwt "${CI_JOB_JWT}" \
	--kernel-image-name ${KERNEL_IMAGE_NAME} \
	--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
	--boot-method ${BOOT_METHOD} \
	--visibility-group ${VISIBILITY_GROUP} \
	--lava-tags "${LAVA_TAGS}" >> results/lava.log
@@ -1,351 +0,0 @@
#!/usr/bin/env python3
#
# Copyright (C) 2020, 2021 Collabora Limited
# Author: Gustavo Padovan <gustavo.padovan@collabora.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Send a job to LAVA, track it and collect its log."""

import argparse
import lavacli
import os
import sys
import time
import traceback
import urllib.parse
import xmlrpc
import yaml

from datetime import datetime, timedelta
from lavacli.utils import loader

# Timeout in minutes to decide if the device from the dispatched LAVA job has
# hung or not due to the lack of new log output.
DEVICE_HANGING_TIMEOUT_MIN = 5

# How many seconds the script should wait before trying a new polling iteration
# to check if the dispatched LAVA job is running or waiting in the job queue.
WAIT_FOR_DEVICE_POLLING_TIME_SEC = 10

# How many seconds to wait between log output LAVA RPC calls.
LOG_POLLING_TIME_SEC = 5

# How many retries should be made when a timeout happens.
NUMBER_OF_RETRIES_TIMEOUT_DETECTION = 2


def print_log(msg):
    print("{}: {}".format(datetime.now(), msg))

def fatal_err(msg):
    print_log(msg)
    sys.exit(1)

def generate_lava_yaml(args):
    # General metadata and permissions, plus, inexplicably, kernel arguments
    values = {
        'job_name': 'mesa: {}'.format(args.pipeline_info),
        'device_type': args.device_type,
        'visibility': { 'group': [ args.visibility_group ] },
        'priority': 75,
        'context': {
            'extra_nfsroot_args': ' init=/init rootwait minio_results={}'.format(args.job_artifacts_base)
        },
        'timeouts': {
            'job': {
                'minutes': args.job_timeout
            }
        },
    }

    if args.lava_tags:
        values['tags'] = args.lava_tags.split(',')

    # URLs to our kernel and rootfs to boot from, both generated by the base
    # container build
    deploy = {
      'timeout': { 'minutes': 10 },
      'to': 'tftp',
      'os': 'oe',
      'kernel': {
        'url': '{}/{}'.format(args.base_system_url_prefix, args.kernel_image_name),
      },
      'nfsrootfs': {
        'url': '{}/lava-rootfs.tgz'.format(args.base_system_url_prefix),
        'compression': 'gz',
      }
    }
    if args.kernel_image_type:
        deploy['kernel']['type'] = args.kernel_image_type
    if args.dtb:
        deploy['dtb'] = {
          'url': '{}/{}.dtb'.format(args.base_system_url_prefix, args.dtb)
        }

    # always boot over NFS
    boot = {
      'timeout': { 'minutes': 25 },
      'method': args.boot_method,
      'commands': 'nfs',
      'prompts': ['lava-shell:'],
    }

    # skeleton test definition: only declaring each job as a single 'test'
    # since LAVA's test parsing is not useful to us
    test = {
      'timeout': { 'minutes': args.job_timeout },
      'failure_retry': 1,
      'definitions': [ {
        'name': 'mesa',
        'from': 'inline',
        'path': 'inline/mesa.yaml',
        'repository': {
          'metadata': {
            'name': 'mesa',
            'description': 'Mesa test plan',
            'os': [ 'oe' ],
            'scope': [ 'functional' ],
            'format': 'Lava-Test Test Definition 1.0',
          },
          'parse': {
            'pattern': r'hwci: (?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
          },
          'run': {
          },
        },
      } ],
    }

    # job execution script:
    #   - inline .gitlab-ci/common/init-stage1.sh
    #   - fetch and unpack per-pipeline build artifacts from build job
    #   - fetch and unpack per-job environment from lava-submit.sh
    #   - exec .gitlab-ci/common/init-stage2.sh
    init_lines = []
    with open(args.first_stage_init, 'r') as init_sh:
      init_lines += [ x.rstrip() for x in init_sh if not x.startswith('#') and x.rstrip() ]
    init_lines += [
      'mkdir -p {}'.format(args.ci_project_dir),
      'wget -S --progress=dot:giga -O- {} | tar -xz -C {}'.format(args.mesa_build_url, args.ci_project_dir),
      'wget -S --progress=dot:giga -O- {} | tar -xz -C /'.format(args.job_rootfs_overlay_url),
      'set +x',
      'export CI_JOB_JWT="{}"'.format(args.jwt),
      'set -x',
      'exec /init-stage2.sh',
    ]
    test['definitions'][0]['repository']['run']['steps'] = init_lines

    values['actions'] = [
      { 'deploy': deploy },
      { 'boot': boot },
      { 'test': test },
    ]

    return yaml.dump(values, width=10000000)
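    # (width=10000000 presumably keeps every scalar -- notably the long inline
    # shell steps above -- on a single line in the emitted YAML)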


def setup_lava_proxy():
    config = lavacli.load_config("default")
    uri, usr, tok = (config.get(key) for key in ("uri", "username", "token"))
    uri_obj = urllib.parse.urlparse(uri)
    uri_str = "{}://{}:{}@{}{}".format(uri_obj.scheme, usr, tok, uri_obj.netloc, uri_obj.path)
    transport = lavacli.RequestsTransport(
        uri_obj.scheme,
        config.get("proxy"),
        config.get("timeout", 120.0),
        config.get("verify_ssl_cert", True),
    )
    proxy = xmlrpc.client.ServerProxy(
        uri_str, allow_none=True, transport=transport)

    print_log("Proxy for {} created.".format(config['uri']))

    return proxy


def _call_proxy(fn, *args):
    retries = 60
    for n in range(1, retries + 1):
        try:
            return fn(*args)
        except xmlrpc.client.ProtocolError as err:
            if n == retries:
                traceback.print_exc()
                fatal_err("A protocol error occurred (Err {} {})".format(err.errcode, err.errmsg))
            else:
                time.sleep(15)
        except xmlrpc.client.Fault as err:
            traceback.print_exc()
            fatal_err("FATAL: Fault: {} (code: {})".format(err.faultString, err.faultCode))
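    # Net effect: transient XML-RPC ProtocolErrors are retried every 15 s for up
    # to 60 attempts (~15 minutes), while any Fault aborts the submitter
    # immediately.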


def get_job_results(proxy, job_id, test_suite, test_case):
    # Look for infrastructure errors and retry if we see them.
    results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
    results = yaml.load(results_yaml, Loader=loader(False))
    for res in results:
        metadata = res['metadata']
        if not 'result' in metadata or metadata['result'] != 'fail':
            continue
        if 'error_type' in metadata and metadata['error_type'] == "Infrastructure":
            print_log("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id))
            return False
        if 'case' in metadata and metadata['case'] == "validate":
            print_log("LAVA job {} failed validation (possible download error). Retry.".format(job_id))
            return False

    results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case)
    results = yaml.load(results_yaml, Loader=loader(False))
    if not results:
        fatal_err("LAVA: no result for test_suite '{}', test_case '{}'".format(test_suite, test_case))

    print_log("LAVA: result for test_suite '{}', test_case '{}': {}".format(test_suite, test_case, results[0]['result']))
    if results[0]['result'] != 'pass':
        fatal_err("FAIL")

    return True

def wait_until_job_is_started(proxy, job_id):
    print_log(f"Waiting for job {job_id} to start.")
    current_state = "Submitted"
    waiting_states = ["Submitted", "Scheduling", "Scheduled"]
    while current_state in waiting_states:
        job_state = _call_proxy(proxy.scheduler.job_state, job_id)
        current_state = job_state["job_state"]

        time.sleep(WAIT_FOR_DEVICE_POLLING_TIME_SEC)
    print_log(f"Job {job_id} started.")

def follow_job_execution(proxy, job_id):
    line_count = 0
    finished = False
    last_time_logs = datetime.now()
    while not finished:
        (finished, data) = _call_proxy(proxy.scheduler.jobs.logs, job_id, line_count)
        logs = yaml.load(str(data), Loader=loader(False))
        if logs:
            # Reset the timeout
            last_time_logs = datetime.now()
            for line in logs:
                print("{} {}".format(line["dt"], line["msg"]))

            line_count += len(logs)

        else:
            time_limit = timedelta(minutes=DEVICE_HANGING_TIMEOUT_MIN)
            if datetime.now() - last_time_logs > time_limit:
                print_log("LAVA job {} doesn't advance (machine got hung?). Retry.".format(job_id))
                return False

        # `proxy.scheduler.jobs.logs` does not block, even when there is no
        # new log to be fetched. To avoid DoSing the LAVA dispatcher
        # machine, let's add a sleep to save it some stamina.
        time.sleep(LOG_POLLING_TIME_SEC)

    return True

def show_job_data(proxy, job_id):
    show = _call_proxy(proxy.scheduler.jobs.show, job_id)
    for field, value in show.items():
        print("{}\t: {}".format(field, value))


def validate_job(proxy, job_file):
    try:
        return _call_proxy(proxy.scheduler.jobs.validate, job_file, True)
    except:
        return False

def submit_job(proxy, job_file):
    return _call_proxy(proxy.scheduler.jobs.submit, job_file)


def main(args):
    proxy = setup_lava_proxy()

    yaml_file = generate_lava_yaml(args)

    if args.dump_yaml:
        censored_args = args
        censored_args.jwt = "jwt-hidden"
        print(generate_lava_yaml(censored_args))

    if args.validate_only:
        ret = validate_job(proxy, yaml_file)
        if not ret:
            fatal_err("Error in LAVA job definition")
        print("LAVA job definition validated successfully")
        return

    retry_count = NUMBER_OF_RETRIES_TIMEOUT_DETECTION

    while retry_count >= 0:
        job_id = submit_job(proxy, yaml_file)

        print_log("LAVA job id: {}".format(job_id))

        wait_until_job_is_started(proxy, job_id)

        if not follow_job_execution(proxy, job_id):
            print_log(f"Job {job_id} has timed out. Cancelling it.")
            # Cancel the job as it is considered unreachable by Mesa CI.
            proxy.scheduler.jobs.cancel(job_id)

            retry_count -= 1
            continue

        show_job_data(proxy, job_id)

        if get_job_results(proxy, job_id, "0_mesa", "mesa") == True:
            break


if __name__ == '__main__':
    # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
    # GitLab runner -> GitLab primary -> user, it is safe to say we don't need
    # any more buffering
    sys.stdout.reconfigure(line_buffering=True)
    sys.stderr.reconfigure(line_buffering=True)
    parser = argparse.ArgumentParser("LAVA job submitter")

    parser.add_argument("--pipeline-info")
    parser.add_argument("--base-system-url-prefix")
    parser.add_argument("--mesa-build-url")
    parser.add_argument("--job-rootfs-overlay-url")
    parser.add_argument("--job-artifacts-base")
    parser.add_argument("--job-timeout", type=int)
    parser.add_argument("--first-stage-init")
    parser.add_argument("--ci-project-dir")
    parser.add_argument("--device-type")
    parser.add_argument("--dtb", nargs='?', default="")
    parser.add_argument("--kernel-image-name")
    parser.add_argument("--kernel-image-type", nargs='?', default="")
    parser.add_argument("--boot-method")
    parser.add_argument("--lava-tags", nargs='?', default="")
    parser.add_argument("--jwt")
    parser.add_argument("--validate-only", action='store_true')
    parser.add_argument("--dump-yaml", action='store_true')
    parser.add_argument("--visibility-group")

    parser.set_defaults(func=main)
    args = parser.parse_args()
    args.func(args)
@@ -1,82 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

CROSS_FILE=/cross_file-"$CROSS".txt

# We need to control the version of llvm-config we're using, so we'll
# tweak the cross file or generate a native file to do so.
if test -n "$LLVM_VERSION"; then
    LLVM_CONFIG="llvm-config-${LLVM_VERSION}"
    echo -e "[binaries]\nllvm-config = '`which $LLVM_CONFIG`'" > native.file
    if [ -n "$CROSS" ]; then
        sed -i -e '/\[binaries\]/a\' -e "llvm-config = '`which $LLVM_CONFIG`'" $CROSS_FILE
    fi
    $LLVM_CONFIG --version
else
    rm -f native.file
    touch native.file
fi

# cross-xfail-$CROSS, if it exists, contains a list of tests that are expected
# to fail for the $CROSS configuration, one per line. You can then mark those
# tests in their meson.build with:
#
# test(...,
#      should_fail: meson.get_cross_property('xfail', '').contains(t),
#     )
#
# where t is the name of the test, and the '' is the string to search when
# not cross-compiling (which is empty, because for amd64 everything is
# expected to pass).
if [ -n "$CROSS" ]; then
    CROSS_XFAIL=.gitlab-ci/cross-xfail-"$CROSS"
    if [ -s "$CROSS_XFAIL" ]; then
        sed -i \
            -e '/\[properties\]/a\' \
            -e "xfail = '$(tr '\n' , < $CROSS_XFAIL)'" \
            "$CROSS_FILE"
    fi
fi
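# (with, say, two entries in the xfail list, the sed above appends
# `xfail = 'test-a,test-b,'` under [properties] in the cross file;
# the test names here are hypothetical examples)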
 | 
			
		||||
 | 
			
		||||
# Only use GNU time if available, not any shell built-in command
 | 
			
		||||
case $CI_JOB_NAME in
 | 
			
		||||
    # strace and wine don't seem to mix well
 | 
			
		||||
    # ASAN leak detection is incompatible with strace
 | 
			
		||||
    debian-mingw32-x86_64|*-asan*)
 | 
			
		||||
        if test -f /usr/bin/time; then
 | 
			
		||||
            MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
 | 
			
		||||
        fi
 | 
			
		||||
        ;;
 | 
			
		||||
    *)
 | 
			
		||||
        if test -f /usr/bin/time -a -f /usr/bin/strace; then
 | 
			
		||||
            MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time-strace.sh
 | 
			
		||||
        fi
 | 
			
		||||
        ;;
 | 
			
		||||
esac
 | 
			
		||||
 | 
			
		||||
rm -rf _build
 | 
			
		||||
meson _build --native-file=native.file \
 | 
			
		||||
      --wrap-mode=nofallback \
 | 
			
		||||
      ${CROSS+--cross "$CROSS_FILE"} \
 | 
			
		||||
      -D prefix=`pwd`/install \
 | 
			
		||||
      -D libdir=lib \
 | 
			
		||||
      -D buildtype=${BUILDTYPE:-debug} \
 | 
			
		||||
      -D build-tests=true \
 | 
			
		||||
      -D c_args="$(echo -n $C_ARGS)" \
 | 
			
		||||
      -D cpp_args="$(echo -n $CPP_ARGS)" \
 | 
			
		||||
      -D libunwind=${UNWIND} \
 | 
			
		||||
      ${DRI_LOADERS} \
 | 
			
		||||
      -D dri-drivers=${DRI_DRIVERS:-[]} \
 | 
			
		||||
      ${GALLIUM_ST} \
 | 
			
		||||
      -D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
 | 
			
		||||
      -D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
 | 
			
		||||
      -D werror=true \
 | 
			
		||||
      ${EXTRA_OPTION}
 | 
			
		||||
cd _build
 | 
			
		||||
meson configure
 | 
			
		||||
ninja
 | 
			
		||||
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} ${MESON_TEST_ARGS}
 | 
			
		||||
ninja install
 | 
			
		||||
cd ..
 | 
			
		||||
@@ -1,27 +0,0 @@
#!/bin/sh

STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)

mkdir -p $STRACEDIR

# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.

/usr/bin/time -v strace -ff -tt -T -o $STRACEDIR/log "$@" &
TIMEPID=$!
STRACEPID=$(ps --ppid $TIMEPID -o pid=)
TESTPID=$(ps --ppid $STRACEPID -o pid=)

if test "x$TESTPID" != x; then
    trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi

wait $TIMEPID
EXITCODE=$?

# Only keep strace logs if the test timed out
rm -rf $STRACEDIR &

exit $EXITCODE
@@ -1,17 +0,0 @@
#!/bin/sh

# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.

/usr/bin/time -v "$@" &
TIMEPID=$!
TESTPID=$(ps --ppid $TIMEPID -o pid=)

if test "x$TESTPID" != x; then
    trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi

wait $TIMEPID
exit $?
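Both time wrappers above use the same trick: start the command in the background, trap the SIGTERM that meson sends on timeout, forward it to the real test process, and still wait so GNU time can print its report. A rough Python equivalent of that signal-forwarding pattern (the fallback command is a placeholder):

import signal
import subprocess
import sys

cmd = sys.argv[1:] or ["sleep", "5"]   # placeholder command if none is given

child = subprocess.Popen(cmd)

def forward_term(signum, frame):
    # Propagate the runner's SIGTERM to the actual test process.
    child.terminate()

signal.signal(signal.SIGTERM, forward_term)
sys.exit(child.wait())                 # exit with the test's status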
@@ -1,36 +0,0 @@
diff --git a/generated_tests/CMakeLists.txt b/generated_tests/CMakeLists.txt
index 738526546..6f89048cd 100644
--- a/generated_tests/CMakeLists.txt
+++ b/generated_tests/CMakeLists.txt
@@ -206,11 +206,6 @@ piglit_make_generated_tests(
 	templates/gen_variable_index_write_tests/vs.shader_test.mako
 	templates/gen_variable_index_write_tests/fs.shader_test.mako
 	templates/gen_variable_index_write_tests/helpers.mako)
-piglit_make_generated_tests(
-	vs_in_fp64.list
-	gen_vs_in_fp64.py
-	templates/gen_vs_in_fp64/columns.shader_test.mako
-	templates/gen_vs_in_fp64/regular.shader_test.mako)
 piglit_make_generated_tests(
 	shader_framebuffer_fetch_tests.list
 	gen_shader_framebuffer_fetch_tests.py)
@@ -279,7 +274,6 @@ add_custom_target(gen-gl-tests
 			gen_extensions_defined.list
 			vp-tex.list
 			variable_index_write_tests.list
-			vs_in_fp64.list
 			gpu_shader4_tests.list
 )

diff --git a/tests/sanity.py b/tests/sanity.py
index 12f1614c9..9019087e2 100644
--- a/tests/sanity.py
+++ b/tests/sanity.py
@@ -100,7 +100,6 @@ shader_tests = (
     'spec/arb_tessellation_shader/execution/barrier-patch.shader_test',
     'spec/arb_tessellation_shader/execution/built-in-functions/tcs-any-bvec4-using-if.shader_test',
     'spec/arb_tessellation_shader/execution/sanity.shader_test',
-    'spec/arb_vertex_attrib_64bit/execution/vs_in/vs-input-uint_uvec4-double_dmat3x4_array2-position.shader_test',
     'spec/glsl-1.50/execution/geometry-basic.shader_test',
     'spec/oes_viewport_array/viewport-gs-write-simple.shader_test',
 )
@@ -1,92 +0,0 @@
#!/bin/sh

set -ex

if [ -z "$GPU_VERSION" ]; then
   echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in your ci/gpu-version-*.txt)'
   exit 1
fi

INSTALL=`pwd`/install

# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json

RESULTS=`pwd`/${PIGLIT_RESULTS_DIR:-results}
mkdir -p $RESULTS

if [ -n "$PIGLIT_FRACTION" -o -n "$CI_NODE_INDEX" ]; then
   FRACTION=`expr ${PIGLIT_FRACTION:-1} \* ${CI_NODE_TOTAL:-1}`
   PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction $FRACTION"
fi

# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
   PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction-start ${CI_NODE_INDEX}"
fi

if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
    PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
fi

# Default to an empty known flakes file if it doesn't exist.
touch $INSTALL/$GPU_VERSION-flakes.txt

if [ -n "$VK_DRIVER" ] && [ -e "$INSTALL/$VK_DRIVER-skips.txt" ]; then
    PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$VK_DRIVER-skips.txt"
fi

if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
    PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GALLIUM_DRIVER-skips.txt"
fi

if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
    PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$DRIVER_NAME-skips.txt"
fi

if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
    PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
fi

set +e

piglit-runner \
    run \
    --piglit-folder /piglit \
    --output $RESULTS \
    --jobs ${FDO_CI_CONCURRENT:-4} \
    --skips $INSTALL/all-skips.txt $PIGLIT_SKIPS \
    --flakes $INSTALL/$GPU_VERSION-flakes.txt \
    --profile $PIGLIT_PROFILES \
    --process-isolation \
    $PIGLIT_RUNNER_OPTIONS \
    -v -v

PIGLIT_EXITCODE=$?

deqp-runner junit \
   --testsuite $PIGLIT_PROFILES \
   --results $RESULTS/failures.csv \
   --output $RESULTS/junit.xml \
   --limit 50 \
   --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"

# Report the flakes to the IRC channel for monitoring (if configured):
if [ -n "$FLAKES_CHANNEL" ]; then
  python3 $INSTALL/report-flakes.py \
         --host irc.oftc.net \
         --port 6667 \
         --results $RESULTS/results.csv \
         --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
         --channel "$FLAKES_CHANNEL" \
         --runner "$CI_RUNNER_DESCRIPTION" \
         --job "$CI_JOB_ID" \
         --url "$CI_JOB_URL" \
         --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
         --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}"
fi

exit $PIGLIT_EXITCODE
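The runner above shards the caselist with --fraction (PIGLIT_FRACTION multiplied by CI_NODE_TOTAL) and --fraction-start (CI_NODE_INDEX), so each parallel job runs one test out of every N starting at its own offset. A small Python sketch of that 1-in-N selection; piglit-runner's real selection logic may differ:

def shard(cases, fraction, start):
    # Keep one case out of every `fraction`, beginning at `start` (1-based).
    return cases[start - 1::fraction]

cases = [f"test-{i}" for i in range(1, 13)]
# e.g. PIGLIT_FRACTION=2, CI_NODE_TOTAL=3, CI_NODE_INDEX=1
print(shard(cases, fraction=2 * 3, start=1))   # ['test-1', 'test-7']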
@@ -1,275 +0,0 @@
#!/bin/sh

set -ex

INSTALL=$(realpath -s "$PWD"/install)
MINIO_ARGS="--credentials=/tmp/.minio_credentials"

RESULTS=$(realpath -s "$PWD"/results)
mkdir -p "$RESULTS"

# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when
# using a command wrapper. Hence, we will just set it when running the
# command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"

# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(head -1 "$INSTALL/VERSION" | sed 's/\./\\./g')

print_red() {
    RED='\033[0;31m'
    NC='\033[0m' # No Color
    printf "${RED}"
    "$@"
    printf "${NC}"
}

# wrapper to suppress +x to avoid spamming the log
quiet() {
    set +x
    "$@"
    set -x
}

if [ "$VK_DRIVER" ]; then

    ### VULKAN ###

    # Set the Vulkan driver to use.
    export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"

    if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
        # Set environment for Wine.
        export WINEDEBUG="-all"
        export WINEPREFIX="/dxvk-wine64"
        export WINEESYNC=1

        # Set environment for DXVK.
        export DXVK_LOG_LEVEL="none"
        export DXVK_STATE_CACHE=0

        # Set environment for gfxreconstruct executables.
        export PATH="/gfxreconstruct/build/bin:$PATH"
    fi

    SANITY_MESA_VERSION_CMD="vulkaninfo"

    HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"


    # Set up the Window System Interface (WSI)

    if [ ${TEST_START_XORG:-0} -eq 1 ]; then
        "$INSTALL"/common/start-x.sh "$INSTALL"
        export DISPLAY=:0
    else
        # Run vulkan against the host's running X server (xvfb doesn't
        # have DRI3 support).
        # Set the DISPLAY env variable in each gitlab-runner's
        # configuration file:
        # https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
        quiet printf "%s%s\n" "Running against the host's X server. " \
              "DISPLAY is \"$DISPLAY\"."
    fi
else

    ### GL/ES ###

    if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
        # Set environment for apitrace executable.
        export PATH="/apitrace/build:$PATH"

        # Our rootfs may not have "less", which apitrace uses during
        # apitrace dump
        export PAGER=cat
    fi

    SANITY_MESA_VERSION_CMD="wflinfo"

    HANG_DETECTION_CMD=""


    # Set up the platform windowing system.

    if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then

        # Use the surfaceless EGL platform.
        export DISPLAY=
        export WAFFLE_PLATFORM="surfaceless_egl"

        SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"

        if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
            # piglit is to use virpipe, and virgl_test_server uses llvmpipe
            export GALLIUM_DRIVER="$GALLIUM_DRIVER"

            LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
            GALLIUM_DRIVER=llvmpipe \
            GALLIVM_PERF="nopt" \
            VTEST_USE_EGL_SURFACELESS=1 \
            VTEST_USE_GLES=1 \
            virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &

            sleep 1
        fi
    elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
        SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
    elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
        # It is assumed that you have already brought up your X server before
        # calling this script.
        SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
    else
        SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
        RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
    fi
fi

if [ "$ZINK_USE_LAVAPIPE" ]; then
    export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
fi

# If the job is parallel at the gitlab job level, take the corresponding
# fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then

    if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
        FAILURE_MESSAGE=$(printf "%s" "Can't parallelize piglit with multiple profiles")
        quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
        exit 1
    fi

    USE_CASELIST=1
fi

replay_minio_upload_images() {
    find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
        | while read -r line; do

        __TRACE="${line%-*-*}"
        if grep -q "^$__PREFIX/$__TRACE: pass$" ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig"; then
            if [ "x$CI_PROJECT_PATH" != "x$FDO_UPSTREAM_REPO" ]; then
                continue
            fi
            __MINIO_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE"
            __DESTINATION_FILE_PATH="${line##*-}"
            if wget -q --method=HEAD "https://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}" 2>/dev/null; then
                continue
            fi
        else
            __MINIO_PATH="$JOB_ARTIFACTS_BASE"
            __DESTINATION_FILE_PATH="$__MINIO_TRACES_PREFIX/${line##*-}"
        fi

        ci-fairy minio cp $MINIO_ARGS "$RESULTS/$__PREFIX/$line" \
            "minio://${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
    done
}

SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""

if [ -d results ]; then
    cd results && rm -rf ..?* .[!.]* *
fi
cd /piglit

if [ -n "$USE_CASELIST" ]; then
    PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
    PIGLIT_GENTESTS="./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format \"{name}\" > /tmp/case-list.txt"
    RUN_GENTESTS="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $PIGLIT_GENTESTS"

    eval $RUN_GENTESTS

    sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt

    PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi

PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")

PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")

PIGLIT_CMD="./piglit run --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES "$(/usr/bin/printf "%q" "$RESULTS")

RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"

if [ "$RUN_CMD_WRAPPER" ]; then
    RUN_CMD="set +e; $RUN_CMD_WRAPPER "$(/usr/bin/printf "%q" "$RUN_CMD")"; set -e"
fi

FAILURE_MESSAGE=$(printf "%s" "Unexpected change in results:")

if [ "x$PIGLIT_PROFILES" = "xreplay" ] \
       && [ ${PIGLIT_REPLAY_UPLOAD_TO_MINIO:-0} -eq 1 ]; then
    ci-fairy minio login $MINIO_ARGS $CI_JOB_JWT
fi

eval $RUN_CMD

if [ $? -ne 0 ]; then
    printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi

ARTIFACTS_BASE_URL="https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts"

if [ ${PIGLIT_JUNIT_RESULTS:-0} -eq 1 ]; then
    ./piglit summary aggregate "$RESULTS" -o junit.xml
    FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the JUnit report for failures at: ${ARTIFACTS_BASE_URL}/results/junit.xml")
fi

PIGLIT_RESULTS="${PIGLIT_RESULTS:-$PIGLIT_PROFILES}"
RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt"
mkdir -p .gitlab-ci/piglit
./piglit summary console "$RESULTS"/results.json.bz2 \
    | tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
    | head -n -1 | grep -v ": pass" \
    | sed '/^summary:/Q' \
    > $RESULTSFILE

if [ "x$PIGLIT_PROFILES" = "xreplay" ] \
       && [ ${PIGLIT_REPLAY_UPLOAD_TO_MINIO:-0} -eq 1 ]; then

    __PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
    __MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
    __MINIO_TRACES_PREFIX="traces"

    if [ "x$PIGLIT_REPLAY_SUBCOMMAND" != "xprofile" ]; then
        quiet replay_minio_upload_images
    fi
fi

if [ -n "$USE_CASELIST" ]; then
    # Just filter the expected results based on the tests that were actually
    # executed, and switch to the version with no summary
    cat ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" | sed '/^summary:/Q' | rev \
        | cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt

    grep -F -f /tmp/executed.txt "$INSTALL/$PIGLIT_RESULTS.txt" \
       > ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" || true
elif [ -f "$INSTALL/$PIGLIT_RESULTS.txt" ]; then
    cp "$INSTALL/$PIGLIT_RESULTS.txt" \
       ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline"
else
    touch ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline"
fi

if diff -q ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE; then
    exit 0
fi

./piglit summary html --exclude-details=pass \
    "$RESULTS"/summary "$RESULTS"/results.json.bz2

if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
    find "$RESULTS"/summary -type f -name "*.html" -print0 \
        | xargs -0 sed -i 's%<img src="file://'"${RESULTS}"'.*-\([0-9a-f]*\)\.png%<img src="https://'"${JOB_ARTIFACTS_BASE}"'/traces/\1.png%g'
    find "$RESULTS"/summary -type f -name "*.html" -print0 \
        | xargs -0 sed -i 's%<img src="file://%<img src="https://'"${PIGLIT_REPLAY_REFERENCE_IMAGES_BASE}"'/%g'
fi

FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the HTML summary for problems at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html")

quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
quiet diff --color=always -u ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE
exit 1
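When only a subset of the caselist runs (USE_CASELIST), the script above trims the expected-results baseline down to the tests that actually executed before diffing, by grepping the baseline for each executed "name:" prefix. A hedged Python sketch of that filtering step, with made-up test names:

# "name:" prefixes of the tests that actually executed (from the console summary).
executed = {"spec/a/test1:", "spec/a/test2:"}

# Full expected-results baseline, one "name: status" entry per line.
baseline = [
    "spec/a/test1: fail",
    "spec/a/test2: crash",
    "spec/b/test3: fail",
]

# Keep only baseline entries whose test actually ran, like grep -F -f.
filtered = [line for line in baseline
            if any(line.startswith(prefix) for prefix in executed)]
print(filtered)   # entries for test1 and test2 only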
@@ -1,75 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

VERSION=`head -1 install/VERSION`
ROOTDIR=`pwd`

if [ -d results ]; then
    cd results && rm -rf ..?* .[!.]* *
fi
cd /piglit

export OCL_ICD_VENDORS=$ROOTDIR/install/etc/OpenCL/vendors/

set +e
unset DISPLAY
export LD_LIBRARY_PATH=$ROOTDIR/install/lib
clinfo

# If the job is parallel at the gitlab job level, take the corresponding
# fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then

    if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
        echo "Can't parallelize piglit with multiple profiles"
        exit 1
    fi
    USE_CASELIST=1
fi

if [ -n "$USE_CASELIST" ]; then
    ./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format "{name}" > /tmp/case-list.txt

    sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt

    PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi

./piglit run -c -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES $ROOTDIR/results
retVal=$?
if [ $retVal -ne 0 ]; then
    echo "Found $(cat /tmp/version.txt), expected $VERSION"
fi
set -e

PIGLIT_RESULTS=${PIGLIT_RESULTS:-$PIGLIT_PROFILES}
mkdir -p .gitlab-ci/piglit
./piglit summary console $ROOTDIR/results \
  | tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
  | head -n -1 \
  | grep -v ": pass" \
  | sed '/^summary:/Q' \
  > .gitlab-ci/piglit/$PIGLIT_RESULTS.txt

if [ -n "$USE_CASELIST" ]; then
    # Just filter the expected results based on the tests that were actually
    # executed, and switch to the version with no summary
    cat .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig | sed '/^summary:/Q' | rev \
         | cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
    grep -F -f /tmp/executed.txt $ROOTDIR/install/$PIGLIT_RESULTS.txt \
         > .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline || true
else
    cp $ROOTDIR/install/$PIGLIT_RESULTS.txt .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline
fi

if diff -q .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}; then
    exit 0
fi

./piglit summary html --exclude-details=pass $ROOTDIR/results/summary $ROOTDIR/results

echo Unexpected change in results:
diff -u .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}
exit 1
@@ -1,57 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

CROSS_FILE=/cross_file-"$CROSS".txt

# Delete unused bin and includes from artifacts to save space.
rm -rf install/bin install/include

# Strip the drivers in the artifacts to cut 80% of the artifacts size.
if [ -n "$CROSS" ]; then
    STRIP=`sed -n -E "s/strip\s*=\s*'(.*)'/\1/p" "$CROSS_FILE"`
    if [ -z "$STRIP" ]; then
        echo "Failed to find strip command in cross file"
        exit 1
    fi
else
    STRIP="strip"
fi
if [ -z "$ARTIFACTS_DEBUG_SYMBOLS" ]; then
    find install -name \*.so -exec $STRIP {} \;
fi

# Test runs don't pull down the git tree, so put the dEQP helper
# script and associated bits there.
echo "$(cat VERSION) (git-$(git rev-parse HEAD | cut -b -10))" > install/VERSION
cp -Rp .gitlab-ci/bare-metal install/
cp -Rp .gitlab-ci/common install/
cp -Rp .gitlab-ci/piglit install/
cp -Rp .gitlab-ci/fossils.yml install/
cp -Rp .gitlab-ci/fossils install/
cp -Rp .gitlab-ci/fossilize-runner.sh install/
cp -Rp .gitlab-ci/crosvm-init.sh install/
cp -Rp .gitlab-ci/*.txt install/
cp -Rp .gitlab-ci/report-flakes.py install/
cp -Rp .gitlab-ci/vkd3d-proton install/
cp -Rp .gitlab-ci/*-runner.sh install/
find . -path \*/ci/\*.txt \
    -o -path \*/ci/\*.toml \
    -o -path \*/ci/\*traces\*.yml \
    | xargs -I '{}' cp -p '{}' install/

# Tar up the install dir so that symlinks and hardlinks aren't each
# packed separately in the zip file.
mkdir -p artifacts/
tar -cf artifacts/install.tar install
cp -Rp .gitlab-ci/common artifacts/ci-common
cp -Rp .gitlab-ci/lava artifacts/

if [ -n "$MINIO_ARTIFACT_NAME" ]; then
    # Pass needed files to the test stage
    MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.gz"
    gzip -c artifacts/install.tar > ${MINIO_ARTIFACT_NAME}
    ci-fairy minio login $CI_JOB_JWT
    ci-fairy minio cp ${MINIO_ARTIFACT_NAME} minio://${PIPELINE_ARTIFACTS_BASE}/${MINIO_ARTIFACT_NAME}
fi
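The artifact-prep script pulls the cross toolchain's strip binary out of the Meson cross file with a sed capture (strip\s*=\s*'(.*)'). The same extraction expressed in Python, purely as an illustration; the cross-file snippet here is made up:

import re

cross_file = """
[binaries]
c = 'aarch64-linux-gnu-gcc'
strip = 'aarch64-linux-gnu-strip'
"""

match = re.search(r"strip\s*=\s*'(.*)'", cross_file)
strip = match.group(1) if match else "strip"   # fall back to the native strip
print(strip)   # aarch64-linux-gnu-strip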
@@ -1,151 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2021 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import argparse
import io
import re
import socket
import time


class Connection:
    def __init__(self, host, port, verbose):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((host, port))
        self.s.setblocking(0)
        self.verbose = verbose

    def send_line(self, line):
        if self.verbose:
            print(f"IRC: sending {line}")
        self.s.sendall((line + '\n').encode())

    def wait(self, secs):
        for i in range(secs):
            if self.verbose:
                while True:
                    try:
                        data = self.s.recv(1024)
                    except io.BlockingIOError:
                        break
                    if data == "":
                        break
                    for line in data.decode().split('\n'):
                        print(f"IRC: received {line}")
            time.sleep(1)

    def quit(self):
        self.send_line("QUIT")
        self.s.shutdown(socket.SHUT_WR)
        self.s.close()


def read_flakes(results):
    flakes = []
    csv = re.compile("(.*),(.*),(.*)")
    for line in open(results, 'r').readlines():
        match = csv.match(line)
        if match.group(2) == "Flake":
            flakes.append(match.group(1))
    return flakes

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str,
                        help='IRC server hostname', required=True)
    parser.add_argument('--port', type=int,
                        help='IRC server port', required=True)
    parser.add_argument('--results', type=str,
                        help='results.csv file from deqp-runner or piglit-runner', required=True)
    parser.add_argument('--known-flakes', type=str,
                        help='*-flakes.txt file passed to deqp-runner or piglit-runner', required=True)
    parser.add_argument('--channel', type=str,
                        help='Known flakes report channel', required=True)
    parser.add_argument('--url', type=str,
                        help='$CI_JOB_URL', required=True)
    parser.add_argument('--runner', type=str,
                        help='$CI_RUNNER_DESCRIPTION', required=True)
    parser.add_argument('--branch', type=str,
                        help='optional branch name')
    parser.add_argument('--branch-title', type=str,
                        help='optional branch title')
    parser.add_argument('--job', type=str,
                        help='$CI_JOB_ID', required=True)
    parser.add_argument('--verbose', "-v", action="store_true",
                        help='log IRC interactions')
    args = parser.parse_args()

    flakes = read_flakes(args.results)
    if not flakes:
        exit(0)

    known_flakes = []
    for line in open(args.known_flakes).readlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        known_flakes.append(re.compile(line))

    irc = Connection(args.host, args.port, args.verbose)

    # The nick needs to be something unique so that multiple runners
    # connecting at the same time don't race for one nick and get blocked.
    # freenode has a 16-char limit on nicks (9 is the IETF standard, but
    # various servers extend that).  So, trim off the common prefixes of the
    # runner name, and append the job ID so that software runners with more
    # than one concurrent job (think swrast) don't collide.  For freedreno,
    # that gives us a nick as long as db410c-N-JJJJJJJJ, and it'll be a while
    # before we make it to 9-digit jobs (we're at 7 so far).
    nick = args.runner
    nick = nick.replace('mesa-', '')
    nick = nick.replace('google-freedreno-', '')
    nick += f'-{args.job}'
    irc.send_line(f"NICK {nick}")
    irc.send_line(f"USER {nick} unused unused: Gitlab CI Notifier")
    irc.wait(10)
    irc.send_line(f"JOIN {args.channel}")
    irc.wait(1)

    branchinfo = ""
    if args.branch:
        branchinfo = f" on branch {args.branch} ({args.branch_title})"
    irc.send_line(
        f"PRIVMSG {args.channel} :Flakes detected in job {args.url} on {args.runner}{branchinfo}:")

    for flake in flakes:
        status = "NEW "
        for known in known_flakes:
            if known.match(flake):
                status = ""
                break

        irc.send_line(f"PRIVMSG {args.channel} :{status}{flake}")

    irc.send_line(
        f"PRIVMSG {args.channel} :See {args.url}/artifacts/browse/results/")

    irc.quit()


if __name__ == '__main__':
    main()
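report-flakes.py expects results.csv rows with at least three comma-separated fields, where the second field is the status, and keeps the test names whose status is Flake. A quick sketch of that parse on a made-up row:

import re

csv_line = "dEQP-GLES2.functional.example,Flake,1.23"   # made-up results.csv row
match = re.compile("(.*),(.*),(.*)").match(csv_line)
if match and match.group(2) == "Flake":
    print("flaky test:", match.group(1))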
@@ -1,18 +0,0 @@
set -e
set -v

ARTIFACTSDIR=`pwd`/shader-db
mkdir -p $ARTIFACTSDIR
export DRM_SHIM_DEBUG=true

LIBDIR=`pwd`/install/lib
export LD_LIBRARY_PATH=$LIBDIR

cd /usr/local/shader-db

for driver in freedreno intel v3d; do
    echo "Running drm-shim for $driver"
    env LD_PRELOAD=$LIBDIR/lib${driver}_noop_drm_shim.so \
        ./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
            > $ARTIFACTSDIR/${driver}-shader-db.txt
done
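The shader-db job above compiles against a no-op DRM shim by LD_PRELOADing lib<driver>_noop_drm_shim.so, so no real GPU is needed. A hedged sketch of launching one such run from Python; the paths are placeholders:

import os
import subprocess

libdir = "/build/install/lib"                      # placeholder install path
env = dict(os.environ,
           LD_LIBRARY_PATH=libdir,
           LD_PRELOAD=f"{libdir}/libfreedreno_noop_drm_shim.so")
with open("freedreno-shader-db.txt", "w") as out:
    subprocess.run(["./run", "-j4", "./shaders"], env=env,
                   stdout=out, check=False, cwd="/usr/local/shader-db")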
@@ -1,637 +0,0 @@
 | 
			
		||||
# This file list source dependencies to avoid creating/running jobs
 | 
			
		||||
# those outcome cannot be changed by the modifications from a branch.
 | 
			
		||||
 | 
			
		||||
# Generic rule to not run the job during scheduled pipelines
 | 
			
		||||
# ----------------------------------------------------------
 | 
			
		||||
.scheduled_pipelines-rules:
 | 
			
		||||
  rules: &ignore_scheduled_pipelines
 | 
			
		||||
    if: '$CI_PIPELINE_SOURCE == "schedule"'
 | 
			
		||||
    when: never
 | 
			
		||||
 | 
			
		||||
# Mesa core source file dependencies
 | 
			
		||||
# ----------------------------------
 | 
			
		||||
.mesa-rules:
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes: &mesa_core_file_list
 | 
			
		||||
      - .gitlab-ci.yml
 | 
			
		||||
      - .gitlab-ci/**/*
 | 
			
		||||
      - include/**/*
 | 
			
		||||
      - meson.build
 | 
			
		||||
      - src/*
 | 
			
		||||
      - src/compiler/**/*
 | 
			
		||||
      - src/drm-shim/**/*
 | 
			
		||||
      - src/egl/**/*
 | 
			
		||||
      - src/gbm/**/*
 | 
			
		||||
      - src/glx/**/*
 | 
			
		||||
      - src/gtest/**/*
 | 
			
		||||
      - src/hgl/**/*
 | 
			
		||||
      - src/include/**/*
 | 
			
		||||
      - src/loader/**/*
 | 
			
		||||
      - src/mapi/**/*
 | 
			
		||||
      - src/mesa/*
 | 
			
		||||
      - src/mesa/drivers/*
 | 
			
		||||
      - src/mesa/drivers/common/**/*
 | 
			
		||||
      - src/mesa/drivers/dri/*
 | 
			
		||||
      - src/mesa/drivers/dri/common/**/*
 | 
			
		||||
      - src/mesa/main/**/*
 | 
			
		||||
      - src/mesa/math/**/*
 | 
			
		||||
      - src/mesa/program/**/*
 | 
			
		||||
      - src/mesa/sparc/**/*
 | 
			
		||||
      - src/mesa/state_tracker/**/*
 | 
			
		||||
      - src/mesa/swrast/**/*
 | 
			
		||||
      - src/mesa/swrast_setup/**/*
 | 
			
		||||
      - src/mesa/tnl/**/*
 | 
			
		||||
      - src/mesa/tnl_dd/**/*
 | 
			
		||||
      - src/mesa/vbo/**/*
 | 
			
		||||
      - src/mesa/x86/**/*
 | 
			
		||||
      - src/mesa/x86-64/**/*
 | 
			
		||||
      - src/util/**/*
 | 
			
		||||
 | 
			
		||||
.vulkan-rules:
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes: &vulkan_file_list
 | 
			
		||||
      - src/vulkan/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
# Gallium core source file dependencies
 | 
			
		||||
# -------------------------------------
 | 
			
		||||
.gallium-rules:
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes: &gallium_core_file_list
 | 
			
		||||
      - src/gallium/*
 | 
			
		||||
      - src/gallium/auxiliary/**/*
 | 
			
		||||
      - src/gallium/drivers/*
 | 
			
		||||
      - src/gallium/include/**/*
 | 
			
		||||
      - src/gallium/frontends/dri/*
 | 
			
		||||
      - src/gallium/frontends/glx/**/*
 | 
			
		||||
      - src/gallium/targets/**/*
 | 
			
		||||
      - src/gallium/tests/**/*
 | 
			
		||||
      - src/gallium/winsys/*
 | 
			
		||||
 | 
			
		||||
.softpipe-rules:
 | 
			
		||||
  stage: software-renderer
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &softpipe_file_list
 | 
			
		||||
      - src/gallium/drivers/softpipe/**/*
 | 
			
		||||
      - src/gallium/winsys/sw/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.llvmpipe-rules:
 | 
			
		||||
  stage: software-renderer
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &llvmpipe_file_list
 | 
			
		||||
      - src/gallium/drivers/llvmpipe/**/*
 | 
			
		||||
      - src/gallium/winsys/sw/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.lavapipe-rules:
 | 
			
		||||
  stage: software-renderer
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &lavapipe_file_list
 | 
			
		||||
      - src/gallium/drivers/llvmpipe/**/*
 | 
			
		||||
      - src/gallium/frontends/lavapipe/**/*
 | 
			
		||||
      - src/gallium/winsys/sw/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.llvmpipe-cl-rules:
 | 
			
		||||
  stage: software-renderer
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
      - .gitlab-ci.yml
 | 
			
		||||
      - .gitlab-ci/**/*
 | 
			
		||||
      - meson.build
 | 
			
		||||
      - include/**/*
 | 
			
		||||
      - src/compiler/**/*
 | 
			
		||||
      - src/include/**/*
 | 
			
		||||
      - src/util/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *llvmpipe_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &clover_file_list
 | 
			
		||||
      - src/gallium/frontends/clover/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.freedreno-rules:
 | 
			
		||||
  stage: freedreno
 | 
			
		||||
  rules:
 | 
			
		||||
    - if: '$FD_FARM == "offline"'
 | 
			
		||||
      when: never
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &freedreno_file_list
 | 
			
		||||
      # Note: when https://gitlab.com/gitlab-org/gitlab/-/issues/198688
 | 
			
		||||
      # is supported, we can change the src/freedreno/ rule to explicitly
 | 
			
		||||
      # exclude tools
 | 
			
		||||
      - src/freedreno/**/*
 | 
			
		||||
      - src/gallium/drivers/freedreno/**/*
 | 
			
		||||
      - src/gallium/winsys/freedreno/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
 | 
			
		||||
# rules duplication manually
 | 
			
		||||
.freedreno-rules-restricted:
 | 
			
		||||
  stage: freedreno
 | 
			
		||||
  rules:
 | 
			
		||||
    - if: '$FD_FARM == "offline"'
 | 
			
		||||
      when: never
 | 
			
		||||
    # If the triggerer has access to the restricted traces and if it is pre-merge
 | 
			
		||||
    - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu)$/") &&
 | 
			
		||||
           ($GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME)'
 | 
			
		||||
      when: never
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *freedreno_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
 | 
			
		||||
# rules duplication manually
 | 
			
		||||
.freedreno-rules-performance:
 | 
			
		||||
  stage: freedreno
 | 
			
		||||
  rules:
 | 
			
		||||
    - if: '$FD_FARM == "offline"'
 | 
			
		||||
      when: never
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    # Run only on pre-merge pipelines from Marge
 | 
			
		||||
    - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
 | 
			
		||||
      when: never
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: manual
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: manual
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: manual
 | 
			
		||||
    - changes:
 | 
			
		||||
        *freedreno_file_list
 | 
			
		||||
      when: manual
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.panfrost-midgard-rules:
 | 
			
		||||
  stage: arm
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &panfrost_gallium_file_list
 | 
			
		||||
      - src/gallium/drivers/panfrost/**/*
 | 
			
		||||
      - src/gallium/winsys/panfrost/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &panfrost_common_file_list
 | 
			
		||||
      - src/panfrost/ci/*
 | 
			
		||||
      - src/panfrost/include/*
 | 
			
		||||
      - src/panfrost/lib/*
 | 
			
		||||
      - src/panfrost/shared/*
 | 
			
		||||
      - src/panfrost/util/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/panfrost/midgard/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.panfrost-bifrost-rules:
 | 
			
		||||
  stage: arm
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *panfrost_common_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *panfrost_gallium_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &panfrost_vulkan_file_list
 | 
			
		||||
      - src/panfrost/vulkan/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/panfrost/bifrost/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.vc4-rules:
 | 
			
		||||
  stage: broadcom
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/broadcom/**/*
 | 
			
		||||
      - src/gallium/drivers/vc4/**/*
 | 
			
		||||
      - src/gallium/winsys/kmsro/**/*
 | 
			
		||||
      - src/gallium/winsys/vc4/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.v3d-rules:
 | 
			
		||||
  stage: broadcom
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/broadcom/**/*
 | 
			
		||||
      - src/gallium/drivers/v3d/**/*
 | 
			
		||||
      - src/gallium/winsys/kmsro/**/*
 | 
			
		||||
      - src/gallium/winsys/v3d/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.v3dv-rules:
 | 
			
		||||
  stage: broadcom
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *vulkan_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/broadcom/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.lima-rules:
 | 
			
		||||
  stage: arm
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/gallium/drivers/lima/**/*
 | 
			
		||||
      - src/gallium/winsys/lima/**/*
 | 
			
		||||
      - src/lima/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.radv-rules:
 | 
			
		||||
  stage: amd
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &radv_file_list
 | 
			
		||||
      - src/amd/**/*
 | 
			
		||||
      - src/vulkan/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.virgl-rules:
 | 
			
		||||
  stage: layered-backends
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *llvmpipe_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &virgl_file_list
 | 
			
		||||
      - src/gallium/drivers/virgl/**/*
 | 
			
		||||
      - src/gallium/winsys/virgl/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.radeonsi-rules:
 | 
			
		||||
  stage: amd
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &radeonsi_file_list
 | 
			
		||||
      - src/gallium/drivers/radeonsi/**/*
 | 
			
		||||
      - src/gallium/winsys/amdgpu/**/*
 | 
			
		||||
      - src/amd/*
 | 
			
		||||
      - src/amd/addrlib/**/*
 | 
			
		||||
      - src/amd/common/**/*
 | 
			
		||||
      - src/amd/llvm/**/*
 | 
			
		||||
      - src/amd/registers/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.radeonsi-vaapi-rules:
 | 
			
		||||
  stage: amd
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *radeonsi_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &radeon_vcn_file_list
 | 
			
		||||
      - src/gallium/frontends/va/**/*
 | 
			
		||||
      - src/gallium/drivers/radeon/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.i915g-rules:
 | 
			
		||||
  stage: intel
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
      - src/gallium/drivers/i915/**/*
 | 
			
		||||
      - src/gallium/winsys/i915/**/*
 | 
			
		||||
      - src/intel/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
.iris-rules:
 | 
			
		||||
  stage: intel
 | 
			
		||||
  rules:
 | 
			
		||||
    - *ignore_scheduled_pipelines
 | 
			
		||||
    - changes:
 | 
			
		||||
        *mesa_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes:
 | 
			
		||||
        *gallium_core_file_list
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - changes: &iris_file_list
 | 
			
		||||
      - src/gallium/drivers/iris/**/*
 | 
			
		||||
      - src/gallium/winsys/iris/**/*
 | 
			
		||||
      - src/intel/**/*
 | 
			
		||||
      when: on_success
 | 
			
		||||
    - when: never
 | 
			
		||||
 | 
			
		||||
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
 | 
			
		||||
# rules duplication manually
.iris-rules-performance:
  stage: intel
  rules:
    - *ignore_scheduled_pipelines
    # Run only on pre-merge pipelines from Marge
    - if: '$GITLAB_USER_LOGIN != "marge-bot" || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME != $CI_COMMIT_REF_NAME'
      when: never
    - changes:
        *mesa_core_file_list
      when: manual
    - changes:
        *gallium_core_file_list
      when: manual
    - changes:
        *iris_file_list
      when: manual
    - when: never

.anv-rules:
  stage: intel
  rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes:
      - src/intel/**/*
      when: on_success
    - when: never

.zink-rules:
  stage: layered-backends
  rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes: &zink_file_list
      - src/gallium/drivers/zink/**/*
      when: on_success
    - when: never

# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.windows-build-rules:
  rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: &d3d12_file_list
      - src/gallium/drivers/d3d12/**/*
      - src/microsoft/**/*
      - src/gallium/frontends/wgl/*
      - src/gallium/winsys/d3d12/wgl/*
      - src/gallium/targets/libgl-gdi/*
      - src/gallium/targets/libgl-d3d12/*
      when: on_success
    - changes:
        *zink_file_list
      when: on_success
    - changes:
        *radv_file_list
      when: on_success
    - when: never

.windows-test-rules:
  rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *vulkan_file_list
      when: on_success
    - changes: *d3d12_file_list
      when: on_success
    - when: never

.etnaviv-rules:
  stage: etnaviv
  rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
      - src/etnaviv/**/*
      - src/gallium/drivers/etnaviv/**/*
      - src/gallium/winsys/etnaviv/**/*
      - src/gallium/auxiliary/renderonly/**/*
      - src/gallium/winsys/kmsro/**/*
      when: on_success
    - when: never

# Rules for unusual architectures that only build a subset of drivers
.ppc64el-rules:
   rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *softpipe_file_list
      when: on_success
    - changes:
        *llvmpipe_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *radv_file_list
      when: on_success
    - changes:
        *radeonsi_file_list
      when: on_success
    - changes:
        *zink_file_list
      when: on_success
    - changes:
        *virgl_file_list
      when: on_success
    - changes:
      - src/gallium/drivers/nouveau/**/*
      - src/gallium/winsys/nouveau/**/*
      when: on_success
    - when: never

.s390x-rules:
   rules:
    - *ignore_scheduled_pipelines
    - changes:
        *mesa_core_file_list
      when: on_success
    - changes:
        *gallium_core_file_list
      when: on_success
    - changes:
        *softpipe_file_list
      when: on_success
    - changes:
        *llvmpipe_file_list
      when: on_success
    - changes:
        *lavapipe_file_list
      when: on_success
    - changes:
        *zink_file_list
      when: on_success
    - when: never

@@ -1,93 +0,0 @@
#!/bin/sh

set -ex

if [ "x$VK_DRIVER" = "x" ]; then
    exit 1
fi

INSTALL=$(realpath -s "$PWD"/install)

RESULTS=$(realpath -s "$PWD"/results)

# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when using a
# command wrapper. Hence, we just set it when running the command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"

# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(cat "$INSTALL/VERSION" | sed 's/\./\\./g')

# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"

# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/vkd3d-proton-wine64"
export WINEESYNC=1

print_red() {
    RED='\033[0;31m'
    NC='\033[0m' # No Color
    printf "${RED}"
    "$@"
    printf "${NC}"
}

# wrapper to suppress +x to avoid spamming the log
quiet() {
    set +x
    "$@"
    set -x
}

SANITY_MESA_VERSION_CMD="vulkaninfo | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""

HANG_DETECTION_CMD="/parallel-deqp-runner/build/bin/hang-detection"

RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD"

set +e
eval $RUN_CMD

if [ $? -ne 0 ]; then
    printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi
set -e

if [ -d "$RESULTS" ]; then
    cd "$RESULTS" && rm -rf ..?* .[!.]* * && cd -
else
    mkdir "$RESULTS"
fi

VKD3D_PROTON_TESTSUITE_CMD="wine /vkd3d-proton-tests/x64/bin/d3d12.exe >$RESULTS/vkd3d-proton.log 2>&1"

quiet printf "%s\n" "Running vkd3d-proton testsuite..."
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $HANG_DETECTION_CMD $VKD3D_PROTON_TESTSUITE_CMD"

set +e
eval $RUN_CMD

VKD3D_PROTON_RESULTS="vkd3d-proton-${VKD3D_PROTON_RESULTS:-results}"
RESULTSFILE="$RESULTS/$VKD3D_PROTON_RESULTS.txt"
mkdir -p .gitlab-ci/vkd3d-proton
grep "Test failed" "$RESULTS"/vkd3d-proton.log > "$RESULTSFILE"

if [ -f "$INSTALL/$VKD3D_PROTON_RESULTS.txt" ]; then
    cp "$INSTALL/$VKD3D_PROTON_RESULTS.txt" \
       ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
else
    touch ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
fi

if diff -q ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"; then
    exit 0
fi

quiet print_red printf "%s\n" "Changes found, see vkd3d-proton.log!"
quiet diff --color=always -u ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"
exit 1

@@ -1,13 +0,0 @@
# escape=`

FROM mcr.microsoft.com/windows:1809

# Make sure any failure in PowerShell scripts is fatal
SHELL ["powershell", "-ExecutionPolicy", "RemoteSigned", "-Command", "$ErrorActionPreference = 'Stop';"]
ENV ErrorActionPreference='Stop'

COPY mesa_deps_vs2019.ps1 C:\
RUN C:\mesa_deps_vs2019.ps1

COPY mesa_deps.ps1 C:\
RUN C:\mesa_deps.ps1

@@ -1,32 +0,0 @@
# Native Windows GitLab CI builds

Unlike Linux, Windows cannot reuse the freedesktop ci-templates as they exist,
since we do not have Podman, Skopeo, or even Docker-in-Docker builds available
under Windows.

We still reuse the same model: build a base container with the core operating
system and infrequently-changed build dependencies, then execute Mesa builds
only inside that base container. This is open-coded in PowerShell scripts.

## Base container build

The base container build job executes the `mesa_container.ps1` script, which
reproduces the ci-templates behaviour. It looks for the registry image in
the user's namespace and exits if found. If not found, it tries to copy
the same image tag from the upstream Mesa repository. If that is not found
either, the image is rebuilt inside the user's namespace.
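
That look-up/copy/rebuild flow amounts to something like the sketch below.
The variable names, image paths, and the use of plain `docker` CLI calls are
illustrative assumptions only; the authoritative logic is whatever
`mesa_container.ps1` actually does.

```powershell
# Illustrative sketch only -- image names and $env:CONTAINER_TAG are placeholders.
$UserImage     = "$env:CI_REGISTRY_IMAGE/windows/build:$env:CONTAINER_TAG"
$UpstreamImage = "registry.freedesktop.org/mesa/mesa/windows/build:$env:CONTAINER_TAG"

docker pull $UserImage
if ($LASTEXITCODE -eq 0) { Exit 0 }     # already present in the user's namespace

docker pull $UpstreamImage              # otherwise try to reuse the upstream image
if ($LASTEXITCODE -eq 0) {
  docker tag $UpstreamImage $UserImage
  docker push $UserImage
  Exit 0
}

docker build -t $UserImage .            # neither exists: rebuild from the Dockerfile
docker push $UserImage
```

Pushing the copied or rebuilt image back into the user's namespace is what
lets subsequent pipelines take the early-exit path instead of rebuilding.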

The rebuild executes `docker build` which calls `mesa_deps.ps1` inside the
container to fetch and install all build dependencies. This includes Visual
Studio Community Edition (downloaded from Microsoft, under the license which
allows use by open-source projects), other build tools from Chocolatey, and
finally Meson and Python dependencies from PyPI.
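
In other words, the dependency step reduces to package-manager calls of
roughly the following shape; the package names here are illustrative guesses,
not the actual pinned list, which lives in `mesa_deps.ps1`.

```powershell
# Illustrative only -- see mesa_deps.ps1 for the real package set and versions.
choco install -y git python3 ninja winflexbison pkgconfiglite   # build tools from Chocolatey
py -3 -m pip install --upgrade pip
py -3 -m pip install meson mako                                 # Meson and Python deps from PyPI
```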

This job is executed inside a Windows shell environment directly inside the
host, without Docker.

## Mesa build

The Mesa build runs inside the base container, executing `mesa_build.ps1`.
This simply compiles Mesa using Meson and Ninja, executing the build and
unit tests. Currently, no build artifacts are captured.

@@ -1,24 +0,0 @@
# force the CA cert cache to be rebuilt, in case Meson tries to access anything
Write-Host "Refreshing Windows TLS CA cache"
(New-Object System.Net.WebClient).DownloadString("https://github.com") >$null

$env:PYTHONUTF8=1

Get-Date
Write-Host "Compiling Mesa"
$builddir = New-Item -ItemType Directory -Name "_build"
$installdir = New-Item -ItemType Directory -Name "_install"
Push-Location $builddir.FullName
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && meson --default-library=shared -Dzlib:default_library=static --buildtype=release -Db_ndebug=false -Dc_std=c17 -Dcpp_std=vc++latest -Db_vscrt=mt --cmake-prefix-path=`"C:\llvm-10`" --pkg-config-path=`"C:\llvm-10\lib\pkgconfig;C:\llvm-10\share\pkgconfig;C:\spirv-tools\lib\pkgconfig`" --prefix=`"$installdir`" -Dllvm=enabled -Dshared-llvm=disabled -Dvulkan-drivers=swrast,amd -Dgallium-drivers=swrast,d3d12,zink -Dshared-glapi=enabled -Dgles2=enabled -Dmicrosoft-clc=enabled -Dstatic-libclc=all -Dspirv-to-dxil=true -Dbuild-tests=true -Dwerror=true -Dwarning_level=2 -Dzlib:warning_level=1 -Dlibelf:warning_level=1 && ninja -j32 install && meson test --num-processes 32"
$buildstatus = $?
Pop-Location

Get-Date

if (!$buildstatus) {
  Write-Host "Mesa build or test failed"
  Exit 1
}

Copy-Item ".\.gitlab-ci\windows\piglit_run.ps1" -Destination $installdir
Copy-Item ".\.gitlab-ci\windows\quick_gl.txt" -Destination $installdir