Compare commits

1 commit

Author: jtg
SHA1: fd624c0601
Message: Imported sources
Date: 1999-08-19 00:55:39 +00:00

8556 changed files with 140364 additions and 3623514 deletions

@@ -1,18 +0,0 @@
((nil . ((show-trailing-whitespace . t)))
(prog-mode
(indent-tabs-mode . nil)
(tab-width . 8)
(c-basic-offset . 3)
(c-file-style . "stroustrup")
(fill-column . 78)
(eval . (progn
(c-set-offset 'case-label '0)
(c-set-offset 'innamespace '0)
(c-set-offset 'inline-open '0)))
(whitespace-style face indentation)
(whitespace-line-column . 79)
(eval ignore-errors
(require 'whitespace)
(whitespace-mode 1)))
(makefile-mode (indent-tabs-mode . t))
)


@@ -1,44 +0,0 @@
# To use this config in your editor, follow the instructions at:
# http://editorconfig.org
root = true
[*]
charset = utf-8
insert_final_newline = true
tab_width = 8
[*.{c,h,cpp,hpp,cc,hh}]
indent_style = space
indent_size = 3
max_line_length = 78
[{Makefile*,*.mk}]
indent_style = tab
[{*.py,SCons*}]
indent_style = space
indent_size = 4
[*.pl]
indent_style = space
indent_size = 4
[*.m4]
indent_style = space
indent_size = 2
[*.yml]
indent_style = space
indent_size = 2
[*.html]
indent_style = space
indent_size = 2
[*.patch]
trim_trailing_whitespace = false
[{meson.build,meson_options.txt}]
indent_style = space
indent_size = 2

.gitignore (vendored)

@@ -1,4 +0,0 @@
*.pyc
*.pyo
*.out
build

File diff suppressed because it is too large.


@@ -1,2 +0,0 @@
[*.sh]
indent_size = 2


@@ -1,81 +0,0 @@
#!/bin/sh
# This test script groups together a bunch of fast dEQP variant runs
# to amortize the cost of rebooting the board.
set -ex
EXIT=0
# Test rendering with the gmem path forced when possible (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/gmem \
DEQP_VER=gles31 \
DEQP_FRACTION=5 \
FD_MESA_DEBUG=nobypass \
/install/deqp-runner.sh; then
EXIT=1
fi
# Test rendering with the bypass path forced when possible (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/bypass \
DEQP_VER=gles31 \
DEQP_FRACTION=5 \
FD_MESA_DEBUG=nogmem \
GPU_VERSION=freedreno-a630-bypass \
/install/deqp-runner.sh; then
EXIT=1
fi
# Test rendering with the UBO-to-constants optimization disabled (~1 minute)
if ! env \
DEQP_RESULTS_DIR=results/nouboopt \
DEQP_VER=gles31 \
IR3_SHADER_DEBUG=nouboopt \
DEQP_CASELIST_FILTER="functional.*ubo" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles3-565nozs mustpass (~20s)
if ! env \
DEQP_RESULTS_DIR=results/gles3-565nozs \
DEQP_VER=gles3 \
DEQP_CONFIG="rgb565d0s0ms0" \
DEQP_VARIANT="565-no-depth-no-stencil" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles31-565nozs mustpass (~1s)
if ! env \
DEQP_RESULTS_DIR=results/gles31-565nozs \
DEQP_VER=gles31 \
DEQP_CONFIG="rgb565d0s0ms0" \
DEQP_VARIANT="565-no-depth-no-stencil" \
/install/deqp-runner.sh; then
EXIT=1
fi
# gles3-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
# if ! env \
# DEQP_RESULTS_DIR=results/gles3-multisample \
# DEQP_VER=gles3 \
# DEQP_CONFIG="rgba8888d24s8ms4" \
# DEQP_VARIANT="multisample" \
# /install/deqp-runner.sh; then
# EXIT=1
# fi
# gles31-multisample mustpass -- disabled pending https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/1859
# if ! env \
# DEQP_RESULTS_DIR=results/gles31-multisample \
# DEQP_VER=gles31 \
# DEQP_CONFIG="rgba8888d24s8ms4" \
# DEQP_VARIANT="multisample" \
# /install/deqp-runner.sh; then
# EXIT=1
# fi
exit $EXIT


@@ -1,14 +0,0 @@
#!/bin/sh
while true; do
devcds=`find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null`
for i in $devcds; do
echo "Found a devcoredump at $i."
if cp $i /results/first.devcore; then
echo 1 > $i
echo "Saved to the job artifacts at /first.devcore"
exit 0
fi
done
sleep 10
done


@@ -1,100 +0,0 @@
#!/bin/bash
# Boot script for Chrome OS devices attached to a servo debug connector, using
# NFS and TFTP to boot.
# We're run from the root of the repo; make a helper var for our paths
BM=$CI_PROJECT_DIR/install/bare-metal
# Runner config checks
if [ -z "$BM_SERIAL" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the CPU serial device."
exit 1
fi
if [ -z "$BM_SERIAL_EC" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the EC serial device for controlling board power"
exit 1
fi
if [ ! -d /nfs ]; then
echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
exit 1
fi
if [ ! -d /tftp ]; then
echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
exit 1
fi
# job config checks
if [ -z "$BM_KERNEL" ]; then
echo "Must set BM_KERNEL to your board's kernel FIT image"
exit 1
fi
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
exit 1
fi
if [ -z "$BM_CMDLINE" ]; then
echo "Must set BM_CMDLINE to your board's kernel command line arguments"
exit 1
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
# Put the kernel/dtb image and the boot command line in the tftp directory for
# the board to find. For normal Mesa development, we build the kernel and
# store it in the docker container that this script is running in.
#
# However, container builds are expensive, so when you're hacking on the
# kernel, it's nice to be able to skip the half-hour container build, plus
# moving that container to the runner. So, if BM_KERNEL is a URL, fetch it
# instead of looking in the container. Note that the kernel build should be
# the output of:
#
# make Image.lzma
#
# mkimage \
# -A arm64 \
# -f auto \
# -C lzma \
# -d arch/arm64/boot/Image.lzma \
# -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
# cheza-image.img
rm -rf /tftp/*
if echo "$BM_KERNEL" | grep -q http; then
apt install -y wget
wget $BM_KERNEL -O /tftp/vmlinuz
else
cp $BM_KERNEL /tftp/vmlinuz
fi
echo "$BM_CMDLINE" > /tftp/cmdline
set +e
python3 $BM/cros_servo_run.py \
--cpu $BM_SERIAL \
--ec $BM_SERIAL_EC
ret=$?
set -e
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
exit $ret


@@ -1,175 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import queue
import re
from serial_buffer import SerialBuffer
import sys
import threading
class CrosServoRun:
    def __init__(self, cpu, ec):
        # Merged FIFO for the two serial buffers, fed by threads.
        self.serial_queue = queue.Queue()
        self.sentinel = object()
        self.threads_done = 0

        self.ec_ser = SerialBuffer(
            ec, "results/serial-ec.txt", "R SERIAL-EC> ")
        self.cpu_ser = SerialBuffer(
            cpu, "results/serial.txt", "R SERIAL-CPU> ")

        self.iter_feed_ec = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.ec_ser.lines(),))
        self.iter_feed_ec.start()

        self.iter_feed_cpu = threading.Thread(
            target=self.iter_feed_queue, daemon=True, args=(self.cpu_ser.lines(),))
        self.iter_feed_cpu.start()

    # Feed lines from our serial queues into the merged queue, marking when our
    # input is done.
    def iter_feed_queue(self, it):
        for i in it:
            self.serial_queue.put(i)
        self.serial_queue.put(self.sentinel)

    # Return the next line from the queue, counting how many threads have
    # terminated and joining when done.
    def get_serial_queue_line(self):
        line = self.serial_queue.get()
        if line == self.sentinel:
            self.threads_done = self.threads_done + 1
            if self.threads_done == 2:
                self.iter_feed_cpu.join()
                self.iter_feed_ec.join()
        return line

    # Returns an iterator for getting the next line.
    def serial_queue_lines(self):
        return iter(self.get_serial_queue_line, self.sentinel)

    def ec_write(self, s):
        print("W SERIAL-EC> %s" % s)
        self.ec_ser.serial.write(s.encode())

    def cpu_write(self, s):
        print("W SERIAL-CPU> %s" % s)
        self.cpu_ser.serial.write(s.encode())

    def print_error(self, message):
        RED = '\033[0;31m'
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def run(self):
        # Flush any partial commands in the EC's prompt, then ask for a reboot.
        self.ec_write("\n")
        self.ec_write("reboot\n")

        # This is emitted right when the bootloader pauses to check for input.
        # Emit a ^N character to request network boot, because we don't have a
        # direct-to-netboot firmware on cheza.
        for line in self.serial_queue_lines():
            if re.search("load_archive: loading locale_en.bin", line):
                self.cpu_write("\016")
                break

            # The Cheza boards have issues with failing to bring up power to
            # the system sometimes, possibly dependent on ambient temperature
            # in the farm.
            if re.search("POWER_GOOD not seen in time", line):
                self.print_error("Detected intermittent poweron failure, restarting run...")
                return 2

        tftp_failures = 0

        for line in self.serial_queue_lines():
            if re.search("---. end Kernel panic", line):
                return 1

            # The Cheza firmware seems to occasionally get stuck looping in
            # this error state during TFTP booting, possibly based on amount of
            # network traffic around it, but it'll usually recover after a
            # reboot.
            if re.search("R8152: Bulk read error 0xffffffbf", line):
                tftp_failures += 1
                if tftp_failures >= 100:
                    self.print_error("Detected intermittent tftp failure, restarting run...")
                    return 2

            # There are very infrequent bus errors during power management transitions
            # on cheza, which we don't expect to be the case on future boards.
            if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
                self.print_error("Detected cheza power management bus error, restarting run...")
                return 2

            # These HFI response errors started appearing with the introduction
            # of piglit runs. CosmicPenguin says:
            #
            # "message ID 106 isn't a thing, so likely what happened is that we
            # got confused when parsing the HFI queue. If it happened on only
            # one run, then memory corruption could be a possible clue"
            #
            # Given that it seems to trigger randomly near a GPU fault and then
            # break many tests after that, just restart the whole run.
            if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
                self.print_error("Detected cheza HFI response error, restarting run...")
                return 2

            result = re.search(r"bare-metal result: (\S*)", line)
            if result:
                if result.group(1) == "pass":
                    return 0
                else:
                    return 1

        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', type=str,
                        help='CPU Serial device', required=True)
    parser.add_argument(
        '--ec', type=str, help='EC Serial device', required=True)
    args = parser.parse_args()

    servo = CrosServoRun(args.cpu, args.ec)

    while True:
        retval = servo.run()
        if retval != 2:
            break

    # power down the CPU on the device
    servo.ec_write("power off\n")

    sys.exit(retval)


if __name__ == '__main__':
    main()


@@ -1,30 +0,0 @@
#!/bin/bash
set -e
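# Usage (inferred from the code below): <script> <logfile> [-f string]... [-e string]...
# Blocks until the log file contains one of the given strings, then exits
# with an error if any of the -e strings are present in the file.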
STRINGS=$(mktemp)
ERRORS=$(mktemp)
trap "rm $STRINGS; rm $ERRORS;" EXIT
FILE=$1
shift 1
while getopts "f:e:" opt; do
case $opt in
f) echo "$OPTARG" >> $STRINGS;;
e) echo "$OPTARG" >> $STRINGS ; echo "$OPTARG" >> $ERRORS;;
esac
done
shift $((OPTIND -1))
echo "Waiting for $FILE to say one of following strings"
cat $STRINGS
while ! egrep -wf $STRINGS $FILE; do
sleep 2
done
if egrep -wf $ERRORS $FILE; then
exit 1
fi


@@ -1,164 +0,0 @@
#!/bin/bash
BM=$CI_PROJECT_DIR/install/bare-metal
if [ -z "$BM_SERIAL" -a -z "$BM_SERIAL_SCRIPT" ]; then
echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
echo "BM_SERIAL:"
echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
echo "BM_SERIAL_SCRIPT:"
echo " This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
exit 1
fi
if [ -z "$BM_POWERUP" ]; then
echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should reset the device and begin its boot sequence"
echo "such that it pauses at fastboot."
exit 1
fi
if [ -z "$BM_POWERDOWN" ]; then
echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power off the device."
exit 1
fi
if [ -z "$BM_FASTBOOT_SERIAL" ]; then
echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This must be the a stable-across-resets fastboot serial number."
exit 1
fi
if [ -z "$BM_KERNEL" ]; then
echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
exit 1
fi
if [ -z "$BM_DTB" ]; then
echo "Must set BM_DTB to your board's DTB file in the job's variables:"
exit 1
fi
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
exit 1
fi
if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
BM_FASTBOOT_NFSROOT=1
fi
if [ -z "$BM_FASTBOOT_NFSROOT" ]; then
if [ -z "$BM_WEBDAV_IP" -o -z "$BM_WEBDAV_PORT" ]; then
echo "BM_WEBDAV_IP and/or BM_WEBDAV_PORT is not set - no results will be uploaded from DUT!"
WEBDAV_CMDLINE=""
else
WEBDAV_CMDLINE="webdav=http://$BM_WEBDAV_IP:$BM_WEBDAV_PORT"
fi
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results/
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
# Root on NFS, no need for an initramfs.
rm -f rootfs.cpio.gz
touch rootfs.cpio
gzip rootfs.cpio
else
# Create the rootfs in a temp dir
rsync -a --delete $BM_ROOTFS/ rootfs/
. $BM/rootfs-setup.sh rootfs
# Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
# these devices use it and it would take up space in the initrd.
if [ -n "$PIGLIT_PROFILES" ]; then
EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
else
EXCLUDE_FILTER="piglit|python"
fi
pushd rootfs
find -H | \
egrep -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
egrep -v "traces-db|apitrace|renderdoc" | \
egrep -v $EXCLUDE_FILTER | \
cpio -H newc -o | \
xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
popd
# Start nginx to get results from DUT
if [ -n "$WEBDAV_CMDLINE" ]; then
ln -s `pwd`/results /results
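# The first sed below points the WebDAV site at $BM_WEBDAV_PORT; the second
# runs nginx as root, presumably so PUT requests can write into /results.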
sed -i s/80/$BM_WEBDAV_PORT/g /etc/nginx/sites-enabled/default
sed -i s/www-data/root/g /etc/nginx/nginx.conf
nginx
fi
fi
# Make the combined kernel image and dtb for passing to fastboot. For normal
# Mesa development, we build the kernel and store it in the docker container
# that this script is running in.
#
# However, container builds are expensive, so when you're hacking on the
# kernel, it's nice to be able to skip the half-hour container build, plus
# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
# fetch them instead of looking in the container.
if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
apt install -y wget
wget $BM_KERNEL -O kernel
wget $BM_DTB -O dtb
cat kernel dtb > Image.gz-dtb
rm kernel dtb
else
cat $BM_KERNEL $BM_DTB > Image.gz-dtb
fi
mkdir -p artifacts
abootimg \
--create artifacts/fastboot.img \
-k Image.gz-dtb \
-r rootfs.cpio.gz \
-c cmdline="$BM_CMDLINE $WEBDAV_CMDLINE"
rm Image.gz-dtb
export PATH=$BM:$PATH
# Start background command for talking to serial if we have one.
if [ -n "$BM_SERIAL_SCRIPT" ]; then
$BM_SERIAL_SCRIPT > results/serial-output.txt &
while [ ! -e results/serial-output.txt ]; do
sleep 1
done
fi
set +e
$BM/fastboot_run.py \
--dev="$BM_SERIAL" \
--fbserial="$BM_FASTBOOT_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN"
ret=$?
set -e
if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
fi
exit $ret


@@ -1,113 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading
class FastbootRun:
    def __init__(self, args):
        self.powerup = args.powerup
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "R SERIAL> ")
        self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(ser=args.fbserial)

    def print_error(self, message):
        RED = '\033[0;31m'
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd):
        print("Running '{}'".format(cmd))
        return os.system(cmd)

    def run(self):
        if self.logged_system(self.powerup) != 0:
            return 1

        fastboot_ready = False
        for line in self.ser.lines():
            if re.search("fastboot: processing commands", line) or \
                    re.search("Listening for fastboot command on", line):
                fastboot_ready = True
                break

            if re.search("data abort", line):
                return 1

        if not fastboot_ready:
            self.print_error("Failed to get to fastboot prompt")
            return 1

        if self.logged_system(self.fastboot) != 0:
            return 1

        for line in self.ser.lines():
            if re.search("---. end Kernel panic", line):
                return 1

            # The db820c boards intermittently reboot. Just restart the run
            # if we see a reboot after we got past fastboot.
            if re.search("PON REASON", line):
                self.print_error("Detected spontaneous reboot, restarting run...")
                return 2

            # db820c sometimes wedges around iommu fault recovery
            if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
                self.print_error(
                    "Detected kernel soft lockup, restarting run...")
                return 2

            result = re.search(r"bare-metal result: (\S*)", line)
            if result:
                if result.group(1) == "pass":
                    return 0
                else:
                    return 1

        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--fbserial', type=str, help='fastboot serial number of the board', required=True)
    args = parser.parse_args()

    fastboot = FastbootRun(args)

    while True:
        retval = fastboot.run()
        if retval != 2:
            break

    fastboot.logged_system(args.powerdown)

    sys.exit(retval)


if __name__ == '__main__':
    main()


@@ -1,10 +0,0 @@
#!/bin/bash
relay=$1
if [ -z "$relay" ]; then
echo "Must supply a relay arg"
exit 1
fi
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay


@@ -1,19 +0,0 @@
#!/usr/bin/python3
import sys
import serial
mode = sys.argv[1]
relay = sys.argv[2]
# For our relays, "off" means "board is powered".
mode_swap = {
    "on": "off",
    "off": "on",
}
mode = mode_swap[mode]
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
command = "relay {} {}\n\r".format(mode, relay)
ser.write(command.encode())
ser.close()


@@ -1,12 +0,0 @@
#!/bin/bash
relay=$1
if [ -z "$relay" ]; then
echo "Must supply a relay arg"
exit 1
fi
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py off $relay
sleep 5
$CI_PROJECT_DIR/install/bare-metal/google-power-relay.py on $relay


@@ -1,69 +0,0 @@
#!/bin/sh
set -ex
mount -t proc none /proc
mount -t sysfs none /sys
mount -t devtmpfs none /dev || echo possibly already mounted
mkdir -p /dev/pts
mount -t devpts devpts /dev/pts
mount -t tmpfs tmpfs /tmp
. /set-job-env-vars.sh
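# BM_KERNEL_MODULES is a comma-separated list; load each module individually.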
[ -z "$BM_KERNEL_MODULES" ] || echo -n $BM_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp
echo "nameserver 8.8.8.8" > /etc/resolv.conf
# Not all DUTs have network
sntp -sS pool.ntp.org || true
# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them).
./capture-devcoredump.sh &
# If we want Xorg to be running for the test, then we start it up before the
# BARE_METAL_TEST_SCRIPT because we need to use xinit to start X (otherwise
# without using -displayfd you can race with Xorg's startup), but xinit will eat
# your client's return code
if [ -n "$BM_START_XORG" ]; then
echo "touch /xorg-started; sleep 100000" > /xorg-script
env \
LD_LIBRARY_PATH=/install/lib/ \
LIBGL_DRIVERS_PATH=/install/lib/dri/ \
xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -dpms -logfile /Xorg.0.log &
# Wait for xorg to be ready for connections.
for i in 1 2 3 4 5; do
if [ -e /xorg-started ]; then
break
fi
sleep 5
done
export DISPLAY=:0
fi
if sh $BARE_METAL_TEST_SCRIPT; then
OK=1
else
OK=0
fi
# upload artifacts via webdav
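# The webdav= argument is appended to the kernel command line by the
# submitting fastboot script when a WebDAV upload endpoint is configured.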
WEBDAV=$(cat /proc/cmdline | tr " " "\n" | grep webdav | cut -d '=' -f 2 || true)
if [ -n "$WEBDAV" ]; then
find /results -type f -exec curl -T {} $WEBDAV/{} \;
fi
if [ $OK -eq 1 ]; then
echo "bare-metal result: pass"
else
echo "bare-metal result: fail"
fi
# Wait until the job would have timed out anyway, so we don't spew a "init
# exited" panic.
sleep 6000


@@ -1,20 +0,0 @@
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
location / {
dav_methods PUT;
dav_ext_methods PROPFIND OPTIONS;
dav_access user:rw group:rw all:r;
client_body_temp_path /tmp;
client_max_body_size 0;
create_full_put_path on;
root /results;
autoindex on;
}
}


@@ -1,17 +0,0 @@
#!/bin/bash
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must supply the PoE Interface to power up"
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must supply the PoE Switch host"
exit 1
fi
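# mib-2.105.1.1.1.3 is pethPsePortAdminEnable from the PoE MIB; writing 1
# enables power on the port and 2 disables it. The interface number appears
# to be offset by 48 to match this particular switch's port indexing.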
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
SNMP_ON="i 1"
SNMP_OFF="i 2"
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"


@@ -1,19 +0,0 @@
#!/bin/bash
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must supply the PoE Interface to power up"
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must supply the PoE Switch host"
exit 1
fi
SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.`expr 48 + $BM_POE_INTERFACE`"
SNMP_ON="i 1"
SNMP_OFF="i 2"
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
sleep 3s
flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"


@@ -1,146 +0,0 @@
#!/bin/bash
# Boot script for devices attached to a PoE switch, using NFS for the root
# filesystem.
# We're run from the root of the repo; make a helper var for our paths
BM=$CI_PROJECT_DIR/install/bare-metal
# Runner config checks
if [ -z "$BM_SERIAL" ]; then
echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
echo "This is the serial port to listen the device."
exit 1
fi
if [ -z "$BM_POE_ADDRESS" ]; then
echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch address to connect for powering up/down devices."
exit 1
fi
if [ -z "$BM_POE_USERNAME" ]; then
echo "Must set BM_POE_USERNAME in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch username."
exit 1
fi
if [ -z "$BM_POE_PASSWORD" ]; then
echo "Must set BM_POE_PASSWORD in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch password."
exit 1
fi
if [ -z "$BM_POE_INTERFACE" ]; then
echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
echo "This is the PoE switch interface where the device is connected."
exit 1
fi
if [ -z "$BM_POWERUP" ]; then
echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power up the device and begin its boot sequence."
exit 1
fi
if [ -z "$BM_POWERDOWN" ]; then
echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
echo "This is a shell script that should power off the device."
exit 1
fi
if [ ! -d /nfs ]; then
echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
exit 1
fi
if [ ! -d /tftp ]; then
echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
exit 1
fi
# job config checks
if [ -z "$BM_ROOTFS" ]; then
echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
exit 1
fi
if [ -z "$BM_BOOTFS" ]; then
echo "Must set /boot files for the TFTP boot in the job's variables"
exit 1
fi
if [ -z "$BM_CMDLINE" ]; then
echo "Must set BM_CMDLINE to your board's kernel command line arguments"
exit 1
fi
if [ -z "$BM_BOOTCONFIG" ]; then
echo "Must set BM_BOOTCONFIG to your board's required boot configuration arguments"
exit 1
fi
set -ex
# Clear out any previous run's artifacts.
rm -rf results/
mkdir -p results
# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
# state, since it's volume-mounted on the host.
rsync -a --delete $BM_ROOTFS/ /nfs/
# If BM_BOOTFS is a URL, download it
if echo $BM_BOOTFS | grep -q http; then
apt install -y wget
wget ${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS -O /tmp/bootfs.tar
BM_BOOTFS=/tmp/bootfs.tar
fi
# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
if [ -f $BM_BOOTFS ]; then
mkdir -p /tmp/bootfs
tar xf $BM_BOOTFS -C /tmp/bootfs
BM_BOOTFS=/tmp/bootfs
fi
# Install kernel modules (they could be in either /lib/modules or
# /usr/lib/modules, but we want to install them in the latter)
[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a --delete $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
[ -d $BM_BOOTFS/lib/modules ] && rsync -a --delete $BM_BOOTFS/lib/modules/ /nfs/usr/lib/modules/
# Install kernel image + bootloader files
rsync -a --delete $BM_BOOTFS/boot/ /tftp/
# Create the rootfs in the NFS directory
mkdir -p /nfs/results
. $BM/rootfs-setup.sh /nfs
echo "$BM_CMDLINE" > /tftp/cmdline.txt
# Add some required options in config.txt
printf "$BM_BOOTCONFIG" >> /tftp/config.txt
set +e
ATTEMPTS=2
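# $((ATTEMPTS--)) tests the current value before decrementing, so with
# ATTEMPTS=2 the body runs at most twice; a run that gets past the boot
# sequence sets ATTEMPTS=0 to stop early.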
while [ $((ATTEMPTS--)) -gt 0 ]; do
python3 $BM/poe_run.py \
--dev="$BM_SERIAL" \
--powerup="$BM_POWERUP" \
--powerdown="$BM_POWERDOWN" \
--timeout="${BM_POE_TIMEOUT:-60}"
ret=$?
if [ $ret -eq 2 ]; then
echo "Did not detect boot sequence, retrying..."
else
ATTEMPTS=0
fi
done
set -e
# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
# will look for them.
cp -Rp /nfs/results/. results/
exit $ret


@@ -1,96 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Igalia, S.L.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import os
import re
from serial_buffer import SerialBuffer
import sys
import threading
class PoERun:
    def __init__(self, args):
        self.powerup = args.powerup
        self.powerdown = args.powerdown
        self.ser = SerialBuffer(args.dev, "results/serial-output.txt", "", args.timeout)

    def print_error(self, message):
        RED = '\033[0;31m'
        NO_COLOR = '\033[0m'
        print(RED + message + NO_COLOR)

    def logged_system(self, cmd):
        print("Running '{}'".format(cmd))
        return os.system(cmd)

    def run(self):
        if self.logged_system(self.powerup) != 0:
            return 1

        boot_detected = False
        for line in self.ser.lines():
            if re.search("Booting Linux", line):
                boot_detected = True
                break

        if not boot_detected:
            self.print_error("Something went wrong; couldn't detect the boot startup sequence")
            return 2

        for line in self.ser.lines():
            if re.search("---. end Kernel panic", line):
                return 1

            # Binning memory problems
            if re.search("binner overflow mem", line):
                self.print_error("Memory overflow in the binner; GPU hang")
                return 1

            result = re.search(r"bare-metal result: (\S*)", line)
            if result:
                if result.group(1) == "pass":
                    return 0
                else:
                    return 1

        self.print_error("Reached the end of the CPU serial log without finding a result")
        return 2


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev', type=str, help='Serial device to monitor', required=True)
    parser.add_argument('--powerup', type=str, help='shell command for rebooting', required=True)
    parser.add_argument('--powerdown', type=str, help='shell command for powering off', required=True)
    parser.add_argument('--timeout', type=int, default=60,
                        help='time in seconds to wait for activity', required=False)
    args = parser.parse_args()

    poe = PoERun(args)
    retval = poe.run()

    poe.logged_system(args.powerdown)
    sys.exit(retval)


if __name__ == '__main__':
    main()


@@ -1,92 +0,0 @@
#!/bin/bash
rootfs_dst=$1
mkdir -p $rootfs_dst/results
# Set up the init script that brings up the system.
cp $BM/init.sh $rootfs_dst/init
cp $BM/capture-devcoredump.sh $rootfs_dst/
set +x
# Pass through relevant env vars from the gitlab job to the baremetal init script
touch $rootfs_dst/set-job-env-vars.sh
chmod +x $rootfs_dst/set-job-env-vars.sh
for var in \
BARE_METAL_TEST_SCRIPT \
BM_KERNEL_MODULES \
BM_START_XORG \
CI_COMMIT_BRANCH \
CI_COMMIT_TITLE \
CI_JOB_ID \
CI_JOB_JWT \
CI_JOB_URL \
CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
CI_MERGE_REQUEST_TITLE \
CI_NODE_INDEX \
CI_NODE_TOTAL \
CI_PAGES_DOMAIN \
CI_PIPELINE_ID \
CI_PROJECT_NAME \
CI_PROJECT_PATH \
CI_PROJECT_ROOT_NAMESPACE \
CI_RUNNER_DESCRIPTION \
CI_SERVER_URL \
DEQP_CASELIST_FILTER \
DEQP_CONFIG \
DEQP_EXPECTED_RENDERER \
DEQP_FRACTION \
DEQP_HEIGHT \
DEQP_NO_SAVE_RESULTS \
DEQP_PARALLEL \
DEQP_RESULTS_DIR \
DEQP_RUNNER_OPTIONS \
DEQP_VARIANT \
DEQP_VER \
DEQP_WIDTH \
DEVICE_NAME \
DRIVER_NAME \
EGL_PLATFORM \
FDO_CI_CONCURRENT \
FDO_UPSTREAM_REPO \
FD_MESA_DEBUG \
FLAKES_CHANNEL \
GPU_VERSION \
IR3_SHADER_DEBUG \
MESA_GL_VERSION_OVERRIDE \
MESA_GLSL_VERSION_OVERRIDE \
MESA_GLES_VERSION_OVERRIDE \
MINIO_HOST \
NIR_VALIDATE \
PIGLIT_FRACTION \
PIGLIT_HTML_SUMMARY \
PIGLIT_JUNIT_RESULTS \
PIGLIT_OPTIONS \
PIGLIT_PLATFORM \
PIGLIT_PROFILES \
PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
PIGLIT_REPLAY_DESCRIPTION_FILE \
PIGLIT_REPLAY_DEVICE_NAME \
PIGLIT_REPLAY_EXTRA_ARGS \
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL \
PIGLIT_REPLAY_UPLOAD_TO_MINIO \
PIGLIT_RESULTS \
PIGLIT_TESTS \
TEST_LD_PRELOAD \
TU_DEBUG \
VK_CPU \
VK_DRIVER \
; do
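# ${!var+x} tests whether the variable named by $var is set (even if empty),
# and ${!var@Q} expands its value quoted so the generated script can be
# safely sourced again on the DUT.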
if [ -n "${!var+x}" ]; then
echo "export $var=${!var@Q}" >> $rootfs_dst/set-job-env-vars.sh
fi
done
echo "Variables passed through:"
cat $rootfs_dst/set-job-env-vars.sh
set -x
# Add the Mesa drivers we built, and make a consistent symlink to them.
mkdir -p $rootfs_dst/$CI_PROJECT_DIR
rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
ln -sf $CI_PROJECT_DIR/install $rootfs_dst/install


@@ -1,153 +0,0 @@
#!/usr/bin/env python3
#
# Copyright © 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
from datetime import datetime, timezone
import queue
import serial
import threading
import time
class SerialBuffer:
    def __init__(self, dev, filename, prefix, timeout=None):
        self.filename = filename
        self.dev = dev

        if dev:
            self.f = open(filename, "wb+")
            self.serial = serial.Serial(dev, 115200, timeout=timeout if timeout else 10)
        else:
            self.f = open(filename, "rb")

        self.byte_queue = queue.Queue()
        self.line_queue = queue.Queue()
        self.prefix = prefix
        self.timeout = timeout
        self.sentinel = object()

        if self.dev:
            self.read_thread = threading.Thread(
                target=self.serial_read_thread_loop, daemon=True)
        else:
            self.read_thread = threading.Thread(
                target=self.serial_file_read_thread_loop, daemon=True)
        self.read_thread.start()

        self.lines_thread = threading.Thread(
            target=self.serial_lines_thread_loop, daemon=True)
        self.lines_thread.start()

    # Thread that just reads the bytes from the serial device to keep its
    # buffer from overflowing. If a timeout was given and nothing is received
    # within it, the thread finalizes.
    def serial_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.dev
        self.byte_queue.put(greet.encode())

        while True:
            try:
                b = self.serial.read()
                if len(b) > 0:
                    self.byte_queue.put(b)
                elif self.timeout:
                    self.byte_queue.put(self.sentinel)
                    break
            except Exception as err:
                print(self.prefix + str(err))
                self.byte_queue.put(self.sentinel)
                break

    # Thread that just reads the bytes from the file of serial output that some
    # other process is appending to.
    def serial_file_read_thread_loop(self):
        greet = "Serial thread reading from %s\n" % self.filename
        self.byte_queue.put(greet.encode())

        while True:
            line = self.f.readline()
            if line:
                self.byte_queue.put(line)
            else:
                time.sleep(0.1)

    # Thread that processes the stream of bytes to 1) log to stdout, 2) log to
    # file, 3) add to the queue of lines to be read by program logic
    def serial_lines_thread_loop(self):
        line = bytearray()
        while True:
            bytes = self.byte_queue.get(block=True)

            if bytes == self.sentinel:
                self.read_thread.join()
                self.line_queue.put(self.sentinel)
                break

            # Write our data to the output file if we're the ones reading from
            # the serial device
            if self.dev:
                self.f.write(bytes)
                self.f.flush()

            for b in bytes:
                line.append(b)
                if b == b'\n'[0]:
                    line = line.decode(errors="replace")

                    time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
                    print("{endc}{time} {prefix}{line}".format(
                        time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')

                    self.line_queue.put(line)
                    line = bytearray()

    def get_line(self):
        line = self.line_queue.get()
        if line == self.sentinel:
            self.lines_thread.join()
        return line

    def lines(self):
        return iter(self.get_line, self.sentinel)


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--dev', type=str, help='Serial device')
    parser.add_argument('--file', type=str,
                        help='Filename for serial output', required=True)
    parser.add_argument('--prefix', type=str,
                        help='Prefix for logging serial to stdout', nargs='?')

    args = parser.parse_args()

    ser = SerialBuffer(args.dev, args.file, args.prefix or "")
    for line in ser.lines():
        # We're just using this as a logger, so eat the produced lines and drop
        # them
        pass


if __name__ == '__main__':
    main()


@@ -1,60 +0,0 @@
#!/bin/bash
set -ex
EPHEMERAL="\
rdfind \
unzip \
"
apt-get install -y --no-remove $EPHEMERAL
# Fetch the NDK and extract just the toolchain we want.
ndk=android-ndk-r21d
wget -O $ndk.zip https://dl.google.com/android/repository/$ndk-linux-x86_64.zip
unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
rm $ndk.zip
# Since it was packed as a zip file, symlinks/hardlinks got turned into
# duplicate files. Turn them into hardlinks to save on container space.
rdfind -makehardlinks true -makeresultsfile false /android-ndk-r21d/
# Drop some large tools we won't use in this build.
find /android-ndk-r21d/ -type f | egrep -i "clang-check|clang-tidy|lldb" | xargs rm -f
sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3"
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x86 x86
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android arm armv8
sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl armv7a-linux-androideabi
# Not using build-libdrm.sh because we don't want its cleanup after building
# each arch. Fetch and extract now.
export LIBDRM_VERSION=libdrm-2.4.102
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
for arch in \
x86_64-linux-android \
i686-linux-android \
aarch64-linux-android \
arm-linux-androideabi ; do
cd $LIBDRM_VERSION
rm -rf build-$arch
meson build-$arch \
--cross-file=/cross_file-$arch.txt \
--libdir=lib/$arch \
-Dlibkms=false \
-Dnouveau=false \
-Dvc4=false \
-Detnaviv=false \
-Dfreedreno=false \
-Dintel=false \
-Dcairo-tests=false
ninja -C build-$arch install
cd ..
done
rm -rf $LIBDRM_VERSION
apt-get purge -y $EPHEMERAL


@@ -1,54 +0,0 @@
CONFIG_LOCALVERSION="ccu"
CONFIG_DEBUG_KERNEL=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_MFD_RK808=y
CONFIG_REGULATOR_RK808=y
CONFIG_RTC_DRV_RK808=y
CONFIG_COMMON_CLK_RK808=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=n
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=n
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y


@@ -1,137 +0,0 @@
CONFIG_LOCALVERSION="ccu"
CONFIG_DEBUG_KERNEL=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_ROCKCHIP=y
CONFIG_DRM_PANFROST=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_MSM=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7533=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_ROCKCHIP_CDN_DP=n
CONFIG_SPI_ROCKCHIP=y
CONFIG_PWM_ROCKCHIP=y
CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_DWMAC_ROCKCHIP=y
CONFIG_STMMAC_ETH=y
CONFIG_TYPEC_FUSB302=y
CONFIG_TYPEC=y
CONFIG_TYPEC_TCPM=y
# MSM platform bits
CONFIG_QCOM_RPMHPD=y
CONFIG_QCOM_RPMPD=y
CONFIG_SDM_GPUCC_845=y
CONFIG_SDM_VIDEOCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SDM_LPASSCC_845=y
CONFIG_SDM_CAMCC_845=y
CONFIG_RESET_QCOM_PDC=y
CONFIG_DRM_TI_SN65DSI86=y
CONFIG_I2C_QCOM_GENI=y
CONFIG_SPI_QCOM_GENI=y
CONFIG_PHY_QCOM_QUSB2=y
CONFIG_PHY_QCOM_QMP=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_SPMI_TEMP_ALARM=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
CONFIG_POWER_RESET_QCOM_PON=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_MSM8916=y
CONFIG_INTERCONNECT_QCOM_OSM_L3=y
CONFIG_INTERCONNECT_QCOM_SC7180=y
CONFIG_QCOM_WDT=y
CONFIG_CRYPTO_DEV_QCOM_RNG=y
# db410c ethernet
CONFIG_USB_RTL8152=y
# db820c ethernet
CONFIG_ATL1C=y
CONFIG_ARCH_ALPINE=n
CONFIG_ARCH_BCM2835=n
CONFIG_ARCH_BCM_IPROC=n
CONFIG_ARCH_BERLIN=n
CONFIG_ARCH_BRCMSTB=n
CONFIG_ARCH_EXYNOS=n
CONFIG_ARCH_K3=n
CONFIG_ARCH_LAYERSCAPE=n
CONFIG_ARCH_LG1K=n
CONFIG_ARCH_HISI=n
CONFIG_ARCH_MEDIATEK=n
CONFIG_ARCH_MVEBU=n
CONFIG_ARCH_SEATTLE=n
CONFIG_ARCH_SYNQUACER=n
CONFIG_ARCH_RENESAS=n
CONFIG_ARCH_R8A774A1=n
CONFIG_ARCH_R8A774C0=n
CONFIG_ARCH_R8A7795=n
CONFIG_ARCH_R8A7796=n
CONFIG_ARCH_R8A77965=n
CONFIG_ARCH_R8A77970=n
CONFIG_ARCH_R8A77980=n
CONFIG_ARCH_R8A77990=n
CONFIG_ARCH_R8A77995=n
CONFIG_ARCH_STRATIX10=n
CONFIG_ARCH_TEGRA=n
CONFIG_ARCH_SPRD=n
CONFIG_ARCH_THUNDER=n
CONFIG_ARCH_THUNDER2=n
CONFIG_ARCH_UNIPHIER=n
CONFIG_ARCH_VEXPRESS=n
CONFIG_ARCH_XGENE=n
CONFIG_ARCH_ZX=n
CONFIG_ARCH_ZYNQMP=n
# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_USER_HELPER=n
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
# For amlogic
CONFIG_MESON_GXL_PHY=y
CONFIG_MDIO_BUS_MUX_MESON_G12A=y


@@ -1,88 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
apt-get -y install ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
echo 'deb https://deb.debian.org/debian buster main' >/etc/apt/sources.list.d/buster.list
apt-get update
EPHEMERAL="
python3-pytest-runner
python3-wheel
"
apt-get -y install \
abootimg \
autoconf \
automake \
bc \
bison \
ccache \
cmake \
debootstrap \
fastboot \
flex \
g++ \
git \
kmod \
libasan6 \
libdrm-dev \
libelf-dev \
libexpat1-dev \
libx11-dev \
libx11-xcb-dev \
libxcb-dri2-0-dev \
libxcb-dri3-dev \
libxcb-glx0-dev \
libxcb-present-dev \
libxcb-randr0-dev \
libxcb-shm0-dev \
libxcb-xfixes0-dev \
libxdamage-dev \
libxext-dev \
libxrandr-dev \
libxshmfence-dev \
libxxf86vm-dev \
llvm-11-dev \
meson \
pkg-config \
python-is-python3 \
python3-aiohttp \
python3-jinja2 \
python3-mako \
python3-pil \
python3-pip \
python3-requests \
python3-setuptools \
python3-yaml \
python3-zmq \
u-boot-tools \
unzip \
wget \
xz-utils \
zlib1g-dev \
$EPHEMERAL
# Update lavacli to v1.1+
pip3 install git+https://git.lavasoftware.org/lava/lavacli@3db3ddc45e5358908bc6a17448059ea2340492b7
# Not available anymore in bullseye
apt-get install -y --no-remove -t buster \
android-sdk-ext4-utils
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
apt-get purge -y $EPHEMERAL
arch=armhf
. .gitlab-ci/container/cross_build.sh
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
EXTRA_MESON_ARGS=
. .gitlab-ci/container/build-libdrm.sh
. .gitlab-ci/container/container_post_build.sh


@@ -1,40 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
############### Install packages for baremetal testing
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
apt-get update
apt-get install -y --no-remove \
abootimg \
cpio \
fastboot \
netcat \
nginx-full \
procps \
python-is-python3 \
python3-distutils \
python3-minimal \
python3-serial \
rsync \
snmp \
unzip \
wget
# setup nginx
sed -i '/gzip_/ s/#\ //g' /etc/nginx/nginx.conf
cp .gitlab-ci/bare-metal/nginx-default-site /etc/nginx/sites-enabled/default
# setup SNMPv2 SMI MIB
wget https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
-O /usr/share/snmp/mibs/SNMPv2-SMI.txt
arch=arm64 . .gitlab-ci/container/baremetal_build.sh
arch=armhf . .gitlab-ci/container/baremetal_build.sh
# This firmware file from Debian bullseye causes hangs
wget https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/plain/qcom/a530_pfp.fw?id=d5f9eea5a251d43412b07f5295d03e97b89ac4a5 \
-O /rootfs-arm64/lib/firmware/qcom/a530_pfp.fw


@@ -1,35 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
# Fetch the arm-built rootfs image and unpack it in our x86 container (saves
# network transfer, disk usage, and runtime on test jobs)
if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
else
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
fi
wget ${ARTIFACTS_URL}/lava-rootfs.tgz -O rootfs.tgz
mkdir -p /rootfs-$arch
tar -C /rootfs-$arch '--exclude=./dev/*' -zxf rootfs.tgz
rm rootfs.tgz
if [[ $arch == "arm64" ]]; then
mkdir -p /baremetal-files
pushd /baremetal-files
wget ${ARTIFACTS_URL}/Image
wget ${ARTIFACTS_URL}/Image.gz
wget ${ARTIFACTS_URL}/cheza-kernel
DEVICE_TREES="apq8016-sbc.dtb apq8096-db820c.dtb"
for DTB in $DEVICE_TREES; do
wget ${ARTIFACTS_URL}/$DTB
done
popd
fi


@@ -1,9 +0,0 @@
#!/bin/bash
set -ex
cargo install --locked deqp-runner \
-j ${FDO_CI_CONCURRENT:-4} \
--version 0.6.5 \
--root /usr/local \
$EXTRA_CARGO_ARGS


@@ -1,63 +0,0 @@
#!/bin/bash
set -ex
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/KhronosGroup/VK-GL-CTS.git \
-b vulkan-cts-1.2.6.0 \
--depth 1 \
/VK-GL-CTS
pushd /VK-GL-CTS
# --insecure is due to SSL cert failures hitting sourceforge for zlib and
# libpng (sigh). The archives get their checksums checked anyway, and git
# always goes through ssh or https.
python3 external/fetch_sources.py --insecure
mkdir -p /deqp
# Save the testlog stylesheets:
cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
popd
pushd /deqp
cmake -S /VK-GL-CTS -B . -G Ninja \
-DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
-DCMAKE_BUILD_TYPE=Release \
$EXTRA_CMAKE_ARGS
ninja
# Copy out the mustpass lists we want.
mkdir /deqp/mustpass
cp /VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt \
/deqp/mustpass/vk-master.txt
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
/deqp/mustpass/.
cp \
/deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
/deqp/mustpass/.
# Save *some* executor utils, but otherwise strip things down
# to reduce the dEQP build size:
mkdir /deqp/executor.save
cp /deqp/executor/testlog-to-* /deqp/executor.save
rm -rf /deqp/executor
mv /deqp/executor.save /deqp/executor
rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
rm -rf /deqp/external/openglcts/modules/cts-runner
rm -rf /deqp/modules/internal
rm -rf /deqp/execserver
rm -rf /deqp/modules/egl
rm -rf /deqp/framework
find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
${STRIP_CMD:-strip} external/openglcts/modules/glcts
${STRIP_CMD:-strip} modules/*/deqp-*
du -sh *
rm -rf /VK-GL-CTS
popd


@@ -1,14 +0,0 @@
#!/bin/bash
set -ex
git clone https://github.com/ValveSoftware/Fossilize.git
cd Fossilize
git checkout 72088685d90bc814d14aad5505354ffa8a642789
git submodule update --init
mkdir build
cd build
cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C . install
cd ../..
rm -rf Fossilize


@@ -1,19 +0,0 @@
#!/bin/bash
set -ex
GFXRECONSTRUCT_VERSION=3738decc2f4f9ff183818e5ab213a75a79fb7ab1
git clone https://github.com/LunarG/gfxreconstruct.git --single-branch -b master --no-checkout /gfxreconstruct
pushd /gfxreconstruct
git checkout "$GFXRECONSTRUCT_VERSION"
git submodule update --init
git submodule update
cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release
ninja -C _build gfxrecon-replay gfxrecon-info
mkdir -p build/bin
install _build/tools/replay/gfxrecon-replay build/bin
install _build/tools/info/gfxrecon-info build/bin
strip build/bin/*
find . -not -path './build' -not -path './build/*' -delete
popd


@@ -1,30 +0,0 @@
#!/bin/bash
set -ex
export LLVM_CONFIG="llvm-config-11"
$LLVM_CONFIG --version
git config --global user.email "mesa@example.com"
git config --global user.name "Mesa CI"
git clone \
https://github.com/llvm/llvm-project \
--depth 1 \
-b llvmorg-12.0.0-rc3 \
/llvm-project
mkdir /libclc
pushd /libclc
cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG=$LLVM_CONFIG -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
ninja
ninja install
popd
# Work around CMake vs. Debian packaging.
mkdir -p /usr/lib/clc
ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
du -sh *
rm -rf /libclc /llvm-project


@@ -1,14 +0,0 @@
#!/bin/bash
set -ex
export LIBDRM_VERSION=libdrm-2.4.105
wget https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
tar -xvf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
cd $LIBDRM_VERSION
meson build -D vc4=false -D freedreno=false -D etnaviv=false $EXTRA_MESON_ARGS
ninja -C build install
cd ..
rm -rf $LIBDRM_VERSION


@@ -1,27 +0,0 @@
#!/bin/bash
set -ex
if [ -n "$INCLUDE_OPENCL_TESTS" ]; then
PIGLIT_OPTS="-DPIGLIT_BUILD_CL_TESTS=ON"
fi
git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
pushd /piglit
git checkout 6a4be9e9946df310d9402f995f371c7deb8c27ba
patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
ninja $PIGLIT_BUILD_TARGETS
find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
rm -rf target_api
if [ "x$PIGLIT_BUILD_TARGETS" = "xpiglit_replayer" ]; then
find ! -regex "^\.$" \
! -regex "^\.\/piglit.*" \
! -regex "^\.\/framework.*" \
! -regex "^\.\/bin$" \
! -regex "^\.\/bin\/replayer\.py" \
! -regex "^\.\/templates.*" \
! -regex "^\.\/tests$" \
! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
fi
popd


@@ -1,31 +0,0 @@
#!/bin/bash
# Note that this script is not actually "building" rust, but build- is the
# convention for the shared helpers for putting stuff in our containers.
set -ex
# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
# are just available to all build jobs.
mkdir -p $HOME/.cargo
ln -s /usr/local/bin $HOME/.cargo/bin
# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
# version of the compiler, rather than whatever the container's Debian comes
# with.
#
# Pick the rust compiler (1.41) available in Debian stable, and pick a specific
# snapshot from rustup so the compiler doesn't drift on us.
wget https://sh.rustup.rs -O - | \
sh -s -- -y --default-toolchain 1.41.1-2020-02-27
# Set up a config script for cross compiling -- cargo needs your system cc for
# linking in cross builds, but doesn't know what you want to use for system cc.
cat > /root/.cargo/config <<EOF
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
EOF


@@ -1,20 +0,0 @@
#!/bin/bash
set -ex
mkdir -p /epoxy
pushd /epoxy
wget -qO- https://github.com/anholt/libepoxy/releases/download/1.5.4/libepoxy-1.5.4.tar.xz | tar -xJ --strip-components=1
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /epoxy
VIRGLRENDERER_VERSION=43148d1115a12219a0560a538c9872d07c28c558
git clone https://gitlab.freedesktop.org/virgl/virglrenderer.git --single-branch --no-checkout /virglrenderer
pushd /virglrenderer
git checkout "$VIRGLRENDERER_VERSION"
meson build/ $EXTRA_MESON_ARGS
ninja -C build install
popd
rm -rf /virglrenderer


@@ -1,8 +0,0 @@
#!/bin/sh
apt-get autoremove -y --purge
# Clean up any build cache for rust.
rm -rf /.cargo
ccache --show-stats


@@ -1,30 +0,0 @@
#!/bin/sh
# Common setup among container builds before we get to building code.
export CCACHE_COMPILERCHECK=content
export CCACHE_COMPRESS=true
export CCACHE_DIR=/cache/mesa/ccache
export PATH=/usr/lib/ccache:$PATH
# CMake ignores $PATH, so we have to force CC/CXX to the ccache versions.
export CC="/usr/lib/ccache/gcc"
export CXX="/usr/lib/ccache/g++"
# Force linkers to gold, since it's so much faster for building. We can't use
# lld because we're on old debian and it's buggy. mingw fails meson builds
# with it with "meson.build:21:0: ERROR: Unable to determine dynamic linker"
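# The find/xargs below replaces each ld binary on $PATH with a symlink to its
# .gold variant, skipping the mingw linkers.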
find /usr/bin -name \*-ld -o -name ld | \
grep -v mingw | \
xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'
ccache --show-stats
# Make a wrapper script for ninja to always include the -j flags
echo '#!/bin/sh -x' > /usr/local/bin/ninja
echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"' >> /usr/local/bin/ninja
chmod +x /usr/local/bin/ninja
# Set MAKEFLAGS so that all make invocations in container builds include the
# flags (doesn't apply to non-container builds, but we don't run make there)
export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"


@@ -1,35 +0,0 @@
#!/bin/bash
ndk=$1
arch=$2
cpu_family=$3
cpu=$4
cross_file="/cross_file-$arch.txt"
# armv7 has the toolchain split between two names.
arch2=${5:-$2}
# Note that we disable C++ exceptions, because Mesa doesn't use exceptions,
# and allowing them in code generation means we get unwind symbols that break
# the libEGL and driver symbol tests.
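# The 29 suffix in ${arch2}29-clang selects the Android API level targeted by
# the NDK's clang wrapper.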
cat >$cross_file <<EOF
[binaries]
ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-ar'
c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}29-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '-static-libstdc++']
c_ld = 'lld'
cpp_ld = 'lld'
strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/$arch-strip'
pkgconfig = ['/usr/bin/pkg-config']
[host_machine]
system = 'linux'
cpu_family = '$cpu_family'
cpu = '$cpu'
endian = 'little'
[properties]
needs_exe_wrapper = true
EOF


@@ -1,38 +0,0 @@
#!/bin/sh
# Makes a .pc file in the Android NDK for meson to find its libraries.
set -ex
ndk="$1"
pc="$2"
cflags="$3"
libs="$4"
version="$5"
sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot
for arch in \
x86_64-linux-android \
i686-linux-android \
aarch64-linux-android \
arm-linux-androideabi; do
pcdir=$sysroot/usr/lib/$arch/pkgconfig
mkdir -p $pcdir
cat >$pcdir/$pc <<EOF
prefix=$sysroot
exec_prefix=$sysroot
libdir=$sysroot/usr/lib/$arch/29
sharedlibdir=$sysroot/usr/lib/$arch
includedir=$sysroot/usr/include
Name: zlib
Description: zlib compression library
Version: $version
Requires:
Libs: -L$sysroot/usr/lib/$arch/29 $libs
Cflags: -I$sysroot/usr/include $cflags
EOF
done
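# A hedged usage sketch (script name, NDK path and version string are
# illustrative); the Name/Description fields above are hardcoded, so this
# generator is effectively zlib-specific:
#   sh make-android-zlib-pkgconfig.sh /android-ndk zlib.pc "" "-lz" "1.2.11"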

View File

@@ -1,51 +0,0 @@
#!/bin/bash
arch=$1
cross_file="/cross_file-$arch.txt"
/usr/share/meson/debcrossgen --arch $arch -o "$cross_file"
# Explicitly set ccache path for cross compilers
sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"
if [ "$arch" = "i386" ]; then
# Work around a bug in debcrossgen that should be fixed in the next release
sed -i "s|cpu_family = 'i686'|cpu_family = 'x86'|g" "$cross_file"
fi
# Rely on qemu-user being configured in binfmt_misc on the host
sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"
# Add a line for rustc, which debcrossgen is missing.
cc=`sed -n 's|c = .\(.*\).|\1|p' < $cross_file`
if [[ "$arch" = "arm64" ]]; then
rust_target=aarch64-unknown-linux-gnu
elif [[ "$arch" = "armhf" ]]; then
rust_target=armv7-unknown-linux-gnueabihf
elif [[ "$arch" = "i386" ]]; then
rust_target=i686-unknown-linux-gnu
elif [[ "$arch" = "ppc64el" ]]; then
rust_target=powerpc64le-unknown-linux-gnu
elif [[ "$arch" = "s390x" ]]; then
rust_target=s390x-unknown-linux-gnu
else
echo "Needs rustc target mapping"
fi
sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"
# Set up cmake cross compile toolchain file for dEQP builds
toolchain_file="/toolchain-$arch.cmake"
if [[ "$arch" = "arm64" ]]; then
GCC_ARCH="aarch64-linux-gnu"
DE_CPU="DE_CPU_ARM_64"
CMAKE_ARCH=arm
elif [[ "$arch" = "armhf" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DE_CPU="DE_CPU_ARM"
CMAKE_ARCH=arm
fi
if [[ -n "$GCC_ARCH" ]]; then
echo "set(CMAKE_SYSTEM_NAME Linux)" > "$toolchain_file"
echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> "$toolchain_file"
echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)" >> "$toolchain_file"
echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)" >> "$toolchain_file"
echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkg-config\")" >> "$toolchain_file"
echo "set(DE_CPU $DE_CPU)" >> "$toolchain_file"
fi
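# The generated files are consumed further down the pipeline, roughly like
# this (a sketch based on the meson and cmake invocations elsewhere in this
# tree):
#   meson _build --cross-file "/cross_file-$arch.txt" ...
#   cmake .. -DCMAKE_TOOLCHAIN_FILE="/toolchain-$arch.cmake" ...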

View File

@@ -1,270 +0,0 @@
#!/bin/bash
set -ex
if [ $DEBIAN_ARCH = arm64 ]; then
ARCH_PACKAGES="firmware-qcom-media"
elif [ $DEBIAN_ARCH = amd64 ]; then
ARCH_PACKAGES="firmware-amd-graphics
libelf1
libllvm11
"
fi
INSTALL_CI_FAIRY_PACKAGES="git
python3-dev
python3-pip
python3-setuptools
python3-wheel
"
apt-get -y install --no-install-recommends \
$ARCH_PACKAGES \
$INSTALL_CI_FAIRY_PACKAGES \
apitrace \
ca-certificates \
curl \
firmware-realtek \
initramfs-tools \
libasan6 \
libexpat1 \
libpng16-16 \
libpython3.9 \
libsensors5 \
libvulkan1 \
libwaffle-1-0 \
libx11-6 \
libx11-xcb1 \
libxcb-dri2-0 \
libxcb-dri3-0 \
libxcb-glx0 \
libxcb-present0 \
libxcb-randr0 \
libxcb-shm0 \
libxcb-sync1 \
libxcb-xfixes0 \
libxdamage1 \
libxext6 \
libxfixes3 \
libxkbcommon0 \
libxrender1 \
libxshmfence1 \
libxxf86vm1 \
netcat-openbsd \
python3 \
python3-lxml \
python3-mako \
python3-numpy \
python3-packaging \
python3-pil \
python3-renderdoc \
python3-requests \
python3-simplejson \
python3-yaml \
sntp \
strace \
waffle-utils \
wget \
xinit \
xserver-xorg-core \
xz-utils
# Needed for ci-fairy, this revision is able to upload files to
# MinIO and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
apt-get purge -y \
$INSTALL_CI_FAIRY_PACKAGES
passwd root -d
chsh -s /bin/sh
cat > /init <<EOF
#!/bin/sh
export PS1=lava-shell:
exec sh
EOF
chmod +x /init
#######################################################################
# Strip the image to a small minimal system without removing the debian
# toolchain.
# xz compress firmware so it doesn't waste RAM at runtime on ramdisk systems
find /lib/firmware -type f -print0 | \
xargs -0r -P4 -n4 xz -T1 -C crc32
# Copy timezone file and remove tzdata package
rm -rf /etc/localtime
cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
UNNEEDED_PACKAGES="
libfdisk1
"
export DEBIAN_FRONTEND=noninteractive
# Removing unused packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
echo ${PACKAGE}
if ! apt-get remove --purge --yes "${PACKAGE}"
then
echo "WARNING: ${PACKAGE} isn't installed"
fi
done
apt-get autoremove --yes || true
# Dropping logs
rm -rf /var/log/*
# Dropping documentation, localization, i18n files, etc
rm -rf /usr/share/doc/*
rm -rf /usr/share/locale/*
rm -rf /usr/share/X11/locale/*
rm -rf /usr/share/man
rm -rf /usr/share/i18n/*
rm -rf /usr/share/info/*
rm -rf /usr/share/lintian/*
rm -rf /usr/share/common-licenses/*
rm -rf /usr/share/mime/*
# Dropping reportbug scripts
rm -rf /usr/share/bug
# Drop udev hwdb not required on a stripped system
rm -rf /lib/udev/hwdb.bin /lib/udev/hwdb.d/*
# Drop all gconv conversions && binaries
rm -rf usr/bin/iconv
rm -rf usr/sbin/iconvconfig
rm -rf usr/lib/*/gconv/
# Remove libusb database
rm -rf usr/sbin/update-usbids
rm -rf var/lib/usbutils/usb.ids
rm -rf usr/share/misc/usb.ids
#######################################################################
# Crush into a minimal production image to be deployed via some type of image
# updating system.
# IMPORTANT: The Debian system is no longer functional at this point,
# for example, apt and dpkg will stop working
UNNEEDED_PACKAGES="apt libapt-pkg6.0 "\
"ncurses-bin ncurses-base libncursesw6 libncurses6 "\
"perl-base "\
"debconf libdebconfclient0 "\
"e2fsprogs e2fslibs libfdisk1 "\
"insserv "\
"udev "\
"init-system-helpers "\
"bash "\
"cpio "\
"xz-utils "\
"passwd "\
"libsemanage1 libsemanage-common "\
"libsepol1 "\
"gpgv "\
"hostname "\
"adduser "\
"debian-archive-keyring "\
"libegl1-mesa-dev "\
"libegl-mesa0 "\
"libgl1-mesa-dev "\
"libgl1-mesa-dri "\
"libglapi-mesa "\
"libgles2-mesa-dev "\
"libglx-mesa0 "\
"mesa-common-dev "\
# Removing unneeded packages
for PACKAGE in ${UNNEEDED_PACKAGES}
do
echo "Forcing removal of ${PACKAGE}"
if ! dpkg --purge --force-remove-essential --force-depends "${PACKAGE}"
then
echo "WARNING: ${PACKAGE} isn't installed"
fi
done
# Show what's left package-wise before dropping dpkg itself
COLUMNS=300 dpkg-query -W --showformat='${Installed-Size;10}\t${Package}\n' | sort -k1,1n
# Drop dpkg
dpkg --purge --force-remove-essential --force-depends dpkg
# No apt or dpkg, no need for its configuration archives
rm -rf etc/apt
rm -rf etc/dpkg
# Drop directories not part of ostree
# Note that /var needs to exist as ostree bind mounts the deployment /var over
# it
rm -rf var/* opt srv share
# ca-certificates are in /etc drop the source
rm -rf usr/share/ca-certificates
# No bash, no need for completions
rm -rf usr/share/bash-completion
# No zsh, no need for completions
rm -rf usr/share/zsh/vendor-completions
# drop gcc python helpers
rm -rf usr/share/gcc
# Drop sysvinit leftovers
rm -rf etc/init.d
rm -rf etc/rc[0-6S].d
# Drop upstart helpers
rm -rf etc/init
# Various xtables helpers
rm -rf usr/lib/xtables
# Drop all locales
# TODO: only remaining locale is actually "C". Should we really remove it?
rm -rf usr/lib/locale/*
# partition helpers
rm -rf usr/sbin/*fdisk
# locale compiler
rm -rf usr/bin/localedef
# Systemd dns resolver
find usr etc -name '*systemd-resolve*' -prune -exec rm -r {} \;
# Systemd network configuration
find usr etc -name '*networkd*' -prune -exec rm -r {} \;
# systemd ntp client
find usr etc -name '*timesyncd*' -prune -exec rm -r {} \;
# systemd hw database manager
find usr etc -name '*systemd-hwdb*' -prune -exec rm -r {} \;
# No need for fuse
find usr etc -name '*fuse*' -prune -exec rm -r {} \;
# lsb init function leftovers
rm -rf usr/lib/lsb
# Only needed when adding libraries
rm -rf usr/sbin/ldconfig*
# Games, unused
rmdir usr/games
# Remove pam module to authenticate against a DB
# plus libdb-5.3.so that is only used by this pam module
rm -rf usr/lib/*/security/pam_userdb.so
rm -rf usr/lib/*/libdb-5.3.so
# remove NSS support for nis, nisplus and hesiod
rm -rf usr/lib/*/libnss_hesiod*
rm -rf usr/lib/*/libnss_nis*

View File

@@ -1,79 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
"
dpkg --add-architecture $arch
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
crossbuild-essential-$arch \
libelf-dev:$arch \
libexpat1-dev:$arch \
libpciaccess-dev:$arch \
libstdc++6:$arch \
libvulkan-dev:$arch \
libx11-dev:$arch \
libx11-xcb-dev:$arch \
libxcb-dri2-0-dev:$arch \
libxcb-dri3-dev:$arch \
libxcb-glx0-dev:$arch \
libxcb-present-dev:$arch \
libxcb-randr0-dev:$arch \
libxcb-shm0-dev:$arch \
libxcb-xfixes0-dev:$arch \
libxdamage-dev:$arch \
libxext-dev:$arch \
libxrandr-dev:$arch \
libxshmfence-dev:$arch \
libxxf86vm-dev:$arch \
wget
if [[ $arch != "armhf" ]]; then
if [[ $arch == "s390x" ]]; then
LLVM=9
else
LLVM=11
fi
# llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only
# with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get
# around this.
apt-get install -y --no-remove \
libclang-cpp${LLVM}:$arch \
libffi-dev:$arch \
libgcc-s1:$arch \
libtinfo-dev:$arch \
libz3-dev:$arch \
llvm-${LLVM}:$arch \
zlib1g
fi
. .gitlab-ci/container/create-cross-file.sh $arch
. .gitlab-ci/container/container_pre_build.sh
# dependencies where we want a specific version
EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)"
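# For reference, dpkg-architecture resolves the multiarch tuple used for the
# libdir above; expected output (not verified here) would be something like:
#   dpkg-architecture -A arm64 -qDEB_TARGET_MULTIARCH   # -> aarch64-linux-gnu
#   dpkg-architecture -A armhf -qDEB_TARGET_MULTIARCH   # -> arm-linux-gnueabihf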
. .gitlab-ci/container/build-libdrm.sh
apt-get purge -y \
$STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh
# This needs to be done after container_post_build.sh, or apt-get breaks in there
if [[ $arch != "armhf" ]]; then
apt-get download llvm-${LLVM}-{dev,tools}:$arch
dpkg -i --force-depends llvm-${LLVM}-*_${arch}.deb
rm llvm-${LLVM}-*_${arch}.deb
fi

View File

@@ -1,5 +0,0 @@
#!/bin/bash
arch=i386
. .gitlab-ci/container/cross_build.sh

View File

@@ -1,248 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
check_minio()
{
MINIO_PATH="${MINIO_HOST}/mesa-lava/$1/${MINIO_SUFFIX}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
if wget -q --method=HEAD "https://${MINIO_PATH}/done"; then
exit
fi
}
# If remote files are up-to-date, skip rebuilding them
check_minio "${FDO_UPSTREAM_REPO}"
check_minio "${CI_PROJECT_PATH}"
. .gitlab-ci/container/container_pre_build.sh
# Install rust, which we'll be using for deqp-runner. It will be cleaned up at the end.
. .gitlab-ci/container/build-rust.sh
if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
GCC_ARCH="aarch64-linux-gnu"
KERNEL_ARCH="arm64"
DEFCONFIG="arch/arm64/configs/defconfig"
DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
KERNEL_IMAGE_NAME="Image"
elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
KERNEL_ARCH="arm"
DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
DEVICE_TREES="arch/arm/boot/dts/rk3288-veyron-jaq.dtb arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dtb"
KERNEL_IMAGE_NAME="zImage"
. .gitlab-ci/container/create-cross-file.sh armhf
else
GCC_ARCH="x86_64-linux-gnu"
KERNEL_ARCH="x86_64"
DEFCONFIG="arch/x86/configs/x86_64_defconfig"
DEVICE_TREES=""
KERNEL_IMAGE_NAME="bzImage"
fi
# Determine if we're in a cross build.
if [[ -e /cross_file-$DEBIAN_ARCH.txt ]]; then
EXTRA_MESON_ARGS="--cross-file /cross_file-$DEBIAN_ARCH.txt"
EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/toolchain-$DEBIAN_ARCH.cmake"
if [ $DEBIAN_ARCH = arm64 ]; then
RUST_TARGET="aarch64-unknown-linux-gnu"
elif [ $DEBIAN_ARCH = armhf ]; then
RUST_TARGET="armv7-unknown-linux-gnueabihf"
fi
rustup target add $RUST_TARGET
export EXTRA_CARGO_ARGS="--target $RUST_TARGET"
export ARCH=${KERNEL_ARCH}
export CROSS_COMPILE="${GCC_ARCH}-"
fi
apt-get update
apt-get install -y --no-remove \
automake \
bc \
cmake \
debootstrap \
git \
libegl1-mesa-dev \
libgbm-dev \
libgles2-mesa-dev \
libssl-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxkbcommon-dev \
patch \
python3-distutils \
python3-mako \
python3-numpy \
python3-serial \
wget
if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
apt-get install -y --no-remove \
libegl1-mesa-dev:armhf \
libelf-dev:armhf \
libgbm-dev:armhf \
libgles2-mesa-dev:armhf \
libudev-dev:armhf \
libvulkan-dev:armhf \
libwaffle-dev:armhf \
libwayland-dev:armhf \
libx11-xcb-dev:armhf \
libxkbcommon-dev:armhf
fi
############### Building
STRIP_CMD="${GCC_ARCH}-strip"
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin
mv /usr/local/bin/deqp-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/.
mv /usr/local/bin/piglit-runner /lava-files/rootfs-${DEBIAN_ARCH}/usr/bin/.
############### Build dEQP
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
mv /deqp /lava-files/rootfs-${DEBIAN_ARCH}/.
############### Build piglit
. .gitlab-ci/container/build-piglit.sh
mv /piglit /lava-files/rootfs-${DEBIAN_ARCH}/.
############### Build libdrm
EXTRA_MESON_ARGS+=" -D prefix=/libdrm"
. .gitlab-ci/container/build-libdrm.sh
############### Cross-build kernel
mkdir -p kernel
wget -qO- ${KERNEL_URL} | tar -xz --strip-components=1 -C kernel
pushd kernel
# The kernel doesn't like the gold linker (or the old lld in our debians).
# Sneak in some override symlinks during kernel build until we can update
# debian (they'll get blown away by the rm of the kernel dir at the end).
mkdir -p ld-links
for i in /usr/bin/*-ld /usr/bin/ld; do
i=`basename $i`
ln -sf /usr/bin/$i.bfd ld-links/$i
done
export PATH=`pwd`/ld-links:$PATH
if [ -n "$INSTALL_KERNEL_MODULES" ]; then
# Disable all modules in defconfig, so we only build the ones we want
sed -i 's/=m/=n/g' ${DEFCONFIG}
fi
./scripts/kconfig/merge_config.sh ${DEFCONFIG} ../.gitlab-ci/container/${KERNEL_ARCH}.config
make ${KERNEL_IMAGE_NAME}
for image in ${KERNEL_IMAGE_NAME}; do
cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
cp ${DEVICE_TREES} /lava-files/.
fi
if [ -n "$INSTALL_KERNEL_MODULES" ]; then
make modules
INSTALL_MOD_PATH=/lava-files/rootfs-${DEBIAN_ARCH}/ make modules_install
fi
if [[ ${DEBIAN_ARCH} = "arm64" ]] && [[ ${MINIO_SUFFIX} = "baremetal" ]]; then
make Image.lzma
mkimage \
-f auto \
-A arm \
-O linux \
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
/lava-files/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
fi
popd
rm -rf kernel
############### Delete rust, since the tests won't be compiling anything.
rm -rf /root/.cargo
############### Create rootfs
set +e
if ! debootstrap \
--variant=minbase \
--arch=${DEBIAN_ARCH} \
--components main,contrib,non-free \
bullseye \
/lava-files/rootfs-${DEBIAN_ARCH}/ \
http://deb.debian.org/debian; then
cat /lava-files/rootfs-${DEBIAN_ARCH}/debootstrap/debootstrap.log
exit 1
fi
set -e
cp .gitlab-ci/container/create-rootfs.sh /lava-files/rootfs-${DEBIAN_ARCH}/.
chroot /lava-files/rootfs-${DEBIAN_ARCH} sh /create-rootfs.sh
rm /lava-files/rootfs-${DEBIAN_ARCH}/create-rootfs.sh
############### Install the built libdrm
# Dependencies pulled during the creation of the rootfs may overwrite
# the built libdrm. Hence, we add it after the rootfs has already been
# created.
mkdir -p /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH
find /libdrm/ -name lib\*\.so\* | xargs cp -t /lava-files/rootfs-${DEBIAN_ARCH}/usr/lib/$GCC_ARCH/.
rm -rf /libdrm
if [ ${DEBIAN_ARCH} = arm64 ] && [ ${MINIO_SUFFIX} = baremetal ]; then
# Make a gzipped copy of the Image for db410c.
gzip -k /lava-files/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
du -ah /lava-files/rootfs-${DEBIAN_ARCH} | sort -h | tail -100
pushd /lava-files/rootfs-${DEBIAN_ARCH}
tar czf /lava-files/lava-rootfs.tgz .
popd
. .gitlab-ci/container/container_post_build.sh
############### Upload the files!
ci-fairy minio login $CI_JOB_JWT
FILES_TO_UPLOAD="lava-rootfs.tgz \
$KERNEL_IMAGE_NAME"
if [[ -n $DEVICE_TREES ]]; then
FILES_TO_UPLOAD="$FILES_TO_UPLOAD $(basename -a $DEVICE_TREES)"
fi
for f in $FILES_TO_UPLOAD; do
ci-fairy minio cp /lava-files/$f \
minio://${MINIO_PATH}/$f
done
touch /lava-files/done
ci-fairy minio cp /lava-files/done minio://${MINIO_PATH}/done

View File

@@ -1,5 +0,0 @@
#!/bin/bash
arch=ppc64el
. .gitlab-ci/container/cross_build.sh

View File

@@ -1,5 +0,0 @@
#!/bin/bash
arch=s390x
. .gitlab-ci/container/cross_build.sh

View File

@@ -1,69 +0,0 @@
CONFIG_LOCALVERSION="ccu"
CONFIG_DEBUG_KERNEL=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_PWM_CROS_EC=y
CONFIG_BACKLIGHT_PWM=y
# Strip out some stuff we don't need for graphics testing, to reduce
# the build.
CONFIG_CAN=n
CONFIG_WIRELESS=n
CONFIG_RFKILL=n
CONFIG_WLAN=n
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_VCTRL=y
CONFIG_KASAN=n
CONFIG_KASAN_INLINE=n
CONFIG_STACKTRACE=n
CONFIG_TMPFS=y
CONFIG_PROVE_LOCKING=n
CONFIG_DEBUG_LOCKDEP=n
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_USB_USBNET=y
CONFIG_NETDEVICES=y
CONFIG_USB_NET_DRIVERS=y
CONFIG_USB_RTL8152=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=y
CONFIG_FW_LOADER_COMPRESS=y
# options for AMD devices
CONFIG_X86_AMD_PLATFORM_DEVICE=y
CONFIG_ACPI_VIDEO=y
CONFIG_X86_AMD_FREQ_SENSITIVITY=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_AMD=y
CONFIG_DRM_AMDGPU=m
CONFIG_DRM_AMDGPU_SI=m
CONFIG_DRM_AMD_ACP=n
CONFIG_ACPI_WMI=y
CONFIG_MXM_WMI=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_SERIAL_8250_DW=y
CONFIG_CHROME_PLATFORMS=y
# options for Intel devices
CONFIG_MFD_INTEL_LPSS_PCI=y

View File

@@ -1,90 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
python3-pip \
python3-setuptools \
unzip \
"
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
bison \
ccache \
dpkg-cross \
flex \
g++ \
g++-mingw-w64-x86-64 \
gcc \
git \
kmod \
libclang-11-dev \
libclang-9-dev \
libclc-dev \
libelf-dev \
libepoxy-dev \
libexpat1-dev \
libgtk-3-dev \
libllvm11 \
libllvm9 \
libomxil-bellagio-dev \
libpciaccess-dev \
libunwind-dev \
libva-dev \
libvdpau-dev \
libvulkan-dev \
libx11-dev \
libx11-xcb-dev \
libxext-dev \
libxml2-utils \
libxrandr-dev \
libxrender-dev \
libxshmfence-dev \
libxvmc-dev \
libxxf86vm-dev \
libz-mingw-w64-dev \
make \
meson \
pkg-config \
python-is-python3 \
python3-mako \
python3-pil \
python3-requests \
qemu-user \
valgrind \
wayland-protocols \
wget \
wine64 \
x11proto-dri2-dev \
x11proto-gl-dev \
x11proto-randr-dev \
xz-utils \
zlib1g-dev
# Needed for ci-fairy, this revision is able to upload files to MinIO
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@6f5af7e5574509726c79109e3c147cee95e81366
# for the vulkan overlay layer and ACO tests
wget https://github.com/KhronosGroup/glslang/releases/download/SDK-candidate-26-Jul-2020/glslang-master-linux-Release.zip
unzip glslang-master-linux-Release.zip bin/glslangValidator
install -m755 bin/glslangValidator /usr/local/bin/
rm bin/glslangValidator glslang-master-linux-Release.zip
############### Uninstall ephemeral packages
apt-get purge -y $STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,110 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
autoconf \
automake \
autotools-dev \
bzip2 \
cmake \
libgbm-dev \
libtool \
unzip \
"
# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
clang \
libasan6 \
libarchive-dev \
libclang-cpp11-dev \
libglvnd-dev \
libllvmspirvlib-dev \
liblua5.3-dev \
libxcb-dri2-0-dev \
libxcb-dri3-dev \
libxcb-glx0-dev \
libxcb-present-dev \
libxcb-randr0-dev \
libxcb-shm0-dev \
libxcb-sync-dev \
libxcb-xfixes0-dev \
libxcb1-dev \
libxml2-dev \
llvm-11-dev \
llvm-9-dev \
ocl-icd-opencl-dev \
procps \
spirv-tools \
strace \
time \
wine \
wine32
. .gitlab-ci/container/container_pre_build.sh
# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
# they're going to be fixed, so we'll just have to fix it ourselves
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
cat >/usr/local/bin/x86_64-w64-mingw32-pkg-config <<EOF
#!/bin/sh
PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig pkg-config \$@
EOF
chmod +x /usr/local/bin/x86_64-w64-mingw32-pkg-config
# dependencies where we want a specific version
export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
export WAYLAND_RELEASES=https://wayland.freedesktop.org/releases
export XORGMACROS_VERSION=util-macros-1.19.0
export LIBWAYLAND_VERSION=wayland-1.18.0
wget $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
cd $XORGMACROS_VERSION; ./configure; make install; cd ..
rm -rf $XORGMACROS_VERSION
. .gitlab-ci/container/build-libdrm.sh
wget $WAYLAND_RELEASES/$LIBWAYLAND_VERSION.tar.xz
tar -xvf $LIBWAYLAND_VERSION.tar.xz && rm $LIBWAYLAND_VERSION.tar.xz
cd $LIBWAYLAND_VERSION; ./configure --enable-libraries --without-host-scanner --disable-documentation --disable-dtd-validation; make install; cd ..
rm -rf $LIBWAYLAND_VERSION
pushd /usr/local
git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
rm -rf shader-db/.git
cd shader-db
make
popd
git clone https://github.com/microsoft/DirectX-Headers -b v1.0.1 --depth 1
pushd DirectX-Headers
mkdir build
cd build
meson .. --backend=ninja --buildtype=release -Dbuild-test=false
ninja
ninja install
popd
rm -rf DirectX-Headers
############### Uninstall the build software
apt-get purge -y \
$STABLE_EPHEMERAL
. .gitlab-ci/container/container_post_build.sh

View File

@@ -1,71 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
apt-get install -y ca-certificates
sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list
# Ephemeral packages (installed for this script and removed again at
# the end)
STABLE_EPHEMERAL=" \
cargo \
python3-dev \
python3-pip \
python3-setuptools \
python3-wheel \
"
apt-get update
apt-get dist-upgrade -y
apt-get install -y --no-remove \
git \
git-lfs \
libasan6 \
libexpat1 \
libllvm11 \
libllvm9 \
liblz4-1 \
libpng16-16 \
libpython3.9 \
libvulkan1 \
libwayland-client0 \
libwayland-server0 \
libxcb-ewmh2 \
libxcb-randr0 \
libxcb-xfixes0 \
libxkbcommon0 \
libxrandr2 \
libxrender1 \
python-is-python3 \
python3-mako \
python3-numpy \
python3-packaging \
python3-pil \
python3-requests \
python3-six \
python3-yaml \
vulkan-tools \
waffle-utils \
xauth \
xvfb \
zlib1g
apt-get install -y --no-install-recommends \
$STABLE_EPHEMERAL
# Needed for ci-fairy, this revision is able to upload files to MinIO
# and doesn't depend on git
pip3 install git+http://gitlab.freedesktop.org/freedesktop/ci-templates@0f1abc24c043e63894085a6bd12f14263e8b29eb
############### Build dEQP runner
. .gitlab-ci/container/build-deqp-runner.sh
rm -rf ~/.cargo
apt-get purge -y $STABLE_EPHEMERAL
apt-get autoremove -y --purge

View File

@@ -1,87 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
autoconf \
automake \
ccache \
clang-11 \
cmake \
g++ \
libclang-cpp11-dev \
libgbm-dev \
libgles2-mesa-dev \
libllvmspirvlib-dev \
libpciaccess-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxkbcommon-dev \
libxrender-dev \
llvm-11-dev \
llvm-spirv \
make \
meson \
ocl-icd-opencl-dev \
patch \
pkg-config \
python3-distutils \
wget \
xz-utils \
"
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
apitrace \
clinfo \
libclang-common-11-dev \
libclang-cpp11 \
libegl1 \
libllvmspirvlib11 \
libxcb-shm0 \
ocl-icd-libopencl1 \
python3-lxml \
python3-renderdoc \
python3-simplejson \
spirv-tools
. .gitlab-ci/container/container_pre_build.sh
############### Build libdrm
. .gitlab-ci/container/build-libdrm.sh
############### Build libclc
. .gitlab-ci/container/build-libclc.sh
############### Build virglrenderer
. .gitlab-ci/container/build-virglrenderer.sh
############### Build piglit
INCLUDE_OPENCL_TESTS=1 . .gitlab-ci/container/build-piglit.sh
############### Build dEQP GL
DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
############### Uninstall the build software
ccache --show-stats
apt-get purge -y \
$STABLE_EPHEMERAL
apt-get autoremove -y --purge

View File

@@ -1,140 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# Ephemeral packages (installed for this script and removed again at the end)
STABLE_EPHEMERAL=" \
ccache \
cmake \
g++ \
libgbm-dev \
libgles2-mesa-dev \
liblz4-dev \
libpciaccess-dev \
libudev-dev \
libvulkan-dev \
libwaffle-dev \
libwayland-dev \
libx11-xcb-dev \
libxcb-ewmh-dev \
libxcb-keysyms1-dev \
libxkbcommon-dev \
libxrandr-dev \
libxrender-dev \
libzstd-dev \
meson \
p7zip \
patch \
pkg-config \
python3-distutils \
wget \
xz-utils \
"
apt-get install -y --no-remove \
$STABLE_EPHEMERAL \
libxcb-shm0 \
python3-lxml \
python3-simplejson
# We need multiarch for Wine
dpkg --add-architecture i386
apt-get update
apt-get install -y --no-remove \
wine \
wine32 \
wine64
############### Set up Wine env variables
export WINEDEBUG="-all"
export WINEPREFIX="/dxvk-wine64"
############### Install DXVK
DXVK_VERSION="1.6"
# We don't want crash dialogs
cat >crashdialog.reg <<EOF
Windows Registry Editor Version 5.00
[HKEY_CURRENT_USER\Software\Wine\WineDbg]
"ShowCrashDialog"=dword:00000000
EOF
# Set the wine prefix and disable the crash dialog
wine regedit crashdialog.reg
rm crashdialog.reg
# DXVK's setup often fails with:
# "${WINEPREFIX}: Not a valid wine prefix."
# and that is just spurious output from checking the existence of the
# system.reg file, which fails.
# Just giving it a bit more time to be created solves the
# problem ...
while ! test -f "${WINEPREFIX}/system.reg"; do sleep 1; done
wget "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
dxvk-"${DXVK_VERSION}"/setup_dxvk.sh install
rm -rf dxvk-"${DXVK_VERSION}"
rm dxvk-"${DXVK_VERSION}".tar.gz
############### Install Windows' apitrace binaries
APITRACE_VERSION="9.0"
APITRACE_VERSION_DATE="20191126"
wget "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64.7z"
7zr x "apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64.7z" \
"apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
"apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
mv "apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
rm "apitrace-${APITRACE_VERSION}.${APITRACE_VERSION_DATE}-win64.7z"
# Add the apitrace path to the registry
wine \
reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
/v Path \
/t REG_EXPAND_SZ \
/d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \
/f
############### Building ...
. .gitlab-ci/container/container_pre_build.sh
############### Build piglit
PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh
############### Build Fossilize
. .gitlab-ci/container/build-fossilize.sh
############### Build dEQP VK
. .gitlab-ci/container/build-deqp.sh
############### Build gfxreconstruct
. .gitlab-ci/container/build-gfxreconstruct.sh
############### Build libdrm
. .gitlab-ci/container/build-libdrm.sh
############### Uninstall the build software
ccache --show-stats
apt-get purge -y \
$STABLE_EPHEMERAL
apt-get autoremove -y --purge

View File

@@ -1 +0,0 @@
u_format_test

View File

@@ -1 +0,0 @@
lp_test_arit

View File

@@ -1,3 +0,0 @@
lp_test_arit
lp_test_format
u_format_test

View File

@@ -1,10 +0,0 @@
# Note: skips lists for CI are just lists of lines that, when
# non-zero-length and not starting with '#', are treated as regexes used to
# delete matching lines from the test list. Be careful.
# Skip the perf/stress tests to keep runtime manageable
dEQP-GLES[0-9]*.performance.*
dEQP-GLES[0-9]*.stress.*
# These are really slow on tiling architectures (including llvmpipe).
dEQP-GLES[0-9]*.functional.flush_finish.*

View File

@@ -1,275 +0,0 @@
#!/bin/sh
set -ex
DEQP_WIDTH=${DEQP_WIDTH:-256}
DEQP_HEIGHT=${DEQP_HEIGHT:-256}
DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0}
DEQP_VARIANT=${DEQP_VARIANT:-master}
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=pbuffer"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG"
DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden"
if [ -z "$DEQP_VER" ]; then
echo 'DEQP_VER must be set to something like "gles2", "gles31" or "vk" for the test run'
exit 1
fi
if [ "$DEQP_VER" = "vk" ]; then
if [ -z "$VK_DRIVER" ]; then
echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
exit 1
fi
fi
if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/deqp-gpu-version-*.txt)'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json
# the runner was failing to look in /usr/local/lib for libkms for some reason
# I never figured out.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
RESULTS=`pwd`/${DEQP_RESULTS_DIR:-results}
mkdir -p $RESULTS
# Generate test case list file.
if [ "$DEQP_VER" = "vk" ]; then
cp /deqp/mustpass/vk-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk
elif [ "$DEQP_VER" = "gles2" -o "$DEQP_VER" = "gles3" -o "$DEQP_VER" = "gles31" ]; then
cp /deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER
SUITE=dEQP
else
cp /deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt /tmp/case-list.txt
DEQP=/deqp/external/openglcts/modules/glcts
SUITE=KHR
fi
# If the caselist is too long to run in a reasonable amount of time, let the job
# specify what fraction (1/n) of the caselist we should run. Note: first~step
# is a GNU sed extension that matches every step-th line, starting from line
# "first" (the first line is #1).
if [ -n "$DEQP_FRACTION" ]; then
sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt
fi
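# As a quick illustration of the first~step address form (a hypothetical
# local check, not part of the job): `seq 10 | sed -n '1~5p'` prints 1 and 6,
# i.e. every 5th line starting from the first.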
# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
fi
if [ -n "$DEQP_CASELIST_FILTER" ]; then
sed -ni "/$DEQP_CASELIST_FILTER/p" /tmp/case-list.txt
fi
if [ ! -s /tmp/case-list.txt ]; then
echo "Caselist generation failed"
exit 1
fi
if [ -e "$INSTALL/deqp-$GPU_VERSION-fails.txt" ]; then
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/deqp-$GPU_VERSION-fails.txt"
fi
if [ -e "$INSTALL/deqp-$GPU_VERSION-flakes.txt" ]; then
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --flakes $INSTALL/deqp-$GPU_VERSION-flakes.txt"
fi
if [ -e "$INSTALL/deqp-$GPU_VERSION-skips.txt" ]; then
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --skips $INSTALL/deqp-$GPU_VERSION-skips.txt"
else
DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --skips $INSTALL/deqp-default-skips.txt"
fi
set +e
if [ -n "$DEQP_PARALLEL" ]; then
JOB="--jobs $DEQP_PARALLEL"
elif [ -n "$FDO_CI_CONCURRENT" ]; then
JOB="--jobs $FDO_CI_CONCURRENT"
else
JOB="--jobs 4"
fi
# If this CI lab lacks artifacts support, print the whole list of failures/flakes.
if [ -n "$DEQP_NO_SAVE_RESULTS" ]; then
SUMMARY_LIMIT="--summary-limit 0"
fi
run_cts() {
deqp=$1
caselist=$2
output=$3
deqp-runner \
run \
--deqp $deqp \
--output $RESULTS \
--caselist $caselist \
--testlog-to-xml /deqp/executor/testlog-to-xml \
$JOB \
$SUMMARY_LIMIT \
$DEQP_RUNNER_OPTIONS \
-- \
$DEQP_OPTIONS
}
report_flakes() {
flakes=`grep ",Flake" $1 | sed 's|,Flake.*||g'`
if [ -z "$flakes" ]; then
return 0
fi
if [ -z "$FLAKES_CHANNEL" ]; then
return 0
fi
# The nick needs to be something unique so that multiple runners
# connecting at the same time don't race for one nick and get blocked.
# freenode has a 16-char limit on nicks (9 is the IETF standard, but
# various servers extend that). So, trim off the common prefixes of the
# runner name, and append the job ID so that software runners with more
# than one concurrent job (think swrast) don't collide. For freedreno,
# that gives us a nick as long as db410c-N-JJJJJJJJ, and it'll be a while
# before we make it to 9-digit jobs (we're at 7 so far).
runner=`echo $CI_RUNNER_DESCRIPTION | sed 's|mesa-||' | sed 's|google-freedreno-||g'`
bot="$runner-$CI_JOB_ID"
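# For example (values are hypothetical): CI_RUNNER_DESCRIPTION=mesa-google-freedreno-db410c-5
# and CI_JOB_ID=1234567 would yield bot=db410c-5-1234567.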
channel="$FLAKES_CHANNEL"
(
echo NICK $bot
echo USER $bot unused unused :Gitlab CI Notifier
sleep 10
echo "JOIN $channel"
sleep 1
desc="Flakes detected in job: $CI_JOB_URL on $CI_RUNNER_DESCRIPTION"
if [ -n "$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME" ]; then
desc="$desc on branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME ($CI_MERGE_REQUEST_TITLE)"
elif [ -n "$CI_COMMIT_BRANCH" ]; then
desc="$desc on branch $CI_COMMIT_BRANCH ($CI_COMMIT_TITLE)"
fi
echo "PRIVMSG $channel :$desc"
for flake in $flakes; do
echo "PRIVMSG $channel :$flake"
done
echo "PRIVMSG $channel :See $CI_JOB_URL/artifacts/browse/results/"
echo "QUIT"
) | nc irc.freenode.net 6667 > /dev/null
}
parse_renderer() {
RENDERER=`grep -A1 TestCaseResult.\*info.renderer $RESULTS/deqp-info.qpa | grep '<Text' | sed 's|.*<Text>||g' | sed 's|</Text>||g'`
VERSION=`grep -A1 TestCaseResult.\*info.version $RESULTS/deqp-info.qpa | grep '<Text' | sed 's|.*<Text>||g' | sed 's|</Text>||g'`
echo "Renderer: $RENDERER"
echo "Version: $VERSION "
if ! echo $RENDERER | grep -q $DEQP_EXPECTED_RENDERER; then
echo "Expected GL_RENDERER $DEQP_EXPECTED_RENDERER"
exit 1
fi
}
check_renderer() {
echo "Capturing renderer info for GLES driver sanity checks"
# If you're having trouble loading your driver, uncommenting this may help
# debug.
# export EGL_LOG_LEVEL=debug
VERSION=`echo $DEQP_VER | tr '[a-z]' '[A-Z]'`
export LD_PRELOAD=$TEST_LD_PRELOAD
$DEQP $DEQP_OPTIONS --deqp-case=$SUITE-$VERSION.info.\* --deqp-log-filename=$RESULTS/deqp-info.qpa
export LD_PRELOAD=
parse_renderer
}
check_vk_device_name() {
echo "Capturing device info for VK driver sanity checks"
export LD_PRELOAD=$TEST_LD_PRELOAD
$DEQP $DEQP_OPTIONS --deqp-case=dEQP-VK.info.device --deqp-log-filename=$RESULTS/deqp-info.qpa
export LD_PRELOAD=
DEVICENAME=`grep deviceName $RESULTS/deqp-info.qpa | sed 's|deviceName: ||g'`
echo "deviceName: $DEVICENAME"
if [ -n "$DEQP_EXPECTED_RENDERER" -a "x$DEVICENAME" != "x$DEQP_EXPECTED_RENDERER" ]; then
echo "Expected deviceName $DEQP_EXPECTED_RENDERER"
exit 1
fi
}
report_load() {
echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
echo "# of CPU cores: $(cat /proc/cpuinfo | grep processor | wc -l)"
}
# wrapper to suppress +x to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
# deqp uses virpipe, while virgl_test_server uses llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
VTEST_ARGS="--use-egl-surfaceless"
if [ "$VIRGL_HOST_API" = "GLES" ]; then
VTEST_ARGS="$VTEST_ARGS --use-gles"
fi
GALLIUM_DRIVER=llvmpipe \
GALLIVM_PERF="nopt,no_filter_hacks" \
virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
sleep 1
fi
if [ $DEQP_VER = vk ]; then
quiet check_vk_device_name
else
quiet check_renderer
fi
RESULTS_CSV=$RESULTS/results.csv
FAILURES_CSV=$RESULTS/failures.csv
export LD_PRELOAD=$TEST_LD_PRELOAD
run_cts $DEQP /tmp/case-list.txt $RESULTS_CSV
DEQP_EXITCODE=$?
export LD_PRELOAD=
quiet report_load
# Remove all but the first 50 individual XML files uploaded as artifacts, to
# save fd.o space when you break everything.
find $RESULTS -name \*.xml | \
sort -n |
sed -n '1,+49!p' | \
xargs rm -f
# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
find $RESULTS -name \*.xml \
-exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
-quit
deqp-runner junit \
--testsuite $DEQP_VER \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
# Report the flakes to the IRC channel for monitoring (if configured):
quiet report_flakes $RESULTS_CSV
exit $DEQP_EXITCODE

View File

@@ -1 +0,0 @@
../docs/ci

View File

@@ -1,36 +0,0 @@
#!/bin/bash
set +e
set -o xtrace
# if we run this script outside of gitlab-ci for testing, ensure
# we got meaningful variables
CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/mesa}
if [[ -e $CI_PROJECT_DIR/.git ]]
then
echo "Repository already present, skip cache download"
exit
fi
TMP_DIR=$(mktemp -d)
echo "Downloading archived master..."
/usr/bin/wget -O $TMP_DIR/mesa.tar.gz \
https://${MINIO_HOST}/git-cache/${FDO_UPSTREAM_REPO}/mesa.tar.gz
# check wget error code
if [[ $? -ne 0 ]]
then
echo "Repository cache not available"
exit
fi
set -e
rm -rf "$CI_PROJECT_DIR"
echo "Extracting tarball into '$CI_PROJECT_DIR'..."
mkdir -p "$CI_PROJECT_DIR"
tar xzf "$TMP_DIR/mesa.tar.gz" -C "$CI_PROJECT_DIR"
rm -rf "$TMP_DIR"
chmod a+w "$CI_PROJECT_DIR"

View File

@@ -1,20 +0,0 @@
#!/bin/sh
set -ex
if [ -z "$VK_DRIVER" ]; then
echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.x86_64.json
# To store Fossilize logs on failure.
RESULTS=`pwd`/results
mkdir -p results
"$INSTALL/fossils/fossils.sh" "$INSTALL/fossils.yml" "$RESULTS"

View File

@@ -1,10 +0,0 @@
fossils-db:
repo: "https://gitlab.freedesktop.org/hakzsam/fossils-db"
commit: "5626cedcb58bd95a7b79a9664651818aea92b21c"
fossils:
- path: sascha-willems/database.foz
- path: parallel-rdp/small_subgroup.foz
- path: parallel-rdp/small_uber_subgroup.foz
- path: parallel-rdp/subgroup.foz
- path: parallel-rdp/uber_subgroup.foz

View File

@@ -1,77 +0,0 @@
#!/usr/bin/env bash
FOSSILS_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
FOSSILS_YAML="$(readlink -f "$1")"
FOSSILS_RESULTS="$2"
clone_fossils_db()
{
local repo="$1"
local commit="$2"
rm -rf fossils-db
git clone --no-checkout "$repo" fossils-db
(cd fossils-db; git reset "$commit" || git reset "origin/$commit")
}
query_fossils_yaml()
{
python3 "$FOSSILS_SCRIPT_DIR/query_fossils_yaml.py" \
--file "$FOSSILS_YAML" "$@"
}
create_clean_git()
{
rm -rf .clean_git
cp -R .git .clean_git
}
restore_clean_git()
{
rm -rf .git
cp -R .clean_git .git
}
fetch_fossil()
{
local fossil="${1//,/?}"
echo -n "[fetch_fossil] Fetching $1... "
local output=$(git lfs pull -I "$fossil" 2>&1)
local ret=0
if [[ $? -ne 0 || ! -f "$1" ]]; then
echo "ERROR"
echo "$output"
ret=1
else
echo "OK"
fi
restore_clean_git
return $ret
}
if [[ -n "$(query_fossils_yaml fossils_db_repo)" ]]; then
clone_fossils_db "$(query_fossils_yaml fossils_db_repo)" \
"$(query_fossils_yaml fossils_db_commit)"
cd fossils-db
else
echo "Warning: No fossils-db entry in $FOSSILS_YAML, assuming fossils-db is current directory"
fi
# During git operations various git objects get created which
# may take up significant space. Store a clean .git instance,
# which we restore after various git operations to keep our
# storage consumption low.
create_clean_git
for fossil in $(query_fossils_yaml fossils)
do
fetch_fossil "$fossil" || exit $?
fossilize-replay --num-threads 4 $fossil 1>&2 2> $FOSSILS_RESULTS/fossil_replay.txt
if [ $? != 0 ]; then
echo "Replay of $fossil failed"
grep "pipeline crashed or hung" $FOSSILS_RESULTS/fossil_replay.txt
exit 1
fi
rm $fossil
done
exit 0

View File

@@ -1,69 +0,0 @@
#!/usr/bin/python3
# Copyright (c) 2019 Collabora Ltd
# Copyright (c) 2020 Valve Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# SPDX-License-Identifier: MIT
import argparse
import yaml
def cmd_fossils_db_repo(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
print(y['fossils-db']['repo'])
def cmd_fossils_db_commit(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
print(y['fossils-db']['commit'])
def cmd_fossils(args):
with open(args.file, 'r') as f:
y = yaml.safe_load(f)
fossils = list(y['fossils'])
if len(fossils) == 0:
return
print('\n'.join((t['path'] for t in fossils)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--file', required=True,
help='the name of the yaml file')
subparsers = parser.add_subparsers(help='sub-command help')
parser_fossils_db_repo = subparsers.add_parser('fossils_db_repo')
parser_fossils_db_repo.set_defaults(func=cmd_fossils_db_repo)
parser_fossils_db_commit = subparsers.add_parser('fossils_db_commit')
parser_fossils_db_commit.set_defaults(func=cmd_fossils_db_commit)
parser_fossils = subparsers.add_parser('fossils')
parser_fossils.set_defaults(func=cmd_fossils)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
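# Usage sketch, as driven from fossils.sh above (the yaml path is illustrative):
#   python3 query_fossils_yaml.py --file fossils.yml fossils_db_repo    # prints the repo URL
#   python3 query_fossils_yaml.py --file fossils.yml fossils_db_commit  # prints the pinned commit
#   python3 query_fossils_yaml.py --file fossils.yml fossils            # prints one fossil path per line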

View File

@@ -1,49 +0,0 @@
#!/usr/bin/env python3
from jinja2 import Environment, FileSystemLoader
import argparse
import os
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("--template")
parser.add_argument("--pipeline-info")
parser.add_argument("--base-artifacts-url")
parser.add_argument("--mesa-url")
parser.add_argument("--device-type")
parser.add_argument("--dtb", nargs='?', default="")
parser.add_argument("--kernel-image-name")
parser.add_argument("--kernel-image-type", nargs='?', default="")
parser.add_argument("--gpu-version")
parser.add_argument("--boot-method")
parser.add_argument("--lava-tags", nargs='?', default="")
parser.add_argument("--env-vars", nargs='?', default="")
parser.add_argument("--deqp-version")
parser.add_argument("--ci-node-index")
parser.add_argument("--ci-node-total")
parser.add_argument("--job-type")
args = parser.parse_args()
env = Environment(loader = FileSystemLoader(os.path.dirname(args.template)), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(os.path.basename(args.template))
env_vars = "%s CI_NODE_INDEX=%s CI_NODE_TOTAL=%s" % (args.env_vars, args.ci_node_index, args.ci_node_total)
values = {}
values['pipeline_info'] = args.pipeline_info
values['base_artifacts_url'] = args.base_artifacts_url
values['mesa_url'] = args.mesa_url
values['device_type'] = args.device_type
values['dtb'] = args.dtb
values['kernel_image_name'] = args.kernel_image_name
values['kernel_image_type'] = args.kernel_image_type
values['gpu_version'] = args.gpu_version
values['boot_method'] = args.boot_method
values['tags'] = args.lava_tags
values['env_vars'] = env_vars
values['deqp_version'] = args.deqp_version
f = open(os.path.splitext(os.path.basename(args.template))[0], "w")
f.write(template.render(values))
f.close()

View File

@@ -1,178 +0,0 @@
.kernel+rootfs:
extends:
- .ci-run-policy
stage: container-2
variables:
GIT_STRATEGY: fetch
DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"
script:
- .gitlab-ci/container/lava_build.sh
.kernel+rootfs-lava:
extends:
- .kernel+rootfs
variables:
KERNEL_URL: "https://gitlab.freedesktop.org/tomeu/linux/-/archive/v5.10-rc2-for-mesa-ci/linux-v5.10-rc2-for-mesa-ci.tar.gz"
INSTALL_KERNEL_MODULES: 1
MESA_ROOTFS_TAG: &lava-rootfs "2021-04-14-librenderdoc"
MINIO_SUFFIX: "lava"
PIGLIT_BUILD_TARGETS: "piglit_replayer"
kernel+rootfs_amd64:
extends:
- .use-x86_build-base
- .kernel+rootfs-lava
image: "$FDO_BASE_IMAGE"
variables:
DEBIAN_ARCH: "amd64"
DISTRIBUTION_TAG: &distribution-tag-amd64 "${MESA_ROOTFS_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
kernel+rootfs_arm64:
extends:
- .use-arm_build
- .kernel+rootfs-lava
tags:
- aarch64
variables:
DEBIAN_ARCH: "arm64"
kernel+rootfs_armhf:
extends:
- kernel+rootfs_arm64
variables:
DEBIAN_ARCH: "armhf"
.lava-test:
extends:
- .ci-run-policy
# Cancel job if a newer commit is pushed to the same branch
interruptible: true
variables:
DISTRIBUTION_TAG: *distribution-tag-arm
GIT_STRATEGY: none # testing doesn't build anything from source
ENV_VARS: "DEQP_PARALLEL=6"
FIXED_ENV_VARS: "CI_PIPELINE_ID=${CI_PIPELINE_ID} CI_JOB_ID=${CI_JOB_ID} CI_PAGES_DOMAIN=${CI_PAGES_DOMAIN} CI_PROJECT_NAME=${CI_PROJECT_NAME} CI_PROJECT_PATH=${CI_PROJECT_PATH} CI_PROJECT_ROOT_NAMESPACE=${CI_PROJECT_ROOT_NAMESPACE} CI_JOB_JWT=${CI_JOB_JWT} CI_SERVER_URL=${CI_SERVER_URL} DRIVER_NAME=${DRIVER_NAME} FDO_UPSTREAM_REPO=${FDO_UPSTREAM_REPO} PIGLIT_NO_WINDOW=1 PIGLIT_REPLAY_UPLOAD_TO_MINIO=1 MINIO_HOST=${MINIO_HOST} LAVA_TEST_SCRIPT=${LAVA_TEST_SCRIPT} TEST_SUITE=${TEST_SUITE}"
DEQP_VERSION: gles2
ARTIFACTS_PREFIX: "https://${MINIO_HOST}/mesa-lava/"
MESA_ROOTFS_TAG: *lava-rootfs
MESA_URL: "https://${MINIO_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}/mesa-${ARCH}.tar.gz"
script:
# Try to use the kernel and rootfs built in mainline first, to save cycles
- >
if wget -q --method=HEAD "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/lava/${DISTRIBUTION_TAG}/${ARCH}/done"; then
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/lava/${DISTRIBUTION_TAG}/${ARCH}"
else
ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/lava/${DISTRIBUTION_TAG}/${ARCH}"
fi
- >
artifacts/generate_lava.py \
--template artifacts/lava.yml.jinja2 \
--pipeline-info "$CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
--base-artifacts-url ${ARTIFACTS_URL} \
--mesa-url ${MESA_URL} \
--device-type ${DEVICE_TYPE} \
--dtb ${DTB} \
--env-vars "${ENV_VARS} ${FIXED_ENV_VARS}" \
--deqp-version ${DEQP_VERSION} \
--kernel-image-name ${KERNEL_IMAGE_NAME} \
--kernel-image-type "${KERNEL_IMAGE_TYPE}" \
--gpu-version ${GPU_VERSION} \
--boot-method ${BOOT_METHOD} \
--lava-tags "${LAVA_TAGS}" \
--ci-node-index "${CI_NODE_INDEX}" \
--ci-node-total "${CI_NODE_TOTAL}"
- lava_job_id=`lavacli jobs submit lava.yml` || lavacli jobs submit lava.yml
- echo $lava_job_id
- rm -rf artifacts/*
- cp lava.yml artifacts/.
- lavacli jobs logs $lava_job_id | tee artifacts/lava-$lava_job_id.log
- lavacli jobs show $lava_job_id
- result=`lavacli results $lava_job_id 0_mesa mesa | head -1`
- echo $result
- '[[ "$result" == "pass" ]]'
artifacts:
name: "mesa_${CI_JOB_NAME}"
when: always
paths:
- artifacts/
.lava-test:armhf:
variables:
ARCH: armhf
KERNEL_IMAGE_NAME: zImage
KERNEL_IMAGE_TYPE: "type:\ zimage"
BOOT_METHOD: u-boot
TEST_SUITE: "deqp"
LAVA_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .lava-test
- .use-arm_build
needs:
- kernel+rootfs_armhf
- meson-armhf
.lava-test:arm64:
variables:
ARCH: arm64
KERNEL_IMAGE_NAME: Image
KERNEL_IMAGE_TYPE: "type:\ image"
BOOT_METHOD: u-boot
TEST_SUITE: "deqp"
LAVA_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .lava-test
- .use-arm_build
dependencies:
- meson-arm64
needs:
- kernel+rootfs_arm64
- meson-arm64
.lava-test:amd64:
variables:
ARCH: amd64
DISTRIBUTION_TAG: *distribution-tag-amd64
KERNEL_IMAGE_NAME: bzImage
KERNEL_IMAGE_TYPE: "type:\ zimage"
BOOT_METHOD: u-boot
TEST_SUITE: "deqp"
LAVA_TEST_SCRIPT: "/install/deqp-runner.sh"
extends:
- .use-x86_build-base # for same $MESA_BASE_TAG as in kernel+rootfs_amd64
- .use-arm_build # ARM because it must match the architecture of the runner
- .lava-test
needs:
- kernel+rootfs_amd64
- arm_build # ARM because it must match the architecture of the runner
- meson-testing
.lava-traces-base:
after_script:
- mkdir -p artifacts
- wget -O "artifacts/junit.xml" "https://minio-packet.freedesktop.org/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}/${CI_JOB_ID}/traces/junit.xml"
artifacts:
reports:
junit: artifacts/junit.xml
.lava-piglit:
variables:
TEST_SUITE: "piglit"
LAVA_TEST_SCRIPT: "/install/piglit/run.sh"
.lava-piglit-traces:amd64:
extends:
- .lava-test:amd64
- .lava-piglit
- .lava-traces-base
.lava-piglit-traces:armhf:
extends:
- .lava-test:armhf
- .lava-piglit
- .lava-traces-base
.lava-piglit-traces:arm64:
extends:
- .lava-test:arm64
- .lava-piglit
- .lava-traces-base

View File

@@ -1,120 +0,0 @@
job_name: mesa-{{ test_suite }}-{{ deqp_version }}-{{ gpu_version }} {{ pipeline_info }}
device_type: {{ device_type }}
context:
extra_nfsroot_args: " init=/init rootwait"
timeouts:
job:
minutes: 30
priority: 75
visibility:
group:
- "Collabora+fdo"
{% if tags %}
{% set lavatags = tags.split(',') %}
tags:
{% for tag in lavatags %}
- {{ tag }}
{% endfor %}
{% endif %}
actions:
- deploy:
timeout:
minutes: 10
to: tftp
kernel:
url: {{ base_artifacts_url }}/{{ kernel_image_name }}
{% if kernel_image_type %}
{{ kernel_image_type }}
{% endif %}
nfsrootfs:
url: {{ base_artifacts_url }}/lava-rootfs.tgz
compression: gz
{% if dtb %}
dtb:
url: {{ base_artifacts_url }}/{{ dtb }}.dtb
{% endif %}
os: oe
- boot:
timeout:
minutes: 25
method: {{ boot_method }}
{% if boot_method == "fastboot" %}
{#
For fastboot, LAVA doesn't know how to unpack the rootfs/apply overlay/repack,
so we transfer the overlay over the network after boot.
#}
transfer_overlay:
download_command: wget -S --progress=dot:giga
unpack_command: tar -C / -xzf
{% else %}
commands: nfs
{% endif %}
prompts:
- 'lava-shell:'
- test:
timeout:
minutes: 30
failure_retry: 1
definitions:
- repository:
metadata:
format: Lava-Test Test Definition 1.0
name: mesa
description: "Mesa test plan"
os:
- oe
scope:
- functional
run:
steps:
- mount -t proc none /proc
- mount -t sysfs none /sys
- mount -t devtmpfs none /dev || echo possibly already mounted
- mkdir -p /dev/pts
- mount -t devpts devpts /dev/pts
- mkdir -p /dev/shm
- mount -t tmpfs tmpfs /dev/shm
- echo "nameserver 8.8.8.8" > /etc/resolv.conf
- for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done
- modprobe amdgpu || true
- DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
- echo performance > $DEVFREQ_GOVERNOR || true
- GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
- echo -1 > $GPU_AUTOSUSPEND || true
{% if env_vars %}
- export {{ env_vars }}
{% endif %}
# runner script assumes some stuff is in pwd
- cd /
- wget -S --progress=dot:giga -O- {{ mesa_url }} | tar -xz
- export DEQP_NO_SAVE_RESULTS=1
- export GPU_VERSION={{ gpu_version }}
- export DEQP_VER={{ deqp_version }}
- export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
- export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image"
- export PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL="/mesa-tracie-results/${CI_PROJECT_PATH}"
- export PIGLIT_REPLAY_ARTIFACTS_BASE_URL="/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}/${CI_JOB_ID}"
- export PIGLIT_REPLAY_DESCRIPTION_FILE="/install/traces-${DRIVER_NAME}.yml"
- export PIGLIT_REPLAY_DEVICE_NAME=gl-{{ gpu_version }}
- export PIGLIT_RESULTS={{ gpu_version }}-${PIGLIT_PROFILES}
- export LIBGL_DRIVERS_PATH=`pwd`/install/lib/dri
- "if sh $LAVA_TEST_SCRIPT; then
echo 'mesa: pass';
else
echo 'mesa: fail';
fi"
parse:
pattern: '(?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
from: inline
name: mesa
path: inline/mesa.yaml
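{# For reference: the parse pattern above is what turns the runner's final
   "mesa: pass" / "mesa: fail" echo from the steps into a LAVA test result. #}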

View File

@@ -1,79 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
CROSS_FILE=/cross_file-"$CROSS".txt
# We need to control the version of llvm-config we're using, so we'll
# tweak the cross file or generate a native file to do so.
if test -n "$LLVM_VERSION"; then
LLVM_CONFIG="llvm-config-${LLVM_VERSION}"
echo -e "[binaries]\nllvm-config = '`which $LLVM_CONFIG`'" > native.file
if [ -n "$CROSS" ]; then
sed -i -e '/\[binaries\]/a\' -e "llvm-config = '`which $LLVM_CONFIG`'" $CROSS_FILE
fi
$LLVM_CONFIG --version
else
rm -f native.file
touch native.file
fi
# cross-xfail-$CROSS, if it exists, contains a list of tests that are expected
# to fail for the $CROSS configuration, one per line. you can then mark those
# tests in their meson.build with:
#
# test(...,
# should_fail: meson.get_cross_property('xfail', '').contains(t),
# )
#
# where t is the name of the test, and the '' is the string to search when
# not cross-compiling (which is empty, because for amd64 everything is
# expected to pass).
if [ -n "$CROSS" ]; then
CROSS_XFAIL=.gitlab-ci/cross-xfail-"$CROSS"
if [ -s "$CROSS_XFAIL" ]; then
sed -i \
-e '/\[properties\]/a\' \
-e "xfail = '$(tr '\n' , < $CROSS_XFAIL)'" \
"$CROSS_FILE"
fi
fi
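# With the three-entry xfail list shown earlier in this tree (lp_test_arit,
# lp_test_format, u_format_test), the injected property would end up as:
#   xfail = 'lp_test_arit,lp_test_format,u_format_test,'
# (the trailing comma comes from tr '\n' ,).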
# Only use GNU time if available, not any shell built-in command
case $CI_JOB_NAME in
# strace and wine don't seem to mix well
# ASAN leak detection is incompatible with strace
meson-mingw32-x86_64|*-asan*)
if test -f /usr/bin/time; then
MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
fi
;;
*)
if test -f /usr/bin/time -a -f /usr/bin/strace; then
MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time-strace.sh
fi
;;
esac
rm -rf _build
meson _build --native-file=native.file \
--wrap-mode=nofallback \
${CROSS+--cross "$CROSS_FILE"} \
-D prefix=`pwd`/install \
-D libdir=lib \
-D buildtype=${BUILDTYPE:-debug} \
-D build-tests=true \
-D libunwind=${UNWIND} \
${DRI_LOADERS} \
-D dri-drivers=${DRI_DRIVERS:-[]} \
${GALLIUM_ST} \
-D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
-D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
${EXTRA_OPTION}
cd _build
meson configure
ninja
LC_ALL=C.UTF-8 meson test --num-processes ${FDO_CI_CONCURRENT:-4} ${MESON_TEST_ARGS}
ninja install
cd ..

View File

@@ -1,27 +0,0 @@
#!/bin/sh
STRACEDIR=meson-logs/strace/$(for i in $@; do basename -z -- $i; echo -n _; done)
mkdir -p $STRACEDIR
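# For example (wrapper name and arguments are illustrative):
#   time-strace.sh ./deqp-gles2 --deqp-caselist-file=x.txt
# would log under meson-logs/strace/deqp-gles2_--deqp-caselist-file=x.txt_/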
# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.
/usr/bin/time -v strace -ff -tt -T -o $STRACEDIR/log "$@" &
TIMEPID=$!
STRACEPID=$(ps --ppid $TIMEPID -o pid=)
TESTPID=$(ps --ppid $STRACEPID -o pid=)
if test "x$TESTPID" != x; then
trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi
wait $TIMEPID
EXITCODE=$?
# Only keep strace logs if the test timed out
rm -rf $STRACEDIR &
exit $EXITCODE

View File

@@ -1,17 +0,0 @@
#!/bin/sh
# If the test times out, meson sends SIGTERM to this process.
# Simply exec'ing "time" would result in no output from that in this case.
# Instead, we need to run "time" in the background, catch the signals and
# propagate them to the actual test process.
/usr/bin/time -v "$@" &
TIMEPID=$!
TESTPID=$(ps --ppid $TIMEPID -o pid=)
if test "x$TESTPID" != x; then
trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
fi
wait $TIMEPID
exit $?

View File

@@ -1,36 +0,0 @@
diff --git a/generated_tests/CMakeLists.txt b/generated_tests/CMakeLists.txt
index 738526546..6f89048cd 100644
--- a/generated_tests/CMakeLists.txt
+++ b/generated_tests/CMakeLists.txt
@@ -206,11 +206,6 @@ piglit_make_generated_tests(
templates/gen_variable_index_write_tests/vs.shader_test.mako
templates/gen_variable_index_write_tests/fs.shader_test.mako
templates/gen_variable_index_write_tests/helpers.mako)
-piglit_make_generated_tests(
- vs_in_fp64.list
- gen_vs_in_fp64.py
- templates/gen_vs_in_fp64/columns.shader_test.mako
- templates/gen_vs_in_fp64/regular.shader_test.mako)
piglit_make_generated_tests(
shader_framebuffer_fetch_tests.list
gen_shader_framebuffer_fetch_tests.py)
@@ -279,7 +274,6 @@ add_custom_target(gen-gl-tests
gen_extensions_defined.list
vp-tex.list
variable_index_write_tests.list
- vs_in_fp64.list
gpu_shader4_tests.list
)
diff --git a/tests/sanity.py b/tests/sanity.py
index 12f1614c9..9019087e2 100644
--- a/tests/sanity.py
+++ b/tests/sanity.py
@@ -100,7 +100,6 @@ shader_tests = (
'spec/arb_tessellation_shader/execution/barrier-patch.shader_test',
'spec/arb_tessellation_shader/execution/built-in-functions/tcs-any-bvec4-using-if.shader_test',
'spec/arb_tessellation_shader/execution/sanity.shader_test',
- 'spec/arb_vertex_attrib_64bit/execution/vs_in/vs-input-uint_uvec4-double_dmat3x4_array2-position.shader_test',
'spec/glsl-1.50/execution/geometry-basic.shader_test',
'spec/oes_viewport_array/viewport-gs-write-simple.shader_test',
)

View File

@@ -1,134 +0,0 @@
#!/bin/sh
set -ex
if [ -z "$GPU_VERSION" ]; then
echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in your ci/piglit-gpu-version-*.txt)'
exit 1
fi
INSTALL=`pwd`/install
# Set up the driver environment.
export LD_LIBRARY_PATH=`pwd`/install/lib/
export EGL_PLATFORM=surfaceless
export VK_ICD_FILENAMES=`pwd`/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-`uname -m`}.json
RESULTS=`pwd`/${PIGLIT_RESULTS_DIR:-results}
mkdir -p $RESULTS
if [ -n "$PIGLIT_FRACTION" -o -n "$CI_NODE_INDEX" ]; then
FRACTION=`expr ${PIGLIT_FRACTION:-1} \* ${CI_NODE_TOTAL:-1}`
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction $FRACTION"
fi
# If the job is parallel at the gitlab job level, take the corresponding fraction
# of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction-start ${CI_NODE_INDEX}"
fi
if [ -e "$INSTALL/piglit-$GPU_VERSION-fails.txt" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --baseline $INSTALL/piglit-$GPU_VERSION-fails.txt"
fi
if [ -e "$INSTALL/piglit-$GPU_VERSION-flakes.txt" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --flakes $INSTALL/piglit-$GPU_VERSION-flakes.txt"
fi
if [ -e "$INSTALL/piglit-$GPU_VERSION-skips.txt" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --skips $INSTALL/piglit-$GPU_VERSION-skips.txt"
fi
set +e
if [ -n "$PIGLIT_PARALLEL" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs $PIGLIT_PARALLEL"
elif [ -n "$FDO_CI_CONCURRENT" ]; then
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs $FDO_CI_CONCURRENT"
else
PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --jobs 4"
fi
report_flakes() {
# Replace spaces in test names with _ to make the channel reporting not
# split it across lines, even though it makes it so you can't copy and
# paste from IRC into your flakes list.
flakes=`grep ",Flake" $1 | sed 's|,Flake.*||g' | sed 's| |_|g'`
if [ -z "$flakes" ]; then
return 0
fi
if [ -z "$FLAKES_CHANNEL" ]; then
return 0
fi
# The nick needs to be something unique so that multiple runners
# connecting at the same time don't race for one nick and get blocked.
# freenode has a 16-char limit on nicks (9 is the IETF standard, but
# various servers extend that). So, trim off the common prefixes of the
# runner name, and append the job ID so that software runners with more
# than one concurrent job (think swrast) don't collide. For freedreno,
# that gives us a nick as long as db410c-N-JJJJJJJJ, and it'll be a while
# before we make it to 9-digit jobs (we're at 7 so far).
runner=`echo $CI_RUNNER_DESCRIPTION | sed 's|mesa-||' | sed 's|google-freedreno-||g'`
bot="$runner-$CI_JOB_ID"
channel="$FLAKES_CHANNEL"
(
echo NICK $bot
echo USER $bot unused unused :Gitlab CI Notifier
sleep 10
echo "JOIN $channel"
sleep 1
desc="Flakes detected in job: $CI_JOB_URL on $CI_RUNNER_DESCRIPTION"
if [ -n "$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME" ]; then
desc="$desc on branch $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME ($CI_MERGE_REQUEST_TITLE)"
elif [ -n "$CI_COMMIT_BRANCH" ]; then
desc="$desc on branch $CI_COMMIT_BRANCH ($CI_COMMIT_TITLE)"
fi
echo "PRIVMSG $channel :$desc"
for flake in $flakes; do
echo "PRIVMSG $channel :$flake"
done
echo "PRIVMSG $channel :See $CI_JOB_URL/artifacts/browse/results/"
echo "QUIT"
) | nc irc.freenode.net 6667 > /dev/null
}
# wrapper to suppress +x to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
RESULTS_CSV=$RESULTS/results.csv
FAILURES_CSV=$RESULTS/failures.csv
export LD_PRELOAD=$TEST_LD_PRELOAD
piglit-runner \
run \
--piglit-folder /piglit \
--output $RESULTS \
--profile $PIGLIT_PROFILES \
--process-isolation \
$PIGLIT_RUNNER_OPTIONS \
-v -v
PIGLIT_EXITCODE=$?
export LD_PRELOAD=
deqp-runner junit \
--testsuite $PIGLIT_PROFILES \
--results $RESULTS/failures.csv \
--output $RESULTS/junit.xml \
--limit 50 \
--template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
# Report the flakes to the IRC channel for monitoring (if configured):
quiet report_flakes $RESULTS_CSV
exit $PIGLIT_EXITCODE

View File

@@ -1,278 +0,0 @@
#!/bin/sh
set -ex
INSTALL=$(realpath -s "$PWD"/install)
RESULTS=$(realpath -s "$PWD"/results)
mkdir -p "$RESULTS"
# Set up the driver environment.
# Modifying LD_LIBRARY_PATH directly here may cause problems when
# using a command wrapper. Hence, we only set it when running the
# command.
export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
# Sanity check to ensure that our environment is sufficient to make our tests
# run against the Mesa built by CI, rather than any installed distro version.
MESA_VERSION=$(cat "$INSTALL/VERSION" | sed 's/\./\\./g')
if [ "$VK_DRIVER" ]; then
### VULKAN ###
# Set the Vulkan driver to use.
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
# Set environment for Wine.
export WINEDEBUG="-all"
export WINEPREFIX="/dxvk-wine64"
export WINEESYNC=1
# Set environment for DXVK.
export DXVK_LOG_LEVEL="none"
export DXVK_STATE_CACHE=0
# Set environment for gfxreconstruct executables.
export PATH="/gfxreconstruct/build/bin:$PATH"
fi
SANITY_MESA_VERSION_CMD="vulkaninfo"
# Set up the Window System Interface (WSI)
# IMPORTANT:
#
# Nothing to do here.
#
# Run vulkan against the host's running X server (xvfb doesn't
# have DRI3 support).
# Set the DISPLAY env variable in each gitlab-runner's
# configuration file:
# https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section
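# A minimal (hypothetical) example of such a runner entry in config.toml:
#   [[runners]]
#     environment = ["DISPLAY=:0"]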
else
### GL/ES ###
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
# Set environment for apitrace executable.
export PATH="/apitrace/build:$PATH"
# Our rootfs may not have "less", which apitrace uses during
# apitrace dump
export PAGER=cat
fi
SANITY_MESA_VERSION_CMD="wflinfo"
# Set up the platform windowing system.
if [ "x$EGL_PLATFORM" = "xsurfaceless" ]; then
# Use the surfaceless EGL platform.
export DISPLAY=
export WAFFLE_PLATFORM="surfaceless_egl"
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"
if [ "x$GALLIUM_DRIVER" = "xvirpipe" ]; then
# piglit should use virpipe, while virgl_test_server runs on llvmpipe
export GALLIUM_DRIVER="$GALLIUM_DRIVER"
LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
GALLIUM_DRIVER=llvmpipe \
GALLIVM_PERF="nopt,no_filter_hacks" \
VTEST_USE_EGL_SURFACELESS=1 \
VTEST_USE_GLES=1 \
virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &
sleep 1
fi
elif [ "x$PIGLIT_PLATFORM" = "xgbm" ]; then
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
elif [ "x$PIGLIT_PLATFORM" = "xmixed_glx_egl" ]; then
# It is assumed that you have already brought up your X server before
# calling this script.
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
else
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
RUN_CMD_WRAPPER="xvfb-run --server-args=\"-noreset\" sh -c"
fi
fi
if [ "$ZINK_USE_LAVAPIPE" ]; then
export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/lvp_icd.x86_64.json"
fi
# If the job is parallel at the gitlab job level, take the corresponding
# fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
FAILURE_MESSAGE=$(printf "%s" "Can't parallelize piglit with multiple profiles")
quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
exit 1
fi
USE_CASELIST=1
fi
print_red() {
RED='\033[0;31m'
NC='\033[0m' # No Color
printf "${RED}"
"$@"
printf "${NC}"
}
# wrapper to suppress +x to avoid spamming the log
quiet() {
set +x
"$@"
set -x
}
replay_minio_upload_images() {
find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
| while read -r line; do
__TRACE="${line%-*-*}"
if grep -q "^$__PREFIX/$__TRACE: pass$" ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig"; then
if [ "x$CI_PROJECT_PATH" != "x$FDO_UPSTREAM_REPO" ]; then
continue
fi
__MINIO_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL"
__DESTINATION_FILE_PATH="${line##*-}"
if ci-fairy minio ls "minio://${MINIO_HOST}${__MINIO_PATH}/${__DESTINATION_FILE_PATH}" 2>/dev/null; then
continue
fi
else
__MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
__DESTINATION_FILE_PATH="$__MINIO_TRACES_PREFIX/${line##*-}"
# Adding to the JUnit the direct link to the diff page in
# the dashboard
__PIGLIT_TESTCASE_CLASSNAME="piglit\.trace\.$PIGLIT_REPLAY_DEVICE_NAME\.$(dirname $__TRACE | sed 's%/%\\.%g;s@%@\\%@')"
__PIGLIT_TESTCASE_NAME="$(basename $__TRACE | sed 's%\.%_%g;s@%@\\%@')"
__DASHBOARD_URL="https://tracie.freedesktop.org/dashboard/imagediff/${CI_PROJECT_PATH}/${CI_JOB_ID}/${__TRACE}"
__START_TEST_PATTERN='<testcase classname="'"${__PIGLIT_TESTCASE_CLASSNAME}"'" name="'"${__PIGLIT_TESTCASE_NAME}"'" status="fail"'
__REPLACE_TEST_PATTERN='</system-out><failure type="fail"/></testcase>'
# Replace in the range between __START_TEST_PATTERN and
# __REPLACE_TEST_PATTERN leaving __START_TEST_PATTERN out
# from the substitution
sed '\%'"${__START_TEST_PATTERN}"'%,\%'"${__REPLACE_TEST_PATTERN}"'%{\%'"${__START_TEST_PATTERN}"'%b;s%'"${__REPLACE_TEST_PATTERN}"'%</system-out><failure type="fail">To view the image differences visit: '"${__DASHBOARD_URL}"'</failure></testcase>%}' \
-i "$RESULTS"/junit.xml
fi
ci-fairy minio cp "$RESULTS/$__PREFIX/$line" \
"minio://${MINIO_HOST}${__MINIO_PATH}/${__DESTINATION_FILE_PATH}"
done
}
SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
rm -rf results
cd /piglit
if [ -n "$USE_CASELIST" ]; then
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
PIGLIT_GENTESTS="./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format \"{name}\" > /tmp/case-list.txt"
RUN_GENTESTS="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $PIGLIT_GENTESTS"
eval $RUN_GENTESTS
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi
PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
PIGLIT_CMD="./piglit run -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES "$(/usr/bin/printf "%q" "$RESULTS")
RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $PIGLIT_CMD"
if [ "$RUN_CMD_WRAPPER" ]; then
RUN_CMD="set +e; $RUN_CMD_WRAPPER "$(/usr/bin/printf "%q" "$RUN_CMD")"; set -e"
fi
FAILURE_MESSAGE=$(printf "%s" "Unexpected change in results:")
eval $RUN_CMD
if [ $? -ne 0 ]; then
printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
fi
ARTIFACTS_BASE_URL="https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts"
if [ ${PIGLIT_JUNIT_RESULTS:-0} -eq 1 ]; then
./piglit summary aggregate "$RESULTS" -o junit.xml
FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the JUnit report for failures at: ${ARTIFACTS_BASE_URL}/results/junit.xml")
fi
PIGLIT_RESULTS="${PIGLIT_RESULTS:-$PIGLIT_PROFILES}"
RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt"
mkdir -p .gitlab-ci/piglit
./piglit summary console "$RESULTS"/results.json.bz2 \
| tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
| head -n -1 | grep -v ": pass" \
| sed '/^summary:/Q' \
> $RESULTSFILE
if [ "x$PIGLIT_PROFILES" = "xreplay" ] \
&& [ ${PIGLIT_REPLAY_UPLOAD_TO_MINIO:-0} -eq 1 ]; then
ci-fairy minio login $CI_JOB_JWT
__PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
__MINIO_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
__MINIO_TRACES_PREFIX="traces"
ci-fairy minio cp "$RESULTS"/results.json.bz2 \
"minio://${MINIO_HOST}${__MINIO_PATH}/${__MINIO_TRACES_PREFIX}/results.json.bz2"
quiet replay_minio_upload_images
ci-fairy minio cp "$RESULTS"/junit.xml \
"minio://${MINIO_HOST}${__MINIO_PATH}/${__MINIO_TRACES_PREFIX}/junit.xml"
fi
if [ -n "$USE_CASELIST" ]; then
# Just filter the expected results based on the tests that were actually
# executed, and switch to the version with no summary
cat ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" | sed '/^summary:/Q' | rev \
| cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
grep -F -f /tmp/executed.txt "$INSTALL/$PIGLIT_RESULTS.txt" \
> ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" || true
else
cp "$INSTALL/$PIGLIT_RESULTS.txt" \
".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline"
fi
if diff -q ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE; then
exit 0
fi
if [ ${PIGLIT_HTML_SUMMARY:-1} -eq 1 ]; then
./piglit summary html --exclude-details=pass \
"$RESULTS"/summary "$RESULTS"/results.json.bz2
if [ "x$PIGLIT_PROFILES" = "xreplay" ]; then
find "$RESULTS"/summary -type f -name "*.html" -print0 \
| xargs -0 sed -i 's%<img src="file://'"${RESULTS}"'.*-\([0-9a-f]*\)\.png%<img src="https://'"${MINIO_HOST}${PIGLIT_REPLAY_ARTIFACTS_BASE_URL}"'/traces/\1.png%g'
find "$RESULTS"/summary -type f -name "*.html" -print0 \
| xargs -0 sed -i 's%<img src="file://%<img src="https://'"${MINIO_HOST}${PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL}"'/%g'
fi
FAILURE_MESSAGE=$(printf "${FAILURE_MESSAGE}\n%s" "Check the HTML summary for problems at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html")
fi
quiet print_red printf "%s\n" "$FAILURE_MESSAGE"
quiet diff --color=always -u ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline" $RESULTSFILE
exit 1

View File

@@ -1,72 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
VERSION=`cat install/VERSION`
rm -rf results
cd /piglit
export OCL_ICD_VENDORS=$OLDPWD/install/etc/OpenCL/vendors/
set +e
unset DISPLAY
export LD_LIBRARY_PATH=$OLDPWD/install/lib
clinfo
# If the job is parallel at the gitlab job level, take the corresponding
# fraction of the caselist.
if [ -n "$CI_NODE_INDEX" ]; then
if [ "$PIGLIT_PROFILES" != "${PIGLIT_PROFILES% *}" ]; then
echo "Can't parallelize piglit with multiple profiles"
exit 1
fi
USE_CASELIST=1
fi
if [ -n "$USE_CASELIST" ]; then
./piglit print-cmd $PIGLIT_TESTS $PIGLIT_PROFILES --format "{name}" > /tmp/case-list.txt
sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
PIGLIT_TESTS="--test-list /tmp/case-list.txt"
fi
./piglit run -c -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS $PIGLIT_PROFILES $OLDPWD/results
retVal=$?
if [ $retVal -ne 0 ]; then
echo "Found $(cat /tmp/version.txt), expected $VERSION"
fi
set -e
PIGLIT_RESULTS=${PIGLIT_RESULTS:-$PIGLIT_PROFILES}
mkdir -p .gitlab-ci/piglit
./piglit summary console $OLDPWD/results \
| tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
| head -n -1 \
| grep -v ": pass" \
| sed '/^summary:/Q' \
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt
if [ -n "$USE_CASELIST" ]; then
# Just filter the expected results based on the tests that were actually
# executed, and switch to the version with no summary
cat .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig | sed '/^summary:/Q' | rev \
| cut -f2- -d: | rev | sed "s/$/:/g" > /tmp/executed.txt
grep -F -f /tmp/executed.txt $OLDPWD/install/$PIGLIT_RESULTS.txt \
> .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline || true
else
cp $OLDPWD/install/$PIGLIT_RESULTS.txt .gitlab-ci/piglit/$PIGLIT_RESULTS.txt.baseline
fi
if diff -q .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}; then
exit 0
fi
./piglit summary html --exclude-details=pass $OLDPWD/results/summary $OLDPWD/results
echo Unexpected change in results:
diff -u .gitlab-ci/piglit/$PIGLIT_RESULTS.txt{.baseline,}
exit 1

View File

@@ -1,54 +0,0 @@
#!/bin/bash
set -e
set -o xtrace
CROSS_FILE=/cross_file-"$CROSS".txt
# Delete unused bin and includes from artifacts to save space.
rm -rf install/bin install/include
# Strip the drivers in the artifacts to cut 80% of the artifacts size.
if [ -n "$CROSS" ]; then
STRIP=`sed -n -E "s/strip\s*=\s*'(.*)'/\1/p" "$CROSS_FILE"`
if [ -z "$STRIP" ]; then
echo "Failed to find strip command in cross file"
exit 1
fi
else
STRIP="strip"
fi
if [ -z "$ARTIFACTS_DEBUG_SYMBOLS"]; then
find install -name \*.so -exec $STRIP {} \;
fi
# Test runs don't pull down the git tree, so put the dEQP helper
# script and associated bits there.
echo "$(cat VERSION) (git-$(git rev-parse HEAD | cut -b -10))" >> install/VERSION
cp -Rp .gitlab-ci/bare-metal install/
cp -Rp .gitlab-ci/piglit install/
cp -Rp .gitlab-ci/fossils.yml install/
cp -Rp .gitlab-ci/fossils install/
cp -Rp .gitlab-ci/fossilize-runner.sh install/
cp -Rp .gitlab-ci/deqp-runner.sh install/
cp -Rp .gitlab-ci/deqp-*.txt install/
find . -path \*/ci/\*.txt \
-o -path \*/ci/\*traces\*.yml \
| xargs -I '{}' cp -p '{}' install/
# Tar up the install dir so that symlinks and hardlinks aren't each
# packed separately in the zip file.
mkdir -p artifacts/
tar -cf artifacts/install.tar install
if [ -n "$MINIO_ARTIFACT_NAME" ]; then
# Pass needed files to the test stage
cp $CI_PROJECT_DIR/.gitlab-ci/generate_lava.py artifacts/.
cp $CI_PROJECT_DIR/.gitlab-ci/lava.yml.jinja2 artifacts/.
MINIO_ARTIFACT_NAME="$MINIO_ARTIFACT_NAME.tar.gz"
gzip -c artifacts/install.tar > ${MINIO_ARTIFACT_NAME}
MINIO_PATH=${MINIO_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
ci-fairy minio login $CI_JOB_JWT
ci-fairy minio cp ${MINIO_ARTIFACT_NAME} minio://${MINIO_PATH}/${MINIO_ARTIFACT_NAME}
fi

View File

@@ -1,18 +0,0 @@
set -e
set -v
ARTIFACTSDIR=`pwd`/shader-db
mkdir -p $ARTIFACTSDIR
export DRM_SHIM_DEBUG=true
LIBDIR=`pwd`/install/lib
export LD_LIBRARY_PATH=$LIBDIR
cd /usr/local/shader-db
for driver in freedreno intel v3d; do
echo "Running drm-shim for $driver"
env LD_PRELOAD=$LIBDIR/lib${driver}_noop_drm_shim.so \
./run -j${FDO_CI_CONCURRENT:-4} ./shaders \
> $ARTIFACTSDIR/${driver}-shader-db.txt
done

View File

@@ -1,380 +0,0 @@
# This file lists source dependencies to avoid creating/running jobs
# whose outcome cannot be changed by modifications on a branch.
# Generic rule to not run the job during scheduled pipelines
# ----------------------------------------------------------
.scheduled_pipelines-rules:
rules: &ignore_scheduled_pipelines
if: '$CI_PIPELINE_SOURCE == "schedule"'
when: never
# Mesa core source file dependencies
# ----------------------------------
.mesa-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &mesa_core_file_list
- .gitlab-ci.yml
- .gitlab-ci/**/*
- include/**/*
- meson.build
- src/*
- src/compiler/**/*
- src/drm-shim/**/*
- src/egl/**/*
- src/gbm/**/*
- src/glx/**/*
- src/gtest/**/*
- src/hgl/**/*
- src/include/**/*
- src/loader/**/*
- src/mapi/**/*
- src/mesa/*
- src/mesa/drivers/*
- src/mesa/drivers/common/**/*
- src/mesa/drivers/dri/*
- src/mesa/drivers/dri/common/**/*
- src/mesa/main/**/*
- src/mesa/math/**/*
- src/mesa/program/**/*
- src/mesa/sparc/**/*
- src/mesa/state_tracker/**/*
- src/mesa/swrast/**/*
- src/mesa/swrast_setup/**/*
- src/mesa/tnl/**/*
- src/mesa/tnl_dd/**/*
- src/mesa/vbo/**/*
- src/mesa/x86/**/*
- src/mesa/x86-64/**/*
- src/util/**/*
.vulkan-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &vulkan_file_list
- src/vulkan/**/*
when: on_success
- when: never
# Gallium core source file dependencies
# -------------------------------------
.gallium-rules:
rules:
- *ignore_scheduled_pipelines
- changes: &gallium_core_file_list
- src/gallium/*
- src/gallium/auxiliary/**/*
- src/gallium/drivers/*
- src/gallium/include/**/*
- src/gallium/state_trackers/**/*
- src/gallium/targets/**/*
- src/gallium/tests/**/*
- src/gallium/winsys/*
.softpipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/softpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- when: never
.llvmpipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &llvmpipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- when: never
.lavapipe-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes: &lavapipe_file_list
- src/gallium/drivers/llvmpipe/**/*
- src/gallium/frontends/lavapipe/**/*
- src/gallium/winsys/sw/**/*
when: on_success
- when: never
.llvmpipe-cl-rules:
stage: software-renderer
rules:
- *ignore_scheduled_pipelines
- changes:
- .gitlab-ci.yml
- .gitlab-ci/**/*
- meson.build
- include/**/*
- src/compiler/**/*
- src/include/**/*
- src/util/**/*
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes: &clover_file_list
- src/gallium/frontends/clover/**/*
when: on_success
- when: never
.freedreno-rules:
stage: freedreno
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
# Note: when https://gitlab.com/gitlab-org/gitlab/-/issues/198688
# is supported, we can change the src/freedreno/ rule to explicitly
# exclude tools
- src/freedreno/**/*
- src/gallium/drivers/freedreno/**/*
- src/gallium/winsys/freedreno/**/*
when: on_success
- when: never
.panfrost-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/panfrost/**/*
- src/gallium/winsys/panfrost/**/*
- src/panfrost/**/*
when: on_success
- when: never
.vc4-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/broadcom/**/*
- src/gallium/drivers/vc4/**/*
- src/gallium/winsys/kmsro/**/*
- src/gallium/winsys/vc4/**/*
when: on_success
- when: never
.v3d-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/broadcom/**/*
- src/gallium/drivers/v3d/**/*
- src/gallium/winsys/kmsro/**/*
- src/gallium/winsys/v3d/**/*
when: on_success
- when: never
.v3dv-rules:
stage: broadcom
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
- src/broadcom/**/*
when: on_success
- when: never
.lima-rules:
stage: arm
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/lima/**/*
- src/gallium/winsys/lima/**/*
- src/lima/**/*
when: on_success
- when: never
.radv-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
- src/amd/**/*
- src/vulkan/**/*
when: on_success
- when: never
.virgl-rules:
stage: layered-backends
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*llvmpipe_file_list
when: on_success
- changes:
- src/gallium/drivers/virgl/**/*
- src/gallium/winsys/virgl/**/*
when: on_success
- when: never
.radeonsi-rules:
stage: amd
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/radeonsi/**/*
- src/gallium/winsys/amdgpu/**/*
- src/amd/*
- src/amd/addrlib/**/*
- src/amd/common/**/*
- src/amd/llvm/**/*
- src/amd/registers/**/*
when: on_success
- when: never
.iris-rules:
stage: intel
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
- src/gallium/drivers/iris/**/*
- src/gallium/winsys/iris/**/*
- src/intel/**/*
when: on_success
- when: never
# Unfortunately YAML doesn't let us concatenate arrays, so we have to do the
# rules duplication manually
.windows-build-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes: &d3d12_file_list
- src/gallium/drivers/d3d12/**/*
- src/microsoft/**/*
- src/gallium/frontends/wgl/*
- src/gallium/winsys/d3d12/wgl/*
- src/gallium/targets/libgl-gdi/*
- src/gallium/targets/libgl-d3d12/*
when: on_success
- when: never
.windows-test-rules:
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
*vulkan_file_list
when: on_success
- changes: *d3d12_file_list
when: on_success
- when: never
.zink-rules:
stage: layered-backends
rules:
- *ignore_scheduled_pipelines
- changes:
*mesa_core_file_list
when: on_success
- changes:
*gallium_core_file_list
when: on_success
- changes:
*lavapipe_file_list
when: on_success
- changes:
- src/gallium/drivers/zink/**/*
when: on_success
- when: never

View File

@@ -1,13 +0,0 @@
# escape=`
FROM mcr.microsoft.com/windows:1809
# Make sure any failure in PowerShell scripts is fatal
SHELL ["powershell", "-ExecutionPolicy", "RemoteSigned", "-Command", "$ErrorActionPreference = 'Stop';"]
ENV ErrorActionPreference='Stop'
COPY mesa_deps_vs2019.ps1 C:\
RUN C:\mesa_deps_vs2019.ps1
COPY mesa_deps.ps1 C:\
RUN C:\mesa_deps.ps1

View File

@@ -1,32 +0,0 @@
# Native Windows GitLab CI builds
Unlike Linux, Windows cannot reuse the freedesktop ci-templates as they exist,
because Podman, Skopeo, and even Docker-in-Docker builds are not available
under Windows.
We still reuse the same model: build a base container with the core operating
system and infrequently-changed build dependencies, then execute Mesa builds
only inside that base container. This is open-coded in PowerShell scripts.
## Base container build
The base container build job executes the `mesa_container.ps1` script which
reproduces the ci-templates behaviour. It looks for the registry image in
the user's namespace, and exits if found. If not found, it tries to copy
the same image tag from the upstream Mesa repository. If that is not found,
the image is rebuilt inside the user's namespace.
The rebuild executes `docker build` which calls `mesa_deps.ps1` inside the
container to fetch and install all build dependencies. This includes Visual
Studio Community Edition (downloaded from Microsoft, under the license which
allows use by open-source projects), other build tools from Chocolatey, and
finally Meson and Python dependencies from PyPI.
This job is executed inside a Windows shell environment directly inside the
host, without Docker.
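Condensed, the lookup-and-fallback logic in `mesa_container.ps1` amounts to the
sketch below (login/logout and the `--config` handling are omitted here):
```powershell
docker pull "$registry_user_image"                 # reuse the user's image if present
if (!$?) {
    docker pull "$registry_central_image"          # otherwise copy the upstream tag...
    if ($?) {
        docker tag "$registry_central_image" "$registry_user_image"
    } else {
        docker build --no-cache -t "$registry_user_image" .   # ...or rebuild from scratch
    }
    docker push "$registry_user_image"
}
```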
## Mesa build
The Mesa build runs inside the base container, executing `mesa_build.ps1`.
This simply compiles Mesa using Meson and Ninja, executing the build and
unit tests. Currently, no build artifacts are captured.
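Stripped of the full option list, the build step is roughly the sketch below
(the exact flags live in `mesa_build.ps1` and run inside a VsDevCmd environment):
```powershell
meson _build --buildtype=release -Dbuild-tests=true -Dgallium-drivers=swrast,d3d12 -Dvulkan-drivers=swrast
ninja -C _build install
meson test -C _build
```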

View File

@@ -1,22 +0,0 @@
# force the CA cert cache to be rebuilt, in case Meson tries to access anything
Write-Host "Refreshing Windows TLS CA cache"
(New-Object System.Net.WebClient).DownloadString("https://github.com") >$null
Get-Date
Write-Host "Compiling Mesa"
$builddir = New-Item -ItemType Directory -Name "_build"
$installdir = New-Item -ItemType Directory -Name "_install"
Push-Location $builddir.FullName
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && meson --default-library=shared -Dzlib:default_library=static --buildtype=release -Db_ndebug=false -Db_vscrt=mt --cmake-prefix-path=`"C:\llvm-10`" --pkg-config-path=`"C:\llvm-10\lib\pkgconfig;C:\llvm-10\share\pkgconfig;C:\spirv-tools\lib\pkgconfig`" --prefix=`"$installdir`" -Dllvm=enabled -Dshared-llvm=disabled -Dvulkan-drivers=swrast -Dgallium-drivers=swrast,d3d12 -Dmicrosoft-clc=enabled -Dstatic-libclc=all -Dbuild-tests=true -Dwerror=true && ninja -j32 install && meson test --num-processes 32"
$buildstatus = $?
Pop-Location
Get-Date
if (!$buildstatus) {
Write-Host "Mesa build or test failed"
Exit 1
}
Copy-Item ".\.gitlab-ci\windows\piglit_run.ps1" -Destination $installdir
Copy-Item ".\.gitlab-ci\windows\quick_gl.txt" -Destination $installdir

View File

@@ -1,56 +0,0 @@
# Implements the equivalent of ci-templates container-ifnot-exists, using
# Docker directly as we don't have buildah/podman/skopeo available under
# Windows, nor can we execute Docker-in-Docker
$registry_uri = $args[0]
$registry_username = $args[1]
$registry_password = $args[2]
$registry_user_image = $args[3]
$registry_central_image = $args[4]
Set-Location -Path ".\.gitlab-ci\windows"
docker --config "windows-docker.conf" login -u "$registry_username" -p "$registry_password" "$registry_uri"
if (!$?) {
Write-Host "docker login failed to $registry_uri"
Exit 1
}
# if the image already exists, don't rebuild it
docker --config "windows-docker.conf" pull "$registry_user_image"
if ($?) {
Write-Host "User image $registry_user_image already exists; not rebuilding"
docker --config "windows-docker.conf" logout "$registry_uri"
Exit 0
}
# if the image already exists upstream, copy it
docker --config "windows-docker.conf" pull "$registry_central_image"
if ($?) {
Write-Host "Copying central image $registry_central_image to user image $registry_user_image"
docker --config "windows-docker.conf" tag "$registry_central_image" "$registry_user_image"
docker --config "windows-docker.conf" push "$registry_user_image"
$pushstatus = $?
docker --config "windows-docker.conf" logout "$registry_uri"
if (!$pushstatus) {
Write-Host "Pushing image to $registry_user_image failed"
Exit 1
}
Exit 0
}
Write-Host "No image found at $registry_user_image or $registry_central_image; rebuilding"
docker --config "windows-docker.conf" build --no-cache -t "$registry_user_image" .
if (!$?) {
Write-Host "Container build failed"
docker --config "windows-docker.conf" logout "$registry_uri"
Exit 1
}
Get-Date
docker --config "windows-docker.conf" push "$registry_user_image"
$pushstatus = $?
docker --config "windows-docker.conf" logout "$registry_uri"
if (!$pushstatus) {
Write-Host "Pushing image to $registry_user_image failed"
Exit 1
}

View File

@@ -1,194 +0,0 @@
# Download new TLS certs from Windows Update
Get-Date
Write-Host "Updating TLS certificate store"
$certdir = (New-Item -ItemType Directory -Name "_tlscerts")
certutil -syncwithWU "$certdir"
Foreach ($file in (Get-ChildItem -Path "$certdir\*" -Include "*.crt")) {
Import-Certificate -FilePath $file -CertStoreLocation Cert:\LocalMachine\Root
}
Remove-Item -Recurse -Path $certdir
Get-Date
Write-Host "Installing Chocolatey"
Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
Import-Module "$env:ProgramData\chocolatey\helpers\chocolateyProfile.psm1"
Update-SessionEnvironment
Write-Host "Installing Chocolatey packages"
# Chocolatey tries to download winflexbison from SourceForge, which is not super reliable, and has no retry
# loop of its own - so we give it a helping hand here
For ($i = 0; $i -lt 5; $i++) {
choco install -y python3 --params="/InstallDir:C:\python3"
$python_install = $?
choco install --allow-empty-checksums -y cmake git git-lfs ninja pkgconfiglite winflexbison --installargs "ADD_CMAKE_TO_PATH=System"
$other_install = $?
$choco_installed = $other_install -and $python_install
if ($choco_installed) {
Break
}
}
if (!$choco_installed) {
Write-Host "Couldn't install dependencies from Chocolatey"
Exit 1
}
# Add Chocolatey's native install path
Update-SessionEnvironment
# Python and CMake add themselves to the system environment path, which doesn't get refreshed
# until we start a new shell
$env:PATH = "C:\python3;C:\python3\scripts;C:\Program Files\CMake\bin;$env:PATH"
Start-Process -NoNewWindow -Wait git -ArgumentList 'config --global core.autocrlf false'
Get-Date
Write-Host "Installing Meson, Mako and numpy"
pip3 install meson mako numpy
if (!$?) {
Write-Host "Failed to install dependencies from pip"
Exit 1
}
# we want more secure TLS 1.2 for most things, but it breaks SourceForge
# downloads so must be done after Chocolatey use
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -bor [Net.SecurityProtocolType]::Tls13;
Get-Date
Write-Host "Cloning LLVM master"
git clone -b release/12.x --depth=1 https://github.com/llvm/llvm-project llvm-project
if (!$?) {
Write-Host "Failed to clone LLVM repository"
Exit 1
}
# ideally we want to use a tag here instead of a sha,
# but as of today, SPIRV-LLVM-Translator doesn't have
# a tag matching LLVM 12.0.0
Get-Date
Write-Host "Cloning SPIRV-LLVM-Translator"
git clone https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator
if (!$?) {
Write-Host "Failed to clone SPIRV-LLVM-Translator repository"
Exit 1
}
Push-Location llvm-project/llvm/projects/SPIRV-LLVM-Translator
git checkout 5b641633b3bcc3251a52260eee11db13a79d7258
Pop-Location
Get-Date
# slightly convoluted syntax but avoids the CWD being under the PS filesystem meta-path
$llvm_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build"
Push-Location -Path $llvm_build.FullName
Write-Host "Compiling LLVM and Clang"
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../llvm -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_CRT_RELEASE=MT -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_TARGETS_TO_BUILD=X86 -DLLVM_OPTIMIZED_TABLEGEN=TRUE -DLLVM_ENABLE_ASSERTIONS=TRUE -DLLVM_INCLUDE_UTILS=OFF -DLLVM_INCLUDE_RUNTIMES=OFF -DLLVM_INCLUDE_TESTS=OFF -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_INCLUDE_GO_TESTS=OFF -DLLVM_INCLUDE_BENCHMARKS=OFF -DLLVM_BUILD_LLVM_C_DYLIB=OFF -DLLVM_ENABLE_DIA_SDK=OFF -DCLANG_BUILD_TOOLS=ON -DLLVM_SPIRV_INCLUDE_TESTS=OFF && ninja -j32 install'
$buildstatus = $?
Pop-Location
if (!$buildstatus) {
Write-Host "Failed to compile LLVM"
Exit 1
}
Get-Date
$libclc_build = New-Item -ItemType Directory -Path ".\llvm-project" -Name "build-libclc"
Push-Location -Path $libclc_build.FullName
Write-Host "Compiling libclc"
# libclc can only be built with Ninja, because CMake's VS backend doesn't know how to compile new language types
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake ../libclc -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER="/llvm-10/bin/clang-cl.exe" -DCMAKE_CXX_FLAGS="-m64" -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\llvm-10" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" && ninja -j32 install'
$buildstatus = $?
Pop-Location
Remove-Item -Recurse -Path $libclc_build
if (!$buildstatus) {
Write-Host "Failed to compile libclc"
Exit 1
}
Remove-Item -Recurse -Path $llvm_build
Get-Date
Write-Host "Cloning SPIRV-Tools"
git clone https://github.com/KhronosGroup/SPIRV-Tools
if (!$?) {
Write-Host "Failed to clone SPIRV-Tools repository"
Exit 1
}
git clone https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers
if (!$?) {
Write-Host "Failed to clone SPIRV-Headers repository"
Exit 1
}
Write-Host "Building SPIRV-Tools"
$spv_build = New-Item -ItemType Directory -Path ".\SPIRV-Tools" -Name "build"
Push-Location -Path $spv_build.FullName
# SPIRV-Tools doesn't use multi-threaded MSVCRT, but we need it to
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POLICY_DEFAULT_CMP0091=NEW -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DCMAKE_INSTALL_PREFIX="C:\spirv-tools" && ninja -j32 install'
$buildstatus = $?
Pop-Location
Remove-Item -Recurse -Path $spv_build
if (!$buildstatus) {
Write-Host "Failed to compile SPIRV-Tools"
Exit 1
}
Get-Date
Write-Host "Downloading Freeglut"
$freeglut_zip = 'freeglut-MSVC.zip'
$freeglut_url = "https://www.transmissionzero.co.uk/files/software/development/GLUT/$freeglut_zip"
For ($i = 0; $i -lt 5; $i++) {
Invoke-WebRequest -Uri $freeglut_url -OutFile $freeglut_zip
$freeglut_downloaded = $?
if ($freeglut_downloaded) {
Break
}
}
if (!$freeglut_downloaded) {
Write-Host "Failed to download Freeglut"
Exit 1
}
Get-Date
Write-Host "Installing Freeglut"
Expand-Archive $freeglut_zip -DestinationPath C:\
if (!$?) {
Write-Host "Failed to install Freeglut"
Exit 1
}
Get-Date
Write-Host "Downloading glext.h"
New-Item -ItemType Directory -Path ".\glext" -Name "GL"
$ProgressPreference = "SilentlyContinue"
Invoke-WebRequest -Uri 'https://www.khronos.org/registry/OpenGL/api/GL/glext.h' -OutFile '.\glext\GL\glext.h' | Out-Null
Get-Date
Write-Host "Cloning Piglit"
git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\src\piglit'
if (!$?) {
Write-Host "Failed to clone Piglit repository"
Exit 1
}
Push-Location -Path C:\src\piglit
git checkout b0bbeb876a506e0ee689dd7e17cee374c8284058
Pop-Location
Get-Date
$piglit_build = New-Item -ItemType Directory -Path "C:\src\piglit" -Name "build"
Push-Location -Path $piglit_build.FullName
Write-Host "Compiling Piglit"
cmd.exe /C 'C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="C:\Piglit" -DGLUT_INCLUDE_DIR=C:\freeglut\include -DGLUT_glut_LIBRARY_RELEASE=C:\freeglut\lib\x64\freeglut.lib -DGLEXT_INCLUDE_DIR=.\glext && ninja -j32'
$buildstatus = $?
ninja -j32 install | Out-Null
$installstatus = $?
Pop-Location
Remove-Item -Recurse -Path $piglit_build
if (!$buildstatus -Or !$installstatus) {
Write-Host "Failed to compile or install Piglit"
Exit 1
}
Copy-Item -Path C:\freeglut\bin\x64\freeglut.dll -Destination C:\Piglit\lib\piglit\bin\freeglut.dll
Get-Date
Write-Host "Complete"

View File

@@ -1,18 +0,0 @@
# we want more secure TLS 1.2 for most things
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;
# VS16.x is 2019
$msvc_2019_url = 'https://aka.ms/vs/16/release/vs_buildtools.exe'
Get-Date
Write-Host "Downloading Visual Studio 2019 build tools"
Invoke-WebRequest -Uri $msvc_2019_url -OutFile C:\vs_buildtools.exe
Get-Date
Write-Host "Installing Visual Studio 2019"
Start-Process -NoNewWindow -Wait C:\vs_buildtools.exe -ArgumentList '--wait --quiet --norestart --nocache --installPath C:\BuildTools --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Component.VC.ATL --add Microsoft.VisualStudio.Component.VC.ATLMFC --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 --add Microsoft.VisualStudio.Component.Graphics.Tools --add Microsoft.VisualStudio.Component.Windows10SDK.18362 --includeRecommended'
if (!$?) {
Write-Host "Failed to install Visual Studio tools"
Exit 1
}
Remove-Item C:\vs_buildtools.exe -Force

View File

@@ -1,26 +0,0 @@
$env:PIGLIT_NO_FAST_SKIP = 1
Copy-Item -Path _install\bin\opengl32.dll -Destination C:\Piglit\lib\piglit\bin\opengl32.dll
# Run this using VsDevCmd.bat to ensure DXIL.dll is in %PATH%
cmd.exe /C "C:\BuildTools\Common7\Tools\VsDevCmd.bat -host_arch=amd64 -arch=amd64 && py -3 C:\Piglit\bin\piglit.py run `"$env:PIGLIT_PROFILE`" $env:PIGLIT_OPTIONS $env:PIGLIT_TESTS .\results"
py -3 C:\Piglit\bin\piglit.py summary console .\results | Select -SkipLast 1 | Select-String -NotMatch -Pattern ': pass' | Set-Content -Path .\result.txt
$reference = Get-Content ".\_install\$env:PIGLIT_PROFILE.txt"
$result = Get-Content .\result.txt
if (-Not ($reference -And $result)) {
Exit 1
}
$diff = Compare-Object -ReferenceObject $reference -DifferenceObject $result
if (-Not $diff) {
Exit 0
}
py -3 C:\Piglit\bin\piglit.py summary html --exclude-details=pass .\summary .\results
Write-Host "Unexpected change in results:"
Write-Output $diff | Format-Table -Property SideIndicator,InputObject -Wrap
Exit 1

File diff suppressed because it is too large

View File

@@ -1,20 +0,0 @@
[binaries]
c = ['ccache', 'x86_64-w64-mingw32-gcc']
cpp = ['ccache', 'x86_64-w64-mingw32-g++']
ar = 'x86_64-w64-mingw32-ar'
strip = 'x86_64-w64-mingw32-strip'
pkgconfig = '/usr/local/bin/x86_64-w64-mingw32-pkg-config'
windres = 'x86_64-w64-mingw32-windres'
exe_wrapper = ['wine64']
[properties]
needs_exe_wrapper = True
sys_root = '/usr/x86_64-w64-mingw32/'
[host_machine]
system = 'windows'
cpu_family = 'x86_64'
cpu = 'x86_64'
endian = 'little'
; vim: ft=dosini

View File

@@ -1,71 +0,0 @@
### Before submitting your bug report:
- Check if a new version of Mesa is available which might have fixed the problem.
- If you can, check if the latest development version (git master) works better.
- Check if your bug has already been reported here.
- For any logs, backtraces, etc - use [code blocks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)
- As examples of good bug reports you may review one of these - #2598, #2615, #2608
Otherwise, fill in the requested information below.
And please remove anything that doesn't apply to keep things readable :)
### Description
Describe what you are doing, what you expect, and what you're
seeing instead. How frequent is the issue? Is it a one-time occurrence? Does it appear multiple times but randomly? Can you easily reproduce it?
"It doesn't work" usually is not a helpful description of an issue.
The more detail about how things are going wrong, the better.
### Screenshots/video files
For rendering errors, attach screenshots of the problem and (if possible) of how it should look. For freezes, it may be useful to provide a screenshot of the affected game scene. Prefer screenshots over videos.
### Log files (for system lockups / game freezes / crashes)
- Backtrace (for crashes)
- Output of `dmesg`
- Hang reports: Run with `RADV_DEBUG=hang` and attach the files created in `$HOME/radv_dumps_*/`
### Steps to reproduce
How can Mesa developers reproduce the issue? When reporting a game issue, start explaining from a fresh save file and don't assume prior knowledge of the game's story.
Example:
1. `Start new game and enter second mission (takes about 10 minutes)`
2. `Talk to the NPC called "Frank"`
3. `Observe flickering on Frank's body`
### System information
Please post `inxi -GSC -xx` output ([fenced with triple backticks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)) OR fill information below manually
- OS: (`cat /etc/os-release | grep "NAME"`)
- GPU: (`lspci -nn | grep VGA` or `lshw -C display -numeric`)
- Kernel version: (`uname -a`)
- Mesa version: (`glxinfo -B | grep "OpenGL version string"`)
- Desktop environment: (`env | grep XDG_CURRENT_DESKTOP`)
#### If applicable
- Xserver version: (`sudo X -version`)
- DXVK version:
- Wine/Proton version:
### Regression
Did it used to work in a previous Mesa version? It can greatly help to know when the issue started.
### API captures (if applicable, optional)
Consider recording a [GFXReconstruct](https://github.com/LunarG/gfxreconstruct/blob/dev/USAGE_desktop.md) (preferred), [RenderDoc](https://renderdoc.org/), or [apitrace](https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown) capture of the issue with the RADV driver active. This can tremendously help when debugging issues, but you're still encouraged to report issues if you can't provide a capture file.
### Further information (optional)
Does the issue reproduce with the LLVM backend (`RADV_DEBUG=llvm`) or on the AMDGPU-PRO drivers?
Does your environment set any of the variables `ACO_DEBUG`, `RADV_DEBUG`, and `RADV_PERFTEST`?

View File

@@ -1,54 +0,0 @@
### Before submitting your bug report:
- Check if a new version of Mesa is available which might have fixed the problem.
- If you can, check if the latest development version (git master) works better.
- Check if your bug has already been reported here.
- For any logs, backtraces, etc - use [code blocks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)
- As examples of good bug reports you may review one of these - #2598, #2615, #2608
Otherwise, please fill in the requested information below.
And please remove anything that doesn't apply to keep things readable :)
### System information
Please post `inxi -GSC -xx` output ([fenced with triple backticks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)) OR fill information below manually
- OS: (`cat /etc/os-release | grep "NAME"`)
- GPU: (`lspci -nn | grep VGA` or `lshw -C display -numeric`)
- Kernel version: (run `uname -a`)
- Mesa version: (`glxinfo -B | grep "OpenGL version string"`)
- Xserver version (if applicable): (`sudo X -version`)
- Desktop manager and compositor:
#### If applicable
- DXVK version:
- Wine/Proton version:
### Describe the issue
Please describe what you are doing, what you expect, and what you're
seeing instead. How frequent is the issue? Is it a one-time occurrence? Does it appear multiple times but randomly? Can you easily reproduce it?
"It doesn't work" usually is not a helpful description of an issue.
The more detail about how things are going wrong, the better.
### Regression
Did it used to work? It can greatly help to know when the issue started.
### Log files as attachment
- Output of `dmesg`
- Backtrace
- GPU hang details
### Screenshots/video files (if applicable)
### Any extra information would be greatly appreciated

.mailmap
View File

@@ -1,673 +0,0 @@
Aapo Tahkola <aet@rasterburn.org> <aapo@aapo-desktop.(none)>
Adam Jackson <ajax@redhat.com> <ajax@benzedrine.nwnk.net>
Adam Jackson <ajax@redhat.com> <ajax@freedesktop.org>
Adrian Marius Negreanu <adrian.m.negreanu@intel.com> Adrian Negreanu <adrian.m.negreanu@intel.com>
Adrian Marius Negreanu <adrian.m.negreanu@intel.com> Negreanu Marius Adrian <adrian.m.negreanu@intel.com>
Alan Swanson <reiver@improbability.net> <swanson@ukfsn.org>
Dave Airlie <airlied@redhat.com> <airliedfreedesktop.org>
Dave Airlie <airlied@redhat.com> airlied <airlied@unused-12-215.bne.redhat.com>
Dave Airlie <airlied@redhat.com> <airlied@dhcp-1-203.bne.redhat.com>
Dave Airlie <airlied@redhat.com> <airlied@dhcp-40-204.bne.redhat.com>
Dave Airlie <airlied@redhat.com> <airlied@gmail.com>
Dave Airlie <airlied@redhat.com> <airlied@itt42.(none)>
Dave Airlie <airlied@redhat.com> <airlied@linux.ie>
Dave Airlie <airlied@redhat.com> <airlied@nx6125b.(none)>
Dave Airlie <airlied@redhat.com> <airlied@panoply-rh.(none)>
Dave Airlie <airlied@redhat.com> <airlied@ppcg5.localdomain>
Alan Coopersmith <alan.coopersmith@oracle.com> <alan.coopersmith@sun.com>
Alan Hourihane <alanh@vmware.com> <alanh@tungstengraphics.com>
Alan Hourihane <alanh@vmware.com> <alanh@fairlite.demon.co.uk>
Alan Hourihane <alanh@vmware.com> <alanh@jetpack.(none)>
Alexander Monakov <amonakov@gmail.com> <amonakov@ispras.ru>
Alexander von Gluck IV <kallisti5@unixzen.com> Alexander von Gluck <kallisti5@unixzen.com>
Alexandros Frantzis <alexandros.frantzis@collabora.com> <Alexandros.Frantzis@canonical.com>
Alex Corscadden <alexc@vmware.com> <alexc@alexc-dev1.prom.eng.vmware.com>
Alex Corscadden <alexc@vmware.com> <alexc@alexc-dev1.vmware.com>
Alex Deucher <alexdeucher@gmail.com> <alexander.deucher@amd.com>
Alex Deucher <alexdeucher@gmail.com> <agd5f@yahoo.com>
Alex Deucher <alexdeucher@gmail.com> <alex@botch2.com>
Alex Deucher <alexdeucher@gmail.com> <alex@botch2.(none)>
Alex Deucher <alexdeucher@gmail.com> <alex@cube.(none)>
Alex Deucher <alexdeucher@gmail.com> <alex@samba.(none)>
Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com> <alyssa@rosenzweig.io>
Andreas Fänger <a.faenger@e-sign.com> <a.faenger@e-sign.com>
Andreas Hartmetz <ahartmetz@gmail.com> <andreas.hartmetz@kdab.com>
Andre Heider <a.heider@gmail.com>
Andreas Heider <andreas@heider.io>
Andreas Pokorny <andreas.pokorny@canonical.com> <andreas.pokorny@elektrobit.com>
Andres Gomez <agomez@igalia.com> <tanty@igalia.com>
Andrew Randrianasulu <randrianasulu@gmail.com> <randrik_a@yahoo.com>
Andrew Randrianasulu <randrianasulu@gmail.com> <randrik@mail.ru>
Andrii Simiklit <andrii.simiklit@globallogic.com> <asimiklit.work@gmail.com>
Anuj Phogat <anuj.phogat@gmail.com> <anuj.phogat@intel.com>
Arthur Huillet <arthur.huillet@free.fr> Arthur HUILLET <arthur.huillet@free.fr>
Axel Davy <axel.davy@ens.fr> <davyaxel0@gmail.com>
Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl> <basni@chromium.org>
Benjamin Franzke <benjaminfranzke@googlemail.com> ben <benjaminfranzke@googlemail.com>
Ben Skeggs <bskeggs@redhat.com> <darktama@beleth.(none)>
Ben Skeggs <bskeggs@redhat.com> <darktama@iinet.net.au>
Ben Skeggs <bskeggs@redhat.com> <darktama@nisroch.keine.ath.cx>
Ben Skeggs <bskeggs@redhat.com> <skeggsb-at-gmail.com>
Ben Skeggs <bskeggs@redhat.com> <skeggsb@gmail.com>
Ben Skeggs <bskeggs@redhat.com> <skeggsb@localhost.localdomain>
Ben Skeggs <bskeggs@redhat.com> <skeggsb@nisroch.keine.ath.cx>
Ben Widawsky <benjamin.widawsky@intel.com> Ben Widawsky <ben@bwidawsk.net>
Blair Sadewitz <blair.sadewitz@gmail.com> Blair Sadewitz <blair.sadewitz.gmail.com>
Boris Brezillon <boris.brezillon@collabora.com> <boris.brezillon@free-electrons.com>
Boris Peterbarg <reist@users.sourceforge.net> reist <reist>
Brian Paul <brianp@vmware.com> Brian <brian.paul@tungstengraphics.com>
Brian Paul <brianp@vmware.com> <brian.paul@tungstengraphics.com>
Brian Paul <brianp@vmware.com> <brian.e.paul@gmail.com>
Brian Paul <brianp@vmware.com> <brianp@kemper.freedesktop.org>
Brian Paul <brianp@vmware.com> brian <brian@cvp965.(none)>
Brian Paul <brianp@vmware.com> Brian <brian@i915.localnet.net>
Brian Paul <brianp@vmware.com> Brian <brian@nostromo.localnet.net>
Brian Paul <brianp@vmware.com> Brian <brian@poulsbo.localnet.net>
Brian Paul <brianp@vmware.com> Brian <brian@ps3.localnet.net>
Brian Paul <brianp@vmware.com> Brian <brianp@vmware.com>
Brian Paul <brianp@vmware.com> Brian <brian@yutani.localnet.net>
Brian Paul <brianp@vmware.com> root <brian.paul@tungstengraphics.com>
Brian Paul <brianp@vmware.com> root <root@i915.localnet.net>
Brian Paul <brianp@vmware.com> root <root@nostromo.localnet.net>
Brian Paul <brianp@vmware.com> root <root@i965.localnet.net>
Bruce Cherniak <bruce.cherniak@intel.com>
Bruce Merry <bmerry@users.sourceforge.net> <bmerry@gmail.com>
Carl-Philip Hänsch <cphaensch@googlemail.com>
Carl-Philip Hänsch <cphaensch@googlemail.com> <s3734770@mail.zih.tu-dresden.de>
Carl-Philip Hänsch <cphaensch@googlemail.com> <carli@carli-laptop.(none)>
Carl-Philip Hänsch <cphaensch@googlemail.com> <Carl-Philip.Haensch@mailbox.tu-dresden.de>
Chad Versace <chadversary@chromium.org> <chad@kiwitree.net>
Chad Versace <chadversary@chromium.org> <chad@chad-versace.us>
Chad Versace <chadversary@chromium.org> <Chad Versace chad@chad-versace.us>
Chad Versace <chadversary@chromium.org> <chad.versace@intel.com>
Chad Versace <chadversary@chromium.org> <chad.versace@linux.intel.com>
Chad Versace <chadversary@chromium.org> <chadversary@google.com>
Chandu Babu Namburu <chandu@amd.com>
Chandu Babu Namburu <chandu@amd.com> <mailto:chandu@amd.com>
Chenglei Ren <chenglei.ren@intel.com>
Chia-I Wu <olvaffe@gmail.com> <olv@lunarg.com>
Chia-I Wu <olvaffe@gmail.com> Chia-Wu <olvaffe@gmail.com>
Chih-Wei Huang <cwhuang@linux.org.tw> Chih-Wei Huang <cwhuang@android-x86.org>
Christian Gmeiner <christian.gmeiner@gmail.com> <christian.GMEINER@bachmann.info>
Christian Inci <chris.bugsfd@broke-the-inter.net> <chris.pcguy.inci@gmail.com>
Christian König <christian.koenig@amd.com> Christian Koenig <christian.koenig@amd.com>
Christian König <christian.koenig@amd.com> Christian König <christian.koenig at amd.com>
Christian König <christian.koenig@amd.com> Christian König <deathsimple@vodafone.de>
Christoph Brill <egore911@egore911.de> Christoph Bill <egore@gmx.de>
Christoph Brill <egore911@egore911.de> <egore@gmx.de>
Christoph Bumiller <christoph.bumiller@speed.at> <e0425955@student.tuwien.ac.at>
Christoph Haag <haagch@frickel.club> <christoph.haag@collabora.com>
Christoph Haag <haagch@frickel.club> <haagch+mesa@frickel.club>
Christoph Haag <haagch@frickel.club> <haagch+mesadev@frickel.club>
Christopher James Halse Rogers <christopher.halse.rogers@canonical.com> Christopher James Halse Rogers <raof@ubuntu.com>
Christopher Li <chrisl@vmware.com> Chris Li <chrisl@vmware.com>
Christopher Li <chrisl@vmware.com> Qicheng Christopher Li <chrisl@vmware.com>
Claudio Ciccani <klan@directfb.org> <klan@users.sf.net>
Claudio Ciccani <klan@directfb.org> <klan@users.sourceforge.net>
Colin McDonald <cjmmail10-bz@yahoo.co.uk> <cjmcdonald@qinetiq.com>
Connor Abbott <cwabbott0@gmail.com> <connor.w.abbott@intel.com>
Connor Abbott <cwabbott0@gmail.com> <connor.abbott@intel.com>
Constantine Kharlamov <Hi-Angel@yandex.ru>
Corbin Simpson <MostAwesomeDude@gmail.com> <mostawesomed...@gmail.com>
Corbin Simpson <MostAwesomeDude@gmail.com> <mostawesomedude@gmail.com>
Courtney Goeltzenleuchter <courtney@lunarg.com> <courtney@LunarG.com>
Craig Stout <cstout@google.com>
Daniel Schürmann <daniel@schuermann.dev> <daniel.schuermann@campus.tu-berlin.de>
Daniel Skinner <sio@users.sourceforge.net> sio <sio>
Daniel Stone <daniels@collabora.com> <daniel@fooishbar.org>
Danylo Piliaiev <dpiliaiev@igalia.com> <danylo.piliaiev@globallogic.com>
Danylo Piliaiev <dpiliaiev@igalia.com> <danylo.piliaiev@gmail.com>
David Miller <davem@davemloft.net> David S. Miller <davem@davemloft.net>
David Miller <davem@davemloft.net> Dave Miller <davem@davemloft.net>
David Miller <davem@davemloft.net> davem69 <davem69>
David Heidelberg <david@ixit.cz> David Heidelberger <david.heidelberger@ixit.cz>
David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
David Reveman <reveman@chromium.org> <c99drn@cs.umu.se>
Dieter Nützel <Dieter@nuetzel-hh.de> Dieter Nützel <dieter@nuetzel-hh.de>
Dmitry Cherkassov <dcherkassov@gmail.com> Dmitry Cherkasov <dcherkassov@gmail.com>
Dylan Baker <dylanx.c.baker@intel.com> <baker.dylan.c@gmail.com>
Dylan Baker <dylanx.c.baker@intel.com> <dylan@pnwbakers.com>
Dylan Noblesmith <nobled@dreamwidth.org>
Dylan Noblesmith <nobled@dreamwidth.org> nobled <nobled2@nobled2-karmic.(none)>
Edward O'Callaghan <funfunctor@folklore1984.net> <eocallaghan@alterapraxis.com>
Eleni Maria Stea <estea@igalia.com> <elene.mst@gmail.com>
Elie Tournier <tournier.elie@gmail.com>
Emeric Grange <emeric.grange@gmail.com> Emeric <emeric.grange@gmail.com>
Emil Velikov <emil.l.velikov@gmail.com> <emil.velikov@collabora.com>
Emil Velikov <emil.l.velikov@gmail.com> <emil.veliko@collabora.com>
Emil Velikov <emil.l.velikov@gmail.com> <emil.velikov@collabora.co.uk>
Emil Velikov <emil.l.velikov@gmail.com> <emil.veliikov@collabora.com>
Emil Velikov <emil.l.velikov@gmail.com> <emil.velikov@gmail.com>
Emil Velikov <emil.l.velikov@gmail.com> <emmil.velikov@collabora.com>
Emmanuel Gil Peyrot <linkmauve@linkmauve.fr> <emmanuel.peyrot@collabora.com>
Emmanuel Vadot <manu@FreeBSD.org> Emmanuel <manu@FreeBSD.org>
Eric Anholt <eric@anholt.net> Eric Anholt <anholt@FreeBSD.org>
Eric Engestrom <eric@engestrom.ch> <eric.engestrom@imgtec.com>
Eric Engestrom <eric@engestrom.ch> <eric.engestrom@intel.com>
Erik Faye-Lund <kusmabite@gmail.com> <erik.faye-lund@collabora.com>
Eugeni Dodonov <eugeni.dodonov@intel.com> <eugeni@mandriva.com>
Fabian Bieler <der.fabe@gmx.net> <fabianbieler@fastmail.fm>
Fabian Bieler <der.fabe@gmx.net> <&lt;der.fabe@gmx.net&gt>
Feng, Haitao <haitao.feng@intel.com> Haitao Feng <haitao.feng@intel.com>
Francesco Ansanelli <francians@gmail.com>
Frank Binns <frank.binns@imgtec.com> <francisbinns@gmail.com>
Frank Henigman <fjhenigman@google.com> <fjhenigman@chromium.org>
George Sapountzis <gsapountzis@gmail.com> George Sapountzis <gsap7@yahoo.gr>
Gert Wollny <gert.wollny@collabora.com> <gw.fossdev@gmail.com>
Gurchetan Singh <gurchetansingh@chromium.org>
Gwenole Beauchesne <gwenole.beauchesne@intel.com> <gb.devel@gmail.com>
Haihao Xiang <haihao.xiang@intel.com>
Hamish Marson <hmarson@users.sourceforge.net> hmarson <hmarson>
Hans de Goede <hdegoede@redhat.com> Hans de Goede <j.w..r..degoede@hhs.nl>
Harish Krupo <harish.krupo.kps@intel.com> <harishkrupo@gmail.com>
Heinrich Fink <heinrich.fink@daqri.com>
Henri Verbeet <hverbeet@gmail.com>
Homer Hsing <dongsheng.xing@intel.com> <homer.hsing@gmail.com>
Hui Qi Tay <hqtay@vmware.com> <tayhuiqithq@gmail.com>
Iago Toral Quiroga <itoral@igalia.com> Iago Toral <itoral@igalia.com>
Ian Romanick <ian.d.romanick@intel.com> <idr@freedesktop.org>
Ian Romanick <ian.d.romanick@intel.com> <idr@us.ibm.com>
Icecream95 <ixn@disroot.org> <ixn@keemail.me>
Igor Gnatenko <i.gnatenko.brain@gmail.com> <ignatenko@redhat.com>
Illia Iorin <illia.iorin@globallogic.com> <illia.iorin@gmail.com>
Indrajit Das <indrajit-kumar.das@amd.com> Indrajit Kumar Das <indrajit-kumar.das@amd.com>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob@vmware.com>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob@aurora.(none)>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob@aurora.walkyrie.se>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob@tungstengraphics.com>
Jakob Bornecrantz <wallbraker@gmail.com> <wallbraker 'at' gmail 'dot' com>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob.bornecrantz@collabora.com>
Jakob Bornecrantz <wallbraker@gmail.com> <jakob@collabora.com>
Jakub Bogusz <qboosh@pld-linux.org> <gboosh@pld-linux.org>
James Legg <jlegg@feralinteractive.com> <lankyleggy@gmail.com>
James Xiong <james.xiong@intel.com> Xiong, James <james.xiong@intel.com>
James Zhu <James.Zhu@amd.com>
Jan Beich <jbeich@freebsd.org> <jbeich@FreeBSD.org>
Jan Vesely <jano.vesely@gmail.com> Jan Vesely <jan.vesely@rutgers.edu>
Jan Zielinski <jan.zielinski@intel.com> jzielins <jan.zielinski@intel.com>
Jason Ekstrand <jason@jlekstrand.net> <jason.ekstrand@intel.com>
Jeremy Huddleston <jeremyhu@apple.com>
Jeremy Huddleston <jeremyhu@apple.com> <jeremyhu@freedesktop.org>
Jeremy Huddleston <jeremyhu@apple.com> <jeremy@tifa.local>
Jeremy Huddleston <jeremyhu@apple.com> <jeremy@vincent.local>
Jeremy Huddleston <jeremyhu@apple.com> <jeremy@yuffie.local>
Jeremy Kolb <jkolb@freedesktop.org> <jkolb@brandeis.edu>
Jerome Glisse <jglisse@redhat.com> <glisse@freedesktop.org>
Jerome Glisse <jglisse@redhat.com> <glisse@kemper.freedesktop.org>
Jerome Glisse <jglisse@redhat.com> John Doe <glisse@barney.(none)>
Jerome Glisse <jglisse@redhat.com> John Doe <glisse@localhost.localdomain>
Jesse Barnes <jesse.barnes@intel.com> <jbarnes@hobbes.lan>
Jesse Barnes <jesse.barnes@intel.com> <jbarnes@hobbes.(none)>
Jesse Barnes <jesse.barnes@intel.com> <jbarnes@jbarnes-desktop.localdomain>
Jesse Barnes <jesse.barnes@intel.com> <jbarnes@jbarnes-t61.(none)>
Jesse Barnes <jesse.barnes@intel.com> <jbarnes@virtuousgeek.org>
Joakim Sindholt <bacn@zhasha.com> <opensource@zhasha.com>
Joakim Sindholt <bacn@zhasha.com> <zhasha@gallium-dev.(none)>
Jochen Gerlach <jtg@users.sourceforge.net> jtg <jtg>
Joel Bosveld <joel.bosveld@gmail.com> <Joel.Bosveld@gmail.com>
Jonathan Adamczewski <jadamcze@utas.edu.au> <jadamcze@utas.edu.a>
Jon Turney <jon.turney@dronecode.org.uk> Jon TURNEY <jon.turney@dronecode.org.uk>
José Fonseca <jfonseca@vmware.com> Jose Fonseca <jfonseca@vmware.com>
José Fonseca <jfonseca@vmware.com> Jose Fonseca <jrfonseca@tungstengraphics.com>
José Fonseca <jfonseca@vmware.com> <jfonseca@pegasus.(none)>
José Fonseca <jfonseca@vmware.com> <jfonseca@titan.(none)>
José Fonseca <jfonseca@vmware.com> <jose.r.fonseca@gmail.com>
José Fonseca <jfonseca@vmware.com> <jrfonseca@tungstengraphics.com>
José Fonseca <jfonseca@vmware.com> <j_r_fonseca@yahoo.co.uk>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> Jouk Jansen <jouk@hrem.nano.tudelft.nl>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> Jouk Jansen <joukj@hrem.stm.tudelft.nl>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> joukj <joukj@tarantella.(none)>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> Jouk <joukj@tarantella.nano.tudelft.nl>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> Jouk <joukj@tarantella.(none)>
Jouk Jansen <joukj@hrem.nano.tudelft.nl> J.Jansen <joukj@tarantella.nano.tudelft.nl>
Juan Zhao <juan.j.zhao@intel.com> <juan.j.zhao@linux.intel.com>
Julien Cristau <jcristau@debian.org> <julien.cristau@logilab.fr>
Julien Isorce <j.isorce@samsung.com> <julien.isorce@gmail.com>
Julien Isorce <j.isorce@samsung.com> <jisorce@oblong.com>
Kalyan Kondapally <kalyan.kondapally@intel.com> <kondapallykalyancontribute@gmail.com>
Karl Schultz <karl.w.schultz@gmail.com> Karl Schultze <k.w.schultz@comcast.net>
Karl Schultz <karl.w.schultz@gmail.com> unknown <kwschult@.na.qualcomm.com>
Karl Schultz <karl.w.schultz@gmail.com> <k.w.schultz@comcast.net>
Karl Schultz <karl.w.schultz@gmail.com> <Karl.W.Schultz@gmail.com>
Karl Schultz <karl.w.schultz@gmail.com> <kschultz@freedesktop.org>
Karol Herbst <kherbst@redhat.com> <git@karolherbst.de>
Karol Herbst <kherbst@redhat.com> <karolherbst@gmail.com>
Karol Herbst <kherbst@redhat.com> <nouveau@karolherbst.de>
Keith Harrison <sio2@users.sourceforge.net> sio2 <sio2>
Keith Packard <keithp@keithp.com> <keithp@koto.keithp.com>
Keith Packard <keithp@keithp.com> <keithp@neko.keithp.com>
Keith Whitwell <keithw@vmware.com> <keith@tungstengraphics.com>
Keith Whitwell <keithw@vmware.com> keithw <keithw@keithw-laptop.(none)>
Kevin Rogovin <kevin.rogovin@intel.com> <kevin.rogovin@gmail.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@redhat.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@hinata.boston.redhat.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@sasori.boston.redhat.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@temari.boston.redhat.com>
Kristian Høgsberg <krh@bitplanet.net> <kristian.h.kristensen@intel.com>
Kristian Høgsberg <krh@bitplanet.net> <hoegsberg@chromium.org>
Kristian Høgsberg <krh@bitplanet.net> <hoegsberg@google.com>
Kristian Høgsberg <krh@bitplanet.net> <hoegsberg@gmail.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@sweater.jf.intel.com>
Kristian Høgsberg <krh@bitplanet.net> <krh@bitplanet.net>
Kristian Høgsberg <krh@bitplanet.net> <krh@owl.jf.intel.com>
Krzesimir Nowak <qdlacz@gmail.com> <krzesimir@kinvolk.io>
Li Peng <peng.li@intel.com> <peng.li@linux.intel.com>
Lin Johnson <johnson.lin@intel.com> Johnson Lin <johnson.lin@intel.com>
Lionel Landwerlin <lionel.g.landwerlin@intel.com> <llandwerlin@gmail.com>
Liviu Prodea <liviuprodea@yahoo.com>
Lucas Stach <dev@lynxeye.de> <l.stach@pengutronix.de>
Maarten Lankhorst <maarten.lankhorst@ubuntu.com> <dev@mblankhorst.nl>
Maarten Lankhorst <maarten.lankhorst@ubuntu.com> <m.b.lankhorst@gmail.com>
Maarten Lankhorst <maarten.lankhorst@ubuntu.com> <maarten.lankhorst@canonical.com>
Maciej Cencora <m.cencora@gmail.com> <maciej@osiris.(none)>
Marc-André Lureau <marcandre.lureau@gmail.com> <marcandre.lureau@gmail.com>
Marc-André Lureau <marcandre.lureau@gmail.com> <marcandre.lureau@redhat.com>
Marc Dietrich <marvin24@gmx.de> Marc <marvin24@gmx.de>
Marc Dietrich <marvin24@gmx.de> marvin24 <marvin24@gmx.de>
Marcin Ślusarz <marcin.slusarz@gmail.com> Marcin Slusarz <marcin.slusarz@gmail.com>
Marek Olšák <maraeo@gmail.com> <marek.olsak@amd.com>
Mario Kleiner <mario.kleiner.de@gmail.com> kleinerm <mario.kleiner@tuebingen.mpg.de>
Mario Kleiner <mario.kleiner.de@gmail.com> <mario.kleiner@tuebingen.mpg.de>
Mark Menzynski <mmenzyns@redhat.com> mmenzyns <mmenzyns@redhat.com>
Mark Mueller <markkmueller@gmail.com> <MarkKMueller@gmail.com>
Marta Lofstedt <marta.lofstedt@intel.com> <marta.lofstedt@linux.intel.com>
Martin Peres <martin.peres@linux.intel.com> <martin.peres@labri.fr>
Mathias Fröhlich <mathias.froehlich@gmx.net> Mathias Froehlich <Mathias.Froehlich@gmx.net>
Mathias Fröhlich <mathias.froehlich@gmx.net> Mathias Froehlich <Mathias.Froehlich@web.de>
Mathias Fröhlich <mathias.froehlich@gmx.net> Mathias Frohlich <M.Froehlich@science-computing.de>
Mathias Fröhlich <mathias.froehlich@gmx.net> <frohlich8@users.sourceforge.net>
Mathias Fröhlich <mathias.froehlich@gmx.net> <Mathias.Froehlich@gmx.net>
Mathias Fröhlich <mathias.froehlich@gmx.net> <Mathias.Froehlich@web.de>
Mathias Fröhlich <mathias.froehlich@gmx.net> M.Froehlich@science-computing.de <M.Froehlich@science-computing.de>
Matthias Groß <grmat@sub.red>
Matthias Hopf <mhopf@suse.de> Mathias Hopf <mhopf@suse.de>
Matthias Lorenz <oschowa@web.de> Oschowa <oschowa@web.de>
Matthew W. S. Bell <matthew@bells23.org.uk> Matthew Bell <matthew@bells23.org.uk>
Maxence Le Doré <maxence.ledore@gmail.com> Maxence Le Dore <maxence.ledore@gmail.com>
Maya Rashish <coypu@sdf.org> coypu <coypu@sdf.org>
Micah Fedke <micah.fedke@collabora.co.uk> <M.Fedke@Astronautics.com>
Michal Krol <michal@vmware.com> <michal@tungstengraphics.com>
Michal Krol <michal@vmware.com> Michal Krol <michal@ubuntu-vbox.(none)>
Michal Krol <michal@vmware.com> Michal Krol <mjkrol@gmail.org>
Michal Krol <michal@vmware.com> michal <michal@capacitor.(none)>
Michal Krol <michal@vmware.com> michal <michal@michal-laptop.(none)>
Michal Krol <michal@vmware.com> michal <michal@quad.(none)>
Michal Krol <michal@vmware.com> michal <michal@transistor.(none)>
Michal Krol <michal@vmware.com> Michal <michal@tungstengraphics.com>
Michal Krol <michal@vmware.com> michal <michal@wmvare.com>
Michel Dänzer <michel@daenzer.net> <michel.daenzer@amd.com>
Michel Dänzer <michel@daenzer.net> <daenzer@vmware.com>
Michel Dänzer <michel@daenzer.net> <michel@tungstengraphics.com>
Michel Dänzer <michel@daenzer.net> Michel Daenzer <michel.daenzer@amd.com>
Michel Dänzer <michel@daenzer.net> Michel Daenzer <daenzer@localhost.(none)>
Michel Dänzer <michel@daenzer.net> <mdaenzer@redhat.com>
Mike Kaplinskiy <mike.kaplinskiy@gmail.com> Mike Kaplinksiy <mike.kaplinskiy@gmail.com>
Mike Kaplinskiy <mike.kaplinskiy@gmail.com> <mike.kaplinskiy@gmai.com>
Mike Stroyan <mike@lunarg.com> <mike@LunarG.com>
Mun Gwan-gyeong <elongbug@gmail.com> Gwan-gyeong Mun <elongbug@gmail.com>
Neha Bhende <bhenden@vmware.com> <nbhende@vmware.com>
Neil Roberts <nroberts@igalia.com> <neil@linux.intel.com>
Nian Wu <nian.wu@intel.com>
Nian Wu <nian.wu@intel.com> <nian@graphics.(none)>
Nian Wu <nian.wu@intel.com> <nian@tinderbox.sh.intel.com>
Nicholas Bishop <nbishop@neverware.com> <nicholasbishop@gmail.com>
Nick Bowler <nbowler@draconx.ca>
Nick Sarnie <commendsarnex@gmail.com>
Nicolai Hähnle <nicolai.haehnle@amd.com>
Nicolai Hähnle <nicolai.haehnle@amd.com> <nhaehnle@gmail.com>
Nicolai Hähnle <nicolai.haehnle@amd.com> <prefect_@gmx.net>
Nicolai Hähnle <nicolai.haehnle@amd.com> <prefect@upb.de>
Nigel Stewart <nigels@users.sourceforge.net> <nigels@sourceforge.net>
Nigel Stewart <nigels@users.sourceforge.net> <nstewart@nvidia.com>
nobled <nobled@dreamwidth.org> <nobled2@nobled2-karmic.(none)>
Oliver McFadden <oliver.mcfadden@linux.intel.com> <z3ro.geek@gmail.com>
Owain Ainsworth <zerooa@googlemail.com> Owain G. Ainsworth <oga@openbsd.org>
Owen W. Taylor <otaylor@fishsoup.net> Owen Taylor <otaylor@snell.localdomain>
Patrice Mandin <patmandin@gmail.com> <patrice@manoir.racoon.city>
Patrice Mandin <patmandin@gmail.com> <pmandin@caramail.com>
Patrice Mandin <patmandin@gmail.com> <pmandin@freedesktop.org>
Pauli Nieminen <pauli.nieminen@linux.intel.com> <suokkos@gmail.com>
Paulo Zanoni <paulo.r.zanoni@intel.com> Paulo Zanoni <pzanoni@mandriva.com>
Paul Seidler <sepek@exherbo.org> Paul Seidler <pl.seidler@googlemail.com>
Pekka Paalanen <pekka.paalanen@collabora.co.uk> <ppaalanen@gmail.com>
Pekka Paalanen <pekka.paalanen@collabora.co.uk> <pq@iki.fi>
Peter Hutterer <peter.hutterer@who-t.net> <peter@cs.unisa.edu.au>
Philipp Zabel <p.zabel@pengutronix.de> <philipp.zabel@gmail.com>
Pierre-Eric Pelloux-Prayer <pelloux@gmail.com> <pelloux@gmail.com>
Pierre-Eric Pelloux-Prayer <pelloux@gmail.com> <pierre-eric.pelloux-prayer@amd.com>
Pierre Willenbrock <pierre@pirsoft.de> Pierre Willenbrok <pierre@pirsoft.de>
Plamena Manolova <plamena.manolova@intel.com> <plamena.n.manolova@gmail.com>
Qiang Yu <yuq825@gmail.com> <Qiang.Yu@amd.com>
Quentin Glidic <sardemff7+git@sardemff7.net> <sardemff7@sardemff7.net>
Randy Xu <randy.xu@intel.com>
RALOVICH, Kristóf <tade60@freemail.hu> <kristof.ralovich@gmail.com>
Renato Caldas <seventhguardian@gmail.com>
Richard Li <richardradeon@gmail.com> <RichardZ.Li@amd.com>
# The next ones are not 100% sure
Richard Li <richardradeon@gmail.com> richard <richard@richard-desktop3.(none)>
Richard Li <richardradeon@gmail.com> richard <richard@richard-desktop.(none)>
Richard Li <richardradeon@gmail.com> root <root@richard-desktop.(none)>
Richard Sandiford <rsandifo@linux.vnet.ibm.com> <r.sandiford@uk.ibm.com>
Rob Clark <robclark@freedesktop.org> <Rob Clark robdclark@freedesktop.org>
Rob Clark <robclark@freedesktop.org> <robdclark@gmail.com>
Rob Clark <robclark@freedesktop.org> <robdclark@chromium.org>
Robert Bragg <robert@sixbynine.org> <robert@linux.intel.com>
Robert Ellison <papillo@vmware.com> <papillo@i965-laptop.(none)>
Robert Ellison <papillo@vmware.com> <papillo@tungstengraphics.com>
Robert Hooker <sarvatt@ubuntu.com> <robert.hooker@canonical.com>
Rodrigo Vivi <rodrigo.vivi@intel.com> <rodrigo.vivi@gmail.com>
Roland Scheidegger <sroland@vmware.com> <rscheidegger@gmx.ch>
Roland Scheidegger <sroland@vmware.com> <sroland@tungstengraphics.com>
Roy Spliet <rspliet@eclipso.eu> <r.spliet@student.tudelft.nl>
Rune Petersen <rune@megahurts.dk> Rune Peterson <rune@megahurts.dk>
Ryan Houdek <sonicadvance1@gmail.com> <Sonicadvance1@gmail.com>
Sam Hocevar <sam@hocevar.net> Sam Hocevar <sam@zoy.org>
Samuel Iglesias Gonsálvez <siglesias@igalia.com> Samuel Iglesias Gonsalvez <siglesias@igalia.com>
Samuel Li <samuel.li@amd.com> <Samuel.Li@amd.com>
Satyeshwar Singh <satyeshwar.singh@intel.com> Singh, Satyeshwar <satyeshwar.singh@intel.com>
Sean D'Epagnier <sean@depagnier.com> <geckosenator@freedesktop.org>
Serge Martin <edb+mesa@sigluy.net> Serge Martin (EdB) <edb+mesa@sigluy.net>
Serge Martin <edb+mesa@sigluy.net> EdB <edb+mesa@sigluy.net>
Sergii Romantsov <sergii.romantsov@globallogic.com> <sergii.romantsov@gmail.com>
Sinclair Yeh <syeh@vmware.com> <sinclair.yeh@intel.com>
Sonny Jiang <sonny.jiang@amd.com>
Stefan Brüns <stefan.bruens@rwth-aachen.de> <Stefan.Bruens@rwth-aachen.de>
Steinar H. Gunderson <sgunderson@bigfoot.com> <sesse@google.com>
Steinar H. Gunderson <sgunderson@bigfoot.com> <steinar+mesa@gunderson.no>
Stéphane Marchesin <marcheu@chromium.org> Stephane Marchesin <marchesin@icps.u-strasbg.fr>
Stéphane Marchesin <marcheu@chromium.org> Stephane Marchesin <stephane.marchesin@gmail.com>
Suresh Guttula <suresh.guttula@amd.com>
Suresh Guttula <suresh.guttula@amd.com> <Suresh.Guttula@amd.com>
Sven M. Hallberg <pesco@users.sourceforge.net> pesco <pesco>
Tapani Pälli <tapani.palli@intel.com> <tapani.palli@gmail.com>
Tapani Pälli <tapani.palli@intel.com> Tapani <tapani.palli@intel.com>
Thierry Reding <treding@nvidia.com> <thierry@gilfi.de>
Thierry Reding <treding@nvidia.com> <thierry.reding@avionic-design.de>
Thierry Reding <treding@nvidia.com> <thierry.reding@gmail.com>
Thierry Vignaud <thierry.vignaud@gmail.com> <tvignaud@mandriva.com>
Thomas Balling Sørensen <tball@io.dk> <tball@tball-laptop.(none)>
Thomas Hellstrom <thellstrom@vmware.com> Thomas <thellstrom@vmware.com>
Thomas Hellstrom <thellstrom@vmware.com> Thomas Hellstrom <thellstrom-at-vmware-dot-com>
Thomas Hellstrom <thellstrom@vmware.com> Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Thomas Hellstrom <thellstrom@vmware.com> Thomas Hellstrom <thomas@tungstengraphics.com>
Thomas Hellstrom <thellstrom@vmware.com> Thomas Hellström <thomas@tungstengraphics.com>
Thomas Tanner <tanner@gmx.net> tanner <tanner>
Tilman Sauerbeck <tilman@code-monkey.de> <tilman@freedesktop.org>
Tim Wiederhake <twied@gmx.net>
Timo Aaltonen <tjaalton@debian.org> <tjaalton@cc.hut.fi>
Timothy Arceri <tarceri@itsqueeze.com> <tarceri@localhost.localdomain>
Timothy Arceri <tarceri@itsqueeze.com> <timothy.arceri@collabora.com>
Timothy Arceri <tarceri@itsqueeze.com> <t_arceri@yahoo.com.au>
Timothy Arceri <tarceri@itsqueeze.com> Timothy <t_arceri@yahoo.com.au>
Tom Fogal <tfogal@alumni.unh.edu> <tfogal@sci.utah.edu>
Tom Stellard <thomas.stellard@amd.com> <tstellar@gmail.com>
Tom Stellard <thomas.stellard@amd.com> Thomas Stellard <tom.stellard@amd.com>
Tom Stellard <thomas.stellard@amd.com> <tstellar@redhat.com>
Tomasz Figa <tfiga@chromium.org> <tomasz.figa@gmail.com>
Tomeu Vizoso <tomeu.vizoso@collabora.com> <tomeu@tomeuvizoso.net>
Topi Pohjolainen <topi.pohjolainen@intel.com> <topi.pohjolainen@gmail.com>
Tormod Volden <debian.tormod@gmail.com> <lists.tormod@gmail.com>
Török Edwin <edwin+mesa@etorok.net> Török Edvin <edwintorok@gmail.com>
Török Edwin <edwin+mesa@etorok.net> <edwintorok@gmail.com>
Vadym Shovkoplias <vadym.shovkoplias@globallogic.com>
Vadym Shovkoplias <vadym.shovkoplias@globallogic.com> <vadim.shovkoplias@gmail.com>
Varad Gautam <varad.gautam@collabora.com> <varadgautam@gmail.com>
Ville Syrjälä <ville.syrjala@linux.intel.com> Ville Syrjala <syrjala@freedesktop.org>
Ville Syrjälä <ville.syrjala@linux.intel.com> Ville Syrjala <syrjala@sci.fi>
Vincent Lejeune <vljn@ovi.com> <peluche.canard@gmail.com>
Vinson Lee <vlee@freedesktop.org> <vlee@vmware.com>
Vivek Kasireddy <vivek.kasireddy@intel.com>
Yaakov Selkowitz <yselkowi@redhat.com> <yselkowitz@users.sourceforge.net>
Yogesh Mohan Marimuthu <yogesh.mohanmarimuthu@amd.com> Yogesh mohan marimuthu <yogesh.mohanmarimuthu@amd.com>
Wladimir J. van der Laan <laanwj@gmail.com>
Xavier Bouchoux <xavierb@gmail.com>
Zhaowei Yuan <zhaowei.yuan@samsung.com>
Zhenyu Wang <zhenyuw@linux.intel.com> Wang Zhenyu <zhenyu.z.wang@intel.com>
Zhongmin Wu <zhongmin.wu@intel.com>
Zack Rusin <zackr@vmware.com> <zack@kde.org>
Zack Rusin <zackr@vmware.com> <zack@pixel.(none)>
Zack Rusin <zackr@vmware.com> <zack@tungstengraphics.com>
Zhang <zxpmyth@yahoo.com.cn> zhang <zxpmyth@yahoo.com.cn>

File diff suppressed because it is too large

View File

@@ -1,52 +0,0 @@
language: c
os: osx
cache:
  ccache: true
env:
  global:
    - PKG_CONFIG_PATH=""
matrix:
  include:
    - env:
        - BUILD=meson
before_install:
  - HOMEBREW_NO_AUTO_UPDATE=1 brew install expat gettext
  - if test "x$BUILD" = xmeson; then
      HOMEBREW_NO_AUTO_UPDATE=1 brew install ninja;
    fi
  # Set PATH for homebrew pip3 installs
  - PYTHON_VERSION=$(python3 -V | awk '{print $2}' | cut -d. -f1-2)
  - PATH="$HOME/Library/Python/$PYTHON_VERSION/bin:${PATH}"
  # Set PKG_CONFIG_PATH for keg-only expat
  - PKG_CONFIG_PATH="/usr/local/opt/expat/lib/pkgconfig:${PKG_CONFIG_PATH}"
  # Set PATH for keg-only gettext
  - PATH="/usr/local/opt/gettext/bin:${PATH}"
  # Install xquartz for prereqs ...
  - XQUARTZ_VERSION="2.7.11"
  - wget -nv https://dl.bintray.com/xquartz/downloads/XQuartz-${XQUARTZ_VERSION}.dmg
  - hdiutil attach XQuartz-${XQUARTZ_VERSION}.dmg
  - sudo installer -pkg /Volumes/XQuartz-${XQUARTZ_VERSION}/XQuartz.pkg -target /
  - hdiutil detach /Volumes/XQuartz-${XQUARTZ_VERSION}
  # ... and set paths
  - PKG_CONFIG_PATH="/opt/X11/share/pkgconfig:/opt/X11/lib/pkgconfig:${PKG_CONFIG_PATH}"
install:
  - if test "x$BUILD" = xmeson; then
      pip3 install --user meson;
      pip3 install --user mako;
    fi
script:
  - if test "x$BUILD" = xmeson; then
      meson _build -Dbuild-tests=true;
      ninja -C _build || travis_terminate 1;
      ninja -C _build test || travis_terminate 1;
      ninja -C _build install || travis_terminate 1;
    fi

View File

@@ -1,133 +0,0 @@
# Mesa 3-D graphics library
#
# Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
# Copyright (C) 2010-2011 LunarG Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
ifeq ($(LOCAL_IS_HOST_MODULE),true)
LOCAL_CFLAGS += -D_GNU_SOURCE
endif
LOCAL_C_INCLUDES += \
$(MESA_TOP)/src \
$(MESA_TOP)/include
MESA_VERSION := $(shell cat $(MESA_TOP)/VERSION)
LOCAL_CFLAGS += \
-Wno-error \
-Werror=incompatible-pointer-types \
-Wno-unused-parameter \
-Wno-pointer-arith \
-Wno-missing-field-initializers \
-Wno-initializer-overrides \
-Wno-mismatched-tags \
-DPACKAGE_VERSION=\"$(MESA_VERSION)\" \
-DPACKAGE_BUGREPORT=\"https://gitlab.freedesktop.org/mesa/mesa/-/issues\"
# XXX: The following __STDC_*_MACROS defines should not be needed.
# It's likely due to a bug elsewhere, but let's temporarily add them
# here to fix the radeonsi build.
LOCAL_CFLAGS += \
-DANDROID_API_LEVEL=$(PLATFORM_SDK_VERSION) \
-DENABLE_SHADER_CACHE \
-D__STDC_CONSTANT_MACROS \
-D__STDC_LIMIT_MACROS \
-DHAVE___BUILTIN_EXPECT \
-DHAVE___BUILTIN_FFS \
-DHAVE___BUILTIN_FFSLL \
-DHAVE_DLFCN_H \
-DHAVE_FUNC_ATTRIBUTE_FLATTEN \
-DHAVE_FUNC_ATTRIBUTE_UNUSED \
-DHAVE_FUNC_ATTRIBUTE_FORMAT \
-DHAVE_FUNC_ATTRIBUTE_PACKED \
-DHAVE_FUNC_ATTRIBUTE_ALIAS \
-DHAVE_FUNC_ATTRIBUTE_NORETURN \
-DHAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL \
-DHAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT \
-DHAVE___BUILTIN_CTZ \
-DHAVE___BUILTIN_POPCOUNT \
-DHAVE___BUILTIN_POPCOUNTLL \
-DHAVE___BUILTIN_CLZ \
-DHAVE___BUILTIN_CLZLL \
-DHAVE___BUILTIN_UNREACHABLE \
-DHAVE_PTHREAD=1 \
-DHAVE_DLADDR \
-DHAVE_DL_ITERATE_PHDR \
-DHAVE_LINUX_FUTEX_H \
-DHAVE_ENDIAN_H \
-DHAVE_ZLIB \
-DHAVE_COMPRESSION \
-DMAJOR_IN_SYSMACROS \
-DVK_USE_PLATFORM_ANDROID_KHR \
-fvisibility=hidden \
-fno-math-errno \
-fno-trapping-math \
-Wno-sign-compare
LOCAL_CPPFLAGS += \
-D__STDC_CONSTANT_MACROS \
-D__STDC_FORMAT_MACROS \
-D__STDC_LIMIT_MACROS \
-Wno-error=non-virtual-dtor \
-Wno-non-virtual-dtor
# mesa requires at least c99 compiler
LOCAL_CONLYFLAGS += \
-std=c99
# c11 timespec_get is part of bionic as well
# https://android-review.googlesource.com/c/718518
# This means releases from P and earlier won't need this
ifeq ($(filter 5 6 7 8 9, $(MESA_ANDROID_MAJOR_VERSION)),)
LOCAL_CFLAGS += -DHAVE_TIMESPEC_GET
endif
# Android's libc began supporting shm in Oreo
ifeq ($(shell test $(PLATFORM_SDK_VERSION) -ge 26 && echo true),true)
LOCAL_CFLAGS += -DHAVE_SYS_SHM_H
endif
ifeq ($(TARGET_ARCH),x86)
LOCAL_CFLAGS += \
-DUSE_X86_ASM
endif
ifeq ($(ARCH_ARM_HAVE_NEON),true)
LOCAL_CFLAGS_arm += -DUSE_ARM_ASM
endif
LOCAL_CFLAGS_arm64 += -DUSE_AARCH64_ASM
ifneq ($(LOCAL_IS_HOST_MODULE),true)
LOCAL_CFLAGS += -DHAVE_LIBDRM
LOCAL_SHARED_LIBRARIES += libdrm
endif
LOCAL_CFLAGS_32 += -DDEFAULT_DRIVER_DIR=\"/vendor/lib/$(MESA_DRI_MODULE_REL_PATH)\"
LOCAL_CFLAGS_64 += -DDEFAULT_DRIVER_DIR=\"/vendor/lib64/$(MESA_DRI_MODULE_REL_PATH)\"
LOCAL_PROPRIETARY_MODULE := true
# uncomment to keep the debug symbols
#LOCAL_STRIP_MODULE := false
ifeq ($(strip $(LOCAL_MODULE_TAGS)),)
LOCAL_MODULE_TAGS := optional
endif
# Quiet down the build system and remove any .h files from the sources
LOCAL_SRC_FILES := $(patsubst %.h, , $(LOCAL_SRC_FILES))

View File

@@ -1,124 +0,0 @@
# Mesa 3-D graphics library
#
# Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
# Copyright (C) 2010-2011 LunarG Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# BOARD_GPU_DRIVERS should be defined. The valid values are
#
# classic drivers: i915 i965
# gallium drivers: swrast freedreno i915g nouveau kmsro r300g r600g radeonsi vc4 virgl vmwgfx etnaviv iris lima panfrost
#
# The main target is libGLES_mesa. For each classic driver enabled, a DRI
# module will also be built. DRI modules will be loaded by libGLES_mesa.
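# Illustrative note (not part of the original file): BOARD_GPU_DRIVERS is
# normally set in a device's BoardConfig.mk. A hypothetical device using the
# freedreno and virgl gallium drivers might select them like this:
#
#   BOARD_GPU_DRIVERS := freedreno virgl
#
# or enable every supported driver with:
#
#   BOARD_GPU_DRIVERS := all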
MESA_TOP := $(call my-dir)
MESA_ANDROID_MAJOR_VERSION := $(word 1, $(subst ., , $(PLATFORM_VERSION)))
ifneq ($(filter 2 4, $(MESA_ANDROID_MAJOR_VERSION)),)
$(error "Android 4.4 and earlier not supported")
endif
MESA_DRI_MODULE_REL_PATH := dri
MESA_DRI_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/$(MESA_DRI_MODULE_REL_PATH)
MESA_DRI_MODULE_UNSTRIPPED_PATH := $(TARGET_OUT_SHARED_LIBRARIES_UNSTRIPPED)/$(MESA_DRI_MODULE_REL_PATH)
MESA_DRI_LDFLAGS := -Wl,--build-id=sha1
MESA_COMMON_MK := $(MESA_TOP)/Android.common.mk
MESA_PYTHON2 := python
MESA_PYTHON3 := python3
ifeq ($(filter 5 6 7 8 9 10, $(MESA_ANDROID_MAJOR_VERSION)),)
MESA_LEX := M4=$(M4) $(LEX)
else
MESA_LEX := $(LEX)
endif
# Lists to convert driver names to boolean variables
# in form of <driver name>.<boolean make variable>
classic_drivers := i915.HAVE_I915_DRI i965.HAVE_I965_DRI
gallium_drivers := \
swrast.HAVE_GALLIUM_SOFTPIPE \
freedreno.HAVE_GALLIUM_FREEDRENO \
i915g.HAVE_GALLIUM_I915 \
nouveau.HAVE_GALLIUM_NOUVEAU \
kmsro.HAVE_GALLIUM_KMSRO \
r300g.HAVE_GALLIUM_R300 \
r600g.HAVE_GALLIUM_R600 \
radeonsi.HAVE_GALLIUM_RADEONSI \
vmwgfx.HAVE_GALLIUM_VMWGFX \
vc4.HAVE_GALLIUM_VC4 \
virgl.HAVE_GALLIUM_VIRGL \
etnaviv.HAVE_GALLIUM_ETNAVIV \
iris.HAVE_GALLIUM_IRIS \
lima.HAVE_GALLIUM_LIMA \
panfrost.HAVE_GALLIUM_PANFROST
ifeq ($(BOARD_GPU_DRIVERS),all)
MESA_BUILD_CLASSIC := $(filter HAVE_%, $(subst ., , $(classic_drivers)))
MESA_BUILD_GALLIUM := $(filter HAVE_%, $(subst ., , $(gallium_drivers)))
else
# Warn if we have any invalid driver names
$(foreach d, $(BOARD_GPU_DRIVERS), \
$(if $(findstring $(d).,$(classic_drivers) $(gallium_drivers)), \
, \
$(warning invalid GPU driver: $(d)) \
) \
)
MESA_BUILD_CLASSIC := $(strip $(foreach d, $(BOARD_GPU_DRIVERS), $(patsubst $(d).%,%, $(filter $(d).%, $(classic_drivers)))))
MESA_BUILD_GALLIUM := $(strip $(foreach d, $(BOARD_GPU_DRIVERS), $(patsubst $(d).%,%, $(filter $(d).%, $(gallium_drivers)))))
endif
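# Worked example (added for illustration, not in the original file): with
# BOARD_GPU_DRIVERS := freedreno virgl, the foreach above matches
# freedreno.HAVE_GALLIUM_FREEDRENO and virgl.HAVE_GALLIUM_VIRGL in
# $(gallium_drivers), patsubst strips the "<driver name>." prefix, and the
# result is:
#
#   MESA_BUILD_CLASSIC :=
#   MESA_BUILD_GALLIUM := HAVE_GALLIUM_FREEDRENO HAVE_GALLIUM_VIRGL
#
# The $(foreach ... $(eval ...)) a few lines below then sets each of those
# names to "true", which is what later conditionals test (see the
# HAVE_GALLIUM_RADEONSI check below).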
ifeq ($(filter x86%,$(TARGET_ARCH)),)
MESA_BUILD_CLASSIC :=
endif
$(foreach d, $(MESA_BUILD_CLASSIC) $(MESA_BUILD_GALLIUM), $(eval $(d) := true))
ifneq ($(filter true, $(HAVE_GALLIUM_RADEONSI)),)
MESA_ENABLE_LLVM := true
endif
define mesa-build-with-llvm
$(if $(filter $(MESA_ANDROID_MAJOR_VERSION), 4 5 6 7), \
$(warning Unsupported LLVM version in Android $(MESA_ANDROID_MAJOR_VERSION)),) \
$(eval LOCAL_CFLAGS += -DLLVM_AVAILABLE -DDRAW_LLVM_AVAILABLE -DLLVM_IS_SHARED=1 -DMESA_LLVM_VERSION_STRING=\"3.9\") \
$(eval LOCAL_SHARED_LIBRARIES += libLLVM)
endef
# add subdirectories
SUBDIRS := \
src/etnaviv \
src/freedreno \
src/gbm \
src/loader \
src/mapi \
src/compiler \
src/mesa \
src/util \
src/egl \
src/amd \
src/broadcom \
src/intel \
src/mesa/drivers/dri \
src/vulkan \
src/panfrost
INC_DIRS := $(call all-named-subdir-makefiles,$(SUBDIRS))
INC_DIRS += $(call all-named-subdir-makefiles,src/gallium)
include $(INC_DIRS)

View File

@@ -1,16 +0,0 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libmesa_*_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/i9*5_dri_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libglapi_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libGLES_mesa_intermediates)
$(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/obj/EXECUTABLES/mesa_*_intermediates)
$(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/obj/EXECUTABLES/glsl_compiler_intermediates)
$(call add-clean-step, rm -rf $(OUT_DIR)/host/$(HOST_OS)-$(HOST_ARCH)/obj/STATIC_LIBRARIES/libmesa_glsl_utils_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/STATIC_LIBRARIES/libmesa_*_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/i9?5_dri_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/libglapi_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/libGLES_mesa_intermediates)
$(call add-clean-step, rm -rf $(HOST_OUT)/*/EXECUTABLES/mesa_*_intermediates)
$(call add-clean-step, rm -rf $(HOST_OUT)/*/EXECUTABLES/glsl_compiler_intermediates)
$(call add-clean-step, rm -rf $(HOST_OUT)/*/STATIC_LIBRARIES/libmesa_*_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/*/SHARED_LIBRARIES/*_dri_intermediates)

1265 Make-config Normal file

File diff suppressed because it is too large

521 Makefile.X11 Normal file
View File

@@ -0,0 +1,521 @@
# $Id: Makefile.X11,v 1.1.1.1 1999/08/19 00:55:39 jtg Exp $
# Mesa 3-D graphics library
# Version: 3.1
#
# Copyright (C) 1999 Brian Paul All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Top-level makefile for Mesa
# To add a new configuration for your system add it to the list below
# then update the Make-config file.
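# Illustrative sketch (not part of the original file): adding a hypothetical
# "foo-gcc" configuration would involve
#   - a help line in the default target, e.g.
#       @echo "  make foo-gcc           for Foo systems with GCC"
#   - appending foo-gcc to the big target list further below so it descends
#     into src, src-glu, etc., and
#   - a matching foo-gcc entry in Make-config with the compiler and flags
#     for that platform (Make-config itself is suppressed in this diff).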
SHELL = /bin/sh
default:
@echo "Type one of the following:"
@echo " make aix for IBM RS/6000 with AIX"
@echo " make aix-sl for IBM RS/6000, make shared libs"
@echo " make amiwin for Amiga with SAS/C and AmiWin"
@echo " make amix for Amiga 3000 UX SVR4 v2.1 systems"
@echo " make beos-r4 for BeOS R4"
@echo " make bsdos for BSD/OS from BSDI using GCC"
@echo " make bsdos4 for BSD/OS 4.x, dynamic libraries"
@echo " make cygnus for Win95/NT using Cygnus-Win32"
@echo " make cygnus-linux for Win95/NT using Cygnus-Win32 under Linux"
@echo " make dgux for Data General"
@echo " make freebsd for FreeBSD systems with GCC"
@echo " make freebsd-386 for FreeBSD systems with GCC, w/ Intel assembly"
@echo " make gcc for a generic system with GCC"
@echo " make hpux9 for HP systems with HPUX 9.x"
@echo " make hpux9-sl for HP systems with HPUX 9.x, make shared libs"
@echo " make hpux9-gcc for HP systems with HPUX 9.x using GCC"
@echo " make hpux9-gcc-sl for HP systems with HPUX 9.x, GCC, make shared libs"
@echo " make hpux10 for HP systems with HPUX 10.x"
@echo " make hpux10-sl for HP systems with HPUX 10.x, shared libs"
@echo " make hpux10-gcc for HP systems with HPUX 10.x w/ GCC"
@echo " make hpux10-gcc-sl for HP systems with HPUX 10.x w/ GCC, shared libs"
@echo " make irix4 for SGI systems with IRIX 4.x"
@echo " make irix5 for SGI systems with IRIX 5.x"
@echo " make irix5-gcc for SGI systems with IRIX 5.x using GCC"
@echo " make irix5-dso for SGI systems with IRIX 5.x, make DSOs"
@echo " make irix6-o32 for SGI systems with IRIX 6.x, make o32-bit libs"
@echo " make irix6-o32-dso for SGI systems with IRIX 6.x, make o32-bit DSOs"
@echo " make irix6-n32 for SGI systems with IRIX 6.x, make n32-bit libs"
@echo " make irix6-n32-dso for SGI systems with IRIX 6.x, make n32-bit DSOs"
@echo " make irix6-gcc-n32-sl for SGI systems with IRIX 6.x, GCC, make n32 DSOs"
@echo " make irix6-64 for SGI systems with IRIX 6.x, make 64-bit libs"
@echo " make irix6-64-dso for SGI systems with IRIX 6.x, make 64-bit DSOs"
@echo " make linux for Linux systems, make static .a libs"
@echo " make linux-elf for Linux systems, make ELF shared libs"
@echo " make linux-386 for Linux w/ Intel assembly"
@echo " make linux-386-elf for Linux w/ Intel assembly, make ELF shared libs"
@echo " make linux-ggi for Linux systems with libggi"
@echo " make linux-386-ggi for Linux systems with libggi w/ Intel assembly"
@echo " make linux-alpha for Linux on Alpha systems"
@echo " make linux-alpha-elf for Linux on Alpha systems, make ELF shared libs"
@echo " make linux-ppc for Linux on PowerPC systems"
@echo " make linux-ppc-so for Linux on PowerPC systems, make shared libs"
@echo " make linux-glide for Linux w/ 3Dfx Glide driver"
@echo " make linux-386-glide for Linux w/ 3Dfx Glide driver, Intel assembly"
@echo " make linux-386-opt-glide for Linux with 3Dfx Voodoo1 for GLQuake"
@echo " make linux-386-opt-V2-glide for Linux with 3Dfx Voodoo2 for GLQuake"
@echo " make linux-3dnow for Linux on AMD w/ 3DNow!"
@echo " make linux-3dnow-glide for Linux on AMD w/ 3DNow! for Glide"
@echo " make linux-386-pthread for Linux w/ Intel assembly and linuxthreads"
@echo " make linux-386-pthread-shared for Linux w/ Intel assembly and linuxthreads"
@echo " make linux-sparc for Linux on Sparc systems"
@echo " make linux-sparc5-elf for Sparc5 systems, make ELF shared libs"
@echo " make linux-sparc-ultra for UltraSparc systems, make ELF shared libs"
@echo " make lynxos for LynxOS systems with GCC"
@echo " make macintosh for Macintosh"
@echo " make machten-2.2 for Macs w/ MachTen 2.2 (68k w/ FPU)"
@echo " make machten-4.0 for Macs w/ MachTen 4.0.1 or newer with GNU make"
@echo " make mklinux for Linux on Power Macintosh"
@echo " make netbsd for NetBSD 1.0 systems with GCC"
@echo " make next for NeXT systems with NEXTSTEP 3.3"
@echo " make openbsd for OpenBSD systems"
@echo " make openstep for OpenStep/MacOSX Server systems"
@echo " make os2-x11 for OS/2 with XFree86"
@echo " make osf1 for DEC Alpha systems with OSF/1"
@echo " make osf1-sl for DEC Alpha systems with OSF/1, make shared libs"
@echo " make pgi-cygnus for Cygnus with Portland Group, Inc. compiler"
@echo " make pgi-mingw32 for mingW32 with Portland Group, Inc. compiler"
@echo " make qnx for QNX V4 systems with Watcom compiler"
@echo " make sco for SCO Unix systems with ODT"
@echo " make solaris-x86 for PCs with Solaris"
@echo " make solaris-x86-gcc for PCs with Solaris using GCC"
@echo " make sunos4 for Suns with SunOS 4.x"
@echo " make sunos4-sl for Suns with SunOS 4.x, make shared libs"
@echo " make sunos4-gcc for Suns with SunOS 4.x and GCC"
@echo " make sunos4-gcc-sl for Suns with SunOS 4.x, GCC, make shared libs"
@echo " make sunos5 for Suns with SunOS 5.x"
@echo " make sunos5-sl for Suns with SunOS 5.x, make shared libs"
@echo " make sunos5-ultra for Sun UltraSPARCs with SunOS 5.x"
@echo " make sunos5-ultra-sl for Sun UltraSPARCs with SunOS 5.x, make shared libs"
@echo " make sunos5-thread for Suns with SunOS 5.x, using Solaris threads"
@echo " make sunos5-pthread for Suns with SunOS 5.[56] using POSIX threads"
@echo " make sunos5-gcc-thread for Suns with SunOS 5.x and GCC, using Solaris threads"
@echo " make sunos5-gcc-pthread for Suns with SunOS 5.[56] and GCC, using POSIX threads"
@echo " make sunos5-gcc for Suns with SunOS 5.x and GCC"
@echo " make sunos5-gcc-sl for Suns with SunOS 5.x, GCC, make shared libs"
@echo " make sunos5-x11r6-gcc-sl for Suns with X11R6, GCC, make shared libs"
@echo " make sunos5-gcc-thread for Suns with SunOS 5.x and GCC, using Solaris threads"
@echo " make sunos5-gcc-pthread for Suns with SunOS 5.[56] and GCC, using POSIX threads"
@echo " make sunSolaris-CC for Solaris using C++ compiler"
@echo " make ultrix-gcc for DEC systems with Ultrix and GCC"
@echo " make unicos for Cray C90 (and other?) systems"
@echo " make unixware for PCs running UnixWare"
@echo " make unixware-shared for PCs running UnixWare, shared libs"
@echo " make uwin for Windows NT with AT&T/Wipro UWIN"
@echo " make vistra for Stardent Vistra systems"
@echo " make clean remove .o files"
@echo " make realclean remove .o, library and executable files"
aix aix-sl amix bsdos bsdos4 dgux freebsd freebsd-386 gcc \
hpux9 hpux9-sl hpux9-gcc hpux9-gcc-sl \
hpux10 hpux10-sl hpux10-gcc hpux10-gcc-sl \
irix-debug irix4 irix5 irix5-gcc irix5-dso irix6-o32 irix6-o32-dso \
linux linux-debug linux-prof linux-elf linux-elf-debug \
linux-glide linux-386-glide linux-386-opt-glide \
linux-386-opt-V2-glide \
linux-386 linux-386-elf \
linux-3dnow linux-3dnow-glide \
linux-alpha linux-alpha-elf \
linux-ppc linux-ppc-so \
linux-386-pthread linux-386-pthread-shared \
linux-sparc \
linux-sparc5-elf \
linux-sparc-ultra \
lynxos machten-2.2 machten-4.0 \
mklinux netbsd osf1 osf1-sl openbsd qnx sco \
solaris-x86 solaris-x86-gcc sunSolaris-CC \
sunos4 sunos4-sl sunos4-gcc sunos4-gcc-sl sunos4-gcc-x11r6-sl \
sunos5 sunos5-sl sunos5-ultra sunos5-ultra-sl sunos5-gcc sunos5-gcc-sl \
sunos5-thread sunos5-pthread sunos5-gcc-thread sunos5-gcc-pthread \
sunos5-x11r6-gcc-sl ultrix-gcc unicos unixware uwin vistra:
-mkdir lib
touch src/depend
touch src-glu/depend
if [ -d src-glut ] ; then touch src-glut/depend ; fi
cd src ; $(MAKE) -f Makefile.X11 $@
cd src-glu ; $(MAKE) -f Makefile.X11 $@
if [ -d src-glut ] ; then cd src-glut ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d demos ] ; then cd demos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d xdemos ] ; then cd xdemos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d samples ] ; then cd samples ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d book ] ; then cd book ; $(MAKE) -f Makefile.X11 $@ ; fi
irix6-n32 irix6-n32-dso irix6-gcc-n32-sl:
-mkdir lib32
touch src/depend
touch src-glu/depend
if [ -d src-glut ] ; then touch src-glut/depend ; fi
cd src ; $(MAKE) -f Makefile.X11 $@
cd src-glu ; $(MAKE) -f Makefile.X11 $@
if [ -d src-glut ] ; then cd src-glut ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d demos ] ; then cd demos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d xdemos ] ; then cd xdemos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d samples ] ; then cd samples ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d book ] ; then cd book ; $(MAKE) -f Makefile.X11 $@ ; fi
irix6-64 irix6-64-dso:
-mkdir lib64
touch src/depend
touch src-glu/depend
if [ -d src-glut ] ; then touch src-glut/depend ; fi
cd src ; $(MAKE) -f Makefile.X11 $@
cd src-glu ; $(MAKE) -f Makefile.X11 $@
if [ -d src-glut ] ; then cd src-glut ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d demos ] ; then cd demos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d xdemos ] ; then cd xdemos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d samples ] ; then cd samples ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d book ] ; then cd book ; $(MAKE) -f Makefile.X11 $@ ; fi
amiwin:
bin/mklib.amiwin
beos-r4:
-mkdir lib
-rm src/depend
touch src/depend
-rm src-glu/depend
touch src-glu/depend
cd src ; $(MAKE) -f Makefile.BeOS-R4 $@
cd src-glu ; $(MAKE) -f Makefile.BeOS-R4 $@
if [ -d BeOS ] ; then cd BeOS ; $(MAKE) ; fi
if [ -d src-glut.beos ] ; then cd src-glut.beos ; $(MAKE) ; fi
if [ -d src-glut.beos ] ; then cp src-glut.beos/obj*/libglut.so lib ; fi
if [ -d demos ] ; then cd demos ; $(MAKE) -f Makefile.BeOS-R4 $@ ; fi
if [ -d samples ] ; then cd samples ; $(MAKE) -f Makefile.BeOS-R4 $@ ; fi
if [ -d book ] ; then cd book ; $(MAKE) -f Makefile.BeOS-R4 $@ ; fi
pgi-cygnus pgi-mingw32 \
cygnus cygnus-linux:
-mkdir lib
touch src/depend
touch src-glu/depend
cd src ; $(MAKE) -f Makefile.X11 $@
cd src-glu ; $(MAKE) -f Makefile.X11 $@
cd src-glut ; $(MAKE) -f Makefile.X11 $@
cd demos ; $(MAKE) -f Makefile.X11 $@
if [ -d xdemos ] ; then cd xdemos ; $(MAKE) -f Makefile.X11 $@ ; fi
macintosh:
@echo "See the README file for Macintosh intallation information"
next:
-mkdir lib
cd src ; $(MAKE) -f Makefile.X11 "MYCC=${CC}" $@
cd src-glu ; $(MAKE) -f Makefile.X11 "MYCC=${CC}" $@
openstep:
-mkdir lib
cd src ; $(MAKE) -f Makefile.X11 "MYCC=${CC}" $@
cd src-glu ; $(MAKE) -f Makefile.X11 "MYCC=${CC}" $@
os2-x11:
if not EXIST .\lib md lib
touch src/depend
touch src-glu/depend
if exist src-glut touch src-glut/depend
cd src & make -f Makefile.X11 $@
cd src-glu & make -f Makefile.X11 $@
if exist src-glut cd src-glut & make -f Makefile.X11 $@
if exist demos cd demos & make -f Makefile.X11 $@
if exist xdemos cd xdemos & make -f Makefile.X11 $@
if exist samples cd samples & make -f Makefile.X11 $@
if exist book cd book & make -f Makefile.X11 $@
linux-ggi linux-386-ggi:
-mkdir lib
touch src/depend
touch src-glu/depend
if [ -d src-glut ] ; then touch src-glut/depend ; fi
if [ -d ggi ] ; then touch ggi/depend ; fi
cd src ; $(MAKE) -f Makefile.X11 $@
cd src/GGI/default ; $(MAKE)
cd src/GGI/display ; $(MAKE)
cd src-glu ; $(MAKE) -f Makefile.X11 $@
# if [ -d src-glut ] ; then cd src-glut ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d ggi ] ; then cd ggi ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d ggi ] ; then cd ggi/demos; $(MAKE) ; fi
if [ -d demos ] ; then cd demos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d xdemos ] ; then cd xdemos ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d samples ] ; then cd samples ; $(MAKE) -f Makefile.X11 $@ ; fi
if [ -d book ] ; then cd book ; $(MAKE) -f Makefile.X11 $@ ; fi
# if you change GGI_DEST please change it in ggimesa.conf, too.
DESTDIR=/usr/local
GGI_DEST=lib/ggi/mesa
linux-ggi-install linux-386-ggi-install:
install -d $(DESTDIR)/$(GGI_DEST)/default $(DESTDIR)/$(GGI_DEST)/display $(DESTDIR)/etc/ggi
install -m 0755 src/GGI/default/*.so $(DESTDIR)/$(GGI_DEST)/default
install -m 0755 src/GGI/display/*.so $(DESTDIR)/$(GGI_DEST)/display
install -m 0644 src/GGI/ggimesa.conf $(DESTDIR)/etc/ggi
# if [ -z "`grep ggimesa $(DESTDIR)/etc/ggi/libggi.conf`" ]; then \
# echo ".include $(DESTDIR)/etc/ggi/ggimesa.conf" >> $(DESTDIR)/etc/ggi/libggi.conf ; \
# fi
# Remove .o files, emacs backup files, etc.
clean:
-rm -f ggi/*~ *.o
-rm -f src/GGI/default/*~ *.so
-rm -f src/GGI/display/*~ *.so
-rm -f include/*~
-rm -f include/GL/*~
-rm -f src/*.o src/*~ src/*.a src/*/*.o src/*/*~
-rm -f src-glu/*.o src-glu/*~ src-glu/*.a
-rm -f src-glut/*.o
-rm -f demos/*.o
-rm -f book/*.o book/*~
-rm -f xdemos/*.o xdemos/*~
-rm -f samples/*.o samples/*~
-rm -f ggi/*.o ggi/demos/*.o ggi/*.a
# Remove everything that can be remade
realclean: clean
-rm -f lib/*
cd demos && $(MAKE) -f Makefile.X11 realclean || true
cd xdemos && $(MAKE) -f Makefile.X11 realclean || true
cd book && $(MAKE) -f Makefile.X11 realclean || true
cd samples && $(MAKE) -f Makefile.X11 realclean || true
cd ggi/demos && $(MAKE) -f Makefile.X11 realclean || true
cd src/GGI/default && $(MAKE) -f Makefile.X11 realclean || true
DIRECTORY = Mesa-3.1
LIB_NAME = MesaLib-3.1beta2
DEMO_NAME = MesaDemos-3.1beta2
LIB_FILES = \
$(DIRECTORY)/Makefile* \
$(DIRECTORY)/Make-config \
$(DIRECTORY)/acconfig.h \
$(DIRECTORY)/acinclude.m4 \
$(DIRECTORY)/aclocal.m4 \
$(DIRECTORY)/conf.h.in \
$(DIRECTORY)/config.guess \
$(DIRECTORY)/config.sub \
$(DIRECTORY)/configure \
$(DIRECTORY)/configure.in \
$(DIRECTORY)/install-sh \
$(DIRECTORY)/ltconfig \
$(DIRECTORY)/ltmain.sh \
$(DIRECTORY)/missing \
$(DIRECTORY)/mkinstalldirs \
$(DIRECTORY)/stamp-h.in \
$(DIRECTORY)/INSTALL \
$(DIRECTORY)/INSTALL.GNU \
$(DIRECTORY)/configure \
$(DIRECTORY)/docs/CONFIG \
$(DIRECTORY)/docs/CONFORM \
$(DIRECTORY)/docs/COPYRIGHT \
$(DIRECTORY)/docs/IAFA-PACKAGE \
$(DIRECTORY)/docs/LICENSE \
$(DIRECTORY)/docs/README \
$(DIRECTORY)/docs/README.* \
$(DIRECTORY)/docs/RELNOTES \
$(DIRECTORY)/docs/VERSIONS \
$(DIRECTORY)/bin/mklib* \
$(DIRECTORY)/*.BAT \
$(DIRECTORY)/*.bat \
$(DIRECTORY)/descrip.mms \
$(DIRECTORY)/mms-config \
$(DIRECTORY)/xlib.opt \
$(DIRECTORY)/STARTUP.MK \
$(DIRECTORY)/mesawin32.mak \
$(DIRECTORY)/Names.win \
$(DIRECTORY)/win32-openstep.sh \
$(DIRECTORY)/*.dja \
$(DIRECTORY)/include/GL/dosmesa.h \
$(DIRECTORY)/include/GL/foomesa.h \
$(DIRECTORY)/include/GL/fxmesa.h \
$(DIRECTORY)/include/GL/ggimesa.h \
$(DIRECTORY)/include/GL/gl.h \
$(DIRECTORY)/include/GL/gl_mangle.h \
$(DIRECTORY)/include/GL/glu.h \
$(DIRECTORY)/include/GL/glu_mangle.h \
$(DIRECTORY)/include/GL/glx.h \
$(DIRECTORY)/include/GL/glx_mangle.h \
$(DIRECTORY)/include/GL/mglmesa.h \
$(DIRECTORY)/include/GL/osmesa.h \
$(DIRECTORY)/include/GL/svgamesa.h \
$(DIRECTORY)/include/GL/wmesa.h \
$(DIRECTORY)/include/GL/xmesa.h \
$(DIRECTORY)/include/GL/xmesa_x.h \
$(DIRECTORY)/include/GL/xmesa_xf86.h \
$(DIRECTORY)/include/GLView.h \
$(DIRECTORY)/src/Makefile* \
$(DIRECTORY)/src/descrip.mms \
$(DIRECTORY)/src/mms_depend \
$(DIRECTORY)/src/*.def \
$(DIRECTORY)/src/depend \
$(DIRECTORY)/src/*.[chS] \
$(DIRECTORY)/src/Allegro/*.[ch] \
$(DIRECTORY)/src/BeOS/*.cpp \
$(DIRECTORY)/src/D3D/*.cpp \
$(DIRECTORY)/src/D3D/*.CPP \
$(DIRECTORY)/src/D3D/*.h \
$(DIRECTORY)/src/D3D/*.H \
$(DIRECTORY)/src/D3D/*.c \
$(DIRECTORY)/src/D3D/*.C \
$(DIRECTORY)/src/D3D/MAKEFILE \
$(DIRECTORY)/src/D3D/*bat \
$(DIRECTORY)/src/D3D/*DEF \
$(DIRECTORY)/src/DOS/DEPEND.DOS \
$(DIRECTORY)/src/DOS/*.c \
$(DIRECTORY)/src/FX/*.[ch] \
$(DIRECTORY)/src/FX/*.def \
$(DIRECTORY)/src/GGI/*.[ch] \
$(DIRECTORY)/src/GGI/ggimesa.conf \
$(DIRECTORY)/src/GGI/default/*.c \
$(DIRECTORY)/src/GGI/default/Makefile \
$(DIRECTORY)/src/GGI/display/*.c \
$(DIRECTORY)/src/GGI/display/Makefile \
$(DIRECTORY)/src/KNOWN_BUGS \
$(DIRECTORY)/src/MGL/*.[ch] \
$(DIRECTORY)/src/MGL/*.txt \
$(DIRECTORY)/src/OSmesa/*.[ch] \
$(DIRECTORY)/src/S3/*.[ch] \
$(DIRECTORY)/src/S3/*.def \
$(DIRECTORY)/src/S3/*.mak \
$(DIRECTORY)/src/S3/*.rc \
$(DIRECTORY)/src/SVGA/*.[ch] \
$(DIRECTORY)/src/Windows/*.[ch] \
$(DIRECTORY)/src/Windows/*.def \
$(DIRECTORY)/src/X/*.[ch] \
$(DIRECTORY)/src/X86/*.[ch] \
$(DIRECTORY)/src/X86/Makefile \
$(DIRECTORY)/src/X86/*.m4 \
$(DIRECTORY)/src/X86/*.S \
$(DIRECTORY)/src/*.dja \
$(DIRECTORY)/src-glu/README[12] \
$(DIRECTORY)/src-glu/Makefile* \
$(DIRECTORY)/src-glu/descrip.mms \
$(DIRECTORY)/src-glu/mms_depend \
$(DIRECTORY)/src-glu/*.def \
$(DIRECTORY)/src-glu/*.dja \
$(DIRECTORY)/src-glu/depend \
$(DIRECTORY)/src-glu/*.[ch] \
$(DIRECTORY)/widgets-mesa \
$(DIRECTORY)/widgets-sgi \
$(DIRECTORY)/util/README \
$(DIRECTORY)/util/*.[ch] \
$(DIRECTORY)/util/sampleMakefile \
$(DIRECTORY)/BeOS/Makefile \
$(DIRECTORY)/BeOS/*.cpp
# old stuff
# $(DIRECTORY)/Win32 \
# $(DIRECTORY)/win32
# $(DIRECTORY)/OpenStep \
#
#
DEMO_FILES = \
$(DIRECTORY)/include/GL/glut.h \
$(DIRECTORY)/include/GL/glutf90.h \
$(DIRECTORY)/include/GL/glut_h.dja \
$(DIRECTORY)/src-glut/Makefile* \
$(DIRECTORY)/src-glut/depend \
$(DIRECTORY)/src-glut/*def \
$(DIRECTORY)/src-glut/descrip.mms \
$(DIRECTORY)/src-glut/mms_depend \
$(DIRECTORY)/src-glut/*.[ch] \
$(DIRECTORY)/src-glut.dja/* \
$(DIRECTORY)/src-glut.beos/Makefile \
$(DIRECTORY)/src-glut.beos/*.cpp \
$(DIRECTORY)/src-glut.beos/*.h \
$(DIRECTORY)/images/* \
$(DIRECTORY)/demos/Makefile* \
$(DIRECTORY)/demos/descrip.mms \
$(DIRECTORY)/demos/*.[ch] \
$(DIRECTORY)/demos/*.dat \
$(DIRECTORY)/xdemos/Makefile* \
$(DIRECTORY)/xdemos/descrip.mms \
$(DIRECTORY)/xdemos/*.[cf] \
$(DIRECTORY)/book/Makefile* \
$(DIRECTORY)/book/README \
$(DIRECTORY)/book/*.[ch] \
$(DIRECTORY)/samples/Makefile* \
$(DIRECTORY)/samples/README \
$(DIRECTORY)/samples/*.c \
$(DIRECTORY)/samples/*.dja \
$(DIRECTORY)/3Dfx \
$(DIRECTORY)/mtdemos \
$(DIRECTORY)/ggi
lib_tar:
cd .. ; \
tar -cvf $(LIB_NAME).tar $(LIB_FILES) ; \
gzip $(LIB_NAME).tar ; \
mv $(LIB_NAME).tar.gz $(DIRECTORY)
demo_tar:
cd .. ; \
tar -cvf $(DEMO_NAME).tar $(DEMO_FILES) ; \
gzip $(DEMO_NAME).tar ; \
mv $(DEMO_NAME).tar.gz $(DIRECTORY)
lib_zip:
-rm $(LIB_NAME).zip
cd .. ; \
zip -r $(LIB_NAME).zip $(LIB_FILES) ; \
mv $(LIB_NAME).zip $(DIRECTORY)
demo_zip:
-rm $(DEMO_NAME).zip
cd .. ; \
zip -r $(DEMO_NAME).zip $(DEMO_FILES) ; \
mv $(DEMO_NAME).zip $(DIRECTORY)
SRC_FILES = \
RELNOTES \
src/Makefile* \
src/depend \
src/*.[chS] \
src/*/*.[ch] \
include/GL/*.h
srctar:
tar -cvf src.tar $(SRC_FILES) ; \
gzip src.tar
srctar.zip:
-rm src.zip
zip -r src.zip $(SRC_FILES) ; \

View File

@@ -1,59 +0,0 @@
`Mesa <https://mesa3d.org>`_ - The 3D Graphics Library
======================================================
Source
------
This repository lives at https://gitlab.freedesktop.org/mesa/mesa.
Other repositories are likely forks, and code found there is not supported.
Build & install
---------------
You can find more information in our documentation (`docs/install.rst
<https://mesa3d.org/install.html>`_), but the recommended way is to use
Meson (`docs/meson.rst <https://mesa3d.org/meson.html>`_):
.. code-block:: sh

  $ mkdir build
  $ cd build
  $ meson ..
  $ sudo ninja install
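Build options can be passed to ``meson`` at configure time as ``-D`` flags;
for example, the following (illustrative) invocation mirrors the commands
used by the Travis configuration shown earlier in this diff:

.. code-block:: sh

  $ meson _build -Dbuild-tests=true
  $ ninja -C _build test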
Support
-------
Many Mesa devs hang out on IRC; if you're not sure which channel is
appropriate, ask your question on `Freenode's #dri-devel
<irc://chat.freenode.net#dri-devel>`_ and someone will redirect you if
necessary.
Remember that not everyone is in the same timezone as you, so it might
take a while before someone qualified sees your question.
To figure out who you're talking to, or which nick to ping for your
question, check out `Who's Who on IRC
<https://dri.freedesktop.org/wiki/WhosWho/>`_.
The next best option is to ask your question in an email to the
mailing list: `mesa-dev\@lists.freedesktop.org
<https://lists.freedesktop.org/mailman/listinfo/mesa-dev>`_
Bug reports
-----------
If you think something isn't working properly, please file a bug report
(`docs/bugs.rst <https://mesa3d.org/bugs.html>`_).
Contributing
------------
Contributions are welcome, and step-by-step instructions can be found in our
documentation (`docs/submittingpatches.rst
<https://mesa3d.org/submittingpatches.html>`_).
Note that Mesa uses GitLab for patch submission, review, and discussion.

115 REVIEWERS
View File

@@ -1,115 +0,0 @@
Overview:
This file is similar in syntax to (or more precisely, a subset of) the
MAINTAINERS file in the Linux kernel.
Its purpose is not exactly the same as that of the kernel's MAINTAINERS
file, as there are no official/formal maintainers of the different
subsystems in Mesa; rather, it is meant to give an idea of whom to CC
for review on various patches.
Descriptions of section entries:
R: Designated reviewer: FullName <address@domain>
These reviewers should be CCed on patches.
F: Files and directories with wildcard patterns.
A trailing slash includes all files and subdirectory files.
F: drivers/net/ all files in and below drivers/net
F: drivers/net/* all files in drivers/net, but not below
F: */net/* all files in "any top level directory"/net
One pattern per line. Multiple F: lines acceptable.
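For example, a complete (hypothetical) entry following this syntax would
look like:
	EXAMPLE DRIVER
	R: Jane Doe <jane@example.com>
	F: src/gallium/drivers/example/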
Maintainers List (try to look for most precise areas first)
Note: this is an opt-in system; I have not tried to add anyone who hasn't
either asked me or sent a patch to add themselves.
-----------------------------------
NIR
R: Jason Ekstrand <jason@jlekstrand.net>
F: src/compiler/nir/
DOCUMENTATION
R: Emil Velikov <emil.l.velikov@gmail.com>
R: Eric Engestrom <eric@engestrom.ch>
F: docs/
F: doxygen/
COMPATIBILITY HEADERS
R: Emil Velikov <emil.l.velikov@gmail.com>
F: include/c99*
DRI LOADER
R: Emil Velikov <emil.l.velikov@gmail.com>
F: src/loader/
EGL
R: Eric Engestrom <eric@engestrom.ch>
R: Emil Velikov <emil.l.velikov@gmail.com>
F: src/egl/
F: include/EGL/
HAIKU
R: Alexander von Gluck IV <kallisti5@unixzen.com>
F: include/HaikuGL/
F: src/egl/drivers/haiku/
F: src/gallium/state_trackers/hgl/
F: src/gallium/targets/haiku-softpipe/
F: src/gallium/winsys/sw/hgl/
F: src/hgl/
GALLIUM LOADER
R: Emil Velikov <emil.l.velikov@gmail.com>
F: src/gallium/auxiliary/pipe-loader/
F: src/gallium/auxiliary/target-helpers/
GALLIUM TARGETS
R: Emil Velikov <emil.l.velikov@gmail.com>
F: src/gallium/targets/
ANDROID BUILD
R: Emil Velikov <emil.l.velikov@gmail.com>
R: Rob Herring <robh@kernel.org>
F: CleanSpec.mk
F: */Android.*mk
F: */Makefile.sources
MESON BUILD
R: Dylan Baker <dylan@pnwbakers.com>
R: Eric Engestrom <eric@engestrom.ch>
F: */meson.build
F: meson.build
F: meson_options.txt
ANDROID EGL SUPPORT
R: Rob Herring <robh@kernel.org>
R: Tomasz Figa <tfiga@chromium.org>
F: src/egl/drivers/dri2/platform_android.c
WAYLAND EGL SUPPORT
R: Daniel Stone <daniels@collabora.com>
F: src/egl/wayland/*
F: src/egl/drivers/dri2/platform_wayland.c
FREEDRENO
R: Rob Clark <robclark@freedesktop.org>
F: src/gallium/drivers/freedreno/
GLX
R: Adam Jackson <ajax@redhat.com>
F: src/glx/
VULKAN
R: Eric Engestrom <eric@engestrom.ch>
F: src/vulkan/
F: include/vulkan/
VMWARE DRIVER
R: Brian Paul <brianp@vmware.com>
R: Charmaine Lee <charmainel@vmware.com>
F: src/gallium/drivers/svga/
VMWARE WINSYS CODE
R: Thomas Hellstrom <thellstrom@vmware.com>
R: Deepak Rawat <drawat@vmware.com>
F: src/gallium/winsys/svga/

Some files were not shown because too many files have changed in this diff