Compare commits
36 Commits
mesa-25.0.
...
mesa-22.1.
| Author | SHA1 | Date |
| --- | --- | --- |
| | 7c75d83842 | |
| | b7e08dbc06 | |
| | f00600e1a3 | |
| | 68b25a57c1 | |
| | 6a69784335 | |
| | 3987237220 | |
| | a6c2047ea0 | |
| | 8efeb7e3bf | |
| | 9a28aea2a2 | |
| | 30a980fb94 | |
| | 4f031f35fe | |
| | df6dc532d2 | |
| | c849ae36e0 | |
| | b7fe949ab9 | |
| | 445892367a | |
| | 81b10bd0dd | |
| | 24d6489d0d | |
| | b27d409cfd | |
| | df84664032 | |
| | 357e3130ad | |
| | 247ecdcc05 | |
| | 72cc88d205 | |
| | a27af70c1b | |
| | 424c960492 | |
| | db3e06f76c | |
| | 353330c094 | |
| | f1168c53ae | |
| | 62b00f70fd | |
| | ff76add805 | |
| | f48a3cffe7 | |
| | a80f8a5ed0 | |
| | 5f5c562f00 | |
| | 653b560413 | |
| | 9a1ca294a8 | |
| | 252a858bc5 | |
| | c519c37784 | |
@@ -17,7 +17,7 @@ variables:
PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${MINIO_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO"
# Individual CI farm status, set to "offline" to disable jobs
# running on a particular CI farm (ie. for outages, etc):
FD_FARM: "offline"
FD_FARM: "online"
COLLABORA_FARM: "online"

default:
2000  .pick_status.json  Normal file
File diff suppressed because it is too large
@@ -144,8 +144,10 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)

b->cursor = nir_before_instr(&intrin->instr);

if (off_const > UINT32_MAX)
if (off_const > UINT32_MAX) {
addr = nir_iadd_imm(b, addr, off_const);
off_const = 0;
}

nir_intrinsic_instr *new_intrin = nir_intrinsic_instr_create(b->shader, op);
@@ -3057,9 +3057,9 @@ radv_generate_graphics_pipeline_key(const struct radv_pipeline *pipeline,
key.ps.is_int10 = blend->col_format_is_int10;
}

if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
key.vs.topology = pCreateInfo->pInputAssemblyState ? pCreateInfo->pInputAssemblyState->topology : 0;
key.vs.topology = pCreateInfo->pInputAssemblyState ? pCreateInfo->pInputAssemblyState->topology : 0;

if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
const VkPipelineRasterizationStateCreateInfo *raster_info = pCreateInfo->pRasterizationState;
const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *provoking_vtx_info =
vk_find_struct_const(raster_info->pNext,
@@ -632,6 +632,7 @@ radv_shader_compile_to_nir(struct radv_device *device, const struct radv_pipelin
.post_depth_coverage = true,
.ray_query = true,
.ray_tracing = true,
.ray_traversal_primitive_culling = true,
.runtime_descriptor_array = true,
.shader_clock = true,
.shader_viewport_index_layer = true,
@@ -3286,6 +3286,14 @@ typedef struct nir_shader_compiler_options {

bool lower_ftrunc;

/** Lowers fround_even to ffract+feq+csel.
*
* Not correct in that it doesn't correctly handle the "_even" part of the
* rounding, but good enough for DX9 array indexing handling on DX9-class
* hardware.
*/
bool lower_fround_even;

bool lower_ldexp;

bool lower_pack_half_2x16;
@@ -1391,8 +1391,10 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
/* The varying is loaded from same uniform, so no need to do any
* interpolation. Mark it as flat explicitly.
*/
if (in_var && in_var->data.interpolation <= INTERP_MODE_NOPERSPECTIVE)
if (in_var && in_var->data.interpolation <= INTERP_MODE_NOPERSPECTIVE) {
in_var->data.interpolation = INTERP_MODE_FLAT;
out_var->data.interpolation = INTERP_MODE_FLAT;
}
}
}
@@ -1334,9 +1334,10 @@ nir_lower_tex_block(nir_block *block, nir_builder *b,
}

if ((tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) && options->lower_rect &&
tex->op != nir_texop_txf && !nir_tex_instr_is_query(tex)) {

if (compiler_options->has_txs)
tex->op != nir_texop_txf) {
if (nir_tex_instr_is_query(tex))
tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
else if (compiler_options->has_txs)
lower_rect(b, tex);
else
lower_rect_tex_scale(b, tex);
@@ -1292,7 +1292,7 @@ binop("umul24_relaxed", tuint32, _2src_commutative + associative, "src0 * src1")

unop_convert("fisnormal", tbool1, tfloat, "isnormal(src0)")
unop_convert("fisfinite", tbool1, tfloat, "isfinite(src0)")
unop_convert("fisfinite32", tint32, tfloat, "isfinite(src0)")
unop_convert("fisfinite32", tbool32, tfloat, "isfinite(src0)")

# vc4-specific opcodes
@@ -356,6 +356,14 @@ optimizations.extend([

(('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
(('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),

# Approximate handling of fround_even for DX9 addressing from gallium nine on
# DX9-class hardware with no proper fround support.
(('fround_even', a), ('bcsel',
('feq', ('ffract', a), 0.5),
('fadd', ('ffloor', ('fadd', a, 0.5)), 1.0),
('ffloor', ('fadd', a, 0.5))), 'options->lower_fround_even'),

(('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
(('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
(('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
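The comments above stress that this lowering only approximates fround_even: it behaves like DX9-era rounding rather than rounding halves to the nearest even value. A minimal standalone C sketch (not the NIR rule itself; the helper name is made up) showing where plain round-half-up and round-to-nearest-even disagree:

```c
#include <math.h>
#include <stdio.h>

/* Hypothetical helper: DX9-style rounding, i.e. floor(a + 0.5). */
static float
round_half_up(float a)
{
   return floorf(a + 0.5f);
}

int main(void)
{
   /* The two can only differ when ffract(a) == 0.5, which is exactly the
    * "_even" caveat the lower_fround_even comment calls out.
    * nearbyintf() rounds ties to even under the default rounding mode. */
   for (float a = -2.5f; a <= 2.5f; a += 0.5f)
      printf("a = % .1f  half-up = % .1f  round-even = % .1f\n",
             a, round_half_up(a), nearbyintf(a));
   return 0;
}
```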
@@ -58,7 +58,7 @@ struct pipe_loader_sw_device {
#define pipe_loader_sw_device(dev) ((struct pipe_loader_sw_device *)dev)

static const struct pipe_loader_ops pipe_loader_sw_ops;
#ifdef HAVE_ZINK
#if defined(HAVE_PIPE_LOADER_DRI) && defined(HAVE_ZINK)
static const struct pipe_loader_ops pipe_loader_vk_ops;
#endif
@@ -93,16 +93,14 @@ static const struct sw_driver_descriptor driver_descriptors = {
};
#endif

#if defined(GALLIUM_STATIC_TARGETS) && defined(HAVE_ZINK)
#if defined(GALLIUM_STATIC_TARGETS) && defined(HAVE_ZINK) && defined(HAVE_PIPE_LOADER_DRI)
static const struct sw_driver_descriptor kopper_driver_descriptors = {
.create_screen = sw_screen_create_zink,
.winsys = {
#ifdef HAVE_PIPE_LOADER_DRI
{
.name = "dri",
.create_winsys = dri_create_sw_winsys,
},
#endif
#ifdef HAVE_PIPE_LOADER_KMS
{
.name = "kms_dri",
@@ -158,7 +156,7 @@ pipe_loader_sw_probe_init_common(struct pipe_loader_sw_device *sdev)
return true;
}

#ifdef HAVE_ZINK
#if defined(HAVE_PIPE_LOADER_DRI) && defined(HAVE_ZINK)
static bool
pipe_loader_vk_probe_init_common(struct pipe_loader_sw_device *sdev)
{
@@ -404,7 +402,7 @@ pipe_loader_sw_get_driconf(struct pipe_loader_device *dev, unsigned *count)
return NULL;
}

#ifdef HAVE_ZINK
#if defined(HAVE_PIPE_LOADER_DRI) && defined(HAVE_ZINK)
static const driOptionDescription zink_driconf[] = {
#include "zink/driinfo_zink.h"
};
@@ -437,7 +435,7 @@ static const struct pipe_loader_ops pipe_loader_sw_ops = {
.release = pipe_loader_sw_release
};

#ifdef HAVE_ZINK
#if defined(HAVE_PIPE_LOADER_DRI) && defined(HAVE_ZINK)
static const struct pipe_loader_ops pipe_loader_vk_ops = {
.create_screen = pipe_loader_sw_create_screen,
.get_driconf = pipe_loader_vk_get_driconf,
@@ -1317,7 +1317,7 @@ Converter::parseNIR()
BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS);
info_out->prop.fp.usesDiscard = nir->info.fs.uses_discard || nir->info.fs.uses_demote;
info_out->prop.fp.usesSampleMaskIn =
!BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_SAMPLE_MASK_IN);
break;
case Program::TYPE_GEOMETRY:
info_out->prop.gp.instanceCount = nir->info.gs.invocations;
@@ -514,6 +514,7 @@ static const nir_shader_compiler_options r500_vs_compiler_options = {
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_fmod = true,
.lower_fround_even = true,
.lower_rotate = true,
.lower_uniforms_to_ubo = true,
.lower_vector_cmp = true,
@@ -541,6 +542,7 @@ static const nir_shader_compiler_options r500_fs_compiler_options = {
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_fmod = true,
.lower_fround_even = true,
.lower_rotate = true,
.lower_uniforms_to_ubo = true,
.lower_vector_cmp = true,
@@ -568,6 +570,7 @@ static const nir_shader_compiler_options r300_vs_compiler_options = {
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_fmod = true,
.lower_fround_even = true,
.lower_rotate = true,
.lower_uniforms_to_ubo = true,
.lower_vector_cmp = true,
@@ -594,6 +597,7 @@ static const nir_shader_compiler_options r300_fs_compiler_options = {
.lower_flrp32 = true,
.lower_flrp64 = true,
.lower_fmod = true,
.lower_fround_even = true,
.lower_rotate = true,
.lower_uniforms_to_ubo = true,
.lower_vector_cmp = true,
@@ -244,10 +244,11 @@ emit_store_lds(nir_builder *b, nir_intrinsic_instr *op, nir_ssa_def *addr)

for (int i = 0; i < 2; ++i) {
unsigned test_mask = (0x3 << 2 * i);
if (!(orig_writemask & test_mask))
unsigned wmask = orig_writemask & test_mask;
if (!(wmask))
continue;

uint32_t writemask = test_mask >> nir_intrinsic_component(op);
uint32_t writemask = wmask >> nir_intrinsic_component(op);

auto store_tcs_out = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_local_shared_r600);
nir_intrinsic_set_write_mask(store_tcs_out, writemask);
@@ -1,3 +1,9 @@
# #6115
spec@arb_tessellation_shader@execution@variable-indexing@tes-both-input-array-float-index-rd,Crash
spec@arb_tessellation_shader@execution@variable-indexing@tes-both-input-array-vec2-index-rd,Crash
spec@arb_tessellation_shader@execution@variable-indexing@tes-both-input-array-vec3-index-rd,Crash
spec@arb_tessellation_shader@execution@variable-indexing@tes-both-input-array-vec4-index-rd,Crash

# #6270
spec@arb_shader_texture_lod@execution@arb_shader_texture_lod-texgradcube,Fail
@@ -143,17 +143,11 @@ KHR-GL46.tessellation_shader.tessellation_shader_triangles_tessellation.inner_te
KHR-GL46.tessellation_shader.vertex.vertex_ordering,Fail
KHR-GL46.tessellation_shader.vertex.vertex_spacing,Fail
KHR-GL46.texture_swizzle.smoke,Timeout
KHR-GL46.texture_view.reference_counting,Fail
KHR-GL46.texture_view.view_classes,Fail
KHR-GL46.texture_view.view_sampling,Fail
KHR-GL46.transform_feedback.capture_geometry_interleaved_test,Fail
KHR-GL46.transform_feedback.capture_geometry_separate_test,Fail
KHR-GL46.transform_feedback.capture_vertex_separate_test,Fail
KHR-GL46.transform_feedback.draw_xfb_feedbackk_test,Fail
KHR-GL46.transform_feedback.draw_xfb_instanced_test,Fail
KHR-GL46.transform_feedback.draw_xfb_stream_instanced_test,Fail
KHR-GL46.transform_feedback.draw_xfb_stream_test,Fail
KHR-GL46.transform_feedback.draw_xfb_test,Fail
KHR-GL46.transform_feedback.query_geometry_interleaved_test,Fail
KHR-GL46.transform_feedback.query_geometry_separate_test,Fail
KHR-GL46.transform_feedback.query_vertex_interleaved_test,Fail
@@ -162,8 +156,6 @@ KHR-GL46.transform_feedback_overflow_query_ARB.advanced-single-stream-interleave
KHR-GL46.transform_feedback_overflow_query_ARB.advanced-single-stream-separate-attribs,Fail
KHR-GL46.transform_feedback_overflow_query_ARB.basic-single-stream-interleaved-attribs,Fail
KHR-GL46.transform_feedback_overflow_query_ARB.basic-single-stream-separate-attribs,Fail
KHR-GL46.transform_feedback_overflow_query_ARB.multiple-streams-multiple-buffers-per-stream,Fail
KHR-GL46.transform_feedback_overflow_query_ARB.multiple-streams-one-buffer-per-stream,Fail

dEQP-GLES31.functional.blend_equation_advanced.barrier.colorburn,Fail
dEQP-GLES31.functional.blend_equation_advanced.barrier.colordodge,Fail
@@ -1169,11 +1169,9 @@ rewrite_and_discard_read(nir_builder *b, nir_instr *instr, void *data)
void
zink_compiler_assign_io(nir_shader *producer, nir_shader *consumer)
{
unsigned reserved = 0, patch_reserved = 0;
unsigned reserved = 0;
unsigned char slot_map[VARYING_SLOT_MAX];
memset(slot_map, -1, sizeof(slot_map));
unsigned char patch_slot_map[VARYING_SLOT_MAX];
memset(patch_slot_map, -1, sizeof(patch_slot_map));
bool do_fixup = false;
nir_shader *nir = producer->info.stage == MESA_SHADER_TESS_CTRL ? producer : consumer;
if (consumer->info.stage != MESA_SHADER_FRAGMENT) {
@@ -1189,13 +1187,9 @@ zink_compiler_assign_io(nir_shader *producer, nir_shader *consumer)
if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
/* never assign from tcs -> tes, always invert */
nir_foreach_variable_with_modes(var, consumer, nir_var_shader_in)
assign_producer_var_io(consumer->info.stage, var,
var->data.patch ? &patch_reserved : &reserved,
var->data.patch ? patch_slot_map : slot_map);
assign_producer_var_io(consumer->info.stage, var, &reserved, slot_map);
nir_foreach_variable_with_modes_safe(var, producer, nir_var_shader_out) {
if (!assign_consumer_var_io(producer->info.stage, var,
var->data.patch ? &patch_reserved : &reserved,
var->data.patch ? patch_slot_map : slot_map))
if (!assign_consumer_var_io(producer->info.stage, var, &reserved, slot_map))
/* this is an output, nothing more needs to be done for it to be dropped */
do_fixup = true;
}
@@ -2075,7 +2075,11 @@ get_render_pass(struct zink_context *ctx)

bool needs_write_s = state.rts[fb->nr_cbufs].clear_stencil || outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
if (!needs_write_z && (!ctx->dsa_state || !ctx->dsa_state->base.depth_enabled))
/* depth sample, stencil write */
state.rts[fb->nr_cbufs].mixed_zs = needs_write_s && zsbuf->bind_count[0];
else
/* depth write + sample */
state.rts[fb->nr_cbufs].mixed_zs = needs_write_z && zsbuf->bind_count[0];
state.rts[fb->nr_cbufs].needs_write = needs_write_z | needs_write_s;
state.num_rts++;
}
@@ -3887,17 +3891,8 @@ zink_set_stream_output_targets(struct pipe_context *pctx,
pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
if (!t)
continue;
struct zink_resource *res = zink_resource(t->counter_buffer);
if (offsets[0] == (unsigned)-1) {
ctx->xfb_barrier |= zink_resource_buffer_needs_barrier(res,
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
} else {
ctx->xfb_barrier |= zink_resource_buffer_needs_barrier(res,
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
if (offsets[0] != (unsigned)-1)
t->counter_buffer_valid = false;
}
struct zink_resource *so = zink_resource(ctx->so_targets[i]->buffer);
if (so) {
so->so_bind_count++;
@@ -363,7 +363,6 @@ struct zink_context {
uint32_t num_so_targets;
struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_OUTPUTS];
bool dirty_so_targets;
bool xfb_barrier;
bool first_frame_done;
bool have_timelines;
@@ -20,45 +20,27 @@
static void
zink_emit_xfb_counter_barrier(struct zink_context *ctx)
{
/* Between the pause and resume there needs to be a memory barrier for the counter buffers
* with a source access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT
* at pipeline stage VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT
* to a destination access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT
* at pipeline stage VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT.
*
* - from VK_EXT_transform_feedback spec
*/
for (unsigned i = 0; i < ctx->num_so_targets; i++) {
struct zink_so_target *t = zink_so_target(ctx->so_targets[i]);
if (!t)
continue;
struct zink_resource *res = zink_resource(t->counter_buffer);
if (t->counter_buffer_valid)
zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
else
zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
VkAccessFlags access = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT;
VkPipelineStageFlags stage = VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
if (t->counter_buffer_valid) {
/* Between the pause and resume there needs to be a memory barrier for the counter buffers
* with a source access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT
* at pipeline stage VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT
* to a destination access of VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT
* at pipeline stage VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT.
*
* - from VK_EXT_transform_feedback spec
*/
access |= VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT;
stage |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
}
zink_resource_buffer_barrier(ctx, res, access, stage);
}
ctx->xfb_barrier = false;
}

static void
zink_emit_xfb_vertex_input_barrier(struct zink_context *ctx, struct zink_resource *res)
{
/* A pipeline barrier is required between using the buffers as
* transform feedback buffers and vertex buffers to
* ensure all writes to the transform feedback buffers are visible
* when the data is read as vertex attributes.
* The source access is VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT
* and the destination access is VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
* for the pipeline stages VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT
* and VK_PIPELINE_STAGE_VERTEX_INPUT_BIT respectively.
*
* - 20.3.1. Drawing Transform Feedback
*/
zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
}

static void
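The quoted VK_EXT_transform_feedback language is what both branches of this helper encode; the refactored version simply accumulates the extra read access and draw-indirect stage into one combined barrier instead of choosing between two calls. As a rough sketch of the raw Vulkan barrier that spec text describes for the pause/resume case (this is not the zink_resource_buffer_barrier implementation; the command buffer and counter buffer handles are assumed to come from the caller):

```c
#include <vulkan/vulkan.h>

/* Sketch only: the counter-buffer barrier described by the quoted
 * VK_EXT_transform_feedback text, between pausing and resuming streamout. */
static void
emit_xfb_counter_barrier_sketch(VkCommandBuffer cmdbuf, VkBuffer counter_buffer)
{
   const VkBufferMemoryBarrier barrier = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
      .srcAccessMask = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
      .dstAccessMask = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
      .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .buffer = counter_buffer,
      .offset = 0,
      .size = VK_WHOLE_SIZE,
   };
   vkCmdPipelineBarrier(cmdbuf,
                        VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
                        VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                        0, 0, NULL, 1, &barrier, 0, NULL);
}
```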
@@ -239,25 +221,6 @@ update_gfx_program(struct zink_context *ctx)
ctx->dirty_shader_stages &= ~bits;
}

static bool
line_width_needed(enum pipe_prim_type reduced_prim,
unsigned polygon_mode)
{
switch (reduced_prim) {
case PIPE_PRIM_POINTS:
return false;

case PIPE_PRIM_LINES:
return true;

case PIPE_PRIM_TRIANGLES:
return polygon_mode == VK_POLYGON_MODE_LINE;

default:
unreachable("unexpected reduced prim");
}
}

ALWAYS_INLINE static void
update_drawid(struct zink_context *ctx, unsigned draw_id)
{
@@ -552,8 +515,7 @@ zink_draw(struct pipe_context *pctx,

bool have_streamout = !!ctx->num_so_targets;
if (have_streamout) {
if (ctx->xfb_barrier)
zink_emit_xfb_counter_barrier(ctx);
zink_emit_xfb_counter_barrier(ctx);
if (ctx->dirty_so_targets) {
/* have to loop here and below because barriers must be emitted out of renderpass,
* but xfb buffers can't be bound before the renderpass is active to avoid
@@ -568,8 +530,13 @@ zink_draw(struct pipe_context *pctx,
}
}

if (so_target)
zink_emit_xfb_vertex_input_barrier(ctx, zink_resource(so_target->base.buffer));
/* ensure synchronization between doing streamout with counter buffer
* and using counter buffer for indirect draw
*/
if (so_target && so_target->counter_buffer_valid)
zink_resource_buffer_barrier(ctx, zink_resource(so_target->counter_buffer),
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);

barrier_draw_buffers(ctx, dinfo, dindirect, index_buffer);
@@ -745,12 +712,7 @@ zink_draw(struct pipe_context *pctx,
unreachable("unexpected reduced prim");
}

if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
if (screen->info.feats.features.wideLines || rast_state->line_width == 1.0f)
VKCTX(CmdSetLineWidth)(batch->state->cmdbuf, rast_state->line_width);
else
debug_printf("BUG: wide lines not supported, needs fallback!");
}
VKCTX(CmdSetLineWidth)(batch->state->cmdbuf, rast_state->line_width);
if (depth_bias)
VKCTX(CmdSetDepthBias)(batch->state->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
else
@@ -681,7 +681,9 @@ zink_kopper_update(struct pipe_screen *pscreen, struct pipe_resource *pres, int
{
struct zink_resource *res = zink_resource(pres);
struct zink_screen *screen = zink_screen(pscreen);
assert(res->obj->dt);
assert(pres->bind & PIPE_BIND_DISPLAY_TARGET);
if (!res->obj->dt)
return false;
struct kopper_displaytarget *cdt = kopper_displaytarget(res->obj->dt);
if (cdt->type != KOPPER_X11) {
*w = res->base.b.width0;
@@ -689,7 +691,7 @@ zink_kopper_update(struct pipe_screen *pscreen, struct pipe_resource *pres, int
return true;
}
if (update_caps(screen, cdt) != VK_SUCCESS) {
debug_printf("zink: failed to update swapchain capabilities");
mesa_loge("zink: failed to update swapchain capabilities");
return false;
}
*w = cdt->caps.currentExtent.width;
@@ -86,12 +86,17 @@ zink_create_gfx_pipeline(struct zink_screen *screen,
switch (primitive_topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
if (screen->info.have_EXT_primitive_topology_list_restart) {
primitive_state.primitiveRestartEnable = state->dyn_state2.primitive_restart ? VK_TRUE : VK_FALSE;
break;
}
FALLTHROUGH;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
if (state->dyn_state2.primitive_restart)
debug_printf("restart_index set with unsupported primitive topology %u\n", primitive_topology);
mesa_loge("zink: restart_index set with unsupported primitive topology %u\n", primitive_topology);
primitive_state.primitiveRestartEnable = VK_FALSE;
break;
default:
@@ -768,7 +768,17 @@ update_qbo(struct zink_context *ctx, struct zink_query *q)
copy_pool_results_to_buffer(ctx, q, start->vkq[i]->pool->query_pool, start->vkq[i]->query_id,
zink_resource(qbo->buffers[i]),
offset,
1, VK_QUERY_RESULT_64_BIT);
1,
/*
there is an implicit execution dependency from
each such query command to all query commands previously submitted to the same queue. There
is one significant exception to this; if the flags parameter of vkCmdCopyQueryPoolResults does not
include VK_QUERY_RESULT_WAIT_BIT, execution of vkCmdCopyQueryPoolResults may happen-before
the results of vkCmdEndQuery are available.

* - Chapter 18. Queries
*/
VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
}

if (!is_timestamp)
@@ -1230,6 +1240,16 @@ zink_get_query_result_resource(struct pipe_context *pctx,
return;
}

/*
there is an implicit execution dependency from
each such query command to all query commands previously submitted to the same queue. There
is one significant exception to this; if the flags parameter of vkCmdCopyQueryPoolResults does not
include VK_QUERY_RESULT_WAIT_BIT, execution of vkCmdCopyQueryPoolResults may happen-before
the results of vkCmdEndQuery are available.

* - Chapter 18. Queries
*/
size_flags |= VK_QUERY_RESULT_WAIT_BIT;
if (!is_time_query(query) && !is_bool_query(query)) {
if (num_queries == 1 && query->type != PIPE_QUERY_PRIMITIVES_GENERATED &&
query->type != PIPE_QUERY_PRIMITIVES_EMITTED &&
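Both query hunks add VK_QUERY_RESULT_WAIT_BIT because of the spec text quoted in their comments: without it, the copy may execute before the corresponding vkCmdEndQuery results exist. A hedged sketch of the underlying Vulkan call these paths feed (handles, offsets, and the helper name are placeholders, not zink's wrappers):

```c
#include <vulkan/vulkan.h>

/* Sketch only: copy one 64-bit query result to a buffer, waiting for the
 * result to become available as the quoted Chapter 18 text requires. */
static void
copy_query_result_sketch(VkCommandBuffer cmdbuf, VkQueryPool pool,
                         uint32_t query, VkBuffer dst, VkDeviceSize offset)
{
   vkCmdCopyQueryPoolResults(cmdbuf, pool, query, 1 /* queryCount */,
                             dst, offset, sizeof(uint64_t) /* stride */,
                             VK_QUERY_RESULT_64_BIT |
                             VK_QUERY_RESULT_WAIT_BIT);
}
```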
@@ -260,7 +260,7 @@ get_image_usage_for_feats(struct zink_screen *screen, VkFormatFeatureFlags feats
usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if ((bind & (PIPE_BIND_LINEAR | PIPE_BIND_SHARED)) != (PIPE_BIND_LINEAR | PIPE_BIND_SHARED))
usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
} else if (templ->nr_samples)
} else if (templ->nr_samples || !(feats & VK_FORMAT_FEATURE_BLIT_DST_BIT))
/* this can't be populated, so we can't do it */
return 0;
}
@@ -347,6 +347,7 @@ zink_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
if (screen->info.have_EXT_primitive_topology_list_restart) {
modes |= BITFIELD_BIT(PIPE_PRIM_POINTS) |
BITFIELD_BIT(PIPE_PRIM_LINES) |
BITFIELD_BIT(PIPE_PRIM_LINES_ADJACENCY) |
BITFIELD_BIT(PIPE_PRIM_TRIANGLES) |
BITFIELD_BIT(PIPE_PRIM_TRIANGLES_ADJACENCY);
if (screen->info.list_restart_feats.primitiveTopologyPatchListRestart)
@@ -225,14 +225,32 @@ intel_get_urb_config(const struct intel_device_info *devinfo,
}

/* Lay out the URB in pipeline order: push constants, VS, HS, DS, GS. */
int next = push_constant_chunks;
int first_urb = push_constant_chunks;

/* From the BDW PRM: for 3DSTATE_URB_*: VS URB Starting Address
*
* "Value: [4,48] Device [SliceCount] GT 1"
*
* From the ICL PRMs and above :
*
* "If CTXT_SR_CTL::POSH_Enable is clear and Push Constants are required
* or Device[SliceCount] GT 1, the lower limit is 4."
*
* "If Push Constants are not required andDevice[SliceCount] == 1, the
* lower limit is 0."
*/
if ((devinfo->ver == 8 && devinfo->num_slices == 1) ||
(devinfo->ver >= 11 && push_constant_chunks > 0 && devinfo->num_slices == 1))
first_urb = MAX2(first_urb, 4);

int next_urb = first_urb;
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
if (entries[i]) {
start[i] = next;
next += chunks[i];
start[i] = next_urb;
next_urb += chunks[i];
} else {
/* Just put disabled stages at the beginning. */
start[i] = 0;
/* Put disabled stages at the beginning of the valid range */
start[i] = first_urb;
}
}
@@ -359,4 +377,3 @@ intel_get_mesh_urb_config(const struct intel_device_info *devinfo,

return r;
}
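The PRM quotes above motivate clamping the first URB starting address to 4 on the affected configurations and then packing the enabled stages after it. A simplified, self-contained sketch of that layout step (the function name, stage count macro, and the boolean input stand in for the real devinfo checks; this is not the intel_get_urb_config() interface):

```c
#include <stdbool.h>

#define URB_NUM_STAGES 4 /* VS, HS, DS, GS */

/* Sketch: pack enabled stages after the push-constant chunks, clamping the
 * first stage start to 4 when the quoted PRM rule applies; disabled stages
 * sit at the beginning of the valid range. */
static void
layout_urb_sketch(int push_constant_chunks, bool needs_min_4,
                  const int entries[URB_NUM_STAGES],
                  const int chunks[URB_NUM_STAGES],
                  int start[URB_NUM_STAGES])
{
   int first_urb = push_constant_chunks;
   if (needs_min_4 && first_urb < 4)
      first_urb = 4;

   int next_urb = first_urb;
   for (int i = 0; i < URB_NUM_STAGES; i++) {
      if (entries[i]) {
         start[i] = next_urb;
         next_urb += chunks[i];
      } else {
         start[i] = first_urb;
      }
   }
}
```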
@@ -584,6 +584,13 @@ brw_nir_initialize_mue(nir_shader *nir,
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
}

if (remaining) {
nir_metadata_preserve(entrypoint, nir_metadata_none);
} else {
nir_metadata_preserve(entrypoint, nir_metadata_block_index |
nir_metadata_dominance);
}
}

static bool
@@ -246,6 +246,7 @@ brw_nir_lower_intersection_shader(nir_shader *intersection,
}
}
}
nir_metadata_preserve(impl, nir_metadata_none);

/* We did some inlining; have to re-index SSA defs */
nir_index_ssa_defs(impl);
@@ -298,7 +298,7 @@ anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
struct anv_address
anv_batch_address(struct anv_batch *batch, void *batch_location)
{
assert(batch->start < batch_location);
assert(batch->start <= batch_location);

/* Allow a jump at the current location of the batch. */
assert(batch->next >= batch_location);
@@ -119,7 +119,6 @@ bool
anv_nir_lower_ubo_loads(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_ubo_load_instr,
nir_metadata_block_index |
nir_metadata_dominance,
nir_metadata_none,
NULL);
}
@@ -45,6 +45,7 @@
#include "varray.h"
#include "api_exec_decl.h"

#include "state_tracker/st_cb_bitmap.h"
#include "state_tracker/st_context.h"

void
@@ -865,6 +866,8 @@ _mesa_set_enable(struct gl_context *ctx, GLenum cap, GLboolean state)
GLbitfield newEnabled =
state * ((1 << ctx->Const.MaxViewports) - 1);
if (newEnabled != ctx->Scissor.EnableFlags) {
st_flush_bitmap_cache(st_context(ctx));

FLUSH_VERTICES(ctx, 0,
GL_SCISSOR_BIT | GL_ENABLE_BIT);
ctx->NewDriverState |= ST_NEW_SCISSOR | ST_NEW_RASTERIZER;
@@ -1390,6 +1393,8 @@ _mesa_set_enablei(struct gl_context *ctx, GLenum cap,
return;
}
if (((ctx->Scissor.EnableFlags >> index) & 1) != state) {
st_flush_bitmap_cache(st_context(ctx));

FLUSH_VERTICES(ctx, 0,
GL_SCISSOR_BIT | GL_ENABLE_BIT);
ctx->NewDriverState |= ST_NEW_SCISSOR | ST_NEW_RASTERIZER;
@@ -30,6 +30,7 @@
#include "main/scissor.h"
#include "api_exec_decl.h"

#include "state_tracker/st_cb_bitmap.h"
#include "state_tracker/st_context.h"

/**
@@ -50,6 +51,9 @@ set_scissor_no_notify(struct gl_context *ctx, unsigned idx,
height == ctx->Scissor.ScissorArray[idx].Height)
return;

if (ctx->Scissor.EnableFlags)
st_flush_bitmap_cache(st_context(ctx));

FLUSH_VERTICES(ctx, 0, GL_SCISSOR_BIT);
ctx->NewDriverState |= ST_NEW_SCISSOR;
@@ -294,6 +298,8 @@ _mesa_WindowRectanglesEXT(GLenum mode, GLsizei count, const GLint *box)
box += 4;
}

st_flush_bitmap_cache(st_context(ctx));

FLUSH_VERTICES(ctx, 0, GL_SCISSOR_BIT);
ctx->NewDriverState |= ST_NEW_WINDOW_RECTANGLES;
@@ -72,7 +72,7 @@ st_convert_sampler(const struct st_context *st,
sampler->mag_img_filter = PIPE_TEX_FILTER_NEAREST;
}

if (texobj->Target != GL_TEXTURE_RECTANGLE_ARB)
if (texobj->Target != GL_TEXTURE_RECTANGLE_ARB || st->lower_rect_tex)
sampler->normalized_coords = 1;

sampler->lod_bias += tex_unit_lod_bias;
@@ -207,6 +207,9 @@ static VkResult
dzn_cmd_buffer_reset(dzn_cmd_buffer *cmdbuf)
{
dzn_device *device = container_of(cmdbuf->vk.base.device, dzn_device, vk);
const struct dzn_physical_device *pdev =
container_of(device->vk.physical, dzn_physical_device, vk);
const struct vk_command_pool *pool = cmdbuf->vk.pool;

/* Reset the state */
memset(&cmdbuf->state, 0, sizeof(cmdbuf->state));
@@ -255,7 +258,9 @@ dzn_cmd_buffer_reset(dzn_cmd_buffer *cmdbuf)
cmdbuf->cmdlist->Release();
cmdbuf->cmdlist = NULL;
cmdbuf->cmdalloc->Reset();
if (FAILED(device->dev->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT,
D3D12_COMMAND_LIST_TYPE type =
pdev->queue_families[pool->queue_family_index].desc.Type;
if (FAILED(device->dev->CreateCommandList(0, type,
cmdbuf->cmdalloc, NULL,
IID_PPV_ARGS(&cmdbuf->cmdlist)))) {
cmdbuf->error = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -202,7 +202,7 @@ pan_image_layout_init(const struct panfrost_device *dev,

if (should_align) {
effective_width = ALIGN_POT(effective_width, tile_w) >> tile_shift;
effective_height = ALIGN_POT(effective_height, tile_h);
effective_height = ALIGN_POT(effective_height, tile_h) >> tile_shift;

/* We don't need to align depth */
}
@@ -150,8 +150,9 @@ vk_debug_report(struct vk_instance *instance,
const char* pLayerPrefix,
const char *pMessage)
{
VkDebugReportObjectTypeEXT object_type =
VkObjectType object_type =
object ? object->type : VK_OBJECT_TYPE_UNKNOWN;
debug_report(instance, flags, object_type, (uint64_t)(uintptr_t)object,
location, messageCode, pLayerPrefix, pMessage);
debug_report(instance, flags, (VkDebugReportObjectTypeEXT)object_type,
(uint64_t)(uintptr_t)object, location, messageCode,
pLayerPrefix, pMessage);
}