Compare commits
18 Commits
mesa-18.2. ... mesa-18.2.
Author | SHA1 | Date
---|---|---
 | 86aa912dda |
 | 2ec87de498 |
 | 54cd81dfc5 |
 | 5457e58a64 |
 | 1e9c422894 |
 | 4320851198 |
 | f69fcede0a |
 | 26c07daf9d |
 | f3fc2d40fe |
 | 4477635b69 |
 | bc6b6cb290 |
 | 3ff3bfa3f5 |
 | c2268223c8 |
 | b9a97a8b88 |
 | dbb5396667 |
 | 586ac9c237 |
 | f070d5a568 |
 | b1e0876a6b |
bin/.cherry-ignore (new file, 3 lines)
@@ -0,0 +1,3 @@
+# fixes: This commit has more than one Fixes tag but the commit it
+# addresses didn't land in branch.
+6ff1c479968819b93c46d24bd898e89ce14ac401 autotools: don't ship the git_sha1.h generated in git in the tarballs
@@ -149,7 +149,8 @@ static LLVMTargetMachineRef ac_create_target_machine(enum radeon_family family,
 	char features[256];
 	const char *triple = (tm_options & AC_TM_SUPPORTS_SPILL) ? "amdgcn-mesa-mesa3d" : "amdgcn--";
 	LLVMTargetRef target = ac_get_llvm_target(triple);
-	bool barrier_does_waitcnt = family != CHIP_VEGA20;
+	bool barrier_does_waitcnt = (tm_options & AC_TM_AUTO_WAITCNT_BEFORE_BARRIER) &&
+				    family != CHIP_VEGA20;
 
 	snprintf(features, sizeof(features),
 		 "+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s%s%s%s",
@@ -65,6 +65,7 @@ enum ac_target_machine_options {
 	AC_TM_CHECK_IR = (1 << 5),
 	AC_TM_ENABLE_GLOBAL_ISEL = (1 << 6),
 	AC_TM_CREATE_LOW_OPT = (1 << 7),
+	AC_TM_AUTO_WAITCNT_BEFORE_BARRIER = (1 << 8),
 };
 
 enum ac_float_mode {
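The new bit extends `enum ac_target_machine_options` in the usual opt-in bitmask style: a driver ORs the flag into `tm_options` and `ac_create_target_machine()` tests for it, as the previous hunk shows. In this series radeonsi opts in (see the `si_init_compiler()` hunk further down) while radv does not, so radv no longer gets the automatic waitcnt. A minimal standalone sketch of the pattern, with hypothetical names rather than the Mesa ones:

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the opt-in bitflag pattern; values mirror the enum above. */
enum tm_options {
    TM_SUPPORTS_SPILL              = 1 << 0,
    TM_AUTO_WAITCNT_BEFORE_BARRIER = 1 << 8,
};

static bool barrier_does_waitcnt(unsigned tm_options, bool is_vega20)
{
    /* Wait automatically only when the driver asked for it and the
     * hardware generation still needs it (matches the diff above). */
    return (tm_options & TM_AUTO_WAITCNT_BEFORE_BARRIER) && !is_vega20;
}

int main(void)
{
    unsigned opts = TM_AUTO_WAITCNT_BEFORE_BARRIER; /* radeonsi-style opt-in */
    printf("%d\n", barrier_does_waitcnt(opts, false)); /* 1 */
    printf("%d\n", barrier_does_waitcnt(0, false));    /* 0: flag left off */
    return 0;
}
```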
@@ -2307,6 +2307,7 @@ VkResult radv_BeginCommandBuffer(
 	cmd_buffer->state.last_num_instances = -1;
 	cmd_buffer->state.last_vertex_offset = -1;
 	cmd_buffer->state.last_first_instance = -1;
+	cmd_buffer->state.predication_type = -1;
 	cmd_buffer->usage_flags = pBeginInfo->flags;
 
 	/* setup initial configuration into command buffer */
@@ -4126,15 +4127,18 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
 
 	if (radv_image_has_dcc(image)) {
 		uint32_t value = 0xffffffffu; /* Fully expanded mode. */
+		bool need_decompress_pass = false;
 
 		if (radv_layout_dcc_compressed(image, dst_layout,
 					       dst_queue_mask)) {
 			value = 0x20202020u;
+			need_decompress_pass = true;
 		}
 
 		radv_initialize_dcc(cmd_buffer, image, value);
 
-		radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image, false);
+		radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image,
+						  need_decompress_pass);
 	}
 
 	if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
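The hunk above initializes DCC metadata to 0xffffffffu (fully expanded) unless the destination layout keeps DCC compressed, in which case it writes 0x20202020u and, new in this patch, remembers that a decompress pass will be needed so the predicate is armed only in that case. A small sketch of the decision, using a hypothetical helper rather than the radv functions:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the logic above: pick the DCC init word
 * and report whether a decompress pass (and its predicate) is needed. */
static uint32_t choose_dcc_init(bool layout_keeps_dcc_compressed,
                                bool *need_decompress_pass)
{
    uint32_t value = 0xffffffffu;      /* fully expanded: no decompress needed */
    *need_decompress_pass = false;

    if (layout_keeps_dcc_compressed) {
        value = 0x20202020u;           /* compressed: decompress pass required */
        *need_decompress_pass = true;
    }
    return value;
}

int main(void)
{
    bool need;
    uint32_t v = choose_dcc_init(true, &need);
    printf("0x%08x, decompress=%d\n", v, need); /* 0x20202020, decompress=1 */
    return 0;
}
```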
@@ -480,6 +480,9 @@ radv_handle_per_app_options(struct radv_instance *instance,
 			 */
 			instance->perftest_flags |= RADV_PERFTEST_SISCHED;
 		}
-	}
+	} else if (!strcmp(name, "DOOM_VFR")) {
+		/* Work around a Doom VFR game bug */
+		instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+	}
 }
 
@@ -603,7 +603,7 @@ radv_emit_color_decompress(struct radv_cmd_buffer *cmd_buffer,
 		pipeline = cmd_buffer->device->meta_state.fast_clear_flush.cmask_eliminate_pipeline;
 	}
 
-	if (radv_image_has_dcc(image)) {
+	if (!decompress_dcc && radv_image_has_dcc(image)) {
 		old_predicating = cmd_buffer->state.predicating;
 
 		radv_emit_set_predication_state_from_image(cmd_buffer, image, true);
@@ -671,7 +671,7 @@ radv_emit_color_decompress(struct radv_cmd_buffer *cmd_buffer,
 					   &cmd_buffer->pool->alloc);
 
 	}
-	if (radv_image_has_dcc(image)) {
+	if (!decompress_dcc && radv_image_has_dcc(image)) {
 		cmd_buffer->state.predicating = old_predicating;
 
 		radv_emit_set_predication_state_from_image(cmd_buffer, image, false);
@@ -2006,7 +2006,7 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
 				MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
 		}
 	} else {
-		unreachable("Invalid vertex attribute divisor of 0.");
+		buffer_index = ctx->ac.i32_0;
 	}
 
 	buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.start_instance, buffer_index, "");
@@ -673,7 +673,7 @@ static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
 			if (!cs->num_buffers)
 				continue;
 
-			if (unique_bo_count == 0) {
+			if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
 				memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
 				unique_bo_count = cs->num_buffers;
 				continue;
@@ -826,7 +826,7 @@ ir_dereference_array::constant_expression_value(void *mem_ctx,
 
       const unsigned component = idx->value.u[0];
 
       return new(mem_ctx) ir_constant(array, component);
-   } else {
+   } else if (array->type->is_array()) {
      const unsigned index = idx->value.u[0];
      return array->get_array_element(index)->clone(mem_ctx, NULL);
    }
@@ -1134,6 +1134,25 @@ droid_add_configs_for_visuals(_EGLDriver *drv, _EGLDisplay *dpy)
    return (config_count != 0);
 }
 
+#ifdef HAVE_DRM_GRALLOC
+static int
+droid_open_device_drm_gralloc(struct dri2_egl_display *dri2_dpy)
+{
+   int fd = -1, err = -EINVAL;
+
+   if (dri2_dpy->gralloc->perform)
+      err = dri2_dpy->gralloc->perform(dri2_dpy->gralloc,
+                                       GRALLOC_MODULE_PERFORM_GET_DRM_FD,
+                                       &fd);
+   if (err || fd < 0) {
+      _eglLog(_EGL_WARNING, "fail to get drm fd");
+      fd = -1;
+   }
+
+   return (fd >= 0) ? fcntl(fd, F_DUPFD_CLOEXEC, 3) : -1;
+}
+#endif /* HAVE_DRM_GRALLOC */
+
 static const struct dri2_egl_display_vtbl droid_display_vtbl = {
    .authenticate = NULL,
    .create_window_surface = droid_create_window_surface,
@@ -1384,7 +1403,11 @@ dri2_initialize_android(_EGLDriver *drv, _EGLDisplay *disp)
 
    disp->DriverData = (void *) dri2_dpy;
 
+#ifdef HAVE_DRM_GRALLOC
+   dri2_dpy->fd = droid_open_device_drm_gralloc(dri2_dpy);
+#else
    dri2_dpy->fd = droid_open_device(disp);
+#endif
    if (dri2_dpy->fd < 0) {
       err = "DRI2: failed to open device";
       goto cleanup;
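A note on the return expression above: `fcntl(fd, F_DUPFD_CLOEXEC, 3)` hands EGL its own close-on-exec duplicate of the gralloc-owned descriptor, numbered 3 or above so it cannot shadow stdin/stdout/stderr. A self-contained sketch of the same idiom:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: duplicate a borrowed fd (gralloc's, in the hunk above) into an
 * owned, close-on-exec descriptor numbered 3 or higher. */
static int own_fd_cloexec(int borrowed_fd)
{
    if (borrowed_fd < 0)
        return -1;
    /* F_DUPFD_CLOEXEC: lowest free descriptor >= 3, with O_CLOEXEC set. */
    return fcntl(borrowed_fd, F_DUPFD_CLOEXEC, 3);
}

int main(void)
{
    int borrowed = open("/dev/null", O_RDONLY);
    int owned = own_fd_cloexec(borrowed);
    printf("borrowed=%d owned=%d\n", borrowed, owned); /* owned >= 3 */
    close(owned);
    close(borrowed);
    return 0;
}
```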
@@ -99,10 +99,10 @@ endif
 
 if with_platform_x11
   files_egl += files('drivers/dri2/platform_x11.c')
+  incs_for_egl += inc_loader
   if with_dri3
     files_egl += files('drivers/dri2/platform_x11_dri3.c')
     link_for_egl += libloader_dri3_helper
-    incs_for_egl += inc_loader
   endif
   deps_for_egl += [dep_x11_xcb, dep_xcb_dri2, dep_xcb_xfixes]
 endif
@@ -715,7 +715,6 @@ static void compute_emit_cs(struct r600_context *rctx,
 		rctx->cmd_buf_is_compute = true;
 	}
 
-	r600_need_cs_space(rctx, 0, true);
 	if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
 		r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
 		current = rctx->cs_shader_state.shader->sel->current;
@@ -742,16 +741,22 @@ static void compute_emit_cs(struct r600_context *rctx,
 		}
 		rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
 		rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
 
+		evergreen_emit_atomic_buffer_setup_count(rctx, current, combined_atomics, &atomic_used_mask);
+		r600_need_cs_space(rctx, 0, true, util_bitcount(atomic_used_mask));
+
 		if (need_buf_const) {
 			eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
 		}
 		r600_update_driver_const_buffers(rctx, true);
 
-		if (evergreen_emit_atomic_buffer_setup(rctx, current, combined_atomics, &atomic_used_mask)) {
+		evergreen_emit_atomic_buffer_setup(rctx, true, combined_atomics, atomic_used_mask);
+		if (atomic_used_mask) {
 			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
 			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 		}
-	}
+	} else
+		r600_need_cs_space(rctx, 0, true, 0);
 
 	/* Initialize all the compute-related registers.
 	 *
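These two hunks reorder compute dispatch into a count, reserve, emit sequence: `evergreen_emit_atomic_buffer_setup_count()` first computes which atomic counters are used, `r600_need_cs_space()` then reserves command-stream space sized by `util_bitcount()` of that mask, and only afterwards are the packets emitted, so emission can no longer outgrow the reservation. A compilable sketch of the ordering, with hypothetical stand-ins for the r600 helpers:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the r600 helpers, just to show the ordering. */
static void count_atomics(uint8_t *mask)  { *mask = 0x0b; /* pretend: 3 counters */ }
static void reserve_dwords(unsigned n)    { printf("reserve space for %u counters\n", n); }
static void emit_atomics(uint8_t mask)    { printf("emit mask 0x%02x\n", mask); }

/* Portable util_bitcount() equivalent for a byte-sized mask. */
static unsigned popcount8(uint8_t m) { unsigned n = 0; while (m) { m &= m - 1; n++; } return n; }

int main(void)
{
    uint8_t used_mask;
    count_atomics(&used_mask);            /* phase 1: discover what will be emitted */
    reserve_dwords(popcount8(used_mask)); /* phase 2: size the command stream up front */
    emit_atomics(used_mask);              /* phase 3: emit; space is guaranteed */
    return 0;
}
```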
@@ -109,7 +109,7 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
 
 	r600_need_cs_space(rctx,
 			   10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
-			   R600_MAX_PFP_SYNC_ME_DWORDS, FALSE);
+			   R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
 
 	/* Flush the caches for the first copy only. */
 	if (rctx->b.flags) {
@@ -4030,7 +4030,6 @@ static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx,
 
 		if (!buffers || !buffers[idx].buffer) {
 			pipe_resource_reference(&abuf->buffer, NULL);
-			astate->enabled_mask &= ~(1 << i);
 			continue;
 		}
 		buf = &buffers[idx];
@@ -4038,7 +4037,6 @@ static void evergreen_set_hw_atomic_buffers(struct pipe_context *ctx,
 		pipe_resource_reference(&abuf->buffer, buf->buffer);
 		abuf->buffer_offset = buf->buffer_offset;
 		abuf->buffer_size = buf->buffer_size;
-		astate->enabled_mask |= (1 << i);
 	}
 }
||||
@@ -4868,20 +4866,15 @@ static void cayman_write_count_to_gds(struct r600_context *rctx,
|
||||
radeon_emit(cs, reloc);
|
||||
}
|
||||
|
||||
bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
|
||||
struct r600_pipe_shader *cs_shader,
|
||||
struct r600_shader_atomic *combined_atomics,
|
||||
uint8_t *atomic_used_mask_p)
|
||||
void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
|
||||
struct r600_pipe_shader *cs_shader,
|
||||
struct r600_shader_atomic *combined_atomics,
|
||||
uint8_t *atomic_used_mask_p)
|
||||
{
|
||||
struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
|
||||
unsigned pkt_flags = 0;
|
||||
uint8_t atomic_used_mask = 0;
|
||||
int i, j, k;
|
||||
bool is_compute = cs_shader ? true : false;
|
||||
|
||||
if (is_compute)
|
||||
pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
|
||||
|
||||
for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
|
||||
uint8_t num_atomic_stage;
|
||||
struct r600_pipe_shader *pshader;
|
||||
@@ -4914,8 +4907,25 @@ bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
|
||||
}
|
||||
}
|
||||
}
|
||||
*atomic_used_mask_p = atomic_used_mask;
|
||||
}
|
||||
|
||||
void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
|
||||
bool is_compute,
|
||||
struct r600_shader_atomic *combined_atomics,
|
||||
uint8_t atomic_used_mask)
|
||||
{
|
||||
struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
|
||||
unsigned pkt_flags = 0;
|
||||
uint32_t mask;
|
||||
|
||||
if (is_compute)
|
||||
pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
|
||||
|
||||
mask = atomic_used_mask;
|
||||
if (!mask)
|
||||
return;
|
||||
|
||||
uint32_t mask = atomic_used_mask;
|
||||
while (mask) {
|
||||
unsigned atomic_index = u_bit_scan(&mask);
|
||||
struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
|
||||
@@ -4927,8 +4937,6 @@ bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
|
||||
else
|
||||
evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
|
||||
}
|
||||
*atomic_used_mask_p = atomic_used_mask;
|
||||
return true;
|
||||
}
|
||||
|
||||
void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
|
||||
@@ -4940,7 +4948,7 @@ void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
|
||||
struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
|
||||
uint32_t pkt_flags = 0;
|
||||
uint32_t event = EVENT_TYPE_PS_DONE;
|
||||
uint32_t mask = astate->enabled_mask;
|
||||
uint32_t mask;
|
||||
uint64_t dst_offset;
|
||||
unsigned reloc;
|
||||
|
||||
|
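The emit loop walks the used-counter mask with Mesa's `u_bit_scan()`, which returns the index of the lowest set bit and clears it from the mask. A standalone equivalent of that iteration pattern (a sketch, not the util_math original):

```c
#include <stdint.h>
#include <stdio.h>
#include <strings.h>  /* ffs() */

/* Sketch of u_bit_scan(): return the index of the lowest set bit
 * and clear it from the mask. */
static unsigned bit_scan(uint32_t *mask)
{
    unsigned i = ffs(*mask) - 1;
    *mask &= *mask - 1;  /* clear lowest set bit */
    return i;
}

int main(void)
{
    uint32_t mask = 0x29; /* counters 0, 3 and 5 in use */
    while (mask)
        printf("emit atomic %u\n", bit_scan(&mask)); /* prints 0, 3, 5 */
    return 0;
}
```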
@@ -31,7 +31,7 @@
 
 
 void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
-			boolean count_draw_in)
+			boolean count_draw_in, unsigned num_atomics)
 {
 	/* Flush the DMA IB if it's not empty. */
 	if (radeon_emitted(ctx->b.dma.cs, 0))
@@ -61,6 +61,9 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 		num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
 	}
 
+	/* add atomic counters, 8 pre + 8 post per counter + 16 post if any counters */
+	num_dw += (num_atomics * 16) + (num_atomics ? 16 : 0);
+
 	/* Count in r600_suspend_queries. */
 	num_dw += ctx->b.num_cs_dw_queries_suspend;
 
@@ -526,7 +529,7 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx,
 
 	r600_need_cs_space(rctx,
 			   10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
-			   3 + R600_MAX_PFP_SYNC_ME_DWORDS, FALSE);
+			   3 + R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
 
 	/* Flush the caches for the first copy only. */
 	if (rctx->b.flags) {
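The comment in the hunk spells out the accounting: 8 dwords before and 8 after each counter, plus a 16-dword tail if any counter is used at all, hence `num_atomics * 16 + (num_atomics ? 16 : 0)`. For example, three counters reserve 64 dwords. A checkable sketch:

```c
#include <stdio.h>

/* The reservation rule from the hunk above: 8 dwords pre and 8 post per
 * counter, plus a 16-dword tail if any counters are present at all. */
static unsigned atomic_dwords(unsigned num_atomics)
{
    return num_atomics * 16 + (num_atomics ? 16 : 0);
}

int main(void)
{
    printf("%u\n", atomic_dwords(0)); /* 0  */
    printf("%u\n", atomic_dwords(1)); /* 32 */
    printf("%u\n", atomic_dwords(3)); /* 64 */
    return 0;
}
```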
@@ -446,8 +446,6 @@ struct r600_shader_state {
 };
 
 struct r600_atomic_buffer_state {
-	uint32_t enabled_mask;
-	uint32_t dirty_mask;
 	struct pipe_shader_buffer buffer[EG_MAX_ATOMIC_BUFFERS];
 };
 
@@ -773,7 +771,7 @@ void r600_context_gfx_flush(void *context, unsigned flags,
 			    struct pipe_fence_handle **fence);
 void r600_begin_new_cs(struct r600_context *ctx);
 void r600_flush_emit(struct r600_context *ctx);
-void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
+void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in, unsigned num_atomics);
 void r600_emit_pfp_sync_me(struct r600_context *rctx);
 void r600_cp_dma_copy_buffer(struct r600_context *rctx,
 			     struct pipe_resource *dst, uint64_t dst_offset,
@@ -1067,10 +1065,14 @@ void r600_delete_shader_selector(struct pipe_context *ctx,
 				 struct r600_pipe_shader_selector *sel);
 
 struct r600_shader_atomic;
-bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
-					struct r600_pipe_shader *cs_shader,
+void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
+					      struct r600_pipe_shader *cs_shader,
+					      struct r600_shader_atomic *combined_atomics,
+					      uint8_t *atomic_used_mask_p);
+void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
+					bool is_compute,
 					struct r600_shader_atomic *combined_atomics,
-					uint8_t *atomic_used_mask_p);
+					uint8_t atomic_used_mask);
 void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
 				       bool is_compute,
 				       struct r600_shader_atomic *combined_atomics,
@@ -2085,8 +2085,9 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 			: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
 			: info->mode;
 
-	if (rctx->b.chip_class >= EVERGREEN)
-		evergreen_emit_atomic_buffer_setup(rctx, NULL, combined_atomics, &atomic_used_mask);
+	if (rctx->b.chip_class >= EVERGREEN) {
+		evergreen_emit_atomic_buffer_setup_count(rctx, NULL, combined_atomics, &atomic_used_mask);
+	}
 
 	if (index_size) {
 		index_offset += info->start * index_size;
@@ -2172,7 +2173,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 		evergreen_setup_tess_constants(rctx, info, &num_patches);
 
 	/* Emit states. */
-	r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE);
+	r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE, util_bitcount(atomic_used_mask));
 	r600_flush_emit(rctx);
 
 	mask = rctx->dirty_atoms;
@@ -2180,6 +2181,10 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 		r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
 	}
 
+	if (rctx->b.chip_class >= EVERGREEN) {
+		evergreen_emit_atomic_buffer_setup(rctx, false, combined_atomics, atomic_used_mask);
+	}
+
 	if (rctx->b.chip_class == CAYMAN) {
 		/* Copied from radeonsi. */
 		unsigned primgroup_size = 128; /* recommended without a GS */
@@ -3284,7 +3289,7 @@ static void r600_set_active_query_state(struct pipe_context *ctx, boolean enable
 static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
 				   bool include_draw_vbo)
 {
-	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo);
+	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo, 0);
 }
 
 /* keep this at the end of this file, please */
@@ -114,6 +114,7 @@ static void si_init_compiler(struct si_screen *sscreen,
 			     sscreen->info.chip_class <= VI;
 
 	enum ac_target_machine_options tm_options =
+		AC_TM_AUTO_WAITCNT_BEFORE_BARRIER |
 		(sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
 		(sscreen->debug_flags & DBG(GISEL) ? AC_TM_ENABLE_GLOBAL_ISEL : 0) |
 		(sscreen->info.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
@@ -176,6 +176,8 @@ kms_sw_displaytarget_create(struct sw_winsys *ws,
 
    list_inithead(&kms_sw_dt->planes);
    kms_sw_dt->ref_count = 1;
+   kms_sw_dt->mapped = MAP_FAILED;
+   kms_sw_dt->ro_mapped = MAP_FAILED;
 
    kms_sw_dt->format = format;
 
@@ -262,7 +264,7 @@ kms_sw_displaytarget_map(struct sw_winsys *ws,
 
    prot = (flags == PIPE_TRANSFER_READ) ? PROT_READ : (PROT_READ | PROT_WRITE);
    void **ptr = (flags == PIPE_TRANSFER_READ) ? &kms_sw_dt->ro_mapped : &kms_sw_dt->mapped;
-   if (!*ptr) {
+   if (*ptr == MAP_FAILED) {
       void *tmp = mmap(0, kms_sw_dt->size, prot, MAP_SHARED,
                        kms_sw->fd, map_req.offset);
       if (tmp == MAP_FAILED)
@@ -332,6 +334,8 @@ kms_sw_displaytarget_add_from_prime(struct kms_sw_winsys *kms_sw, int fd,
       FREE(kms_sw_dt);
       return NULL;
    }
+   kms_sw_dt->mapped = MAP_FAILED;
+   kms_sw_dt->ro_mapped = MAP_FAILED;
    kms_sw_dt->size = lseek_ret;
    kms_sw_dt->ref_count = 1;
    kms_sw_dt->handle = handle;
@@ -368,10 +372,14 @@ kms_sw_displaytarget_unmap(struct sw_winsys *ws,
    DEBUG_PRINT("KMS-DEBUG: unmapped buffer %u (was %p)\n", kms_sw_dt->handle, kms_sw_dt->mapped);
+   DEBUG_PRINT("KMS-DEBUG: unmapped buffer %u (was %p)\n", kms_sw_dt->handle, kms_sw_dt->ro_mapped);
 
-   munmap(kms_sw_dt->mapped, kms_sw_dt->size);
-   kms_sw_dt->mapped = NULL;
-   munmap(kms_sw_dt->ro_mapped, kms_sw_dt->size);
-   kms_sw_dt->ro_mapped = NULL;
+   if (kms_sw_dt->mapped != MAP_FAILED) {
+      munmap(kms_sw_dt->mapped, kms_sw_dt->size);
+      kms_sw_dt->mapped = MAP_FAILED;
+   }
+   if (kms_sw_dt->ro_mapped != MAP_FAILED) {
+      munmap(kms_sw_dt->ro_mapped, kms_sw_dt->size);
+      kms_sw_dt->ro_mapped = MAP_FAILED;
+   }
 }
 
 static struct sw_displaytarget *
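The common thread in these four hunks: `mmap()` signals failure with `MAP_FAILED`, which is `(void *)-1`, never NULL, so tracking unmapped buffers with NULL both misreads a failed map as mapped and lets `munmap()` run on buffers that were never mapped. The patch makes `MAP_FAILED` the sentinel everywhere. A minimal illustration:

```c
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* mmap() returns MAP_FAILED ((void *)-1) on error, never NULL. */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) {      /* correct failure check */
        perror("mmap");
        return 1;
    }

    /* Guard the unmap the same way the patched unmap path does. */
    if (p != MAP_FAILED) {
        munmap(p, 4096);
        p = MAP_FAILED;         /* reset to the sentinel, not NULL */
    }
    return 0;
}
```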
@@ -713,18 +713,6 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
    nir_validate_shader(*producer);
    nir_validate_shader(*consumer);
 
-   const bool p_is_scalar =
-      compiler->scalar_stage[(*producer)->info.stage];
-   const bool c_is_scalar =
-      compiler->scalar_stage[(*consumer)->info.stage];
-
-   if (p_is_scalar && c_is_scalar) {
-      NIR_PASS_V(*producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
-      NIR_PASS_V(*consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
-      *producer = brw_nir_optimize(*producer, compiler, p_is_scalar);
-      *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar);
-   }
-
    NIR_PASS_V(*producer, nir_remove_dead_variables, nir_var_shader_out);
    NIR_PASS_V(*consumer, nir_remove_dead_variables, nir_var_shader_in);
 
@@ -741,7 +729,12 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
       NIR_PASS_V(*consumer, nir_lower_indirect_derefs,
                  brw_nir_no_indirect_mask(compiler, (*consumer)->info.stage));
 
+      const bool p_is_scalar =
+         compiler->scalar_stage[(*producer)->info.stage];
       *producer = brw_nir_optimize(*producer, compiler, p_is_scalar);
+
+      const bool c_is_scalar =
+         compiler->scalar_stage[(*consumer)->info.stage];
       *consumer = brw_nir_optimize(*consumer, compiler, c_is_scalar);
    }
 }
@@ -340,18 +340,16 @@ try_lower_tex_ycbcr(struct anv_pipeline_layout *layout,
    if (binding->immutable_samplers == NULL)
       return false;
 
-   unsigned texture_index = tex->texture_index;
+   assert(tex->texture_index == 0);
+   unsigned array_index = 0;
    if (deref->deref_type != nir_deref_type_var) {
       assert(deref->deref_type == nir_deref_type_array);
       nir_const_value *const_index = nir_src_as_const_value(deref->arr.index);
       if (!const_index)
         return false;
-      size_t hw_binding_size =
-         anv_descriptor_set_binding_layout_get_hw_size(binding);
-      texture_index += MIN2(const_index->u32[0], hw_binding_size - 1);
+      array_index = MIN2(const_index->u32[0], binding->array_size - 1);
    }
-   const struct anv_sampler *sampler =
-      binding->immutable_samplers[texture_index];
+   const struct anv_sampler *sampler = binding->immutable_samplers[array_index];
 
    if (sampler->conversion == NULL)
       return false;
@@ -496,7 +496,6 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
                   uint32_t stride)
 {
    struct brw_bo *bo;
-   unsigned int page_size = getpagesize();
    int ret;
    struct bo_cache_bucket *bucket;
    bool alloc_from_cache;
@@ -522,12 +521,12 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
     * allocation up.
     */
    if (bucket == NULL) {
-      bo_size = size;
-      if (bo_size < page_size)
-         bo_size = page_size;
+      unsigned int page_size = getpagesize();
+      bo_size = size == 0 ? page_size : ALIGN(size, page_size);
    } else {
       bo_size = bucket->size;
    }
+   assert(bo_size);
 
    mtx_lock(&bufmgr->lock);
    /* Get a buffer out of the cache if available */
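The rewritten branch rounds uncached sizes up to whole pages and promotes `size == 0` to one page. Mesa's `ALIGN()` rounds up to a power-of-two boundary, and `ALIGN(0, page)` is 0, which is why zero needs the special case. A quick sketch (the macro here mirrors, but is not, the u_math one):

```c
#include <stdio.h>

/* Mesa-style power-of-two round-up (sketch of the ALIGN macro). */
#define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned page = 4096;
    printf("%u\n", ALIGN(0u, page));    /* 0: why size == 0 is special-cased */
    printf("%u\n", ALIGN(5000u, page)); /* 8192 */
    printf("%u\n", ALIGN(4096u, page)); /* 4096 */
    return 0;
}
```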
@@ -695,7 +695,7 @@ brw_initialize_context_constants(struct brw_context *brw)
    /* ARB_viewport_array, OES_viewport_array */
    if (devinfo->gen >= 6) {
       ctx->Const.MaxViewports = GEN6_NUM_VIEWPORTS;
-      ctx->Const.ViewportSubpixelBits = 0;
+      ctx->Const.ViewportSubpixelBits = 8;
 
       /* Cast to float before negating because MaxViewportWidth is unsigned.
        */