Compare commits
42 Commits
mesa-18.3. ... mesa-18.3.

SHA1:
af223b57a4
c694d84f10
a69ef11424
4b715e3e59
bd7edf473e
a34228e1b0
d92bbe54ea
54acae83e0
462bc0d5d4
055e0d7126
5b50e6a7ec
5594bb584d
d369bd91c3
cc45108382
babf9ab7da
3985a62afc
a7c4368a66
ab83cfd2bf
98d571d212
56f90f6213
c2a22a44a1
fe460ee8cd
b28aa1178a
bb4bbb5c2d
ce6a9169f0
bcc8332606
ace4860a4f
41671f5dc0
ec659efcba
a1f6ae4e27
a32c568d39
d575455be6
7d8a9087ae
5598426132
35e9cd3428
1a905e4c5b
6b9b7ce38c
02566b9725
825cb76860
a941399117
f7040d9107
b8502f1517
bin/.cherry-ignore (new file, 2 lines)
@@ -0,0 +1,2 @@
# fixes: Commit was squashed into the respective offenders
c02390f8fcd367c7350db568feabb2f062efca14 egl/wayland: rather obvious build fix
@@ -61,7 +61,6 @@ Note: some of the new features are only available with certain drivers.
<li>GL_EXT_vertex_attrib_64bit on i965, nvc0, radeonsi.</li>
<li>GL_EXT_window_rectangles on radeonsi.</li>
<li>GL_KHR_texture_compression_astc_sliced_3d on radeonsi.</li>
<li>GL_INTEL_fragment_shader_ordering on i965.</li>
<li>GL_NV_fragment_shader_interlock on i965.</li>
<li>EGL_EXT_device_base for all drivers.</li>
<li>EGL_EXT_device_drm for all drivers.</li>
@@ -140,7 +140,7 @@ libvulkan_radeon = shared_library(
],
dependencies : [
dep_llvm, dep_libdrm_amdgpu, dep_thread, dep_elf, dep_dl, dep_m,
dep_valgrind,
dep_valgrind, radv_deps,
idep_nir,
],
c_args : [c_vis_args, no_override_init_args, radv_flags],
@@ -110,17 +110,6 @@ radv_image_from_gralloc(VkDevice device_h,
struct radv_bo *bo = NULL;
VkResult result;

result = radv_image_create(device_h,
&(struct radv_image_create_info) {
.vk_info = base_info,
.scanout = true,
.no_metadata_planes = true},
alloc,
&image_h);

if (result != VK_SUCCESS)
return result;

if (gralloc_info->handle->numFds != 1) {
return vk_errorf(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
"VkNativeBufferANDROID::handle::numFds is %d, "

@@ -133,23 +122,14 @@ radv_image_from_gralloc(VkDevice device_h,
*/
int dma_buf = gralloc_info->handle->data[0];

image = radv_image_from_handle(image_h);

VkDeviceMemory memory_h;

const VkMemoryDedicatedAllocateInfoKHR ded_alloc = {
.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
.pNext = NULL,
.buffer = VK_NULL_HANDLE,
.image = image_h
};

const VkImportMemoryFdInfoKHR import_info = {
.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
.pNext = &ded_alloc,
.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
.fd = dup(dma_buf),
};

/* Find the first VRAM memory type, or GART for PRIME images. */
int memory_type_index = -1;
for (int i = 0; i < device->physical_device->memory_properties.memoryTypeCount; ++i) {

@@ -168,14 +148,49 @@ radv_image_from_gralloc(VkDevice device_h,
&(VkMemoryAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = &import_info,
.allocationSize = image->size,
/* Max buffer size, unused for imports */
.allocationSize = 0x7FFFFFFF,
.memoryTypeIndex = memory_type_index,
},
alloc,
&memory_h);
if (result != VK_SUCCESS)
return result;

struct radeon_bo_metadata md;
device->ws->buffer_get_metadata(radv_device_memory_from_handle(memory_h)->bo, &md);

bool is_scanout;
if (device->physical_device->rad_info.chip_class >= GFX9) {
/* Copied from radeonsi, but is hacky so should be cleaned up. */
is_scanout = md.u.gfx9.swizzle_mode == 0 || md.u.gfx9.swizzle_mode % 4 == 2;
} else {
is_scanout = md.u.legacy.scanout;
}

VkImageCreateInfo updated_base_info = *base_info;

VkExternalMemoryImageCreateInfo external_memory_info = {
.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
.pNext = updated_base_info.pNext,
.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
};

updated_base_info.pNext = &external_memory_info;

result = radv_image_create(device_h,
&(struct radv_image_create_info) {
.vk_info = &updated_base_info,
.scanout = is_scanout,
.no_metadata_planes = true},
alloc,
&image_h);

if (result != VK_SUCCESS)
goto fail_create_image;

image = radv_image_from_handle(image_h);

radv_BindImageMemory(device_h, image_h, memory_h, 0);

image->owned_memory = memory_h;

@@ -185,9 +200,7 @@ radv_image_from_gralloc(VkDevice device_h,
return VK_SUCCESS;

fail_create_image:
fail_size:
radv_DestroyImage(device_h, image_h, alloc);

radv_FreeMemory(device_h, memory_h, alloc);
return result;
}
@@ -1068,7 +1068,7 @@ static void
radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
struct radv_ds_buffer_info *ds,
struct radv_image *image, VkImageLayout layout,
bool requires_cond_write)
bool requires_cond_exec)
{
uint32_t db_z_info = ds->db_z_info;
uint32_t db_z_info_reg;

@@ -1092,38 +1092,21 @@ radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
}

/* When we don't know the last fast clear value we need to emit a
* conditional packet, otherwise we can update DB_Z_INFO directly.
* conditional packet that will eventually skip the following
* SET_CONTEXT_REG packet.
*/
if (requires_cond_write) {
radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));

const uint32_t write_space = 0 << 8; /* register */
const uint32_t poll_space = 1 << 4; /* memory */
const uint32_t function = 3 << 0; /* equal to the reference */
const uint32_t options = write_space | poll_space | function;
radeon_emit(cmd_buffer->cs, options);

/* poll address - location of the depth clear value */
if (requires_cond_exec) {
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;

/* In presence of stencil format, we have to adjust the base
* address because the first value is the stencil clear value.
*/
if (vk_format_is_stencil(image->vk_format))
va += 4;
va += image->offset + image->tc_compat_zrange_offset;

radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_EXEC, 3, 0));
radeon_emit(cmd_buffer->cs, va);
radeon_emit(cmd_buffer->cs, va >> 32);

radeon_emit(cmd_buffer->cs, fui(0.0f)); /* reference value */
radeon_emit(cmd_buffer->cs, (uint32_t)-1); /* comparison mask */
radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
radeon_emit(cmd_buffer->cs, 0u); /* write address high */
radeon_emit(cmd_buffer->cs, db_z_info);
} else {
radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
radeon_emit(cmd_buffer->cs, 0);
radeon_emit(cmd_buffer->cs, 3); /* SET_CONTEXT_REG size */
}

radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
}

static void

@@ -1270,6 +1253,45 @@ radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
radeon_emit(cs, fui(ds_clear_value.depth));
}

/**
* Update the TC-compat metadata value for this image.
*/
static void
radv_set_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
uint32_t value)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->tc_compat_zrange_offset;

radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
S_370_WR_CONFIRM(1) |
S_370_ENGINE_SEL(V_370_PFP));
radeon_emit(cs, va);
radeon_emit(cs, va >> 32);
radeon_emit(cs, value);
}

static void
radv_update_tc_compat_zrange_metadata(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
VkClearDepthStencilValue ds_clear_value)
{
struct radeon_cmdbuf *cs = cmd_buffer->cs;
uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->tc_compat_zrange_offset;
uint32_t cond_val;

/* Conditionally set DB_Z_INFO.ZRANGE_PRECISION to 0 when the last
* depth clear value is 0.0f.
*/
cond_val = ds_clear_value.depth == 0.0f ? UINT_MAX : 0;

radv_set_tc_compat_zrange_metadata(cmd_buffer, image, cond_val);
}

/**
* Update the clear depth/stencil values for this image.
*/

@@ -1283,6 +1305,12 @@ radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,

radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);

if (radv_image_is_tc_compat_htile(image) &&
(aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
radv_update_tc_compat_zrange_metadata(cmd_buffer, image,
ds_clear_value);
}

radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
aspects);
}

@@ -4192,6 +4220,15 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;

radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);

if (radv_image_is_tc_compat_htile(image)) {
/* Initialize the TC-compat metada value to 0 because by
* default DB_Z_INFO.RANGE_PRECISION is set to 1, and we only
* need have to conditionally update its value when performing
* a fast depth clear.
*/
radv_set_tc_compat_zrange_metadata(cmd_buffer, image, 0);
}
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
@@ -691,7 +691,7 @@ radv_query_opaque_metadata(struct radv_device *device,
si_make_texture_descriptor(device, image, false,
(VkImageViewType)image->type, image->vk_format,
&fixedmapping, 0, image->info.levels - 1, 0,
image->info.array_size,
image->info.array_size - 1,
image->info.width, image->info.height,
image->info.depth,
desc, NULL);

@@ -870,6 +870,14 @@ radv_image_alloc_htile(struct radv_image *image)
/* + 8 for storing the clear values */
image->clear_value_offset = image->htile_offset + image->surface.htile_size;
image->size = image->clear_value_offset + 8;
if (radv_image_is_tc_compat_htile(image)) {
/* Metadata for the TC-compatible HTILE hardware bug which
* have to be fixed by updating ZRANGE_PRECISION when doing
* fast depth clears to 0.0f.
*/
image->tc_compat_zrange_offset = image->clear_value_offset + 8;
image->size = image->clear_value_offset + 16;
}
image->alignment = align64(image->alignment, image->surface.htile_alignment);
}

@@ -1014,8 +1022,8 @@ radv_image_create(VkDevice _device,
/* Otherwise, try to enable HTILE for depth surfaces. */
if (radv_image_can_enable_htile(image) &&
!(device->instance->debug_flags & RADV_DEBUG_NO_HIZ)) {
radv_image_alloc_htile(image);
image->tc_compatible_htile = image->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE;
radv_image_alloc_htile(image);
} else {
image->surface.htile_size = 0;
}

@@ -1175,8 +1183,6 @@ radv_image_view_init(struct radv_image_view *iview,
if (device->physical_device->rad_info.chip_class >= GFX9 &&
vk_format_is_compressed(image->vk_format) &&
!vk_format_is_compressed(iview->vk_format)) {
unsigned rounded_img_w = util_next_power_of_two(iview->extent.width);
unsigned rounded_img_h = util_next_power_of_two(iview->extent.height);
unsigned lvl_width = radv_minify(image->info.width , range->baseMipLevel);
unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel);

@@ -1186,8 +1192,8 @@ radv_image_view_init(struct radv_image_view *iview,
lvl_width <<= range->baseMipLevel;
lvl_height <<= range->baseMipLevel;

iview->extent.width = CLAMP(lvl_width, iview->extent.width, rounded_img_w);
iview->extent.height = CLAMP(lvl_height, iview->extent.height, rounded_img_h);
iview->extent.width = CLAMP(lvl_width, iview->extent.width, iview->image->surface.u.gfx9.surf_pitch);
iview->extent.height = CLAMP(lvl_height, iview->extent.height, iview->image->surface.u.gfx9.surf_height);
}
}
@@ -2061,7 +2061,7 @@ radv_meta_image_to_image_cs(struct radv_cmd_buffer *cmd_buffer,
itoi_bind_descriptors(cmd_buffer, &src_view, &dst_view);

if (device->physical_device->rad_info.chip_class >= GFX9 &&
src->image->type == VK_IMAGE_TYPE_3D)
(src->image->type == VK_IMAGE_TYPE_3D || dst->image->type == VK_IMAGE_TYPE_3D))
pipeline = cmd_buffer->device->meta_state.itoi.pipeline_3d;
radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
@@ -1498,6 +1498,14 @@ struct radv_image {
uint64_t clear_value_offset;
uint64_t dcc_pred_offset;

/*
* Metadata for the TC-compat zrange workaround. If the 32-bit value
* stored at this offset is UINT_MAX, the driver will emit
* DB_Z_INFO.ZRANGE_PRECISION=0, otherwise it will skip the
* SET_CONTEXT_REG packet.
*/
uint64_t tc_compat_zrange_offset;

/* For VK_ANDROID_native_buffer, the WSI image owns the memory, */
VkDeviceMemory owned_memory;
};
@@ -1341,10 +1341,13 @@ void radv_CmdCopyQueryPoolResults(


if (flags & VK_QUERY_RESULT_WAIT_BIT) {
/* Wait on the high 32 bits of the timestamp in
* case the low part is 0xffffffff.
*/
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, false));
radeon_emit(cs, WAIT_REG_MEM_NOT_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
radeon_emit(cs, local_src_va);
radeon_emit(cs, local_src_va >> 32);
radeon_emit(cs, local_src_va + 4);
radeon_emit(cs, (local_src_va + 4) >> 32);
radeon_emit(cs, TIMESTAMP_NOT_READY >> 32);
radeon_emit(cs, 0xffffffff);
radeon_emit(cs, 4);

@@ -1447,6 +1450,22 @@ static unsigned event_type_for_stream(unsigned stream)
}
}

static void emit_query_flush(struct radv_cmd_buffer *cmd_buffer,
struct radv_query_pool *pool)
{
if (cmd_buffer->pending_reset_query) {
if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
/* Only need to flush caches if the query pool size is
* large enough to be resetted using the compute shader
* path. Small pools don't need any cache flushes
* because we use a CP dma clear.
*/
si_emit_cache_flush(cmd_buffer);
cmd_buffer->pending_reset_query = false;
}
}
}

static void emit_begin_query(struct radv_cmd_buffer *cmd_buffer,
uint64_t va,
VkQueryType query_type,

@@ -1593,17 +1612,7 @@ void radv_CmdBeginQueryIndexedEXT(

radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

if (cmd_buffer->pending_reset_query) {
if (pool->size >= RADV_BUFFER_OPS_CS_THRESHOLD) {
/* Only need to flush caches if the query pool size is
* large enough to be resetted using the compute shader
* path. Small pools don't need any cache flushes
* because we use a CP dma clear.
*/
si_emit_cache_flush(cmd_buffer);
cmd_buffer->pending_reset_query = false;
}
}
emit_query_flush(cmd_buffer, pool);

va += pool->stride * query;

@@ -1680,6 +1689,8 @@ void radv_CmdWriteTimestamp(

radv_cs_add_buffer(cmd_buffer->device->ws, cs, pool->bo);

emit_query_flush(cmd_buffer, pool);

int num_queries = 1;
if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask)
num_queries = util_bitcount(cmd_buffer->state.subpass->view_mask);
@@ -223,6 +223,8 @@ struct radeon_winsys {

void (*buffer_set_metadata)(struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);
void (*buffer_get_metadata)(struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);

void (*buffer_virtual_bind)(struct radeon_winsys_bo *parent,
uint64_t offset, uint64_t size,
@@ -304,8 +304,12 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws,
return NULL;
}

unsigned virt_alignment = alignment;
if (size >= ws->info.pte_fragment_size)
virt_alignment = MAX2(virt_alignment, ws->info.pte_fragment_size);

r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
size, alignment, 0, &va, &va_handle,
size, virt_alignment, 0, &va, &va_handle,
(flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
AMDGPU_VA_RANGE_HIGH);
if (r)

@@ -536,6 +540,21 @@ radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws,
return true;
}

static unsigned eg_tile_split(unsigned tile_split)
{
switch (tile_split) {
case 0: tile_split = 64; break;
case 1: tile_split = 128; break;
case 2: tile_split = 256; break;
case 3: tile_split = 512; break;
default:
case 4: tile_split = 1024; break;
case 5: tile_split = 2048; break;
case 6: tile_split = 4096; break;
}
return tile_split;
}

static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split)
{
switch (eg_tile_split) {

@@ -589,6 +608,43 @@ radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo,
amdgpu_bo_set_metadata(bo->bo, &metadata);
}

static void
radv_amdgpu_winsys_bo_get_metadata(struct radeon_winsys_bo *_bo,
struct radeon_bo_metadata *md)
{
struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
struct amdgpu_bo_info info = {0};

int r = amdgpu_bo_query_info(bo->bo, &info);
if (r)
return;

uint64_t tiling_flags = info.metadata.tiling_info;

if (bo->ws->info.chip_class >= GFX9) {
md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
} else {
md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
md->u.legacy.microtile = RADEON_LAYOUT_TILED;

md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
}

md->size_metadata = info.metadata.size_metadata;
memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
{
ws->base.buffer_create = radv_amdgpu_winsys_bo_create;

@@ -599,5 +655,6 @@ void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
ws->base.buffer_get_fd = radv_amdgpu_winsys_get_fd;
ws->base.buffer_set_metadata = radv_amdgpu_winsys_bo_set_metadata;
ws->base.buffer_get_metadata = radv_amdgpu_winsys_bo_get_metadata;
ws->base.buffer_virtual_bind = radv_amdgpu_winsys_bo_virtual_bind;
}
@@ -525,12 +525,6 @@ supports_nv_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
|
||||
return state->NV_fragment_shader_interlock_enable;
|
||||
}
|
||||
|
||||
static bool
|
||||
supports_intel_fragment_shader_ordering(const _mesa_glsl_parse_state *state)
|
||||
{
|
||||
return state->INTEL_fragment_shader_ordering_enable;
|
||||
}
|
||||
|
||||
static bool
|
||||
shader_clock(const _mesa_glsl_parse_state *state)
|
||||
{
|
||||
@@ -1311,11 +1305,6 @@ builtin_builder::create_intrinsics()
|
||||
supports_arb_fragment_shader_interlock,
|
||||
ir_intrinsic_end_invocation_interlock), NULL);
|
||||
|
||||
add_function("__intrinsic_begin_fragment_shader_ordering",
|
||||
_invocation_interlock_intrinsic(
|
||||
supports_intel_fragment_shader_ordering,
|
||||
ir_intrinsic_begin_fragment_shader_ordering), NULL);
|
||||
|
||||
add_function("__intrinsic_shader_clock",
|
||||
_shader_clock_intrinsic(shader_clock,
|
||||
glsl_type::uvec2_type),
|
||||
@@ -3430,12 +3419,6 @@ builtin_builder::create_builtins()
|
||||
supports_nv_fragment_shader_interlock),
|
||||
NULL);
|
||||
|
||||
add_function("beginFragmentShaderOrderingINTEL",
|
||||
_invocation_interlock(
|
||||
"__intrinsic_begin_fragment_shader_ordering",
|
||||
supports_intel_fragment_shader_ordering),
|
||||
NULL);
|
||||
|
||||
add_function("anyInvocationARB",
|
||||
_vote("__intrinsic_vote_any", vote),
|
||||
NULL);
|
||||
|
@@ -727,7 +727,6 @@ static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
|
||||
EXT_AEP(EXT_texture_buffer),
|
||||
EXT_AEP(EXT_texture_cube_map_array),
|
||||
EXT(INTEL_conservative_rasterization),
|
||||
EXT(INTEL_fragment_shader_ordering),
|
||||
EXT(INTEL_shader_atomic_float_minmax),
|
||||
EXT(MESA_shader_integer_functions),
|
||||
EXT(NV_fragment_shader_interlock),
|
||||
|
@@ -812,8 +812,6 @@ struct _mesa_glsl_parse_state {
|
||||
bool EXT_texture_cube_map_array_warn;
|
||||
bool INTEL_conservative_rasterization_enable;
|
||||
bool INTEL_conservative_rasterization_warn;
|
||||
bool INTEL_fragment_shader_ordering_enable;
|
||||
bool INTEL_fragment_shader_ordering_warn;
|
||||
bool INTEL_shader_atomic_float_minmax_enable;
|
||||
bool INTEL_shader_atomic_float_minmax_warn;
|
||||
bool MESA_shader_integer_functions_enable;
|
||||
|
@@ -742,9 +742,6 @@ nir_visitor::visit(ir_call *ir)
|
||||
case ir_intrinsic_end_invocation_interlock:
|
||||
op = nir_intrinsic_end_invocation_interlock;
|
||||
break;
|
||||
case ir_intrinsic_begin_fragment_shader_ordering:
|
||||
op = nir_intrinsic_begin_fragment_shader_ordering;
|
||||
break;
|
||||
case ir_intrinsic_group_memory_barrier:
|
||||
op = nir_intrinsic_group_memory_barrier;
|
||||
break;
|
||||
@@ -983,9 +980,6 @@ nir_visitor::visit(ir_call *ir)
|
||||
case nir_intrinsic_end_invocation_interlock:
|
||||
nir_builder_instr_insert(&b, &instr->instr);
|
||||
break;
|
||||
case nir_intrinsic_begin_fragment_shader_ordering:
|
||||
nir_builder_instr_insert(&b, &instr->instr);
|
||||
break;
|
||||
case nir_intrinsic_store_ssbo: {
|
||||
exec_node *param = ir->actual_parameters.get_head();
|
||||
ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
|
||||
|
@@ -1122,7 +1122,6 @@ enum ir_intrinsic_id {
|
||||
ir_intrinsic_memory_barrier_shared,
|
||||
ir_intrinsic_begin_invocation_interlock,
|
||||
ir_intrinsic_end_invocation_interlock,
|
||||
ir_intrinsic_begin_fragment_shader_ordering,
|
||||
|
||||
ir_intrinsic_vote_all,
|
||||
ir_intrinsic_vote_any,
|
||||
|
@@ -360,13 +360,20 @@ read_xfb(struct blob_reader *metadata, struct gl_shader_program *shProg)
|
||||
if (xfb_stage == ~0u)
|
||||
return;
|
||||
|
||||
if (shProg->TransformFeedback.VaryingNames) {
|
||||
for (unsigned i = 0; i < shProg->TransformFeedback.NumVarying; ++i)
|
||||
free(shProg->TransformFeedback.VaryingNames[i]);
|
||||
}
|
||||
|
||||
/* Data set by glTransformFeedbackVaryings. */
|
||||
shProg->TransformFeedback.BufferMode = blob_read_uint32(metadata);
|
||||
blob_copy_bytes(metadata, &shProg->TransformFeedback.BufferStride,
|
||||
sizeof(shProg->TransformFeedback.BufferStride));
|
||||
shProg->TransformFeedback.NumVarying = blob_read_uint32(metadata);
|
||||
|
||||
shProg->TransformFeedback.VaryingNames = (char **)
|
||||
malloc(shProg->TransformFeedback.NumVarying * sizeof(GLchar *));
|
||||
realloc(shProg->TransformFeedback.VaryingNames,
|
||||
shProg->TransformFeedback.NumVarying * sizeof(GLchar *));
|
||||
/* Note, malloc used with VaryingNames. */
|
||||
for (unsigned i = 0; i < shProg->TransformFeedback.NumVarying; i++)
|
||||
shProg->TransformFeedback.VaryingNames[i] =
|
||||
|
@@ -199,7 +199,6 @@ barrier("memory_barrier_image")
|
||||
barrier("memory_barrier_shared")
|
||||
barrier("begin_invocation_interlock")
|
||||
barrier("end_invocation_interlock")
|
||||
barrier("begin_fragment_shader_ordering")
|
||||
|
||||
# A conditional discard, with a single boolean source.
|
||||
intrinsic("discard_if", src_comp=[1])
|
||||
|
@@ -1127,13 +1127,22 @@ drm_handle_device(void *data, struct wl_drm *drm, const char *device)
|
||||
if (dri2_dpy->fd == -1) {
|
||||
_eglLog(_EGL_WARNING, "wayland-egl: could not open %s (%s)",
|
||||
dri2_dpy->device_name, strerror(errno));
|
||||
free(dri2_dpy->device_name);
|
||||
dri2_dpy->device_name = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
if (drmGetNodeTypeFromFd(dri2_dpy->fd) == DRM_NODE_RENDER) {
|
||||
dri2_dpy->authenticated = true;
|
||||
} else {
|
||||
drmGetMagic(dri2_dpy->fd, &magic);
|
||||
if (drmGetMagic(dri2_dpy->fd, &magic)) {
|
||||
close(dri2_dpy->fd);
|
||||
dri2_dpy->fd = -1;
|
||||
free(dri2_dpy->device_name);
|
||||
dri2_dpy->device_name = NULL;
|
||||
_eglLog(_EGL_WARNING, "wayland-egl: drmGetMagic failed");
|
||||
return;
|
||||
}
|
||||
wl_drm_authenticate(dri2_dpy->wl_drm, magic);
|
||||
}
|
||||
}
|
||||
|
@@ -142,7 +142,7 @@ pipe_loader_release(struct pipe_loader_device **devs, int ndev);
|
||||
*/
|
||||
bool
|
||||
pipe_loader_sw_probe_dri(struct pipe_loader_device **devs,
|
||||
struct drisw_loader_funcs *drisw_lf);
|
||||
const struct drisw_loader_funcs *drisw_lf);
|
||||
|
||||
/**
|
||||
* Initialize a kms backed sw device given an fd.
|
||||
|
@@ -132,7 +132,7 @@ pipe_loader_sw_probe_teardown_common(struct pipe_loader_sw_device *sdev)
|
||||
|
||||
#ifdef HAVE_PIPE_LOADER_DRI
|
||||
bool
|
||||
pipe_loader_sw_probe_dri(struct pipe_loader_device **devs, struct drisw_loader_funcs *drisw_lf)
|
||||
pipe_loader_sw_probe_dri(struct pipe_loader_device **devs, const struct drisw_loader_funcs *drisw_lf)
|
||||
{
|
||||
struct pipe_loader_sw_device *sdev = CALLOC_STRUCT(pipe_loader_sw_device);
|
||||
int i;
|
||||
|
@@ -600,25 +600,23 @@ static inline void
|
||||
nv50_stage_sampler_states_bind(struct nv50_context *nv50, int s,
|
||||
unsigned nr, void **hwcso)
|
||||
{
|
||||
unsigned highest_found = 0;
|
||||
unsigned i;
|
||||
|
||||
assert(nr <= PIPE_MAX_SAMPLERS);
|
||||
for (i = 0; i < nr; ++i) {
|
||||
struct nv50_tsc_entry *old = nv50->samplers[s][i];
|
||||
|
||||
if (hwcso[i])
|
||||
highest_found = i;
|
||||
|
||||
nv50->samplers[s][i] = nv50_tsc_entry(hwcso[i]);
|
||||
if (old)
|
||||
nv50_screen_tsc_unlock(nv50->screen, old);
|
||||
}
|
||||
assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS);
|
||||
for (; i < nv50->num_samplers[s]; ++i) {
|
||||
if (nv50->samplers[s][i]) {
|
||||
nv50_screen_tsc_unlock(nv50->screen, nv50->samplers[s][i]);
|
||||
nv50->samplers[s][i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
nv50->num_samplers[s] = nr;
|
||||
if (nr >= nv50->num_samplers[s])
|
||||
nv50->num_samplers[s] = highest_found + 1;
|
||||
|
||||
nv50->dirty_3d |= NV50_NEW_3D_SAMPLERS;
|
||||
}
|
||||
|
@@ -464,11 +464,15 @@ nvc0_stage_sampler_states_bind(struct nvc0_context *nvc0,
|
||||
unsigned s,
|
||||
unsigned nr, void **hwcso)
|
||||
{
|
||||
unsigned highest_found = 0;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < nr; ++i) {
|
||||
struct nv50_tsc_entry *old = nvc0->samplers[s][i];
|
||||
|
||||
if (hwcso[i])
|
||||
highest_found = i;
|
||||
|
||||
if (hwcso[i] == old)
|
||||
continue;
|
||||
nvc0->samplers_dirty[s] |= 1 << i;
|
||||
@@ -477,14 +481,8 @@ nvc0_stage_sampler_states_bind(struct nvc0_context *nvc0,
|
||||
if (old)
|
||||
nvc0_screen_tsc_unlock(nvc0->screen, old);
|
||||
}
|
||||
for (; i < nvc0->num_samplers[s]; ++i) {
|
||||
if (nvc0->samplers[s][i]) {
|
||||
nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
|
||||
nvc0->samplers[s][i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
nvc0->num_samplers[s] = nr;
|
||||
if (nr >= nvc0->num_samplers[s])
|
||||
nvc0->num_samplers[s] = highest_found + 1;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -1636,7 +1636,7 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
|
||||
}
|
||||
|
||||
if (query->buffer.previous) {
|
||||
u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
|
||||
u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 256,
|
||||
&tmp_buffer_offset, &tmp_buffer);
|
||||
if (!tmp_buffer)
|
||||
return;
|
||||
|
@@ -106,7 +106,6 @@ static void virgl_buffer_transfer_unmap(struct pipe_context *ctx,
|
||||
if (trans->base.usage & PIPE_TRANSFER_WRITE) {
|
||||
if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
|
||||
struct virgl_screen *vs = virgl_screen(ctx->screen);
|
||||
vbuf->base.clean = FALSE;
|
||||
vctx->num_transfers++;
|
||||
vs->vws->transfer_put(vs->vws, vbuf->base.hw_res,
|
||||
&transfer->box, trans->base.stride, trans->base.layer_stride, trans->offset, transfer->level);
|
||||
|
@@ -61,6 +61,12 @@ static void virgl_encoder_write_res(struct virgl_context *ctx,
|
||||
}
|
||||
}
|
||||
|
||||
static void virgl_dirty_res(struct virgl_resource *res)
|
||||
{
|
||||
if (res)
|
||||
res->clean = FALSE;
|
||||
}
|
||||
|
||||
int virgl_encode_bind_object(struct virgl_context *ctx,
|
||||
uint32_t handle, uint32_t object)
|
||||
{
|
||||
@@ -615,6 +621,7 @@ int virgl_encode_sampler_view(struct virgl_context *ctx,
|
||||
if (res->u.b.target == PIPE_BUFFER) {
|
||||
virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
|
||||
virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
|
||||
virgl_dirty_res(res);
|
||||
} else {
|
||||
virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
|
||||
virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
|
||||
@@ -949,6 +956,7 @@ int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
|
||||
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
|
||||
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
|
||||
virgl_encoder_write_res(ctx, res);
|
||||
virgl_dirty_res(res);
|
||||
} else {
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
@@ -972,6 +980,7 @@ int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
|
||||
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
|
||||
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
|
||||
virgl_encoder_write_res(ctx, res);
|
||||
virgl_dirty_res(res);
|
||||
} else {
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
@@ -999,6 +1008,7 @@ int virgl_encode_set_shader_images(struct virgl_context *ctx,
|
||||
virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
|
||||
virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
|
||||
virgl_encoder_write_res(ctx, res);
|
||||
virgl_dirty_res(res);
|
||||
} else {
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
virgl_encoder_write_dword(ctx->cbuf, 0);
|
||||
|
@@ -95,7 +95,11 @@ static void virgl_buffer_subdata(struct pipe_context *pipe,
|
||||
usage |= PIPE_TRANSFER_DISCARD_RANGE;
|
||||
|
||||
u_box_1d(offset, size, &box);
|
||||
virgl_transfer_inline_write(pipe, resource, 0, usage, &box, data, 0, 0);
|
||||
|
||||
if (size >= (VIRGL_MAX_CMDBUF_DWORDS * 4))
|
||||
u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
|
||||
else
|
||||
virgl_transfer_inline_write(pipe, resource, 0, usage, &box, data, 0, 0);
|
||||
}
|
||||
|
||||
void virgl_init_context_resource_functions(struct pipe_context *ctx)
|
||||
|
@@ -31,7 +31,7 @@ struct pipe_fence_handle;
|
||||
struct winsys_handle;
|
||||
struct virgl_hw_res;
|
||||
|
||||
#define VIRGL_MAX_CMDBUF_DWORDS (16*1024)
|
||||
#define VIRGL_MAX_CMDBUF_DWORDS (64 * 1024)
|
||||
|
||||
struct virgl_drm_caps {
|
||||
union virgl_caps caps;
|
||||
|
@@ -421,12 +421,19 @@ static const __DRIextension *drisw_screen_extensions[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct drisw_loader_funcs drisw_lf = {
|
||||
static const struct drisw_loader_funcs drisw_lf = {
|
||||
.get_image = drisw_get_image,
|
||||
.put_image = drisw_put_image,
|
||||
.put_image2 = drisw_put_image2
|
||||
};
|
||||
|
||||
static const struct drisw_loader_funcs drisw_shm_lf = {
|
||||
.get_image = drisw_get_image,
|
||||
.put_image = drisw_put_image,
|
||||
.put_image2 = drisw_put_image2,
|
||||
.put_image_shm = drisw_put_image_shm
|
||||
};
|
||||
|
||||
static const __DRIconfig **
|
||||
drisw_init_screen(__DRIscreen * sPriv)
|
||||
{
|
||||
@@ -434,6 +441,7 @@ drisw_init_screen(__DRIscreen * sPriv)
|
||||
const __DRIconfig **configs;
|
||||
struct dri_screen *screen;
|
||||
struct pipe_screen *pscreen = NULL;
|
||||
const struct drisw_loader_funcs *lf = &drisw_lf;
|
||||
|
||||
screen = CALLOC_STRUCT(dri_screen);
|
||||
if (!screen)
|
||||
@@ -448,10 +456,10 @@ drisw_init_screen(__DRIscreen * sPriv)
|
||||
sPriv->extensions = drisw_screen_extensions;
|
||||
if (loader->base.version >= 4) {
|
||||
if (loader->putImageShm)
|
||||
drisw_lf.put_image_shm = drisw_put_image_shm;
|
||||
lf = &drisw_shm_lf;
|
||||
}
|
||||
|
||||
if (pipe_loader_sw_probe_dri(&screen->dev, &drisw_lf)) {
|
||||
if (pipe_loader_sw_probe_dri(&screen->dev, lf)) {
|
||||
dri_init_options(screen);
|
||||
|
||||
pscreen = pipe_loader_create_screen(screen->dev);
|
||||
|
@@ -91,6 +91,7 @@ xa_context_destroy(struct xa_context *r)
|
||||
}
|
||||
|
||||
r->pipe->destroy(r->pipe);
|
||||
free(r);
|
||||
}
|
||||
|
||||
XA_EXPORT int
|
||||
|
@@ -27,6 +27,7 @@ AM_CFLAGS = \
|
||||
$(GALLIUM_CFLAGS) \
|
||||
$(VISIBILITY_CFLAGS) \
|
||||
$(VL_CFLAGS) \
|
||||
$(X11_INCLUDES) \
|
||||
$(XCB_DRI3_CFLAGS) \
|
||||
$(XVMC_CFLAGS)
|
||||
|
||||
|
@@ -1310,6 +1310,12 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
||||
if (bo) {
|
||||
p_atomic_inc(&bo->base.reference.count);
|
||||
simple_mtx_unlock(&ws->bo_export_table_lock);
|
||||
|
||||
/* Release the buffer handle, because we don't need it anymore.
|
||||
* This function is returning an existing buffer, which has its own
|
||||
* handle.
|
||||
*/
|
||||
amdgpu_bo_free(result.buf_handle);
|
||||
return &bo->base;
|
||||
}
|
||||
|
||||
|
@@ -280,6 +280,12 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
|
||||
if (ws) {
|
||||
pipe_reference(NULL, &ws->reference);
|
||||
simple_mtx_unlock(&dev_tab_mutex);
|
||||
|
||||
/* Release the device handle, because we don't need it anymore.
|
||||
* This function is returning an existing winsys instance, which
|
||||
* has its own device handle.
|
||||
*/
|
||||
amdgpu_device_deinitialize(dev);
|
||||
return &ws->base;
|
||||
}
|
||||
|
||||
|
@@ -1198,4 +1198,6 @@ void
|
||||
vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
|
||||
{
|
||||
VMW_FUNC;
|
||||
|
||||
free(vws->ioctl.cap_3d);
|
||||
}
|
||||
|
@@ -62,7 +62,7 @@ struct dri_sw_winsys
|
||||
{
|
||||
struct sw_winsys base;
|
||||
|
||||
struct drisw_loader_funcs *lf;
|
||||
const struct drisw_loader_funcs *lf;
|
||||
};
|
||||
|
||||
static inline struct dri_sw_displaytarget *
|
||||
@@ -282,7 +282,7 @@ dri_destroy_sw_winsys(struct sw_winsys *winsys)
|
||||
}
|
||||
|
||||
struct sw_winsys *
|
||||
dri_create_sw_winsys(struct drisw_loader_funcs *lf)
|
||||
dri_create_sw_winsys(const struct drisw_loader_funcs *lf)
|
||||
{
|
||||
struct dri_sw_winsys *ws;
|
||||
|
||||
|
@@ -33,6 +33,6 @@
|
||||
|
||||
struct sw_winsys;
|
||||
|
||||
struct sw_winsys *dri_create_sw_winsys(struct drisw_loader_funcs *lf);
|
||||
struct sw_winsys *dri_create_sw_winsys(const struct drisw_loader_funcs *lf);
|
||||
|
||||
#endif
|
||||
|
@@ -4804,7 +4804,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
|
||||
break;
|
||||
}
|
||||
|
||||
case nir_intrinsic_begin_fragment_shader_ordering:
|
||||
case nir_intrinsic_begin_invocation_interlock: {
|
||||
const fs_builder ubld = bld.group(8, 0);
|
||||
const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
|
||||
|
@@ -636,7 +636,7 @@ VkResult anv_CreateInstance(
|
||||
}
|
||||
|
||||
if (instance->app_info.api_version == 0)
|
||||
anv_EnumerateInstanceVersion(&instance->app_info.api_version);
|
||||
instance->app_info.api_version = VK_API_VERSION_1_0;
|
||||
|
||||
instance->enabled_extensions = enabled_extensions;
|
||||
|
||||
|
@@ -446,6 +446,9 @@ anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
|
||||
if (layout)
|
||||
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
|
||||
|
||||
const bool rba = pipeline->device->robust_buffer_access;
|
||||
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
|
||||
|
||||
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
|
||||
if (stages[s].entrypoint)
|
||||
anv_pipeline_hash_shader(&ctx, &stages[s]);
|
||||
@@ -466,6 +469,9 @@ anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
|
||||
if (layout)
|
||||
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
|
||||
|
||||
const bool rba = pipeline->device->robust_buffer_access;
|
||||
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
|
||||
|
||||
anv_pipeline_hash_shader(&ctx, stage);
|
||||
|
||||
_mesa_sha1_final(&ctx, sha1_out);
|
||||
|
@@ -1747,6 +1747,13 @@ enum anv_pipe_bits {
|
||||
* we would have to CS stall on every flush which could be bad.
|
||||
*/
|
||||
ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
|
||||
|
||||
/* This bit does not exist directly in PIPE_CONTROL. It means that render
|
||||
* target operations are ongoing. Some operations like copies on the
|
||||
* command streamer might need to be aware of this to trigger the
|
||||
* appropriate stall before they can proceed with the copy.
|
||||
*/
|
||||
ANV_PIPE_RENDER_TARGET_WRITES = (1 << 22),
|
||||
};
|
||||
|
||||
#define ANV_PIPE_FLUSH_BITS ( \
|
||||
|
@@ -263,4 +263,5 @@ genX(blorp_exec)(struct blorp_batch *batch,
|
||||
cmd_buffer->state.gfx.vb_dirty = ~0;
|
||||
cmd_buffer->state.gfx.dirty = ~0;
|
||||
cmd_buffer->state.push_constants_dirty = ~0;
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
@@ -1758,6 +1758,12 @@ genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
|
||||
pipe.StallAtPixelScoreboard = true;
|
||||
}
|
||||
|
||||
/* If a render target flush was emitted, then we can toggle off the bit
|
||||
* saying that render target writes are ongoing.
|
||||
*/
|
||||
if (bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
|
||||
bits &= ~(ANV_PIPE_RENDER_TARGET_WRITES);
|
||||
|
||||
bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
|
||||
}
|
||||
|
||||
@@ -2769,6 +2775,8 @@ void genX(CmdDraw)(
|
||||
prim.StartInstanceLocation = firstInstance;
|
||||
prim.BaseVertexLocation = 0;
|
||||
}
|
||||
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
||||
void genX(CmdDrawIndexed)(
|
||||
@@ -2808,6 +2816,8 @@ void genX(CmdDrawIndexed)(
|
||||
prim.StartInstanceLocation = firstInstance;
|
||||
prim.BaseVertexLocation = vertexOffset;
|
||||
}
|
||||
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
||||
/* Auto-Draw / Indirect Registers */
|
||||
@@ -2941,6 +2951,8 @@ void genX(CmdDrawIndirect)(
|
||||
|
||||
offset += stride;
|
||||
}
|
||||
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
||||
void genX(CmdDrawIndexedIndirect)(
|
||||
@@ -2980,6 +2992,8 @@ void genX(CmdDrawIndexedIndirect)(
|
||||
|
||||
offset += stride;
|
||||
}
|
||||
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
||||
static VkResult
|
||||
|
@@ -302,4 +302,5 @@ genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
|
||||
}
|
||||
|
||||
cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_RENDER_TARGET_WRITES;
|
||||
}
|
||||
|
@@ -729,11 +729,19 @@ void genX(CmdCopyQueryPoolResults)(
|
||||
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
|
||||
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
|
||||
|
||||
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
|
||||
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
|
||||
pc.CommandStreamerStallEnable = true;
|
||||
pc.StallAtPixelScoreboard = true;
|
||||
}
|
||||
/* If render target writes are ongoing, request a render target cache flush
|
||||
* to ensure proper ordering of the commands from the 3d pipe and the
|
||||
* command streamer.
|
||||
*/
|
||||
if (cmd_buffer->state.pending_pipe_bits & ANV_PIPE_RENDER_TARGET_WRITES) {
|
||||
cmd_buffer->state.pending_pipe_bits |=
|
||||
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
|
||||
}
|
||||
|
||||
if ((flags & VK_QUERY_RESULT_WAIT_BIT) ||
|
||||
(cmd_buffer->state.pending_pipe_bits & ANV_PIPE_FLUSH_BITS)) {
|
||||
cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
|
||||
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
|
||||
}
|
||||
|
||||
struct anv_address dest_addr = anv_address_add(buffer->address, destOffset);
|
||||
|
@@ -40,7 +40,7 @@ libglapi = shared_library(
|
||||
'glapi',
|
||||
[files_mapi_glapi, files_mapi_util, shared_glapi_mapi_tmp_h],
|
||||
c_args : [
|
||||
c_msvc_compat_args, '-DMAPI_MODE_GLAPI',
|
||||
c_msvc_compat_args, c_vis_args, '-DMAPI_MODE_GLAPI',
|
||||
'-DMAPI_ABI_HEADER="@0@"'.format(shared_glapi_mapi_tmp_h.full_path()),
|
||||
],
|
||||
link_args : [ld_args_gc_sections],
|
||||
|
@@ -247,7 +247,6 @@ intelInitExtensions(struct gl_context *ctx)
|
||||
ctx->Extensions.OES_primitive_bounding_box = true;
|
||||
ctx->Extensions.OES_texture_buffer = true;
|
||||
ctx->Extensions.ARB_fragment_shader_interlock = true;
|
||||
ctx->Extensions.INTEL_fragment_shader_ordering = true;
|
||||
|
||||
if (can_do_pipelined_register_writes(brw->screen)) {
|
||||
ctx->Extensions.ARB_draw_indirect = true;
|
||||
|
@@ -317,7 +317,6 @@ EXT(IBM_texture_mirrored_repeat , dummy_true
|
||||
EXT(INGR_blend_func_separate , EXT_blend_func_separate , GLL, x , x , x , 1999)
|
||||
|
||||
EXT(INTEL_conservative_rasterization , INTEL_conservative_rasterization , x , GLC, x , 31, 2013)
|
||||
EXT(INTEL_fragment_shader_ordering , INTEL_fragment_shader_ordering , GLL, GLC, x , x , 2013)
|
||||
EXT(INTEL_performance_query , INTEL_performance_query , GLL, GLC, x , ES2, 2013)
|
||||
EXT(INTEL_shader_atomic_float_minmax , INTEL_shader_atomic_float_minmax , GLL, GLC, x , x , 2018)
|
||||
|
||||
|
@@ -4296,7 +4296,6 @@ struct gl_extensions
|
||||
GLboolean ATI_fragment_shader;
|
||||
GLboolean GREMEDY_string_marker;
|
||||
GLboolean INTEL_conservative_rasterization;
|
||||
GLboolean INTEL_fragment_shader_ordering;
|
||||
GLboolean INTEL_performance_query;
|
||||
GLboolean INTEL_shader_atomic_float_minmax;
|
||||
GLboolean KHR_blend_equation_advanced;
|
||||
|
@@ -900,8 +900,7 @@ select_tex_image(const struct gl_texture_object *texObj, GLenum target,
|
||||
|
||||
/**
|
||||
* Error-check the offset and size arguments to
|
||||
* glGet[Compressed]TextureSubImage(). Also checks if the specified
|
||||
* texture image is missing.
|
||||
* glGet[Compressed]TextureSubImage().
|
||||
* \return true if error, false if no error.
|
||||
*/
|
||||
static bool
|
||||
@@ -913,6 +912,7 @@ dimensions_error_check(struct gl_context *ctx,
|
||||
const char *caller)
|
||||
{
|
||||
const struct gl_texture_image *texImage;
|
||||
GLuint imageWidth = 0, imageHeight = 0, imageDepth = 0;
|
||||
|
||||
if (xoffset < 0) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE, "%s(xoffset = %d)", caller, xoffset);
|
||||
@@ -981,82 +981,44 @@ dimensions_error_check(struct gl_context *ctx,
|
||||
"%s(zoffset + depth = %d)", caller, zoffset + depth);
|
||||
return true;
|
||||
}
|
||||
/* According to OpenGL 4.6 spec, section 8.11.4 ("Texture Image Queries"):
|
||||
*
|
||||
* "An INVALID_OPERATION error is generated by GetTextureImage if the
|
||||
* effective target is TEXTURE_CUBE_MAP or TEXTURE_CUBE_MAP_ARRAY ,
|
||||
* and the texture object is not cube complete or cube array complete,
|
||||
* respectively."
|
||||
*
|
||||
* This applies also to GetTextureSubImage, GetCompressedTexImage,
|
||||
* GetCompressedTextureImage, and GetnCompressedTexImage.
|
||||
*/
|
||||
if (!_mesa_cube_complete(texObj)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"%s(cube incomplete)", caller);
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
; /* nothing */
|
||||
}
|
||||
|
||||
texImage = select_tex_image(texObj, target, level, zoffset);
|
||||
if (!texImage) {
|
||||
/* Trying to return a non-defined level is a valid operation per se, as
|
||||
* OpenGL 4.6 spec, section 8.11.4 ("Texture Image Queries") does not
|
||||
* handle this case as an error.
|
||||
*
|
||||
* Rather, we need to look at section 8.22 ("Texture State and Proxy
|
||||
* State"):
|
||||
*
|
||||
* "Each initial texture image is null. It has zero width, height, and
|
||||
* depth, internal format RGBA, or R8 for buffer textures, component
|
||||
* sizes set to zero and component types set to NONE, the compressed
|
||||
* flag set to FALSE, a zero compressed size, and the bound buffer
|
||||
* object name is zero."
|
||||
*
|
||||
* This means we need to assume the image for the non-defined level is
|
||||
* an empty image. With this assumption, we can go back to section
|
||||
* 8.11.4 and checking again the errors:
|
||||
*
|
||||
* "An INVALID_VALUE error is generated if xoffset + width is greater
|
||||
* than the texture’s width, yoffset + height is greater than the
|
||||
* texture’s height, or zoffset + depth is greater than the texture’s
|
||||
* depth."
|
||||
*
|
||||
* Thus why we return INVALID_VALUE.
|
||||
*/
|
||||
_mesa_error(ctx, GL_INVALID_VALUE, "%s(missing image)", caller);
|
||||
return true;
|
||||
if (texImage) {
|
||||
imageWidth = texImage->Width;
|
||||
imageHeight = texImage->Height;
|
||||
imageDepth = texImage->Depth;
|
||||
}
|
||||
|
||||
if (xoffset + width > texImage->Width) {
|
||||
if (xoffset + width > imageWidth) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE,
|
||||
"%s(xoffset %d + width %d > %u)",
|
||||
caller, xoffset, width, texImage->Width);
|
||||
caller, xoffset, width, imageWidth);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (yoffset + height > texImage->Height) {
|
||||
if (yoffset + height > imageHeight) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE,
|
||||
"%s(yoffset %d + height %d > %u)",
|
||||
caller, yoffset, height, texImage->Height);
|
||||
caller, yoffset, height, imageHeight);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (target != GL_TEXTURE_CUBE_MAP) {
|
||||
/* Cube map error checking was done above */
|
||||
if (zoffset + depth > texImage->Depth) {
|
||||
if (zoffset + depth > imageDepth) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE,
|
||||
"%s(zoffset %d + depth %d > %u)",
|
||||
caller, zoffset, depth, texImage->Depth);
|
||||
caller, zoffset, depth, imageDepth);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Extra checks for compressed textures */
|
||||
{
|
||||
if (texImage) {
|
||||
GLuint bw, bh, bd;
|
||||
_mesa_get_format_block_size_3d(texImage->TexFormat, &bw, &bh, &bd);
|
||||
if (bw > 1 || bh > 1 || bd > 1) {
|
||||
@@ -1162,53 +1124,15 @@ pbo_error_check(struct gl_context *ctx, GLenum target,
|
||||
|
||||
|
||||
/**
|
||||
* Do error checking for all (non-compressed) get-texture-image functions.
|
||||
* \return true if any error, false if no errors.
|
||||
* Do teximage-related error checking for getting uncompressed images.
|
||||
* \return true if there was an error
|
||||
*/
|
||||
static bool
|
||||
getteximage_error_check(struct gl_context *ctx,
|
||||
struct gl_texture_object *texObj,
|
||||
GLenum target, GLint level,
|
||||
GLint xoffset, GLint yoffset, GLint zoffset,
|
||||
GLsizei width, GLsizei height, GLsizei depth,
|
||||
GLenum format, GLenum type, GLsizei bufSize,
|
||||
GLvoid *pixels, const char *caller)
|
||||
teximage_error_check(struct gl_context *ctx,
|
||||
struct gl_texture_image *texImage,
|
||||
GLenum format, const char *caller)
|
||||
{
|
||||
struct gl_texture_image *texImage;
|
||||
GLenum baseFormat, err;
|
||||
GLint maxLevels;
|
||||
|
||||
assert(texObj);
|
||||
|
||||
if (texObj->Target == 0) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "%s(invalid texture)", caller);
|
||||
return true;
|
||||
}
|
||||
|
||||
maxLevels = _mesa_max_texture_levels(ctx, target);
|
||||
if (level < 0 || level >= maxLevels) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE, "%s(level = %d)", caller, level);
|
||||
return true;
|
||||
}
|
||||
|
||||
err = _mesa_error_check_format_and_type(ctx, format, type);
|
||||
if (err != GL_NO_ERROR) {
|
||||
_mesa_error(ctx, err, "%s(format/type)", caller);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (dimensions_error_check(ctx, texObj, target, level,
|
||||
xoffset, yoffset, zoffset,
|
||||
width, height, depth, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pbo_error_check(ctx, target, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
texImage = select_tex_image(texObj, target, level, zoffset);
|
||||
GLenum baseFormat;
|
||||
assert(texImage);
|
||||
|
||||
/*
|
||||
@@ -1241,8 +1165,8 @@ getteximage_error_check(struct gl_context *ctx,
|
||||
return true;
|
||||
}
|
||||
else if (_mesa_is_stencil_format(format)
|
||||
&& !_mesa_is_depthstencil_format(baseFormat)
|
||||
&& !_mesa_is_stencil_format(baseFormat)) {
|
||||
&& !_mesa_is_depthstencil_format(baseFormat)
|
||||
&& !_mesa_is_stencil_format(baseFormat)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"%s(format mismatch)", caller);
|
||||
return true;
|
||||
@@ -1271,6 +1195,142 @@ getteximage_error_check(struct gl_context *ctx,
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Do common teximage-related error checking for getting uncompressed images.
|
||||
* \return true if there was an error
|
||||
*/
|
||||
static bool
|
||||
common_error_check(struct gl_context *ctx,
|
||||
struct gl_texture_object *texObj,
|
||||
GLenum target, GLint level,
|
||||
GLsizei width, GLsizei height, GLsizei depth,
|
||||
GLenum format, GLenum type, GLsizei bufSize,
|
||||
GLvoid *pixels, const char *caller)
|
||||
{
|
||||
GLenum err;
|
||||
GLint maxLevels;
|
||||
|
||||
if (texObj->Target == 0) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "%s(invalid texture)", caller);
|
||||
return true;
|
||||
}
|
||||
|
||||
maxLevels = _mesa_max_texture_levels(ctx, target);
|
||||
if (level < 0 || level >= maxLevels) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE, "%s(level = %d)", caller, level);
|
||||
return true;
|
||||
}
|
||||
|
||||
err = _mesa_error_check_format_and_type(ctx, format, type);
|
||||
if (err != GL_NO_ERROR) {
|
||||
_mesa_error(ctx, err, "%s(format/type)", caller);
|
||||
return true;
|
||||
}
|
||||
|
||||
/* According to OpenGL 4.6 spec, section 8.11.4 ("Texture Image Queries"):
|
||||
*
|
||||
* "An INVALID_OPERATION error is generated by GetTextureImage if the
|
||||
* effective target is TEXTURE_CUBE_MAP or TEXTURE_CUBE_MAP_ARRAY ,
|
||||
* and the texture object is not cube complete or cube array complete,
|
||||
* respectively."
|
||||
*
|
||||
* This applies also to GetTextureSubImage, GetCompressedTexImage,
|
||||
* GetCompressedTextureImage, and GetnCompressedTexImage.
|
||||
*/
|
||||
if (target == GL_TEXTURE_CUBE_MAP && !_mesa_cube_complete(texObj)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"%s(cube incomplete)", caller);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Do error checking for all (non-compressed) get-texture-image functions.
|
||||
* \return true if any error, false if no errors.
|
||||
*/
|
||||
static bool
|
||||
getteximage_error_check(struct gl_context *ctx,
|
||||
struct gl_texture_object *texObj,
|
||||
GLenum target, GLint level,
|
||||
GLsizei width, GLsizei height, GLsizei depth,
|
||||
GLenum format, GLenum type, GLsizei bufSize,
|
||||
GLvoid *pixels, const char *caller)
|
||||
{
|
||||
struct gl_texture_image *texImage;
|
||||
|
||||
assert(texObj);
|
||||
|
||||
if (common_error_check(ctx, texObj, target, level, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (width == 0 || height == 0 || depth == 0) {
|
||||
/* Not an error, but nothing to do. Return 'true' so that the
|
||||
* caller simply returns.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pbo_error_check(ctx, target, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
texImage = select_tex_image(texObj, target, level, 0);
|
||||
if (teximage_error_check(ctx, texImage, format, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Do error checking for all (non-compressed) get-texture-image functions.
|
||||
* \return true if any error, false if no errors.
|
||||
*/
|
||||
static bool
|
||||
gettexsubimage_error_check(struct gl_context *ctx,
|
||||
struct gl_texture_object *texObj,
|
||||
GLenum target, GLint level,
|
||||
GLint xoffset, GLint yoffset, GLint zoffset,
|
||||
GLsizei width, GLsizei height, GLsizei depth,
|
||||
GLenum format, GLenum type, GLsizei bufSize,
|
||||
GLvoid *pixels, const char *caller)
|
||||
{
|
||||
struct gl_texture_image *texImage;
|
||||
|
||||
assert(texObj);
|
||||
|
||||
if (common_error_check(ctx, texObj, target, level, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (dimensions_error_check(ctx, texObj, target, level,
|
||||
xoffset, yoffset, zoffset,
|
||||
width, height, depth, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pbo_error_check(ctx, target, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
texImage = select_tex_image(texObj, target, level, zoffset);
|
||||
if (teximage_error_check(ctx, texImage, format, caller)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Return the width, height and depth of a texture image.
|
||||
* This function must be resilient to bad parameter values since
|
||||
@@ -1399,7 +1459,7 @@ _mesa_GetnTexImageARB(GLenum target, GLint level, GLenum format, GLenum type,
|
||||
get_texture_image_dims(texObj, target, level, &width, &height, &depth);
|
||||
|
||||
if (getteximage_error_check(ctx, texObj, target, level,
|
||||
0, 0, 0, width, height, depth,
|
||||
width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return;
|
||||
}
|
||||
@@ -1430,7 +1490,7 @@ _mesa_GetTexImage(GLenum target, GLint level, GLenum format, GLenum type,
|
||||
get_texture_image_dims(texObj, target, level, &width, &height, &depth);
|
||||
|
||||
if (getteximage_error_check(ctx, texObj, target, level,
|
||||
0, 0, 0, width, height, depth,
|
||||
width, height, depth,
|
||||
format, type, INT_MAX, pixels, caller)) {
|
||||
return;
|
||||
}
|
||||
@@ -1464,7 +1524,7 @@ _mesa_GetTextureImage(GLuint texture, GLint level, GLenum format, GLenum type,
|
||||
&width, &height, &depth);
|
||||
|
||||
if (getteximage_error_check(ctx, texObj, texObj->Target, level,
|
||||
0, 0, 0, width, height, depth,
|
||||
width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return;
|
||||
}
|
||||
@@ -1497,9 +1557,10 @@ _mesa_GetTextureSubImage(GLuint texture, GLint level,
|
||||
return;
|
||||
}
|
||||
|
||||
if (getteximage_error_check(ctx, texObj, texObj->Target, level,
|
||||
xoffset, yoffset, zoffset, width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
if (gettexsubimage_error_check(ctx, texObj, texObj->Target, level,
|
||||
xoffset, yoffset, zoffset,
|
||||
width, height, depth,
|
||||
format, type, bufSize, pixels, caller)) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -4072,7 +4072,6 @@ glsl_to_tgsi_visitor::visit(ir_call *ir)
|
||||
case ir_intrinsic_generic_atomic_comp_swap:
|
||||
case ir_intrinsic_begin_invocation_interlock:
|
||||
case ir_intrinsic_end_invocation_interlock:
|
||||
case ir_intrinsic_begin_fragment_shader_ordering:
|
||||
unreachable("Invalid intrinsic");
|
||||
}
|
||||
}
|
||||
|
@@ -954,8 +954,8 @@ wsi_common_queue_present(const struct wsi_device *wsi,
|
||||
/* We only need/want to wait on semaphores once. After that, we're
|
||||
* guaranteed ordering since it all happens on the same queue.
|
||||
*/
|
||||
submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount,
|
||||
submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores,
|
||||
submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
|
||||
submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
|
||||
|
||||
/* Set up the pWaitDstStageMasks */
|
||||
stage_flags = vk_alloc(&swapchain->alloc,
|
||||
|
@@ -1062,6 +1062,8 @@ wsi_display_swapchain_destroy(struct wsi_swapchain *drv_chain,
|
||||
|
||||
for (uint32_t i = 0; i < chain->base.image_count; i++)
|
||||
wsi_display_image_finish(drv_chain, allocator, &chain->images[i]);
|
||||
|
||||
wsi_swapchain_finish(&chain->base);
|
||||
vk_free(allocator, chain);
|
||||
return VK_SUCCESS;
|
||||
}