Compare commits

...

11 Commits

Author SHA1 Message Date
Joshua Ashton
aa5bc3e41f wsi: Implement linux-drm-syncobj-v1
This implements explicit sync with linux-drm-syncobj-v1 for the
Wayland WSI.

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-27 17:49:03 +00:00
Joshua Ashton
e4e3436d45 wsi: Add common infrastructure for explicit sync
Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-27 17:49:03 +00:00
Joshua Ashton
becb5d5161 wsi: Get timeline semaphore exportable handle types
We need to know this for explicit sync

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-27 17:44:16 +00:00
Joshua Ashton
06c2af994b wsi: Track CPU side present ordering via a serial
We will use this in our heuristics to pick the most optimal buffer in AcquireNextImageKHR

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-25 21:00:54 +00:00
Joshua Ashton
d9cbc79941 wsi: Add acquired member to wsi_image
Tracks whether this wsi_image has been acquired by the app

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-25 21:00:54 +00:00
Joshua Ashton
e209b02b97 wsi: Track if timeline semaphores are supported
This will be needed before we expose and use explicit sync.

Even if the host Wayland compositor supports timeline semaphores, in the
case of Venus, etc., the underlying driver may not.

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-22 00:24:26 +00:00
Joshua Ashton
8a098f591b build: Add linux-drm-syncobj-v1 wayland protocol
Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-22 00:24:26 +00:00
Joshua Ashton
754f52e1e1 wsi: Add explicit_sync to wsi_drm_image_params
Allow the WSI frontend to request explicit sync buffers.

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-22 00:24:26 +00:00
Joshua Ashton
00dba3992c wsi: Add explicit_sync to wsi_image_info
Will be used in future for specifying explicit sync for Vulkan WSI when supported.

Additionally cleans up wsi_create_buffer_blit_context, etc..

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-22 00:24:26 +00:00
Joshua Ashton
9c8f205131 wsi: Pass wsi_drm_image_params to wsi_configure_prime_image
Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-20 17:21:27 +00:00
Joshua Ashton
f17f43b149 wsi: Pass wsi_drm_image_params to wsi_configure_native_image
No need to split this out into function parameters, it's just less clean.

Signed-off-by: Joshua Ashton <joshua@froggi.es>
2024-03-20 17:21:26 +00:00
7 changed files with 647 additions and 72 deletions

View File

@@ -62,6 +62,7 @@ wp_protos = {
'linux-dmabuf-unstable-v1': 'unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml',
'presentation-time': 'stable/presentation-time/presentation-time.xml',
'tearing-control-v1': 'staging/tearing-control/tearing-control-v1.xml',
'linux-drm-syncobj-v1': 'staging/linux-drm-syncobj/linux-drm-syncobj-v1.xml',
}
wp_files = {}
foreach name, xml : wp_protos

View File

@@ -36,6 +36,7 @@ if with_platform_wayland
files_vulkan_wsi += wp_files['presentation-time']
files_vulkan_wsi += wp_files['tearing-control-v1']
links_vulkan_wsi += libloader_wayland_helper
files_vulkan_wsi += wp_files['linux-drm-syncobj-v1']
endif
if with_platform_windows

View File

@@ -123,7 +123,7 @@ wsi_device_init(struct wsi_device *wsi,
for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
handle_type <<= 1) {
const VkPhysicalDeviceExternalSemaphoreInfo esi = {
VkPhysicalDeviceExternalSemaphoreInfo esi = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
.handleType = handle_type,
};
@@ -135,6 +135,17 @@ wsi_device_init(struct wsi_device *wsi,
if (esp.externalSemaphoreFeatures &
VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
wsi->semaphore_export_handle_types |= handle_type;
VkSemaphoreTypeCreateInfo timeline_tci = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
};
esi.pNext = &timeline_tci;
GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);
if (esp.externalSemaphoreFeatures &
VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
wsi->timeline_semaphore_export_handle_types |= handle_type;
}
const struct vk_device_extension_table *supported_extensions =
@@ -144,6 +155,8 @@ wsi_device_init(struct wsi_device *wsi,
wsi->khr_present_wait =
supported_extensions->KHR_present_id &&
supported_extensions->KHR_present_wait;
wsi->has_timeline_semaphore =
supported_extensions->KHR_timeline_semaphore;
/* We cannot expose KHR_present_wait without timeline semaphores. */
assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);
@@ -703,6 +716,8 @@ wsi_create_image(const struct wsi_swapchain *chain,
#ifndef _WIN32
image->dma_buf_fd = -1;
for (uint32_t i = 0; i < WSI_ES_COUNT; i++)
image->explicit_sync[i].fd = -1;
#endif
result = wsi->CreateImage(chain->device, &info->create,
@@ -725,6 +740,17 @@ wsi_create_image(const struct wsi_swapchain *chain,
goto fail;
}
if (info->explicit_sync) {
#if HAVE_LIBDRM
result = wsi_create_image_explicit_sync_drm(chain, image);
if (result != VK_SUCCESS)
goto fail;
#else
result = VK_ERROR_FEATURE_NOT_PRESENT;
goto fail;
#endif
}
return VK_SUCCESS;
fail:
@@ -743,6 +769,12 @@ wsi_destroy_image(const struct wsi_swapchain *chain,
close(image->dma_buf_fd);
#endif
if (image->explicit_sync[WSI_ES_ACQUIRE].semaphore) {
#if HAVE_LIBDRM
wsi_destroy_image_explicit_sync_drm(chain, image);
#endif
}
if (image->cpu_map != NULL) {
wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
image->blit.memory : image->memory);
@@ -1047,6 +1079,15 @@ wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
{
VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
uint32_t index = pReleaseInfo->pImageIndices[i];
assert(index < swapchain->image_count);
struct wsi_image *image = swapchain->get_wsi_image(swapchain, index);
assert(image->acquired);
image->acquired = false;
}
VkResult result = swapchain->release_images(swapchain,
pReleaseInfo->imageIndexCount,
pReleaseInfo->pImageIndices);
@@ -1140,9 +1181,13 @@ wsi_signal_semaphore_for_image(struct vk_device *device,
vk_semaphore_reset_temporary(device, semaphore);
#ifdef HAVE_LIBDRM
VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
VK_SYNC_FEATURE_GPU_WAIT,
&semaphore->temporary);
VkResult result = chain->image_info.explicit_sync ?
wsi_create_sync_for_image_syncobj(chain, image,
VK_SYNC_FEATURE_GPU_WAIT,
&semaphore->temporary) :
wsi_create_sync_for_dma_buf_wait(chain, image,
VK_SYNC_FEATURE_GPU_WAIT,
&semaphore->temporary);
if (result != VK_ERROR_FEATURE_NOT_PRESENT)
return result;
#endif
@@ -1172,9 +1217,13 @@ wsi_signal_fence_for_image(struct vk_device *device,
vk_fence_reset_temporary(device, fence);
#ifdef HAVE_LIBDRM
VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
VK_SYNC_FEATURE_CPU_WAIT,
&fence->temporary);
VkResult result = chain->image_info.explicit_sync ?
wsi_create_sync_for_image_syncobj(chain, image,
VK_SYNC_FEATURE_CPU_WAIT,
&fence->temporary) :
wsi_create_sync_for_dma_buf_wait(chain, image,
VK_SYNC_FEATURE_CPU_WAIT,
&fence->temporary);
if (result != VK_ERROR_FEATURE_NOT_PRESENT)
return result;
#endif
@@ -1206,6 +1255,8 @@ wsi_common_acquire_next_image2(const struct wsi_device *wsi,
struct wsi_image *image =
swapchain->get_wsi_image(swapchain, *pImageIndex);
image->acquired = true;
if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
VkResult signal_result =
wsi_signal_semaphore_for_image(device, swapchain, image,
@@ -1370,6 +1421,10 @@ wsi_common_queue_present(const struct wsi_device *wsi,
if (result != VK_SUCCESS)
goto fail_present;
VkTimelineSemaphoreSubmitInfo timeline_submit_info = {
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
};
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
};
@@ -1422,49 +1477,72 @@ wsi_common_queue_present(const struct wsi_device *wsi,
VkFence fence = swapchain->fences[image_index];
struct wsi_memory_signal_submit_info mem_signal;
bool has_signal_dma_buf = false;
#ifdef HAVE_LIBDRM
result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
if (result == VK_SUCCESS) {
bool explicit_sync = swapchain->image_info.explicit_sync;
if (explicit_sync) {
/* We will signal this acquire value ourselves when GPU work is done. */
image->explicit_sync[WSI_ES_ACQUIRE].timeline++;
/* The compositor will signal this value when it is done with the image. */
image->explicit_sync[WSI_ES_RELEASE].timeline++;
timeline_submit_info.signalSemaphoreValueCount = 1;
timeline_submit_info.pSignalSemaphoreValues = &image->explicit_sync[WSI_ES_ACQUIRE].timeline;
assert(submit_info.signalSemaphoreCount == 0);
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
has_signal_dma_buf = true;
} else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
result = VK_SUCCESS;
has_signal_dma_buf = false;
submit_info.pSignalSemaphores = &image->explicit_sync[WSI_ES_ACQUIRE].semaphore;
__vk_append_struct(&submit_info, &timeline_submit_info);
} else {
goto fail_present;
}
#ifdef HAVE_LIBDRM
result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
if (result == VK_SUCCESS) {
assert(submit_info.signalSemaphoreCount == 0);
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
has_signal_dma_buf = true;
} else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
result = VK_SUCCESS;
has_signal_dma_buf = false;
} else {
goto fail_present;
}
#endif
struct wsi_memory_signal_submit_info mem_signal;
if (!has_signal_dma_buf) {
/* If we don't have dma-buf signaling, signal the memory object by
* chaining wsi_memory_signal_submit_info into VkSubmitInfo.
*/
result = VK_SUCCESS;
has_signal_dma_buf = false;
mem_signal = (struct wsi_memory_signal_submit_info) {
.sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
.memory = image->memory,
};
__vk_append_struct(&submit_info, &mem_signal);
if (!has_signal_dma_buf) {
/* If we don't have dma-buf signaling, signal the memory object by
* chaining wsi_memory_signal_submit_info into VkSubmitInfo.
*/
result = VK_SUCCESS;
has_signal_dma_buf = false;
mem_signal = (struct wsi_memory_signal_submit_info) {
.sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
.memory = image->memory,
};
__vk_append_struct(&submit_info, &mem_signal);
}
}
result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
if (result != VK_SUCCESS)
goto fail_present;
/* The app can only submit images they have acquired. */
assert(image->acquired);
image->acquired = false;
image->present_serial = ++swapchain->present_serial;
if (!explicit_sync) {
#ifdef HAVE_LIBDRM
if (has_signal_dma_buf) {
result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
if (result != VK_SUCCESS)
goto fail_present;
}
if (has_signal_dma_buf) {
result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
if (result != VK_SUCCESS)
goto fail_present;
}
#else
assert(!has_signal_dma_buf);
assert(!has_signal_dma_buf);
#endif
}
if (wsi->sw)
wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
@@ -1695,8 +1773,7 @@ VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
const struct wsi_image_info *info,
struct wsi_image *image,
VkExternalMemoryHandleTypeFlags handle_types,
bool implicit_sync)
VkExternalMemoryHandleTypeFlags handle_types)
{
assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
@@ -1727,7 +1804,7 @@ wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
struct wsi_memory_allocate_info memory_wsi_info = {
.sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
.pNext = NULL,
.implicit_sync = implicit_sync,
.implicit_sync = !info->explicit_sync,
};
VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
@@ -2073,8 +2150,7 @@ wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
{
VkResult result;
result = wsi_create_buffer_blit_context(chain, info, image, 0,
false /* implicit_sync */);
result = wsi_create_buffer_blit_context(chain, info, image, 0);
if (result != VK_SUCCESS)
return result;
@@ -2124,6 +2200,7 @@ wsi_configure_cpu_image(const struct wsi_swapchain *chain,
1 /* size_align */,
info);
info->explicit_sync = true;
info->select_blit_dst_memory_type = wsi_select_host_memory_type;
info->select_image_memory_type = wsi_select_device_memory_type;
info->create_mem = wsi_create_cpu_buffer_image_mem;

View File

@@ -112,8 +112,10 @@ struct wsi_device {
VkPhysicalDevicePCIBusInfoPropertiesEXT pci_bus_info;
VkExternalSemaphoreHandleTypeFlags semaphore_export_handle_types;
VkExternalSemaphoreHandleTypeFlags timeline_semaphore_export_handle_types;
bool has_import_memory_host;
bool has_timeline_semaphore;
/** Indicates if wsi_image_create_info::scanout is supported
*

View File

@@ -29,9 +29,11 @@
#include "util/xmlconfig.h"
#include "vk_device.h"
#include "vk_physical_device.h"
#include "vk_log.h"
#include "vk_util.h"
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/dma-buf.h"
#include "util/libsync.h"
#include <errno.h>
#include <time.h>
@@ -229,6 +231,198 @@ fail_close_sync_file:
return result;
}
/* Create per-image explicit-sync state: one exportable timeline VkSemaphore
 * per timeline slot (WSI_ES_ACQUIRE and WSI_ES_RELEASE), the exported opaque
 * fd for each, and a DRM syncobj handle imported from that fd.
 *
 * Returns VK_SUCCESS on success, the failing Vulkan result from semaphore
 * creation/export, or VK_ERROR_FEATURE_NOT_PRESENT if the exported fd cannot
 * be imported as a DRM syncobj (i.e. the driver's opaque fd is not a syncobj
 * fd).
 */
VkResult
wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
struct wsi_image *image)
{
/* Cleanup of any failures is handled by the caller in wsi_create_image
* calling wsi_destroy_image -> wsi_destroy_image_explicit_sync_drm. */
VK_FROM_HANDLE(vk_device, device, chain->device);
const struct wsi_device *wsi = chain->wsi;
VkResult result = VK_SUCCESS;
int ret = 0;
const VkExportSemaphoreCreateInfo semaphore_export_info = {
.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
/* This is a syncobj fd for any drivers using syncobj. */
.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
};
const VkSemaphoreTypeCreateInfo semaphore_type_info = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
.pNext = &semaphore_export_info,
.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
};
const VkSemaphoreCreateInfo semaphore_info = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
.pNext = &semaphore_type_info,
};
/* Create and export a timeline semaphore for each timeline slot. Any
 * failure returns immediately; partially-created state is torn down by
 * the caller as noted above. */
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
result = wsi->CreateSemaphore(chain->device,
&semaphore_info,
&chain->alloc,
&image->explicit_sync[i].semaphore);
if (result != VK_SUCCESS)
return result;
const VkSemaphoreGetFdInfoKHR semaphore_get_info = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
.semaphore = image->explicit_sync[i].semaphore,
.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
};
result = wsi->GetSemaphoreFdKHR(chain->device, &semaphore_get_info, &image->explicit_sync[i].fd);
if (result != VK_SUCCESS)
return result;
}
/* Import each exported fd as a DRM syncobj handle. If the import fails,
 * the opaque fd the driver gave us is not a syncobj fd, so explicit sync
 * cannot be used with this driver. */
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
ret = drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd, &image->explicit_sync[i].handle);
if (ret != 0)
return VK_ERROR_FEATURE_NOT_PRESENT;
}
return VK_SUCCESS;
}
void
wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
struct wsi_image *image)
{
VK_FROM_HANDLE(vk_device, device, chain->device);
const struct wsi_device *wsi = chain->wsi;
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (image->explicit_sync[i].handle != 0) {
drmSyncobjDestroy(device->drm_fd, image->explicit_sync[i].handle);
image->explicit_sync[i].handle = 0;
}
if (image->explicit_sync[i].fd >= 0) {
close(image->explicit_sync[i].fd);
image->explicit_sync[i].fd = -1;
}
if (image->explicit_sync[i].semaphore != VK_NULL_HANDLE) {
wsi->DestroySemaphore(chain->device, image->explicit_sync[i].semaphore, &chain->alloc);
image->explicit_sync[i].semaphore = VK_NULL_HANDLE;
}
}
}
static VkResult
wsi_create_sync_imm(struct vk_device *device, struct vk_sync **sync_out)
{
const struct vk_sync_type *sync_type =
get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
struct vk_sync *sync = NULL;
VkResult result;
result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
if (result != VK_SUCCESS)
goto error;
result = vk_sync_signal(device, sync, 0);
if (result != VK_SUCCESS)
goto error;
*sync_out = sync;
goto done;
error:
vk_sync_destroy(device, sync);
done:
return result;
}
/* Build a vk_sync representing "this image is ready to reuse" from the
 * image's explicit-sync timelines.
 *
 * The acquire and release timeline points are each transferred onto a
 * temporary binary syncobj, exported as sync files, merged into a single
 * sync file, and imported into a new shareable vk_sync. All temporary
 * handles, fds and the merged fd are released before returning, on both
 * success and failure.
 *
 * Returns VK_SUCCESS with *sync_out set, VK_ERROR_FEATURE_NOT_PRESENT if no
 * suitable sync-file-capable sync type exists, or an error from the syncobj
 * ioctls / vk_sync creation.
 */
VkResult
wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
const struct wsi_image *image,
enum vk_sync_features req_features,
struct vk_sync **sync_out)
{
VK_FROM_HANDLE(vk_device, device, chain->device);
const struct vk_sync_type *sync_type =
get_sync_file_sync_type(device, VK_SYNC_FEATURE_CPU_WAIT);
VkResult result = VK_SUCCESS;
struct vk_sync *sync = NULL;
int sync_file_fds[WSI_ES_COUNT] = { -1, -1 };
uint32_t handles[WSI_ES_COUNT] = { 0, 0 };
uint32_t tmp_handles[WSI_ES_COUNT] = { 0, 0 };
int merged_sync_fd = -1;
if (sync_type == NULL)
return VK_ERROR_FEATURE_NOT_PRESENT;
if (image->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
/* Signal immediately, there is no release to forward. */
return wsi_create_sync_imm(device, sync_out);
}
/* Transfer over to a new sync file with a
* surrogate handle.
*/
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
/* Re-import the image's syncobj fd as a fresh handle owned by this
 * function (released in the cleanup below). */
if (drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd, &handles[i])) {
return vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to convert syncobj fd to handle. Errno: %d - %s", errno, strerror(errno));
}
if (drmSyncobjCreate(device->drm_fd, 0, &tmp_handles[i])) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to create temp syncobj. Errno: %d - %s", errno, strerror(errno));
goto fail;
}
/* Copy the fence at the current timeline point onto the binary temp
 * syncobj, then export that as a sync file. */
if (drmSyncobjTransfer(device->drm_fd, tmp_handles[i], 0,
handles[i], image->explicit_sync[i].timeline, 0)) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to transfer syncobj. Was the timeline point materialized? Errno: %d - %s", errno, strerror(errno));
goto fail;
}
if (drmSyncobjExportSyncFile(device->drm_fd, tmp_handles[i], &sync_file_fds[i])) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to export sync file. Errno: %d - %s", errno, strerror(errno));
goto fail;
}
}
/* The resulting sync must wait on both the acquire and release fences. */
merged_sync_fd = sync_merge("acquire merged sync", sync_file_fds[WSI_ES_ACQUIRE], sync_file_fds[WSI_ES_RELEASE]);
if (merged_sync_fd < 0) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to merge acquire + release sync timelines. Errno: %d - %s", errno, strerror(errno));
goto fail;
}
result = vk_sync_create(device, sync_type, VK_SYNC_IS_SHAREABLE, 0, &sync);
if (result != VK_SUCCESS)
goto fail;
result = vk_sync_import_sync_file(device, sync, merged_sync_fd);
if (result != VK_SUCCESS)
goto fail;
*sync_out = sync;
goto done;
fail:
if (sync)
vk_sync_destroy(device, sync);
done:
/* Common cleanup: every temp handle, re-imported handle, and fd is
 * released whether we succeeded or failed. */
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (tmp_handles[i])
drmSyncobjDestroy(device->drm_fd, tmp_handles[i]);
}
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (handles[i])
drmSyncobjDestroy(device->drm_fd, handles[i]);
}
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (sync_file_fds[i] >= 0)
close(sync_file_fds[i]);
}
if (merged_sync_fd >= 0)
close(merged_sync_fd);
return result;
}
bool
wsi_common_drm_devices_equal(int fd_a, int fd_b)
{
@@ -309,9 +503,7 @@ wsi_create_native_image_mem(const struct wsi_swapchain *chain,
static VkResult
wsi_configure_native_image(const struct wsi_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
uint32_t num_modifier_lists,
const uint32_t *num_modifiers,
const uint64_t *const *modifiers,
const struct wsi_drm_image_params *params,
struct wsi_image_info *info)
{
const struct wsi_device *wsi = chain->wsi;
@@ -323,7 +515,9 @@ wsi_configure_native_image(const struct wsi_swapchain *chain,
if (result != VK_SUCCESS)
return result;
if (num_modifier_lists == 0) {
info->explicit_sync = params->explicit_sync;
if (params->num_modifier_lists == 0) {
/* If we don't have modifiers, fall back to the legacy "scanout" flag */
info->wsi.scanout = true;
} else {
@@ -402,8 +596,8 @@ wsi_configure_native_image(const struct wsi_swapchain *chain,
}
uint32_t max_modifier_count = 0;
for (uint32_t l = 0; l < num_modifier_lists; l++)
max_modifier_count = MAX2(max_modifier_count, num_modifiers[l]);
for (uint32_t l = 0; l < params->num_modifier_lists; l++)
max_modifier_count = MAX2(max_modifier_count, params->num_modifiers[l]);
uint64_t *image_modifiers =
vk_alloc(&chain->alloc, sizeof(*image_modifiers) * max_modifier_count,
@@ -412,13 +606,13 @@ wsi_configure_native_image(const struct wsi_swapchain *chain,
goto fail_oom;
uint32_t image_modifier_count = 0;
for (uint32_t l = 0; l < num_modifier_lists; l++) {
for (uint32_t l = 0; l < params->num_modifier_lists; l++) {
/* Walk the modifier lists and construct a list of supported
* modifiers.
*/
for (uint32_t i = 0; i < num_modifiers[l]; i++) {
if (get_modifier_props(info, modifiers[l][i]))
image_modifiers[image_modifier_count++] = modifiers[l][i];
for (uint32_t i = 0; i < params->num_modifiers[l]; i++) {
if (get_modifier_props(info, params->modifiers[l][i]))
image_modifiers[image_modifier_count++] = params->modifiers[l][i];
}
/* We only want to take the modifiers from the first list */
@@ -484,7 +678,7 @@ wsi_create_native_image_mem(const struct wsi_swapchain *chain,
const struct wsi_memory_allocate_info memory_wsi_info = {
.sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
.pNext = NULL,
.implicit_sync = true,
.implicit_sync = !info->explicit_sync,
};
const VkExportMemoryAllocateInfo memory_export_info = {
.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
@@ -572,8 +766,7 @@ wsi_create_prime_image_mem(const struct wsi_swapchain *chain,
{
VkResult result =
wsi_create_buffer_blit_context(chain, info, image,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
true);
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
if (result != VK_SUCCESS)
return result;
@@ -590,15 +783,21 @@ wsi_create_prime_image_mem(const struct wsi_swapchain *chain,
static VkResult
wsi_configure_prime_image(UNUSED const struct wsi_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
bool use_modifier,
wsi_memory_type_select_cb select_buffer_memory_type,
const struct wsi_drm_image_params *params,
struct wsi_image_info *info)
{
bool use_modifier = params->num_modifier_lists > 0;
wsi_memory_type_select_cb select_buffer_memory_type =
params->same_gpu ? wsi_select_device_memory_type :
prime_select_buffer_memory_type;
VkResult result = wsi_configure_image(chain, pCreateInfo,
0 /* handle_types */, info);
if (result != VK_SUCCESS)
return result;
info->explicit_sync = params->explicit_sync;
wsi_configure_buffer_image(chain, pCreateInfo,
WSI_PRIME_LINEAR_STRIDE_ALIGN, 4096,
info);
@@ -633,17 +832,172 @@ wsi_drm_configure_image(const struct wsi_swapchain *chain,
assert(params->base.image_type == WSI_IMAGE_TYPE_DRM);
if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
bool use_modifier = params->num_modifier_lists > 0;
wsi_memory_type_select_cb select_buffer_memory_type =
params->same_gpu ? wsi_select_device_memory_type :
prime_select_buffer_memory_type;
return wsi_configure_prime_image(chain, pCreateInfo, use_modifier,
select_buffer_memory_type, info);
return wsi_configure_prime_image(chain, pCreateInfo,
params,
info);
} else {
return wsi_configure_native_image(chain, pCreateInfo,
params->num_modifier_lists,
params->num_modifiers,
params->modifiers,
params,
info);
}
}
/* Bitmask describing how "done" an image's explicit-sync timelines are.
 * "Materialized" means the fence for the timeline point exists (the wait
 * is available); "signalled" means the point has actually completed.
 */
enum wsi_explicit_sync_state_flags
{
WSI_ES_STATE_RELEASE_MATERIALIZED = (1u << 0),
WSI_ES_STATE_RELEASE_SIGNALLED = (1u << 1),
WSI_ES_STATE_ACQUIRE_SIGNALLED = (1u << 2),
};
/* Levels of "freeness"
* 0 -> Acquire Signalled + Release Signalled
* 1 -> Acquire Signalled + Release Materialized
* 2 -> Release Signalled
* 3 -> Release Materialized
*/
/* Ordered most-free to least-free; the acquire heuristic scans these in
 * order and takes the first level any image satisfies. */
static const uint32_t wsi_explicit_sync_free_levels[] = {
(WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
(WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED),
(WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_RELEASE_SIGNALLED),
(WSI_ES_STATE_RELEASE_MATERIALIZED),
};
/* Query the current explicit-sync state flags for an image by reading its
 * acquire/release syncobj timeline progress.
 *
 * Returns a mask of WSI_ES_STATE_* flags; returns 0 (not free at all) if
 * the syncobj query itself fails.
 */
static uint32_t
wsi_drm_image_explicit_sync_state(struct vk_device *device, struct wsi_image *image)
{
if (image->explicit_sync[WSI_ES_RELEASE].timeline == 0) {
/* This image has never been used in a timeline.
* It must be free.
*/
return WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED | WSI_ES_STATE_ACQUIRE_SIGNALLED;
}
uint64_t points[WSI_ES_COUNT] = { 0 };
uint32_t handles[WSI_ES_COUNT] = {
image->explicit_sync[WSI_ES_ACQUIRE].handle,
image->explicit_sync[WSI_ES_RELEASE].handle
};
int ret = drmSyncobjQuery(device->drm_fd, handles, points, WSI_ES_COUNT);
if (ret)
return 0;
uint32_t flags = 0;
if (points[WSI_ES_ACQUIRE] >= image->explicit_sync[WSI_ES_ACQUIRE].timeline) {
flags |= WSI_ES_STATE_ACQUIRE_SIGNALLED;
}
if (points[WSI_ES_RELEASE] >= image->explicit_sync[WSI_ES_RELEASE].timeline) {
/* Signalled implies materialized. */
flags |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED;
} else {
uint32_t first_signalled;
/* Not signalled yet: poll (absolute timeout 0) with WAIT_AVAILABLE to
 * check whether the release fence has at least materialized. */
ret = drmSyncobjTimelineWait(device->drm_fd, &handles[WSI_ES_RELEASE], &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
if (ret == 0)
flags |= WSI_ES_STATE_RELEASE_MATERIALIZED;
}
return flags;
}
/* Convert a relative timeout into the absolute deadline expected by
 * drmSyncobjTimelineWait. Syncobj timeouts are signed, so the sum is
 * clamped at INT64_MAX rather than allowed to wrap.
 */
static uint64_t
wsi_drm_rel_timeout_to_abs(uint64_t rel_timeout_ns)
{
const uint64_t now_ns = os_time_get_nano();

if (rel_timeout_ns > INT64_MAX - now_ns)
return INT64_MAX;

return now_ns + rel_timeout_ns;
}
/* Pick the best unacquired image to hand back from AcquireNextImageKHR, or
 * wait until one's release point materializes.
 *
 * Preference order: among images not currently acquired by the app, take
 * the image in the highest "free level" (see wsi_explicit_sync_free_levels)
 * that was presented longest ago (lowest present_serial). If none qualify,
 * block with WAIT_AVAILABLE until any release point materializes, up to
 * rel_timeout_ns.
 *
 * Returns VK_SUCCESS with *image_index set, VK_NOT_READY/VK_TIMEOUT on
 * timeout (depending on whether a nonzero timeout was given),
 * VK_ERROR_OUT_OF_DATE_KHR on other wait errors, or
 * VK_ERROR_FEATURE_NOT_PRESENT without libdrm.
 */
VkResult
wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
uint32_t image_count,
struct wsi_image **images,
uint64_t rel_timeout_ns,
uint32_t *image_index)
{
#ifdef HAVE_LIBDRM
STACK_ARRAY(uint32_t, handles, image_count);
STACK_ARRAY(uint64_t, points, image_count);
STACK_ARRAY(uint32_t, indices, image_count);
STACK_ARRAY(uint32_t, flags, image_count);
VK_FROM_HANDLE(vk_device, device, chain->device);
int ret = 0;
/* We don't need to wait for the merged timeline on the CPU,
* only on the GPU side of things.
*
* We already know that the CPU side for the acquire has materialized,
* for all images in this array.
* That's what "busy"/"free" essentially represents.
*/
/* Gather state for every image the app hasn't already acquired. */
uint32_t unacquired_image_count = 0;
for (uint32_t i = 0; i < image_count; i++) {
if (images[i]->acquired)
continue;
flags[unacquired_image_count] = wsi_drm_image_explicit_sync_state(device, images[i]);
handles[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].handle;
points[unacquired_image_count] = images[i]->explicit_sync[WSI_ES_RELEASE].timeline;
indices[unacquired_image_count] = i;
unacquired_image_count++;
}
/* Handle the case where there are no images to possibly acquire. */
if (!unacquired_image_count) {
ret = -ETIME;
goto done;
}
/* Find the most optimal image using the free levels above. */
for (uint32_t free_level_idx = 0; free_level_idx < ARRAY_SIZE(wsi_explicit_sync_free_levels); free_level_idx++) {
uint32_t free_level = wsi_explicit_sync_free_levels[free_level_idx];
uint64_t present_serial = UINT64_MAX;
for (uint32_t i = 0; i < unacquired_image_count; i++) {
/* Pick the image that was presented longest ago inside
* of this free level, so it has the highest chance of
* being totally free the soonest.
*/
if ((flags[i] & free_level) == free_level &&
images[indices[i]]->present_serial < present_serial) {
*image_index = indices[i];
present_serial = images[indices[i]]->present_serial;
}
}
/* present_serial moved off UINT64_MAX iff some image matched. */
if (present_serial != UINT64_MAX)
goto done;
}
/* Use DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE so we do not need to wait for the
* compositor's GPU work to be finished to acquire on the CPU side.
*
* We will forward the GPU signal to the VkSemaphore/VkFence of the acquire.
*/
uint32_t first_signalled;
ret = drmSyncobjTimelineWait(device->drm_fd, handles, points, unacquired_image_count,
wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
&first_signalled);
/* Return the first image that materialized. */
if (ret != 0)
goto done;
*image_index = indices[first_signalled];
done:
STACK_ARRAY_FINISH(flags);
STACK_ARRAY_FINISH(indices);
STACK_ARRAY_FINISH(points);
STACK_ARRAY_FINISH(handles);
if (ret == 0)
return VK_SUCCESS;
else if (ret == -ETIME)
return rel_timeout_ns ? VK_TIMEOUT : VK_NOT_READY;
else
return VK_ERROR_OUT_OF_DATE_KHR;
#else
return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
}

View File

@@ -63,6 +63,7 @@ struct wsi_drm_image_params {
struct wsi_base_image_params base;
bool same_gpu;
bool explicit_sync;
uint32_t num_modifier_lists;
const uint32_t *num_modifiers;
@@ -84,6 +85,7 @@ struct wsi_image_info {
VkImageFormatListCreateInfo format_list;
VkImageDrmFormatModifierListCreateInfoEXT drm_mod_list;
bool explicit_sync;
bool prime_use_linear_modifier;
/* Not really part of VkImageCreateInfo but needed to figure out the
@@ -112,6 +114,21 @@ struct wsi_image_info {
struct wsi_image *image);
};
enum wsi_explicit_sync_timelines
{
WSI_ES_ACQUIRE,
WSI_ES_RELEASE,
WSI_ES_COUNT,
};
struct wsi_image_explicit_sync_timeline {
VkSemaphore semaphore;
uint64_t timeline;
int fd;
uint32_t handle;
};
enum wsi_swapchain_blit_type {
WSI_SWAPCHAIN_NO_BLIT,
WSI_SWAPCHAIN_BUFFER_BLIT,
@@ -128,6 +145,13 @@ struct wsi_image {
VkDeviceMemory memory;
VkCommandBuffer *cmd_buffers;
} blit;
/* Whether or not the image has been acquired
* on the CPU side via acquire_next_image.
*/
bool acquired;
uint64_t present_serial;
struct wsi_image_explicit_sync_timeline explicit_sync[WSI_ES_COUNT];
#ifndef _WIN32
uint64_t drm_modifier;
@@ -158,6 +182,8 @@ struct wsi_swapchain {
struct wsi_image_info image_info;
uint32_t image_count;
uint64_t present_serial;
struct {
enum wsi_swapchain_blit_type type;
@@ -266,8 +292,7 @@ VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
const struct wsi_image_info *info,
struct wsi_image *image,
VkExternalMemoryHandleTypeFlags handle_types,
bool implicit_sync);
VkExternalMemoryHandleTypeFlags handle_types);
VkResult
wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
@@ -319,6 +344,26 @@ wsi_create_sync_for_dma_buf_wait(const struct wsi_swapchain *chain,
const struct wsi_image *image,
enum vk_sync_features sync_features,
struct vk_sync **sync_out);
VkResult
wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
const struct wsi_image *image,
enum vk_sync_features req_features,
struct vk_sync **sync_out);
VkResult
wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
struct wsi_image *image);
void
wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
struct wsi_image *image);
VkResult
wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
uint32_t image_count,
struct wsi_image **images,
uint64_t rel_timeout_ns,
uint32_t *image_index);
#endif
struct wsi_interface {

View File

@@ -37,12 +37,14 @@
#include "drm-uapi/drm_fourcc.h"
#include "vk_instance.h"
#include "vk_device.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "linux-dmabuf-unstable-v1-client-protocol.h"
#include "presentation-time-client-protocol.h"
#include "linux-drm-syncobj-v1-client-protocol.h"
#include "tearing-control-v1-client-protocol.h"
#include <util/compiler.h>
@@ -104,6 +106,7 @@ struct wsi_wl_display {
struct zwp_linux_dmabuf_v1 *wl_dmabuf;
struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
struct wp_tearing_control_manager_v1 *tearing_control_manager;
struct wp_linux_drm_syncobj_manager_v1 *wl_syncobj;
struct dmabuf_feedback_format_table format_table;
@@ -137,6 +140,8 @@ struct wsi_wl_image {
int shm_fd;
void *shm_ptr;
unsigned shm_size;
struct wp_linux_drm_syncobj_timeline_v1 *wl_syncobj_timeline[WSI_ES_COUNT];
};
enum wsi_wl_buffer_type {
@@ -156,6 +161,8 @@ struct wsi_wl_surface {
struct zwp_linux_dmabuf_feedback_v1 *wl_dmabuf_feedback;
struct dmabuf_feedback dmabuf_feedback, pending_dmabuf_feedback;
struct wp_linux_drm_syncobj_surface_v1 *wl_syncobj_surface;
};
struct wsi_wl_swapchain {
@@ -198,6 +205,14 @@ struct wsi_wl_swapchain {
VK_DEFINE_NONDISP_HANDLE_CASTS(wsi_wl_swapchain, base.base, VkSwapchainKHR,
VK_OBJECT_TYPE_SWAPCHAIN_KHR)
static bool
wsi_wl_use_explicit_sync(struct wsi_wl_display *display, struct wsi_device *device)
{
return device->has_timeline_semaphore &&
(device->timeline_semaphore_export_handle_types & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) &&
display->wl_syncobj != NULL;
}
enum wsi_wl_fmt_flag {
WSI_WL_FMT_ALPHA = 1 << 0,
WSI_WL_FMT_OPAQUE = 1 << 1,
@@ -799,6 +814,9 @@ registry_handle_global(void *data, struct wl_registry *registry,
MIN2(version, ZWP_LINUX_DMABUF_V1_GET_DEFAULT_FEEDBACK_SINCE_VERSION));
zwp_linux_dmabuf_v1_add_listener(display->wl_dmabuf,
&dmabuf_listener, display);
} else if (strcmp(interface, wp_linux_drm_syncobj_manager_v1_interface.name) == 0) {
display->wl_syncobj =
wl_registry_bind(registry, name, &wp_linux_drm_syncobj_manager_v1_interface, 1);
}
}
@@ -830,6 +848,8 @@ wsi_wl_display_finish(struct wsi_wl_display *display)
u_vector_finish(&display->formats);
if (display->wl_shm)
wl_shm_destroy(display->wl_shm);
if (display->wl_syncobj)
wp_linux_drm_syncobj_manager_v1_destroy(display->wl_syncobj);
if (display->wl_dmabuf)
zwp_linux_dmabuf_v1_destroy(display->wl_dmabuf);
if (display->wp_presentation_notwrapped)
@@ -1337,6 +1357,9 @@ wsi_wl_surface_destroy(VkIcdSurfaceBase *icd_surface, VkInstance _instance,
struct wsi_wl_surface *wsi_wl_surface =
wl_container_of((VkIcdSurfaceWayland *)icd_surface, wsi_wl_surface, base);
if (wsi_wl_surface->wl_syncobj_surface)
wp_linux_drm_syncobj_surface_v1_destroy(wsi_wl_surface->wl_syncobj_surface);
if (wsi_wl_surface->wl_dmabuf_feedback) {
zwp_linux_dmabuf_feedback_v1_destroy(wsi_wl_surface->wl_dmabuf_feedback);
dmabuf_feedback_fini(&wsi_wl_surface->dmabuf_feedback);
@@ -1598,6 +1621,15 @@ static VkResult wsi_wl_surface_init(struct wsi_wl_surface *wsi_wl_surface,
wsi_wl_surface->display->queue);
}
if (wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device)) {
wsi_wl_surface->wl_syncobj_surface =
wp_linux_drm_syncobj_manager_v1_get_surface(wsi_wl_surface->display->wl_syncobj,
wsi_wl_surface->surface);
if (!wsi_wl_surface->wl_syncobj_surface)
goto fail;
}
return VK_SUCCESS;
fail:
@@ -1666,7 +1698,6 @@ wsi_wl_swapchain_release_images(struct wsi_swapchain *wsi_chain,
struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
for (uint32_t i = 0; i < count; i++) {
uint32_t index = indices[i];
assert(chain->images[index].busy);
chain->images[index].busy = false;
}
return VK_SUCCESS;
@@ -1813,9 +1844,34 @@ wsi_wl_swapchain_wait_for_present(struct wsi_swapchain *wsi_chain,
}
/* Explicit-sync path for vkAcquireNextImageKHR.
 *
 * Instead of polling wl_buffer.release events (the implicit path), wait for
 * any image's DRM-syncobj release timeline point to signal, letting
 * wsi_drm_wait_for_explicit_sync_release() pick the most optimal free image
 * and store its index in *image_index.
 *
 * Returns VK_ERROR_OUT_OF_DATE_KHR if the swapchain has been retired,
 * otherwise the result of the wait (bounded by info->timeout).
 *
 * NOTE: the diff rendering had fused the removed pre-rename signature
 * (wsi_wl_swapchain_acquire_next_image) with this one, leaving two parameter
 * lists on a single function; only the renamed signature is kept here.
 */
static VkResult
wsi_wl_swapchain_acquire_next_image_explicit(struct wsi_swapchain *wsi_chain,
                                             const VkAcquireNextImageInfoKHR *info,
                                             uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   /* See comments in queue_present() */
   if (chain->retired)
      return VK_ERROR_OUT_OF_DATE_KHR;

   /* Build the wsi_image pointer array the common DRM wait helper expects. */
   STACK_ARRAY(struct wsi_image*, images, wsi_chain->image_count);
   for (uint32_t i = 0; i < chain->base.image_count; i++)
      images[i] = &chain->images[i].base;

   VkResult result = wsi_drm_wait_for_explicit_sync_release(wsi_chain,
                                                            wsi_chain->image_count,
                                                            images,
                                                            info->timeout,
                                                            image_index);

   STACK_ARRAY_FINISH(images);
   return result;
}
static VkResult
wsi_wl_swapchain_acquire_next_image_implicit(struct wsi_swapchain *wsi_chain,
const VkAcquireNextImageInfoKHR *info,
uint32_t *image_index)
{
struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
struct timespec start_time, end_time;
@@ -1972,6 +2028,21 @@ wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
return VK_ERROR_OUT_OF_DATE_KHR;
}
if (chain->base.image_info.explicit_sync) {
struct wsi_wl_image *image = &chain->images[image_index];
/* Incremented by signal in base queue_present. */
uint64_t acquire_point = image->base.explicit_sync[WSI_ES_ACQUIRE].timeline;
uint64_t release_point = image->base.explicit_sync[WSI_ES_RELEASE].timeline;
wp_linux_drm_syncobj_surface_v1_set_acquire_point(wsi_wl_surface->wl_syncobj_surface,
image->wl_syncobj_timeline[WSI_ES_ACQUIRE],
(uint32_t)(acquire_point >> 32),
(uint32_t)(acquire_point & 0xffffffff));
wp_linux_drm_syncobj_surface_v1_set_release_point(wsi_wl_surface->wl_syncobj_surface,
image->wl_syncobj_timeline[WSI_ES_RELEASE],
(uint32_t)(release_point >> 32),
(uint32_t)(release_point & 0xffffffff));
}
assert(image_index < chain->base.image_count);
wl_surface_attach(wsi_wl_surface->surface, chain->images[image_index].buffer, 0, 0);
@@ -2127,6 +2198,17 @@ wsi_wl_image_init(struct wsi_wl_swapchain *chain,
chain->drm_format,
0);
zwp_linux_buffer_params_v1_destroy(params);
if (chain->base.image_info.explicit_sync) {
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
image->wl_syncobj_timeline[i] =
wp_linux_drm_syncobj_manager_v1_import_timeline(display->wl_syncobj,
image->base.explicit_sync[i].fd);
if (!image->wl_syncobj_timeline[i])
goto fail_image;
}
}
break;
}
@@ -2137,11 +2219,17 @@ wsi_wl_image_init(struct wsi_wl_swapchain *chain,
if (!image->buffer)
goto fail_image;
wl_buffer_add_listener(image->buffer, &buffer_listener, image);
/* No need to listen for release if we are explicit sync. */
if (!chain->base.image_info.explicit_sync)
wl_buffer_add_listener(image->buffer, &buffer_listener, image);
return VK_SUCCESS;
fail_image:
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (image->wl_syncobj_timeline[i])
wp_linux_drm_syncobj_timeline_v1_destroy(image->wl_syncobj_timeline[i]);
}
wsi_destroy_image(&chain->base, &image->base);
return VK_ERROR_OUT_OF_HOST_MEMORY;
@@ -2151,6 +2239,10 @@ static void
wsi_wl_swapchain_images_free(struct wsi_wl_swapchain *chain)
{
for (uint32_t i = 0; i < chain->base.image_count; i++) {
for (uint32_t j = 0; j < WSI_ES_COUNT; j++) {
if (chain->images[i].wl_syncobj_timeline[j])
wp_linux_drm_syncobj_timeline_v1_destroy(chain->images[i].wl_syncobj_timeline[j]);
}
if (chain->images[i].buffer) {
wl_buffer_destroy(chain->images[i].buffer);
wsi_destroy_image(&chain->base, &chain->images[i].base);
@@ -2322,6 +2414,7 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
drm_image_params = (struct wsi_drm_image_params) {
.base.image_type = WSI_IMAGE_TYPE_DRM,
.same_gpu = wsi_wl_surface->display->same_gpu,
.explicit_sync = wsi_wl_use_explicit_sync(wsi_wl_surface->display, wsi_device),
};
/* Use explicit DRM format modifiers when both the server and the driver
* support them.
@@ -2362,7 +2455,9 @@ wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
chain->base.destroy = wsi_wl_swapchain_destroy;
chain->base.get_wsi_image = wsi_wl_swapchain_get_wsi_image;
chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
chain->base.acquire_next_image = chain->base.image_info.explicit_sync
? wsi_wl_swapchain_acquire_next_image_explicit
: wsi_wl_swapchain_acquire_next_image_implicit;
chain->base.queue_present = wsi_wl_swapchain_queue_present;
chain->base.release_images = wsi_wl_swapchain_release_images;
chain->base.set_present_mode = wsi_wl_swapchain_set_present_mode;