Import libdrm 2.4.100

Author: jsg
Date: 2019-11-27 02:09:48 +00:00
parent 974a819012
commit a628e08ec1
46 changed files with 1785 additions and 197 deletions

amdgpu/amdgpu-symbol-check

@@ -1,4 +1,4 @@
#!/bin/bash
#!/bin/sh
set -u
@@ -46,15 +46,22 @@ amdgpu_cs_fence_to_handle
amdgpu_cs_import_syncobj
amdgpu_cs_query_fence_status
amdgpu_cs_query_reset_state
amdgpu_cs_query_reset_state2
amdgpu_query_sw_info
amdgpu_cs_signal_semaphore
amdgpu_cs_submit
amdgpu_cs_submit_raw
amdgpu_cs_submit_raw2
amdgpu_cs_syncobj_export_sync_file
amdgpu_cs_syncobj_export_sync_file2
amdgpu_cs_syncobj_import_sync_file
amdgpu_cs_syncobj_import_sync_file2
amdgpu_cs_syncobj_query
amdgpu_cs_syncobj_reset
amdgpu_cs_syncobj_signal
amdgpu_cs_syncobj_timeline_signal
amdgpu_cs_syncobj_timeline_wait
amdgpu_cs_syncobj_transfer
amdgpu_cs_syncobj_wait
amdgpu_cs_wait_fences
amdgpu_cs_wait_semaphore

amdgpu/amdgpu.h

@@ -87,8 +87,8 @@ enum amdgpu_bo_handle_type {
/** DMA-buf fd handle */
amdgpu_bo_handle_type_dma_buf_fd = 2,
/** KMS handle, but re-importing as a DMABUF handle through
* drmPrimeHandleToFD is forbidden. (Glamor does that)
/** Deprecated: same behaviour as amdgpu_bo_handle_type_kms;
* use that type instead of this one
*/
amdgpu_bo_handle_type_kms_noimport = 3,
};
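
Callers that previously exported buffers with the deprecated type can simply pass amdgpu_bo_handle_type_kms; a minimal sketch (not part of this import), assuming a valid amdgpu_bo_handle "bo" and the usual libdrm_amdgpu include path:

#include <stdint.h>
#include <amdgpu.h>

/* Export a buffer object as a KMS handle; the deprecated
 * amdgpu_bo_handle_type_kms_noimport now behaves identically. */
static int export_kms_handle(amdgpu_bo_handle bo, uint32_t *kms_handle)
{
	return amdgpu_bo_export(bo, amdgpu_bo_handle_type_kms, kms_handle);
}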
@@ -942,6 +942,21 @@ int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
uint32_t *state, uint32_t *hangs);
/**
* Query the reset state of a specific GPU context.
*
* \param context - \c [in] GPU Context handle
* \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
* \sa amdgpu_cs_ctx_create()
*
*/
int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags);
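
For context, a caller-side sketch (not part of this import) that checks the returned bits, assuming AMDGPU_CTX_QUERY2_FLAGS_RESET and AMDGPU_CTX_QUERY2_FLAGS_GUILTY from amdgpu_drm.h and a context created with amdgpu_cs_ctx_create():

#include <stdbool.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Returns true if the context was affected by a GPU reset;
 * *guilty is set when this context caused the reset. */
static bool context_was_reset(amdgpu_context_handle ctx, bool *guilty)
{
	uint64_t flags = 0;

	if (amdgpu_cs_query_reset_state2(ctx, &flags) != 0)
		return false;
	*guilty = !!(flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY);
	return !!(flags & AMDGPU_CTX_QUERY2_FLAGS_RESET);
}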
/*
* Command Buffers Management
*
@@ -1516,6 +1531,23 @@ int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs, uint32_t syncobj_count);
/**
* Signal kernel timeline sync objects.
*
* \param dev - \c [in] device handle
* \param syncobjs - \c [in] array of sync object handles
* \param points - \c [in] array of timeline points
* \param syncobj_count - \c [in] number of handles in syncobjs
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count);
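
A minimal usage sketch (not part of this import), assuming "dev" came from amdgpu_device_initialize() and "handle" is a timeline syncobj:

#include <stdint.h>
#include <amdgpu.h>

/* Signal point 42 on a single timeline syncobj. */
static int signal_point_42(amdgpu_device_handle dev, uint32_t handle)
{
	uint64_t point = 42;

	return amdgpu_cs_syncobj_timeline_signal(dev, &handle, &point, 1);
}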
/**
* Wait for one or all sync objects to signal.
*
@@ -1536,6 +1568,45 @@ int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
/**
* Wait for one or all sync objects to signal at the given timeline points.
*
* \param dev - \c [in] device handle
* \param handles - \c [in] array of sync object handles
* \param points - \c [in] array of timeline points to wait on, one per handle
* \param num_handles - \c [in] number of handles in the array
* \param timeout_nsec - \c [in] timeout in nanoseconds
* \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
* \param first_signaled - \c [out] index of the first signaled sync object
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled);
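
A sketch of a blocking wait on several timeline points (not part of this import), assuming the DRM_SYNCOBJ_WAIT_FLAGS_* values pulled in via xf86drm.h:

#include <stdint.h>
#include <xf86drm.h>	/* DRM_SYNCOBJ_WAIT_FLAGS_* */
#include <amdgpu.h>

/* Wait until every (handle, point) pair has signaled or the timeout
 * expires; first_signaled is not needed when waiting for all. */
static int wait_all_points(amdgpu_device_handle dev,
			   uint32_t *handles, uint64_t *points,
			   unsigned count, int64_t timeout_nsec)
{
	return amdgpu_cs_syncobj_timeline_wait(dev, handles, points, count,
					       timeout_nsec,
					       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
					       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
					       NULL);
}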
/**
* Query sync object payloads.
*
* \param dev - \c [in] device handle
* \param handles - \c [in] array of sync object handles
* \param points - \c [out] array of returned sync points, representing
*                 each syncobj's current payload.
* \param num_handles - \c [in] number of handles in the array
*
* \return 0 on success\n
* -ETIME - Timeout
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles);
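
A small sketch (not part of this import) that reads back the current payload of one syncobj:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>

/* Print the current timeline payload (last signaled point) of a syncobj. */
static void dump_payload(amdgpu_device_handle dev, uint32_t handle)
{
	uint64_t payload = 0;

	if (amdgpu_cs_syncobj_query(dev, &handle, &payload, 1) == 0)
		printf("syncobj %u payload: %" PRIu64 "\n", handle, payload);
}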
/**
* Export kernel sync object to shareable fd.
*
@@ -1594,6 +1665,62 @@ int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
uint32_t syncobj,
int sync_file_fd);
/**
* Export kernel timeline sync object to a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param flags - \c [in] transfer flags (e.g. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
* \param sync_file_fd - \c [out] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd);
/**
* Import kernel timeline sync object from a sync_file.
*
* \param dev - \c [in] device handle
* \param syncobj - \c [in] sync object handle
* \param point - \c [in] timeline point
* \param sync_file_fd - \c [in] sync_file file descriptor.
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd);
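
The two functions above pair naturally; a sketch (not part of this import) that moves one timeline point between syncobjs via a sync_file fd, with flags = 0 assuming the source point already has a fence attached:

#include <stdint.h>
#include <unistd.h>
#include <amdgpu.h>

/* Export timeline point "point" of "src" as a sync_file fd, then attach
 * that fence to the same point of "dst".  Error handling is simplified. */
static int copy_point_via_sync_file(amdgpu_device_handle dev,
				    uint32_t src, uint32_t dst, uint64_t point)
{
	int fd, r;

	r = amdgpu_cs_syncobj_export_sync_file2(dev, src, point, 0, &fd);
	if (r)
		return r;
	r = amdgpu_cs_syncobj_import_sync_file2(dev, dst, point, fd);
	close(fd);
	return r;
}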
/**
* Transfer a fence between sync objects (syncobjs).
*
* \param dev - \c [in] device handle
* \param dst_handle - \c [in] destination sync object handle
* \param dst_point - \c [in] timeline point; 0 means dst is a binary syncobj
* \param src_handle - \c [in] source sync object handle
* \param src_point - \c [in] timeline point; 0 means src is a binary syncobj
* \param flags - \c [in] transfer flags (e.g. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
*
*/
int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags);
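
A usage sketch (not part of this import): lifting a fence from a binary syncobj onto a timeline point, relying on 0 denoting a binary syncobj:

#include <stdint.h>
#include <amdgpu.h>

/* Install the fence held by binary syncobj "binary" at point "point" of
 * timeline syncobj "timeline". */
static int binary_to_timeline(amdgpu_device_handle dev,
			      uint32_t timeline, uint64_t point,
			      uint32_t binary)
{
	return amdgpu_cs_syncobj_transfer(dev, timeline, point, binary, 0, 0);
}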
/**
* Export an amdgpu fence as a handle (syncobj or fd).

amdgpu/amdgpu_cs.c

@@ -147,12 +147,12 @@ drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
int master_fd,
unsigned priority)
{
union drm_amdgpu_sched args;
int r;
if (!dev || !context || master_fd < 0)
return -EINVAL;
union drm_amdgpu_sched args;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
@@ -188,6 +188,25 @@ drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
return r;
}
drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
uint64_t *flags)
{
union drm_amdgpu_ctx args;
int r;
if (!context)
return -EINVAL;
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
args.in.ctx_id = context->id;
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
&args, sizeof(args));
if (!r)
*flags = args.out.state.flags;
return r;
}
/**
* Submit command to kernel DRM
* \param dev - \c [in] Device handle
@@ -674,6 +693,18 @@ drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
const uint32_t *syncobjs,
uint64_t *points,
uint32_t syncobj_count)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineSignal(dev->fd, syncobjs,
points, syncobj_count);
}
drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
uint32_t *handles, unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
@@ -686,6 +717,29 @@ drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles,
int64_t timeout_nsec, unsigned flags,
uint32_t *first_signaled)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
timeout_nsec, flags, first_signaled);
}
drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
uint32_t *handles, uint64_t *points,
unsigned num_handles)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}
drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
uint32_t handle,
int *shared_fd)
@@ -726,6 +780,78 @@ drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}
drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
uint32_t flags,
int *sync_file_fd)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
syncobj, point, flags);
if (ret)
goto out;
ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
uint32_t syncobj,
uint64_t point,
int sync_file_fd)
{
uint32_t binary_handle;
int ret;
if (NULL == dev)
return -EINVAL;
if (!point)
return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
if (ret)
return ret;
ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
if (ret)
goto out;
ret = drmSyncobjTransfer(dev->fd, syncobj, point,
binary_handle, 0, 0);
out:
drmSyncobjDestroy(dev->fd, binary_handle);
return ret;
}
drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
uint32_t dst_handle,
uint64_t dst_point,
uint32_t src_handle,
uint64_t src_point,
uint32_t flags)
{
if (NULL == dev)
return -EINVAL;
return drmSyncobjTransfer(dev->fd,
dst_handle, dst_point,
src_handle, src_point,
flags);
}
drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
amdgpu_context_handle context,
amdgpu_bo_list_handle bo_list_handle,
@@ -733,12 +859,13 @@ drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
if (num_chunks == 0)
return -EINVAL;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
@@ -763,10 +890,11 @@ drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
struct drm_amdgpu_cs_chunk *chunks,
uint64_t *seq_no)
{
union drm_amdgpu_cs cs = {0};
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
int i, r;
memset(&cs, 0, sizeof(cs));
chunk_array = alloca(sizeof(uint64_t) * num_chunks);
for (i = 0; i < num_chunks; i++)
chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
@@ -803,9 +931,10 @@ drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
uint32_t what,
uint32_t *out_handle)
{
union drm_amdgpu_fence_to_handle fth = {0};
union drm_amdgpu_fence_to_handle fth;
int r;
memset(&fth, 0, sizeof(fth));
fth.in.fence.ctx_id = fence->context->id;
fth.in.fence.ip_type = fence->ip_type;
fth.in.fence.ip_instance = fence->ip_instance;

amdgpu/amdgpu_device.c

@@ -43,8 +43,8 @@
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle fd_list;
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static amdgpu_device_handle dev_list;
static int fd_compare(int fd1, int fd2)
{
@@ -95,13 +95,13 @@ static int amdgpu_get_auth(int fd, int *auth)
static void amdgpu_device_free_internal(amdgpu_device_handle dev)
{
amdgpu_device_handle *node = &fd_list;
amdgpu_device_handle *node = &dev_list;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
while (*node != dev && (*node)->next)
node = &(*node)->next;
*node = (*node)->next;
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
close(dev->fd);
if ((dev->flink_fd >= 0) && (dev->fd != dev->flink_fd))
@@ -155,16 +155,16 @@ drm_public int amdgpu_device_initialize(int fd,
*device_handle = NULL;
pthread_mutex_lock(&fd_mutex);
pthread_mutex_lock(&dev_mutex);
r = amdgpu_get_auth(fd, &flag_auth);
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (1) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
for (dev = fd_list; dev; dev = dev->next)
for (dev = dev_list; dev; dev = dev->next)
if (fd_compare(dev->fd, fd) == 0)
break;
@@ -173,7 +173,7 @@ drm_public int amdgpu_device_initialize(int fd,
if (r) {
fprintf(stderr, "%s: amdgpu_get_auth (2) failed (%i)\n",
__func__, r);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}
if ((flag_auth) && (!flag_authexist)) {
@@ -182,14 +182,14 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
amdgpu_device_reference(device_handle, dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return 0;
}
dev = calloc(1, sizeof(struct amdgpu_device));
if (!dev) {
fprintf(stderr, "%s: calloc failed\n", __func__);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return -ENOMEM;
}
@@ -265,9 +265,9 @@ drm_public int amdgpu_device_initialize(int fd,
*major_version = dev->major_version;
*minor_version = dev->minor_version;
*device_handle = dev;
dev->next = fd_list;
fd_list = dev;
pthread_mutex_unlock(&fd_mutex);
dev->next = dev_list;
dev_list = dev;
pthread_mutex_unlock(&dev_mutex);
return 0;
@@ -275,7 +275,7 @@ cleanup:
if (dev->fd >= 0)
close(dev->fd);
free(dev);
pthread_mutex_unlock(&fd_mutex);
pthread_mutex_unlock(&dev_mutex);
return r;
}

amdgpu/meson.build

@@ -59,7 +59,7 @@ ext_libdrm_amdgpu = declare_dependency(
test(
'amdgpu-symbol-check',
prog_bash,
find_program('amdgpu-symbol-check'),
env : env_test,
args : [files('amdgpu-symbol-check'), libdrm_amdgpu]
args : libdrm_amdgpu,
)