@@ -257,6 +257,26 @@ struct msm_gpu_perfcntr {
257257 */
258258#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
259259
260+ /**
261+ * struct msm_file_private - per-drm_file context
262+ *
263+ * @queuelock: synchronizes access to submitqueues list
264+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
265+ * @queueid: counter incremented each time a submitqueue is created,
266+ * used to assign &msm_gpu_submitqueue.id
267+ * @aspace: the per-process GPU address-space
268+ * @ref: reference count
269+ * @seqno: unique per process seqno
270+ */
271+ struct msm_file_private {
272+ rwlock_t queuelock ;
273+ struct list_head submitqueues ;
274+ int queueid ;
275+ struct msm_gem_address_space * aspace ;
276+ struct kref ref ;
277+ int seqno ;
278+ };
279+
260280/**
261281 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
262282 *
@@ -304,6 +324,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
304324}
305325
306326/**
 327+ * struct msm_gpu_submitqueue - Userspace created context.
328+ *
307329 * A submitqueue is associated with a gl context or vk queue (or equiv)
308330 * in userspace.
309331 *
@@ -321,7 +343,7 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
321343 * seqno, protected by submitqueue lock
322344 * @lock: submitqueue lock
323345 * @ref: reference count
324- * @entity: the submit job-queue
346+ * @entity: the submit job-queue
325347 */
326348struct msm_gpu_submitqueue {
327349 int id ;
@@ -421,6 +443,40 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
421443int msm_gpu_pm_suspend (struct msm_gpu * gpu );
422444int msm_gpu_pm_resume (struct msm_gpu * gpu );
423445
446+ int msm_submitqueue_init (struct drm_device * drm , struct msm_file_private * ctx );
447+ struct msm_gpu_submitqueue * msm_submitqueue_get (struct msm_file_private * ctx ,
448+ u32 id );
449+ int msm_submitqueue_create (struct drm_device * drm ,
450+ struct msm_file_private * ctx ,
451+ u32 prio , u32 flags , u32 * id );
452+ int msm_submitqueue_query (struct drm_device * drm , struct msm_file_private * ctx ,
453+ struct drm_msm_submitqueue_query * args );
454+ int msm_submitqueue_remove (struct msm_file_private * ctx , u32 id );
455+ void msm_submitqueue_close (struct msm_file_private * ctx );
456+
457+ void msm_submitqueue_destroy (struct kref * kref );
458+
459+ static inline void __msm_file_private_destroy (struct kref * kref )
460+ {
461+ struct msm_file_private * ctx = container_of (kref ,
462+ struct msm_file_private , ref );
463+
464+ msm_gem_address_space_put (ctx -> aspace );
465+ kfree (ctx );
466+ }
467+
468+ static inline void msm_file_private_put (struct msm_file_private * ctx )
469+ {
470+ kref_put (& ctx -> ref , __msm_file_private_destroy );
471+ }
472+
473+ static inline struct msm_file_private * msm_file_private_get (
474+ struct msm_file_private * ctx )
475+ {
476+ kref_get (& ctx -> ref );
477+ return ctx ;
478+ }
479+
424480void msm_devfreq_init (struct msm_gpu * gpu );
425481void msm_devfreq_cleanup (struct msm_gpu * gpu );
426482void msm_devfreq_resume (struct msm_gpu * gpu );
0 commit comments