diff options
| author | Raag Jadav <raag.jadav@intel.com> | 2025-10-30 17:53:54 +0530 |
|---|---|---|
| committer | Ashutosh Dixit <ashutosh.dixit@intel.com> | 2025-11-27 09:05:17 -0800 |
| commit | 99234edab8e1fd3fd2309193a3b3169970a7e770 (patch) | |
| tree | 64d6fa6aea2f2f59ea3326a2d081f003bc2148e8 /drivers/gpu/drm/xe/xe_guc_submit.c | |
| parent | 8e2610d9a5edefb99b1a708796a8f733358e5898 (diff) | |
drm/xe/vf: Update pause/unpause() helpers with VF naming
Now that pause/unpause() helpers have been updated for VF migration
use case, update their naming to match the functionality and while at it,
add IS_SRIOV_VF() assert to make sure they are not abused.
v7: Add IS_SRIOV_VF() assert (Matthew Brost)
Use "vf" suffix (Michal)
Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Raag Jadav <raag.jadav@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patch.msgid.link/20251030122357.128825-2-raag.jadav@intel.com
Diffstat (limited to 'drivers/gpu/drm/xe/xe_guc_submit.c')
| -rw-r--r-- | drivers/gpu/drm/xe/xe_guc_submit.c | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 9a0842398e95..cc7559cab9b3 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -2169,14 +2169,15 @@ static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q) } /** - * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC. + * xe_guc_submit_pause_vf - Stop further runs of submission tasks for VF. * @guc: the &xe_guc struct instance whose scheduler is to be disabled */ -void xe_guc_submit_pause(struct xe_guc *guc) +void xe_guc_submit_pause_vf(struct xe_guc *guc) { struct xe_exec_queue *q; unsigned long index; + xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc))); xe_gt_assert(guc_to_gt(guc), vf_recovery(guc)); mutex_lock(&guc->submission_state.lock); @@ -2267,14 +2268,15 @@ static void guc_exec_queue_unpause_prepare(struct xe_guc *guc, } /** - * xe_guc_submit_unpause_prepare - Prepare unpause submission tasks on given GuC. + * xe_guc_submit_unpause_prepare_vf - Prepare unpause submission tasks for VF. * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause */ -void xe_guc_submit_unpause_prepare(struct xe_guc *guc) +void xe_guc_submit_unpause_prepare_vf(struct xe_guc *guc) { struct xe_exec_queue *q; unsigned long index; + xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc))); xe_gt_assert(guc_to_gt(guc), vf_recovery(guc)); mutex_lock(&guc->submission_state.lock); @@ -2342,14 +2344,16 @@ static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q) } /** - * xe_guc_submit_unpause - Allow further runs of submission tasks on given GuC. + * xe_guc_submit_unpause_vf - Allow further runs of submission tasks for VF. 
* @guc: the &xe_guc struct instance whose scheduler is to be enabled */ -void xe_guc_submit_unpause(struct xe_guc *guc) +void xe_guc_submit_unpause_vf(struct xe_guc *guc) { struct xe_exec_queue *q; unsigned long index; + xe_gt_assert(guc_to_gt(guc), IS_SRIOV_VF(guc_to_xe(guc))); + mutex_lock(&guc->submission_state.lock); xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { /* |
