accel/ivpu: Improve buffer object logging
commit a01e93ee44f7ed76f872d0ede82f8d31bf0a048a upstream.
- Fix missing alloc log when drm_gem_handle_create() fails in
drm_vma_node_allow() and open callback is not called
- Add ivpu_bo->ctx_id that enables logging the actual context
  id instead of using 0 as the default
- Add a couple of WARNs and errors so that we can catch more
  memory-corruption issues
Fixes: 37dee2a2f4 ("accel/ivpu: Improve buffer object debug logs")
Cc: stable@vger.kernel.org # v6.8+
Reviewed-by: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://lore.kernel.org/r/20250506091303.262034-1-jacek.lawrynowicz@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
2facd42665
commit
648f1d5446
@@ -26,7 +26,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
|
||||
{
|
||||
ivpu_dbg(vdev, BO,
|
||||
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
|
||||
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
|
||||
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
|
||||
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
|
||||
(bool)bo->base.base.import_attach);
|
||||
}
|
||||
@@ -92,8 +92,6 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
|
||||
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
|
||||
}
|
||||
|
||||
ivpu_dbg_bo(vdev, bo, "alloc");
|
||||
|
||||
mutex_unlock(&bo->lock);
|
||||
|
||||
drm_dev_exit(idx);
|
||||
@@ -172,7 +170,7 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
|
||||
return &bo->base.base;
|
||||
}
|
||||
|
||||
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
|
||||
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
|
||||
{
|
||||
struct drm_gem_shmem_object *shmem;
|
||||
struct ivpu_bo *bo;
|
||||
@@ -190,6 +188,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
|
||||
return ERR_CAST(shmem);
|
||||
|
||||
bo = to_ivpu_bo(&shmem->base);
|
||||
bo->ctx_id = ctx_id;
|
||||
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
|
||||
bo->flags = flags;
|
||||
|
||||
@@ -197,6 +196,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
|
||||
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
|
||||
mutex_unlock(&vdev->bo_list_lock);
|
||||
|
||||
ivpu_dbg_bo(vdev, bo, "alloc");
|
||||
|
||||
return bo;
|
||||
}
|
||||
|
||||
@@ -235,8 +236,13 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
|
||||
mutex_unlock(&vdev->bo_list_lock);
|
||||
|
||||
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
|
||||
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
|
||||
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
|
||||
|
||||
ivpu_bo_unbind_locked(bo);
|
||||
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
|
||||
drm_WARN_ON(&vdev->drm, bo->ctx);
|
||||
|
||||
mutex_destroy(&bo->lock);
|
||||
|
||||
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
|
||||
@@ -271,7 +277,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
|
||||
if (size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
bo = ivpu_bo_alloc(vdev, size, args->flags);
|
||||
bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
|
||||
if (IS_ERR(bo)) {
|
||||
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
|
||||
bo, file_priv->ctx.id, args->size, args->flags);
|
||||
@@ -279,7 +285,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
|
||||
}
|
||||
|
||||
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
|
||||
if (!ret)
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
|
||||
bo, file_priv->ctx.id, args->size, args->flags);
|
||||
else
|
||||
args->vpu_addr = bo->vpu_addr;
|
||||
|
||||
drm_gem_object_put(&bo->base.base);
|
||||
@@ -302,7 +311,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
|
||||
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
|
||||
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
|
||||
|
||||
bo = ivpu_bo_alloc(vdev, size, flags);
|
||||
bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
|
||||
if (IS_ERR(bo)) {
|
||||
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
|
||||
bo, range->start, size, flags);
|
||||
@@ -406,7 +415,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
|
||||
mutex_lock(&bo->lock);
|
||||
|
||||
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
|
||||
bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
|
||||
bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
|
||||
bo->flags, kref_read(&bo->base.base.refcount));
|
||||
|
||||
if (bo->base.pages)
|
||||
|
||||
@@ -21,6 +21,7 @@ struct ivpu_bo {
|
||||
u64 vpu_addr;
|
||||
u32 flags;
|
||||
u32 job_status; /* Valid only for command buffer */
|
||||
u32 ctx_id;
|
||||
bool mmu_mapped;
|
||||
};
|
||||
|
||||
|
||||
Reference in New Issue
Block a user