drm/xe: add interface to request physical alignment for buffer objects
[ Upstream commit 3ad86ae1da97d0091f673f08846848714f6dd745 ] Add xe_bo_create_pin_map_at_aligned(), which augments xe_bo_create_pin_map_at() with an alignment parameter, allowing callers to pass the required alignment if it differs from the default. Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com> Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com> Signed-off-by: Mika Kahola <mika.kahola@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20241009151947.2240099-2-juhapekka.heikkila@gmail.com Stable-dep-of: f16873f42a06 ("drm/xe: move DPT l2 flush to a more sensible place") Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
98e5c71e7e
commit
ce1ef3b64e
@@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
|
||||
|
||||
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
|
||||
NULL, size, start, end,
|
||||
ttm_bo_type_kernel, flags);
|
||||
ttm_bo_type_kernel, flags, 0);
|
||||
if (IS_ERR(bo)) {
|
||||
err = PTR_ERR(bo);
|
||||
bo = NULL;
|
||||
|
||||
@@ -1454,7 +1454,8 @@ static struct xe_bo *
|
||||
__xe_bo_create_locked(struct xe_device *xe,
|
||||
struct xe_tile *tile, struct xe_vm *vm,
|
||||
size_t size, u64 start, u64 end,
|
||||
u16 cpu_caching, enum ttm_bo_type type, u32 flags)
|
||||
u16 cpu_caching, enum ttm_bo_type type, u32 flags,
|
||||
u64 alignment)
|
||||
{
|
||||
struct xe_bo *bo = NULL;
|
||||
int err;
|
||||
@@ -1483,6 +1484,8 @@ __xe_bo_create_locked(struct xe_device *xe,
|
||||
if (IS_ERR(bo))
|
||||
return bo;
|
||||
|
||||
bo->min_align = alignment;
|
||||
|
||||
/*
|
||||
* Note that instead of taking a reference no the drm_gpuvm_resv_bo(),
|
||||
* to ensure the shared resv doesn't disappear under the bo, the bo
|
||||
@@ -1523,16 +1526,18 @@ struct xe_bo *
|
||||
xe_bo_create_locked_range(struct xe_device *xe,
|
||||
struct xe_tile *tile, struct xe_vm *vm,
|
||||
size_t size, u64 start, u64 end,
|
||||
enum ttm_bo_type type, u32 flags)
|
||||
enum ttm_bo_type type, u32 flags, u64 alignment)
|
||||
{
|
||||
return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
|
||||
return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
|
||||
flags, alignment);
|
||||
}
|
||||
|
||||
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
|
||||
struct xe_vm *vm, size_t size,
|
||||
enum ttm_bo_type type, u32 flags)
|
||||
{
|
||||
return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
|
||||
return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
|
||||
flags, 0);
|
||||
}
|
||||
|
||||
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
|
||||
@@ -1542,7 +1547,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
|
||||
{
|
||||
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
|
||||
cpu_caching, ttm_bo_type_device,
|
||||
flags | XE_BO_FLAG_USER);
|
||||
flags | XE_BO_FLAG_USER, 0);
|
||||
if (!IS_ERR(bo))
|
||||
xe_bo_unlock_vm_held(bo);
|
||||
|
||||
@@ -1565,6 +1570,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
|
||||
struct xe_vm *vm,
|
||||
size_t size, u64 offset,
|
||||
enum ttm_bo_type type, u32 flags)
|
||||
{
|
||||
return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
|
||||
type, flags, 0);
|
||||
}
|
||||
|
||||
struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
|
||||
struct xe_tile *tile,
|
||||
struct xe_vm *vm,
|
||||
size_t size, u64 offset,
|
||||
enum ttm_bo_type type, u32 flags,
|
||||
u64 alignment)
|
||||
{
|
||||
struct xe_bo *bo;
|
||||
int err;
|
||||
@@ -1576,7 +1592,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
|
||||
flags |= XE_BO_FLAG_GGTT;
|
||||
|
||||
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
|
||||
flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
|
||||
flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
|
||||
alignment);
|
||||
if (IS_ERR(bo))
|
||||
return bo;
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ struct xe_bo *
|
||||
xe_bo_create_locked_range(struct xe_device *xe,
|
||||
struct xe_tile *tile, struct xe_vm *vm,
|
||||
size_t size, u64 start, u64 end,
|
||||
enum ttm_bo_type type, u32 flags);
|
||||
enum ttm_bo_type type, u32 flags, u64 alignment);
|
||||
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
|
||||
struct xe_vm *vm, size_t size,
|
||||
enum ttm_bo_type type, u32 flags);
|
||||
@@ -94,6 +94,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
|
||||
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
|
||||
struct xe_vm *vm, size_t size, u64 offset,
|
||||
enum ttm_bo_type type, u32 flags);
|
||||
struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
|
||||
struct xe_tile *tile,
|
||||
struct xe_vm *vm,
|
||||
size_t size, u64 offset,
|
||||
enum ttm_bo_type type, u32 flags,
|
||||
u64 alignment);
|
||||
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
|
||||
const void *data, size_t size,
|
||||
enum ttm_bo_type type, u32 flags);
|
||||
|
||||
@@ -76,6 +76,11 @@ struct xe_bo {
|
||||
|
||||
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
|
||||
struct list_head vram_userfault_link;
|
||||
|
||||
/** @min_align: minimum alignment needed for this BO if different
|
||||
* from default
|
||||
*/
|
||||
u64 min_align;
|
||||
};
|
||||
|
||||
#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
|
||||
|
||||
@@ -620,7 +620,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
|
||||
u64 start, u64 end)
|
||||
{
|
||||
int err;
|
||||
u64 alignment = XE_PAGE_SIZE;
|
||||
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
|
||||
|
||||
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
|
||||
alignment = SZ_64K;
|
||||
|
||||
Reference in New Issue
Block a user