RDMA/mlx5: Store ndescs instead of the translation table size

Bugzilla: https://bugzilla.redhat.com/2049451
Upstream-status: v5.18-rc1

commit 9ee2516c43823652da597633aed9646dac51c1f8
Author: Aharon Landau <aharonl@nvidia.com>
Date:   Tue Feb 15 19:55:32 2022 +0200

    RDMA/mlx5: Store ndescs instead of the translation table size

    Currently, ent->xlt stores the translation table size. This data should
    not be stored in the cache entry but be written directly to the mailbox.
    Store ndescs instead, and deduce the translation table size from it
    according to the access mode.

    Link: https://lore.kernel.org/r/e9dbfaa1f279793a6bd28ee5a31cb4f0f0d70f05.1644947594.git.leonro@nvidia.com
    Signed-off-by: Aharon Landau <aharonl@nvidia.com>
    Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
    Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Signed-off-by: Mohammad Kabat <mkabat@redhat.com>
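
For reference, a minimal sketch of the size derivation this patch introduces
(assuming MLX5_IB_UMR_OCTOWORD is 16 bytes, struct mlx5_mtt is 8 bytes and
struct mlx5_klm is 16 bytes, as in the upstream mlx5 headers; the octo_size
name below is illustrative only):

	/* octowords needed to hold ndescs translation descriptors */
	octo_size = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / sizeof(struct mlx5_mtt)); /* MTT: ndescs / 2, rounded up */
	octo_size = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / sizeof(struct mlx5_klm)); /* KSM: ndescs */

So a 512-descriptor MTT cache entry, for example, needs 256 octowords, which
the new get_mkc_octo_size() helper computes and writes directly into the mkey
mailbox instead of caching it in the entry.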

--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -763,9 +763,9 @@ struct mlx5_cache_ent {
 
 	char			name[4];
 	u32			order;
-	u32			xlt;
 	u32			access_mode;
 	u32			page;
+	unsigned int		ndescs;
 
 	u8 disabled:1;
 	u8 fill_to_high_water:1;

--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -176,6 +176,25 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	spin_unlock_irqrestore(&ent->lock, flags);
 }
 
+static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+{
+	int ret = 0;
+
+	switch (access_mode) {
+	case MLX5_MKC_ACCESS_MODE_MTT:
+		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
					   sizeof(struct mlx5_mtt));
+		break;
+	case MLX5_MKC_ACCESS_MODE_KSM:
+		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
					   sizeof(struct mlx5_klm));
+		break;
+	default:
+		WARN_ON(1);
+	}
+	return ret;
+}
+
 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
 {
 	struct mlx5_ib_mr *mr;
@@ -191,7 +210,8 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
 	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
 	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
 
-	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
+	MLX5_SET(mkc, mkc, translations_octword_size,
+		 get_mkc_octo_size(ent->access_mode, ent->ndescs));
 	MLX5_SET(mkc, mkc, log_page_size, ent->page);
 	return mr;
 }
@@ -701,8 +721,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 			continue;
 
 		ent->page = PAGE_SHIFT;
-		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
-			   MLX5_IB_UMR_OCTOWORD;
+		ent->ndescs = 1 << ent->order;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&

--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1598,18 +1598,14 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
 	switch (ent->order - 2) {
 	case MLX5_IMR_MTT_CACHE_ENTRY:
 		ent->page = PAGE_SHIFT;
-		ent->xlt = MLX5_IMR_MTT_ENTRIES *
-			   sizeof(struct mlx5_mtt) /
-			   MLX5_IB_UMR_OCTOWORD;
+		ent->ndescs = MLX5_IMR_MTT_ENTRIES;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		ent->limit = 0;
 		break;
 
 	case MLX5_IMR_KSM_CACHE_ENTRY:
 		ent->page = MLX5_KSM_PAGE_SHIFT;
-		ent->xlt = mlx5_imr_ksm_entries *
-			   sizeof(struct mlx5_klm) /
-			   MLX5_IB_UMR_OCTOWORD;
+		ent->ndescs = mlx5_imr_ksm_entries;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
 		ent->limit = 0;
 		break;