RDMA/mlx5: Support OOO RX WQE consumption
Support QP with out-of-order (OOO) capabilities enabled. This allows WRs
on the receiver side of the QP to be consumed OOO, permitting the sender
side to transmit messages without guaranteeing arrival order on the
receiver side.

When enabled, the completion ordering of WRs remains in-order, regardless
of the Receive WRs consumption order. RDMA Read and RDMA Atomic operations
on the responder side continue to be executed in-order, while the ordering
of data placement for RDMA Write and Send operations is not guaranteed.

Atomic operations larger than 8 bytes are currently not supported.
Therefore, when this feature is enabled, the created QP restricts its
atomic support to 8 bytes at most.

In addition, when querying the device, a new flag is returned in response
to indicate that the Kernel supports OOO QP.

Signed-off-by: Edward Srouji <edwards@nvidia.com>
Reviewed-by: Yishai Hadas <yishaih@nvidia.com>
Link: https://patch.msgid.link/06ac609a5f358c8fb0a090d22c61a2f9329d82e6.1725362773.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 8439662f6f
commit 8b36f7c3c6
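Before the diff, a minimal sketch of how user space could consume this ABI. The two constants are copied from the uapi additions below; the helper name and the standalone flow are illustrative assumptions only, since real applications would go through an updated rdma-core mlx5 provider rather than these raw values.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the uapi additions in this patch. */
#define MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP	(1 << 4)
#define MLX5_IB_MODIFY_QP_OOO_DP		(1 << 0)

/*
 * Hypothetical helper: request OOO RX WQE consumption only when the
 * kernel advertised support via its query-device response flags.
 */
static uint32_t build_modify_qp_comp_mask(uint32_t resp_flags, int want_ooo)
{
	uint32_t comp_mask = 0;

	if (want_ooo && (resp_flags & MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP))
		comp_mask |= MLX5_IB_MODIFY_QP_OOO_DP;

	return comp_mask;
}

int main(void)
{
	/* Pretend the kernel reported OOO DP support. */
	uint32_t resp_flags = MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP;

	printf("comp_mask = 0x%x\n", build_modify_qp_comp_mask(resp_flags, 1));
	return 0;
}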
@@ -1182,6 +1182,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
 
 		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
+
+		if (MLX5_CAP_GEN_2(mdev, dp_ordering_force) &&
+		    (MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc) ||
+		     MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc) ||
+		     MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc) ||
+		     MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud) ||
+		     MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc)))
+			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP;
 	}
 
 	if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
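Note that the device-wide OOO_DP response flag requires the dp_ordering_force capability plus at least one per-transport dp_ordering_ooo_all_* capability. The per-QP check at modify time, get_dp_ooo_cap() below, then gates the specific transport being used.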
@@ -521,6 +521,7 @@ struct mlx5_ib_qp {
 	struct mlx5_bf bf;
 	u8 has_rq:1;
 	u8 is_rss:1;
+	u8 is_ooo_rq:1;
 
 	/* only for user space QPs. For kernel
 	 * we have it from the bf object
@@ -1960,7 +1960,7 @@ static int atomic_size_to_mode(int size_mask)
 }
 
 static int get_atomic_mode(struct mlx5_ib_dev *dev,
-			   enum ib_qp_type qp_type)
+			   struct mlx5_ib_qp *qp)
 {
 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
 	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
@@ -1970,7 +1970,7 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 	if (!atomic)
 		return -EOPNOTSUPP;
 
-	if (qp_type == MLX5_IB_QPT_DCT)
+	if (qp->type == MLX5_IB_QPT_DCT)
 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
 	else
 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
@@ -1984,6 +1984,10 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
 	    atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
 		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
 
+	/* OOO DP QPs do not support larger than 8-Bytes atomic operations */
+	if (atomic_mode > MLX5_ATOMIC_MODE_8B && qp->is_ooo_rq)
+		atomic_mode = MLX5_ATOMIC_MODE_8B;
+
 	return atomic_mode;
 }
 
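The ">" comparison works because the MLX5_ATOMIC_MODE_* constants are ordered by operand size, so any mode above MLX5_ATOMIC_MODE_8B implies operands larger than 8 bytes. A standalone model of the clamp, using illustrative stand-in values assumed to mirror that ordering:

/* Illustrative stand-in values; the real constants live in the mlx5
 * headers and are likewise ordered by operand size.
 */
enum {
	ATOMIC_MODE_8B	= 3,
	ATOMIC_MODE_16B	= 4,
	ATOMIC_MODE_32B	= 5,
};

/* OOO DP responders execute atomics in order only up to 8 bytes,
 * so a larger negotiated mode is clamped down.
 */
static int clamp_atomic_mode_for_ooo(int atomic_mode, int is_ooo_rq)
{
	if (is_ooo_rq && atomic_mode > ATOMIC_MODE_8B)
		return ATOMIC_MODE_8B;
	return atomic_mode;
}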
@@ -2839,6 +2843,29 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 }
 
+static bool get_dp_ooo_cap(struct mlx5_core_dev *mdev, enum ib_qp_type qp_type)
+{
+	if (!MLX5_CAP_GEN_2(mdev, dp_ordering_force))
+		return false;
+
+	switch (qp_type) {
+	case IB_QPT_RC:
+		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc);
+	case IB_QPT_XRC_INI:
+	case IB_QPT_XRC_TGT:
+		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc);
+	case IB_QPT_UC:
+		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc);
+	case IB_QPT_UD:
+		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud);
+	case MLX5_IB_QPT_DCI:
+	case MLX5_IB_QPT_DCT:
+		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc);
+	default:
+		return false;
+	}
+}
+
 static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
 				bool cond, struct mlx5_ib_qp *qp)
 {
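The helper shares one capability bit per transport family: XRC initiator and target both map to dp_ordering_ooo_all_xrc, and DCI and DCT both map to dp_ordering_ooo_all_dc. Any other QP type falls through to false, so the feature cannot be requested on it.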
@@ -3365,7 +3392,7 @@ static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
 		int atomic_mode;
 
-		atomic_mode = get_atomic_mode(dev, qp->type);
+		atomic_mode = get_atomic_mode(dev, qp);
 		if (atomic_mode < 0)
 			return -EOPNOTSUPP;
 
@@ -4316,6 +4343,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
 		MLX5_SET(qpc, qpc, deth_sqpn, 1);
 
+	if (qp->is_ooo_rq && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+		MLX5_SET(qpc, qpc, dp_ordering_1, 1);
+		MLX5_SET(qpc, qpc, dp_ordering_force, 1);
+	}
+
 	mlx5_cur = to_mlx5_state(cur_state);
 	mlx5_new = to_mlx5_state(new_state);
 
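Both QPC bits are programmed only on the INIT-to-RTR transition, where the responder side of the connection is set up and where mlx5_ib_modify_qp() below latches the user's request. Per the commit message, only data placement for RDMA Write and Send becomes unordered; completions and responder-side Reads and Atomics stay in order.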
@@ -4531,7 +4563,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
 		int atomic_mode;
 
-		atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+		atomic_mode = get_atomic_mode(dev, qp);
 		if (atomic_mode < 0)
 			return -EOPNOTSUPP;
 
@@ -4573,6 +4605,10 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
 		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
 			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+		if (qp->is_ooo_rq) {
+			MLX5_SET(dctc, dctc, dp_ordering_1, 1);
+			MLX5_SET(dctc, dctc, dp_ordering_force, 1);
+		}
 
 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
@@ -4676,11 +4712,16 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				   min(udata->inlen, sizeof(ucmd))))
 			return -EFAULT;
 
-		if (ucmd.comp_mask ||
+		if (ucmd.comp_mask & ~MLX5_IB_MODIFY_QP_OOO_DP ||
 		    memchr_inv(&ucmd.burst_info.reserved, 0,
 			       sizeof(ucmd.burst_info.reserved)))
 			return -EOPNOTSUPP;
+
+		if (ucmd.comp_mask & MLX5_IB_MODIFY_QP_OOO_DP) {
+			if (!get_dp_ooo_cap(dev->mdev, qp->type))
+				return -EOPNOTSUPP;
+			qp->is_ooo_rq = 1;
+		}
 	}
 
 	if (qp->type == IB_QPT_GSI)
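A compact, self-contained model of the new ucmd handling above, useful for seeing the comp_mask contract at a glance. The struct and helper names here are invented for illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MLX5_IB_MODIFY_QP_OOO_DP (1 << 0)

struct model_qp {
	bool dp_ooo_supported;	/* stands in for get_dp_ooo_cap() */
	uint8_t is_ooo_rq;
};

static int model_handle_comp_mask(struct model_qp *qp, uint32_t comp_mask)
{
	/* Any bit other than the newly allowed OOO DP flag is rejected. */
	if (comp_mask & ~MLX5_IB_MODIFY_QP_OOO_DP)
		return -EOPNOTSUPP;

	if (comp_mask & MLX5_IB_MODIFY_QP_OOO_DP) {
		if (!qp->dp_ooo_supported)
			return -EOPNOTSUPP;
		qp->is_ooo_rq = 1;	/* consumed later at INIT->RTR */
	}
	return 0;
}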
@@ -252,6 +252,7 @@ enum mlx5_ib_query_dev_resp_flags {
 	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD	= 1 << 1,
 	MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE	= 1 << 2,
 	MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT	= 1 << 3,
+	MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP		= 1 << 4,
 };
 
 enum mlx5_ib_tunnel_offloads {
@@ -439,6 +440,10 @@ struct mlx5_ib_burst_info {
 	__u16			reserved;
 };
 
+enum mlx5_ib_modify_qp_mask {
+	MLX5_IB_MODIFY_QP_OOO_DP = 1 << 0,
+};
+
 struct mlx5_ib_modify_qp {
 	__u32			   comp_mask;
 	struct mlx5_ib_burst_info  burst_info;
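Extending comp_mask this way is compatible in both directions: older kernels rejected any nonzero comp_mask with -EOPNOTSUPP, so new user space can probe for support, and older user space that leaves comp_mask at zero is unaffected by the new check.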