Merge: [s390] [IBM 9.5 FEAT] Upgrade the qeth driver to latest from upstream, e.g. kernel 6.7

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4394

  
JIRA: https://issues.redhat.com/browse/RHEL-23681  
Tested: by IBM  
Build-Info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=61640303  
Commits:  
d565fa4300d9 s390/ism: ism driver implies smc protocol  
2fe8a236436f s390/qeth: Fix potential loss of L3-IP@ in case of network issues  
afb373ff3f54 s390/qeth: handle deferred cc1  
83781384a96b s390/ism: Properly fix receive message buffer allocation  
8a2e4d37afb8 s390/qeth: Fix kernel panic after setting hsuid  
19d1c64b7741 s390/ctcm: replace deprecated strncpy with strscpy  
e43e6d9582e0 s390/qeth: replace deprecated strncpy with strscpy  
dbc9e341e365 s390/qeth: Fix typo 'weed' in comment  
  
Signed-off-by: Tobias Huschle <thuschle@redhat.com>

Approved-by: Steve Best <sbest@redhat.com>
Approved-by: Tony Camuso <tcamuso@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Lucas Zampieri <lzampier@redhat.com>
Committed by Lucas Zampieri on 2024-06-06 19:29:42 +00:00
commit 95a7ab1f55
5 changed files with 153 additions and 104 deletions

@@ -103,10 +103,11 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
depends on PCI
imply SMC
default n
help
Select this option if you want to use the Internal Shared Memory
vPCI Adapter.
vPCI Adapter. The adapter can be used with the SMC network protocol.
To compile as a module choose M. The module name is ism.
If unsure, choose N.
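
For context, the new `imply SMC` line above creates a weak dependency: enabling ISM defaults the SMC protocol to a matching value, but unlike `select` it can still be overridden by the user. A hypothetical config fragment (not part of this merge) illustrating the intended combination:

```
# Illustrative .config fragment only (assumes a PCI-capable s390 kernel):
# with "imply SMC", picking ISM=m defaults SMC on, yet SMC remains
# user-overridable and may still be set to n explicitly.
CONFIG_PCI=y
CONFIG_ISM=m
CONFIG_SMC=m
```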

@@ -200,13 +200,13 @@ static void channel_free(struct channel *ch)
static void channel_remove(struct channel *ch)
{
struct channel **c = &channels;
char chid[CTCM_ID_SIZE+1];
char chid[CTCM_ID_SIZE];
int ok = 0;
if (ch == NULL)
return;
else
strncpy(chid, ch->id, CTCM_ID_SIZE);
strscpy(chid, ch->id, sizeof(chid));
channel_free(ch);
while (*c) {
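
For reference, a minimal sketch (not part of the backport) of the strscpy() contract that motivates the strncpy conversions in this series; the helper name and its parameters are illustrative only:

```c
#include <linux/errno.h>
#include <linux/string.h>

/* Illustrative helper: unlike strncpy(), strscpy() always NUL-terminates
 * the destination and reports truncation instead of silently dropping
 * the terminator.
 */
static int demo_copy_channel_id(char *dst, size_t dst_size, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_size);

        if (n == -E2BIG)
                return -EINVAL; /* truncated, but dst is still terminated */
        return 0;               /* n characters copied, excluding the NUL */
}
```
This guaranteed termination is also why the buffer above shrinks from CTCM_ID_SIZE+1 to CTCM_ID_SIZE: no byte needs to be reserved by hand anymore.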

@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
static const struct smcd_ops ism_ops;
#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
@@ -289,32 +288,19 @@ out:
return ret;
}
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
u32 vid)
{
union ism_query_rgid cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_QUERY_RGID;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.vlan_valid = vid_valid;
cmd.request.vlan_id = vid;
return ism_cmd(ism, &cmd);
}
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
dmb->cpu_addr, dmb->dma_addr);
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
DMA_FROM_DEVICE);
folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
struct folio *folio;
unsigned long bit;
int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -331,14 +317,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
&dmb->dma_addr,
GFP_KERNEL | __GFP_NOWARN |
__GFP_NOMEMALLOC | __GFP_NORETRY);
if (!dmb->cpu_addr)
clear_bit(dmb->sba_idx, ism->sba_bitmap);
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
__GFP_NORETRY, get_order(dmb->dmb_len));
return dmb->cpu_addr ? 0 : -ENOMEM;
if (!folio) {
rc = -ENOMEM;
goto out_bit;
}
dmb->cpu_addr = folio_address(folio);
dmb->dma_addr = dma_map_page(&ism->pdev->dev,
virt_to_page(dmb->cpu_addr), 0,
dmb->dmb_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
rc = -ENOMEM;
goto out_free;
}
return 0;
out_free:
kfree(dmb->cpu_addr);
out_bit:
clear_bit(dmb->sba_idx, ism->sba_bitmap);
return rc;
}
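
The rework above replaces dma_alloc_coherent() with a folio-backed receive buffer that is streaming-mapped for DMA. A generic, hedged sketch of that allocate/map/unwind pattern; the demo_* names and struct are illustrative, not the driver's code:

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct demo_rx_buf {
        void            *cpu_addr;
        dma_addr_t      dma_addr;
        size_t          len;
};

static int demo_alloc_rx_buf(struct device *dev, struct demo_rx_buf *buf)
{
        struct folio *folio;

        /* best effort: no OOM warnings, no emergency reserves, no retries */
        folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
                            __GFP_NORETRY, get_order(buf->len));
        if (!folio)
                return -ENOMEM;

        buf->cpu_addr = folio_address(folio);
        buf->dma_addr = dma_map_page(dev, virt_to_page(buf->cpu_addr), 0,
                                     buf->len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, buf->dma_addr)) {
                folio_put(folio);       /* undo the allocation on map failure */
                return -ENOMEM;
        }
        return 0;
}

static void demo_free_rx_buf(struct device *dev, struct demo_rx_buf *buf)
{
        dma_unmap_page(dev, buf->dma_addr, buf->len, DMA_FROM_DEVICE);
        folio_put(virt_to_folio(buf->cpu_addr));
}
```
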
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
@@ -429,23 +431,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
union ism_sig_ieq cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.trigger_irq = trigger_irq;
cmd.request.event_code = event_code;
cmd.request.info = info;
return ism_cmd(ism, &cmd);
}
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
@@ -503,14 +488,6 @@ u8 *ism_get_seid(void)
}
EXPORT_SYMBOL_GPL(ism_get_seid);
static u16 ism_get_chid(struct ism_dev *ism)
{
if (!ism || !ism->pdev)
return 0;
return to_zpci(ism->pdev)->pchid;
}
static void ism_handle_event(struct ism_dev *ism)
{
struct ism_event *entry;
@@ -569,11 +546,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
return IRQ_HANDLED;
}
static u64 ism_get_local_gid(struct ism_dev *ism)
{
return ism->local_gid;
}
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
@@ -774,6 +746,22 @@ module_exit(ism_exit);
/*************************** SMC-D Implementation *****************************/
#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
u32 vid)
{
union ism_query_rgid cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_QUERY_RGID;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.vlan_valid = vid_valid;
cmd.request.vlan_id = vid;
return ism_cmd(ism, &cmd);
}
static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
u32 vid)
{
@@ -811,6 +799,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
union ism_sig_ieq cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
cmd.request.hdr.len = sizeof(cmd.request);
cmd.request.rgid = rgid;
cmd.request.trigger_irq = trigger_irq;
cmd.request.event_code = event_code;
cmd.request.info = info;
return ism_cmd(ism, &cmd);
}
static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
@@ -830,11 +835,24 @@ static int smcd_supports_v2(void)
SYSTEM_EID.type[0] != '0';
}
static u64 ism_get_local_gid(struct ism_dev *ism)
{
return ism->local_gid;
}
static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
return ism_get_local_gid(smcd->priv);
}
static u16 ism_get_chid(struct ism_dev *ism)
{
if (!ism || !ism->pdev)
return 0;
return to_zpci(ism->pdev)->pchid;
}
static u16 smcd_get_chid(struct smcd_dev *smcd)
{
return ism_get_chid(smcd->priv);

@@ -364,22 +364,6 @@ out:
return rc;
}
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
dev_err(&card->gdev->dev, "Failed to create completion queue\n");
return -ENOMEM;
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
}
return 0;
}
static void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
@@ -388,6 +372,25 @@ static void qeth_free_cq(struct qeth_card *card)
}
}
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
if (!card->qdio.c_q) {
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
dev_err(&card->gdev->dev,
"Failed to create completion queue\n");
return -ENOMEM;
}
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
qeth_free_cq(card);
}
return 0;
}
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
@@ -1179,6 +1182,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
}
}
/**
* qeth_irq() - qeth interrupt handler
* @cdev: ccw device
* @intparm: expect pointer to iob
* @irb: Interruption Response Block
*
* In the good path:
* corresponding qeth channel is locked with last used iob as active_cmd.
* But this function is also called for error interrupts.
*
* Caller ensures that:
* Interrupts are disabled; ccw device lock is held;
*
*/
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
@@ -1220,11 +1237,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
return;
@@ -1268,6 +1284,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
@@ -1276,6 +1293,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
/* channel command hasn't started: retry.
* active_cmd is still set to last iob
*/
QETH_CARD_TEXT(card, 2, "irqcc1");
rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
(addr_t)iob, 0, 0, iob->timeout);
if (rc) {
QETH_DBF_MESSAGE(2,
"ccw retry on %x failed, rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_unlock_channel(card, channel);
qeth_cancel_cmd(iob, rc);
}
return;
}
qeth_unlock_channel(card, channel);
if (iob) {
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
@@ -2594,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "allcqdbf");
/* completion */
if (qeth_alloc_cq(card))
goto out_err;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
@@ -2629,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
/* completion */
if (qeth_alloc_cq(card))
goto out_freeoutq;
return 0;
out_freeoutq:
@@ -2643,6 +2680,8 @@ out_freeoutq:
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
qeth_free_cq(card);
out_err:
return -ENOMEM;
}
@@ -2650,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
qeth_free_cq(card);
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3675,7 +3715,7 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
/*
* check if weed have to switch to non-packing mode or if
* check if we have to switch to non-packing mode or if
* we have to get a pci flag out on the queue
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
@@ -3708,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
int rc;
if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
rc = -1;
goto out;
} else {
if (card->options.cq == cq) {
rc = 0;
goto out;
}
qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
}
out:
return rc;
if (card->options.cq == QETH_CQ_NOTAVAILABLE)
return -1;
card->options.cq = cq;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
@@ -6226,7 +6253,7 @@ static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
if (!new_entry)
goto err_dbg;
strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
new_entry->dbf_info = card->debug;
mutex_lock(&qeth_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qeth_dbf_list);
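
The qeth_irq() changes above (commit afb373ff3f54 in the list, see the new kernel-doc comment and the cc == 1 branch) handle a deferred condition code 1: the channel program was never started, so the last command buffer is resubmitted instead of being cancelled, and the channel is kept locked while the command is still outstanding. A hedged sketch of just the check-and-retry step; the helper name and parameters are illustrative:

```c
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/scsw.h>

/* Illustrative only: mirrors the cc == 1 branch added to qeth_irq().
 * The caller is assumed to hold the ccw device lock, as qeth_irq() does.
 */
static int demo_retry_deferred_start(struct ccw_device *cdev, struct irb *irb,
                                     struct ccw1 *cpa, unsigned long intparm,
                                     int timeout)
{
        if (!scsw_cmd_is_valid_cc(&irb->scsw) || irb->scsw.cmd.cc != 1)
                return 0;       /* not a deferred start, nothing to redo */

        /* cc 1: the command was not started, so issue it again */
        return ccw_device_start_timeout(cdev, cpa, intparm, 0, 0, timeout);
}
```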

@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
continue;
} else {
/* prepare for recovery */
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
if (!rc) {
if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
/* keep it in the records */
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
/* bad address */
hash_del(&addr->hnode);
kfree(addr);
}