From c9aec05228dc6fa5573c56dba6bed43250228069 Mon Sep 17 00:00:00 2001
From: Felix Manlunas
Date: Tue, 28 Aug 2018 18:51:30 -0700
Subject: liquidio: improve soft command handling

1. Set LIO_SC_MAX_TMO_MS as the maximum timeout value for a soft command
   (sc).  All sc's use this value as a hard timeout value.  Add the field
   expiry_time to struct octeon_soft_command to keep the hard timeout
   value.  The fields wait_time and timeout in struct octeon_soft_command
   will be obsoleted in the last patch of this patch series.

2. Add processing of synchronous sc's to the sc response thread,
   lio_process_ordered_list().  The memory allocated for a synchronous sc
   is freed by lio_process_ordered_list() back to the sc pool.

3. Add two response lists for lio_process_ordered_list() to manage the
   storage allocated for sc's:

   OCTEON_DONE_SC_LIST keeps the sc's that will be freed to the pool
   after their requestors have finished processing the responses.

   OCTEON_ZOMBIE_SC_LIST keeps the sc's that have hit the
   LIO_SC_MAX_TMO_MS hard timeout.

   When an sc hits the hard timeout, lio_process_ordered_list() rechecks
   its status 1 ms later.  If the firmware still has not updated the
   status by then, the sc is moved from the OCTEON_DONE_SC_LIST response
   list to the OCTEON_ZOMBIE_SC_LIST response list.  The sc's on the
   OCTEON_ZOMBIE_SC_LIST response list are freed when the driver is
   unloaded.

Signed-off-by: Weilin Chang
Signed-off-by: Felix Manlunas
Signed-off-by: David S. Miller
---
 .../net/ethernet/cavium/liquidio/request_manager.c | 114 +++++++++++++------
 1 file changed, 85 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 5de5ce9a8f54..bd0153e16deb 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -409,33 +409,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
 			else
 				irh = (struct octeon_instr_irh *)
 					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
+				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
+					       [OCTEON_ORDERED_SC_LIST].lock,
+					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
@@ -755,8 +744,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
@@ -791,11 +779,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 	return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node, &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
 
+	octeon_free_sc_zombie_list(oct);
+
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
 	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -824,6 +877,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) >
 		SOFT_COMMAND_BUFFER_SIZE);
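Note: the hunks above only add the bookkeeping side.  Below is a minimal,
hypothetical sketch of the requestor-side flow these lists imply; it is
not code from this series.  octeon_alloc_soft_command(),
octeon_send_soft_command(), octeon_free_soft_command(), sc->status_word,
COMPLETION_WORD_INIT, and the caller_is_done flag appear in the diff
above, while example_send_sc(), the polling loop, and the error handling
are assumptions about how a caller would use them.

	/* Hypothetical requestor-side usage of the new lists (not from
	 * the patch).  Assumes process context, so it may sleep while
	 * polling.  Needs <linux/delay.h> and <linux/jiffies.h>.
	 */
	static int example_send_sc(struct octeon_device *oct)
	{
		struct octeon_soft_command *sc;
		unsigned long expiry;
		int ret = 0;

		/* rdatasize = 16 so the firmware has a response buffer
		 * to overwrite (octeon_alloc_soft_command() now bumps a
		 * zero rdatasize to 16 anyway).
		 */
		sc = octeon_alloc_soft_command(oct, 0, 16, 0);
		if (!sc)
			return -ENOMEM;

		/* ... fill in the command words for the request here ... */

		if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED) {
			/* never queued, so it is safe to free directly */
			octeon_free_soft_command(oct, sc);
			return -EIO;
		}

		/* Poll the status word the firmware overwrites on
		 * completion; LIO_SC_MAX_TMO_MS is the hard cap that
		 * sc->expiry_time now enforces.
		 */
		expiry = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
		while (*sc->status_word == COMPLETION_WORD_INIT &&
		       time_before(jiffies, expiry))
			usleep_range(1000, 2000);

		if (*sc->status_word == COMPLETION_WORD_INIT)
			ret = -ETIMEDOUT;

		/* Hand the sc back to the response thread: setting
		 * caller_is_done (assumed write-side counterpart of the
		 * READ_ONCE() in octeon_free_sc_done_list()) lets the
		 * thread return the sc to the pool, or park a still
		 * timed-out sc on OCTEON_ZOMBIE_SC_LIST.
		 */
		WRITE_ONCE(sc->caller_is_done, 1);

		return ret;
	}

The point of this handshake is that neither side frees memory the other
might still touch: the response thread reclaims an sc only after the
requestor signals it is done, and an sc whose status the firmware never
updated is quarantined on the zombie list rather than recycled, so a late
firmware write cannot land in a reused buffer.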