LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
* [PATCH 00/10] staging: fnic2 Driver Introduction
@ 2018-04-05 21:15 Oliver Smith-Denny
  2018-04-05 21:16 ` [PATCH 01/10] staging: fnic2 add initialization Oliver Smith-Denny
                   ` (9 more replies)
  0 siblings, 10 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:15 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Oliver Smith-Denny, Sesidhar Baddela, Gian Carlo Boffa,
	linux-scsi, target-devel, linux-kernel

Hi,

Cisco is developing a target mode driver for its HBA interface called
fNIC which stands for FCoE NIC. This is a new storage functionality
supported on UCS platforms with VIC adapters. The driver is developed for
the Linux LIO target mode stack. This driver is named fnic2 to distinguish
it from the existing Cisco FCoE driver, which only supports SCSI initiator
mode. We intend to include this driver in the Linux staging tree while we
continue to develop it.


The new driver will include many new features as compared to the existing
fnic driver:

	- Support for SCSI Target Mode (This patch)

	- Multiqueue support

	- NVMe over FC support


We would appreciate any comments from the Linux community.

Thank you,

Oliver Smith-Denny

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 01/10] staging: fnic2 add initialization
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
@ 2018-04-05 21:16 ` Oliver Smith-Denny
  2018-04-06  5:07   ` Greg Kroah-Hartman
  2018-04-05 21:17 ` [PATCH 02/10] staging: fnic2 add resource allocation Oliver Smith-Denny
                   ` (8 subsequent siblings)
  9 siblings, 1 reply; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:16 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain module load and unload, global driver context,
PCI registration, PCI probe and remove, and definitions of
the fnic2 global context.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fnic2.h      | 256 ++++++++++++
 drivers/staging/fnic2/src/fnic2_main.c | 711 +++++++++++++++++++++++++++++++++
 2 files changed, 967 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fnic2.h
 create mode 100644 drivers/staging/fnic2/src/fnic2_main.c

diff --git a/drivers/staging/fnic2/src/fnic2.h b/drivers/staging/fnic2/src/fnic2.h
new file mode 100644
index 0000000..81b54da
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2.h
@@ -0,0 +1,256 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2017 Cisco Systems, Inc.  All rights reserved.
+ */
+#ifndef _FNIC2_H_
+#define _FNIC2_H_
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/version.h>
+#include <linux/mempool.h>
+#include <linux/cpumask.h>
+#include "fnic2_fdls.h"
+#include "fnic2_res.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_wq_copy.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_scsi.h"
+#include "fnic2_lio.h"
+
+/* Driver identity strings used in logs and PCI registration */
+#define DRV_NAME			"fnic2"
+#define DRV_DESCRIPTION		"Cisco FCoE Target Driver"
+#define DRV_VERSION			"0.0.0.1"
+#define JIFFIES_PER_MINUTE      	(60 * HZ)
+
+#define DESC_CLEAN_LOW_WATERMARK	8
+/* Size of the preallocated target-command (tcmd) pool */
+#define FNIC2_MAX_TCMDS 		1024
+#define MAX_FNIC2S 			16
+#define MAX_DATA_LENGTH		(1024 * 1024)
+#define ABTS_MASK			0x10000
+
+/*
+ * cdb[0] & 0x1F strips the SCSI opcode group bits, so 0x08/0x0A match
+ * the READ/WRITE command families (6/10/12/16-byte CDB variants alike).
+ */
+#define SCSI_READ_MASK	0x08
+#define SCSI_WRITE_MASK	0x0A
+#define IS_SCSI_READ_CMD(_cmd_)  ((_cmd_->fchdr.r_ctl == FC_R_CTL_FC4_CMD) && ((_cmd_->cdb[0] & 0x1F) == SCSI_READ_MASK))
+#define IS_SCSI_WRITE_CMD(_cmd_) ((_cmd_->fchdr.r_ctl == FC_R_CTL_FC4_CMD) && ((_cmd_->cdb[0] & 0x1F) == SCSI_WRITE_MASK))
+
+extern const char *fnic2_state_str[];
+extern unsigned int fnic2_log_level;
+
+/* Interrupt source indices used in legacy INTx mode */
+enum fnic2_intx_intr_index {
+	FNIC2_INTX_WQ_RQ_COPYWQ,
+	FNIC2_INTX_ERR,
+	FNIC2_INTX_NOTIFY
+};
+
+/* Interrupt vector indices used in MSI-X mode */
+enum fnic2_msix_intr_index {
+	FNIC2_MSIX_RQ,
+	FNIC2_MSIX_WQ,
+	FNIC2_MSIX_WQ_COPY,
+	FNIC2_MSIX_ERR_NOTIFY,
+	FNIC2_MSIX_INTR_MAX
+};
+
+/* Book-keeping for one requested interrupt vector */
+struct fnic2_msix_entry {
+	int		requested;
+	char		devname[IFNAMSIZ];
+	irqreturn_t	(*isr)(int, void *);
+	void		*devid;
+};
+
+/* Adapter mode; printable names live in fnic2_state_str[] */
+enum fnic2_state {
+	FNIC2_IN_FC_MODE = 0,
+	FNIC2_IN_FC_TRANS_ETH_MODE,
+	FNIC2_IN_ETH_MODE,
+	FNIC2_IN_ETH_TRANS_FC_MODE
+};
+
+#define SCSI_NO_TAG -1
+
+/* Hardware queue counts: one of each type, one CQ per queue */
+#define FNIC2_WQ_COPY_MAX 1
+#define FNIC2_WQ_MAX 1
+#define FNIC2_RQ_MAX 1
+#define FNIC2_CQ_MAX (FNIC2_WQ_COPY_MAX + FNIC2_WQ_MAX + FNIC2_RQ_MAX)
+
+/* A frame queued for deferred processing — presumably the receive
+ * path, given rx_ethhdr_stripped; confirm against the frame handlers.
+ */
+struct fnic2_frame_list {
+	/*
+	 * Link to frame lists
+	 */
+	struct list_head	links;
+	void			*fp;
+	int			frame_len;
+	int			rx_ethhdr_stripped;
+};
+
+/* Per-instance private data structure */
+struct fnic2 {
+	/* index into the global fnic2_list, assigned at probe time */
+	int						fnic2_num;
+	struct fnic2_lport				lport;
+	struct vnic_dev_bar				bar0;
+
+	struct msix_entry				msix_entry[FNIC2_MSIX_INTR_MAX];
+	struct fnic2_msix_entry				msix[FNIC2_MSIX_INTR_MAX];
+
+	struct vnic_stats				*stats;
+
+	/* time of stats update */
+	unsigned long					stats_time;
+
+	/* time of stats reset */
+	unsigned long					stats_reset_time;
+
+	struct vnic_nic_cfg				*nic_cfg;
+	char						name[IFNAMSIZ];
+
+	/* used for MSI interrupts */
+	struct timer_list				notify_timer;
+
+	unsigned int					fnic2_max_tag_id;
+	unsigned int					err_intr_offset;
+	unsigned int					link_intr_offset;
+
+	unsigned int					wq_count;
+	unsigned int					cq_count;
+
+	atomic64_t					io_cmpl_skip;
+
+	/* fnic2 device in removal */
+	uint32_t					in_remove:1;
+
+	/* stop proc. rx frames, link events */
+	uint32_t					stop_rx_link_events:1;
+
+	/* Device remove thread blocks */
+	struct completion				*remove_wait;
+
+	/* IO Counter */
+	atomic_t					in_flight;
+
+	bool						internal_reset_progress;
+
+	/* fill hole */
+	uint32_t _reserved;
+
+	enum						fnic2_state state;
+	spinlock_t					fnic2_lock;
+
+	/* VLAN tag including priority */
+	uint16_t					vlan_id;
+
+	uint8_t						data_src_addr[ETH_ALEN];
+
+	/* Internal statistics */
+	uint64_t					fcp_input_bytes;
+	uint64_t 					fcp_output_bytes;
+
+	uint32_t					link_down_cnt;
+	int						link_status;
+
+	/* membership in the global fnic2_list (fnic2_list_lock) */
+	struct list_head				list;
+	struct pci_dev					*pdev;
+	struct vnic_fc_config				config;
+	struct vnic_dev					*vdev;
+	unsigned int					raw_wq_count;
+	unsigned int					wq_copy_count;
+	unsigned int					rq_count;
+	int						fw_ack_index[FNIC2_WQ_COPY_MAX];
+	unsigned short					fw_ack_recd[FNIC2_WQ_COPY_MAX];
+	unsigned short					wq_copy_desc_low[FNIC2_WQ_COPY_MAX];
+	unsigned int					intr_count;
+	uint32_t __iomem				*legacy_pba;
+	struct fnic2_host_tag				*tags;
+
+	struct work_struct				link_work;
+	struct work_struct				frame_work;
+	struct list_head				frame_queue;
+	struct list_head				tx_queue;
+
+	/*** FIP related data members  -- start ***/
+	void						(*set_vlan)(struct fnic2 *, uint16_t vlan);
+	struct work_struct				fip_work;
+	struct list_head				fip_frame_queue;
+	struct list_head				vlan_list;
+	spinlock_t					vlans_lock;
+	struct timer_list				retry_fip_timer;
+	struct timer_list				fcs_ka_timer;
+	struct timer_list				enode_ka_timer;
+	struct timer_list				vn_ka_timer;
+
+	struct work_struct				event_work;
+	/*** FIP related data members  -- end ***/
+
+	/* copy work queue cache line section */
+	____cacheline_aligned struct vnic_wq_copy	wq_copy[FNIC2_WQ_COPY_MAX];
+	/* completion queue cache line section */
+	____cacheline_aligned struct vnic_cq		cq[FNIC2_CQ_MAX];
+
+	spinlock_t 					wq_copy_lock[FNIC2_WQ_COPY_MAX];
+
+	/* work queue cache line section */
+	____cacheline_aligned struct vnic_wq		wq[FNIC2_WQ_MAX];
+	spinlock_t					wq_lock[FNIC2_WQ_MAX];
+
+	/* receive queue cache line section */
+	____cacheline_aligned struct vnic_rq		rq[FNIC2_RQ_MAX];
+
+	/* interrupt resource cache line section */
+	____cacheline_aligned struct vnic_intr		intr[FNIC2_MSIX_INTR_MAX];
+
+	/* fnic2 related structures */
+	struct fnic2_lio				lio;
+	/* free-list over the preallocated tcmd_pool (free_list_lock) */
+	struct list_head				tcmd_list_free;
+	struct fnic2_cmd				*tcmd_pool;
+	spinlock_t					free_list_lock;
+
+	/* DBG related, temp */
+	int						freecmds;
+};
+
+/* Spread tags across the online CPUs round-robin by tag number */
+static inline int get_cpu_to_queue(uint32_t tag)
+{
+	unsigned int ncpu = num_online_cpus();
+
+	return tag % ncpu;
+}
+
+extern struct workqueue_struct *fnic2_event_queue;
+extern struct workqueue_struct *fip_event_queue;
+extern struct workqueue_struct *fnic2_tcmd_wq;
+
+void fnic2_clear_intr_mode(struct fnic2 *fnic2);
+int fnic2_set_intr_mode(struct fnic2 *fnic2);
+void fnic2_free_intr(struct fnic2 *fnic2);
+int fnic2_request_intr(struct fnic2 *fnic2);
+
+void fnic2_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+void fnic2_handle_frame(struct work_struct *work);
+void fnic2_handle_fip_frame(struct work_struct *work);
+void fnic2_handle_link(struct work_struct *work);
+int fnic2_rq_cmpl_handler(struct fnic2 *fnic2, int);
+int fnic2_alloc_rq_frame(struct vnic_rq *rq);
+void fnic2_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+void fnic2_flush_tx(struct fnic2 *);
+int fnic2_wq_copy_cmpl_handler(struct fnic2 *fnic2, int);
+int fnic2_wq_cmpl_handler(struct fnic2 *fnic2, int);
+void fnic2_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc);
+int fnic2_fw_reset_handler(struct fnic2 *fnic2);
+const char *fnic2_state_to_str(unsigned int state);
+
+void fnic2_log_q_error(struct fnic2 *fnic2);
+void fnic2_handle_link_event(struct fnic2 *fnic2);
+
+void fnic2_send_fcp_resp(struct fnic2_cmd *tcmd);
+void fnic2_send_tmr_resp(struct fnic2_cmd *tcmd, u32 status, u8 code);
+void fnic2_complete_tm_rsp(struct fnic2_cmd *tcmd);
+void fnic2_send_abort_to_lio(struct fnic2_cmd *tcmd);
+void fnic2_send_abort_to_fw(struct fnic2_cmd *tcmd);
+void fnic2_fw_abort_done(struct fnic2_cmd *tcmd);
+void fnic2_recv_tcmd_timeout_intr(struct timer_list *timer);
+
+struct fnic2_sess *fnic2_find_session(struct fnic2 *fnic2, uint64_t wwpn);
+struct fnic2_sess *fnic2_find_sess_s_id(struct fnic2 *fnic2, uint32_t s_id);
+#endif /* _FNIC2_H_ */
diff --git a/drivers/staging/fnic2/src/fnic2_main.c b/drivers/staging/fnic2/src/fnic2_main.c
new file mode 100644
index 0000000..2643772
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_main.c
@@ -0,0 +1,711 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <linux/timer.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic2.h"
+
+uint8_t fip_all_enode_macs[6] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
+#define PCI_DEVICE_ID_CISCO_TNIC	0x0045
+
+/* Timer to poll notification area for events. Used for MSI interrupts */
+#define FNIC2_NOTIFY_TIMER_PERIOD	(2 * HZ)
+
+LIST_HEAD(fnic2_list);
+DEFINE_SPINLOCK(fnic2_list_lock);
+
+/* Supported devices by fnic2 module */
+static struct pci_device_id fnic2_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_TNIC) },
+	{ 0, }
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Arulprabhu Ponnusamy <arulponn@cisco.com>");
+MODULE_AUTHOR("Gian Carlo Boffa <gcboffa@cisco.com>");
+MODULE_AUTHOR("Oliver Smith-Denny <osmithde@cisco.com>");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, fnic2_id_table);
+MODULE_LICENSE("GPL v2");
+
+extern void fnic2_lio_init(void);
+extern void fnic2_lio_cleanup(void);
+
+struct workqueue_struct *fnic2_tcmd_wq;
+
+/*
+ * fnic2_alloc() - allocate and zero one per-adapter fnic2 context.
+ *
+ * Return: the new context, or NULL on allocation failure.
+ */
+static struct fnic2 *fnic2_alloc(void)
+{
+	struct fnic2 *fnic2;
+
+	/* sizeof(*ptr) keeps the size tied to the variable's type */
+	fnic2 = kzalloc(sizeof(*fnic2), GFP_KERNEL);
+	if (!fnic2) {
+		pr_err("ERROR Unable to create memory for fnic2 structure\n");
+		return NULL;
+	}
+	return fnic2;
+}
+
+/*
+ * fnic2_alloc_tcmd_pool() - allocate the target-command pool and put
+ * every command on the adapter's free list.
+ *
+ * Each command is stamped with its pool index as cmd_tag, points back
+ * at the adapter, and gets its IO timeout timer initialized.
+ *
+ * Return: 0 on success, -ENOMEM if the pool cannot be allocated.
+ */
+static int fnic2_alloc_tcmd_pool(struct fnic2 *fnic2)
+{
+	struct fnic2_cmd *tcmd;
+	int tag;
+	int sz;
+
+	/* Allocate Cmd pool and initialize them with cmd_tag */
+	sz = sizeof(struct fnic2_cmd) * FNIC2_MAX_TCMDS;
+	/* kcalloc zeroes and checks the count * size multiply for overflow */
+	fnic2->tcmd_pool = kcalloc(FNIC2_MAX_TCMDS, sizeof(struct fnic2_cmd),
+				   GFP_KERNEL);
+	if (!fnic2->tcmd_pool) {
+		pr_err("Unable to allocate tcmd pool\n");
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+	/* informational progress message: pr_info, not pr_err */
+	pr_info("fnic2_alloc_tcmd_pool of total size: %d, pool: %pK\n", sz, fnic2->tcmd_pool);
+
+	/* Initialize free list */
+	INIT_LIST_HEAD(&fnic2->tcmd_list_free);
+
+	tcmd = &fnic2->tcmd_pool[0];
+	for (tag = 0; tag < FNIC2_MAX_TCMDS; tag++, tcmd++) {
+		tcmd->cmd_tag = tag;
+		tcmd->fnic2 = fnic2;
+		/*
+		 * Timer flags are 0; the previous code passed an
+		 * uninitialized local here, which is undefined behavior.
+		 */
+		timer_setup(&tcmd->io_timer, fnic2_recv_tcmd_timeout_intr, 0);
+		list_add_tail(&tcmd->free_list, &fnic2->tcmd_list_free);
+	}
+	spin_lock_init(&fnic2->free_list_lock);
+	fnic2->freecmds = FNIC2_MAX_TCMDS;
+
+	pr_info("fnic2_alloc_tcmd_pool done\n");
+
+	return 0;
+}
+
+/*
+ * fnic2_log_q_error() - dump the error_status of every hardware queue.
+ *
+ * Reads the per-queue error registers of the raw WQs, the RQs and the
+ * copy WQs, and logs any that report a non-zero status.
+ */
+void fnic2_log_q_error(struct fnic2 *fnic2)
+{
+	uint32_t err_stat;
+	unsigned int q;
+
+	for (q = 0; q < fnic2->raw_wq_count; q++) {
+		err_stat = ioread32(&fnic2->wq[q].ctrl->error_status);
+		if (err_stat)
+			pr_err("WQ[%d] error_status %d\n", q, err_stat);
+	}
+
+	for (q = 0; q < fnic2->rq_count; q++) {
+		err_stat = ioread32(&fnic2->rq[q].ctrl->error_status);
+		if (err_stat)
+			pr_err("RQ[%d] error_status %d\n", q, err_stat);
+	}
+
+	for (q = 0; q < fnic2->wq_copy_count; q++) {
+		err_stat = ioread32(&fnic2->wq_copy[q].ctrl->error_status);
+		if (err_stat)
+			pr_err("CWQ[%d] error_status %d\n", q, err_stat);
+	}
+}
+
+/*
+ * fnic2_handle_link_event() - queue link-event processing to the event
+ * workqueue, unless the adapter has stopped accepting rx/link events.
+ */
+void fnic2_handle_link_event(struct fnic2 *fnic2)
+{
+	unsigned long flags;
+	bool stopped;
+
+	/* sample the stop flag under the adapter lock */
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	stopped = fnic2->stop_rx_link_events;
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	if (stopped)
+		return;
+
+	queue_work(fnic2_event_queue, &fnic2->link_work);
+
+	pr_info("fnic2_handle_link_event\n");
+}
+
+/*
+ * fnic2_notify_set() - point firmware notifications at the right sink.
+ *
+ * The notify offset depends on the interrupt mode: the dedicated INTx
+ * source, no interrupt (-1) for MSI where the notify area is polled by
+ * timer, or the combined error/notify MSI-X vector.
+ *
+ * Return: 0 on success, the vnic_dev_notify_set() error, or -EINVAL
+ * when no interrupt mode has been set up yet.
+ */
+static int fnic2_notify_set(struct fnic2 *fnic2)
+{
+	int err;
+
+	switch (vnic_dev_get_intr_mode(fnic2->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = vnic_dev_notify_set(fnic2->vdev, FNIC2_INTX_NOTIFY);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		/* MSI has no notify interrupt; notify_timer polls instead */
+		err = vnic_dev_notify_set(fnic2->vdev, -1);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		err = vnic_dev_notify_set(fnic2->vdev, FNIC2_MSIX_ERR_NOTIFY);
+		break;
+	default:
+		pr_err("Interrupt mode should be set up before devcmd notify set %d\n",
+			     vnic_dev_get_intr_mode(fnic2->vdev));
+		/* return a real errno instead of a bare -1 */
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+/* MSI-mode poll timer: service events and re-arm for the next period */
+static void fnic2_notify_timer(struct timer_list *t)
+{
+	struct fnic2 *fnic2 = container_of(t, struct fnic2, notify_timer);
+
+	fnic2_handle_link_event(fnic2);
+
+	/* round_jiffies() batches wakeups with other timers to save power */
+	mod_timer(&fnic2->notify_timer,
+		  round_jiffies(jiffies + FNIC2_NOTIFY_TIMER_PERIOD));
+}
+
+/*
+ * fnic2_notify_timer_start() - arm the notify poll timer (MSI mode only).
+ *
+ * INTx and MSI-X receive notifications via interrupt, so for those
+ * modes this is a no-op.
+ */
+static void fnic2_notify_timer_start(struct fnic2 *fnic2)
+{
+	switch (vnic_dev_get_intr_mode(fnic2->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSI:
+		/*
+		 * Schedule first timeout immediately. The driver is
+		 * initialized and ready to look for link up notification
+		 */
+		mod_timer(&fnic2->notify_timer, jiffies);
+		break;
+	default:
+		/* Using intr for notification for INTx/MSI-X */
+		break;
+	}
+}
+
+/*
+ * fnic2_dev_wait() - start a vNIC device operation and poll completion.
+ * @vdev:     vNIC device handle
+ * @start:    starts the operation (e.g. vnic_dev_open)
+ * @finished: polls the operation; sets *done when it is complete
+ * @arg:      argument forwarded to @start
+ *
+ * Return: 0 once @finished reports done, a negative error from
+ * @start/@finished, or -ETIMEDOUT after the 2 second window (and at
+ * least three polls) expires.
+ */
+static int fnic2_dev_wait(struct vnic_dev *vdev,
+			  int (*start)(struct vnic_dev *, int),
+			  int (*finished)(struct vnic_dev *, int *),
+			  int arg)
+{
+	unsigned long time;
+	int done;
+	int err;
+	int count = 0;
+
+	err = start(vdev, arg);
+	if (err)
+		return err;
+
+	/* Wait for func to complete...2 seconds max */
+	/*
+	 * schedule_timeout_uninterruptible() can sometimes take a long
+	 * time to wake up, so we do not rely only on the 2 second window
+	 * in the while loop. The count makes sure finished() is polled
+	 * at least three times before returning -ETIMEDOUT.
+	 */
+	time = jiffies + (HZ * 2);
+	do {
+		err = finished(vdev, &done);
+		count++;
+		if (err)
+			return err;
+		if (done)
+			return 0;
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(time, jiffies) || (count < 3));
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * fnic2_cleanup() - quiesce and drain all hardware queues.
+ *
+ * Disables the device, masks every interrupt, disables every RQ, WQ and
+ * copy WQ, drains completed work, then frees whatever is still posted.
+ * Order matters: queues must be disabled before they are cleaned.
+ *
+ * Return: 0 on success, or the first queue-disable error.
+ */
+static int fnic2_cleanup(struct fnic2 *fnic2)
+{
+	unsigned int i;
+	int err;
+
+	vnic_dev_disable(fnic2->vdev);
+	for (i = 0; i < fnic2->intr_count; i++)
+		vnic_intr_mask(&fnic2->intr[i]);
+
+	for (i = 0; i < fnic2->rq_count; i++) {
+		err = vnic_rq_disable(&fnic2->rq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < fnic2->raw_wq_count; i++) {
+		err = vnic_wq_disable(&fnic2->wq[i]);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < fnic2->wq_copy_count; i++) {
+		err = vnic_wq_copy_disable(&fnic2->wq_copy[i]);
+		if (err)
+			return err;
+	}
+
+	/* Clean up completed IOs and FCS frames */
+	fnic2_wq_copy_cmpl_handler(fnic2, -1);
+	fnic2_wq_cmpl_handler(fnic2, -1);
+	fnic2_rq_cmpl_handler(fnic2, -1);
+
+	/* Clean up the IOs and FCS frames that have not completed */
+	for (i = 0; i < fnic2->raw_wq_count; i++)
+		vnic_wq_clean(&fnic2->wq[i], fnic2_free_wq_buf);
+	for (i = 0; i < fnic2->rq_count; i++)
+		vnic_rq_clean(&fnic2->rq[i], fnic2_free_rq_buf);
+	for (i = 0; i < fnic2->wq_copy_count; i++)
+		vnic_wq_copy_clean(&fnic2->wq_copy[i],
+				   fnic2_wq_copy_cleanup_handler);
+
+	for (i = 0; i < fnic2->cq_count; i++)
+		vnic_cq_clean(&fnic2->cq[i]);
+	for (i = 0; i < fnic2->intr_count; i++)
+		vnic_intr_clean(&fnic2->intr[i]);
+
+	return 0;
+}
+
+/* Unmap BAR0 if it was successfully mapped during probe */
+static void fnic2_iounmap(struct fnic2 *fnic2)
+{
+	void __iomem *bar_va = fnic2->bar0.vaddr;
+
+	if (bar_va)
+		iounmap(bar_va);
+}
+
+/*
+ * fnic2_set_vlan() - program the default VLAN (priority bits included).
+ *
+ * The previous default VLAN returned by the devcmd is not needed, so it
+ * is deliberately ignored (the old set-but-unused local triggered a
+ * compiler warning).
+ */
+static void fnic2_set_vlan(struct fnic2 *fnic2, uint16_t vlan_id)
+{
+	vnic_dev_set_default_vlan(fnic2->vdev, vlan_id);
+}
+
+/*
+ * fnic2_probe() - PCI probe: bring up one fnic2 adapter.
+ *
+ * Allocates the adapter context, maps BAR0, opens and configures the
+ * vNIC, initializes locks, timers, work items and the tcmd pool,
+ * enables all queues and interrupts, and starts the notify timer.
+ * On failure the error labels unwind in reverse order of setup.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int fnic2_probe(struct pci_dev *pdev,
+		       const struct pci_device_id *ent)
+{
+	struct fnic2 *fnic2;
+	struct fnic2_lport *lport;
+	int err;
+	int i;
+	unsigned long flags;
+	int fnic2_count = 0;
+	struct list_head *counter_head; /* Used to determine which fnic2 number this is */
+	uint32_t open_flags = CMD_OPENF_RQ_ENABLE_FILL;
+
+	fnic2 = fnic2_alloc();
+	if (!fnic2) {
+		pr_err("%s: Unable to allocate memory for fnic2\n", __func__);
+		/* report failure; returning 0 would claim probe succeeded */
+		return -ENOMEM;
+	}
+
+	/* Setup PCI resources */
+	pci_set_drvdata(pdev, fnic2);
+
+	fnic2->pdev = pdev;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		pr_err("Cannot enable PCI device, aborting.\n");
+		goto err_out_free_hba;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		pr_err("Cannot enable PCI resources, aborting\n");
+		goto err_out_disable_device;
+	}
+
+	pci_set_master(pdev);
+
+	/* Query PCI controller on system for DMA addressing
+	 * limitation for the device.  Try 64-bit first, and
+	 * fall back to 32-bit.
+	 */
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			pr_err("No usable DMA configuration aborting\n");
+			goto err_out_release_regions;
+		}
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			pr_err("Unable to obtain 32-bit DMA for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+	} else {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			pr_err("Unable to obtain 64-bit DMA for consistent allocations, aborting.\n");
+			goto err_out_release_regions;
+		}
+	}
+
+	/* Map vNIC resources from BAR0 */
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		pr_err("BAR0 not memory-map'able, aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	fnic2->bar0.vaddr = pci_iomap(pdev, 0, 0);
+	fnic2->bar0.bus_addr = pci_resource_start(pdev, 0);
+	fnic2->bar0.len = pci_resource_len(pdev, 0);
+
+	if (!fnic2->bar0.vaddr) {
+		pr_err("Cannot memory-map BAR0 res hdr, aborting.\n");
+		err = -ENODEV;
+		goto err_out_release_regions;
+	}
+
+	fnic2->vdev = vnic_dev_register(NULL, fnic2, pdev, &fnic2->bar0);
+	if (!fnic2->vdev) {
+		pr_err("vNIC registration failed, aborting.\n");
+		err = -ENODEV;
+		goto err_out_iounmap;
+	}
+
+	err = fnic2_dev_wait(fnic2->vdev, vnic_dev_open,
+			    vnic_dev_open_done, open_flags);
+	if (err) {
+		pr_err("vNIC dev open failed, aborting.\n");
+		goto err_out_vnic_unregister;
+	}
+
+	err = vnic_dev_init(fnic2->vdev, 0);
+	if (err) {
+		pr_err("vNIC dev init failed, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	lport = &fnic2->lport;
+	err = vnic_dev_mac_addr(fnic2->vdev, lport->hwmac);
+	if (err) {
+		pr_err("vNIC get MAC addr failed\n");
+		goto err_out_dev_close;
+	}
+	/* set data_src for point-to-point mode and to keep it non-zero */
+	memcpy(fnic2->data_src_addr, lport->hwmac, ETH_ALEN);
+
+	/* Get vNIC configuration */
+	err = fnic2_get_vnic_config(fnic2);
+	if (err) {
+		pr_err("Get vNIC configuration failed, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	fnic2_get_res_counts(fnic2);
+
+	err = fnic2_set_intr_mode(fnic2);
+	if (err) {
+		pr_err("Failed to set intr mode, aborting.\n");
+		goto err_out_dev_close;
+	}
+
+	err = fnic2_alloc_vnic_resources(fnic2);
+	if (err) {
+		pr_err("Failed to alloc vNIC resources, aborting.\n");
+		goto err_out_clear_intr;
+	}
+
+	/* initialize all fnic2 locks */
+	spin_lock_init(&fnic2->fnic2_lock);
+
+	for (i = 0; i < FNIC2_WQ_MAX; i++)
+		spin_lock_init(&fnic2->wq_lock[i]);
+
+	for (i = 0; i < FNIC2_WQ_COPY_MAX; i++) {
+		spin_lock_init(&fnic2->wq_copy_lock[i]);
+		fnic2->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
+		fnic2->fw_ack_recd[i] = 0;
+		fnic2->fw_ack_index[i] = -1;
+	}
+
+	fnic2->vlan_id = 0;
+
+	if (fnic2->config.flags & VFCF_FIP_CAPABLE) {
+		pr_info("firmware supports FIP\n");
+		/* enable directed and multicast
+		 * vdev, directed, multicast, broadcast, promisc, allmulti
+		 */
+		vnic_dev_packet_filter(fnic2->vdev, 1, 1, 0, 0, 0);
+		vnic_dev_add_addr(fnic2->vdev, fip_all_enode_macs);
+		vnic_dev_add_addr(fnic2->vdev, lport->hwmac);
+		fnic2->set_vlan = fnic2_set_vlan;
+
+		spin_lock_init(&fnic2->vlans_lock);
+		INIT_WORK(&fnic2->fip_work, fnic2_handle_fip_frame);
+		INIT_LIST_HEAD(&fnic2->fip_frame_queue);
+		INIT_LIST_HEAD(&fnic2->vlan_list);
+		/*
+		 * Timer flags are 0; the previous code passed
+		 * uninitialized locals, which is undefined behavior.
+		 */
+		timer_setup(&fnic2->retry_fip_timer, fnic2_handle_fip_timer, 0);
+		timer_setup(&fnic2->fcs_ka_timer, fnic2_handle_fcs_ka_timer, 0);
+		timer_setup(&fnic2->enode_ka_timer, fnic2_handle_enode_ka_timer, 0);
+		timer_setup(&fnic2->vn_ka_timer, fnic2_handle_vn_ka_timer, 0);
+	} else {
+		pr_info("firmware uses non-FIP mode\n");
+	}
+	fnic2->state = FNIC2_IN_FC_MODE;
+
+	atomic_set(&fnic2->in_flight, 0);
+
+	/* Enable hardware stripping of vlan header on ingress
+	 * fnic2, rss: default cpu, hash_type, hash_bits, base cpu
+	 * rss enable, tso_ipid_split_en, ig_vlan_strip_en
+	 */
+	fnic2_set_nic_config(fnic2, 0, 0, 0, 0, 0, 0, 1);
+
+	/* Setup notification buffer area */
+	err = fnic2_notify_set(fnic2);
+	if (err) {
+		pr_err("Failed to alloc notify buffer, aborting.\n");
+		goto err_out_free_resources;
+	}
+
+	/* Setup notify timer when using MSI interrupts */
+	if (vnic_dev_get_intr_mode(fnic2->vdev) == VNIC_DEV_INTR_MODE_MSI)
+		timer_setup(&fnic2->notify_timer, fnic2_notify_timer, 0);
+
+	/* Start local port initialization */
+	lport->max_flogi_retries = fnic2->config.flogi_retries;
+	lport->max_plogi_retries = fnic2->config.plogi_retries;
+
+	lport->service_params =
+	    (FNIC2_FCP_SP_INITIATOR | FNIC2_FCP_SP_RD_XRDY_DIS |
+	    FNIC2_FCP_SP_CONF_CMPL);
+	if (fnic2->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+		lport->service_params |= FNIC2_FCP_SP_RETRY;
+	lport->boot_time = jiffies;
+	lport->e_d_tov = fnic2->config.ed_tov;
+	lport->r_a_tov = fnic2->config.ra_tov;
+	lport->link_supported_speeds = FNIC2_PORTSPEED_10GBIT;
+	lport->wwpn = fnic2->config.port_wwn;
+	lport->wwnn = fnic2->config.node_wwn;
+
+	pr_debug("lport wwpn: %llx\n", lport->wwpn);
+
+	lport->mfs = fnic2->config.maxdatafieldsize;
+
+	timer_setup(&lport->fabric.retry_timer, fdls_fabric_timer_callback, 0);
+	if ((lport->mfs < FNIC2_FCOE_MIN_FRAME_SZ) ||
+	    (lport->mfs > FNIC2_FCOE_MAX_FRAME_SZ))
+		lport->mfs = FNIC2_FCOE_MAX_FRAME_SZ;
+
+	spin_lock_irqsave(&fnic2_list_lock, flags);
+	list_add_tail(&fnic2->list, &fnic2_list);
+	list_for_each(counter_head, &fnic2_list) {
+		fnic2_count++;
+	}
+	fnic2->fnic2_num = fnic2_count;
+	spin_unlock_irqrestore(&fnic2_list_lock, flags);
+
+	INIT_WORK(&fnic2->link_work, fnic2_handle_link);
+	INIT_WORK(&fnic2->frame_work, fnic2_handle_frame);
+
+	INIT_LIST_HEAD(&fnic2->frame_queue);
+	INIT_LIST_HEAD(&fnic2->tx_queue);
+	INIT_LIST_HEAD(&lport->rport_list);
+
+	INIT_LIST_HEAD(&fnic2->lio.sess_list);
+
+	err = fnic2_alloc_tcmd_pool(fnic2);
+	if (err < 0) {
+		pr_err("Failure allocating tcmd pool\n");
+		WARN_ON(1);
+		/*
+		 * NOTE(review): probe continues without a tcmd pool here;
+		 * confirm whether it should unwind and fail instead.
+		 */
+	}
+	pr_info("initializing fdls\n");
+
+	fnic2_fdls_init(fnic2, (fnic2->config.flags & VFCF_FIP_CAPABLE));
+
+	/* Enable all queues */
+	for (i = 0; i < fnic2->raw_wq_count; i++)
+		vnic_wq_enable(&fnic2->wq[i]);
+	for (i = 0; i < fnic2->rq_count; i++)
+		vnic_rq_enable(&fnic2->rq[i]);
+	for (i = 0; i < fnic2->wq_copy_count; i++)
+		vnic_wq_copy_enable(&fnic2->wq_copy[i]);
+
+	/* allocate RQ buffers and post them to RQ*/
+	for (i = 0; i < fnic2->rq_count; i++) {
+		err = vnic_rq_fill(&fnic2->rq[i], fnic2_alloc_rq_frame);
+		if (err) {
+			pr_err("%s can't alloc frame\n", __func__);
+			goto err_out_free_rq_buf;
+		}
+	}
+
+	vnic_dev_enable(fnic2->vdev);
+
+	err = fnic2_request_intr(fnic2);
+	if (err) {
+		pr_err("Unable to request irq.\n");
+		goto err_out_free_rq_buf;
+	}
+
+	for (i = 0; i < fnic2->intr_count; i++)
+		vnic_intr_unmask(&fnic2->intr[i]);
+
+	fnic2_notify_timer_start(fnic2);
+
+	return 0;
+
+err_out_free_rq_buf:
+	for (i = 0; i < fnic2->rq_count; i++)
+		vnic_rq_clean(&fnic2->rq[i], fnic2_free_rq_buf);
+	vnic_dev_notify_unset(fnic2->vdev);
+err_out_free_resources:
+	fnic2_free_vnic_resources(fnic2);
+err_out_clear_intr:
+	fnic2_clear_intr_mode(fnic2);
+err_out_dev_close:
+	vnic_dev_close(fnic2->vdev);
+err_out_vnic_unregister:
+	vnic_dev_unregister(fnic2->vdev);
+err_out_iounmap:
+	fnic2_iounmap(fnic2);
+err_out_release_regions:
+	pci_release_regions(pdev);
+err_out_disable_device:
+	pci_disable_device(pdev);
+err_out_free_hba:
+	kfree(fnic2);	/* was previously leaked on every error path */
+	return err;
+}
+
+/*
+ * fnic2_remove() - PCI remove: tear down one fnic2 adapter.
+ *
+ * Stops generation of new work first, flushes the workqueues, kills the
+ * timers, drains the hardware queues, and then releases interrupts,
+ * vNIC resources and PCI resources in reverse order of probe.
+ */
+static void fnic2_remove(struct pci_dev *pdev)
+{
+	struct fnic2 *fnic2 = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	/*
+	 * Mark state so that the workqueue thread stops forwarding
+	 * received frames and link events to the local port. ISR and
+	 * other threads that can queue work items will also stop
+	 * creating work items on the fnic2 workqueue
+	 */
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	fnic2->stop_rx_link_events = 1;
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	/* only MSI mode ever arms the notify poll timer */
+	if (vnic_dev_get_intr_mode(fnic2->vdev) == VNIC_DEV_INTR_MODE_MSI)
+		del_timer_sync(&fnic2->notify_timer);
+
+	/*
+	 * Flush the fnic2 event queue. After this call, there should
+	 * be no event queued for this fnic2 device in the workqueue
+	 */
+	flush_workqueue(fnic2_event_queue);
+	flush_workqueue(fip_event_queue);
+	flush_workqueue(fnic2_tcmd_wq);
+	list_free_buffer(&fnic2->frame_queue);
+	list_free_buffer(&fnic2->tx_queue);
+	list_free_buffer(&fnic2->lport.rport_list);
+
+	if (fnic2->config.flags & VFCF_FIP_CAPABLE) {
+		del_timer_sync(&fnic2->retry_fip_timer);
+		del_timer_sync(&fnic2->fcs_ka_timer);
+		del_timer_sync(&fnic2->enode_ka_timer);
+		del_timer_sync(&fnic2->vn_ka_timer);
+		list_free_buffer(&fnic2->fip_frame_queue);
+		fnic2_fcoe_reset_vlans(fnic2);
+		flush_workqueue(fip_event_queue);
+	}
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	fnic2->in_remove = 1;
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	fnic2_cleanup(fnic2);
+	fnic2_fdls_cleanup(fnic2);
+
+	spin_lock_irqsave(&fnic2_list_lock, flags);
+	list_del(&fnic2->list);
+	spin_unlock_irqrestore(&fnic2_list_lock, flags);
+
+	vnic_dev_notify_unset(fnic2->vdev);
+	fnic2_free_intr(fnic2);
+	fnic2_free_vnic_resources(fnic2);
+	fnic2_clear_intr_mode(fnic2);
+	vnic_dev_close(fnic2->vdev);
+	vnic_dev_unregister(fnic2->vdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	/*
+	 * NOTE(review): the struct fnic2 allocated in fnic2_probe() and
+	 * the tcmd_pool are not freed anywhere in this function —
+	 * confirm and kfree() them to avoid a leak on unbind.
+	 */
+}
+
+/* PCI glue: binds fnic2_probe/fnic2_remove to the device IDs above */
+static struct pci_driver fnic2_driver = {
+	.name = DRV_NAME,
+	.id_table = fnic2_id_table,
+	.probe = fnic2_probe,
+	.remove = fnic2_remove,
+};
+
+/*
+ * fnic2_init_module() - module entry point.
+ *
+ * Creates the driver-wide workqueues, initializes the LIO fabric glue,
+ * and registers the PCI driver.  Every step is unwound in reverse order
+ * on failure.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __init fnic2_init_module(void)
+{
+	int err = 0;
+
+	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+
+	fnic2_event_queue = create_singlethread_workqueue("fnic2_event_wq");
+	if (!fnic2_event_queue) {
+		pr_err("fnic2 work queue create failed\n");
+		err = -ENOMEM;
+		goto err_return;
+	}
+
+	fip_event_queue = create_singlethread_workqueue("fip_event_wq");
+	if (!fip_event_queue) {
+		pr_err("fip work queue create failed\n");
+		err = -ENOMEM;
+		goto err_fnic2_wq;
+	}
+
+	fnic2_tcmd_wq = alloc_workqueue("fnic2_tcmd", 0, 0);
+	if (!fnic2_tcmd_wq) {
+		err = -ENOMEM;
+		goto err_tcmd_wq;
+	}
+
+	fnic2_lio_init();
+
+	/* register the driver with PCI system */
+	err = pci_register_driver(&fnic2_driver);
+	if (err < 0) {
+		pr_err("pci register error\n");
+		goto err_pci_reg;
+	}
+
+	return err;
+
+err_pci_reg:
+	/* undo fnic2_lio_init(); this was previously leaked on failure */
+	fnic2_lio_cleanup();
+	destroy_workqueue(fnic2_tcmd_wq);
+err_tcmd_wq:
+	destroy_workqueue(fip_event_queue);
+err_fnic2_wq:
+	destroy_workqueue(fnic2_event_queue);
+err_return:
+	return err;
+}
+
+/* Module exit: unregister the driver and destroy the workqueues. */
+static void __exit fnic2_cleanup_module(void)
+{
+	/*
+	 * NOTE(review): teardown is not the exact reverse of
+	 * fnic2_init_module() — LIO is cleaned up before the PCI driver
+	 * is unregistered; confirm this ordering is intentional.
+	 */
+	fnic2_lio_cleanup();
+	pci_unregister_driver(&fnic2_driver);
+	destroy_workqueue(fnic2_event_queue);
+	destroy_workqueue(fip_event_queue);
+	destroy_workqueue(fnic2_tcmd_wq);
+}
+module_init(fnic2_init_module);
+module_exit(fnic2_cleanup_module);
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 02/10] staging: fnic2 add resource allocation
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
  2018-04-05 21:16 ` [PATCH 01/10] staging: fnic2 add initialization Oliver Smith-Denny
@ 2018-04-05 21:17 ` Oliver Smith-Denny
  2018-04-06  5:08   ` Greg Kroah-Hartman
  2018-04-05 21:18 ` [PATCH 03/10] staging: fnic2 add fip handling Oliver Smith-Denny
                   ` (7 subsequent siblings)
  9 siblings, 1 reply; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:17 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain logic for interrupts, resource allocation,
and initialization of hardware queues.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fnic2_isr.c | 324 +++++++++++++++++++++++++
 drivers/staging/fnic2/src/fnic2_res.c | 430 ++++++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fnic2_res.h | 120 ++++++++++
 3 files changed, 874 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fnic2_isr.c
 create mode 100644 drivers/staging/fnic2/src/fnic2_res.c
 create mode 100644 drivers/staging/fnic2/src/fnic2_res.h

diff --git a/drivers/staging/fnic2/src/fnic2_isr.c b/drivers/staging/fnic2/src/fnic2_isr.c
new file mode 100644
index 0000000..07cf1a4
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_isr.c
@@ -0,0 +1,324 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "fnic2.h"
+
+/*
+ * fnic2_isr_legacy() - interrupt handler for legacy INTx mode.
+ *
+ * Reads the legacy pending-bits area; returns IRQ_NONE when no bit is
+ * set (the line is shared, so the interrupt may not be ours).  Notify,
+ * error, and combined WQ/RQ/copy-WQ completions each get their credits
+ * returned on the corresponding interrupt slot.
+ */
+static irqreturn_t fnic2_isr_legacy(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+	uint32_t pba;
+	unsigned long work_done = 0;
+
+	pba = vnic_intr_legacy_pba(fnic2->legacy_pba);
+	if (!pba)
+		return IRQ_NONE;
+
+	if (pba & (1 << FNIC2_INTX_NOTIFY)) {
+		vnic_intr_return_all_credits(&fnic2->intr[FNIC2_INTX_NOTIFY]);
+		fnic2_handle_link_event(fnic2);
+	}
+
+	if (pba & (1 << FNIC2_INTX_ERR)) {
+		vnic_intr_return_all_credits(&fnic2->intr[FNIC2_INTX_ERR]);
+		fnic2_log_q_error(fnic2);
+	}
+
+	if (pba & (1 << FNIC2_INTX_WQ_RQ_COPYWQ)) {
+		/* One INTx slot covers all three queue types */
+		work_done += fnic2_wq_copy_cmpl_handler(fnic2, -1);
+		work_done += fnic2_wq_cmpl_handler(fnic2, -1);
+		work_done += fnic2_rq_cmpl_handler(fnic2, -1);
+
+		vnic_intr_return_credits(&fnic2->intr[FNIC2_INTX_WQ_RQ_COPYWQ],
+					 work_done,
+					 1 /* unmask intr */,
+					 1 /* reset intr timer */);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * fnic2_isr_msi() - interrupt handler for MSI mode.
+ *
+ * A single vector services copy-WQ, raw-WQ and RQ completions; all
+ * work done is credited back on intr[0].
+ */
+static irqreturn_t fnic2_isr_msi(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+	unsigned long work_done = 0;
+
+	work_done += fnic2_wq_copy_cmpl_handler(fnic2, -1);
+	work_done += fnic2_wq_cmpl_handler(fnic2, -1);
+	work_done += fnic2_rq_cmpl_handler(fnic2, -1);
+
+	vnic_intr_return_credits(&fnic2->intr[0],
+				 work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+/* MSI-X handler dedicated to RQ (receive queue) completions. */
+static irqreturn_t fnic2_isr_msix_rq(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+	unsigned long rq_work_done = 0;
+
+	rq_work_done = fnic2_rq_cmpl_handler(fnic2, -1);
+	vnic_intr_return_credits(&fnic2->intr[FNIC2_MSIX_RQ],
+				 rq_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+/* MSI-X handler dedicated to raw-WQ (FCS frame) completions. */
+static irqreturn_t fnic2_isr_msix_wq(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+	unsigned long wq_work_done = 0;
+
+	wq_work_done = fnic2_wq_cmpl_handler(fnic2, -1);
+	vnic_intr_return_credits(&fnic2->intr[FNIC2_MSIX_WQ],
+				 wq_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+	return IRQ_HANDLED;
+}
+
+/* MSI-X handler dedicated to copy-WQ (SCSI I/O) completions. */
+static irqreturn_t fnic2_isr_msix_wq_copy(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+	unsigned long wq_copy_work_done = 0;
+
+	wq_copy_work_done = fnic2_wq_copy_cmpl_handler(fnic2, -1);
+	vnic_intr_return_credits(&fnic2->intr[FNIC2_MSIX_WQ_COPY],
+				 wq_copy_work_done,
+				 1 /* unmask intr */,
+				 1 /* reset intr timer */);
+	return IRQ_HANDLED;
+}
+
+/*
+ * MSI-X handler for the shared error/notify vector: logs queue errors
+ * and processes link-event notifications.
+ */
+static irqreturn_t fnic2_isr_msix_err_notify(int irq, void *data)
+{
+	struct fnic2 *fnic2 = data;
+
+	vnic_intr_return_all_credits(&fnic2->intr[FNIC2_MSIX_ERR_NOTIFY]);
+	fnic2_log_q_error(fnic2);
+	fnic2_handle_link_event(fnic2);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * fnic2_free_intr() - release IRQs requested by fnic2_request_intr().
+ *
+ * INTx/MSI use the single PCI irq; MSI-X frees only the vectors whose
+ * "requested" flag was set, so this is safe to call after a partial
+ * request failure.
+ */
+void fnic2_free_intr(struct fnic2 *fnic2)
+{
+	int i;
+
+	switch (vnic_dev_get_intr_mode(fnic2->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		free_irq(fnic2->pdev->irq, fnic2);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < ARRAY_SIZE(fnic2->msix); i++)
+			if (fnic2->msix[i].requested)
+				free_irq(fnic2->msix_entry[i].vector,
+					 fnic2->msix[i].devid);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * fnic2_request_intr() - request IRQ(s) for the configured interrupt mode.
+ *
+ * INTx requests a shared handler on the PCI irq; MSI a non-shared one.
+ * MSI-X names each vector, then requests them in order; on the first
+ * failure all previously requested vectors are freed via
+ * fnic2_free_intr() (driven by the per-entry "requested" flag).
+ *
+ * Returns 0 on success or the request_irq() error.
+ */
+int fnic2_request_intr(struct fnic2 *fnic2)
+{
+	int err = 0;
+	int i;
+
+	switch (vnic_dev_get_intr_mode(fnic2->vdev)) {
+
+	case VNIC_DEV_INTR_MODE_INTX:
+		err = request_irq(fnic2->pdev->irq, &fnic2_isr_legacy,
+				  IRQF_SHARED, DRV_NAME, fnic2);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSI:
+		err = request_irq(fnic2->pdev->irq, &fnic2_isr_msi,
+				  0, fnic2->name, fnic2);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+
+		/* snprintf bounds each write to the devname buffer */
+		snprintf(fnic2->msix[FNIC2_MSIX_RQ].devname,
+			 sizeof(fnic2->msix[FNIC2_MSIX_RQ].devname),
+			 "%.11s-fcs-rq", fnic2->name);
+		fnic2->msix[FNIC2_MSIX_RQ].isr = fnic2_isr_msix_rq;
+		fnic2->msix[FNIC2_MSIX_RQ].devid = fnic2;
+
+		snprintf(fnic2->msix[FNIC2_MSIX_WQ].devname,
+			 sizeof(fnic2->msix[FNIC2_MSIX_WQ].devname),
+			 "%.11s-fcs-wq", fnic2->name);
+		fnic2->msix[FNIC2_MSIX_WQ].isr = fnic2_isr_msix_wq;
+		fnic2->msix[FNIC2_MSIX_WQ].devid = fnic2;
+
+		snprintf(fnic2->msix[FNIC2_MSIX_WQ_COPY].devname,
+			 sizeof(fnic2->msix[FNIC2_MSIX_WQ_COPY].devname),
+			 "%.11s-scsi-wq", fnic2->name);
+		fnic2->msix[FNIC2_MSIX_WQ_COPY].isr = fnic2_isr_msix_wq_copy;
+		fnic2->msix[FNIC2_MSIX_WQ_COPY].devid = fnic2;
+
+		snprintf(fnic2->msix[FNIC2_MSIX_ERR_NOTIFY].devname,
+			 sizeof(fnic2->msix[FNIC2_MSIX_ERR_NOTIFY].devname),
+			 "%.11s-err-notify", fnic2->name);
+		fnic2->msix[FNIC2_MSIX_ERR_NOTIFY].isr =
+			fnic2_isr_msix_err_notify;
+		fnic2->msix[FNIC2_MSIX_ERR_NOTIFY].devid = fnic2;
+
+		for (i = 0; i < ARRAY_SIZE(fnic2->msix); i++) {
+			err = request_irq(fnic2->msix_entry[i].vector,
+					  fnic2->msix[i].isr, 0,
+					  fnic2->msix[i].devname,
+					  fnic2->msix[i].devid);
+			if (err) {
+				pr_err("MSIX: request_irq failed %d\n",
+					err);
+				fnic2_free_intr(fnic2);
+				break;
+			}
+			fnic2->msix[i].requested = 1;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+/*
+ * fnic2_set_intr_mode() - pick the best available interrupt mode.
+ *
+ * Tries MSI-X, then MSI, then INTx, in that order.  On success the
+ * fnic2 queue/interrupt counts are shrunk to exactly what the chosen
+ * mode uses and the mode is recorded in the vnic device.
+ *
+ * Returns 0 on success, -EINVAL if no mode's resource needs can be met
+ * (mode is then set to VNIC_DEV_INTR_MODE_UNKNOWN).
+ */
+int fnic2_set_intr_mode(struct fnic2 *fnic2)
+{
+	unsigned int n = ARRAY_SIZE(fnic2->rq);
+	unsigned int m = ARRAY_SIZE(fnic2->wq);
+	unsigned int o = ARRAY_SIZE(fnic2->wq_copy);
+	unsigned int i;
+
+	/*
+	 * Set interrupt mode (INTx, MSI, MSI-X) depending
+	 * system capabilities.
+	 *
+	 * Try MSI-X first
+	 *
+	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
+	 * (last INTR is used for WQ/RQ errors and notification area)
+	 */
+
+	WARN_ON(ARRAY_SIZE(fnic2->msix_entry) < n + m + o + 1);
+	for (i = 0; i < n + m + o + 1; i++)
+		fnic2->msix_entry[i].entry = i;
+
+	if (fnic2->rq_count >= n &&
+	    fnic2->raw_wq_count >= m &&
+	    fnic2->wq_copy_count >= o &&
+	    fnic2->cq_count >= n + m + o) {
+		if (!pci_enable_msix_exact(fnic2->pdev, fnic2->msix_entry,
+				    n + m + o + 1)) {
+			fnic2->rq_count = n;
+			fnic2->raw_wq_count = m;
+			fnic2->wq_copy_count = o;
+			fnic2->wq_count = m + o;
+			fnic2->cq_count = n + m + o;
+			fnic2->intr_count = n + m + o + 1;
+			fnic2->err_intr_offset = FNIC2_MSIX_ERR_NOTIFY;
+
+			pr_debug("Using MSI-X Interrupts\n");
+			vnic_dev_set_intr_mode(fnic2->vdev,
+					       VNIC_DEV_INTR_MODE_MSIX);
+			return 0;
+		}
+	}
+
+	/*
+	 * Next try MSI
+	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
+	 */
+	if (fnic2->rq_count >= 1 &&
+	    fnic2->raw_wq_count >= 1 &&
+	    fnic2->wq_copy_count >= 1 &&
+	    fnic2->cq_count >= 3 &&
+	    fnic2->intr_count >= 1 &&
+	    !pci_enable_msi(fnic2->pdev)) {
+
+		fnic2->rq_count = 1;
+		fnic2->raw_wq_count = 1;
+		fnic2->wq_copy_count = 1;
+		fnic2->wq_count = 2;
+		fnic2->cq_count = 3;
+		fnic2->intr_count = 1;
+		fnic2->err_intr_offset = 0;
+
+		pr_debug("Using MSI Interrupts\n");
+		vnic_dev_set_intr_mode(fnic2->vdev, VNIC_DEV_INTR_MODE_MSI);
+
+		return 0;
+	}
+
+	/*
+	 * Next try INTx
+	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
+	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
+	 * 1 INTR for notification area
+	 */
+
+	if (fnic2->rq_count >= 1 &&
+	    fnic2->raw_wq_count >= 1 &&
+	    fnic2->wq_copy_count >= 1 &&
+	    fnic2->cq_count >= 3 &&
+	    fnic2->intr_count >= 3) {
+
+		fnic2->rq_count = 1;
+		fnic2->raw_wq_count = 1;
+		fnic2->wq_copy_count = 1;
+		fnic2->cq_count = 3;
+		fnic2->intr_count = 3;
+
+		pr_debug("Using Legacy Interrupts\n");
+		vnic_dev_set_intr_mode(fnic2->vdev, VNIC_DEV_INTR_MODE_INTX);
+
+		return 0;
+	}
+
+	vnic_dev_set_intr_mode(fnic2->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+	return -EINVAL;
+}
+
+/*
+ * fnic2_clear_intr_mode() - undo MSI/MSI-X enablement.
+ *
+ * Disables whichever of MSI/MSI-X is active, then resets the recorded
+ * mode to INTx as the baseline state.
+ */
+void fnic2_clear_intr_mode(struct fnic2 *fnic2)
+{
+	switch (vnic_dev_get_intr_mode(fnic2->vdev)) {
+	case VNIC_DEV_INTR_MODE_MSIX:
+		pci_disable_msix(fnic2->pdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSI:
+		pci_disable_msi(fnic2->pdev);
+		break;
+	default:
+		break;
+	}
+
+	vnic_dev_set_intr_mode(fnic2->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
diff --git a/drivers/staging/fnic2/src/fnic2_res.c b/drivers/staging/fnic2/src/fnic2_res.c
new file mode 100644
index 0000000..65b9a26
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_res.c
@@ -0,0 +1,430 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "cq_enet_desc.h"
+#include "vnic_resource.h"
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "vnic_cq.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "vnic_nic.h"
+#include "fnic2.h"
+
+/*
+ * fnic2_get_vnic_config() - read the FC configuration from firmware.
+ *
+ * Each field is fetched individually via vnic_dev_spec(); the first
+ * failure is logged and its error returned.  Fetched values are then
+ * clamped to the driver's supported min/max ranges and logged.
+ *
+ * Returns 0 on success, or the vnic_dev_spec() error code.
+ */
+int fnic2_get_vnic_config(struct fnic2 *fnic2)
+{
+	struct vnic_fc_config *c = &fnic2->config;
+	int err;
+
+/*
+ * Fetch one config field, returning from the enclosing function on
+ * error.  No trailing semicolon after "while (0)": the caller supplies
+ * it, so GET_CONFIG(x); expands to exactly one statement and is safe
+ * inside an unbraced if/else.
+ */
+#define GET_CONFIG(m) \
+	do { \
+		err = vnic_dev_spec(fnic2->vdev, \
+				    offsetof(struct vnic_fc_config, m), \
+				    sizeof(c->m), &c->m); \
+		if (err) { \
+			pr_err("Error getting %s, %d\n", #m, \
+				     err); \
+			return err; \
+		} \
+	} while (0)
+
+	GET_CONFIG(node_wwn);
+	GET_CONFIG(port_wwn);
+	GET_CONFIG(wq_enet_desc_count);
+	GET_CONFIG(wq_copy_desc_count);
+	GET_CONFIG(rq_desc_count);
+	GET_CONFIG(maxdatafieldsize);
+	GET_CONFIG(ed_tov);
+	GET_CONFIG(ra_tov);
+	GET_CONFIG(intr_timer);
+	GET_CONFIG(intr_timer_type);
+	GET_CONFIG(flags);
+	GET_CONFIG(flogi_retries);
+	GET_CONFIG(flogi_timeout);
+	GET_CONFIG(plogi_retries);
+	GET_CONFIG(plogi_timeout);
+	GET_CONFIG(io_throttle_count);
+	GET_CONFIG(link_down_timeout);
+	GET_CONFIG(port_down_timeout);
+	GET_CONFIG(port_down_io_retries);
+	GET_CONFIG(luns_per_tgt);
+
+#undef GET_CONFIG
+
+	/* Clamp descriptor counts to supported ranges, 16-aligned */
+	c->wq_enet_desc_count =
+		min_t(uint32_t, VNIC_FNIC2_WQ_DESCS_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_WQ_DESCS_MIN,
+			    c->wq_enet_desc_count));
+	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
+
+	c->wq_copy_desc_count =
+		min_t(uint32_t, VNIC_FNIC2_WQ_COPY_DESCS_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_WQ_COPY_DESCS_MIN,
+			    c->wq_copy_desc_count));
+	c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
+
+	c->rq_desc_count =
+		min_t(uint32_t, VNIC_FNIC2_RQ_DESCS_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_RQ_DESCS_MIN,
+			    c->rq_desc_count));
+	c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
+
+	c->maxdatafieldsize =
+		min_t(uint16_t, VNIC_FNIC2_MAXDATAFIELDSIZE_MAX,
+		      max_t(uint16_t, VNIC_FNIC2_MAXDATAFIELDSIZE_MIN,
+			    c->maxdatafieldsize));
+	c->ed_tov =
+		min_t(uint32_t, VNIC_FNIC2_EDTOV_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_EDTOV_MIN,
+			    c->ed_tov));
+
+	c->ra_tov =
+		min_t(uint32_t, VNIC_FNIC2_RATOV_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_RATOV_MIN,
+			    c->ra_tov));
+
+	c->flogi_retries =
+		min_t(uint32_t, VNIC_FNIC2_FLOGI_RETRIES_MAX, c->flogi_retries);
+
+	c->flogi_timeout =
+		min_t(uint32_t, VNIC_FNIC2_FLOGI_TIMEOUT_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_FLOGI_TIMEOUT_MIN,
+			    c->flogi_timeout));
+
+	c->plogi_retries =
+		min_t(uint32_t, VNIC_FNIC2_PLOGI_RETRIES_MAX, c->plogi_retries);
+
+	c->plogi_timeout =
+		min_t(uint32_t, VNIC_FNIC2_PLOGI_TIMEOUT_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_PLOGI_TIMEOUT_MIN,
+			    c->plogi_timeout));
+
+	c->io_throttle_count =
+		min_t(uint32_t, VNIC_FNIC2_IO_THROTTLE_COUNT_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_IO_THROTTLE_COUNT_MIN,
+			    c->io_throttle_count));
+
+	c->link_down_timeout =
+		min_t(uint32_t, VNIC_FNIC2_LINK_DOWN_TIMEOUT_MAX,
+		      c->link_down_timeout);
+
+	c->port_down_timeout =
+		min_t(uint32_t, VNIC_FNIC2_PORT_DOWN_TIMEOUT_MAX,
+		      c->port_down_timeout);
+
+	c->port_down_io_retries =
+		min_t(uint32_t, VNIC_FNIC2_PORT_DOWN_IO_RETRIES_MAX,
+		      c->port_down_io_retries);
+
+	c->luns_per_tgt =
+		min_t(uint32_t, VNIC_FNIC2_LUNS_PER_TARGET_MAX,
+		      max_t(uint32_t, VNIC_FNIC2_LUNS_PER_TARGET_MIN,
+			    c->luns_per_tgt));
+
+	c->intr_timer = min_t(uint16_t, VNIC_INTR_TIMER_MAX, c->intr_timer);
+	/* intr_timer_type is used as-is; the old self-assignment was a no-op */
+
+	pr_info("vNIC wq/wq_copy/rq %d/%d/%d\n",
+		c->wq_enet_desc_count, c->wq_copy_desc_count,
+		c->rq_desc_count);
+	pr_info("vNIC node wwn %llx port wwn %llx\n",
+		c->node_wwn, c->port_wwn);
+	pr_info("vNIC ed_tov %d ra_tov %d\n",
+		c->ed_tov, c->ra_tov);
+	pr_info("vNIC mtu %d intr timer %d\n",
+		c->maxdatafieldsize, c->intr_timer);
+	pr_info("vNIC flags 0x%x luns per tgt %d\n",
+		c->flags, c->luns_per_tgt);
+	pr_info("vNIC flogi_retries %d flogi timeout %d\n",
+		c->flogi_retries, c->flogi_timeout);
+	pr_info("vNIC plogi retries %d plogi timeout %d\n",
+		c->plogi_retries, c->plogi_timeout);
+	pr_info("vNIC io throttle count %d link dn timeout %d\n",
+		c->io_throttle_count, c->link_down_timeout);
+	pr_info("vNIC port dn io retries %d port dn timeout %d\n",
+		c->port_down_io_retries, c->port_down_timeout);
+	return 0;
+}
+
+/*
+ * fnic2_set_nic_config() - program RSS/VLAN NIC settings into firmware.
+ *
+ * Encodes the parameters with vnic_set_nic_cfg() and issues CMD_NIC_CFG
+ * to the device, waiting up to 1000 (device command wait units).
+ *
+ * Returns the vnic_dev_cmd() result.
+ */
+int fnic2_set_nic_config(struct fnic2 *fnic2, uint8_t rss_default_cpu,
+			 uint8_t rss_hash_type,
+			 uint8_t rss_hash_bits, uint8_t rss_base_cpu, uint8_t rss_enable,
+			 uint8_t tso_ipid_split_en, uint8_t ig_vlan_strip_en)
+{
+	uint64_t a0, a1;
+	uint32_t nic_cfg;
+	int wait = 1000;
+
+	vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
+		rss_hash_type, rss_hash_bits, rss_base_cpu,
+		rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
+
+	a0 = nic_cfg;
+	a1 = 0;
+
+	return vnic_dev_cmd(fnic2->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+}
+
+/*
+ * fnic2_get_res_counts() - query available resource counts from firmware.
+ *
+ * NOTE(review): raw_wq_count = wq_count - 1 makes wq_copy_count always
+ * evaluate to 1 regardless of wq_count — confirm this fixed 1-copy-WQ
+ * split is intended.
+ */
+void fnic2_get_res_counts(struct fnic2 *fnic2)
+{
+	fnic2->wq_count = vnic_dev_get_res_count(fnic2->vdev, RES_TYPE_WQ);
+	fnic2->raw_wq_count = fnic2->wq_count - 1;
+	fnic2->wq_copy_count = fnic2->wq_count - fnic2->raw_wq_count;
+	fnic2->rq_count = vnic_dev_get_res_count(fnic2->vdev, RES_TYPE_RQ);
+	fnic2->cq_count = vnic_dev_get_res_count(fnic2->vdev, RES_TYPE_CQ);
+	fnic2->intr_count = vnic_dev_get_res_count(fnic2->vdev,
+		RES_TYPE_INTR_CTRL);
+}
+
+/*
+ * fnic2_free_vnic_resources() - free all WQ/copy-WQ/RQ/CQ/INTR resources.
+ *
+ * Also used as the cleanup path after a partial allocation failure in
+ * fnic2_alloc_vnic_resources() (the vnic_*_free helpers must therefore
+ * tolerate never-allocated entries).
+ */
+void fnic2_free_vnic_resources(struct fnic2 *fnic2)
+{
+	unsigned int i;
+
+	for (i = 0; i < fnic2->raw_wq_count; i++)
+		vnic_wq_free(&fnic2->wq[i]);
+
+	for (i = 0; i < fnic2->wq_copy_count; i++)
+		vnic_wq_copy_free(&fnic2->wq_copy[i]);
+
+	for (i = 0; i < fnic2->rq_count; i++)
+		vnic_rq_free(&fnic2->rq[i]);
+
+	for (i = 0; i < fnic2->cq_count; i++)
+		vnic_cq_free(&fnic2->cq[i]);
+
+	for (i = 0; i < fnic2->intr_count; i++)
+		vnic_intr_free(&fnic2->intr[i]);
+}
+
+/*
+ * fnic2_alloc_vnic_resources() - allocate and initialize all queues.
+ *
+ * Allocates raw WQs (FCS frames), copy WQs (SCSI I/O), RQs, one CQ per
+ * queue, and the interrupt controls, then initializes them for the
+ * current interrupt mode.  On any failure everything allocated so far
+ * is released via fnic2_free_vnic_resources().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int fnic2_alloc_vnic_resources(struct fnic2 *fnic2)
+{
+	enum vnic_dev_intr_mode intr_mode;
+	unsigned int mask_on_assertion;
+	unsigned int interrupt_offset;
+	unsigned int error_interrupt_enable;
+	unsigned int error_interrupt_offset;
+	unsigned int i, cq_index;
+	unsigned int wq_copy_cq_desc_count;
+	int err;
+
+	intr_mode = vnic_dev_get_intr_mode(fnic2->vdev);
+
+	pr_info("vNIC interrupt mode: %s\n",
+		     intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
+		     intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
+		     intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
+		     "MSI-X" : "unknown");
+
+	pr_info("vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
+		     fnic2->wq_count, fnic2->wq_copy_count, fnic2->raw_wq_count,
+		     fnic2->rq_count, fnic2->cq_count, fnic2->intr_count);
+
+	pr_info("Copy WQ desc count: %d\n",
+		fnic2->config.wq_copy_desc_count);
+
+	/* Allocate Raw WQ used for FCS frames */
+	for (i = 0; i < fnic2->raw_wq_count; i++) {
+		err = vnic_wq_alloc(fnic2->vdev, &fnic2->wq[i], i,
+			fnic2->config.wq_enet_desc_count,
+			sizeof(struct wq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* Allocate Copy WQs used for SCSI IOs */
+	for (i = 0; i < fnic2->wq_copy_count; i++) {
+		err = vnic_wq_copy_alloc(fnic2->vdev, &fnic2->wq_copy[i],
+			(fnic2->raw_wq_count + i),
+			fnic2->config.wq_copy_desc_count,
+			sizeof(struct fcpio_host_req));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* RQ for receiving FCS and FCP frames */
+	for (i = 0; i < fnic2->rq_count; i++) {
+		err = vnic_rq_alloc(fnic2->vdev, &fnic2->rq[i], i,
+			fnic2->config.rq_desc_count,
+			sizeof(struct rq_enet_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each RQ */
+	for (i = 0; i < fnic2->rq_count; i++) {
+		cq_index = i;
+		err = vnic_cq_alloc(fnic2->vdev,
+			&fnic2->cq[cq_index], cq_index,
+			fnic2->config.rq_desc_count,
+			sizeof(struct cq_enet_rq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each WQ */
+	for (i = 0; i < fnic2->raw_wq_count; i++) {
+		cq_index = fnic2->rq_count + i;
+		err = vnic_cq_alloc(fnic2->vdev, &fnic2->cq[cq_index], cq_index,
+			fnic2->config.wq_enet_desc_count,
+			sizeof(struct cq_enet_wq_desc));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* CQ for each COPY WQ */
+	wq_copy_cq_desc_count = (fnic2->config.wq_copy_desc_count * 3);
+	for (i = 0; i < fnic2->wq_copy_count; i++) {
+		cq_index = fnic2->raw_wq_count + fnic2->rq_count + i;
+		err = vnic_cq_alloc(fnic2->vdev, &fnic2->cq[cq_index],
+			cq_index,
+			wq_copy_cq_desc_count,
+			sizeof(struct fcpio_fw_req));
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	for (i = 0; i < fnic2->intr_count; i++) {
+		err = vnic_intr_alloc(fnic2->vdev, &fnic2->intr[i], i);
+		if (err)
+			goto err_out_cleanup;
+	}
+
+	/* Legacy pending-bits area is mandatory only for INTx mode */
+	fnic2->legacy_pba = vnic_dev_get_res(fnic2->vdev,
+				RES_TYPE_INTR_PBA_LEGACY, 0);
+
+	if (!fnic2->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+		pr_info("Failed to hook legacy pba resource\n");
+		err = -ENODEV;
+		goto err_out_cleanup;
+	}
+
+	/*
+	 * Init RQ/WQ resources.
+	 *
+	 * RQ[0 to n-1] point to CQ[0 to n-1]
+	 * WQ[0 to m-1] point to CQ[n to n+m-1]
+	 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
+	 *
+	 * Note for copy wq we always initialize with cq_index = 0
+	 *
+	 * Error interrupt is not enabled for MSI.
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		error_interrupt_enable = 1;
+		error_interrupt_offset = fnic2->err_intr_offset;
+		break;
+	default:
+		error_interrupt_enable = 0;
+		error_interrupt_offset = 0;
+		break;
+	}
+
+	for (i = 0; i < fnic2->rq_count; i++) {
+		cq_index = i;
+		vnic_rq_init(&fnic2->rq[i],
+			     cq_index,
+			     error_interrupt_enable,
+			     error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic2->raw_wq_count; i++) {
+		cq_index = i + fnic2->rq_count;
+		vnic_wq_init(&fnic2->wq[i],
+			     cq_index,
+			     error_interrupt_enable,
+			     error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic2->wq_copy_count; i++) {
+		vnic_wq_copy_init(&fnic2->wq_copy[i],
+				  0 /* cq_index 0 - always */,
+				  error_interrupt_enable,
+				  error_interrupt_offset);
+	}
+
+	for (i = 0; i < fnic2->cq_count; i++) {
+
+		/* Only MSI-X has a dedicated interrupt per CQ */
+		switch (intr_mode) {
+		case VNIC_DEV_INTR_MODE_MSIX:
+			interrupt_offset = i;
+			break;
+		default:
+			interrupt_offset = 0;
+			break;
+		}
+
+		vnic_cq_init(&fnic2->cq[i],
+			0 /* flow_control_enable */,
+			1 /* color_enable */,
+			0 /* cq_head */,
+			0 /* cq_tail */,
+			1 /* cq_tail_color */,
+			1 /* interrupt_enable */,
+			1 /* cq_entry_enable */,
+			0 /* cq_message_enable */,
+			interrupt_offset,
+			0 /* cq_message_addr */);
+	}
+
+	/*
+	 * Init INTR resources
+	 *
+	 * mask_on_assertion is not used for INTx due to the level-
+	 * triggered nature of INTx
+	 */
+
+	switch (intr_mode) {
+	case VNIC_DEV_INTR_MODE_MSI:
+	case VNIC_DEV_INTR_MODE_MSIX:
+		mask_on_assertion = 1;
+		break;
+	default:
+		mask_on_assertion = 0;
+		break;
+	}
+
+	for (i = 0; i < fnic2->intr_count; i++) {
+		vnic_intr_init(&fnic2->intr[i],
+			fnic2->config.intr_timer,
+			fnic2->config.intr_timer_type,
+			mask_on_assertion);
+	}
+
+	/* init the stats memory by making the first call here */
+	err = vnic_dev_stats_dump(fnic2->vdev, &fnic2->stats);
+	if (err) {
+		pr_info("vnic_dev_stats_dump failed - 0x%x\n", err);
+		goto err_out_cleanup;
+	}
+
+	/* Clear LIF stats */
+	vnic_dev_stats_clear(fnic2->vdev);
+
+	return 0;
+
+err_out_cleanup:
+	fnic2_free_vnic_resources(fnic2);
+
+	return err;
+}
diff --git a/drivers/staging/fnic2/src/fnic2_res.h b/drivers/staging/fnic2/src/fnic2_res.h
new file mode 100644
index 0000000..dab106c
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_res.h
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2017 Cisco Systems, Inc.  All rights reserved.
+ */
+#ifndef _FNIC2_RES_H_
+#define _FNIC2_RES_H_
+
+#include "wq_enet_desc.h"
+#include "rq_enet_desc.h"
+#include "vnic_wq.h"
+#include "vnic_rq.h"
+#include "fcpio.h"
+#include "vnic_wq_copy.h"
+#include "vnic_cq_copy.h"
+#include "fnic2_fdls.h"
+
+/*
+ * fnic2_queue_wq_desc() - encode and post one ethernet WQ descriptor.
+ *
+ * Encodes the frame (FCoE encapsulation always on) into the next WQ
+ * descriptor slot and posts it via vnic_wq_post().
+ */
+static inline void fnic2_queue_wq_desc(struct vnic_wq *wq,
+				       void *os_buf, dma_addr_t dma_addr,
+				       unsigned int len, unsigned int fc_eof,
+				       int vlan_tag_insert,
+				       unsigned int vlan_tag,
+				       int cq_entry, int sop, int eop)
+{
+	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+
+	wq_enet_desc_enc(desc,
+			 (uint64_t)dma_addr | VNIC_PADDR_TARGET,
+			 (uint16_t)len,
+			 0, /* mss_or_csum_offset */
+			 (uint16_t)fc_eof,
+			 0, /* offload_mode */
+			 (uint8_t)eop, (uint8_t)cq_entry,
+			 1, /* fcoe_encap */
+			 (uint8_t)vlan_tag_insert,
+			 (uint16_t)vlan_tag,
+			 0 /* loopback */);
+
+	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+}
+
+/*
+ * fnic2_queue_wq_copy_desc_flogi_reg() - post a FLOGI_REG request.
+ *
+ * Fills the next copy-WQ descriptor with a FCPIO_FLOGI_REG header,
+ * the assigned S_ID (stored big-endian via hton24) and the gateway MAC,
+ * then posts it to firmware.
+ */
+static inline void fnic2_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
+						      uint32_t fcpio_tag, uint8_t format,
+						      uint32_t s_id, uint8_t *gw_mac)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_FLOGI_REG;     /* enum fcpio_type */
+	desc->hdr.status = 0;                 /* header status entry */
+	desc->hdr._resvd = 0;                 /* reserved */
+	desc->hdr.fcpio_tag = fcpio_tag;      /* id for this request */
+
+	desc->u.flogi_reg.format = format;
+	desc->u.flogi_reg._resvd = 0;
+	hton24(desc->u.flogi_reg.s_id, s_id);
+	memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN);
+
+	vnic_wq_copy_post(wq);
+}
+
+/*
+ * fnic2_queue_wq_copy_desc_fip_reg() - post a FLOGI_FIP_REG request.
+ *
+ * Registers S_ID, FCF and host-adapter MACs plus R_A_TOV/E_D_TOV with
+ * firmware through the next copy-WQ descriptor.
+ */
+static inline void fnic2_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
+						    uint32_t fcpio_tag, uint32_t s_id,
+						    uint8_t *fcf_mac, uint8_t *ha_mac,
+						    uint32_t r_a_tov, uint32_t e_d_tov)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */
+	desc->hdr.status = 0;                 /* header status entry */
+	desc->hdr._resvd = 0;                 /* reserved */
+	desc->hdr.fcpio_tag = fcpio_tag;      /* id for this request */
+
+	desc->u.flogi_fip_reg._resvd0 = 0;
+	hton24(desc->u.flogi_fip_reg.s_id, s_id);
+	memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd1 = 0;
+	desc->u.flogi_fip_reg.r_a_tov = r_a_tov;
+	desc->u.flogi_fip_reg.e_d_tov = e_d_tov;
+	memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN);
+	desc->u.flogi_fip_reg._resvd2 = 0;
+
+	vnic_wq_copy_post(wq);
+}
+
+/*
+ * fnic2_queue_wq_copy_desc_fw_reset() - post a firmware reset request.
+ */
+static inline void fnic2_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
+						     uint32_t fcpio_tag)
+{
+	struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
+
+	desc->hdr.type = FCPIO_RESET;     /* enum fcpio_type */
+	desc->hdr.status = 0;             /* header status entry */
+	desc->hdr._resvd = 0;             /* reserved */
+	desc->hdr.fcpio_tag = fcpio_tag;  /* id for this request */
+
+	vnic_wq_copy_post(wq);
+}
+
+/*
+ * fnic2_queue_rq_desc() - encode and post one receive descriptor.
+ *
+ * Single-buffer receive only (RQ_ENET_TYPE_ONLY_SOP).
+ */
+static inline void fnic2_queue_rq_desc(struct vnic_rq *rq,
+				       void *os_buf, dma_addr_t dma_addr,
+				       uint16_t len)
+{
+	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+
+	rq_enet_desc_enc(desc,
+		(uint64_t)dma_addr | VNIC_PADDR_TARGET,
+		RQ_ENET_TYPE_ONLY_SOP,
+		(uint16_t)len);
+
+	vnic_rq_post(rq, os_buf, 0, dma_addr, len);
+}
+
+int fnic2_get_vnic_config(struct fnic2 *);
+int fnic2_alloc_vnic_resources(struct fnic2 *);
+void fnic2_free_vnic_resources(struct fnic2 *);
+void fnic2_get_res_counts(struct fnic2 *);
+int fnic2_set_nic_config(struct fnic2 *fnic2, uint8_t rss_default_cpu,
+			 uint8_t rss_hash_type, uint8_t rss_hash_bits, uint8_t rss_base_cpu,
+			 uint8_t rss_enable, uint8_t tso_ipid_split_en,
+			 uint8_t ig_vlan_strip_en);
+
+#endif /* _FNIC2_RES_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 03/10] staging: fnic2 add fip handling
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
  2018-04-05 21:16 ` [PATCH 01/10] staging: fnic2 add initialization Oliver Smith-Denny
  2018-04-05 21:17 ` [PATCH 02/10] staging: fnic2 add resource allocation Oliver Smith-Denny
@ 2018-04-05 21:18 ` Oliver Smith-Denny
  2018-04-06  5:08   ` Greg Kroah-Hartman
  2018-04-05 21:19 ` [PATCH 04/10] staging: fnic2 add fdls system Oliver Smith-Denny
                   ` (6 subsequent siblings)
  9 siblings, 1 reply; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:18 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain structures and functions for handling
FIP frames.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fip.c | 804 ++++++++++++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fip.h | 336 +++++++++++++++++
 2 files changed, 1140 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fip.c
 create mode 100644 drivers/staging/fnic2/src/fip.h

diff --git a/drivers/staging/fnic2/src/fip.c b/drivers/staging/fnic2/src/fip.c
new file mode 100644
index 0000000..42c2cf3
--- /dev/null
+++ b/drivers/staging/fnic2/src/fip.c
@@ -0,0 +1,804 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*! \file */
+#include "fnic2.h"
+#include "fip.h"
+
+extern struct workqueue_struct *fip_event_queue;
+
+void fnic2_fcoe_send_vlan_req(struct fnic2 *fnic2);
+static void fnic2_fcoe_start_fcf_discovery(struct fnic2 *fnic2);
+static void fnic2_fcoe_start_flogi(struct fnic2 *fnic2);
+static void fnic2_fcoe_process_cvl(struct fnic2 *fnic2, struct fip_header *fiph);
+static void fnic2_vlan_discovery_timeout(struct fnic2 *fnic2);
+
+int drop_rsp = true;
+
+/****************************** Functions ***********************************/
+
+/**
+ * fnic2_fcoe_reset_vlans
+ *
+ * Frees up the list of discovered vlans
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+void fnic2_fcoe_reset_vlans(struct fnic2 *fnic2)
+{
+	unsigned long flags;
+	struct fcoe_vlan *vlan, *next;
+
+        pr_debug("fnic2 0x%pK enter reset vlan\n", fnic2);
+	spin_lock_irqsave(&fnic2->vlans_lock, flags);
+	if (!list_empty(&fnic2->vlan_list)) {
+	    list_for_each_entry_safe(vlan, next, &fnic2->vlan_list, list) {
+		list_del(&vlan->list);
+		kfree(vlan);
+	    }
+	}
+
+	spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+        pr_debug("fnic2 0x%pK reset vlan done\n", fnic2);
+}
+
+/*
+ * fnic2_fcoe_send_vlan_req
+ *
+ * Sends FIP vlan request to all FCFs MAC
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+void fnic2_fcoe_send_vlan_req(struct fnic2 *fnic2)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+	uint64_t vlan_tov;
+
+	int fr_len;
+	struct fip_vlan_req vlan_req;
+
+	pr_debug("fnic2 0x%pK Enter send vlan req\n", fnic2);
+	fnic2_fcoe_reset_vlans(fnic2);
+
+	fnic2->set_vlan(fnic2, 0);
+	pr_debug("fnic2 0x%pK set vlan done\n", fnic2);
+
+	fr_len = sizeof(struct fip_vlan_req);
+
+	pr_debug("got MAC %x %x %x %x %x %x\n",
+		lport->hwmac[0], lport->hwmac[1], lport->hwmac[2],
+		lport->hwmac[3], lport->hwmac[4], lport->hwmac[5]);
+
+	memcpy(&vlan_req, &fip_vlan_reqmpl, fr_len);
+	memcpy(vlan_req.eth.smac, lport->hwmac, ETH_ALEN);
+	memcpy(vlan_req.mac_desc.mac, lport->hwmac, ETH_ALEN);
+
+	fnic2_send_fip_frame(lport, &vlan_req, fr_len);
+	pr_debug("fnic2 0x%pK vlan req frame sent\n", fnic2);
+
+	lport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+
+	pr_debug("fnic2 0x%pK set timer\n", fnic2);
+	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+	mod_timer(&fnic2->retry_fip_timer, round_jiffies(vlan_tov));
+	pr_debug("fnic2 0x%pK timer done\n", fnic2);
+}
+
+/**
+ * fnic2_fcoe_process_vlan_resp
+ *
+ * Processes the vlan response from one FCF and populates VLAN list.
+ * Will wait for responses from multiple FCFs until timeout.
+ *
+ * @param fnic2 fnic2 driver instance
+ * @param fiph received fip frame
+ */
+
+static void fnic2_fcoe_process_vlan_resp(struct fnic2 *fnic2, struct fip_header *fiph)
+{
+	struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *) fiph;
+	uint16_t vid;
+	int num_vlan = 0;
+	int cur_desc, desc_len;
+	struct fcoe_vlan *vlan;
+	struct fip_vlan_desc *vlan_desc;
+	unsigned long flags;
+
+        pr_debug("fnic2 0x%pK got vlan resp\n", fnic2);
+
+	desc_len = ntohs(vlan_notif->fip.desc_len);
+        pr_debug("desc_len %d\n", desc_len);
+
+	spin_lock_irqsave(&fnic2->vlans_lock, flags);
+
+	cur_desc = 0;
+	while (desc_len > 0) {
+		vlan_desc = (struct fip_vlan_desc *)
+		    (((char *)vlan_notif->vlans_desc) + cur_desc * 4);
+		if (vlan_desc->type == FIP_TYPE_VLAN) {
+			if (vlan_desc->len != 1) {
+				pr_debug("Invalid descriptor length %x in VLan response\n",
+					vlan_desc->len);
+
+			}
+			num_vlan++;
+			vid = ntohs(vlan_desc->vlan);
+			pr_debug("process_vlan_resp: FIP VLAN %d\n", vid);
+			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
+
+			if (!vlan) {
+				/* retry from timer */
+				spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+				goto out;
+			}
+			memset(vlan, 0, sizeof(struct fcoe_vlan));
+			vlan->vid = vid & 0x0fff;
+			vlan->state = FIP_VLAN_AVAIL;
+			list_add_tail(&vlan->list, &fnic2->vlan_list);
+			break;
+		} else {
+                        pr_debug("Invalid descriptor type %x in VLan response\n",
+				vlan_desc->type);
+			// Note: a type=2 descriptor (FIP MAC Address
+			// Descriptor) has also been observed here
+		}
+		cur_desc += vlan_desc->len;
+		desc_len -= vlan_desc->len;
+	}
+
+	/* any VLAN descriptors present ? */
+	if (num_vlan == 0) {
+		pr_debug("fnic2 0x%pK No VLAN descriptors in FIP VLAN response\n", fnic2);
+	}
+
+	spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+
+out:
+	return;
+}
+
+/**
+ * fnic2_fcoe_start_fcf_discovery
+ *
+ * Starts FIP FCF discovery in a selected vlan
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+static void fnic2_fcoe_start_fcf_discovery(struct fnic2 *fnic2)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+	uint64_t fcs_tov;
+
+	int fr_len;
+	struct fip_discovery disc_sol;
+
+	pr_debug("fnic2 0x%pK start fcf discovery\n", fnic2);
+	fr_len = sizeof(struct fip_discovery);
+	memset(lport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+
+	memcpy(&disc_sol, &fip_discoverympl, fr_len);
+	memcpy(disc_sol.eth.smac, lport->hwmac, ETH_ALEN);
+	memcpy(disc_sol.mac_desc.mac, lport->hwmac, ETH_ALEN);
+	lport->selected_fcf.fcf_priority = 0xFF;
+
+	disc_sol.name_desc.name = cpu_to_be64(lport->wwnn);
+	fnic2_send_fip_frame(lport, &disc_sol, fr_len);
+
+	lport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED;
+
+	fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV);
+	mod_timer(&fnic2->retry_fip_timer, round_jiffies(fcs_tov));
+
+	pr_debug("fnic2 0x%pK Started FCF discovery", fnic2);
+
+}
+
+/*
+ * fnic2_fcoe_fip_discovery_resp
+ *
+ * Processes FCF advertisements.
+ * They can be:
+ * solicited   Sent in response of a discover FCF FIP request
+ *             We will only store the information of the FCF with
+ *             highest priority.
+ *             We wait until timeout in case of multiple FCFs.
+ * unsolicited Sent periodically by the FCF for keep alive.
+ *             If FLOGI is in progress or completed and the advertisement is
+ *             received by our selected FCF, refresh the keep alive timer.
+ *
+ * @param fnic2 fnic2 driver instance
+ * @param fiph received frame
+ */
+
+static void fnic2_fcoe_fip_discovery_resp(struct fnic2 *fnic2, struct fip_header *fiph)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+	struct fip_disc_adv *disc_adv = (struct fip_disc_adv *) fiph;
+	uint64_t fcs_ka_tov;
+	int desc_len = ntohs(disc_adv->fip.desc_len);
+
+        pr_debug("fnic2 0x%pK In fcf discovery resp\n", fnic2);
+        pr_debug("fip state %d\n", lport->fip.state);
+
+	if (!(desc_len == 12)) {
+		pr_debug("struct fip_disc_adv invalid Descriptor List len %x\n",
+			desc_len);
+	}
+        if (!((disc_adv->prio_desc.type == 1) && (disc_adv->prio_desc.len == 1)) ||
+		!((disc_adv->mac_desc.type == 2) && (disc_adv->mac_desc.len == 2)) ||
+		!((disc_adv->name_desc.type == 4) && (disc_adv->name_desc.len == 3)) ||
+		!((disc_adv->fabric_desc.type == 5) &&
+		(disc_adv->fabric_desc.len == 4)) ||
+		!((disc_adv->fka_adv_desc.type == 12) &&
+		(disc_adv->fabric_desc.len == 2))) {// this len comes 4 ??
+		pr_debug("struct fip_disc_adv invalid Descriptor type and len mix:  type %x len %x | type %x len %x | type %x len%x |type %x len %x | type %x len %x\n",
+			disc_adv->prio_desc.type, disc_adv->prio_desc.len,
+			disc_adv->mac_desc.type, disc_adv->mac_desc.len,
+			disc_adv->name_desc.type, disc_adv->name_desc.len,
+			disc_adv->fabric_desc.type, disc_adv->fabric_desc.len,
+			disc_adv->fka_adv_desc.type, disc_adv->fabric_desc.len);
+	}
+
+	if (lport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) {
+		if (ntohs(disc_adv->fip.flags) & FIP_FLAG_S) {
+			pr_debug("fnic2 0x%pK Solicited adv\n", fnic2);
+
+			if ((disc_adv->prio_desc.priority <
+				lport->selected_fcf.fcf_priority) &&
+				(ntohs(disc_adv->fip.flags) & FIP_FLAG_A)) {
+
+				pr_debug("fnic2 0x%pK FCF Available\n", fnic2);
+				memcpy(lport->selected_fcf.fcf_mac,
+					disc_adv->mac_desc.mac, ETH_ALEN);
+				lport->selected_fcf.fcf_priority =
+					disc_adv->prio_desc.priority;
+				lport->selected_fcf.fka_adv_period =
+					ntohl(disc_adv->fka_adv_desc.fka_adv);
+				pr_debug("adv time %d",
+					lport->selected_fcf.fka_adv_period);
+				lport->selected_fcf.ka_disabled =
+					(disc_adv->fka_adv_desc.rsvd_D & 1);
+			}
+		} else {
+			// ignore
+		}
+	} else if ((lport->fip.state == FDLS_FIP_FLOGI_STARTED) ||
+		(lport->fip.state == FDLS_FIP_FLOGI_COMPLETE)) {
+		if (!(ntohs(disc_adv->fip.flags) & FIP_FLAG_S)) {
+			//  same fcf
+			if (memcmp(lport->selected_fcf.fcf_mac,
+				disc_adv->mac_desc.mac, ETH_ALEN) == 0) {
+				if (lport->selected_fcf.fka_adv_period !=
+					ntohl(disc_adv->fka_adv_desc.fka_adv)) {
+					lport->selected_fcf.fka_adv_period =
+						ntohl(disc_adv->fka_adv_desc.fka_adv);
+					pr_debug("change fka to %d",
+						lport->selected_fcf.fka_adv_period);
+				}
+				if (!((lport->selected_fcf.ka_disabled) ||
+					(lport->selected_fcf.fka_adv_period == 0))) {
+
+					fcs_ka_tov = jiffies +
+						3 * msecs_to_jiffies(lport->selected_fcf.fka_adv_period);
+					mod_timer(&fnic2->fcs_ka_timer, round_jiffies(fcs_ka_tov));
+
+				}
+			}
+		}
+	}
+}
+
+/*
+ * fnic2_fcoe_start_flogi
+ *
+ * Sends FIP FLOGI to the selected FCF
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+static void fnic2_fcoe_start_flogi(struct fnic2 *fnic2)
+{
+        struct fnic2_lport *lport = &fnic2->lport;
+
+	int fr_len;
+	struct fip_flogi flogi_req;
+	uint64_t flogi_tov;
+
+	fr_len = sizeof(struct fip_flogi);
+        pr_debug("fnic2 0x%pK Start fip FLOGI\n", fnic2);
+
+	memcpy(&flogi_req, &fip_flogimpl, fr_len);
+	memcpy(flogi_req.eth.smac, lport->hwmac, ETH_ALEN);
+	if (lport->usefip) {
+	    memcpy(flogi_req.eth.dmac, lport->selected_fcf.fcf_mac, ETH_ALEN);
+	}
+	flogi_req.flogi_desc.flogi.nport_name = cpu_to_be64(lport->wwpn);
+	flogi_req.flogi_desc.flogi.node_name = cpu_to_be64(lport->wwnn);
+
+	fnic2_send_fip_frame(lport, &flogi_req, fr_len);
+	lport->fip.flogi_retry++;
+
+	lport->fip.state = FDLS_FIP_FLOGI_STARTED;
+        flogi_tov = jiffies + msecs_to_jiffies(fnic2->config.flogi_timeout);
+	mod_timer(&fnic2->retry_fip_timer, round_jiffies(flogi_tov));
+}
+
+/*
+ * fnic2_fcoe_process_flogi_resp
+ *
+ * Processes FLOGI response from FCF.
+ * If successful saves assigned fc_id and MAC, programs firmware
+ * and starts fdls discovery.
+ * Else restarts vlan discovery.
+ *
+ * @param fnic2 fnic2 driver instance
+ * @param fiph received frame
+ */
+
+void fnic2_fcoe_process_flogi_resp(struct fnic2 *fnic2, struct fip_header *fiph)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+	struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *) fiph;
+	int desc_len;
+	uint32_t s_id;
+
+        pr_debug("fnic2 0x%pK FIP FLOGI rsp\n", fnic2);
+	desc_len = ntohs(flogi_rsp->fip.desc_len);
+	if (desc_len != 38) {
+		pr_debug("%s invalid Descriptor List len %x\n",
+			__func__, desc_len);
+	}
+	if (!((flogi_rsp->rsp_desc.type == 7) && (flogi_rsp->rsp_desc.len == 36)) ||
+		!((flogi_rsp->mac_desc.type == 2) && (flogi_rsp->mac_desc.len == 2))) {
+                pr_debug("dropping frame. %s invalid Descriptor type and len mix:\n flogi_rsp->rsp_desc.type %x flogi_rsp->rsp_desc.len %x flogi_rsp->mac_desc.type %x flogi_rsp->mac_desc.len %x\n",
+			__func__, flogi_rsp->rsp_desc.type, flogi_rsp->rsp_desc.len,
+			flogi_rsp->mac_desc.type, flogi_rsp->mac_desc.len);
+
+	}
+	s_id = ntoh24(flogi_rsp->rsp_desc.els.fchdr.s_id);
+	if ((flogi_rsp->rsp_desc.els.fchdr.f_ctl[0] != 0x98) ||
+		(flogi_rsp->rsp_desc.els.fchdr.r_ctl != 0x23) ||
+		(s_id != 0xFFFFFE) ||
+		(flogi_rsp->rsp_desc.els.fchdr.ox_id != FNIC2_FLOGI_OXID) ||
+		(flogi_rsp->rsp_desc.els.fchdr.type != 0x01)) {
+                pr_debug("fnic2_fcoe_process_flogi_resp received Flogi resp with some Invalid fc frame bits s_id %x FCTL %x R_CTL %x type %x OX_ID %x Dropping the frame\n",
+			s_id,
+			ntoh24(flogi_rsp->rsp_desc.els.fchdr.f_ctl),
+			flogi_rsp->rsp_desc.els.fchdr.r_ctl,
+			flogi_rsp->rsp_desc.els.fchdr.type,
+			flogi_rsp->rsp_desc.els.fchdr.ox_id);
+		return;
+	}
+
+	if (lport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+		pr_debug("fnic2 0x%pK rsp for pending FLOGI\n", fnic2);
+
+		del_timer_sync(&fnic2->retry_fip_timer);
+
+		if ((ntohs(flogi_rsp->fip.desc_len) == 38) &&
+			(flogi_rsp->rsp_desc.els.command == FC_LS_ACC)) {
+
+			pr_debug("fnic2 0x%pK FLOGI success\n", fnic2);
+			memcpy(lport->fpma, flogi_rsp->mac_desc.mac, ETH_ALEN);
+			lport->fcid = ntoh24(flogi_rsp->rsp_desc.els.fchdr.d_id);
+
+			lport->r_a_tov =
+				ntohl(flogi_rsp->rsp_desc.els.u.csp_flogi.r_a_tov);
+			lport->e_d_tov =
+				ntohl(flogi_rsp->rsp_desc.els.u.csp_flogi.e_d_tov);
+			memcpy(fnic2->lport.fcfmac, lport->selected_fcf.fcf_mac,
+				ETH_ALEN);
+			vnic_dev_add_addr(fnic2->vdev, flogi_rsp->mac_desc.mac);
+
+			if (fnic2_fdls_register_portid(lport, lport->fcid, NULL) != 0) {
+				pr_debug("fnic2 0x%pK flogi registration failed\n", fnic2);
+				return;
+			}
+
+			lport->fip.state = FDLS_FIP_FLOGI_COMPLETE;
+			lport->state = FNIC2_IPORT_STATE_FABRIC_DISC;
+			pr_debug("lport->state: %d\n",
+				lport->state);
+			fnic2_fdls_disc_start(lport);
+			if (!lport->selected_fcf.ka_disabled) {
+				uint64_t tov;
+
+				tov = jiffies + msecs_to_jiffies(FCOE_CTLR_ENODE_KA_TOV);
+				mod_timer(&fnic2->enode_ka_timer, round_jiffies(tov));
+
+				tov = jiffies + msecs_to_jiffies(FCOE_CTLR_VN_KA_TOV);
+				mod_timer(&fnic2->vn_ka_timer, round_jiffies(tov));
+
+			}
+		} else {
+			/*
+			 * If there's FLOGI rejects - clear all
+			 * fcf's & restart from scratch
+			 * start FCoE VLAN discovery
+			 */
+			fnic2_fcoe_send_vlan_req(fnic2);
+
+			lport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+		}
+	}
+}
+
+/*
+ * fnic2_common_fip_cleanup
+ *
+ * Cleans up FCF info and timers in case of link down/CVL
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+void fnic2_common_fip_cleanup(struct fnic2 *fnic2)
+{
+
+        struct fnic2_lport *lport = &fnic2->lport;
+
+	if (!lport->usefip)
+		return;
+        pr_debug("fnic2 0x%pK fip cleanup\n", fnic2);
+
+	// cleanup flogi from hw
+	fnic2_fw_reset_handler(fnic2);
+
+	lport->fip.state = FDLS_FIP_INIT;
+
+	del_timer_sync(&fnic2->retry_fip_timer);
+	del_timer_sync(&fnic2->fcs_ka_timer);
+	del_timer_sync(&fnic2->enode_ka_timer);
+	del_timer_sync(&fnic2->vn_ka_timer);
+
+	vnic_dev_del_addr(fnic2->vdev, lport->fpma);
+
+	memset(lport->fpma, 0, ETH_ALEN);
+	lport->fcid = 0;
+	lport->r_a_tov = 0;
+	lport->e_d_tov = 0;
+	memset(fnic2->lport.fcfmac, 0, ETH_ALEN);
+	memset(lport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+	lport->selected_fcf.fcf_priority = 0;
+	lport->selected_fcf.fka_adv_period = 0;
+	lport->selected_fcf.ka_disabled = 0;
+
+	fnic2_fcoe_reset_vlans(fnic2);
+}
+
+/*
+ * fnic2_fcoe_process_cvl
+ *
+ * Processes Clear Virtual Link from FCF
+ * Verifies that cvl is received from our current FCF for our assigned MAC
+ * Cleans up and restarts the vlan discovery
+ *
+ * @param fnic2 fnic2 driver instance
+ * @param fiph received frame
+ */
+
+static void fnic2_fcoe_process_cvl(struct fnic2 *fnic2, struct fip_header *fiph)
+{
+        struct fnic2_lport *lport = &fnic2->lport;
+	struct fip_cvl *cvl_msg = (struct fip_cvl *) fiph;
+	int i;
+	int found = false;
+
+        pr_debug("fnic2 0x%pK clear virtual link handler\n", fnic2);
+
+	if (!((cvl_msg->fcf_mac_desc.type == 2) &&
+		(cvl_msg->fcf_mac_desc.len == 2)) ||
+		!((cvl_msg->name_desc.type == 4) &&
+		(cvl_msg->name_desc.len == 3))) {
+
+                pr_debug("fnic2_fcoe_process_cvl invalid Descriptor type and len mix: fcf_mac_desc.type %x fcf_mac_desc.len %x  cvl_msg->name_desc.type %x cvl_msg->name_desc.len %x\n",
+			cvl_msg->fcf_mac_desc.type, cvl_msg->fcf_mac_desc.len,
+			cvl_msg->name_desc.type, cvl_msg->name_desc.len);
+	}
+
+	if (memcmp(lport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.mac,
+	    ETH_ALEN) == 0) {
+		for (i = 0; i < ((ntohs(fiph->desc_len) / 5) - 1); i++) {
+			if (!((cvl_msg->vn_ports_desc[i].type == 11) &&
+			    (cvl_msg->vn_ports_desc[i].len == 5))) {
+
+				pr_debug("fnic2_fcoe_process_cvl invalid Descriptor type and len mix: vn_ports_desc[i].type %d vn_ports_desc[i].len %d\n",
+					cvl_msg->vn_ports_desc[i].type,
+					cvl_msg->vn_ports_desc[i].len);
+			}
+			if (memcmp(lport->fpma,
+			    cvl_msg->vn_ports_desc[i].vn_port_mac,
+			    ETH_ALEN) == 0) {
+				found = true;
+				break;
+			}
+		}
+		if (!found)
+			return;
+		fnic2_common_fip_cleanup(fnic2);
+		fnic2_fdls_link_down(lport);
+
+		fnic2_fcoe_send_vlan_req(fnic2);
+	}
+}
+
+/*
+ * fdls_fip_recv_frame
+ *
+ * Demultiplexer for FIP frames
+ *
+ * @param fnic2 driver instance
+ * @param frame received ethernet frame
+ * @return 1 if the frame was consumed as a FIP frame, 0 otherwise
+ */
+
+int fdls_fip_recv_frame(struct fnic2 *fnic2, void *frame)
+{
+    struct eth_hdr *eth = (struct eth_hdr *) frame;
+    struct fip_header *fiph;
+    uint16_t protocol;
+    uint8_t sub;
+
+    if (eth->eth_type == ntohs(FIP_ETH_TYPE)) {
+
+	fiph = (struct fip_header *) (eth+1);
+	protocol = ntohs(fiph->protocol);
+	sub = ntohs(fiph->subcode);
+
+	pr_debug("recv fip frame op %x sub %x\n",
+		protocol, sub);
+
+	if (protocol == FIP_DISCOVERY && sub == FIP_SUBCODE_RESP) {
+		fnic2_fcoe_fip_discovery_resp(fnic2, fiph);
+	} else if (protocol == FIP_VLAN_DISC && sub == FIP_SUBCODE_RESP) {
+		fnic2_fcoe_process_vlan_resp(fnic2, fiph);
+	} else if (protocol == FIP_KA_CVL && sub == FIP_SUBCODE_RESP) {
+		fnic2_fcoe_process_cvl(fnic2, fiph);
+	} else if (protocol == FIP_FLOGI && sub == FIP_SUBCODE_RESP) {
+		fnic2_fcoe_process_flogi_resp(fnic2, fiph);
+	}
+	return 1;
+    }
+    return 0;
+}
+
+/*
+ * fnic2_handle_fip_timer
+ *
+ * Timeout handler for the FIP discovery phase.
+ * Based on the current state, starts next phase or restarts discovery
+ *
+ * @param timer the retry_fip_timer embedded in the fnic2 structure
+ */
+
+void fnic2_handle_fip_timer(struct timer_list *timer)
+{
+	struct fnic2 *fnic2 = container_of(timer, struct fnic2, retry_fip_timer);
+        struct fnic2_lport *lport = &fnic2->lport;
+
+        pr_debug("fnic2 0x%pK fip timeout\n", fnic2);
+
+	if (lport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) {
+		// pr_debug("vlan discovey timeout\n");
+		fnic2_vlan_discovery_timeout(fnic2);
+	} else if (lport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) {
+		uint8_t zmac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+		pr_debug("fnic2 0x%pK fcf discovey timeout\n", fnic2);
+		if (memcmp(lport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) {
+			fnic2_fcoe_start_flogi(fnic2);
+			if (!((lport->selected_fcf.ka_disabled) ||
+				(lport->selected_fcf.fka_adv_period == 0))) {
+				uint64_t fcf_tov;
+
+				fcf_tov = jiffies +
+					3 * msecs_to_jiffies(lport->selected_fcf.fka_adv_period);
+				mod_timer(&fnic2->fcs_ka_timer, round_jiffies(fcf_tov));
+			}
+		} else {
+			pr_debug("fnic2 0x%pK FCF discovey timeout\n", fnic2);
+			fnic2_vlan_discovery_timeout(fnic2);
+		}
+	} else if (lport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+		pr_debug("fnic2 0x%pK flogi timeout\n", fnic2);
+		if (lport->fip.flogi_retry < fnic2->config.flogi_retries) {
+			fnic2_fcoe_start_flogi(fnic2);
+		} else {
+			fnic2_vlan_discovery_timeout(fnic2);
+		}
+	}
+}
+
+/*
+ * fnic2_handle_enode_ka_timer
+ *
+ * FIP node keep alive.
+ *
+ * @param timer the enode_ka_timer embedded in the fnic2 structure
+ */
+void fnic2_handle_enode_ka_timer(struct timer_list *timer)
+{
+    struct fnic2 *fnic2 = container_of(timer, struct fnic2, enode_ka_timer);
+    struct fnic2_lport *lport = &fnic2->lport;
+    int fr_len;
+    struct fip_enode_ka enode_ka;
+    uint64_t enode_ka_tov;
+
+    pr_debug("fnic2 0x%pK ka timer\n", fnic2);
+
+    if (lport->fip.state != FDLS_FIP_FLOGI_COMPLETE) {
+	return;
+    }
+
+    fr_len = sizeof(struct fip_enode_ka);
+
+    memcpy(&enode_ka, &fip_enode_kampl, fr_len);
+    memcpy(enode_ka.eth.smac, lport->hwmac, ETH_ALEN);
+    memcpy(enode_ka.eth.dmac, lport->selected_fcf.fcf_mac, ETH_ALEN);
+    memcpy(enode_ka.mac_desc.mac, lport->hwmac, ETH_ALEN);
+
+    fnic2_send_fip_frame(lport, &enode_ka, fr_len);
+    enode_ka_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_ENODE_KA_TOV);
+    mod_timer(&fnic2->enode_ka_timer, round_jiffies(enode_ka_tov));
+}
+
+/*
+ * fnic2_handle_vn_ka_timer
+ *
+ * FIP virtual port keep alive.
+ *
+ * @param timer the vn_ka_timer embedded in the fnic2 structure
+ */
+
+void fnic2_handle_vn_ka_timer(struct timer_list *timer)
+{
+	struct fnic2 *fnic2 = container_of(timer, struct fnic2, vn_ka_timer);
+        struct fnic2_lport *lport = &fnic2->lport;
+	int fr_len;
+	struct fip_vn_port_ka vn_port_ka;
+	uint64_t vn_ka_tov;
+	uint8_t fcid[3];
+
+        pr_debug("fnic2 0x%pK vn port ka timer\n", fnic2);
+
+	if (lport->fip.state != FDLS_FIP_FLOGI_COMPLETE) {
+		return;
+	}
+
+	fr_len = sizeof(struct fip_vn_port_ka);
+
+	memcpy(&vn_port_ka, &fip_vn_port_kampl, fr_len);
+	memcpy(vn_port_ka.eth.smac, lport->hwmac, ETH_ALEN);
+        memcpy(vn_port_ka.eth.dmac, lport->selected_fcf.fcf_mac, ETH_ALEN);
+	memcpy(vn_port_ka.mac_desc.mac, lport->hwmac, ETH_ALEN);
+	memcpy(vn_port_ka.vn_port_desc.vn_port_mac, lport->fpma, ETH_ALEN);
+	hton24(fcid, lport->fcid);
+	memcpy(vn_port_ka.vn_port_desc.vn_port_id, fcid, 3);
+	vn_port_ka.vn_port_desc.vn_port_name = cpu_to_be64(lport->wwpn);
+
+	fnic2_send_fip_frame(lport, &vn_port_ka, fr_len);
+	vn_ka_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_VN_KA_TOV);
+	mod_timer(&fnic2->vn_ka_timer, round_jiffies(vn_ka_tov));
+}
+
+/*
+ * fnic2_vlan_discovery_timeout
+ *
+ * End of VLAN discovery or FCF discovery time window
+ * Start the FCF discovery if VLAN was never used
+ * Retry in case of FCF not responding or move to next VLAN
+ *
+ * @param fnic2 fnic2 driver instance
+ */
+
+static void fnic2_vlan_discovery_timeout(struct fnic2 *fnic2)
+{
+	struct fcoe_vlan *vlan;
+        struct fnic2_lport *lport = &fnic2->lport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (fnic2->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	if (!lport->usefip)
+		 return;
+
+	spin_lock_irqsave(&fnic2->vlans_lock, flags);
+	if (list_empty(&fnic2->vlan_list)) {
+		/* no vlans available, try again */
+		spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+		fnic2_fcoe_send_vlan_req(fnic2);
+		return;
+	}
+
+	vlan = list_first_entry(&fnic2->vlan_list, struct fcoe_vlan, list);
+
+	if (vlan->state == FIP_VLAN_SENT) {
+		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+			/*
+			 * no response on this vlan, remove it from the list.
+			 * Try the next vlan
+			 */
+			list_del(&vlan->list);
+			kfree(vlan);
+			vlan = NULL;
+			if (list_empty(&fnic2->vlan_list)) {
+			    /* we exhausted all vlans, restart vlan disc */
+			    spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+			    fnic2_fcoe_send_vlan_req(fnic2);
+			    return;
+			}
+			/* check the next vlan */
+			vlan = list_first_entry(&fnic2->vlan_list, struct fcoe_vlan, list);
+
+			fnic2->set_vlan(fnic2, vlan->vid);
+			vlan->state = FIP_VLAN_SENT; /* sent now */
+
+		}
+	} else {
+		fnic2->set_vlan(fnic2, vlan->vid);
+		vlan->state = FIP_VLAN_SENT; /* sent now */
+	}
+	vlan->sol_count++;
+	spin_unlock_irqrestore(&fnic2->vlans_lock, flags);
+	fnic2_fcoe_start_fcf_discovery(fnic2);
+}
+
+/*
+ * fnic2_work_on_fcs_ka_timer - finish handling fcs_ka_timer in process context
+ *
+ * We need to finish this timer in a process context so that we do
+ * not hang in fnic2_common_fip_cleanup. Here we clean up, bring the
+ * link down and restart all FIP discovery.
+ *
+ * @param work the work item being serviced
+ */
+void fnic2_work_on_fcs_ka_timer(struct work_struct *work)
+{
+	struct fnic2 *fnic2 = container_of(work, struct fnic2, fip_work);
+	struct fnic2_lport *lport = &fnic2->lport;
+
+	pr_debug("fnic2 0x%pK fcs ka timeout\n", fnic2);
+
+	fnic2_common_fip_cleanup(fnic2);
+	fnic2_fdls_link_down(lport);
+
+	lport->state = FNIC2_IPORT_STATE_FIP;
+	fnic2_fcoe_send_vlan_req(fnic2);
+}
+
+/*
+ * fnic2_handle_fcs_ka_timer
+ *
+ * No keep alives received from FCF. Defer to a work queue to clean up,
+ * bring the link down and restart all the FIP discovery.
+ *
+ * @param timer the fcs_ka_timer embedded in the fnic2 structure
+ */
+void fnic2_handle_fcs_ka_timer(struct timer_list *timer)
+{
+	struct fnic2 *fnic2 = container_of(timer, struct fnic2, fcs_ka_timer);
+	INIT_WORK(&fnic2->fip_work, fnic2_work_on_fcs_ka_timer);
+	queue_work(fip_event_queue, &fnic2->fip_work);
+}
diff --git a/drivers/staging/fnic2/src/fip.h b/drivers/staging/fnic2/src/fip.h
new file mode 100644
index 0000000..04a522e
--- /dev/null
+++ b/drivers/staging/fnic2/src/fip.h
@@ -0,0 +1,336 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FIP_H_
+#define _FIP_H_
+
+#include "fdls_fc.h"
+
+#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
+#define FIP_ETH_TYPE 0x8914
+
+#define FIP_ETH_TYPE_LE 0x1489
+#define FCOE_MAX_SIZE_LE 0x2E08
+
+enum fcoe_ctlr {
+	FCOE_CTLR_MAX_SOL       = 5,
+	FCOE_CTLR_FIPVLAN_TOV	= (3*1000),
+	FCOE_CTLR_FCS_TOV	= (3*1000),
+	FCOE_CTLR_ENODE_KA_TOV	= (8*1000),
+	FCOE_CTLR_VN_KA_TOV	= (90*1000),
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+	struct list_head	list;
+	uint16_t		vid;		/* vlan ID */
+	uint16_t		sol_count;	/* no. of sols sent */
+	uint16_t		state;		/* state */
+};
+
+enum fdls_vlan_state {
+	FIP_VLAN_AVAIL,
+	FIP_VLAN_SENT
+};
+
+enum fdls_fip_state {
+	FDLS_FIP_INIT,
+	FDLS_FIP_VLAN_DISCOVERY_STARTED,
+	FDLS_FIP_FCF_DISCOVERY_STARTED,
+	FDLS_FIP_FLOGI_STARTED,
+	FDLS_FIP_FLOGI_COMPLETE,
+};
+
+enum fip_protocol_code {
+	FIP_DISCOVERY = 1,
+	FIP_FLOGI,
+	FIP_KA_CVL,
+	FIP_VLAN_DISC
+};
+
+#define FIP_SUBCODE_REQ  1
+#define FIP_SUBCODE_RESP 2
+
+struct eth_hdr {
+	uint8_t		dmac[6];
+	uint8_t		smac[6];
+	uint16_t	eth_type;
+};
+
+struct fip_header {
+	uint16_t	ver;
+
+	uint16_t	protocol;
+	uint16_t	subcode;
+
+	uint16_t	desc_len;
+	uint16_t	flags;
+} __attribute__((__packed__));
+
+#define FIP_FLAG_S 0x2
+#define FIP_FLAG_A 0x4
+
+enum fip_desc_type {
+	FIP_TYPE_MAC		= 2,
+	FIP_TYPE_NAME_ID	= 4,
+	FIP_TYPE_MAX_FCOE	= 6,
+	FIP_TYPE_FLOGI		= 7,
+	FIP_TYPE_VX_PORT	= 11,
+	FIP_TYPE_VLAN		= 14
+};
+
+struct fip_mac_desc {
+	uint8_t	type;
+	uint8_t len;
+	uint8_t mac[6];
+}  __attribute__((__packed__));
+
+struct fip_vlan_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint16_t	vlan;
+}  __attribute__((__packed__));
+
+struct fip_vlan_req {
+	struct eth_hdr		eth;
+	struct fip_header	fip;
+	struct fip_mac_desc	mac_desc;
+}  __attribute__((__packed__));
+
+ /*
+  * Variables:
+  * eth.smac, mac_desc.mac
+  */
+struct fip_vlan_req fip_vlan_reqmpl = {
+	.eth = { .dmac = FCOE_ALL_FCFS_MAC,
+		.eth_type = FIP_ETH_TYPE_LE},
+	.fip = { .ver = 0x10,
+		 .protocol = FIP_VLAN_DISC << 8,
+		 .subcode = FIP_SUBCODE_REQ << 8,
+		 .desc_len = 2 << 8},
+	.mac_desc = {.type = FIP_TYPE_MAC, .len = 2 }
+};
+
+struct fip_vlan_notif {
+	struct fip_header	fip;
+	struct fip_vlan_desc	vlans_desc[0];
+} __attribute__((__packed__));
+
+struct fip_vn_port_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint8_t		vn_port_mac[6];
+	uint8_t		rsvd[1];
+	uint8_t		vn_port_id[3];
+	uint64_t	vn_port_name;
+} __attribute__((__packed__));
+
+struct fip_vn_port_ka {
+	struct eth_hdr		eth;
+	struct fip_header	fip;
+	struct fip_mac_desc	mac_desc;
+	struct fip_vn_port_desc	vn_port_desc;
+} __attribute__((__packed__));
+
+/*
+ * Variables:
+ * eth.dmac (FCF MAC), eth.smac, mac_desc.mac,
+ * vn_port_desc: vn_port_mac, vn_port_id, vn_port_name
+ */
+struct fip_vn_port_ka fip_vn_port_kampl = {
+
+	.eth = {
+		.eth_type = FIP_ETH_TYPE_LE},
+	.fip = {
+		.ver = 0x10,
+		.protocol = FIP_KA_CVL << 8,
+		.subcode = FIP_SUBCODE_REQ << 8,
+		.desc_len = 7 << 8
+	       },
+	.mac_desc = {.type = FIP_TYPE_MAC, .len = 2 },
+	.vn_port_desc = {.type = FIP_TYPE_VX_PORT, .len = 5}
+};
+
+struct fip_enode_ka {
+	struct eth_hdr		eth;
+	struct fip_header	fip;
+	struct fip_mac_desc	mac_desc;
+} __attribute__((__packed__));
+
+/*
+ * Variables:
+ * eth.dmac (FCF MAC), eth.smac, mac_desc.mac
+ */
+struct fip_enode_ka fip_enode_kampl = {
+
+	.eth = {
+		.eth_type = FIP_ETH_TYPE_LE},
+	.fip = {
+		.ver = 0x10,
+		.protocol = FIP_KA_CVL << 8,
+		.subcode = FIP_SUBCODE_REQ << 8,
+		.desc_len = 2 << 8
+	       },
+	.mac_desc = {.type = FIP_TYPE_MAC, .len = 2 }
+};
+
+struct fip_name_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint8_t		rsvd[2];
+	uint64_t	name;
+} __attribute__((__packed__));
+
+struct fip_cvl {
+	struct fip_header	fip;
+	struct fip_mac_desc	fcf_mac_desc;
+	struct fip_name_desc	name_desc;
+	struct fip_vn_port_desc	vn_ports_desc[0];
+} __attribute__((__packed__));
+
+struct fip_flogi_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint16_t	rsvd;
+	struct fc_els	flogi;
+} __attribute__((__packed__));
+
+struct fip_flogi_rsp_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint16_t	rsvd;
+	struct fc_els	els;
+} __attribute__((__packed__));
+
+struct fip_flogi {
+	struct eth_hdr		eth;
+	struct fip_header	fip;
+	struct fip_flogi_desc	flogi_desc;
+	struct fip_mac_desc	mac_desc;
+} __attribute__((__packed__));
+
+struct fip_flogi_rsp {
+	struct fip_header		fip;
+	struct fip_flogi_rsp_desc	rsp_desc;
+	struct fip_mac_desc		mac_desc;
+} __attribute__((__packed__));
+
+/*
+ * Variables:
+ * eth.dmac (FCF MAC), eth.smac, flogi_desc nport_name and node_name
+ */
+struct fip_flogi fip_flogimpl = {
+
+	.eth = {
+		.eth_type = FIP_ETH_TYPE_LE},
+	.fip = {
+		.ver = 0x10,
+		.protocol = FIP_FLOGI << 8,
+		.subcode = FIP_SUBCODE_REQ << 8,
+		.desc_len = 38 << 8,
+		.flags = 0x80 },
+	.flogi_desc = {
+		.type = FIP_TYPE_FLOGI, .len = 36,
+		.flogi = {
+			.fchdr = {
+				.r_ctl = 0x22,
+				.d_id = {0xFF, 0xFF, 0xFE},
+				.type = 0x01,
+				.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00},
+				.ox_id = FNIC2_FLOGI_OXID,
+				.rx_id = 0xFFFF },
+			.command = FC_ELS_FLOGI_REQ,
+			.u.csp_flogi = {
+				.fc_ph_ver = FNIC2_FC_PH_VER,
+				.b2b_credits = FNIC2_FC_B2B_CREDIT,
+				.b2b_rdf_size = FNIC2_FC_B2B_RDF_SZ },
+			.spc3 = {0x88, 0x00}
+		}
+	},
+	.mac_desc = {.type = FIP_TYPE_MAC, .len = 2 }
+};
+
+struct fip_fcoe_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint16_t	max_fcoe_size;
+} __attribute__((__packed__));
+
+struct fip_discovery {
+	struct eth_hdr		eth;
+	struct fip_header	fip;
+	struct fip_mac_desc	mac_desc;
+	struct fip_name_desc	name_desc;
+	struct fip_fcoe_desc	fcoe_desc;
+} __attribute__((__packed__));
+
+/*
+ * Variables:
+ * eth.smac, mac_desc.mac, name_desc.name
+ */
+struct fip_discovery fip_discoverympl = {
+
+	.eth = {.dmac = FCOE_ALL_FCFS_MAC,
+		.eth_type = FIP_ETH_TYPE_LE},
+	.fip = {
+		.ver = 0x10, .protocol = FIP_DISCOVERY << 8,
+		.subcode = FIP_SUBCODE_REQ << 8, .desc_len = 6 << 8,
+		.flags = 0x80},
+	.mac_desc = {.type = FIP_TYPE_MAC, .len = 2 },
+	.name_desc = {.type = FIP_TYPE_NAME_ID, .len = 3},
+	.fcoe_desc = {
+	    .type = FIP_TYPE_MAX_FCOE, .len = 1,
+	    .max_fcoe_size = FCOE_MAX_SIZE_LE
+	}
+};
+
+struct fip_prio_desc {
+	uint8_t type;
+	uint8_t len;
+	uint8_t rsvd;
+	uint8_t priority;
+} __attribute__((__packed__));
+
+struct fip_fabric_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint16_t	vf_id;
+	uint8_t		rsvd;
+	uint8_t		fc_map[3];
+	uint64_t	fabric_name;
+} __attribute__((__packed__));
+
+struct fip_fka_adv_desc {
+	uint8_t		type;
+	uint8_t		len;
+	uint8_t		rsvd;
+	uint8_t		rsvd_D;
+	uint32_t	fka_adv;
+} __attribute__((__packed__));
+
+struct fip_disc_adv {
+	struct fip_header	fip;
+	struct fip_prio_desc	prio_desc;
+	struct fip_mac_desc	mac_desc;
+	struct fip_name_desc	name_desc;
+	struct fip_fabric_desc	fabric_desc;
+	struct fip_fka_adv_desc	fka_adv_desc;
+} __attribute__((__packed__));
+#endif /* _FIP_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 04/10] staging: fnic2 add fdls system
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (2 preceding siblings ...)
  2018-04-05 21:18 ` [PATCH 03/10] staging: fnic2 add fip handling Oliver Smith-Denny
@ 2018-04-05 21:19 ` Oliver Smith-Denny
  2018-04-05 21:20 ` [PATCH 05/10] staging: fnic2 add LIO interface Oliver Smith-Denny
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:19 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain Fibre Channel discovery handling
and fabric services.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fdls_disc.c  | 1539 ++++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fdls_fc.h    |  403 +++++++++
 drivers/staging/fnic2/src/fdls_if.c    | 1254 ++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fnic2_fdls.h |  232 +++++
 4 files changed, 3428 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fdls_disc.c
 create mode 100644 drivers/staging/fnic2/src/fdls_fc.h
 create mode 100644 drivers/staging/fnic2/src/fdls_if.c
 create mode 100644 drivers/staging/fnic2/src/fnic2_fdls.h

diff --git a/drivers/staging/fnic2/src/fdls_disc.c b/drivers/staging/fnic2/src/fdls_disc.c
new file mode 100644
index 0000000..e955116
--- /dev/null
+++ b/drivers/staging/fnic2/src/fdls_disc.c
@@ -0,0 +1,1539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "fnic2.h"
+#include "fdls_fc.h"
+#include "fnic2_fdls.h"
+
+static void fdls_send_rpn_id(struct fnic2_lport *lport);
+struct fnic2_rport *fnic2_find_rport_by_fcid(struct fnic2_lport *lport, uint32_t fcid);
+struct fnic2_rport *fnic2_find_rport(struct fnic2_lport *lport, uint64_t wwpn);
+void fnic2_fdls_remove_rport(struct fnic2_lport *lport, struct fnic2_rport *rport);
+struct fnic2_rport *fnic2_fdls_add_rport(struct fnic2_lport *lport, struct fnic2_rport *rport);
+struct fnic2_rport *fdls_create_rport(struct fnic2_lport *lport, uint32_t fcid, uint64_t wwpn);
+void fdls_delete_rport(struct fnic2_lport *lport, struct fnic2_rport *rport);
+
+extern int fnic2_session_create(struct fnic2 *fnic2, struct fnic2_rport *rport);
+
+/* Frame initialization */
+
+/*
+ * Variables:
+ * s_id
+ */
+/* Template FLOGI request (d_id = fabric F_Port 0xFFFFFE); sender fills s_id. */
+struct fc_els fnic2_flogi_req = {
+	.fchdr = {.r_ctl = 0x22, .d_id = {0xFF, 0xFF, 0xFE},
+		.type = 0x01, .f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00},
+	.ox_id = FNIC2_FLOGI_OXID, .rx_id = 0xFFFF},
+	.command = FC_ELS_FLOGI_REQ,
+	.u.csp_flogi = {.fc_ph_ver = FNIC2_FC_PH_VER,
+	.b2b_credits = FNIC2_FC_B2B_CREDIT,
+	.b2b_rdf_size = FNIC2_FC_B2B_RDF_SZ},
+	.spc3 = {0x88, 0x00}
+};
+
+/*
+ * Variables:
+ * s_id, d_id(nport logins), ox_id(nport logins), nport_name, node_name
+ */
+/* Template PLOGI request (d_id defaults to directory server 0xFFFFFC). */
+struct fc_els fnic2_plogi_req = {
+	.fchdr = {.r_ctl = 0x22, .d_id = {0xFF, 0xFF, 0xFC}, .type = 0x01,
+		.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00}, .ox_id = FNIC2_PLOGI_FABRIC_OXID,
+		.rx_id = 0xFFFF},
+	.command = FC_ELS_PLOGI_REQ,
+	.u.csp_plogi = {.fc_ph_ver = FNIC2_FC_PH_VER,
+		.b2b_credits = FNIC2_FC_B2B_CREDIT, .features = 0x0080,
+		.b2b_rdf_size = FNIC2_FC_B2B_RDF_SZ,
+		.total_concur_seqs = FNIC2_FC_CONCUR_SEQS,
+		.ro_info = FNIC2_FC_RO_INFO, .e_d_tov = FNIC2_E_D_TOV},
+	.spc3 = {0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+		 0x00, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}
+};
+
+/*
+ * Variables:
+ * s_id, d_id(nport logins), ox_id(nport logins), nport_name, node_name
+ */
+/* Template PLOGI LS_ACC response; ids and names are filled per request. */
+struct fc_els fnic2_plogi_resp = {
+	.fchdr = {.r_ctl = 0x23, .type = 0x01,
+		.f_ctl = {FNIC2_ELS_REP_FCTL, 0x00, 0x00} },
+	.command = FC_LS_ACC,
+	.u.csp_plogi = {.fc_ph_ver = FNIC2_FC_PH_VER,
+		.b2b_credits = FNIC2_FC_B2B_CREDIT, .features = 0x0080,
+		.b2b_rdf_size = FNIC2_FC_B2B_RDF_SZ,
+	.total_concur_seqs = FNIC2_FC_CONCUR_SEQS,
+	.ro_info = FNIC2_FC_RO_INFO, .e_d_tov = FNIC2_E_D_TOV},
+	.spc3 = {0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
+		 0x00, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}
+};
+
+/*
+ * Variables:
+ * s_id, port_id, port_name
+ */
+/* Template RPN_ID name-server request (register port name with fabric). */
+struct fc_rpn_id fnic2_rpn_id_req = {
+	.fchdr = {.r_ctl = 0x02, .d_id = {0xFF, 0xFF, 0xFC}, .type = 0x20,
+		.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00}, .ox_id = FNIC2_RPN_REQ_OXID,
+		.rx_id = 0xFFFF},
+	.fc_ct_hdr = {.rev = 0x01, .fs_type = 0xFC, .fs_subtype = 0x02,
+			.command = FC_CT_RPN_CMD}
+};
+
+/*
+ * Variables:
+ * s_id, port_id, port_name
+ */
+/* Template RFT_ID name-server request (register supported FC-4 types). */
+struct fc_rft_id fnic2_rft_id_req = {
+	.fchdr = {.r_ctl = 0x02, .d_id = {0xFF, 0xFF, 0xFC}, .type = 0x20,
+		.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00}, .ox_id = FNIC2_RFT_REQ_OXID,
+		.rx_id = 0xFFFF},
+	.fc_ct_hdr = {.rev = 0x01, .fs_type = 0xFC, .fs_subtype = 0x02,
+			.command = FC_CT_RFT_CMD}
+};
+
+/*
+ * Variables:
+ * s_id, port_id, port_name
+ */
+/*
+ * Template RFF_ID name-server request (register FC-4 features).
+ * .tgt = 1 advertises target capability for FC-4 type 0x8 (SCSI-FCP).
+ */
+struct fc_rff_id fnic2_rff_id_req = {
+	.fchdr = {.r_ctl = 0x02, .d_id = {0xFF, 0xFF, 0xFC}, .type = 0x20,
+		.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00}, .ox_id = FNIC2_RFF_REQ_OXID,
+		.rx_id = 0xFFFF},
+	.fc_ct_hdr = {.rev = 0x01, .fs_type = 0xFC, .fs_subtype = 0x02,
+			.command = FC_CT_RFF_CMD},
+	.tgt = 0x1,
+	.fc4_type = 0x8
+};
+
+/*
+ * Variables:
+ * d_id, s_id, ox_id
+ */
+/* Template PRLI LS_ACC response with a single FCP service-parameter page. */
+struct fc_els_prli fnic2_prli_resp = {
+	.fchdr = {.r_ctl = 0x23, .type = 0x01,
+		.f_ctl = {FNIC2_ELS_REP_FCTL, 0x00, 0x00} },
+	.command = FC_LS_ACC,
+	.page_len = 16,
+	.payload_len = 0x1400,
+	.sp = {.type = 0x08, .flags = 0x0021, .csp = 0x12040000}
+};
+
+/*
+ * Variables:
+ * d_id, ox_id, rx_id
+ */
+/* Generic ELS LS_ACC template (d_id defaults to fabric controller). */
+struct fc_els_acc fnic2_els_acc = {
+	.fchdr = {.r_ctl = 0x23, .d_id = {0xFF, 0xFF, 0xFD}, .type = 0x01,
+		.f_ctl = {FNIC2_ELS_REP_FCTL, 0x00, 0x00} },
+	.command = FC_LS_ACC,
+};
+
+/* Generic ELS LS_RJT template; reason codes are filled by the sender. */
+struct fc_els_reject fnic2_els_rjt = {
+	.fchdr = {.r_ctl = 0x23, .type = 0x01, .f_ctl = {FNIC2_ELS_REP_FCTL, 0x00, 0x00} },
+	.command = FC_LS_REJ,
+};
+
+/*
+ * Variables:
+ * d_id, ox_id, rx_id
+ */
+/*
+ * Template BA_ACC (basic accept) reply to a received ABTS.
+ * Brace spacing fixed to "} }" to match every other initializer in this
+ * file and checkpatch expectations.
+ */
+struct fc_abts_ba_acc fnic2_ba_acc = {
+	.fchdr = {.r_ctl = 0x84,
+		.f_ctl = {FNIC2_FCP_RSP_FCTL, 0x00, 0x00} },
+	.low_seq_cnt = 0, .high_seq_cnt = 0xFFFF,
+};
+
+
+/*
+ * Variables:
+ * d_id, ox_id, rx_id, fcid, wwpn
+ */
+/* Template LOGO request; d_id, ox_id, fcid and wwpn are filled per use. */
+struct fc_logo_req fnic2_logo_req = {
+	.fchdr = {.r_ctl = 0x22, .type = 0x01,
+		.f_ctl = {FNIC2_ELS_REQ_FCTL, 0x00, 0x00} },
+	.command = FC_ELS_LOGO,
+};
+
+/*
+ * Target exchange-id allocator: one byte per ox_id in the range
+ * [FDLS_PLOGI_OXID_BASE, FDLS_PLOGI_OXID_BASE + FDLS_TGT_OXID_POOL_SZ).
+ * Each 0x200-wide block (PLOGI, PRLI, ...) is carved from the same pool.
+ * NOTE(review): pool is global, not per-lport, and has no locking —
+ * confirm single-context use.
+ */
+#define FDLS_TGT_OXID_POOL_SZ   (0x800)
+#define FDLS_TGT_OXID_BLOCK_SZ  (0x200)
+#define FDLS_PLOGI_OXID_BASE    (0x2000)
+#define FDLS_PRLI_OXID_BASE     (0x2200)
+
+uint8_t tgt_ox_id_pool[FDLS_TGT_OXID_POOL_SZ];
+
+/* Private Functions */
+static void fnic2_fdls_start_plogi(struct fnic2_lport *lport);
+
+/* Reset the exchange-id pool so every target ox_id is available again. */
+static void fdls_init_tgt_ox_id_pool(void)
+{
+	memset(tgt_ox_id_pool, 0, sizeof(tgt_ox_id_pool));
+}
+
+/*
+ * Claim the first free ox_id in the 0x200-entry block starting at @base.
+ * Returns the allocated ox_id, or 0xFFFF when the block is exhausted.
+ */
+static uint16_t fdls_alloc_tgt_ox_id(uint16_t base)
+{
+	int idx = base - FDLS_PLOGI_OXID_BASE;
+	int limit = idx + FDLS_TGT_OXID_BLOCK_SZ;
+
+	while (idx < limit) {
+		if (!tgt_ox_id_pool[idx]) {
+			tgt_ox_id_pool[idx] = 1;
+			return idx + FDLS_PLOGI_OXID_BASE;
+		}
+		idx++;
+	}
+	return 0xFFFF;
+}
+
+/*
+ * Release a target ox_id back to the pool.
+ * Added a range check: the original indexed tgt_ox_id_pool[] with
+ * (ox_id - FDLS_PLOGI_OXID_BASE) unchecked, so a stray ox_id below the
+ * base or past the pool end corrupted adjacent memory.
+ */
+static void fdls_free_tgt_ox_id(uint16_t ox_id)
+{
+	if (ox_id < FDLS_PLOGI_OXID_BASE ||
+	    ox_id >= FDLS_PLOGI_OXID_BASE + FDLS_TGT_OXID_POOL_SZ) {
+		pr_debug("%s Out of range ox_id:%x\n", __func__, ox_id);
+		return;
+	}
+	if (tgt_ox_id_pool[ox_id - FDLS_PLOGI_OXID_BASE] != 1)
+		pr_debug("%s Free unused ox_id:%x\n",
+			 __func__, ox_id);
+	tgt_ox_id_pool[ox_id - FDLS_PLOGI_OXID_BASE] = 0;
+}
+
+/*
+ * fdls_process_flogi_rsp - handle the fabric's reply to our FLOGI
+ * @lport:    local port that sent the FLOGI
+ * @fchdr:    FC header of the received reply
+ * @rx_frame: full received frame (needed for FCoE MAC learning)
+ *
+ * On LS_ACC: cancel the retry timer, adopt the assigned FCID and the
+ * fabric's service parameters, register the port id, program the FPMA
+ * MAC and start the fabric PLOGI.  On a BUSY LS_RJT: flag a retry for
+ * the timer routine.  Fixes vs. original: "0XFC" -> "0xFC" (consistent
+ * lowercase hex prefix) and the "!= 0" comparison joined on one line.
+ */
+static void fdls_process_flogi_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr, void *rx_frame)
+{
+	struct fnic2_fdls_fabric *fabric = &lport->fabric;
+	struct fc_els *flogi_rsp = (struct fc_els *)fchdr;
+	uint8_t *fcid;
+	int rdf_size;
+	struct fc_els_reject *els_rjt;
+	/* FPMA: 0x0EFC00 FC-MAP prefix; low 3 bytes are patched with our FCID */
+	uint8_t fcmac[6] = {0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00};
+
+	pr_debug("lport 0x%pK fnic2_fcoe_process_flogi_resp\n", lport);
+
+	if (fabric->state != FDLS_STATE_FABRIC_FLOGI) {
+		pr_debug("Flogi resp recvd in state %d. Dropping\n",
+			fabric->state);
+		return;
+	}
+
+	switch (flogi_rsp->command) {
+	case FC_LS_ACC:
+
+		if (lport->fabric.timer_pending) {
+			pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+			del_timer_sync(&fabric->retry_timer);
+		}
+		lport->fabric.timer_pending = 0;
+		lport->fabric.retry_counter = 0;
+
+		/* The fabric assigns our FCID in the reply's d_id */
+		fcid = fchdr->d_id;
+		lport->fcid = ntoh24(fcid);
+
+		pr_debug("Flogi resp Accepted: 0x%08x\n", lport->fcid);
+
+		/* Learn the Service Params */
+		rdf_size = ntohl(flogi_rsp->u.csp_flogi.b2b_rdf_size);
+		if ((rdf_size >= FNIC2_FCOE_MIN_FRAME_SZ) &&
+		    (rdf_size < FNIC2_FCOE_MAX_FRAME_SZ))
+			lport->mfs = rdf_size;
+
+		pr_debug("mfs from fabric:%d, set:%d\n", rdf_size, lport->mfs);
+		lport->r_a_tov = ntohl(flogi_rsp->u.csp_flogi.r_a_tov);
+		lport->e_d_tov = ntohl(flogi_rsp->u.csp_flogi.e_d_tov);
+
+		if (flogi_rsp->u.csp_flogi.features & FNIC2_FC_EDTOV_NSEC)
+			lport->e_d_tov = lport->e_d_tov / FNIC2_NSEC_TO_MSEC;
+
+		pr_debug("from fabric:%d, %d\n", lport->r_a_tov, lport->e_d_tov);
+
+		fnic2_fdls_learn_fcoe_macs(lport, rx_frame, fcid);
+
+		if (fnic2_fdls_register_portid(lport, lport->fcid, rx_frame) != 0) {
+			pr_debug("lport 0x%pK flogi registration failed\n", lport);
+			break;
+		}
+
+		memcpy(&fcmac[3], fcid, 3);
+		pr_debug("Adding vnic_dev_mac addr:%02x %02x %02x %02x %02x %02x\n",
+			fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], fcmac[5]);
+		vnic_dev_add_addr(lport->fnic2->vdev, fcmac);
+
+		fnic2_fdls_start_plogi(lport);
+		break;
+
+	case FC_LS_REJ:
+		els_rjt = (struct fc_els_reject *)fchdr;
+		if (((els_rjt->reason_code == FC_ELS_RJT_LOGICAL_BUSY) ||
+		    (els_rjt->reason_code == FC_ELS_RJT_BUSY)) &&
+		    (fabric->retry_counter < lport->max_flogi_retries)) {
+
+			pr_debug("lport 0x%pK Flogi returned FC_LS_REJ BUSY retry from timer routine..\n",
+				lport);
+
+			/* Retry Flogi again from the timer routine. */
+			fabric->flags |= FNIC2_FDLS_RETRY_FRAME;
+		} else {
+			pr_debug("lport 0x%pK Flogi returned FC_LS_REJ... halting discovery.\n", lport);
+			if (fabric->timer_pending) {
+				pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+				del_timer_sync(&fabric->retry_timer);
+			}
+			fabric->timer_pending = 0;
+			fabric->retry_counter = 0;
+		}
+		break;
+
+	default:
+		pr_debug("Flogi resp Not Accepted: %x\n", flogi_rsp->command);
+		/* TBD Handle it */
+		break;
+	}
+}
+
+/*
+ * fdls_process_fabric_plogi_rsp - handle the directory server's PLOGI reply
+ * @lport: local port performing fabric discovery
+ * @fchdr: FC header of the received reply
+ *
+ * On LS_ACC: cancel the retry timer and advance to RPN_ID registration.
+ * On a BUSY LS_RJT under the retry budget: flag a retry for the timer
+ * routine; otherwise halt discovery.  Removed the unreachable "break"
+ * that followed an if/else in which both branches return.
+ */
+static void fdls_process_fabric_plogi_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_els *plogi_rsp = (struct fc_els *)fchdr;
+	struct fc_els_reject *els_rjt = (struct fc_els_reject *)fchdr;
+
+	pr_debug("lport 0x%pK fnic2_fcoe_process_fabric_plogi_resp", lport);
+	if (lport->fabric.state != FDLS_STATE_FABRIC_PLOGI) {
+		pr_debug("plogi resp recvd in state %d. dropping\n",
+			 lport->fabric.state);
+		return;
+	}
+
+	switch (plogi_rsp->command) {
+	case FC_LS_ACC:
+		if (lport->fabric.timer_pending) {
+			pr_debug("lport 0x%pK Canceling fabric disc timer", lport);
+			del_timer_sync(&(lport->fabric.retry_timer));
+		}
+		lport->fabric.timer_pending = 0;
+		lport->fabric.retry_counter = 0;
+		fdls_send_rpn_id(lport);
+		lport->fabric.state = FDLS_STATE_RPN_ID;
+		break;
+
+	case FC_LS_REJ:
+		if (((els_rjt->reason_code == FC_ELS_RJT_LOGICAL_BUSY) ||
+			(els_rjt->reason_code == FC_ELS_RJT_BUSY)) &&
+			(lport->fabric.retry_counter < lport->max_plogi_retries)) {
+			pr_debug("lport 0x%pK Fabric plogi returned FC_LS_REJ BUSY retry from timer routine..\n", lport);
+
+			/* Retry Fabric Plogi again from the timer routine. */
+			lport->fabric.flags |= FNIC2_FDLS_RETRY_FRAME;
+			return;
+		} else {
+			pr_debug("lport 0x%pK Fabric Plogi returned FC_LS_REJ...halting discovery.\n", lport);
+			if (lport->fabric.timer_pending) {
+				pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+				del_timer_sync(&(lport->fabric.retry_timer));
+			}
+
+			lport->fabric.timer_pending = 0;
+			lport->fabric.retry_counter = 0;
+			return;
+		}
+
+	default:
+		pr_debug("plogi resp Not Accepted: %x", plogi_rsp->command);
+		/* TBD Handle it */
+		break;
+	}
+}
+
+/*
+ * fdls_send_logout - build and immediately transmit a LOGO request
+ * @lport: local port; its fcid and wwpn go in the LOGO payload
+ * @fchdr: received header; its s_id becomes the LOGO destination
+ */
+void fdls_send_logout(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_logo_req logo;
+	uint8_t s_id[3];
+
+	memcpy(&logo, &fnic2_logo_req, sizeof(struct fc_logo_req));
+
+	hton24(s_id, lport->fcid);
+
+	memcpy(logo.fchdr.s_id, s_id, 3);
+	memcpy(logo.fchdr.d_id, fchdr->s_id, 3);
+	logo.fchdr.ox_id = FNIC2_LOGO_REQ_OXID;
+
+	memcpy(&logo.fcid, s_id, 3);
+	logo.wwpn = lport->wwpn;
+
+	fnic2_send_fcoe_frame(lport, &logo, sizeof(struct fc_logo_req));
+}
+
+/*
+ * fdls_construct_logo_req - build a logo request for session timeout
+ *
+ * @lport - the fnic2 local fc port
+ * @fchdr - the fc_hdr from the machine we want to logo with
+ * @sess - the session that will hold the logo request
+ *
+ * This function is used primarily in the case of a session timeout
+ * where we don't have an explicit lport or fchdr, so we need to
+ * construct one at the time that we initiate a session timer.
+ * The logo will then be used in the event of a session
+ * timeout and be freed from there.
+ *
+ * Fixes vs. original: two space-indented lines converted to tabs
+ * (kernel style), and the redundant trailing "return;" plus blank
+ * line removed.
+ */
+void fdls_construct_logo_req(struct fnic2_lport *lport,
+			     struct fc_hdr *fchdr,
+			     struct fnic2_sess *sess)
+{
+	struct fc_logo_req logo;
+	uint8_t s_id[3];
+
+	memcpy(&logo, &fnic2_logo_req, sizeof(struct fc_logo_req));
+
+	hton24(s_id, lport->fcid);
+
+	memcpy(logo.fchdr.s_id, s_id, 3);
+	memcpy(logo.fchdr.d_id, fchdr->s_id, 3);
+	logo.fchdr.ox_id = FNIC2_LOGO_REQ_OXID;
+
+	memcpy(&logo.fcid, s_id, 3);
+	logo.wwpn = lport->wwpn;
+
+	/* Stash the prebuilt frame; it is only sent if the session times out */
+	memcpy(&sess->timer_logo_req, &logo, sizeof(struct fc_logo_req));
+}
+
+/*
+ * fdls_send_logo_resp - send an LS_ACC reply to a received LOGO
+ * @lport:     local port sending the accept
+ * @req_fchdr: header of the LOGO request being answered (supplies ox_id)
+ */
+static void fdls_send_logo_resp(struct fnic2_lport *lport, struct fc_hdr *req_fchdr)
+{
+	struct fc_els_acc logo_resp;
+	uint16_t  ox_id;
+	uint8_t fcid[3];
+
+	memcpy(&logo_resp, &fnic2_els_acc, sizeof(struct fc_els_acc));
+
+	hton24(fcid, lport->fcid);
+	memcpy(&logo_resp.fchdr.s_id, fcid, 3);
+
+	ox_id = req_fchdr->ox_id;
+	logo_resp.fchdr.ox_id = ox_id;
+
+	logo_resp.fchdr.rx_id = FNIC2_LOGO_RESP_OXID; //TBD_REVISIT
+	fnic2_send_fcoe_frame(lport, &logo_resp, sizeof(struct fc_els_acc));
+}
+
+/*
+ * fdls_start_fabric_timer - arm the fabric retry timer for 2 * E_D_TOV
+ * @lport: local port whose fabric.retry_timer is (re)armed
+ *
+ * Also bumps the retry counter.  The deadline is now held in an
+ * unsigned long: jiffies and mod_timer()'s expires argument are
+ * unsigned long, so the original uint32_t truncated the value on
+ * 64-bit kernels.
+ */
+static void fdls_start_fabric_timer(struct fnic2_lport *lport)
+{
+	unsigned long fabric_tov;
+
+	lport->fabric.retry_counter++;
+	fabric_tov = jiffies + msecs_to_jiffies(2 * lport->e_d_tov);
+	mod_timer(&lport->fabric.retry_timer, round_jiffies(fabric_tov));
+	lport->fabric.timer_pending = 1;
+}
+
+/*
+ * fdls_start_abort_timer - arm the fabric retry timer for 2 * R_A_TOV
+ * @lport: local port whose fabric.retry_timer is (re)armed
+ *
+ * Used after sending an ABTS.  Deadline held in unsigned long for the
+ * same jiffies-truncation reason as fdls_start_fabric_timer().
+ */
+static void fdls_start_abort_timer(struct fnic2_lport *lport)
+{
+	unsigned long abort_tov;
+
+	abort_tov = jiffies + msecs_to_jiffies(2 * lport->r_a_tov);
+	mod_timer(&lport->fabric.retry_timer, round_jiffies(abort_tov));
+	lport->fabric.timer_pending = 1;
+}
+
+/*
+ * fdls_send_fabric_abts - abort the outstanding fabric-discovery exchange
+ * @lport: local port whose current fabric.state selects the ox_id/d_id
+ *
+ * Builds an ABTS on the stack, picks the exchange id and destination
+ * from the discovery state, sends it and arms the 2*R_A_TOV abort timer.
+ * NOTE(review): all cases except FLOGI share identical s_id/d_id setup
+ * and could be deduplicated after the switch.
+ */
+static void fdls_send_fabric_abts(struct fnic2_lport *lport)
+{
+
+	uint8_t fcid[3];
+	struct fc_hdr  fc_ABTS_s = {
+		.r_ctl  = 0x81, //ABTS
+		.d_id    = {0xFF, 0xFF, 0xFF},
+		.cs_ctl = 0x00,
+		.s_id = {0x00, 0x00, 0x00},
+		.type   = 0x00,
+		.f_ctl  = {FNIC2_REQ_ABTS_FCTL, 0x00, 0x00},
+		.seq_id = 0x00,
+		.df_ctl = 0x00,
+		.seq_cnt = 0x0000,
+		.rx_id  = 0xFFFF,
+		.param  = 0x00000000, // bit:0 =0 Abort a exchange
+	};
+
+	struct fc_hdr *pfc_ABTS = &fc_ABTS_s;
+
+	switch (lport->fabric.state) {
+	case FDLS_STATE_FABRIC_FLOGI:
+		/* Pre-FLOGI we have no FCID yet, so s_id stays zero */
+		fc_ABTS_s.ox_id = FNIC2_FLOGI_OXID;
+		fc_ABTS_s.d_id[2] = 0xFE;
+		lport->fabric.flags |= FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+		break;
+
+	case FDLS_STATE_FABRIC_PLOGI:
+		hton24(fcid, lport->fcid);
+		memcpy(pfc_ABTS->s_id, fcid, 3);
+		fc_ABTS_s.ox_id = FNIC2_PLOGI_FABRIC_OXID;
+		fc_ABTS_s.d_id[2] = 0xFC;
+		lport->fabric.flags |= FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+		break;
+
+	case FDLS_STATE_RPN_ID:
+		hton24(fcid, lport->fcid);
+		memcpy(pfc_ABTS->s_id, fcid, 3);
+		fc_ABTS_s.ox_id = FNIC2_RPN_REQ_OXID;
+		fc_ABTS_s.d_id[2] = 0xFC;
+		lport->fabric.flags |= FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+		break;
+	case FDLS_STATE_REGISTER_FC4_TYPES:
+		hton24(fcid, lport->fcid);
+		memcpy(pfc_ABTS->s_id, fcid, 3);
+		fc_ABTS_s.ox_id = FNIC2_RFT_REQ_OXID;
+		fc_ABTS_s.d_id[2] = 0xFC;
+		lport->fabric.flags |= FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+		break;
+	case FDLS_STATE_REGISTER_FC4_FEATURES:
+		hton24(fcid, lport->fcid);
+		memcpy(pfc_ABTS->s_id, fcid, 3);
+		fc_ABTS_s.ox_id = FNIC2_RFF_REQ_OXID;
+		fc_ABTS_s.d_id[2] = 0xFC;
+		lport->fabric.flags |= FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+		break;
+	default:
+		/* No exchange outstanding in other states; send as-is */
+		break;
+	}
+	pr_debug("%s %d\n", __func__, lport->fabric.state);
+
+	fnic2_send_fcoe_frame(lport, &fc_ABTS_s, sizeof(struct fc_hdr));
+	fdls_start_abort_timer(lport);
+}
+
+/*
+ * fdls_send_fabric_flogi - send FLOGI with our names and timeout values,
+ * then arm the 2*E_D_TOV retry timer.
+ * @lport: local port supplying wwpn/wwnn, mfs and timeout values
+ */
+static void fdls_send_fabric_flogi(struct fnic2_lport *lport)
+{
+	struct fc_els flogi;
+
+	memcpy(&flogi, &fnic2_flogi_req, sizeof(struct fc_els));
+	flogi.nport_name = cpu_to_be64(lport->wwpn);
+	flogi.node_name = cpu_to_be64(lport->wwnn);
+	flogi.u.csp_flogi.b2b_rdf_size = htons(lport->mfs);
+	flogi.u.csp_flogi.r_a_tov = htonl(lport->r_a_tov);
+	flogi.u.csp_flogi.e_d_tov = htonl(lport->e_d_tov);
+
+	fnic2_send_fcoe_frame(lport, &flogi, sizeof(struct fc_els));
+	fdls_start_fabric_timer(lport);
+}
+
+/*
+ * fdls_send_fabric_plogi - PLOGI to the directory server (template d_id
+ * 0xFFFFFC), then arm the retry timer.
+ * @lport: local port supplying fcid, wwpn and wwnn
+ */
+static void fdls_send_fabric_plogi(struct fnic2_lport *lport)
+{
+	struct fc_els plogi;
+	struct fc_hdr *fchdr = &plogi.fchdr;
+	uint8_t fcid[3];
+
+	pr_debug("%s lport 0x%pK\n", __func__, lport);
+
+	memcpy(&plogi, &fnic2_plogi_req, sizeof(struct fc_els));
+
+	hton24(fcid, lport->fcid);
+
+	memcpy(fchdr->s_id, fcid, 3);
+	plogi.nport_name = cpu_to_be64(lport->wwpn);
+	plogi.node_name = cpu_to_be64(lport->wwnn);
+
+	fnic2_send_fcoe_frame(lport, &plogi, sizeof(struct fc_els));
+	fdls_start_fabric_timer(lport);
+}
+
+/*
+ * fdls_send_rpn_id - register our port name with the name server
+ * (RPN_ID), then arm the retry timer.
+ * @lport: local port supplying fcid and wwpn
+ */
+static void fdls_send_rpn_id(struct fnic2_lport *lport)
+{
+	struct fc_rpn_id rpn_id;
+	uint8_t fcid[3];
+
+	memcpy(&rpn_id, &fnic2_rpn_id_req, sizeof(struct fc_rpn_id));
+
+	hton24(fcid, lport->fcid);
+
+	memcpy(&rpn_id.fchdr.s_id, fcid, 3);
+	memcpy(rpn_id.port_id, fcid, 3);
+	rpn_id.port_name = cpu_to_be64(lport->wwpn);
+
+	fnic2_send_fcoe_frame(lport, &rpn_id, sizeof(struct fc_rpn_id));
+	fdls_start_fabric_timer(lport);
+}
+
+/*
+ * fdls_send_register_fc4_types - register supported FC-4 types (RFT_ID),
+ * then arm the retry timer.
+ * @lport: local port supplying fcid
+ */
+static void fdls_send_register_fc4_types(struct fnic2_lport *lport)
+{
+	struct fc_rft_id rft_id;
+	uint8_t fcid[3];
+
+	memcpy(&rft_id, &fnic2_rft_id_req, sizeof(struct fc_rft_id));
+
+	hton24(fcid, lport->fcid);
+
+	memcpy(&rft_id.fchdr.s_id, fcid, 3);
+	memcpy(rft_id.port_id, fcid, 3);
+	/* presumably flags FC-4 TYPE 0x08 (SCSI-FCP) in the type bitmap — confirm */
+	rft_id.fc4_types[2] = 1;
+	fnic2_send_fcoe_frame(lport, &rft_id, sizeof(struct fc_rft_id));
+	fdls_start_fabric_timer(lport);
+}
+
+/*
+ * fdls_send_register_fc4_features - register FC-4 features (RFF_ID; the
+ * template advertises target capability), then arm the retry timer.
+ * @lport: local port supplying fcid
+ */
+static void fdls_send_register_fc4_features(struct fnic2_lport *lport)
+{
+	struct fc_rff_id rff_id;
+	uint8_t fcid[3];
+
+	memcpy(&rff_id, &fnic2_rff_id_req, sizeof(struct fc_rff_id));
+
+	hton24(fcid, lport->fcid);
+
+	memcpy(&rff_id.fchdr.s_id, fcid, 3);
+	memcpy(rff_id.port_id, fcid, 3);
+
+	fnic2_send_fcoe_frame(lport, &rff_id, sizeof(struct fc_rff_id));
+	fdls_start_fabric_timer(lport);
+}
+
+/*
+ * fdls_process_rpn_id_rsp - handle the name server's RPN_ID reply
+ * @lport: local port performing discovery
+ * @fchdr: FC header of the received reply (CT or LS_RJT payload)
+ *
+ * On CT accept: cancel the retry timer and issue RFT_ID.  On a BUSY
+ * reject under budget: flag a retry for the timer routine; otherwise
+ * halt discovery.
+ */
+static void fdls_process_rpn_id_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fnic2_fdls_fabric *fdls = &lport->fabric;
+	struct fc_rpn_id *rpn_rsp = (struct fc_rpn_id *)fchdr;
+	struct fc_els_reject *els_rjt = (struct fc_els_reject *)fchdr;
+	uint16_t rsp;
+
+	rsp = rpn_rsp->fc_ct_hdr.command;
+	pr_debug("%s 0x%04x\n", __func__, (uint32_t)rsp);
+
+	switch (rsp) {
+	case FC_CT_ACC:
+		if (lport->fabric.timer_pending) {
+			pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+			del_timer_sync(&fdls->retry_timer);
+		}
+		lport->fabric.timer_pending = 0;
+		fdls->retry_counter = 0;
+		fdls_send_register_fc4_types(lport);
+		fdls->state = FDLS_STATE_REGISTER_FC4_TYPES;
+		break;
+	case FC_LS_REJ:
+		if (((els_rjt->reason_code == FC_ELS_RJT_LOGICAL_BUSY) ||
+			(els_rjt->reason_code == FC_ELS_RJT_BUSY)) &&
+			(fdls->retry_counter < FDLS_RETRY_COUNT)) {
+
+			pr_debug("lport 0x%pK Rpn_id returned FC_LS_REJ BUSY retry from timer routine..\n", lport);
+
+		/*Retry again from the timer routine.*/
+			fdls->flags |= FNIC2_FDLS_RETRY_FRAME;
+		} else {
+			pr_debug("lport 0x%pK Rpn_id returned FC_LS_REJ...halting discovery.\n", lport);
+			if (fdls->timer_pending) {
+				pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+				del_timer_sync(&fdls->retry_timer);
+			}
+			fdls->timer_pending = 0;
+			fdls->retry_counter = 0;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * fdls_process_rft_id_rsp - handle the name server's RFT_ID reply
+ * @lport: local port performing discovery
+ * @fchdr: FC header of the received reply (CT or LS_RJT payload)
+ *
+ * On CT accept: cancel the retry timer and move on to FC-4 feature
+ * registration.  The original set the state to REGISTER_FC4_FEATURES
+ * without ever sending the RFF_ID request; the send is added here to
+ * mirror fdls_process_rpn_id_rsp(), which sends the next request before
+ * advancing state.
+ */
+static void fdls_process_rft_id_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fnic2_fdls_fabric *fdls = &lport->fabric;
+	struct fc_rft_id *rft_rsp = (struct fc_rft_id *)fchdr;
+	struct fc_els_reject *els_rjt = (struct fc_els_reject *)fchdr;
+	uint16_t rsp;
+
+	rsp = rft_rsp->fc_ct_hdr.command;
+	pr_debug("%s 0x%04x\n", __func__, (uint32_t)rsp);
+
+	switch (rsp) {
+	case FC_CT_ACC:
+		if (lport->fabric.timer_pending) {
+			pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+			del_timer_sync(&fdls->retry_timer);
+		}
+		lport->fabric.timer_pending = 0;
+		fdls->retry_counter = 0;
+		fdls_send_register_fc4_features(lport);
+		fdls->state = FDLS_STATE_REGISTER_FC4_FEATURES;
+		break;
+	case FC_LS_REJ:
+		if (((els_rjt->reason_code == FC_ELS_RJT_LOGICAL_BUSY) ||
+			(els_rjt->reason_code == FC_ELS_RJT_BUSY)) &&
+			(fdls->retry_counter < FDLS_RETRY_COUNT)) {
+
+			pr_debug("lport 0x%pK rft_id returned FC_LS_REJ BUSY retry from timer routine..\n",
+				lport);
+
+			/*Retry again from the timer routine.*/
+			fdls->flags |= FNIC2_FDLS_RETRY_FRAME;
+		} else {
+			pr_debug("lport 0x%pK rft_id returned FC_LS_REJ...halting discovery.\n", lport);
+			if (fdls->timer_pending) {
+				pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+				del_timer_sync(&fdls->retry_timer);
+			}
+			fdls->timer_pending = 0;
+			fdls->retry_counter = 0;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * fdls_process_rff_id_rsp - handle the name server's RFF_ID reply
+ * @lport: local port performing discovery
+ * @fchdr: FC header of the received reply (CT or LS_RJT payload)
+ *
+ * On CT accept discovery is complete and the port goes online as a
+ * target.  On a BUSY reject under budget: flag a retry for the timer
+ * routine; otherwise halt discovery.
+ */
+static void fdls_process_rff_id_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fnic2_fdls_fabric *fdls = &lport->fabric;
+	struct fc_rff_id *rff_rsp = (struct fc_rff_id *)fchdr;
+	struct fc_els_reject *els_rjt = (struct fc_els_reject *)fchdr;
+	uint16_t rsp;
+
+	rsp = rff_rsp->fc_ct_hdr.command;
+	pr_debug("%s 0x%04x\n", __func__, (uint32_t)rsp);
+
+	switch (rsp) {
+	case FC_CT_ACC:
+		if (lport->fabric.timer_pending) {
+			pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+			del_timer_sync(&fdls->retry_timer);
+		}
+		lport->fabric.timer_pending = 0;
+		fdls->retry_counter = 0;
+		// we are done
+		fdls->state = FDLS_STATE_TGT_ONLINE;
+		break;
+	case FC_LS_REJ:
+		if (((els_rjt->reason_code == FC_ELS_RJT_LOGICAL_BUSY) ||
+			(els_rjt->reason_code == FC_ELS_RJT_BUSY)) &&
+			(fdls->retry_counter < FDLS_RETRY_COUNT)) {
+
+			pr_debug("lport 0x%pK rff_id returned FC_LS_REJ BUSY retry from timer routine..\n", lport);
+
+			/*Retry again from the timer routine.*/
+			fdls->flags |= FNIC2_FDLS_RETRY_FRAME;
+		} else {
+			pr_debug("lport 0x%pK rff_id returned FC_LS_REJ...halting discovery.\n",
+				lport);
+			if (fdls->timer_pending) {
+				pr_debug("lport 0x%pK Canceling fabric disc timer\n", lport);
+				del_timer_sync(&fdls->retry_timer);
+			}
+			fdls->timer_pending = 0;
+			fdls->retry_counter = 0;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * fdls_error_fabric_disc - terminal error path for fabric discovery;
+ * currently only logs the state in which retries were exhausted.
+ * @lport: local port whose discovery failed
+ */
+static void fdls_error_fabric_disc(struct fnic2_lport *lport)
+{
+	pr_debug("FDLS discovery error from %d state\n", lport->fabric.state);
+
+	/* TBD_REVISIT what to do */
+
+}
+
+/*
+ * fdls_fabric_timer_callback - fabric retry timer expiry handler
+ * @timer: the expired fabric.retry_timer (container_of recovers lport)
+ *
+ * Per discovery state: if a BUSY reject flagged FNIC2_FDLS_RETRY_FRAME
+ * and the retry budget allows, resend the frame; otherwise the request
+ * itself timed out, so send an ABTS first and, once the ABTS has also
+ * timed out (flag FNIC2_FDLS_FABRIC_ABORT_ISSUED set), retry or fall
+ * back to fabric PLOGI / report an error.
+ * NOTE(review): the REGISTER_FC4_TYPES case handles only the reject
+ * retry and, unlike every other state, has no ABTS/timeout path —
+ * confirm whether that is intentional.
+ */
+void fdls_fabric_timer_callback(struct timer_list *timer)
+{
+	struct fnic2_fdls_fabric *fabric = container_of(timer, struct fnic2_fdls_fabric, retry_timer);
+	struct fnic2_lport *lport = container_of(fabric, struct fnic2_lport, fabric);
+
+	pr_debug("%s %d %d %d %d\n", __func__,
+		lport->fabric.timer_pending, lport->fabric.state,
+		lport->fabric.retry_counter, lport->max_flogi_retries);
+
+
+	/* TBD - do we need this? timeout while rx frame in the queue? */
+	if (!lport->fabric.timer_pending)
+		return;
+
+	lport->fabric.timer_pending = 0;
+
+	/*The fabric state indicates which frames have time out, and we retry*/
+	switch (lport->fabric.state) {
+	case FDLS_STATE_FABRIC_FLOGI:
+		// Flogi received a LS_RJT with busy we retry from here
+		if ((lport->fabric.flags & FNIC2_FDLS_RETRY_FRAME) &&
+		    (lport->fabric.retry_counter < lport->max_flogi_retries)) {
+			lport->fabric.flags &= ~FNIC2_FDLS_RETRY_FRAME;
+			fdls_send_fabric_flogi(lport);
+			return;
+		}
+		// Flogi has time out 2*ed_tov send abts
+		if (!(lport->fabric.flags & FNIC2_FDLS_FABRIC_ABORT_ISSUED)) {
+			fdls_send_fabric_abts(lport);
+		} else {
+			/* Flogi ABTS have timed out and we have waited
+			 * (2 * ra_tov), we can retry safely with same
+			 * exchange id
+			 */
+			if (lport->fabric.retry_counter <
+			    lport->max_flogi_retries) {
+				lport->fabric.flags &=
+				    ~FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+				fdls_send_fabric_flogi(lport);
+			} else {
+				fdls_error_fabric_disc(lport);
+			}
+		}
+		break;
+	case FDLS_STATE_FABRIC_PLOGI:
+		// Plogi received a LS_RJT with busy we retry from here
+		if ((lport->fabric.flags & FNIC2_FDLS_RETRY_FRAME) &&
+		    (lport->fabric.retry_counter < lport->max_plogi_retries)) {
+			lport->fabric.flags &= ~FNIC2_FDLS_RETRY_FRAME;
+			fdls_send_fabric_plogi(lport);
+			return;
+		}
+		// Plogi has time out 2*ed_tov send abts
+		if (!(lport->fabric.flags & FNIC2_FDLS_FABRIC_ABORT_ISSUED)) {
+			fdls_send_fabric_abts(lport);
+		} else {
+			/* plogi ABTS has timed out and we have waited
+			 * (2 * ra_tov) can retry safely with same
+			 * exchange id
+			 */
+			if (lport->fabric.retry_counter <
+			    lport->max_plogi_retries) {
+				lport->fabric.flags &=
+				    ~FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+				fdls_send_fabric_plogi(lport);
+			} else {
+				fdls_error_fabric_disc(lport);
+			}
+		}
+		break;
+	case FDLS_STATE_RPN_ID:
+		//Rpn_id received a LS_RJT with busy we retry from here
+		if ((lport->fabric.flags & FNIC2_FDLS_RETRY_FRAME) &&
+		    (lport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+			lport->fabric.flags &= ~FNIC2_FDLS_RETRY_FRAME;
+			fdls_send_rpn_id(lport);
+			return;
+		}
+		// RPN have timed out send abts
+		if (!(lport->fabric.flags & FNIC2_FDLS_FABRIC_ABORT_ISSUED))
+			fdls_send_fabric_abts(lport);
+		else // ABTS has timed out (2*ra_tov)
+			fnic2_fdls_start_plogi(lport); //go back to fabric Plogi
+		break;
+	case FDLS_STATE_REGISTER_FC4_TYPES:
+		// Register FC4 types received a LS_RJT with busy we retry from
+		if ((lport->fabric.flags & FNIC2_FDLS_RETRY_FRAME) &&
+			(lport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+			lport->fabric.flags &= ~FNIC2_FDLS_RETRY_FRAME;
+			fdls_send_register_fc4_types(lport);
+			return;
+		}
+		break;
+	case FDLS_STATE_REGISTER_FC4_FEATURES:
+		// Register FC4 features received a LS_RJT with busy we retry from here
+		if ((lport->fabric.flags & FNIC2_FDLS_RETRY_FRAME) &&
+			(lport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+			lport->fabric.flags &= ~FNIC2_FDLS_RETRY_FRAME;
+			fdls_send_register_fc4_features(lport);
+			return;
+		}
+		// Register FC4 features have timed out send abts
+		if (!(lport->fabric.flags & FNIC2_FDLS_FABRIC_ABORT_ISSUED))
+			fdls_send_fabric_abts(lport);
+		else // ABTS has timed out (2*ra_tov)
+			fnic2_fdls_start_plogi(lport); //go back to fabric Plogi
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * fnic2_fdls_start_flogi - kick off fabric discovery from the FLOGI step.
+ * Resets the retry counter and flags, sends the FLOGI (which arms the
+ * retry timer) and records the FLOGI state.
+ * @lport: local port to start discovery on
+ */
+static void fnic2_fdls_start_flogi(struct fnic2_lport *lport)
+{
+	lport->fabric.retry_counter = 0;
+	fdls_send_fabric_flogi(lport);
+	lport->fabric.state = FDLS_STATE_FABRIC_FLOGI;
+	lport->fabric.flags = 0;
+}
+
+/*
+ * fnic2_fdls_start_plogi - (re)enter the fabric PLOGI step, used both
+ * after FLOGI success and as the fallback when a later step's ABTS
+ * times out.  Clears only the abort flag, keeping any retry flag.
+ * @lport: local port to send the fabric PLOGI on
+ */
+static void fnic2_fdls_start_plogi(struct fnic2_lport *lport)
+{
+	lport->fabric.retry_counter = 0;
+	fdls_send_fabric_plogi(lport);
+	lport->fabric.state = FDLS_STATE_FABRIC_PLOGI;
+	lport->fabric.flags &= ~FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+}
+
+/*
+ * fdls_process_fabric_abts_rsp - handle BA_ACC/BA_RJT for a fabric ABTS
+ * @lport: local port that issued the ABTS
+ * @fchdr: FC header of the received reply
+ *
+ * Only replies from the directory server, domain controller or fabric
+ * controller are accepted.  After cancelling the timer, the aborted
+ * request is resent if the retry budget allows; name-server steps fall
+ * back to fabric PLOGI when exhausted.
+ * NOTE(review): FLOGI/PLOGI cases match on fchdr->ox_id (header) while
+ * the name-server cases match on ba_acc->ox_id (BA_ACC payload) — a
+ * BA_RJT reply would make the latter read the wrong field; confirm.
+ */
+static void fdls_process_fabric_abts_rsp(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	uint32_t s_id;
+	struct fc_abts_ba_acc *ba_acc = (struct fc_abts_ba_acc *)fchdr;
+	struct fc_abts_ba_rjt *ba_rjt;
+	uint32_t fabric_state = lport->fabric.state;
+
+	s_id = ntoh24(fchdr->s_id);
+	ba_rjt = (struct fc_abts_ba_rjt *)fchdr;
+
+	if (!((s_id == FC_DIR_SERVER) || (s_id == FC_DOMAIN_CONTR) ||
+		(s_id == FC_FABRIC_CONTROLLER))) {
+		pr_debug("fdls received abts_rsp with invalid SID %x.. dropping the frame.\n",
+			s_id);
+		return;
+	}
+
+	if (lport->fabric.timer_pending) {
+		pr_debug("lport 0x%pK Canceling fabric disc timer", lport);
+		del_timer_sync(&(lport->fabric.retry_timer));
+	}
+	lport->fabric.timer_pending = 0;
+	lport->fabric.flags &= ~FNIC2_FDLS_FABRIC_ABORT_ISSUED;
+
+	if (fchdr->r_ctl == FNIC2_BA_ACC_RCTL) {
+		pr_debug("fdls received abts_rsp BA_ACC for fabric_state %d ox_id %x\n",
+			fabric_state, ba_acc->ox_id);
+	} else if (fchdr->r_ctl == FNIC2_BA_RJT_RCTL) {
+		pr_debug("fdls received BA_RJT for fabric_state %d ox_id %x with reason code:%x reason code explanation:%x\n",
+			fabric_state, ba_rjt->fchdr.ox_id, ba_rjt->reason_code, ba_rjt->reason_explanation);
+	}
+
+	//currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT
+	switch (fabric_state) {
+	case FDLS_STATE_FABRIC_FLOGI:
+		if (fchdr->ox_id == FNIC2_FLOGI_OXID) {
+			if (lport->fabric.retry_counter < lport->max_flogi_retries)
+				fdls_send_fabric_flogi(lport);
+			else
+				fdls_error_fabric_disc(lport);
+		} else {
+			pr_debug("fdls received unknown abts_rsp ox_id %x in FDLS_STATE_FABRIC_FLOGI state.. dropping the frame\n",
+				fchdr->ox_id);
+		}
+		break;
+
+	case FDLS_STATE_FABRIC_PLOGI:
+		if (fchdr->ox_id == FNIC2_PLOGI_FABRIC_OXID) {
+			if (lport->fabric.retry_counter < lport->max_plogi_retries)
+				fdls_send_fabric_plogi(lport);
+			else
+				fdls_error_fabric_disc(lport);
+		} else {
+			pr_debug("fdls received unknown abts_rsp ox_id %x in FDLS_STATE_FABRIC_PLOGI state.. dropping the frame\n",
+				fchdr->ox_id);
+		}
+		break;
+	case FDLS_STATE_RPN_ID:
+		if (ba_acc->ox_id == FNIC2_RPN_REQ_OXID) {
+			if (lport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+				fdls_send_rpn_id(lport);
+			} else {
+				//go back to fabric Plogi
+				fnic2_fdls_start_plogi(lport);
+			}
+		} else {
+			pr_debug("fdls received unknown abts_rsp ox_id %x in FDLS_STATE_RPN_ID state.. dropping the frame\n",
+				fchdr->ox_id);
+		}
+		break;
+	case FDLS_STATE_REGISTER_FC4_TYPES:
+		if (ba_acc->ox_id == FNIC2_RFT_REQ_OXID) {
+			if (lport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+				fdls_send_register_fc4_types(lport);
+			} else {
+				//go back to fabric Plogi
+				fnic2_fdls_start_plogi(lport);
+			}
+		} else {
+			pr_debug("fdls received unknown abts_rsp ox_id %x in FDLS_STATE_REGISTER_FC4_TYPES state.. dropping the frame\n",
+				fchdr->ox_id);
+		}
+		break;
+	case FDLS_STATE_REGISTER_FC4_FEATURES:
+		if (ba_acc->ox_id == FNIC2_RFF_REQ_OXID) {
+			if (lport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+				fdls_send_register_fc4_features(lport);
+			} else {
+				//go back to fabric Plogi
+				fnic2_fdls_start_plogi(lport);
+			}
+		} else {
+			pr_debug("fdls received unknown abts_rsp ox_id %x in FDLS_STATE_REGISTER_FC4_FEATURES state.. dropping the frame\n",
+				fchdr->ox_id);
+		}
+		break;
+	default:
+		return;
+	}
+}
+
+/*
+ * fdls_process_plogi_req - handle a PLOGI received from a remote port
+ * @lport: local port addressed by the PLOGI (d_id must match our fcid)
+ * @fchdr: FC header of the received PLOGI
+ *
+ * For an N_Port source (s_id[0] != 0xFF): replace any existing rport
+ * for the same wwpn, allocate a fresh rport and an rx_id, and answer
+ * with LS_ACC.  Well-known addresses (0xFFxxxx) or rport allocation
+ * failure get an LS_RJT (reason 0x0B).
+ * NOTE(review): the rx_id is freed right after sending the ACC (see
+ * TBD below), and "goto send_plogi_rej" jumps into the else branch —
+ * legal C but worth restructuring.
+ */
+static void fdls_process_plogi_req(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_els_reject   plogi_rej;
+	struct fc_els       plogi_acc;
+	struct fc_els       *plogi_req;
+	struct fnic2_rport   *rport;
+
+	uint16_t  rx_id;
+
+	if (lport->fcid != ntoh24(fchdr->d_id)) {
+		pr_debug("lport 0x%pK fdls received Plogi with illegal frame bits dropping..\n",
+			lport);
+		return;
+	}
+	pr_debug("%s from fcid:%x\n", __func__, ntoh24(fchdr->s_id));
+
+	if (fchdr->s_id[0] != 0xFF) {
+
+		plogi_req = (struct fc_els *) fchdr;
+		rport = fnic2_find_rport(lport, plogi_req->nport_name);
+
+		/* A re-login replaces any stale rport for this wwpn */
+		if (rport != NULL)
+			fdls_delete_rport(lport, rport);
+
+		pr_err("Allocating rport\n");
+
+		rport = fdls_create_rport(lport, ntoh24(fchdr->s_id),
+					 plogi_req->nport_name);
+
+		if (rport == NULL)
+			goto send_plogi_rej;
+
+		pr_err("New rport %pK\n", rport);
+
+		memcpy(&plogi_acc, &fnic2_plogi_resp, sizeof(struct fc_els));
+
+		memcpy(plogi_acc.fchdr.s_id, fchdr->d_id, 3);
+		memcpy(&plogi_acc.fchdr.d_id, fchdr->s_id, 3);
+
+		/* 0xFFFF survives the htons byte swap, so the check holds */
+		rx_id = htons(fdls_alloc_tgt_ox_id(FDLS_PLOGI_OXID_BASE));
+		if (rx_id == 0xFFFF) {
+			// Log and Err TBD
+
+			pr_debug("lport 0x%pK fdls_tgt_send_plogi_acc.. rx_id not available\n", lport);
+			return;
+		}
+		pr_debug("fdls_tgt_send_plogi_acc rx_id %x\n", ntohs(rx_id));
+
+		plogi_acc.fchdr.ox_id = fchdr->ox_id;
+		plogi_acc.fchdr.rx_id = rx_id;
+		plogi_acc.nport_name = cpu_to_be64(lport->wwpn);
+		plogi_acc.node_name = cpu_to_be64(lport->wwnn);
+
+		fnic2_send_fcoe_frame(lport, &plogi_acc, sizeof(struct fc_els));
+
+		// TBD should we run a timer ???
+		fdls_free_tgt_ox_id(ntohs(rx_id));
+
+	} else {
+send_plogi_rej:
+		memcpy(&plogi_rej, &fnic2_els_rjt,
+			sizeof(struct fc_els_reject));
+		plogi_rej.reason_code = 0x0B;
+		plogi_rej.reason_expl = 0x0;
+		plogi_rej.vendor_specific = 0x0;
+		memcpy(&plogi_rej.fchdr.s_id, fchdr->d_id, 3);
+		memcpy(&plogi_rej.fchdr.d_id, fchdr->s_id, 3);
+
+		rx_id = fchdr->ox_id;
+		plogi_rej.fchdr.rx_id = rx_id;
+		plogi_rej.fchdr.ox_id = fchdr->ox_id;
+
+		fnic2_send_fcoe_frame(lport, &plogi_rej,
+					sizeof(struct fc_els_reject));
+	}
+}
+
+/*
+ * Send an LS_RJT (reason code 0x0B) in response to the given frame,
+ * echoing its OX_ID and swapping S_ID/D_ID.  RX_ID is left unassigned
+ * (0xFFFF).
+ */
+static void fdls_send_rej(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_els_reject els_rej;
+
+	/* Start from the canned reject template */
+	memcpy(&els_rej, &fnic2_els_rjt, sizeof(struct fc_els_reject));
+
+	memcpy(&els_rej.fchdr.s_id, fchdr->d_id, 3);
+	memcpy(&els_rej.fchdr.d_id, fchdr->s_id, 3);
+
+	els_rej.reason_code = 0x0B;
+	els_rej.fchdr.rx_id = 0xFFFF;
+	els_rej.fchdr.ox_id = fchdr->ox_id;
+
+	fnic2_send_fcoe_frame(lport, &els_rej, sizeof(struct fc_els_reject));
+}
+
+/*
+ * Send a generic ELS accept (LS_ACC) in response to the given frame,
+ * echoing its OX_ID and swapping S_ID/D_ID.  RX_ID is left unassigned
+ * (0xFFFF).
+ */
+static void fdls_send_els_acc(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_els_acc els_acc;
+
+	/* Start from the canned accept template */
+	memcpy(&els_acc, &fnic2_els_acc, sizeof(struct fc_els_acc));
+
+	memcpy(&els_acc.fchdr.s_id, fchdr->d_id, 3);
+	memcpy(&els_acc.fchdr.d_id, fchdr->s_id, 3);
+
+	els_acc.fchdr.rx_id = 0xFFFF;
+	els_acc.fchdr.ox_id = fchdr->ox_id;
+
+	fnic2_send_fcoe_frame(lport, &els_acc, sizeof(struct fc_els_acc));
+
+}
+
+/*
+ * Send a BA_ACC (basic accept) in reply to an ABTS, echoing the OX_ID and
+ * RX_ID of the aborted exchange both in the FC header and in the BA_ACC
+ * payload, with S_ID/D_ID swapped.
+ */
+void fdls_send_ba_acc(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_abts_ba_acc ba_acc;
+
+	/* Start from the canned BA_ACC template */
+	memcpy(&ba_acc, &fnic2_ba_acc, sizeof(struct fc_abts_ba_acc));
+
+	memcpy(&ba_acc.fchdr.s_id, fchdr->d_id, 3);
+	memcpy(&ba_acc.fchdr.d_id, fchdr->s_id, 3);
+
+	ba_acc.fchdr.rx_id = fchdr->rx_id;
+	ba_acc.rx_id = ba_acc.fchdr.rx_id;
+	ba_acc.fchdr.ox_id = fchdr->ox_id;
+	ba_acc.ox_id = ba_acc.fchdr.ox_id;
+
+	fnic2_send_fcoe_frame(lport, &ba_acc, sizeof(struct fc_abts_ba_acc));
+
+}
+
+/*
+ * fdls_process_prli_req() - handle an inbound PRLI ELS request.
+ *
+ * For a PLOGI'd rport, allocates an RX_ID, creates the LIO session and
+ * answers with a PRLI accept; a failed RX_ID allocation or session
+ * creation produces an LS_RJT.  A PRLI from an unknown rport gets a
+ * LOGO instead.
+ */
+static void fdls_process_prli_req(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_els_prli  prli_acc;
+	struct fnic2_rport   *rport;
+	uint16_t  rx_id;
+
+	/* Drop frames not addressed to this local port */
+	if (lport->fcid != ntoh24(fchdr->d_id)) {
+		pr_debug("lport 0x%pK fdls received PRLI to wrong d_id - dropping..\n", lport);
+		return;
+	}
+	pr_debug("%s from fcid:%x\n", __func__, ntoh24(fchdr->s_id));
+
+	rport = fnic2_find_rport_by_fcid(lport, ntoh24(fchdr->s_id));
+
+	pr_err("Found rport %pK\n", rport);
+
+	if (rport) {
+
+		/* 0xFFFF means no RX_ID available (htons(0xFFFF) == 0xFFFF) */
+		rx_id = htons(fdls_alloc_tgt_ox_id(FDLS_PRLI_OXID_BASE));
+		if (rx_id == 0xFFFF) {
+			// Log and Err TBD
+
+			pr_debug("lport 0x%pK fdls_tgt_send_prli_acc.. rx_id not available\n", lport);
+			fdls_send_rej(lport, fchdr);
+			return;
+		}
+		/* fnic2_session_create() returns 0 on success */
+		if (!fnic2_session_create(lport->fnic2, rport)) {
+
+			memcpy(&prli_acc, &fnic2_prli_resp, sizeof(struct fc_els_prli));
+
+			memcpy(&prli_acc.fchdr.s_id, fchdr->d_id, 3);
+			memcpy(&prli_acc.fchdr.d_id, fchdr->s_id, 3);
+
+			pr_debug("fdls_tgt_send_prli_acc..rx_id %x\n", ntohs(rx_id));
+
+			prli_acc.fchdr.rx_id = rx_id;
+			prli_acc.fchdr.ox_id = fchdr->ox_id;
+
+			fnic2_send_fcoe_frame(lport, &prli_acc, sizeof(struct fc_els_prli));
+
+
+			// TBD should we run a timer ???
+			fdls_free_tgt_ox_id(ntohs(rx_id));
+		} else {
+			fdls_send_rej(lport, fchdr);
+		}
+	} else {
+		/* Not PLOGI'd: tell the initiator to log out */
+		fdls_send_logout(lport, fchdr);
+	}
+}
+
+
+/*
+ * fdls_process_logo_req() - handle an inbound LOGO ELS request.
+ *
+ * Drops the frame if the sender is not logged in; otherwise tears down
+ * the rport (and its LIO session, via fdls_delete_rport()) and sends a
+ * LOGO accept.
+ *
+ * Fix: the local 'nport_name' was assigned from logo->wwpn but never
+ * read (set-but-unused warning); removed.
+ */
+static void fdls_process_logo_req(struct fnic2_lport *lport, struct fc_hdr *fchdr)
+{
+	struct fc_logo_req *logo = (struct fc_logo_req *)fchdr;
+	uint32_t nport_id;
+	struct fnic2_rport *rport;
+
+	nport_id = ntoh24(logo->fcid);
+
+	pr_debug("%s fcid: %x\n", __func__, nport_id);
+	rport = fnic2_find_rport_by_fcid(lport, nport_id);
+
+	if (!rport) {
+		/* We are not logged in with the nport, log and drop...*/
+		pr_debug("fdls received LOGO from an rport not logged in: %x\n",
+			nport_id);
+		return;
+	}
+	if (rport->fcid != nport_id) {
+		/* nport_id changed. TBD_REVISIT */
+		pr_debug("fdls received LOGO with invalid source port fcid: %x\n",
+			nport_id);
+		return;
+	}
+
+	fdls_delete_rport(lport, rport);
+	fdls_send_logo_resp(lport, &logo->fchdr);
+}
+
+/* Public Functions */
+
+/*
+ * Kick off fabric discovery: start with FLOGI on a directly-attached
+ * (non-FIP) link, otherwise begin with fabric PLOGI.
+ */
+void fnic2_fdls_disc_start(struct fnic2_lport *lport)
+{
+	if (!lport->usefip)
+		fnic2_fdls_start_flogi(lport);
+	else //review: plogi to name server
+		fnic2_fdls_start_plogi(lport);
+}
+
+/*
+ * Performs a validation for all FCOE frames and return the frame type
+ */
+/*
+ * Performs a validation for all FCOE frames and return the frame type
+ *
+ * Classifies the frame at rx_frame + fchdr_offset into one of the
+ * FNIC2_* frame-type codes dispatched by fnic2_fdls_recv_frame(), or -1
+ * when the frame is malformed / unexpected and must be dropped.
+ * Order matters: ABTS responses are recognized first, then unsolicited
+ * requests, then solicited responses keyed by OX_ID.
+ */
+static int fnic2_fdls_validate_and_get_frame_type(struct fnic2_lport *lport, void *rx_frame,
+	int len, int fchdr_offset)
+{
+	struct fc_hdr *fchdr;
+	uint8_t type;
+	uint8_t *fc_payload;
+	uint16_t ox_id;
+	uint32_t s_id;
+	uint32_t d_id;
+
+	struct fnic2_fdls_fabric *fabric = &lport->fabric;
+
+	fchdr = (struct fc_hdr *)((uint8_t *)rx_frame + fchdr_offset);
+	ox_id = fchdr->ox_id;
+	/* first payload byte is the ELS command / CT response code */
+	fc_payload = (uint8_t *)fchdr + sizeof(struct fc_hdr);
+	type = *fc_payload;
+
+	s_id = ntoh24(fchdr->s_id);
+	d_id = ntoh24(fchdr->d_id);
+
+	/* Common validation: once logged in past FLOGI, the D_ID must be
+	 * ours and CS_CTL must be zero.
+	 */
+	if (lport->fcid)
+		if (fabric->state > FDLS_STATE_FABRIC_FLOGI) {
+			if ((lport->fcid != d_id) ||
+				(fchdr->cs_ctl != FNIC2_FC_CS_CTL)) {
+				pr_debug("Invalid frame received with DID %x IPortfcid %x fabric state %x R_CTL %x type %x OX_ID %x  RX_ID %x CS_CTL %x Dropping\n",
+					d_id, lport->fcid, fabric->state,
+					fchdr->r_ctl, fchdr->type, fchdr->ox_id,
+					fchdr->rx_id, fchdr->cs_ctl);
+				return -1;
+			}
+		}
+
+	// ABTS response
+	if ((fchdr->r_ctl == FNIC2_BA_ACC_RCTL) ||
+		(fchdr->r_ctl == FNIC2_BA_RJT_RCTL)) {
+		if (fchdr->type != FNIC2_FC_TYPE_BLS) {
+			pr_debug("fdls recevied ABTS with some Invalid frame bits S_ID %x FCTL %x R_CTL %x type %x. Dropping the frame\n",
+				s_id, ntoh24(fchdr->f_ctl), fchdr->r_ctl, fchdr->type);
+			return -1;
+
+		}
+		return FNIC2_BLS_ABTS_RSP;
+	}
+
+	// unsolicited request frames, keyed by ELS command byte
+	if (fchdr->r_ctl == FNIC2_R_CTL_UNSOLICITED) {
+		switch (type) {
+		case FC_ELS_LOGO:
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_FIRST_LAST_SEQ_INIT) ||
+			    (fchdr->type != FNIC2_FC_TYPE_ELS)) {
+				pr_debug("fdls recevied LOGO with some Invalid frame bits S_ID %x FCTL %x R_CTL %x type %x. Dropping the frame\n",
+					s_id, ntoh24(fchdr->f_ctl), fchdr->r_ctl,
+					fchdr->type);
+				return -1;
+			}
+			return FNIC2_ELS_LOGO_REQ;
+		case FC_ELS_PLOGI_REQ:
+			return FNIC2_ELS_PLOGI_REQ;
+		case FC_ELS_PRLI_REQ:
+			return FNIC2_ELS_PRLI_REQ;
+		case FC_ELS_RTV_REQ:
+			return FNIC2_ELS_RTV_REQ;
+		case FC_ELS_RRQ_REQ:
+			return FNIC2_ELS_RRQ_REQ;
+		default:
+			pr_debug("fdls_recv_frame unsupported frame: %d\n", type);
+			return -1;
+		}
+	}
+
+	// Only responses for requests we sent
+	if (fchdr->f_ctl[0] & FNIC2_F_CTL_EXCH_RESPONDER) {
+
+		/*
+		 * NOTE(review): this comparison byte-swaps ox_id while the
+		 * switch below compares raw ox_id against the pre-swapped
+		 * FNIC2_*_OXID constants -- confirm both paths agree.
+		 */
+		if (ntohs(ox_id) == FNIC2_LOGO_REQ_OXID)
+			return FNIC2_TPORT_LOGO_RSP;
+
+		/*response from fabric*/
+		switch (ox_id) {
+		case FNIC2_FLOGI_OXID:
+		if (type == FC_LS_ACC) {
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_LAST_END_SEQ) ||
+				(fchdr->r_ctl != FNIC2_R_CTL_SOLICITED_CTRL_REPLY) ||
+				(s_id != FC_DOMAIN_CONTR) ||
+				(fchdr->type != FNIC2_FC_TYPE_ELS)) {
+				pr_debug("fdls recevied Flogi resp with some Invalid frame bits s_id %x FCTL %x R_CTL %x type %x. Dropping the frame.\n",
+					s_id, ntoh24(fchdr->f_ctl),
+					fchdr->r_ctl, fchdr->type);
+				return -1;
+			}
+		}
+		return FNIC2_FABRIC_FLOGI_RSP;
+
+		case FNIC2_PLOGI_FABRIC_OXID:
+		if (type == FC_LS_ACC) {
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_LAST_END_SEQ) ||
+				(fchdr->r_ctl != FNIC2_R_CTL_SOLICITED_CTRL_REPLY) ||
+				(s_id != FC_DIR_SERVER) ||
+				(fchdr->type != FNIC2_FC_TYPE_ELS)) {
+				pr_debug("fdls recevied Plogi resp with some Invalid frame bits s_id %x FCTL %x R_CTL %x type %x. Dropping the frame.\n",
+					s_id, ntoh24(fchdr->f_ctl),
+					fchdr->r_ctl, fchdr->type);
+				return -1;
+			}
+		}
+		return FNIC2_FABRIC_PLOGI_RSP;
+
+		case FNIC2_RPN_REQ_OXID:
+		if (type == FC_LS_ACC) {
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_LAST_END_SEQ) ||
+				(fchdr->r_ctl != FNIC2_R_CTL_SCTL) ||
+				(s_id != FC_DIR_SERVER) ||
+				(fchdr->type != FNIC2_FC_TYPE_FC_GS))  {
+				pr_debug("fdls recevied rpn_id_rsp with some Invalid frame bits s_id %x FCTL %x R_CTL %x type %x. Dropping the frame.\n",
+					s_id, ntoh24(fchdr->f_ctl),
+					fchdr->r_ctl, fchdr->type);
+				return -1;
+			}
+		}
+		return FNIC2_FABRIC_RPN_RSP;
+		case FNIC2_RFT_REQ_OXID:
+		if (type == FC_LS_ACC) {
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_LAST_END_SEQ) ||
+				(fchdr->r_ctl != FNIC2_R_CTL_SCTL) ||
+				(s_id != FC_DIR_SERVER) ||
+				(fchdr->type != FNIC2_FC_TYPE_FC_GS))  {
+				pr_debug("fdls recevied rft_id_rsp with some Invalid frame bits s_id %x FCTL %x R_CTL %x type %x. Dropping the frame.\n",
+					s_id, ntoh24(fchdr->f_ctl),
+					fchdr->r_ctl, fchdr->type);
+				return -1;
+			}
+		}
+		return FNIC2_FABRIC_RFT_RSP;
+		case FNIC2_RFF_REQ_OXID:
+		if (type == FC_LS_ACC) {
+			if ((fchdr->f_ctl[0] != FNIC2_F_CTL_LAST_END_SEQ) ||
+				(fchdr->r_ctl != FNIC2_R_CTL_SCTL) ||
+				(s_id != FC_DIR_SERVER) ||
+				(fchdr->type != FNIC2_FC_TYPE_FC_GS))  {
+				pr_debug("fdls recevied rff_id_rsp with some Invalid frame bits s_id %x FCTL %x R_CTL %x type%x. Dropping the frame.\n",
+					s_id, ntoh24(fchdr->f_ctl),
+					fchdr->r_ctl, fchdr->type);
+				return -1;
+			}
+		}
+		return FNIC2_FABRIC_RFF_RSP;
+		default:
+		/* Drop the Rx frame and log/stats it */
+		pr_debug("fdls_recv_frame Solicited response Unknown ox_id: %x\n", ox_id);
+		return -1;
+		}
+	}
+	return -1;
+}
+
+/*
+ * fnic2_fdls_recv_frame() - entry point for received FCoE frames.
+ *
+ * Validates and classifies the frame, then dispatches it to the matching
+ * fabric-response or unsolicited-request handler.  Unknown frame types
+ * are silently dropped.
+ */
+void fnic2_fdls_recv_frame(struct fnic2_lport *lport, void *rx_frame, int len,
+	int fchdr_offset)
+{
+	uint16_t ox_id;
+	struct fc_hdr *fchdr;
+	uint32_t s_id = 0;
+	uint32_t d_id = 0;
+	char *frame_data = rx_frame;
+
+	int frame_type;
+
+	fchdr = (struct fc_hdr *)((uint8_t *)(frame_data + fchdr_offset));
+
+	s_id = ntoh24(fchdr->s_id);
+	d_id = ntoh24(fchdr->d_id);
+	pr_debug("fdls_recv_frame received frame of len %x with SID %x DID %x R_CTL %x F_CTL %x OX_ID %x RX_ID %x seq_id %x seq_cnt %x type %x offset %d\n",
+		len, s_id, d_id, fchdr->r_ctl, ntoh24(fchdr->f_ctl),
+		ntohs(fchdr->ox_id), fchdr->rx_id, fchdr->seq_id, fchdr->seq_cnt,
+		fchdr->type, fchdr_offset);
+
+	frame_type = fnic2_fdls_validate_and_get_frame_type(lport, frame_data,
+	    len, fchdr_offset);
+
+	switch (frame_type) {
+
+	case FNIC2_FABRIC_FLOGI_RSP:
+		fdls_process_flogi_rsp(lport, fchdr, frame_data);
+		break;
+
+	case FNIC2_FABRIC_PLOGI_RSP:
+		fdls_process_fabric_plogi_rsp(lport, fchdr);
+		break;
+	case FNIC2_FABRIC_RPN_RSP:
+		fdls_process_rpn_id_rsp(lport, fchdr);
+		break;
+	case FNIC2_FABRIC_RFT_RSP:
+		fdls_process_rft_id_rsp(lport, fchdr);
+		break;
+	case FNIC2_FABRIC_RFF_RSP:
+		fdls_process_rff_id_rsp(lport, fchdr);
+		break;
+	case FNIC2_BLS_ABTS_RSP:
+		/* Only handle ABTS replies for fabric exchanges we aborted */
+		ox_id = fchdr->ox_id;
+		if ((lport->fabric.flags & FNIC2_FDLS_FABRIC_ABORT_ISSUED) &&
+			(ox_id >= FNIC2_FLOGI_OXID && ox_id <= FNIC2_RFF_REQ_OXID)) {
+			fdls_process_fabric_abts_rsp(lport, fchdr);
+		}
+		break;
+
+	case FNIC2_ELS_PLOGI_REQ:
+		fdls_process_plogi_req(lport, fchdr);
+		break;
+	case FNIC2_ELS_PRLI_REQ:
+		fdls_process_prli_req(lport, fchdr);
+	break;
+	case FNIC2_ELS_RTV_REQ:
+		/* RTV is not supported; reject it */
+		fdls_send_rej(lport, fchdr);
+		break;
+	case FNIC2_ELS_RRQ_REQ:
+		fdls_send_els_acc(lport, fchdr);
+		break;
+	case FNIC2_ELS_LOGO_REQ:
+		fdls_process_logo_req(lport, fchdr);
+		break;
+	default:
+		// pr_debug("Received unknown FCoE frame of len: %d. dropping.\n", len);
+		break;
+	}
+}
+
+/* Reset the target OX_ID pool and put the fabric state machine in INIT */
+void fnic2_fdls_disc_init(struct fnic2_lport *lport)
+{
+	fdls_init_tgt_ox_id_pool();
+	lport->fabric.state = FDLS_STATE_INIT;
+}
+
+/*
+ * fnic2_fdls_link_down() - tear down fabric state on link loss.
+ *
+ * Moves the fabric state machine to LINKDOWN, resets outstanding FCP I/O
+ * and the OX_ID pool, and deletes every rport that is not already being
+ * taken offline.
+ */
+void fnic2_fdls_link_down(struct fnic2_lport *lport)
+{
+	struct fnic2_rport *rport, *next;
+
+	pr_debug("%s lport 0x%pK\n", __func__, lport);
+
+	lport->fabric.state = FDLS_STATE_LINKDOWN;
+	lport->fabric.flags = 0;
+
+	fnic2_scsi_fcpio_reset(lport);
+
+	fdls_init_tgt_ox_id_pool();
+
+	/* _safe variant: fdls_delete_rport() unlinks and frees the entry */
+	list_for_each_entry_safe(rport, next, &lport->rport_list, links)   {
+		pr_debug("removing rport: %x", rport->fcid);
+		if (rport->state != fdls_rport_state_offlining)
+			fdls_delete_rport(lport, rport);
+	}
+
+	pr_debug("%s lport 0x%pK\n", __func__, lport);
+}
+
+/*
+ * Build a stack-local rport template in PLOGI state and hand it to
+ * fnic2_fdls_add_rport(), which heap-allocates the real entry and links
+ * it onto the lport's rport list.  Returns the new rport, or NULL when
+ * allocation fails.
+ */
+struct fnic2_rport *fdls_create_rport(struct fnic2_lport *lport, uint32_t fcid, uint64_t wwpn)
+{
+	struct fnic2_rport rport;
+
+	pr_debug("%s fcid:%x, wwpn: %llx\n", __func__, fcid, wwpn);
+
+	memset(&rport, 0, sizeof(struct fnic2_rport));
+
+	rport.fcid = fcid;
+	rport.wwpn = wwpn;
+
+	rport.lport = lport;
+
+	rport.state = fdls_rport_state_plogi;
+	return fnic2_fdls_add_rport(lport, &rport);
+}
+
+/*
+ * fdls_delete_rport() - take an rport offline and free it.
+ *
+ * Idempotent: the offlining state guards against re-entry.  If a LIO
+ * session exists for the rport's WWPN it is quiesced (timer stopped,
+ * outstanding commands drained) and deregistered before the rport is
+ * unlinked and freed.
+ */
+void fdls_delete_rport(struct fnic2_lport *lport, struct fnic2_rport *rport)
+{
+	struct fnic2_sess *sess;
+	struct se_session *se_sess;
+
+	if (rport->state == fdls_rport_state_offlining)
+		return;
+	rport->state = fdls_rport_state_offlining;
+	// remove session, cleanup LIO
+	sess = fnic2_find_session(lport->fnic2, rport->wwpn);
+	if (sess) {
+		/* -1 means the timer is currently running its callback */
+		if ((try_to_del_timer_sync(&sess->sess_timer)) < 0)
+			pr_err("Failed to delete session %pK\n", sess);
+		se_sess = sess->se_sess;
+		/* drain all outstanding commands before deregistering */
+		target_sess_cmd_list_set_waiting(se_sess);
+		target_wait_for_sess_cmds(se_sess);
+
+		transport_deregister_session_configfs(se_sess);
+		transport_deregister_session(se_sess);
+		sess->se_sess = NULL;
+		list_del(&sess->list);
+		lport->fnic2->lio.sess_count--;
+		kfree(sess);
+	}
+	fnic2_fdls_remove_rport(lport, rport);
+}
+
+/* Unlink the rport from the lport's rport list and free it */
+void fnic2_fdls_remove_rport(struct fnic2_lport *lport, struct fnic2_rport *rport)
+{
+	list_del(&rport->links);
+	kfree(rport);
+}
+
+
+/*
+ * fnic2_fdls_add_rport() - heap-allocate a copy of the caller's rport
+ * template and append it to the lport's rport list.
+ *
+ * Returns the new rport, or NULL on allocation failure.  GFP_ATOMIC
+ * because this runs from the frame-receive path.
+ */
+struct fnic2_rport *fnic2_fdls_add_rport(struct fnic2_lport *lport, struct fnic2_rport *rport)
+{
+	struct fnic2_rport *new_rport;
+
+	/* kmemdup() replaces the open-coded kmalloc() + memcpy() pair */
+	new_rport = kmemdup(rport, sizeof(struct fnic2_rport), GFP_ATOMIC);
+	if (new_rport == NULL)
+		return NULL;
+	pr_debug("created rport rport: %pK\n", new_rport);
+
+	list_add_tail(&new_rport->links, &lport->rport_list);
+
+	return new_rport;
+}
+
+/*
+ * Look up an rport on the lport's rport list by world-wide port name.
+ * Returns the matching rport, or NULL when the WWPN is not logged in.
+ *
+ * Fix: use list_for_each_entry() instead of open-coded list_for_each()
+ * + list_entry(), which also repairs the broken loop-body indentation.
+ */
+struct fnic2_rport *fnic2_find_rport(struct fnic2_lport *lport, uint64_t wwpn)
+{
+	struct fnic2_rport *rport;
+
+	list_for_each_entry(rport, &lport->rport_list, links) {
+		if (rport->wwpn == wwpn)
+			return rport;
+	}
+	return NULL;
+}
+
+/*
+ * Look up an rport on the lport's rport list by FC_ID.
+ * Returns the matching rport, or NULL when the FC_ID is not logged in.
+ *
+ * Fix: use list_for_each_entry() instead of open-coded list_for_each()
+ * + list_entry().
+ */
+struct fnic2_rport *fnic2_find_rport_by_fcid(struct fnic2_lport *lport, uint32_t fcid)
+{
+	struct fnic2_rport *rport;
+
+	list_for_each_entry(rport, &lport->rport_list, links) {
+		if (rport->fcid == fcid)
+			return rport;
+	}
+	return NULL;
+}
diff --git a/drivers/staging/fnic2/src/fdls_fc.h b/drivers/staging/fnic2/src/fdls_fc.h
new file mode 100644
index 0000000..2da1e4e
--- /dev/null
+++ b/drivers/staging/fnic2/src/fdls_fc.h
@@ -0,0 +1,403 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FDLS_FC_H_
+#define _FDLS_FC_H_
+
+#include <scsi/scsi.h>
+#include "vnic_wq_copy.h"
+
+/* This file contains the declarations for FC fabric services */
+
+/* FCP service-parameter bits exchanged in PRLI */
+enum fcp_sp {
+	FNIC2_FCP_SP_RD_XRDY_DIS	= 0x00000002,
+	FNIC2_FCP_SP_INITIATOR		= 0x00000020,
+	FNIC2_FCP_SP_CONF_CMPL		= 0x00000080,
+	FNIC2_FCP_SP_RETRY		= 0x00000100
+};
+
+#ifdef _BIG_ENDIAN
+
+/* Fixed OX_IDs used for fabric login / name-server exchanges
+ * (big-endian hosts).  Fix: the terminating ';' was missing, which
+ * broke compilation when _BIG_ENDIAN is defined.
+ */
+enum be_ox_id {
+	FNIC2_FLOGI_OXID	= 0x1001,
+	FNIC2_PLOGI_FABRIC_OXID	= 0x1002,
+	FNIC2_RPN_REQ_OXID	= 0x1003,
+	FNIC2_RFT_REQ_OXID	= 0x1004,
+	FNIC2_RFF_REQ_OXID	= 0x1005,
+	FNIC2_LOGO_REQ_OXID	= 0x1007,
+	FNIC2_LOGO_RESP_OXID	= 0x1008
+};
+
+/* F_CTL values and common-service parameters for big-endian hosts
+ * (mirrors the little-endian block below with bytes pre-swapped).
+ */
+enum be_f_ctl {
+	FNIC2_ELS_REQ_FCTL	= 0x290000,
+	FNIC2_ELS_REP_FCTL	= 0x980000,
+	FNIC2_FCP_RSP_FCTL	= 0x990000
+};
+
+#define FNIC2_FC_PH_VER         (0x2020)
+#define FNIC2_FC_B2B_CREDIT     (0x000A)
+#define FNIC2_FC_B2B_RDF_SZ     (0x0800)
+
+#define FNIC2_REQ_ABTS_FCTL     (0x090000)
+
+#define FNIC2_FC_CONCUR_SEQS    (0x00FF)
+
+#define FNIC2_FC_RO_INFO        (0x001F)
+#define FNIC2_E_D_TOV           (0x07D0)
+
+/* FC-GS CT command codes for big-endian hosts.
+ * Fix: the third entry duplicated FC_CT_RFT_CMD (a compile error);
+ * 0x021F is RFF_ID, matching FC_CT_RFF_CMD in the little-endian block.
+ */
+enum be_fc_ct {
+	FC_CT_RPN_CMD	= 0x0212,
+	FC_CT_RFT_CMD	= 0x0217,
+	FC_CT_RFF_CMD	= 0x021F,
+	FC_CT_ACC	= 0x8002
+};
+
+#else //_LITTLE_ENDIAN
+
+/* Fixed OX_IDs used for fabric login / name-server exchanges,
+ * pre-byteswapped for little-endian hosts.
+ */
+enum le_ox_id {
+	FNIC2_FLOGI_OXID	= 0x0110,
+	FNIC2_PLOGI_FABRIC_OXID	= 0x0210,
+	FNIC2_RPN_REQ_OXID	= 0x0310,
+	FNIC2_RFT_REQ_OXID	= 0x0410,
+	FNIC2_RFF_REQ_OXID	= 0x0510,
+	FNIC2_LOGO_REQ_OXID	= 0x0710,
+	FNIC2_LOGO_RESP_OXID	= 0x0810
+};
+
+/* F_CTL values and common-service parameters, pre-byteswapped for
+ * little-endian hosts.
+ */
+enum le_f_ctl {
+	FNIC2_ELS_REQ_FCTL	= 0x000029,
+	FNIC2_ELS_REP_FCTL	= 0x000098,
+	FNIC2_FCP_RSP_FCTL	= 0x000099
+};
+
+#define FNIC2_REQ_ABTS_FCTL     (0x000009)
+
+#define FNIC2_FC_PH_VER         (0x2020)
+#define FNIC2_FC_B2B_CREDIT     (0x0A00)
+#define FNIC2_FC_B2B_RDF_SZ     (0x0008)
+
+#define FNIC2_FC_CONCUR_SEQS    (0xFF00)
+#define FNIC2_FC_RO_INFO        (0x1F00)
+#define FNIC2_E_D_TOV           (0xD0070000)
+
+/* FC-GS CT command codes, pre-byteswapped for little-endian hosts */
+enum le_fc_ct {
+	FC_CT_ACC       = 0x0280,
+	FC_CT_RPN_CMD	= 0x1202,
+	FC_CT_RFT_CMD	= 0x1702,
+	FC_CT_RFF_CMD	= 0x1F02
+};
+
+#endif /* Big Endian */
+
+/* Well-known FC fabric addresses (host byte order) */
+enum fc_well_known_addr {
+	FC_DIR_SERVER		= 0xFFFFFC,
+	FC_FABRIC_CONTROLLER	= 0xFFFFFD,
+	FC_DOMAIN_CONTR		= 0xFFFFFE
+};
+
+/* first payload byte of an ELS reply: LS_RJT / LS_ACC */
+#define FC_LS_REJ               0x01
+#define FC_LS_ACC               0x02
+
+/* ELS command codes (first payload byte).
+ * NOTE(review): the FC_ELS_RJT_* entries look like LS_RJT reason codes
+ * rather than ELS commands (FC_ELS_RJT_LOGICAL_BUSY shares 0x05 with
+ * FC_ELS_LOGO) -- confirm against FC-LS and consider a separate enum.
+ * Fix: space-indented entries converted to tabs (checkpatch).
+ */
+enum fc_els_command {
+	FC_ELS_PLOGI_REQ	= 0x03,
+	FC_ELS_FLOGI_REQ	= 0x04,
+	FC_ELS_LOGO		= 0x05,
+	FC_ELS_RJT_LOGICAL_BUSY	= 0x05,
+	FC_ELS_RJT_BUSY		= 0x09,
+	FC_ELS_RTV_REQ		= 0x0E,
+	FC_ELS_RRQ_REQ		= 0x12,
+	FC_ELS_PRLI_REQ		= 0x20
+};
+
+/* FCoE end-of-frame marker and frame-size bounds (bytes) */
+#define FNIC2_FCOE_EOF         (0x42)
+
+#define FNIC2_FCOE_MAX_FRAME_SZ  (2048)
+#define FNIC2_FCOE_MIN_FRAME_SZ  (280)
+
+/* R_CTL routing/information values recognized by the frame classifier */
+enum fc_r_ctl {
+	FNIC2_R_CTL_SCTL			= 0x03,
+	FC_R_CTL_FC4_CMD			= 0x06,
+	FC_R_CTL_FC4_STATUS			= 0x07,
+	FNIC2_R_CTL_UNSOLICITED			= 0x22,
+	FNIC2_R_CTL_SOLICITED_CTRL_REPLY	= 0x23,
+	FNIC2_FC_R_CTL_ABTS			= 0x81,
+	FNIC2_BA_ACC_RCTL			= 0x84,
+	FNIC2_BA_RJT_RCTL			= 0x85
+};
+
+/* F_CTL first-byte values checked against fchdr->f_ctl[0] */
+enum fc_f_ctl {
+	FNIC2_F_CTL_FIRST_LAST_SEQ_INIT	= 0x29,
+	FNIC2_F_CTL_EXCH_RESPONDER	= 0x80,
+	FNIC2_F_CTL_LAST_END_SEQ        = 0x98
+};
+
+/* FC-4 TYPE field values */
+enum fc_type {
+	FNIC2_FC_TYPE_BLS	= 0x00,
+	FNIC2_FC_TYPE_ELS	= 0x01,
+	FNIC2_FC_TYPE_FC_GS	= 0x20
+};
+
+/* expected CS_CTL on inbound frames */
+#define FNIC2_FC_CS_CTL				0x00
+
+#define FNIC2_FC_C3_RDF         (0xfff)
+
+/* FCP_RSP flags byte bits */
+#define FCP_RSP_FLAG_OVERFLOW   (1 << 2)
+#define FCP_RSP_FLAG_UNDERFLOW  (1 << 3)
+
+#define FNIC2_FC_EDTOV_NSEC    (0x400)
+#define FNIC2_NSEC_TO_MSEC     (0x1000000)
+
+/* offset of the FC header inside an FCoE frame */
+#define FNIC2_FCOE_FCHDR_OFFSET \
+    (sizeof(struct fnic2_eth_hdr) + sizeof(struct fnic2_fcoe_hdr))
+
+/* FCP_CMND data-direction request codes.
+ * Fix: space-indented entries converted to tabs (checkpatch).
+ */
+enum fc_cmnd_req {
+	FC_CMND_REQ_NONE	= 0x00,
+	FC_CMND_REQ_WR		= 0x01,
+	FC_CMND_REQ_RD		= 0x02
+};
+
+/* Extract the ADDITIONAL CDB LENGTH field from the FCP_CMND
+ * add_cdblen_flags byte.  Fix: parenthesize the macro argument so
+ * arbitrary expressions expand safely.
+ */
+#define FC_CMND_ADDL_CDB_LEN(_val)	\
+	(((_val) & 0xFC) >> 2)
+
+/* mask for the task-attribute bits of pri_ta */
+#define FC_CMND_TASK_ATTR	(0x03)
+
+/* Task-attribute values carried in the FCP_CMND pri_ta field.
+ * Fix: space-indented entries converted to tabs (checkpatch).
+ */
+enum fc_ta {
+	FC_TA_SIMPLE_TAG	= 0x0,
+	FC_TA_HEAD_OF_QUEUE	= 0x1,
+	FC_TA_ORDERED		= 0x2,
+	FC_TA_ACA		= 0x4
+};
+
+/* Frame header */
+
+/* Ethernet header preceding the FCoE encapsulation */
+struct fnic2_eth_hdr {
+	uint8_t		dst_mac[6];
+	uint8_t		src_mac[6];
+	uint16_t	ether_type;
+} __attribute__((__packed__));
+
+/* FCoE encapsulation header: version, reserved pad, start-of-frame */
+struct fnic2_fcoe_hdr {
+	uint8_t	ver;
+	uint8_t	rsvd[12];
+	uint8_t	sof;
+} __attribute__((__packed__));
+
+/* Big Endian */
+/* Basic FC frame header; 24-bit addresses are kept as raw byte triples
+ * (see ntoh24() usage) and 16-bit fields stay in wire byte order.
+ */
+struct fc_hdr {
+	uint8_t 	r_ctl;
+	uint8_t 	d_id[3];
+
+	uint8_t		cs_ctl;
+	uint8_t		s_id[3];
+
+	uint8_t		type;
+	uint8_t		f_ctl[3];
+
+	uint8_t		seq_id;
+	uint8_t		df_ctl;
+	uint16_t	seq_cnt;
+
+	uint16_t	ox_id;
+	uint16_t	rx_id;
+
+	uint32_t	param;
+} __attribute__((__packed__));
+
+/* Common service parameters carried in a FLOGI payload */
+struct fc_csp_flogi {
+	uint16_t	fc_ph_ver;
+	uint16_t	b2b_credits;
+
+	uint16_t	features;
+	uint16_t	b2b_rdf_size;
+
+	uint32_t	r_a_tov;
+	uint32_t	e_d_tov;
+} __attribute__((__packed__));
+
+/* Common service parameters carried in a PLOGI payload */
+struct fc_csp_plogi {
+	uint16_t	fc_ph_ver;
+	uint16_t	b2b_credits;
+
+	uint16_t	features;
+	uint16_t	b2b_rdf_size;
+
+	uint16_t	total_concur_seqs;
+	uint16_t	ro_info;
+
+	uint32_t	e_d_tov;
+} __attribute__((__packed__));
+
+// Revisit the correctness of the union (though it is the same size now)
+/* Generic FLOGI/PLOGI ELS frame: header, command byte, endian-dependent
+ * common service parameters, port/node names and class service pages.
+ */
+struct fc_els {
+	struct	fc_hdr fchdr;
+	uint8_t			command;
+	uint8_t			rsvd[3];
+
+	/* FLOGI and PLOGI CSPs differ but occupy the same 12 bytes */
+	union {
+		struct fc_csp_flogi csp_flogi;
+		struct fc_csp_plogi csp_plogi;
+	} u;
+
+	uint64_t		nport_name;
+	uint64_t		node_name;
+
+	/* class 1-4 service parameter pages */
+	uint8_t			spc1[16];
+	uint8_t			spc2[16];
+	uint8_t			spc3[16];
+	uint8_t			spc4[16];
+
+	uint8_t  vendor_ver_level[16];
+} __attribute__((__packed__));
+
+/* Minimal LS_ACC reply: header plus command byte */
+struct fc_els_acc {
+	struct fc_hdr	fchdr;
+	uint8_t			command;
+	uint8_t 		rsvd[3];
+} __attribute__((__packed__));
+
+/* LS_RJT reply with reason code / explanation / vendor-specific bytes */
+struct fc_els_reject {
+	struct fc_hdr   fchdr;
+	uint32_t		command;
+	uint8_t			reserved;
+	uint8_t			reason_code;
+	uint8_t			reason_expl;
+	uint8_t			vendor_specific;
+} __attribute__((__packed__));
+
+/* BA_ACC payload sent in reply to an ABTS */
+struct fc_abts_ba_acc {
+	struct fc_hdr	fchdr;
+	uint8_t			seq_id_validity;
+	uint8_t			seq_id;
+	uint16_t		reserved;
+	uint16_t		ox_id;
+	uint16_t		rx_id;
+	uint16_t		low_seq_cnt;
+	uint16_t		high_seq_cnt;
+
+} __attribute__((__packed__));
+
+/* BA_RJT payload sent when an ABTS is rejected */
+struct fc_abts_ba_rjt {
+	struct fc_hdr	fchdr;
+	uint8_t			vend_uniq;
+	uint8_t			reason_explanation;
+	uint8_t			reason_code;
+	uint8_t			reserved;
+
+} __attribute__((__packed__));
+
+/* PRLI service-parameter page (one FC-4 type) */
+struct fc_prli_sp {
+	uint8_t		type;
+	uint8_t		type_ext;
+	uint16_t	flags;
+
+	uint32_t	ox_proc_assoc;
+	uint32_t	rx_proc_assoc;
+	uint32_t	csp;
+} __attribute__((__packed__));
+
+/* PRLI ELS frame carrying a single service-parameter page */
+struct fc_els_prli {
+	struct fc_hdr	fchdr;
+	uint8_t			command;
+	uint8_t			page_len;
+	uint16_t		payload_len;
+	struct fc_prli_sp 	sp;
+} __attribute__((__packed__));
+
+/* FC-GS Common Transport (CT) IU header used by name-server requests.
+ * NOTE(review): rev/in_id are C bitfields whose layout is
+ * implementation-defined -- confirm the mapping holds on big-endian
+ * hosts before relying on it.
+ */
+struct fc_ct_hdr {
+	uint32_t	rev: 8;
+	uint32_t	in_id: 24;
+
+	uint8_t		fs_type;
+	uint8_t		fs_subtype;
+	uint8_t		options;
+	uint8_t		rsvd;
+
+	uint16_t	command;
+	uint16_t	max_res_size;
+
+	uint8_t		rsvd1;
+	uint8_t		reason_code;
+	uint8_t		reason_expl;
+	uint8_t		vendor_specific;
+} __attribute__((__packed__));
+
+/* RPN_ID name-server request: register port name for a port ID */
+struct fc_rpn_id {
+	struct fc_hdr	fchdr;
+	struct fc_ct_hdr	fc_ct_hdr;
+
+	uint8_t			rsvd;
+	uint8_t			port_id[3];
+
+	uint64_t		port_name;
+} __attribute__((__packed__));
+
+/* RFT_ID name-server request: register supported FC-4 types */
+struct fc_rft_id {
+	struct fc_hdr	fchdr;
+	struct fc_ct_hdr	fc_ct_hdr;
+
+	uint8_t			rsvd;
+	uint8_t			port_id[3];
+	uint8_t			fc4_types[64];
+} __attribute__((__packed__));
+
+/* RFF_ID name-server request: register FC-4 features for one type */
+struct fc_rff_id {
+	struct fc_hdr	fchdr;
+	struct fc_ct_hdr	fc_ct_hdr;
+
+	uint8_t			rsvd;
+	uint8_t			port_id[3];
+	uint8_t			rsvd1;
+	uint8_t			rsvd2;
+	uint8_t			tgt;
+	uint8_t			fc4_type;
+} __attribute__((__packed__));
+
+/* LOGO ELS request: logging-out port's FC_ID and WWPN */
+struct fc_logo_req {
+	struct fc_hdr	fchdr;
+	uint8_t			command;
+	uint8_t			rsvd[3];
+
+	uint8_t			rsvd1;
+	uint8_t			fcid[3];
+
+	uint64_t		wwpn;
+}  __attribute__((__packed__));
+
+/* FCP_CMND IU: LUN, task attributes, CDB and data length */
+struct fc_fcp_cmnd {
+	struct fc_hdr	fchdr;
+	struct scsi_lun		fcp_lun;
+	uint8_t			cmd_ref;
+	uint8_t			pri_ta;
+	uint8_t			tm_flags;
+	uint8_t			add_cdblen_flags;
+	uint8_t			cdb[16];
+	uint32_t		dl;
+} __attribute__((__packed__));
+
+/* FCP_RSP IU: SCSI status, residual and sense/response lengths */
+struct fc_fcp_rsp {
+	struct fc_hdr	fchdr;
+	uint8_t			rsvd[8];
+	uint16_t		retry_delay_tmr;	/* TBD this vs status_qual? */
+	uint8_t			flags;
+	uint8_t			scsi_status;
+	uint32_t		resid;
+	uint32_t		sense_len;
+	uint32_t		rsp_len;
+} __attribute__((__packed__));
+
+#endif /* _FDLS_FC_H_ */
diff --git a/drivers/staging/fnic2/src/fdls_if.c b/drivers/staging/fnic2/src/fdls_if.c
new file mode 100644
index 0000000..bd095f8
--- /dev/null
+++ b/drivers/staging/fnic2/src/fdls_if.c
@@ -0,0 +1,1254 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "fnic2.h"
+#include "fnic2_res.h"
+#include "cq_enet_desc.h"
+#include "cq_exch_desc.h"
+#include "fdls_fc.h"
+#include "fnic2_fdls.h"
+
+static int fnic2_fcpio_cmpl_handler(struct vnic_dev *vdev,
+				    unsigned int cq_index,
+				    struct fcpio_fw_req *desc);
+static int fnic2_fcpio_fw_reset_cmpl_handler(struct fnic2 *fnic2, struct fcpio_fw_req *desc);
+
+/* Human-readable names for enum fnic2_state, indexed by state value */
+const char *fnic2_state_str[] = {
+	[FNIC2_IN_FC_MODE] = "FNIC2_IN_FC_MODE",
+	[FNIC2_IN_FC_TRANS_ETH_MODE] = "FNIC2_IN_FC_TRANS_ETH_MODE",
+	[FNIC2_IN_ETH_MODE] = "FNIC2_IN_ETH_MODE",
+	[FNIC2_IN_ETH_TRANS_FC_MODE] = "FNIC2_IN_ETH_TRANS_FC_MODE",
+};
+
+/*
+ * Human-readable names for FCPIO firmware status codes, indexed by code.
+ * Fixed: stray ']' in FCPIO_INVALID_PARAM and "LUNHMAP" typo.
+ */
+static const char *fcpio_status_str[] =  {
+	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
+	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
+	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
+	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
+	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
+	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
+	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
+	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
+	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
+	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
+	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
+	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
+	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
+	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
+	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
+	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
+	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
+	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
+	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
+};
+
+/* Map an FCPIO status code to its name; "unknown" for out-of-range codes */
+static const char *fnic2_fcpio_status_to_str(unsigned int status)
+{
+	const char *name = NULL;
+
+	if (status < ARRAY_SIZE(fcpio_status_str))
+		name = fcpio_status_str[status];
+
+	return name ? name : "unknown";
+}
+
+/* Map an fnic2 driver state to its name; "unknown" for out-of-range states */
+const char *fnic2_state_to_str(unsigned int state)
+{
+	const char *name = NULL;
+
+	if (state < ARRAY_SIZE(fnic2_state_str))
+		name = fnic2_state_str[state];
+
+	return name ? name : "unknown";
+}
+
+/* Frame initialization */
+/*
+ * Template Ethernet header for outgoing FCoE frames.
+ * Variables: dst_mac, src_mac (filled in per frame).
+ * NOTE(review): 0x0689 is the byte-swapped FCoE ethertype (0x8906) —
+ * presumably stored pre-swapped for the wire; confirm endianness handling.
+ */
+struct fnic2_eth_hdr fnic2_eth_hdr_fcoe = {
+	.ether_type = 0x0689
+};
+
+/*
+ * Template FCoE encapsulation header for outgoing frames.
+ * Variables: none.  0x2E is the SOFi3 start-of-frame delimiter.
+ */
+struct fnic2_fcoe_hdr fnic2_fcoe_hdr = {
+	.sof = 0x2E
+};
+
+/* ALL-FCF-MACs multicast address: destination for FIP discovery frames */
+uint8_t fcoe_all_fcf_mac[6] = {0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe};
+
+/* Workqueues servicing received FC frames and FIP frames respectively */
+struct workqueue_struct *fnic2_event_queue;
+struct workqueue_struct *fip_event_queue;
+
+/* external */
+extern int fnic2_fw_reset_handler(struct fnic2 *fnic2);
+extern void fnic2_fcpio_tcmd_cmpl_handler(struct work_struct *work);
+extern void fnic2_fcp_recv(struct fnic2 *fnic2, uint8_t *fp, int frame_len);
+extern struct fnic2_cmd *fnic2_find_tcmd(struct fnic2 *fnic2, uint32_t cmd_tag);
+
+/* NOTE(review): declared inline here but defined plain static below — unify */
+static inline int fnic2_import_rq_eth_pkt(struct fnic2 *fnic2, void *fp);
+
+/*
+ * Internal Functions
+ * Record @src_mac as the source MAC (FPMA) used in outgoing frames.
+ */
+static inline void fnic2_fdls_set_fcoe_srcmac(struct fnic2 *fnic2, uint8_t *src_mac)
+{
+	pr_debug("setting src_mac %02x, %02x, %02x, %02x, %02x, %02x\n",
+		src_mac[0], src_mac[1], src_mac[2],
+		src_mac[3], src_mac[4], src_mac[5]);
+
+	/* ETH_ALEN instead of magic constant 6 */
+	memcpy(fnic2->lport.fpma, src_mac, ETH_ALEN);
+}
+
+
+/*
+ * Record @dst_mac as the destination MAC (FCF MAC) used in outgoing frames.
+ */
+static inline void fnic2_fdls_set_fcoe_dstmac(struct fnic2 *fnic2, uint8_t *dst_mac)
+{
+	pr_debug("setting dst_mac %02x, %02x, %02x, %02x, %02x, %02x\n",
+		dst_mac[0], dst_mac[1], dst_mac[2],
+		dst_mac[3], dst_mac[4], dst_mac[5]);
+
+	/* ETH_ALEN instead of magic constant 6 */
+	memcpy(fnic2->lport.fcfmac, dst_mac, ETH_ALEN);
+}
+
+/* Propagate a link up/down transition into the fdls discovery machinery */
+static void fnic2_fdls_link_status_change(struct fnic2 *fnic2, int linkup)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+
+	pr_debug("fnic2 %d: fdls_link_status_change linkup: %d, usefip: %d\n",
+		fnic2->fnic2_num, linkup, lport->usefip);
+
+	if (!linkup) {
+		/* link lost: drop FPMA, clean up FIP state, notify fdls */
+		lport->state = FNIC2_IPORT_STATE_LINK_WAIT;
+		vnic_dev_del_addr(fnic2->vdev, lport->fpma);
+		fnic2_common_fip_cleanup(fnic2);
+		fnic2_fdls_link_down(lport);
+		return;
+	}
+
+	if (lport->usefip) {
+		/* FIP mode: solicit an FCF before starting discovery */
+		lport->state = FNIC2_IPORT_STATE_FIP;
+		fnic2_fcoe_send_vlan_req(fnic2);
+	} else {
+		lport->state = FNIC2_IPORT_STATE_FABRIC_DISC;
+		fnic2_fdls_disc_start(lport);
+	}
+}
+
+/*
+ * fnic2_fcpio_flogi_reg_cmpl_handler
+ * Handle firmware completion of a FLOGI register request: move into FC
+ * mode on success, fall back to Ethernet mode on failure, then flush any
+ * frames queued while registration was pending.
+ * Fixed: "flog reg" log typo; normalized mis-indented else branch.
+ */
+static int fnic2_fcpio_flogi_reg_cmpl_handler(struct fnic2 *fnic2, struct fcpio_fw_req *desc)
+{
+	uint8_t type;
+	uint8_t hdr_status;
+	uint32_t fcpio_tag;
+	int ret = 0;
+	unsigned long flags;
+
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &fcpio_tag);
+
+	/* Update fnic2 state based on status of flogi reg completion */
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+
+	if (fnic2->state == FNIC2_IN_ETH_TRANS_FC_MODE) {
+
+		/* Check flogi registration completion status */
+		if (!hdr_status) {
+			pr_debug("flogi reg succeeded\n");
+			fnic2->state = FNIC2_IN_FC_MODE;
+		} else {
+			pr_debug("fnic2 flogi reg: failed %s\n",
+				fnic2_fcpio_status_to_str(hdr_status));
+			fnic2->state = FNIC2_IN_ETH_MODE;
+			ret = -1;
+		}
+	} else {
+		pr_debug("Unexpected fnic2 state %s while processing flogi reg completion\n",
+			fnic2_state_to_str(fnic2->state));
+		ret = -1;
+	}
+
+	if (!ret) {
+		if (fnic2->stop_rx_link_events) {
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			goto reg_cmpl_handler_end;
+		}
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		/* drain frames queued while registration was in flight */
+		fnic2_flush_tx(fnic2);
+		queue_work(fnic2_event_queue, &fnic2->frame_work);
+	} else {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+	}
+
+reg_cmpl_handler_end:
+	return ret;
+}
+
+/*
+ * FPMA can be either taken from ethhdr(dst_mac) or flogi resp
+ * or derive from FC_MAP and FCID combination. While it should be
+ * same, revisit this if there is any possibility of not-correct.
+ */
+void fnic2_fdls_learn_fcoe_macs(struct fnic2_lport *lport, void *rx_frame, uint8_t *fcid)
+{
+	struct fnic2 *fnic2 = lport->fnic2;
+	struct fnic2_eth_hdr *ethhdr = (struct fnic2_eth_hdr *)rx_frame;
+	/* FC-MAP prefix 0E:FC:00; the low 3 bytes are filled from the FC_ID */
+	uint8_t fcmac[6] = {0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00};
+
+	memcpy(&fcmac[3], fcid, 3);
+
+	pr_debug("learn_fcoe - dst_mac:%02x %02x %02x %02x %02x %02x\n",
+		ethhdr->dst_mac[0],  ethhdr->dst_mac[1],  ethhdr->dst_mac[2],
+		ethhdr->dst_mac[3],  ethhdr->dst_mac[4],  ethhdr->dst_mac[5]);
+
+	pr_debug("learn_fcoe - fc_mac:%02x %02x %02x %02x %02x %02x\n",
+		fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], fcmac[5]);
+
+	/* derived FPMA becomes our source; sender of rx_frame becomes dest */
+	fnic2_fdls_set_fcoe_srcmac(fnic2, fcmac);
+	fnic2_fdls_set_fcoe_dstmac(fnic2, ethhdr->src_mac);
+}
+
+
+/* Public Functions */
+/*
+ * Initialize the local port for fabric discovery: reset firmware state,
+ * seed src/dst MACs for the frame sender, and init the fdls discovery
+ * state machine.  @usefip selects FIP (FCF discovery) vs. direct FCoE.
+ */
+void fnic2_fdls_init(struct fnic2 *fnic2, int usefip)
+{
+	struct fnic2_lport *lport = &fnic2->lport;
+	/* default destination: ALL-FCF-MACs multicast address */
+	uint8_t dstmac[6] = {0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe};
+
+	/* Initialize iPort structure */
+	lport->state = FNIC2_IPORT_STATE_INIT;
+	lport->fnic2 = fnic2;
+	lport->usefip = usefip;
+
+	/* Send the FCPIO_RESET to the firmware */
+	fnic2_scsi_fcpio_reset(lport);
+
+	pr_debug("lportsrcmac:%02x %02x %02x %02x %02x %02x\n",
+		lport->hwmac[0], lport->hwmac[1], lport->hwmac[2],
+		lport->hwmac[3], lport->hwmac[4], lport->hwmac[5]);
+
+	/* Initialize the src, and dst mac addresses for frame sender */
+	fnic2_fdls_set_fcoe_srcmac(fnic2, lport->hwmac);
+	fnic2_fdls_set_fcoe_dstmac(fnic2, dstmac);
+
+	fnic2_fdls_disc_init(lport);
+}
+
+/* At this time, should be called only during driver unload (stub) */
+void fnic2_fdls_cleanup(struct fnic2 *fnic2)
+{
+	pr_debug("fnic2 0x%pK Entered fnic2_fdls_cleanup\n", fnic2);
+}
+
+/*
+ * Worker: reconcile the cached link state with the current vNIC link
+ * state and drive fdls link up/down transitions.  link_down_cnt is used
+ * to detect a bounce (UP -> DOWN -> UP) that occurred between polls.
+ */
+void fnic2_handle_link(struct work_struct *work)
+{
+	struct fnic2 *fnic2 = container_of(work, struct fnic2, link_work);
+	int old_link_status;
+	uint32_t old_link_down_cnt;
+	unsigned long flags;
+
+	pr_info("started fnic2_handle_link\n");
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (fnic2->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		pr_debug("fnic2 0x%pK Stop RX link event", fnic2);
+		return;
+	 }
+
+	/* snapshot old state, then refresh from the device */
+	old_link_down_cnt = fnic2->link_down_cnt;
+	old_link_status = fnic2->link_status;
+	fnic2->link_status = vnic_dev_link_status(fnic2->vdev);
+	fnic2->link_down_cnt = vnic_dev_link_down_cnt(fnic2->vdev);
+
+	pr_debug("link status %d down cnt %d",
+		(int)fnic2->link_status, (int)fnic2->link_down_cnt);
+	pr_debug("old status %d old down cnt %d",
+		(int)old_link_status, old_link_down_cnt);
+
+	/* lock is dropped before calling into fdls on every branch below */
+	if (old_link_status == fnic2->link_status) {
+		if (!fnic2->link_status) {
+			/* DOWN -> DOWN */
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		} else {
+			if (old_link_down_cnt != fnic2->link_down_cnt) {
+				/* UP -> DOWN -> UP: replay both transitions */
+				spin_unlock_irqrestore(&fnic2->fnic2_lock,
+							flags);
+				pr_debug("fnic2 %d: link down",
+					fnic2->fnic2_num);
+				fnic2_fdls_link_status_change(fnic2, 0);
+
+				pr_debug("fnic2 %d: link up",
+					 fnic2->fnic2_num);
+				fnic2_fdls_link_status_change(fnic2, 1);
+
+			} else {
+				/* UP -> UP */
+				spin_unlock_irqrestore(&fnic2->fnic2_lock,
+							 flags);
+			}
+		}
+	 } else if (fnic2->link_status) {
+		/* DOWN -> UP */
+
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		pr_debug("fnic2 %d: link up", fnic2->fnic2_num);
+		fnic2_fdls_link_status_change(fnic2, 1);
+
+	 } else {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		pr_debug("fnic2 %d: recvd up to down event", fnic2->fnic2_num);
+		/* UP -> DOWN */
+		fnic2_fdls_link_status_change(fnic2, 0);
+	}
+}
+
+/*
+ * Worker servicing fnic2->frame_queue: dequeue each received frame and
+ * pass it to the fdls receive path.  fnic2_lock protects the queue; it is
+ * dropped around frame processing and re-taken to continue the walk.
+ */
+void fnic2_handle_frame(struct work_struct *work)
+{
+	struct fnic2_frame_list *cur_frame, *next;
+	unsigned long flags;
+	struct fnic2 *fnic2 = container_of(work, struct fnic2, frame_work);
+	int fchdr_offset = 0;
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	list_for_each_entry_safe(cur_frame, next, &fnic2->frame_queue, links) {
+		if (fnic2->stop_rx_link_events) {
+			/* teardown in progress: drop this frame and stop */
+			list_del(&cur_frame->links);
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			kfree(cur_frame->fp);
+			kfree(cur_frame);
+			return;
+		}
+
+		/*
+		 * If we're in a transitional state, just re-queue and return.
+		 * The queue will be serviced when we get to a stable state.
+		 */
+		if (fnic2->state != FNIC2_IN_FC_MODE &&
+		    fnic2->state != FNIC2_IN_ETH_MODE) {
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			return;
+		}
+
+		list_del(&cur_frame->links);
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		/* Frames from FCP_RQ will have ethhdrs stripped off */
+		fchdr_offset = (cur_frame->rx_ethhdr_stripped) ? 0 : FNIC2_FCOE_FCHDR_OFFSET;
+		fnic2_fdls_recv_frame(&fnic2->lport, cur_frame->fp,
+			cur_frame->frame_len, fchdr_offset);
+
+		kfree(cur_frame->fp);
+		kfree(cur_frame);
+		spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	}
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+}
+
+
+/* Process FIP frames */
+/*
+ * Worker servicing fnic2->fip_frame_queue: dequeue each FIP frame and
+ * feed it to the FIP state machine.  fnic2_lock protects the queue; it
+ * is dropped around frame processing and re-taken to continue the walk.
+ * Fixed: the stop_rx_link_events branch body was not indented.
+ */
+void fnic2_handle_fip_frame(struct work_struct *work)
+{
+	unsigned long flags;
+	struct fnic2_frame_list *cur_frame, *next;
+	struct fnic2 *fnic2 = container_of(work, struct fnic2, fip_work);
+
+	pr_debug("fnic2 0x%pK starting fnic2_handle_fip_frame\n", fnic2);
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	list_for_each_entry_safe(cur_frame, next, &fnic2->fip_frame_queue, links) {
+		pr_debug("fnic2 0x%pK got fip frame\n", fnic2);
+
+		if (fnic2->stop_rx_link_events) {
+			list_del(&cur_frame->links);
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			kfree(cur_frame->fp);
+			kfree(cur_frame);
+			return;
+		}
+
+		/*
+		 * If we're in a transitional state, just re-queue and return.
+		 * The queue will be serviced when we get to a stable state.
+		 */
+		if (fnic2->state != FNIC2_IN_FC_MODE &&
+		    fnic2->state != FNIC2_IN_ETH_MODE) {
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			return;
+		}
+
+		list_del(&cur_frame->links);
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		/*
+		 * Non-zero return: FIP code is done with the buffer.
+		 * NOTE(review): on a zero return, cur_frame (the list node)
+		 * is never freed — presumably ownership of fp passes to the
+		 * FIP code, but the node itself leaks; confirm.
+		 */
+		if (fdls_fip_recv_frame(fnic2, cur_frame->fp)) {
+			pr_debug("fnic2 0x%pK fip frame processed\n", fnic2);
+			kfree(cur_frame->fp);
+			kfree(cur_frame);
+		}
+		spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	}
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+}
+
+/**
+ * fnic2_import_rq_eth_pkt() - handle received FCoE or FIP frame.
+ * @fnic2:      fnic2 instance.
+ * @fp:         Ethernet frame (ownership passes to us if we return 1).
+ *
+ * Returns 1 if the packet was consumed (queued for FIP processing, or
+ * dropped on allocation failure), 0 if the caller should keep handling it.
+ *
+ * Fixed: kmalloc(__GFP_NOFAIL) without a base gfp type is invalid and
+ * __GFP_NOFAIL may sleep; use GFP_ATOMIC (may run in IRQ context) and
+ * drop the frame on failure instead.
+ */
+static int fnic2_import_rq_eth_pkt(struct fnic2 *fnic2, void *fp)
+{
+	struct ethhdr *eh;
+	struct fnic2_frame_list *fip_fr_elem;
+
+	pr_debug("fnic2 0x%pK fnic2_import_rq_eth_pkt\n", fnic2);
+	eh = (struct ethhdr *)fp;
+	/* 0x8914 is the FIP ethertype */
+	if (eh->h_proto != htons(0x8914))
+		return 0;
+
+	fip_fr_elem = kzalloc(sizeof(*fip_fr_elem), GFP_ATOMIC);
+	if (!fip_fr_elem) {
+		/* cannot queue it: drop the frame, report it as consumed */
+		kfree(fp);
+		return 1;
+	}
+
+	pr_debug("fnic2 0x%pK fip frame enqueued\n", fnic2);
+	fip_fr_elem->fp = fp;
+	list_add_tail(&fip_fr_elem->links, &fnic2->fip_frame_queue);
+	/* NOTE(review): re-INIT_WORK on every frame; init once at probe */
+	INIT_WORK(&fnic2->fip_work, fnic2_handle_fip_frame);
+	queue_work(fip_event_queue, &fnic2->fip_work);
+
+	return 1;		/* let caller know packet was used */
+}
+
+/*
+ * fnic2_rq_cmpl_frame_recv() - process one received frame from an RQ.
+ * Decodes the completion descriptor (FCP or ENET type), validates
+ * CRC/FCS, then hands FCP-4 frames straight to the SCSI receive path and
+ * queues all other frames for the fdls worker.  Ownership of the RQ
+ * buffer (buf->os_buf) passes to this function.
+ *
+ * Fixed: kmalloc(__GFP_NOFAIL) without a base gfp type is invalid; use
+ * GFP_ATOMIC (may run in IRQ context) and drop the frame on failure.
+ */
+static void fnic2_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(rq->vdev);
+	uint8_t *fp;
+	unsigned int ethhdr_stripped;
+	uint8_t type, color, eop, sop, ingress_port, vlan_stripped;
+	uint8_t fcoe_fnic2_crc_ok = 1, fcoe_enc_error = 0;
+	uint8_t fcs_ok = 1, packet_error = 0;
+	uint16_t q_number, completed_index, vlan;
+	uint32_t rss_hash;
+	uint16_t checksum;
+	uint8_t csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
+	uint8_t tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	uint8_t fcoe = 0, fcoe_sof, fcoe_eof;
+	uint16_t exchange_id, tmpl;
+	uint8_t sof = 0;
+	uint8_t eof = 0;
+	uint32_t fcp_bytes_written = 0;
+	uint16_t enet_bytes_written = 0;
+	uint32_t bytes_written = 0;
+	unsigned long flags;
+	struct fnic2_frame_list *frame_elem = NULL;
+	struct fc_hdr *fchdr;
+	uint8_t fcp4_frame = 0;
+
+	/* take the frame out of the RQ; we now own it */
+	pci_unmap_single(fnic2->pdev, buf->dma_addr, buf->len,
+		     PCI_DMA_FROMDEVICE);
+	fp = (uint8_t *)buf->os_buf;
+	buf->os_buf = NULL;
+
+	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
+	if (type == CQ_DESC_TYPE_RQ_FCP) {
+		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
+				   &type, &color, &q_number, &completed_index,
+				   &eop, &sop, &fcoe_fnic2_crc_ok, &exchange_id,
+				   &tmpl, &fcp_bytes_written, &sof, &eof,
+				   &ingress_port, &packet_error,
+				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
+				   &vlan);
+		/* hardware strips the Ethernet header on the FCP RQ */
+		ethhdr_stripped = 1;
+		bytes_written = fcp_bytes_written;
+
+		fchdr = (struct fc_hdr *)fp;
+		if (fchdr->r_ctl == FC_R_CTL_FC4_CMD ||
+		    fchdr->r_ctl == FNIC2_FC_R_CTL_ABTS ||
+		    fchdr->r_ctl == FC_R_CTL_FC4_STATUS) {
+			fcp4_frame = 1;
+		}
+
+		pr_debug("recvd FCP frame. rctl %x sz: %d, fcp4: %d\n", fchdr->r_ctl, fcp_bytes_written, fcp4_frame);
+	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
+		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+				    &type, &color, &q_number, &completed_index,
+				    &ingress_port, &fcoe, &eop, &sop,
+				    &rss_type, &csum_not_calc, &rss_hash,
+				    &enet_bytes_written, &packet_error,
+				    &vlan_stripped, &vlan, &checksum,
+				    &fcoe_sof, &fcoe_fnic2_crc_ok,
+				    &fcoe_enc_error, &fcoe_eof,
+				    &tcp_udp_csum_ok, &udp, &tcp,
+				    &ipv4_csum_ok, &ipv6, &ipv4,
+				    &ipv4_fragment, &fcs_ok);
+
+		ethhdr_stripped = 0;
+		bytes_written = enet_bytes_written;
+
+		pr_debug("recvd ENET frame. sz: %d\n",
+			enet_bytes_written);
+
+		if (!fcs_ok) {
+			pr_debug("fnic2 0x%pK fcs error.  dropping packet.\n", fnic2);
+			goto drop;
+		}
+
+		/* returns 1 when the FIP path has consumed the frame */
+		if (fnic2_import_rq_eth_pkt(fnic2, fp))
+			return;
+	} else {
+		/* wrong CQ type*/
+		pr_debug("fnic2 rq_cmpl wrong cq type 0x%x\n", type);
+		goto drop;
+	}
+
+	if (!fcs_ok || packet_error || !fcoe_fnic2_crc_ok || fcoe_enc_error) {
+		pr_debug("fnic2 rq_cmpl fcoe 0x%x fcsok 0x%x pkterr 0x%x fcoe_fnic2_crc_ok 0x%x, fcoe_enc_err 0x%x\n", fcoe, fcs_ok, packet_error,
+			fcoe_fnic2_crc_ok, fcoe_enc_error);
+		goto drop;
+	}
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (fnic2->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		pr_debug("fnic2->stop_rx_link_events %x\n",
+		fnic2->stop_rx_link_events);
+		goto drop;
+	}
+
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	if (fcp4_frame) {
+		/* FCP-4 command/status/ABTS: straight to the SCSI path */
+		fnic2_fcp_recv(fnic2, fp, bytes_written);
+	} else {
+		frame_elem = kzalloc(sizeof(*frame_elem), GFP_ATOMIC);
+		if (!frame_elem)
+			goto drop;
+		frame_elem->fp = fp;
+		frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
+		frame_elem->frame_len = bytes_written;
+
+		spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+		list_add_tail(&frame_elem->links, &fnic2->frame_queue);
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		queue_work(fnic2_event_queue, &fnic2->frame_work);
+	}
+	return;
+drop:
+	kfree(fp);
+}
+
+/* Per-descriptor RQ completion callback: hand the entry to vnic_rq_service */
+static int fnic2_rq_cmpl_handler_cont(struct vnic_dev *vdev, struct cq_desc *cq_desc, uint8_t type, uint16_t q_number, uint16_t completed_index)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(vdev);
+	struct vnic_rq *rq = &fnic2->rq[q_number];
+
+	vnic_rq_service(rq, cq_desc, completed_index,
+			VNIC_RQ_RETURN_DESC, fnic2_rq_cmpl_frame_recv);
+
+	return 0;
+}
+
+/*
+ * Return 1 when @request_out lies inside the live region of the copy WQ
+ * ring (i.e. it is not a stale index), 0 otherwise.
+ */
+static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, uint16_t request_out)
+{
+	if (wq->to_clean_index <= wq->to_use_index)
+		return request_out >= wq->to_clean_index &&
+		       request_out < wq->to_use_index;
+
+	/* ring has wrapped: the live region itself wraps around the end */
+	return request_out >= wq->to_clean_index ||
+	       request_out < wq->to_use_index;
+}
+
+
+/*
+ * Mark that ack received and store the Ack index. If there are multiple
+ * acks received before Tx thread cleans it up, the latest value will be
+ * used which is correct behavior. This state should be in the copy Wq
+ * instead of in the fnic2
+ */
+static inline void fnic2_fcpio_ack_handler(struct fnic2 *fnic2, unsigned int cq_index, struct fcpio_fw_req *desc)
+{
+	struct vnic_wq_copy *wq;
+	uint16_t request_out = desc->u.ack.request_out;
+	unsigned long flags;
+
+	/* mark the ack state */
+	/*
+	 * NOTE(review): wq is selected per cq_index but the lock and the
+	 * fw_ack_* state always use index 0 — confirm only one copy WQ is
+	 * supported for now.
+	 */
+	wq = &fnic2->wq_copy[cq_index - fnic2->raw_wq_count - fnic2->rq_count];
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+
+	if (is_ack_index_in_range(wq, request_out)) {
+		fnic2->fw_ack_index[0] = request_out;
+		fnic2->fw_ack_recd[0] = 1;
+	}
+
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+}
+
+
+/* Free up Copy Wq descriptors. Called with copy_wq lock held */
+/*
+ * Returns 0 when descriptors were reclaimed, 1 when there was no firmware
+ * ack to act on.  Consumes the latched fw_ack_index/fw_ack_recd state set
+ * by fnic2_fcpio_ack_handler().
+ */
+int free_wq_copy_descs(struct fnic2 *fnic2, struct vnic_wq_copy *wq)
+{
+	/* if no Ack received from firmware, then nothing to clean */
+	if (!fnic2->fw_ack_recd[0])
+		return 1;
+
+	/*
+	 * Update desc_available count based on number of freed descriptors
+	 * Account for wraparound
+	 */
+	if (wq->to_clean_index <= fnic2->fw_ack_index[0])
+		wq->ring.desc_avail += (fnic2->fw_ack_index[0]
+		    - wq->to_clean_index + 1);
+	else
+		wq->ring.desc_avail += (wq->ring.desc_count
+		    - wq->to_clean_index + fnic2->fw_ack_index[0] + 1);
+
+	/*
+	 * just bump clean index to ack_index+1 accounting for wraparound
+	 * this will essentially free up all descriptors between
+	 * to_clean_index and fw_ack_index, both inclusive
+	 */
+	wq->to_clean_index =
+		(fnic2->fw_ack_index[0] + 1) % wq->ring.desc_count;
+
+	/* we have processed the acks received so far */
+	fnic2->fw_ack_recd[0] = 0;
+	return 0;
+}
+
+
+/*
+ * fnic2_flogi_reg_handler
+ * Routine to send flogi register msg to fw
+ * Posts a FIP-register or plain FLOGI-register descriptor on copy WQ 0,
+ * depending on VFCF_FIP_CAPABLE.  Returns 0 on success, -1 when no copy
+ * WQ descriptor is available.
+ * NOTE(review): caller does not hold wq_copy_lock here, unlike the other
+ * copy-WQ posting paths — confirm this is safe.
+ */
+static int fnic2_flogi_reg_handler(struct fnic2 *fnic2, uint32_t fnic2_id)
+{
+	struct vnic_wq_copy *wq = &fnic2->wq_copy[0];
+	enum fcpio_flogi_reg_format_type format;
+	uint8_t gw_mac[ETH_ALEN];
+	int ret = 0;
+
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic2, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq)) {
+		ret = -1;
+		goto flogi_reg_ioreq_end;
+	}
+
+	memcpy(gw_mac, fnic2->lport.fcfmac, ETH_ALEN);
+	format = FCPIO_FLOGI_REG_GW_DEST;
+
+	if (fnic2->config.flags & VFCF_FIP_CAPABLE) {
+		fnic2_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fnic2_id,
+		    gw_mac, fnic2->lport.fpma, fnic2->lport.r_a_tov,
+		    fnic2->lport.e_d_tov);
+		/* NOTE(review): log prints data_src_addr but the descriptor
+		 * was built with lport.fpma — confirm which is intended. */
+		pr_info("FLOGI FIP reg issued fcid %x src %pK dest %pK\n",
+			fnic2_id, fnic2->data_src_addr, gw_mac);
+	} else {
+		fnic2_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
+		    format, fnic2_id, gw_mac);
+		pr_info("FLOGI reg issued fcid %x gw_mac %x %x %x %x %x %x",
+			fnic2_id, gw_mac[0], gw_mac[1],
+			gw_mac[2], gw_mac[3], gw_mac[4], gw_mac[5]);
+	}
+
+flogi_reg_ioreq_end:
+	return ret;
+}
+
+/*
+ * Service every RQ completion queue, refilling RQ buffers after work was
+ * done (unless the link-event path is shutting down).  Returns the total
+ * number of completions processed.
+ */
+int fnic2_rq_cmpl_handler(struct fnic2 *fnic2, int rq_work_to_do)
+{
+	unsigned int total_done = 0;
+	unsigned int done;
+	unsigned int i;
+
+	for (i = 0; i < fnic2->rq_count; i++) {
+		done = vnic_cq_service(&fnic2->cq[i], rq_work_to_do,
+				       fnic2_rq_cmpl_handler_cont);
+		total_done += done;
+		if (done && fnic2->stop_rx_link_events != 1)
+			vnic_rq_fill(&fnic2->rq[i], fnic2_alloc_rq_frame);
+	}
+
+	return total_done;
+}
+
+/*
+ * This function is called once at init time to allocate and fill RQ
+ * buffers. Subsequently, it is called in the interrupt context after RQ
+ * buffer processing to replenish the buffers in the RQ
+ *
+ * Fixed: __GFP_NORETRY alone is not a valid gfp mask (no base type) —
+ * since this may run in interrupt context, use GFP_ATOMIC as the base.
+ * Also check the DMA mapping result before queueing the descriptor.
+ */
+int fnic2_alloc_rq_frame(struct vnic_rq *rq)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(rq->vdev);
+	void *buf;
+	uint16_t len;
+	dma_addr_t pa;
+
+	len = FNIC2_RQ_FRAME_LEN;
+	buf = kmalloc(len, GFP_ATOMIC | __GFP_NORETRY);
+	if (!buf) {
+		pr_debug("Unable to allocate RQ sk_buff\n");
+		return -ENOMEM;
+	}
+
+	pa = pci_map_single(fnic2->pdev, buf, len, PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(fnic2->pdev, pa)) {
+		pr_err("fnic2: RQ buffer DMA mapping failed\n");
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	fnic2_queue_rq_desc(rq, buf, pa, len);
+	return 0;
+}
+
+
+/* Release an RQ buffer: unmap its DMA mapping, then free the backing memory */
+void fnic2_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(rq->vdev);
+
+	pci_unmap_single(fnic2->pdev, buf->dma_addr, buf->len,
+			 PCI_DMA_FROMDEVICE);
+	kfree(buf->os_buf);
+	buf->os_buf = NULL;
+}
+
+/* Send-completion for a raw-WQ frame: unmap DMA and free the frame buffer */
+static void fnic2_wq_complete_frame_send(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(wq->vdev);
+
+	pci_unmap_single(fnic2->pdev, buf->dma_addr, buf->len,
+			 PCI_DMA_TODEVICE);
+	kfree(buf->os_buf);
+	buf->os_buf = NULL;
+}
+
+
+/* Per-descriptor WQ completion callback, serialized by the per-queue lock */
+static int fnic2_wq_cmpl_handler_cont(struct vnic_dev *vdev, struct cq_desc *cq_desc, uint8_t type, uint16_t q_number, uint16_t completed_index)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(vdev);
+	struct vnic_wq *wq = &fnic2->wq[q_number];
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fnic2->wq_lock[q_number], irq_flags);
+	vnic_wq_service(wq, cq_desc, completed_index,
+			fnic2_wq_complete_frame_send);
+	spin_unlock_irqrestore(&fnic2->wq_lock[q_number], irq_flags);
+
+	return 0;
+}
+
+/* Service all raw-WQ completion queues; returns total completions handled */
+int fnic2_wq_cmpl_handler(struct fnic2 *fnic2, int work_to_do)
+{
+	unsigned int done = 0;
+	unsigned int i;
+
+	/* raw-WQ CQs are laid out immediately after the RQ CQs */
+	for (i = 0; i < fnic2->raw_wq_count; i++)
+		done += vnic_cq_service(&fnic2->cq[fnic2->rq_count + i],
+					work_to_do,
+					fnic2_wq_cmpl_handler_cont);
+
+	return done;
+}
+
+
+/* Release a raw-WQ buffer: unmap its DMA mapping, then free the frame */
+void fnic2_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(wq->vdev);
+
+	pci_unmap_single(fnic2->pdev, buf->dma_addr, buf->len,
+			 PCI_DMA_TODEVICE);
+	kfree(buf->os_buf);
+	buf->os_buf = NULL;
+}
+
+/*
+ * Send FC frame.
+ * DMA-maps @frame and posts it on raw WQ 0.  On success the WQ owns the
+ * buffer (freed in fnic2_wq_complete_frame_send()); on failure the
+ * buffer is unmapped and freed here, so callers must not free it.
+ * Returns 0 on success, negative on failure.
+ *
+ * Fixed: the "TBD free buffer" leak on the no-descriptor path, and the
+ * unchecked pci_map_single() result.
+ */
+int fnic2_send_frame(struct fnic2 *fnic2, void *frame, int frame_len)
+{
+	struct vnic_wq *wq = &fnic2->wq[0];
+	dma_addr_t pa;
+	int ret = 0;
+	unsigned long flags;
+
+	pa = pci_map_single(fnic2->pdev, frame, frame_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(fnic2->pdev, pa)) {
+		pr_err("fnic2: DMA mapping of tx frame failed\n");
+		kfree(frame);
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&fnic2->wq_lock[0], flags);
+
+	if (!vnic_wq_desc_avail(wq)) {
+		pci_unmap_single(fnic2->pdev, pa,
+			frame_len, PCI_DMA_TODEVICE);
+		pr_err("vnic work queue descriptor is not available");
+		ret = -1;
+		goto fnic2_send_frame_end;
+	}
+
+	fnic2_queue_wq_desc(wq, frame, pa, frame_len, FNIC2_FCOE_EOF,
+		0 /* hw inserts cos value */,
+		fnic2->vlan_id, 1, 1, 1);
+
+fnic2_send_frame_end:
+	spin_unlock_irqrestore(&fnic2->wq_lock[0], flags);
+
+	if (ret)
+		kfree(frame);	/* was leaked (the old "TBD free buffer") */
+
+	return ret;
+}
+
+/*
+ * Build an FCoE frame (Ethernet hdr + FCoE hdr + @payload) and send it.
+ * If the driver is mid-transition (FLOGI registration in progress) the
+ * frame is queued on tx_queue and sent once a stable mode is reached.
+ * Returns 0 on success/queued, negative errno on failure.
+ *
+ * Fixed: __GFP_NORETRY alone lacks a base gfp type (and the second
+ * allocation happens under fnic2_lock), so use GFP_ATOMIC; also free
+ * @frame when the queue-element allocation fails (it was leaked).
+ */
+int fdls_send_fcoe_frame(struct fnic2 *fnic2, void *payload, int payload_sz, uint8_t *srcmac, uint8_t *dstmac)
+{
+	uint8_t *frame;
+	struct fnic2_eth_hdr *ethhdr;
+	struct fnic2_frame_list *frame_elem;
+	int max_framesz = FNIC2_FCOE_FRAME_MAXSZ;
+	unsigned long flags;
+	int len = 0;
+	int ret;
+
+	frame = kzalloc(max_framesz, GFP_ATOMIC);
+	if (!frame) {
+		pr_debug("fnic2 0x%pK Failed to allocate frame for flogi\n", fnic2);
+		return -ENOMEM;
+	}
+
+	ethhdr = (struct fnic2_eth_hdr *)frame;
+
+	memcpy(frame, (uint8_t *)&fnic2_eth_hdr_fcoe, sizeof(struct fnic2_eth_hdr));
+	len = sizeof(struct fnic2_eth_hdr);
+
+	memcpy(ethhdr->src_mac, srcmac, ETH_ALEN);
+	memcpy(ethhdr->dst_mac, dstmac, ETH_ALEN);
+
+	memcpy(frame + len, (uint8_t *)&fnic2_fcoe_hdr, sizeof(struct fnic2_fcoe_hdr));
+	len += sizeof(struct fnic2_fcoe_hdr);
+
+	memcpy(frame + len, (uint8_t *)payload, payload_sz);
+	len += payload_sz;
+
+	//review: Handle this with the state in fdls for reg_done, and remove
+	//this logic
+	/*
+	 * Queue frame if in a transitional state.
+	 * This occurs while registering the Port_ID / MAC address after FLOGI.
+	 */
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+
+	if ((fnic2->state != FNIC2_IN_FC_MODE) &&
+	    (fnic2->state != FNIC2_IN_ETH_MODE)) {
+		frame_elem = kzalloc(sizeof(*frame_elem), GFP_ATOMIC);
+		if (frame_elem == NULL) {
+			spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+			kfree(frame);	/* was leaked on this path */
+			return -ENOMEM;
+		}
+
+		pr_debug("Queueing frame: %pK\n", frame);
+
+		frame_elem->fp = frame;
+		frame_elem->frame_len = len;
+		list_add_tail(&frame_elem->links, &fnic2->tx_queue);
+
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	ret = fnic2_send_frame(fnic2, frame, len);
+	return ret;
+}
+
+/*
+ * Copy @payload into a freshly allocated frame buffer and transmit it as
+ * a FIP frame.  Returns 0 on success, negative on failure.
+ *
+ * Fixed: __GFP_NORETRY alone is not a valid gfp mask (no base type);
+ * use GFP_ATOMIC and kzalloc instead of kmalloc+memset.
+ */
+int fdls_send_fip_frame(struct fnic2 *fnic2, void *payload, int payload_sz)
+{
+	uint8_t *frame;
+	int max_framesz = FNIC2_FCOE_FRAME_MAXSZ;
+	int ret;
+
+	frame = kzalloc(max_framesz, GFP_ATOMIC);
+	if (!frame) {
+		pr_debug("fnic2 0x%pK Failed to allocate fip frame\n", fnic2);
+		return -ENOMEM;
+	}
+
+	memcpy(frame, (uint8_t *)payload, payload_sz);
+	ret = fnic2_send_frame(fnic2, frame, payload_sz);
+
+	return ret;
+}
+
+/*
+ * Choose source/destination MACs based on whether the FPMA has been
+ * learnt yet, then hand the payload to fdls_send_fcoe_frame().
+ */
+int fnic2_send_fcoe_frame(struct fnic2_lport *lport, void *payload, int payload_sz)
+{
+	struct fnic2 *fnic2 = lport->fnic2;
+	uint8_t *smac;
+	uint8_t *dmac;
+
+	/* If module unload is in-progress, don't send */
+	if (fnic2->in_remove)
+		return -1;
+
+	if (lport->fabric.flags & FNIC2_FDLS_FPMA_LEARNT) {
+		smac = lport->fpma;
+		dmac = lport->fcfmac;
+	} else {
+		/* no FPMA yet: use the hardware MAC and the ALL-FCF address */
+		smac = lport->hwmac;
+		dmac = fcoe_all_fcf_mac;
+	}
+
+	return fdls_send_fcoe_frame(fnic2, payload, payload_sz, smac, dmac);
+}
+
+/* Send a FIP frame unless the module is being removed */
+int fnic2_send_fip_frame(struct fnic2_lport *lport, void *payload, int payload_sz)
+{
+	struct fnic2 *fnic2 = lport->fnic2;
+
+	if (fnic2->in_remove)
+		return -1;
+
+	return fdls_send_fip_frame(fnic2, payload, payload_sz);
+}
+
+/**
+ * fnic2_flush_tx() - send queued frames.
+ * @fnic2: fnic2 device
+ *
+ * Send frames that were waiting to go out in FC or Ethernet mode.
+ * Whenever changing modes we purge queued frames, so these frames should
+ * be queued for the stable mode that we're in, either FC or Ethernet.
+ *
+ * Called without fnic2_lock held.
+ */
+void fnic2_flush_tx(struct fnic2 *fnic2)
+{
+	/* NOTE(review): stub — contrary to the kernel-doc above, frames on
+	 * tx_queue are never actually drained here yet. */
+	return;
+}
+
+/*
+ * Register the assigned @port_id (and the FCF MAC learnt from @fp, when
+ * provided) with the firmware after FLOGI.  Transitions the driver into
+ * FNIC2_IN_ETH_TRANS_FC_MODE while registration is pending.
+ * Returns 0 on success, -1 on failure.
+ * Fixed: "handeler" typo in the error log.
+ */
+int fnic2_fdls_register_portid(struct fnic2_lport *lport, uint32_t port_id, void *fp)
+{
+	struct fnic2 *fnic2 = lport->fnic2;
+	struct fnic2_eth_hdr *ethhdr;
+	unsigned long flags;
+	int ret;
+
+	pr_debug("setting port id: %x and fp: %pK, fnic2 state: %d",
+		port_id, fp, fnic2->state);
+
+	if (fp) {
+		ethhdr = (struct fnic2_eth_hdr *)fp;
+		vnic_dev_add_addr(fnic2->vdev, ethhdr->dst_mac);
+	}
+
+	/* Change state to reflect transition to FC mode */
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (fnic2->state == FNIC2_IN_ETH_MODE || fnic2->state == FNIC2_IN_FC_MODE) {
+		fnic2->state = FNIC2_IN_ETH_TRANS_FC_MODE;
+	} else {
+		pr_debug("fnic2 0x%pK Unexpected fnic2 state while processing flogi resp\n",
+			 fnic2);
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		return -1;
+	}
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	/*
+	 * Send FLOGI registration to firmware to set up FC mode.
+	 * The new address will be set up when registration completes.
+	 */
+	ret = fnic2_flogi_reg_handler(fnic2, port_id);
+
+	if (ret < 0) {
+		spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+
+		pr_debug("flogi reg handler Error ret: %d, fnic2 state: %d\n",
+			ret, fnic2->state);
+
+		/* roll back the state transition we made above */
+		if (fnic2->state == FNIC2_IN_ETH_TRANS_FC_MODE)
+			fnic2->state = FNIC2_IN_ETH_MODE;
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+		return -1;
+	}
+
+	pr_debug("fnic2 0x%pK Waiting for flogi registration\n", fnic2);
+
+	/* TODO: Use a timeout and handle it */
+	msleep(500);
+
+	lport->fabric.flags |= FNIC2_FDLS_FPMA_LEARNT;
+
+	pr_debug("fnic2 0x%pK flogi registration Success\n", fnic2);
+
+	return 0;
+}
+
+/*
+ * Issue an FCPIO firmware reset for the fnic2 behind @lport and restore
+ * the old driver state if the reset request could not be posted.
+ * NOTE(review): the retry loop below busy-spins on fnic2_lock while
+ * another reset is in progress — consider a wait/completion instead.
+ */
+void fnic2_scsi_fcpio_reset(struct fnic2_lport *lport)
+{
+	int err = 0;
+	enum fnic2_state old_state;
+	struct fnic2 *fnic2 = lport->fnic2;
+	unsigned long flags;
+
+	/* Issue firmware reset for fnic2, wait for reset to complete */
+retry_fw_reset:
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (fnic2->state == FNIC2_IN_FC_TRANS_ETH_MODE) {
+		/* fw reset is in progress, poll for its completion */
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		goto retry_fw_reset;
+	}
+	old_state = fnic2->state;
+	fnic2->state = FNIC2_IN_FC_TRANS_ETH_MODE;
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	err = fnic2_fw_reset_handler(fnic2);
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	if (err) {
+		/* posting the reset failed: roll back the state change */
+		if (fnic2->state == FNIC2_IN_FC_TRANS_ETH_MODE)
+			fnic2->state = old_state;
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		return;
+	}
+
+	if (lport->state != FNIC2_IPORT_STATE_INIT) {
+	    //TBD
+	}
+
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+}
+
+/* Callback to clean up a pending copy-WQ descriptor during teardown (stub) */
+void fnic2_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc)
+{
+	/* TBD */
+}
+
+/*
+ * Free every queued frame element on @list (both the frame buffer and
+ * the list node).  Caller must hold the lock protecting @list.
+ * Fixed: dropped the redundant list_empty() guard —
+ * list_for_each_entry_safe() already handles an empty list.
+ */
+void list_free_buffer(struct list_head *list)
+{
+	struct fnic2_frame_list *frame, *next;
+
+	list_for_each_entry_safe(frame, next, list, links) {
+		list_del(&frame->links);
+		kfree(frame->fp);
+		kfree(frame);
+	}
+}
+
+/*
+ * fnic2_fw_reset_handler
+ * Routine to send reset msg to fw: purge all queued frames, wait for
+ * in-flight I/O to drain, then post a fw-reset descriptor on copy WQ 0.
+ * Returns 0 on success, -EAGAIN when no copy WQ descriptor is available.
+ *
+ * Fixed: schedule_timeout() was called without setting the task state,
+ * so it returned immediately (a busy spin); msleep() actually sleeps.
+ */
+int fnic2_fw_reset_handler(struct fnic2 *fnic2)
+{
+	struct vnic_wq_copy *wq = &fnic2->wq_copy[0];
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+	list_free_buffer(&fnic2->frame_queue);
+	list_free_buffer(&fnic2->tx_queue);
+	if (fnic2->config.flags & VFCF_FIP_CAPABLE)
+		list_free_buffer(&fnic2->fip_frame_queue);
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	/* wait for io cmpl */
+	while (atomic_read(&fnic2->in_flight))
+		msleep(1);
+
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic2, wq);
+
+	if (!vnic_wq_copy_desc_avail(wq))
+		ret = -EAGAIN;
+	else
+		fnic2_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+
+	if (!ret)
+		pr_debug("Issued fw reset\n");
+	else
+		pr_debug("Failed to issue fw reset\n");
+
+	return ret;
+}
+
+/*
+ * fnic2_wq_copy_cmpl_handler
+ * Service the completion queue of every copy WQ and return the total
+ * number of completions processed.
+ */
+int fnic2_wq_copy_cmpl_handler(struct fnic2 *fnic2, int copy_work_to_do)
+{
+	unsigned int total_done = 0;
+	unsigned int wq_idx;
+
+	for (wq_idx = 0; wq_idx < fnic2->wq_copy_count; wq_idx++) {
+		unsigned int cq_idx = wq_idx + fnic2->raw_wq_count +
+				      fnic2->rq_count;
+
+		total_done += vnic_cq_copy_service(&fnic2->cq[cq_idx],
+						   fnic2_fcpio_cmpl_handler,
+						   copy_work_to_do);
+	}
+	return total_done;
+}
+
+/*
+ * fnic2_fcpio_cmpl_handler
+ * Routine to service the cq for wq_copy.  Dispatches each firmware
+ * completion descriptor by type; TCMD completions are queued to the
+ * per-cpu workqueue for deferred handling.
+ */
+static int fnic2_fcpio_cmpl_handler(struct vnic_dev *vdev,
+				    unsigned int cq_index,
+				    struct fcpio_fw_req *desc)
+{
+	struct fnic2 *fnic2 = vnic_dev_priv(vdev);
+	struct fnic2_cmd *tcmd;
+	int cpu;
+
+	switch (desc->hdr.type) {
+	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
+		fnic2_fcpio_ack_handler(fnic2, cq_index, desc);
+		break;
+
+	case FCPIO_ICMND_CMPL: /* fw completed a command */
+		break;
+
+	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+		break;
+
+	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+		fnic2_fcpio_flogi_reg_cmpl_handler(fnic2, desc);
+		break;
+
+	case FCPIO_RESET_CMPL: /* fw completed reset */
+		fnic2_fcpio_fw_reset_cmpl_handler(fnic2, desc);
+		break;
+
+	case FCPIO_TCMD_SEND_DATA: /* fw completed sending rd data */
+	case FCPIO_TCMD_RECV_DATA: /* fw completed recving wr data */
+	case FCPIO_TCMD_FREE_CMD: /* fw completed freeing a cmd */
+	case FCPIO_TCMD_ABORT_CMD: /* fw completed an abort request */
+	case FCPIO_TCMD_TASK_MGMT: /* fw completed a task mgmt request */
+
+		pr_debug("FW completion received FDLS fcpio_tag %x\n", desc->hdr.fcpio_tag);
+		if (desc->hdr.fcpio_tag != 0xFFFFFFFF && desc->hdr.status != FCPIO_ABORTED) {
+			tcmd = fnic2_find_tcmd(fnic2, desc->hdr.fcpio_tag);
+			/* the tag may no longer map to a live command */
+			if (!tcmd) {
+				pr_err("no tcmd for fcpio_tag %x\n",
+				       desc->hdr.fcpio_tag);
+				break;
+			}
+			memcpy(&tcmd->fw_desc, desc, sizeof(struct fcpio_fw_req));
+
+			cpu = get_cpu_to_queue(tcmd->cmd_tag);
+
+			INIT_WORK(&tcmd->work, fnic2_fcpio_tcmd_cmpl_handler);
+			queue_work_on(cpu, fnic2_tcmd_wq, &tcmd->work);
+		}
+		break;
+
+	default:
+		pr_debug("firmware completion type %d\n", desc->hdr.type);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * fnic2_fcpio_fw_reset_cmpl_handler
+ * Routine to handle fw reset completion.
+ * On success moves the adapter to FNIC2_IN_ETH_MODE (ready to send
+ * FLOGI); on failure falls back to FNIC2_IN_FC_MODE and returns -1.
+ */
+static int fnic2_fcpio_fw_reset_cmpl_handler(struct fnic2 *fnic2, struct fcpio_fw_req *desc)
+{
+	uint8_t type;
+	uint8_t hdr_status;
+	uint32_t fcpio_tag;
+	int ret = 0;
+	unsigned long flags;
+
+	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &fcpio_tag);
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, flags);
+
+	/* fnic2 should be in FC_TRANS_ETH_MODE */
+	if (fnic2->state == FNIC2_IN_FC_TRANS_ETH_MODE) {
+		/* Check status of reset completion */
+		if (!hdr_status) {
+			pr_debug("reset cmpl success\n");
+			/* Ready to send flogi out */
+			fnic2->state = FNIC2_IN_ETH_MODE;
+		} else {
+			pr_debug("fnic2 fw_reset : failed %s\n",
+				fnic2_fcpio_status_to_str(hdr_status));
+
+			/*
+			 * Unable to change to eth mode, cannot send out flogi.
+			 * Change state to fc mode, so that subsequent Flogi
+			 * requests from libFC will cause more attempts to
+			 * reset the firmware. Free the cached flogi.
+			 */
+			fnic2->state = FNIC2_IN_FC_MODE;
+			ret = -1;
+		}
+	} else {
+		pr_debug("Unexpected state %s while processing reset cmpl\n",
+			 fnic2_state_to_str(fnic2->state));
+		ret = -1;
+	}
+
+	/* Thread removing device blocks till firmware reset is complete */
+	if (fnic2->remove_wait)
+		complete(fnic2->remove_wait);
+
+	/*
+	 * If fnic2 is being removed, or fw reset failed,
+	 * free the flogi frame. Else, send it out.
+	 */
+	if (fnic2->remove_wait || ret) {
+		spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+		list_free_buffer(&fnic2->tx_queue);
+		goto reset_cmpl_handler_end;
+	}
+
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, flags);
+
+	fnic2_flush_tx(fnic2);
+
+reset_cmpl_handler_end:
+	return ret;
+}
diff --git a/drivers/staging/fnic2/src/fnic2_fdls.h b/drivers/staging/fnic2/src/fnic2_fdls.h
new file mode 100644
index 0000000..bd6f797
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_fdls.h
@@ -0,0 +1,232 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC2_FDLS_H_
+#define _FNIC2_FDLS_H_
+
+#include <linux/list.h>
+#include "fdls_fc.h"
+#include "fcpio.h"
+#include "fnic2_lio.h"
+
+/* Extract a 24-bit big-endian value from byte array p */
+#define ntoh24(p) ((((p)[0]) << 16) | (((p)[1]) << 8) | ((p)[2]))
+
+/* Store 24-bit value v into byte array p, big-endian.
+ * Parenthesize p on every use so expression arguments expand safely.
+ */
+#define hton24(p, v)    do {                    \
+		(p)[0] = (((v) >> 16) & 0xFF);  \
+		(p)[1] = (((v) >> 8) & 0xFF);   \
+		(p)[2] = ((v) & 0xFF);          \
+	} while (0)
+
+/* FDLS - Fabric discovery and login services
+ * -> VLAN discovery
+ *   -> retry every retry delay seconds until it succeeds.
+ *                        <- List of VLANs
+ *
+ * -> Solicitation
+ *                        <- Solicitation response (Advertisement)
+ *
+ * -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV)
+ *                        <- FLOGI response
+ *
+ * -> FCF keep alive
+ *                         <- FCF keep alive
+ *
+ * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ *                        <- PLOGI response
+ *    -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
+ *
+ * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
+ *    -> Retry on BUSY until it succeeds
+ *    -> 2 retries on timeout
+ *
+ * -> RFT_ID to FFFFFC (DNS)        (RFT_ID timeout - 3 * R_A_TOV)
+ *    -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ *    -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2 )
+ *    -> Ignore if both retries fail.
+ *
+ */
+#define FDLS_RETRY_COUNT 2
+
+/* Attributes of an FCF learned/selected through FIP */
+struct fnic2_fip_fcf {
+	uint16_t	vlan_id;
+	uint8_t		fcf_mac[6];	/* MAC address of the FCF */
+	uint8_t		fcf_priority;	/* FIP priority -- TODO confirm ordering (lower wins?) */
+	uint16_t	fka_adv_period;	/* FIP keep-alive advertisement period -- TODO confirm units */
+	uint8_t		ka_disabled;	/* nonzero: keep-alives disabled */
+};
+
+/* FDLS structure - not visible to fnic2 driver */
+enum fnic2_fdls_state {
+	FDLS_STATE_INIT = 0,
+	FDLS_STATE_LINKDOWN,
+	FDLS_STATE_FABRIC_FLOGI,
+	FDLS_STATE_FABRIC_PLOGI,
+	FDLS_STATE_RPN_ID,
+	FDLS_STATE_REGISTER_FC4_TYPES,
+	FDLS_STATE_REGISTER_FC4_FEATURES,
+	FDLS_STATE_TGT_ONLINE
+};
+
+#define FNIC2_FDLS_FABRIC_ABORT_ISSUED     0x1
+#define FNIC2_FDLS_FPMA_LEARNT             0x2
+
+/* Fabric login/discovery state machine context (one per lport) */
+struct fnic2_fdls_fabric {
+	enum fnic2_fdls_state	state;
+	uint32_t		flags;	/* FNIC2_FDLS_* bits */
+	struct list_head	rport_list; /* List of logged rports */
+
+	/* retry bookkeeping for the current fabric request */
+	struct timer_list	retry_timer;
+	int			retry_counter;
+	int			timer_pending;
+
+};
+
+struct fnic2_fdls_fip {
+	uint32_t	state;
+	uint32_t	flogi_retry;
+};
+
+enum fdls_tgt_state {
+	fdls_rport_state_plogi		= 1,
+	fdls_rport_state_offlining	= 6
+};
+
+/* rport flags */
+#define FNIC2_FDLS_RETRY_FRAME          0x8
+
+/* Remote port peer tracked by FDLS */
+struct fnic2_rport {
+	struct list_head	links; /* To link the rports */
+
+	enum fdls_tgt_state	state;
+	uint32_t		flags;	/* e.g. FNIC2_FDLS_RETRY_FRAME */
+	uint32_t		fcid;	/* 24-bit FC address of the rport */
+	uint64_t		wwpn;
+	uint64_t		wwnn;
+
+	/* retry bookkeeping for requests outstanding to this rport */
+	struct timer_list	retry_timer;
+	int			retry_counter;
+	int			timer_pending;
+
+	void			*lport;	/* owning lport -- TODO make this a typed pointer */
+};
+
+/* lport */
+enum fnic2_lport_state {
+	FNIC2_IPORT_STATE_INIT = 0,
+	FNIC2_IPORT_STATE_LINK_WAIT,
+	FNIC2_IPORT_STATE_FIP,
+	FNIC2_IPORT_STATE_FABRIC_DISC
+};
+
+/* Local FC port: FIP/fabric state machines plus login parameters */
+struct fnic2_lport {
+	enum fnic2_lport_state		state;
+	struct fnic2			*fnic2;
+	uint64_t			boot_time;
+	uint32_t			flags;
+	int				usefip;	/* nonzero: FIP (FCoE) mode */
+	uint8_t				hwmac[6];  /* HW MAC Addr */
+	uint8_t				fpma[6];   /* Fabric Provided MA */
+	uint8_t				fcfmac[6]; /* MAC addr of Fabric */
+	uint16_t			vlan_id;
+	uint32_t			fcid;	/* assigned 24-bit FC address */
+
+	struct fnic2_fip_fcf		selected_fcf;
+	struct fnic2_fdls_fip		fip;
+
+	struct fnic2_fdls_fabric	fabric;
+	struct list_head		rport_list;
+
+	/* Config block: */
+	uint32_t			e_d_tov; //msec
+	uint32_t			r_a_tov; //msec
+	uint32_t			link_supported_speeds;
+	uint32_t			max_flogi_retries;
+	uint32_t			max_plogi_retries;
+	uint32_t			service_params;
+	uint64_t			wwpn;
+	uint64_t			wwnn;
+	uint16_t			mfs;	/* max frame size -- TODO confirm */
+
+	spinlock_t			lport_lock;
+
+	struct fnic2_lport		*lport;	/* NOTE(review): self pointer, unused in visible code -- confirm */
+};
+
+enum fnic2_recv_frame_type {
+	FNIC2_FABRIC_FLOGI_RSP	= 0,
+	FNIC2_FABRIC_PLOGI_RSP,
+	FNIC2_FABRIC_RPN_RSP,
+	FNIC2_FABRIC_RFT_RSP,
+	FNIC2_FABRIC_RFF_RSP,
+	FNIC2_TPORT_LOGO_RSP	= 9,
+	FNIC2_BLS_ABTS_RSP,
+	FNIC2_ELS_PLOGI_REQ,
+	FNIC2_ELS_PRLI_REQ,
+	FNIC2_ELS_LOGO_REQ,
+	FNIC2_ELS_RTV_REQ,
+	FNIC2_ELS_RRQ_REQ
+};
+
+#define FNIC2_FDLS_RETRY_FRAME          0x8
+
+/* Function Declarations */
+
+/* fdls_disc.c */
+void fnic2_fdls_disc_init(struct fnic2_lport *lport);
+void fnic2_fdls_disc_start(struct fnic2_lport *lport);
+void fnic2_fdls_recv_frame(struct fnic2_lport *lport, void *rx_frame, int len, int fchdr_offset);
+void fnic2_fdls_link_down(struct fnic2_lport *lport);
+void fdls_send_ba_acc(struct fnic2_lport *lport, struct fc_hdr *fchdr);
+void fdls_fabric_timer_callback(struct timer_list *timer);
+
+/* fdls_if.c */
+void fnic2_fdls_init(struct fnic2 *fnic2, int usefip);
+void fnic2_fdls_cleanup(struct fnic2 *fnic2);
+void fnic2_scsi_fcpio_reset(struct fnic2_lport *lport);
+
+int fnic2_send_fcoe_frame(struct fnic2_lport *lport, void *payload, int payload_sz);
+int fnic2_send_fip_frame(struct fnic2_lport *lport, void *payload, int payload_sz);
+void fnic2_fdls_learn_fcoe_macs(struct fnic2_lport *lport, void *rx_frame, uint8_t *fcid);
+void fnic2_set_port_id(struct fnic2_lport *lport, uint32_t port_id, void *fp);
+void fnic2_exch_mgr_reset(struct fnic2_lport *lport, uint32_t s_id, uint32_t d_id);
+void fnic2_fdls_link_down(struct fnic2_lport *lport);
+void list_free_buffer(struct list_head *list);
+int free_wq_copy_descs(struct fnic2 *fnic2, struct vnic_wq_copy *wq);
+
+/* fip.c */
+void fnic2_fcoe_reset_vlans(struct fnic2 *fnic2);
+void fnic2_fcoe_send_vlan_req(struct fnic2 *fnic2);
+void fnic2_common_fip_cleanup(struct fnic2 *fnic2);
+int fdls_fip_recv_frame(struct fnic2 *fnic2, void *frame);
+void fnic2_handle_fcs_ka_timer(struct timer_list *timer);
+void fnic2_handle_enode_ka_timer(struct timer_list *timer);
+void fnic2_handle_vn_ka_timer(struct timer_list *timer);
+void fnic2_handle_fip_timer(struct timer_list *timer);
+void fdls_send_logout(struct fnic2_lport *lport, struct fc_hdr *fchdr);
+
+/* utils */
+void fnic2_fdls_learn_fcoe_macs(struct fnic2_lport *lport, void *rx_frame, uint8_t *fcid);
+int fnic2_fdls_register_portid(struct fnic2_lport *lport, uint32_t port_id, void *fp);
+
+#define FNIC2_PORTSPEED_10GBIT   1
+#define FNIC2_FCOE_MAX_PAYLOAD   2048
+#define FNIC2_FCOE_FRAME_MAXSZ   2112
+#define FNIC2_RQ_FRAME_LEN       2148 //FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM
+#endif /* _FNIC2_FDLS_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 05/10] staging: fnic2 add LIO interface
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (3 preceding siblings ...)
  2018-04-05 21:19 ` [PATCH 04/10] staging: fnic2 add fdls system Oliver Smith-Denny
@ 2018-04-05 21:20 ` Oliver Smith-Denny
  2018-04-05 21:21 ` [PATCH 06/10] staging: fnic2 add main frame processing Oliver Smith-Denny
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:20 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain structures and callback functions
for communicating with LIO.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fnic2_lio.c | 815 ++++++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fnic2_lio.h | 113 +++++
 2 files changed, 928 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fnic2_lio.c
 create mode 100644 drivers/staging/fnic2/src/fnic2_lio.h

diff --git a/drivers/staging/fnic2/src/fnic2_lio.c b/drivers/staging/fnic2/src/fnic2_lio.c
new file mode 100644
index 0000000..64ca5d2
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_lio.c
@@ -0,0 +1,815 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2017 Cisco Systems, Inc.  All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/fc/fc_fcp.h>
+#include "fnic2.h"
+#include "fnic2_lio.h"
+
+extern struct fnic2 *find_fnic2_wwpn(uint64_t wwpn);
+extern int fnic2_send_to_fw(struct fnic2_cmd *tcmd, int req_type);
+extern void fnic2_free_fw_res(struct fnic2_cmd *tcmd);
+extern void fnic2_free_tcmd(struct fnic2_cmd *tcmd);
+
+#define FT_VERSION "0.1"
+
+static const struct target_core_fabric_ops fnic2_ops;
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ *
+ * Returns the number of characters consumed on success, -1 on error.
+ */
+static ssize_t ft_parse_wwn(const char *name, uint64_t *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	uint32_t byte = 0;
+	uint32_t pos = 0;
+	uint32_t err;
+	int val;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[FNIC2_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		/* in strict mode every third character must be a colon */
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		val = hex_to_bin(c);
+		if (val < 0 || (strict && isupper(c)))
+			goto fail;
+		*wwn = (*wwn << 4) | val;
+	}
+	err = 4;
+fail:
+	/* cp - name is ptrdiff_t: %td is the matching specifier, not %zu */
+	pr_debug("err %u len %td pos %u byte %u\n",
+		err, cp - name, pos, byte);
+	return -1;
+}
+
+/*
+ * fnic2_parse_wwpn
+ * Convert a colon-separated hex WWPN string into its 64-bit value.
+ * Returns 0 on success, -1 on an invalid character.
+ */
+static int fnic2_parse_wwpn(const char *wwn, uint64_t *wwpn)
+{
+	int i;
+	int shift = 60;
+	unsigned char c;
+	/* do NOT name a local "uint64_t": it shadows the stdint typedef */
+	unsigned long long val = 0, d64;
+
+	for (i = 0; i < 23; i++) {
+		c = wwn[i];
+		if (c == ':')
+			continue;
+
+		if (c >= '0' && c <= '9') {
+			d64 = c - '0';
+		} else if (c >= 'A' && c <= 'F') {
+			d64 = 0xA + (c - 'A');
+		} else if (c >= 'a' && c <= 'f') {
+			d64 = 0xA + (c - 'a');
+		} else {
+			pr_err("Invalid WWPN %c\n", c);
+			return -1;
+		}
+
+		val |= d64 << shift;
+		shift -= 4;
+	}
+	*wwpn = val;
+	return 0;
+}
+
+/* Format a 64-bit WWN as "xx:xx:xx:xx:xx:xx:xx:xx" into buf */
+static ssize_t ft_format_wwn(char *buf, size_t len, uint64_t wwn)
+{
+	uint8_t octets[8];
+
+	put_unaligned_be64(wwn, octets);
+	return snprintf(buf, len,
+			"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+			octets[0], octets[1], octets[2], octets[3],
+			octets[4], octets[5], octets[6], octets[7]);
+}
+
+/* configfs show helper: print *arg as a WWN followed by a newline */
+static ssize_t ft_wwn_show(void *arg, char *buf)
+{
+	uint64_t *wwn = arg;
+	ssize_t n;
+
+	n = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
+	buf[n] = '\n';
+	return n + 1;
+}
+
+/* configfs store helper: parse buf into *arg when valid */
+static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
+{
+	uint64_t parsed;
+	ssize_t consumed;
+
+	consumed = ft_parse_wwn(buf, &parsed, 0);
+	if (consumed > 0)
+		*(uint64_t *)arg = parsed;
+	return consumed;
+}
+
+/*
+ * ACL auth ops.
+ */
+
+/* Show the ACL's port name (WWPN) */
+static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page)
+{
+	struct ft_node_acl *acl;
+
+	acl = container_of(acl_to_nacl(item), struct ft_node_acl, se_node_acl);
+	return ft_wwn_show(&acl->node_auth.port_name, page);
+}
+
+/* Store a new port name (WWPN) into the ACL */
+static ssize_t ft_nacl_port_name_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct ft_node_acl *acl;
+
+	acl = container_of(acl_to_nacl(item), struct ft_node_acl, se_node_acl);
+	return ft_wwn_store(&acl->node_auth.port_name, page, count);
+}
+
+/* Show the ACL's node name (WWNN) */
+static ssize_t ft_nacl_node_name_show(struct config_item *item,
+		char *page)
+{
+	struct ft_node_acl *acl;
+
+	acl = container_of(acl_to_nacl(item), struct ft_node_acl, se_node_acl);
+	return ft_wwn_show(&acl->node_auth.node_name, page);
+}
+
+/* Store a new node name (WWNN) into the ACL */
+static ssize_t ft_nacl_node_name_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct ft_node_acl *acl;
+
+	acl = container_of(acl_to_nacl(item), struct ft_node_acl, se_node_acl);
+	return ft_wwn_store(&acl->node_auth.node_name, page, count);
+}
+
+CONFIGFS_ATTR(ft_nacl_, node_name);
+CONFIGFS_ATTR(ft_nacl_, port_name);
+
+/* Show the initiator node's ACL tag string */
+static ssize_t ft_nacl_tag_show(struct config_item *item,
+		char *page)
+{
+	struct se_node_acl *nacl = acl_to_nacl(item);
+
+	return snprintf(page, PAGE_SIZE, "%s", nacl->acl_tag);
+}
+
+/* Store a new ACL tag; returns count on success, negative errno on failure */
+static ssize_t ft_nacl_tag_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_node_acl *se_nacl = acl_to_nacl(item);
+	int rc;
+
+	rc = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+	return rc < 0 ? rc : count;
+}
+
+CONFIGFS_ATTR(ft_nacl_, tag);
+
+static struct configfs_attribute *ft_nacl_base_attrs[] = {
+	&ft_nacl_attr_port_name,
+	&ft_nacl_attr_node_name,
+	&ft_nacl_attr_tag,
+	NULL,
+};
+
+/*
+ * ACL ops.
+ */
+
+/*
+ * ft_init_nodeacl
+ * Initialize a node ACL from its configfs name (a WWPN string).
+ * Returns 0 on success, -EINVAL if the name does not parse strictly.
+ */
+static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
+{
+	struct ft_node_acl *acl =
+		container_of(nacl, struct ft_node_acl, se_node_acl);
+	uint64_t wwpn;
+
+	/* informational messages belong at debug level, not pr_err */
+	pr_debug("Creating ACL for %s\n", name);
+
+	if (ft_parse_wwn(name, &wwpn, 1) < 0)
+		return -EINVAL;
+	pr_debug("Created ACL for %s %llx\n", name, wwpn);
+
+	acl->node_auth.port_name = wwpn;
+	return 0;
+}
+
+/* Report the driver/fabric version for the configfs "version" attribute */
+static ssize_t ft_wwn_version_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "CISCO SYSTEMS, INC. " FT_VERSION " on %s/%s on "
+		       "" UTS_RELEASE "\n", utsname()->sysname,
+		       utsname()->machine);
+}
+
+CONFIGFS_ATTR_RO(ft_wwn_, version);
+
+static struct configfs_attribute *ft_wwn_attrs[] = {
+	&ft_wwn_attr_version,
+	NULL,
+};
+
+/* fabric */
+/* Always-true predicate for target-core tpg checks */
+static int fnic2_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+/* Always-false predicate for target-core tpg checks */
+static int fnic2_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/* Fabric name as registered with the target core */
+static char *fnic2_get_fabric_name(void)
+{
+	return "fnic2";
+}
+
+/* Return the ASCII WWN string of the lport owning this tpg */
+static char *fnic2_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct fnic2_tpg *tpg;
+
+	tpg = container_of(se_tpg, struct fnic2_tpg, se_tpg);
+	return tpg->lport->lport_name;
+}
+
+/* Return the target portal group tag */
+static uint16_t fnic2_get_tag(struct se_portal_group *se_tpg)
+{
+	struct fnic2_tpg *tpg;
+
+	tpg = container_of(se_tpg, struct fnic2_tpg, se_tpg);
+	return tpg->lport_tpgt;
+}
+
+/* Single-instance TPG; the index is always 1 */
+static uint32_t fnic2_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+/* Drop the session command reference when the command stops */
+static int fnic2_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *cmd;
+
+	cmd = container_of(se_cmd, struct fnic2_cmd, se_cmd);
+	pr_debug("check stop free for tag %x\n", cmd->cmd_tag);
+	return target_put_sess_cmd(se_cmd);
+}
+
+/*
+ * fnic2_release_cmd
+ * Final release callback from the target core; frees the driver command.
+ */
+static void fnic2_release_cmd(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *tcmd = container_of(se_cmd,
+		struct fnic2_cmd, se_cmd);
+
+	/* normalize the flag to 0/1 instead of printing the raw bit mask */
+	pr_debug("DEBUG %s: %x, is_write: %d, rx_id: %x\n",
+		__func__, tcmd->cmd_tag,
+		!!(tcmd->flags & FNIC2_TCMD_WRITECMD), tcmd->rx_id);
+
+	if (tcmd->se_cmd.t_data_sg)
+		pr_err("t_data_sg not NULL\n");
+	fnic2_free_tcmd(tcmd);
+}
+
+/* Session indices are not tracked; always 0 */
+static uint32_t fnic2_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+/*
+ * fnic2_write_pending
+ * Target-core callback: start receiving WRITE data from the initiator
+ * by handing the command's scatterlist to the firmware.
+ * Returns 0, or the error from fnic2_send_to_fw().
+ */
+static int fnic2_write_pending(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *tcmd = container_of(se_cmd,
+		struct fnic2_cmd, se_cmd);
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tcmd->lock, flags);
+
+	/* nothing to do if the command has already been aborted */
+	if (tcmd->flags & FNIC2_TCMD_ABORTED) {
+		spin_unlock_irqrestore(&tcmd->lock, flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+
+	pr_debug("DEBUG write_pending tag: %x\n", tcmd->cmd_tag);
+
+	/* Check the se->cmd status and process accordingly */
+	if (se_cmd->scsi_status != SAM_STAT_GOOD) {
+		pr_err("ERROR fn: %s ln: %d, sts: %d\n",
+			__func__, __LINE__, se_cmd->scsi_status);
+		/* TBD send_resp or wait for q_sts? */
+		return 0;
+	}
+
+	tcmd->data_len_xfer = se_cmd->data_length;
+	if (tcmd->data_len_req != tcmd->data_len_xfer) {
+		/* TBD */
+		pr_err("ERROR fn: %s ln: %d\n", __func__, __LINE__);
+	}
+
+	/* hand LIO's scatterlist to the firmware for the data-out phase */
+	tcmd->sgcnt = se_cmd->t_data_nents;
+	tcmd->sgl = se_cmd->t_data_sg;
+	/* NOTE(review): legacy PCI_DMA_* API; confirm direction semantics
+	 * against fnic2_send_to_fw() before switching to DMA_FROM_DEVICE */
+	tcmd->dma_direction = PCI_DMA_FROMDEVICE;
+
+	ret = fnic2_send_to_fw(tcmd, FCPIO_TCMD_RECV_DATA);
+
+	return ret;
+}
+
+/*
+ * fnic2_queue_status
+ * Target-core callback: send the SCSI status/response frame for a
+ * completed command back to the initiator.
+ */
+static int fnic2_queue_status(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *cmd = container_of(se_cmd,
+		struct fnic2_cmd, se_cmd);
+	unsigned long flags;
+	bool aborted;
+
+	pr_debug("DEBUG queue_status tag: %x\n", cmd->cmd_tag);
+
+	spin_lock_irqsave(&cmd->lock, flags);
+	aborted = cmd->flags & FNIC2_TCMD_ABORTED;
+	spin_unlock_irqrestore(&cmd->lock, flags);
+	if (aborted)
+		return 0;
+
+	if (se_cmd->scsi_status != SAM_STAT_GOOD) {
+		pr_err("WARNING: Cmd %x SCSI Status Error: %x, cmd_tag: %x\n",
+			cmd->ox_id, se_cmd->scsi_status, cmd->cmd_tag);
+	}
+
+	fnic2_send_fcp_resp(cmd);
+
+	/* Delete I/O timer if it is active */
+	if ((try_to_del_timer_sync(&cmd->io_timer)) < 0)
+		pr_err("Could not delete timer on tcmd tag %x\n", cmd->cmd_tag);
+
+	if (cmd->flags & FNIC2_TCMD_WRITECMD)
+		fnic2_free_fw_res(cmd);
+	else
+		transport_generic_free_cmd(se_cmd, 0);
+
+	return 0;
+}
+
+/*
+ * fnic2_queue_tm_rsp
+ * LIO has completed a task management command.
+ * Send a "bubble" command to fw that will be processed after
+ * all the aborted commands, then send the task mgmt response to
+ * initiator (from fnic2_complete_tm_rsp once fw acks).
+ */
+static void fnic2_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *tcmd = container_of(se_cmd,
+				 struct fnic2_cmd, se_cmd);
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	unsigned long flags;
+	struct fcpio_host_req *desc;
+	struct vnic_wq_copy *wq;
+
+	pr_err("TMR RESPONSE se_cmd %pK tcmd %pK\n", se_cmd, tcmd);
+
+	spin_lock_irqsave(&tcmd->lock, flags);
+
+	if (tcmd->flags & (FNIC2_TCMD_IS_ABTS | FNIC2_TCMD_ABORTED)) {
+		spin_unlock_irqrestore(&tcmd->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+	/* Use the Copy WQ to send it to FW */
+	wq = &fnic2->wq_copy[0];
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic2, wq);
+
+	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+		pr_err("DEBUG ERROR no wq desc for free_cmd\n");
+		spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+		return;
+	}
+
+	/*
+	 * Build the request directly in the WQ descriptor.  The previous
+	 * kzalloc() bounce buffer was dereferenced after only a WARN_ON,
+	 * which would NULL-deref on allocation failure.
+	 */
+	desc = vnic_wq_copy_next_desc(wq);
+	memset(desc, 0, sizeof(*desc));
+	desc->hdr.type = FCPIO_TCMD_TASK_MGMT;
+	desc->hdr.fcpio_tag = tcmd->cmd_tag;
+
+	vnic_wq_copy_post(wq);
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+}
+
+/*
+ * fnic2_send_abort_to_lio
+ * Submit a TMR_ABORT_TASK to the target core for the command the
+ * firmware has aborted on our behalf.
+ */
+void fnic2_send_abort_to_lio(struct fnic2_cmd *abort_tcmd)
+{
+	struct fnic2_cmd *victim = abort_tcmd->killed_tcmd;
+	struct fc_hdr *fchdr = (struct fc_hdr *)victim->rx_frame;
+	struct fnic2 *fnic2 = victim->fnic2;
+	struct fnic2_sess *sess;
+	int tmr_flags;
+	int rc;
+
+	sess = fnic2_find_sess_s_id(fnic2, ntoh24(fchdr->s_id));
+	if (!sess) {
+		/* TBD cleanup */
+		return;
+	}
+
+	/* TBD sanity check on killed_tcmd ??? */
+
+	tmr_flags = TARGET_SCF_ACK_KREF | TARGET_SCF_LOOKUP_LUN_FROM_TAG;
+
+	rc = target_submit_tmr(&abort_tcmd->se_cmd, sess->se_sess, NULL, 0,
+			       victim, TMR_ABORT_TASK, GFP_ATOMIC,
+			       victim->cmd_tag, tmr_flags);
+	if (rc) {
+		pr_err("%s submitting tmr failed\n", __func__);
+		/*
+		 * LIO failed the ABTS req, but I/O will be
+		 * handled by Lun Reset or I/O timer reaper
+		 */
+	}
+}
+
+/*
+ * fnic2_complete_tm_rsp
+ * fw has aborted all exchanges;
+ * send the final task management function response to initiator.
+ */
+void fnic2_complete_tm_rsp(struct fnic2_cmd *tcmd)
+{
+	struct se_cmd *se_cmd = &tcmd->se_cmd;
+	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
+	enum fcp_resp_rsp_codes code;
+	unsigned long flags;
+	bool skip;
+
+	pr_err("TMR RESPONSE fw completed - se_cmd %pK tcmd %pK tmr %pK\n",
+		se_cmd, tcmd, tmr);
+
+	spin_lock_irqsave(&tcmd->lock, flags);
+	skip = tcmd->flags & (FNIC2_TCMD_IS_ABTS | FNIC2_TCMD_ABORTED);
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+	if (skip)
+		return;
+
+	/* map the target-core TMR response onto an FCP response code */
+	switch (tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		code = FCP_TMF_CMPL;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+		code = FCP_TMF_INVALID_LUN;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		code = FCP_TMF_REJECTED;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+	default:
+		code = FCP_TMF_FAILED;
+		break;
+	}
+	pr_err("TMR RESPONSE fn %d resp %d fcp code %d\n",
+		tmr->function, tmr->response, code);
+
+	fnic2_send_tmr_resp(tcmd, SAM_STAT_GOOD, code);
+
+	transport_generic_free_cmd(se_cmd, 0);
+}
+
+/*
+ * fnic2_aborted_task
+ * callback from LIO for each aborted real task (by ABTS or LUN RESET)
+ */
+static void fnic2_aborted_task(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *killed_tcmd = container_of(se_cmd, struct fnic2_cmd, se_cmd);
+	struct fnic2_cmd *abort_tcmd;
+
+	pr_err("tcmd %pK aborted by LIO\n", killed_tcmd);
+
+	if (killed_tcmd->flags & FNIC2_TCMD_ABORTED) {
+		/* ABTS path - already aborted with fw; free the ABTS command */
+		abort_tcmd = killed_tcmd->abort_tcmd;
+		transport_generic_free_cmd(&abort_tcmd->se_cmd, 0);
+	} else {
+		/* LUN RESET path - must abort with fw */
+		fnic2_send_abort_to_fw(killed_tcmd);
+	}
+}
+
+/*
+ * fnic2_fw_abort_done
+ * callback from fw for each aborted task (ABTS or LUN RESET)
+ */
+void fnic2_fw_abort_done(struct fnic2_cmd *tcmd)
+{
+	struct fnic2_cmd *victim;
+	struct fnic2 *fnic2;
+	struct fc_hdr *fchdr;
+
+	pr_err("tcmd %pK aborted by fw\n", tcmd);
+
+	if (!(tcmd->flags & FNIC2_TCMD_IS_ABTS))
+		return;
+
+	/* ABTS path: send ba_acc and abort with LIO */
+	victim = tcmd->killed_tcmd;
+	/* TBD killed_cmd sanity check */
+
+	fnic2 = victim->fnic2;
+	fchdr = (struct fc_hdr *)victim->rx_frame;
+
+	fdls_send_ba_acc(&fnic2->lport, fchdr);
+	fnic2_send_abort_to_lio(tcmd);
+}
+
+/* configfs */
+
+/*
+ * fnic2_make_tpg
+ * configfs callback: create a target portal group "tpgt_<N>" under an
+ * lport.  Returns the new se_tpg or an ERR_PTR on failure.
+ */
+static struct se_portal_group *fnic2_make_tpg(struct se_wwn *wwn,
+					      struct config_group *group,
+					      const char *name)
+{
+	struct fnic2_lio_lport *lport = container_of(wwn,
+			struct fnic2_lio_lport, lport_wwn);
+
+	struct fnic2_tpg *tpg;
+	unsigned long tpgt;
+	struct fnic2 *fnic2 = lport->fnic2;
+	int ret;
+
+	pr_debug("name: %s\n", name);
+
+	/* directory name must be "tpgt_<decimal>" */
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct fnic2_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct fnic2_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+
+	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
+	if (ret < 0) {
+		pr_err("Error returned by core_tpg_register: %d\n",
+			ret);
+		kfree(tpg);
+		/* propagate the real error instead of masking it as -EFAULT */
+		return ERR_PTR(ret);
+	}
+
+	/* fnic2 was already initialized from lport->fnic2 above */
+	fnic2->lio.tpg = tpg;
+
+	return &tpg->se_tpg;
+}
+
+/* configfs callback: tear down and free a target portal group */
+static void fnic2_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct fnic2_tpg *tpg;
+
+	tpg = container_of(se_tpg, struct fnic2_tpg, se_tpg);
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+/*
+ * fnic2_make_lport
+ * configfs callback: create an lport for the fnic2 whose WWPN matches
+ * @name.  Returns the se_wwn or an ERR_PTR on failure.
+ */
+static struct se_wwn *fnic2_make_lport(struct target_fabric_configfs *tf,
+				       struct config_group *group,
+				       const char *name)
+{
+	struct fnic2 *fnic2;
+	struct fnic2_lio_lport *lport;
+	uint64_t wwpn;
+
+	if (!name) {
+		pr_err("%s Null lport name received\n", __func__);
+		/* callers test with IS_ERR(); never return plain NULL */
+		return ERR_PTR(-EINVAL);
+	}
+	pr_debug("%s: lport name %s\n", __func__, name);
+
+	if (fnic2_parse_wwpn(name, &wwpn) < 0)
+		return ERR_PTR(-EINVAL);
+
+	pr_debug("%s: wwpn %llx\n", __func__, wwpn);
+
+	fnic2 = find_fnic2_wwpn(wwpn);
+	if (!fnic2) {
+		pr_err("Could not find fnic2 with wwpn: %llx\n",
+			wwpn);
+		return ERR_PTR(-EINVAL);
+	}
+	if (fnic2->lio.lport) {
+		pr_err("%s: lport already created %llx\n",
+			__func__, wwpn);
+		return ERR_PTR(-EEXIST);
+	}
+	lport = kzalloc(sizeof(struct fnic2_lio_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct fnic2_lio_lport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_wwpn = wwpn;
+	/* keep the ASCII form: fnic2_get_fabric_wwn() returns lport_name */
+	snprintf(lport->lport_name, sizeof(lport->lport_name), "%s", name);
+
+	fnic2->lio.lport = lport;
+	lport->fnic2 = fnic2;
+
+	return &lport->lport_wwn;
+}
+
+/* Write-pending status is not tracked; always report success */
+int fnic2_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/* No default node attributes for fnic2; intentionally a no-op */
+void fnic2_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+/* Command state is not tracked; always report 0 */
+int fnic2_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/*
+ * fnic2_queue_data_in
+ * Target-core callback: send READ data to the initiator by handing the
+ * command's scatterlist to the firmware.  Zero-length transfers are
+ * completed immediately with an FCP response.
+ * Returns 0, or the error from fnic2_send_to_fw().
+ */
+int fnic2_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct fnic2_cmd *tcmd = container_of(se_cmd,
+		struct fnic2_cmd, se_cmd);
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tcmd->lock, flags);
+
+	/* nothing to do if the command is an ABTS or was aborted */
+	if (tcmd->flags & (FNIC2_TCMD_IS_ABTS | FNIC2_TCMD_ABORTED)) {
+		spin_unlock_irqrestore(&tcmd->lock, flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+
+	pr_debug("DEBUG data_in tag: %x, sts: %x\n",
+		tcmd->cmd_tag, se_cmd->scsi_status);
+
+	/* Check the se->cmd status and process accordingly */
+	if (se_cmd->scsi_status != SAM_STAT_GOOD) {
+		/* TBD would LIO call data_in in case of error status? */
+		pr_err("Scsi status: %x, cmdtag: %d\n",
+			se_cmd->scsi_status, tcmd->cmd_tag);
+		return 0;
+	}
+
+	tcmd->data_len_xfer = se_cmd->data_length;
+
+	/* zero-length read: respond immediately, no data phase needed */
+	if (tcmd->data_len_xfer == 0) {
+		pr_debug("DEBUG Zero len Data tag: %x\n", tcmd->cmd_tag);
+		tcmd->rx_id = 0xffff;
+		fnic2_send_fcp_resp(tcmd);
+		transport_generic_free_cmd(&tcmd->se_cmd, 0);
+		return 0;
+	}
+
+	if (tcmd->data_len_req != tcmd->data_len_xfer) {
+		pr_debug("%s data_len_req: %d, data_len_xfer: %d\n",
+			__func__, tcmd->data_len_req, tcmd->data_len_xfer);
+	}
+	/* hand LIO's scatterlist to the firmware for the data-in phase */
+	tcmd->sgcnt = se_cmd->t_data_nents;
+	tcmd->sgl = se_cmd->t_data_sg;
+	/* NOTE(review): legacy PCI_DMA_* API; confirm direction semantics
+	 * against fnic2_send_to_fw() */
+	tcmd->dma_direction = PCI_DMA_TODEVICE;
+
+	ret = fnic2_send_to_fw(tcmd, FCPIO_TCMD_SEND_DATA);
+	return ret;
+}
+
+/* configfs callback: detach an lport from its fnic2 and free it */
+static void fnic2_drop_lport(struct se_wwn *wwn)
+{
+	struct fnic2_lio_lport *lport = container_of(wwn,
+				struct fnic2_lio_lport, lport_wwn);
+	struct fnic2 *owner;
+
+	/* Free from the corresponding fnic2 */
+	owner = find_fnic2_wwpn(lport->lport_wwpn);
+	if (!owner) {
+		pr_err("%s: Unable to find fnic2 for the wwpn %llx\n",
+			__func__, lport->lport_wwpn);
+		return;
+	}
+	if (!owner->lio.lport) {
+		pr_err("%s lport does not exist %llx\n",
+			__func__, lport->lport_wwpn);
+		return;
+	}
+
+	owner->lio.lport = NULL;
+	kfree(lport);
+}
+
+
+/* Registration */
+
+/* Fabric template registered with the target core by fnic2_lio_init() */
+static const struct target_core_fabric_ops fnic2_ops = {
+	.module                         	= THIS_MODULE,
+	.name                           	= "fnic2",
+	.node_acl_size				= sizeof(struct ft_node_acl),
+	/* identity / tpg attribute callbacks */
+	.get_fabric_name                	= fnic2_get_fabric_name,
+	.tpg_get_wwn                    	= fnic2_get_fabric_wwn,
+	.tpg_get_tag                    	= fnic2_get_tag,
+	.tpg_check_demo_mode            	= fnic2_check_false,
+	.tpg_check_demo_mode_cache      	= fnic2_check_true,
+	.tpg_check_demo_mode_write_protect	= fnic2_check_false,
+	.tpg_check_prod_mode_write_protect	= fnic2_check_false,
+	.tpg_get_inst_index             	= fnic2_tpg_get_inst_index,
+	/* per-command I/O path callbacks */
+	.check_stop_free			= fnic2_check_stop_free,
+	.release_cmd                    	= fnic2_release_cmd,
+	.sess_get_index                 	= fnic2_sess_get_index,
+	.sess_get_initiator_sid         	= NULL,
+	.write_pending                  	= fnic2_write_pending,
+	.write_pending_status           	= fnic2_write_pending_status,
+	.set_default_node_attributes    	= fnic2_set_default_node_attrs,
+	.get_cmd_state                  	= fnic2_get_cmd_state,
+	.queue_data_in                  	= fnic2_queue_data_in,
+	.queue_status                  		= fnic2_queue_status,
+	.queue_tm_rsp                   	= fnic2_queue_tm_rsp,
+	.aborted_task                   	= fnic2_aborted_task,
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn                	= fnic2_make_lport,
+	.fabric_drop_wwn                	= fnic2_drop_lport,
+	.fabric_make_tpg                	= fnic2_make_tpg,
+	.fabric_drop_tpg                	= fnic2_drop_tpg,
+	.fabric_init_nodeacl			= &ft_init_nodeacl,
+
+	.tfc_wwn_attrs                  	= ft_wwn_attrs,
+	.tfc_tpg_nacl_base_attrs        	= ft_nacl_base_attrs,
+};
+
+/*
+ * fnic2_lio_init
+ * Register the fnic2 fabric template with the target core.
+ * Returns 0 on success or the error from target_register_template().
+ * (Also drops the stray semicolon that followed the function body.)
+ */
+int fnic2_lio_init(void)
+{
+	return target_register_template(&fnic2_ops);
+}
+
+/*
+ * fnic2_lio_cleanup
+ * Unregister the fnic2 fabric template from the target core.
+ * (Also drops the stray semicolon that followed the function body.)
+ */
+void fnic2_lio_cleanup(void)
+{
+	target_unregister_template(&fnic2_ops);
+}
diff --git a/drivers/staging/fnic2/src/fnic2_lio.h b/drivers/staging/fnic2/src/fnic2_lio.h
new file mode 100644
index 0000000..82f7c01
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_lio.h
@@ -0,0 +1,113 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.
+ */
+#ifndef _FNIC2_LIO_H_
+#define _FNIC2_LIO_H_
+
+#define FNIC2_VERSION	"v0.1"
+#define FNIC2_NAMELEN	32
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+/* Bit flags stored in fnic2_cmd.flags */
+enum fnic2_cmd_flags {
+	FNIC2_TCMD_ABORTED	= (1 << 0),	/* an ABTS is aborting this cmd */
+	FNIC2_TCMD_IS_ABTS	= (1 << 1),	/* this cmd represents an ABTS frame */
+	FNIC2_TCMD_WRITECMD	= (1 << 2)	/* FCP command requested a write */
+};
+
+/* LIO target portal group bound to a single fnic2 lport */
+struct fnic2_tpg {
+	/* FC lport target portal group tag for TCM */
+	uint16_t		lport_tpgt;
+	/* Pointer back to fnic2_lport */
+	struct fnic2_lio_lport	*lport;
+	/* Returned by fnic2_make_tpg() */
+	struct se_portal_group	se_tpg;
+};
+
+/*
+ * Node ID and authentication.
+ * Embedded in ft_node_acl below; identifies the remote node by WWPN/WWNN.
+ */
+struct ft_node_auth {
+	uint64_t	port_name;
+	uint64_t	node_name;
+};
+
+/*
+ * Node ACL for remote port session.
+ * Sized into se_node_acl allocations via fnic2_ops.node_acl_size.
+ */
+struct ft_node_acl {
+	struct se_node_acl	se_node_acl;
+	struct ft_node_auth	node_auth;
+};
+
+
+/* LIO-facing local port, created by fnic2_make_lport() */
+struct fnic2_lio_lport {
+	/* Binary World Wide unique Port Name for FC Target Lport */
+	uint64_t	lport_wwpn;
+	/* ASCII formatted WWPN for FC Target Lport */
+	char		lport_name[FNIC2_NAMELEN];
+	/* Returned by fnic2_make_lport() */
+	struct se_wwn	lport_wwn;
+
+	struct fnic2	*fnic2;
+};
+
+/*
+ * Per-initiator login session, created by fnic2_session_create() and
+ * linked onto fnic2->lio.sess_list.
+ */
+struct fnic2_sess {
+	struct list_head	list;		/* entry in fnic2->lio.sess_list */
+	struct se_session	*se_sess;
+	struct kref		kref;	/* ref for hash and outstanding I/Os */
+	struct fnic2_lio_lport	*lport;
+	struct fnic2_rport	*rport;
+	struct list_head	cmd_list;
+	struct fnic2		*fnic2;
+	struct fc_logo_req	timer_logo_req;
+	/* armed with fnic2_recv_sess_timer_intr() in fnic2_session_create() */
+	struct timer_list	sess_timer;
+};
+
+/* Per-adapter LIO state: the tpg/lport pair and the list of live sessions */
+struct fnic2_lio {
+	struct fnic2_tpg	*tpg;
+	struct fnic2_lio_lport	*lport;
+	struct list_head	sess_list;	/* of fnic2_sess.list */
+	int			sess_count;
+};
+
+/*
+ * Per-command (exchange) context, allocated from the per-adapter pool
+ * (fnic2->tcmd_list_free) and recycled by fnic2_free_tcmd().
+ */
+struct fnic2_cmd {
+	struct list_head	free_list;	/* entry in fnic2->tcmd_list_free */
+	struct fnic2		*fnic2;
+	struct fnic2_sess	*sess;
+	uint32_t		cmd_tag;	/* also published as se_cmd.tag */
+	struct fnic2_cmd        *killed_tcmd;	/* cmd this ABTS is aborting */
+	struct fnic2_cmd        *abort_tcmd;	/* ABTS cmd aborting this one */
+	uint32_t                abort_tag;	/* ABTS tag; 0xFFFFFFFF = none */
+	struct se_cmd		se_cmd;
+	uint32_t		ox_id;
+	uint32_t                s_id;
+	uint16_t		rx_id;
+	uint8_t			flags;		/* enum fnic2_cmd_flags */
+	uint8_t			data_dir;
+	uint32_t		data_len_req;	/* Requested Data Len */
+	struct scatterlist	*sgl;
+	int			sgcnt;
+	int			data_len_xfer;	/* Xfered Data Len */
+	uint8_t			sense_buf[TRANSPORT_SENSE_BUFFER];
+	void			*host_req;	/* in-flight fcpio_host_req, if any */
+	void			*rx_frame;	/* received FC frame, owned by this cmd */
+	int			frame_len;
+	int			sg_desc_count;
+	struct host_sg_desc	*sg_desc_va_unaligned;
+	struct host_sg_desc	*sg_desc_va;	/* 16-byte aligned view of the above */
+	uint64_t		sg_desc_pa;
+	int			dma_direction;
+
+	/* kmap_atomic() cookies, one per SG entry -- assumes sgcnt <= 256, TODO confirm */
+	void			*kmap_addr[256];
+
+	struct work_struct	work;
+	struct fcpio_fw_req	fw_desc;
+
+	/*
+	 * lock and io_timer must remain the LAST two members, in this
+	 * order: fnic2_free_tcmd() memset()s everything before them.
+	 */
+	spinlock_t		lock;
+	struct timer_list	io_timer;
+};
+
+#endif /* _FNIC2_LIO_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 06/10] staging: fnic2 add main frame processing
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (4 preceding siblings ...)
  2018-04-05 21:20 ` [PATCH 05/10] staging: fnic2 add LIO interface Oliver Smith-Denny
@ 2018-04-05 21:21 ` Oliver Smith-Denny
  2018-04-05 21:21 ` [PATCH 07/10] staging: fnic2 add queue descriptors Oliver Smith-Denny
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:21 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain logic for handling Fibre Channel frames
and communication with firmware and LIO.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/fcpio.h     |  347 ++++++++++
 drivers/staging/fnic2/src/fnic2_cmd.c | 1164 +++++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/fnic2_io.h  |   32 +
 3 files changed, 1543 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/fcpio.h
 create mode 100644 drivers/staging/fnic2/src/fnic2_cmd.c
 create mode 100644 drivers/staging/fnic2/src/fnic2_io.h

diff --git a/drivers/staging/fnic2/src/fcpio.h b/drivers/staging/fnic2/src/fcpio.h
new file mode 100644
index 0000000..7c8b879
--- /dev/null
+++ b/drivers/staging/fnic2/src/fcpio.h
@@ -0,0 +1,347 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FCPIO_H_
+#define _FCPIO_H_
+
+#include <linux/if_ether.h>
+
+/*
+ * This header file includes all of the data structures used for
+ * communication by the host driver to the fcp firmware.
+ */
+
+/*
+ * Exchange and sequence id space allocated to the host driver
+ */
+#define FCPIO_HOST_EXCH_RANGE_START         0x1000
+
+/*
+ * Command entry type
+ */
+enum fcpio_type {
+	/*
+	 * Initiator request types
+	 */
+	FCPIO_ICMND_16 = 0x1,
+	FCPIO_ICMND_32,
+	FCPIO_ICMND_CMPL,
+	FCPIO_ITMF,
+	FCPIO_ITMF_CMPL,
+
+	/*
+	 * Target request types
+	 */
+	FCPIO_TCMD_SEND_DATA = 0x11,	/* Send RD DATA, Send Resp, Free Xchg */
+	FCPIO_TCMD_RECV_DATA,	/* Send Xfer Rdy, Xfer recvd data to SGL */
+	FCPIO_TCMD_FREE_CMD,	/* Free the resources of cmd with given tag */
+	FCPIO_TCMD_SEND_RESP,	/* Send Resp for Cmd, Free Xchg */
+	FCPIO_TCMD_ABORT_CMD,	/* Abort the cmd named by fcpio_tabort.cmd_tag */
+	FCPIO_TCMD_TASK_MGMT,	/* Task management request (see enum tmr_type) */
+
+	/*
+	 * Misc request types
+	 */
+	FCPIO_ACK = 0x20,
+	FCPIO_RESET,
+	FCPIO_RESET_CMPL,
+	FCPIO_FLOGI_REG,
+	FCPIO_FLOGI_REG_CMPL,
+	FCPIO_ECHO,
+	FCPIO_ECHO_CMPL,
+	FCPIO_LUNMAP_CHNG,
+	FCPIO_LUNMAP_REQ,
+	FCPIO_LUNMAP_REQ_CMPL,
+	FCPIO_FLOGI_FIP_REG,
+	FCPIO_FLOGI_FIP_REG_CMPL,
+};
+
+/*
+ * Header status codes from the firmware
+ */
+/* Values carried in fcpio_header.status */
+enum fcpio_status {
+	FCPIO_SUCCESS = 0,              /* request was successful */
+
+	/*
+	 * If a request to the firmware is rejected, the original request
+	 * header will be returned with the status set to one of the following:
+	 */
+	FCPIO_INVALID_HEADER,    /* header contains invalid data */
+	FCPIO_OUT_OF_RESOURCE,   /* out of resources to complete request */
+	FCPIO_INVALID_PARAM,     /* some parameter in request is invalid */
+	FCPIO_REQ_NOT_SUPPORTED, /* request type is not supported */
+	FCPIO_IO_NOT_FOUND,      /* requested I/O was not found */
+
+	/*
+	 * Once a request is processed, the firmware will usually return
+	 * a cmpl message type.  In cases where errors occurred,
+	 * the header status field would be filled in with one of the following:
+	 */
+	FCPIO_ABORTED = 0x41,     /* request was aborted */
+	FCPIO_TIMEOUT,            /* request was timed out */
+	FCPIO_SGL_INVALID,        /* request was aborted due to sgl error */
+	FCPIO_MSS_INVALID,        /* request was aborted due to mss error */
+	FCPIO_DATA_CNT_MISMATCH,  /* recv/sent more/less data than exp. */
+	FCPIO_FW_ERR,             /* request was terminated due to fw error */
+	FCPIO_ITMF_REJECTED,      /* itmf req was rejected by remote node */
+	FCPIO_ITMF_FAILED,        /* itmf req was failed by remote node */
+	FCPIO_ITMF_INCORRECT_LUN, /* itmf req targeted incorrect LUN */
+	FCPIO_CMND_REJECTED,      /* request was invalid and rejected */
+	FCPIO_NO_PATH_AVAIL,      /* no paths to the lun was available */
+	FCPIO_PATH_FAILED,        /* i/o sent to current path failed */
+	FCPIO_LUNMAP_CHNG_PEND,   /* i/o rejected due to lunmap change */
+};
+
+/*
+ * The header for an fcpio request, whether from the firmware or from the
+ * host driver
+ */
+/* Common header; round-tripped via fcpio_header_enc()/fcpio_header_dec() */
+struct fcpio_header {
+	uint8_t            type;	/* enum fcpio_type */
+	uint8_t            status;	/* enum fcpio_status */
+	uint16_t           _resvd;	/* reserved */
+	uint32_t	   fcpio_tag;	/* header tag */
+};
+
+/* Pack @type, @status and @tag into @hdr, zeroing the reserved word. */
+static inline void fcpio_header_enc(struct fcpio_header *hdr,
+				    uint8_t type, uint8_t status,
+				    uint32_t tag)
+{
+	*hdr = (struct fcpio_header) {
+		.type		= type,
+		.status		= status,
+		._resvd		= 0,
+		.fcpio_tag	= tag,
+	};
+}
+
+/*
+ * Unpack @hdr into the caller-supplied fields.
+ * The reserved word is intentionally not returned.
+ */
+static inline void fcpio_header_dec(struct fcpio_header *hdr,
+				    uint8_t *type, uint8_t *status,
+				    uint32_t *tag)
+{
+	*type = hdr->type;
+	*status = hdr->status;
+	*tag = hdr->fcpio_tag;
+}
+
+#define CDB_16      16
+#define CDB_32      32
+#define LUN_ADDRESS 8
+
+/*
+ * fcpio_tcmd_data: host -> firmware request
+ * used to request the firmware to send data (rd) or receive data(wr)
+ * in target mode
+ * Used by: FCPIO_TCMD_SEND_DATA & FCPIO_TCMD_RECV_DATA
+ *
+ * All fields use the uint*_t spellings for consistency with the rest
+ * of this header (previously a mix of uint*_t and BSD u_int*_t).
+ */
+
+struct fcpio_tcmd_data {
+	uint8_t		d_id[3];
+	uint8_t		rsvd;
+	uint8_t		s_id[3];
+	uint8_t		rsvd1;
+	uint16_t	ox_id;
+	uint8_t		rsvd2[2];
+	uint32_t	data_len;
+	uint64_t	sgl_pa;
+	uint16_t	sgl_cnt;
+	uint8_t		send_resp;	/* change it to common flags? */
+	uint8_t		rsp_iu_flags;
+	uint32_t	resid;
+	uint8_t		rsvd3[2];
+	uint8_t		type;		/* command type read/Write */
+	uint8_t		rsvd4;		/* reserved */
+	uint32_t	rel_offset;	/* data sequence relative offset */
+	uint16_t	npiv_id;	/* FC vNIC only: npiv id of request */
+	uint16_t	mss;		/* FC vNIC only: max burst */
+	uint32_t	r_a_tov;	/* FC vNIC only: R_A_TOV in msec */
+	uint32_t	e_d_tov;
+} __attribute__((__packed__));
+
+/*
+ * fcpio_tcmd_fw_rsp: firmware -> host response
+ * used by firmware to send response for a tcmd request
+ * Carried in fcpio_fw_req.u.trsp.
+ * Used as a response for:
+ * FCPIO_TCMD_SEND_DATA
+ * FCPIO_TCMD_RECV_DATA
+ * FCPIO_TCMD_FREE_CMD
+ *
+ */
+struct fcpio_tcmd_fw_rsp {
+	uint8_t		error_code;
+	uint8_t		rsvd;
+	uint16_t	rx_id;
+};
+
+/*
+ * fcpio_tabort: host -> firmware request
+ *
+ * used by the host to request the firmware to abort a target request that was
+ * received by the firmware.  Carried in fcpio_host_req.u.tabort with
+ * header type FCPIO_TCMD_ABORT_CMD.
+ */
+struct fcpio_tabort {
+	uint32_t   cmd_tag;	/* tag of the target request */
+};
+
+/* Destination MAC addressing mode, stored in fcpio_flogi_reg.format */
+enum fcpio_flogi_reg_format_type {
+	FCPIO_FLOGI_REG_DEF_DEST = 0,	/* Use the oui | s_id mac format */
+	FCPIO_FLOGI_REG_GW_DEST,	/* Use the fixed gateway mac */
+};
+
+/*
+ * fcpio_flogi_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format.
+ * Carried in fcpio_host_req.u.flogi_reg.
+ */
+struct fcpio_flogi_reg {
+	uint8_t format;			/* enum fcpio_flogi_reg_format_type */
+	uint8_t s_id[3];		/* FC vNIC only: Source S_ID */
+	uint8_t gateway_mac[ETH_ALEN];	/* Destination gateway mac */
+	uint16_t _resvd;
+	uint32_t r_a_tov;		/* R_A_TOV in msec */
+	uint32_t e_d_tov;		/* E_D_TOV in msec */
+};
+
+/*
+ * fcpio_flogi_fip_reg: host -> firmware request
+ *
+ * fc vnic only
+ * used by the host to notify the firmware of the lif's s_id
+ * and destination mac address format.
+ * Carried in fcpio_host_req.u.flogi_fip_reg.
+ */
+struct fcpio_flogi_fip_reg {
+	uint8_t    _resvd0;
+	uint8_t     s_id[3];		/* FC vNIC only: Source S_ID */
+	uint8_t     fcf_mac[ETH_ALEN];	/* FCF Target destination mac */
+	uint16_t   _resvd1;
+	uint32_t   r_a_tov;		/* R_A_TOV in msec */
+	uint32_t   e_d_tov;		/* E_D_TOV in msec */
+	uint8_t    ha_mac[ETH_ALEN];	/* Host adapter source mac */
+	uint16_t   _resvd2;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the host to the
+ * firmware.  They are 128 bytes per structure; the anonymous buf member
+ * pads the union out to that size regardless of which payload is used.
+ */
+#define FCPIO_HOST_REQ_LEN      128     /* expected length of host requests */
+
+struct fcpio_host_req {
+	struct fcpio_header hdr;
+
+	union {
+		/*
+		 * Defines space needed for request
+		 */
+		uint8_t buf[FCPIO_HOST_REQ_LEN - sizeof(struct fcpio_header)];
+
+		/*
+		 * Target host requests
+		 */
+		struct fcpio_tabort		tabort;
+		struct fcpio_tcmd_data		tdata;
+
+		/*
+		 * Misc requests
+		 */
+		struct fcpio_flogi_reg		flogi_reg;
+		struct fcpio_flogi_fip_reg	flogi_fip_reg;
+	} u;
+};
+
+/*
+ * Task Management request codes
+ * (presumably carried with FCPIO_TCMD_TASK_MGMT requests -- confirm
+ * against the firmware interface)
+ */
+enum tmr_type {
+	FCPIO_TTMF_ABT_TASK             = 0x01,	/* abort task */
+	FCPIO_TTMF_ABT_TASK_SET         = 0x02,	/* abort task set */
+	FCPIO_TTMF_CLR_TASK_SET         = 0x04,	/* clear task set */
+	FCPIO_TTMF_LUN_RESET            = 0x10,	/* logical unit reset task mgmt */
+	FCPIO_TTMF_CLR_ACA		= 0x40	/* Clear ACA condition */
+};
+
+/*
+ * fcpio_ack: firmware -> host response
+ *
+ * used by firmware to notify the host of the last work request received.
+ * Carried in fcpio_fw_req.u.ack.
+ */
+struct fcpio_ack {
+	uint16_t  request_out;	/* last host entry received */
+	uint16_t  _resvd;
+};
+
+/*
+ * fcpio_reset_cmpl: firmware -> host response
+ *
+ * used by firmware to respond to the host's reset request
+ */
+struct fcpio_reset_cmpl {
+	uint16_t   vnic_id;
+};
+
+/*
+ * Basic structure for all fcpio structures that are sent from the firmware to
+ * the host.  They are 64 bytes per structure; the final byte carries the
+ * descriptor color bit read by fcpio_color_dec().
+ */
+#define FCPIO_FW_REQ_LEN        64      /* expected length of fw requests */
+struct fcpio_fw_req {
+	struct fcpio_header hdr;
+
+	union {
+		/*
+		 * Defines space needed for request
+		 */
+		uint8_t buf[FCPIO_FW_REQ_LEN - sizeof(struct fcpio_header)];
+
+		/*
+		 * Target firmware responses
+		 */
+		struct fcpio_tcmd_fw_rsp	trsp;
+
+		/*
+		 * Firmware response to work received
+		 */
+		struct fcpio_ack		ack;
+
+		/*
+		 * Misc requests
+		 */
+	} u;
+};
+
+/*
+ * Extract the color (ownership) bit -- the top bit of the descriptor's
+ * last byte -- into *@color (0 or 1).
+ */
+static inline void fcpio_color_dec(struct fcpio_fw_req *fw_req, uint8_t *color)
+{
+	uint8_t *c = ((uint8_t *) fw_req) + sizeof(struct fcpio_fw_req) - 1;
+
+	*color = *c >> 7;
+
+	/*
+	 * Make sure color bit is read from desc *before* other fields
+	 * are read from desc.  Hardware guarantees color bit is last
+	 * bit (byte) written.  Adding the rmb() prevents the compiler
+	 * and/or CPU from reordering the reads which would potentially
+	 * result in reading stale values.
+	 */
+
+	rmb();
+
+}
+
+#define FNIC2_SGL_SZ	(256 * 16)
+#define FNIC2_SGL_ALIGN	(16)
+
+#endif /* _FCPIO_H_ */
diff --git a/drivers/staging/fnic2/src/fnic2_cmd.c b/drivers/staging/fnic2/src/fnic2_cmd.c
new file mode 100644
index 0000000..0315825
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_cmd.c
@@ -0,0 +1,1164 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2017 Cisco Systems, Inc. All rights reserved
+ */
+#include <linux/workqueue.h>
+#include "fnic2.h"
+#include "fcpio.h"
+#include "fnic2_io.h"
+#include "fdls_fc.h"
+#include <asm/unaligned.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <linux/highmem.h>
+
+static void fnic2_format_wwnn(char *buf, int len, uint64_t wwn);
+static struct fcpio_host_req *fnic2_host_req_alloc_init(struct fnic2_cmd *tcmd, int req_type);
+static void fnic2_process_fcp_cmd(struct work_struct *work);
+static void fnic2_unmap_free_sgl(struct fnic2 *fnic2, struct fnic2_cmd *tcmd);
+static void tcmd_process_abts_req(struct fnic2_cmd *abort_tcmd);
+static void fnic2_send_tm(struct fnic2_cmd *tcmd);
+static void fnic2_process_tcmd_timeout(struct work_struct *work);
+static void fnic2_recv_sess_timer_intr(struct timer_list *timer);
+
+extern void fdls_construct_logo_req(struct fnic2_lport *lport, struct fc_hdr *fchdr, struct fnic2_sess *sess);
+extern void fdls_delete_rport(struct fnic2_lport *lport, struct fnic2_rport *rport);
+
+extern struct list_head fnic2_list;
+
+/* Frame Initialization */
+
+/* FCP RESP: template FC header (r_ctl 0x07, type 0x08) for response frames */
+struct fc_fcp_rsp fnic2_fcp_rsp = {
+	.fchdr = {.r_ctl = 0x07, .type = 0x08,
+		.f_ctl = {FNIC2_FCP_RSP_FCTL, 0x00, 0x00},
+		.seq_id = 0x02}
+};
+
+#define BUF_ALIGN_16(_addr)	(((uint64_t)_addr + 16 - 1) & ~0x0F)
+
+static int total_cmds;
+
+/*
+ * fnic2_get_tcmd_from_pool - pop a free command from the adapter pool
+ * @fnic2: adapter instance
+ *
+ * Takes the first entry off fnic2->tcmd_list_free under free_list_lock,
+ * decrements the free count and resets abort_tag to the "no abort"
+ * sentinel (0xFFFFFFFF).  Returns NULL when the pool is empty.
+ * total_cmds appears to be a debug counter only.
+ */
+static struct fnic2_cmd *fnic2_get_tcmd_from_pool(struct fnic2 *fnic2)
+{
+	struct fnic2_cmd *tcmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic2->free_list_lock, flags);
+
+	tcmd = list_first_entry_or_null(&fnic2->tcmd_list_free,
+		struct fnic2_cmd, free_list);
+	if (tcmd) {
+		list_del(&tcmd->free_list);
+		fnic2->freecmds--;
+		pr_debug("Got tcmd from pool tag %x freecmds %d fnic2->fnic2_num %d\n", tcmd->cmd_tag, fnic2->freecmds, fnic2->fnic2_num);
+		total_cmds++;
+
+		tcmd->abort_tag = 0xFFFFFFFF;
+	}
+
+	spin_unlock_irqrestore(&fnic2->free_list_lock, flags);
+
+	return tcmd;
+}
+
+/*
+ * fnic2_free_tcmd - release per-command resources and return it to the pool
+ * @tcmd: tcmd to be freed
+ *
+ * Deletes the I/O timer if active, frees the receive frame, SGL mappings
+ * and host request, then wipes the command -- preserving the embedded
+ * lock and timer, which must be the last two members of struct
+ * fnic2_cmd -- and puts it back on fnic2->tcmd_list_free.
+ */
+void fnic2_free_tcmd(struct fnic2_cmd *tcmd)
+{
+	uint32_t tag;
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	unsigned long flags;
+	unsigned long flags2;
+
+	/* Delete I/O timer on tcmd if it is active */
+	if ((try_to_del_timer_sync(&tcmd->io_timer)) < 0)
+		pr_err("Could not delete timer on tcmd tag %x\n", tcmd->cmd_tag);
+
+	spin_lock_irqsave(&tcmd->lock, flags2);
+
+	pr_err("freeing %pK tag %x\n", tcmd, tcmd->cmd_tag);
+
+	/* kfree(NULL) is a no-op; no need to test first */
+	kfree(tcmd->rx_frame);
+
+	if (tcmd->sgcnt)
+		fnic2_unmap_free_sgl(fnic2, tcmd);
+
+	kfree(tcmd->host_req);
+	tcmd->host_req = NULL;
+
+	tag = tcmd->cmd_tag;
+
+	/*
+	 * Don't erase lock or timer: relies on them being the final two
+	 * members of struct fnic2_cmd, in that order.
+	 */
+	memset(tcmd, 0, sizeof(struct fnic2_cmd) - sizeof(spinlock_t) - sizeof(struct timer_list));
+
+	tcmd->cmd_tag = tag;
+	tcmd->fnic2 = fnic2;
+
+	spin_unlock_irqrestore(&tcmd->lock, flags2);
+
+	spin_lock_irqsave(&fnic2->free_list_lock, flags);
+	list_add_tail(&tcmd->free_list, &fnic2->tcmd_list_free);
+	fnic2->freecmds++;
+	pr_debug("fnic2 num: %d freecmds %d tag %x rx_frame %pK\n", fnic2->fnic2_num, fnic2->freecmds, tcmd->cmd_tag, tcmd->rx_frame);
+	spin_unlock_irqrestore(&fnic2->free_list_lock, flags);
+}
+
+/*
+ * fnic2_send_tmr_resp_and_free - emit a task-management response, then
+ * release the command
+ * @tcmd: task-management command to complete
+ * @code: TM response code forwarded to fnic2_send_tmr_resp()
+ *
+ * Always frees @tcmd and its associated state.
+ */
+static void fnic2_send_tmr_resp_and_free(struct fnic2_cmd *tcmd,
+					 int code)
+{
+	fnic2_send_tmr_resp(tcmd, SAM_STAT_GOOD, code);
+	fnic2_free_tcmd(tcmd);
+}
+
+/*
+ * fnic2_fcp_recv - entry point for a received FCP frame
+ * @fnic2:     adapter the frame arrived on
+ * @fp:        frame buffer; ownership passes to the tcmd (freed later by
+ *             fnic2_free_tcmd()), freed here only when no tcmd is free
+ * @frame_len: length of the frame in bytes
+ *
+ * Allocates a command from the free pool, starts the I/O timer for
+ * read/write commands, and queues fnic2_process_fcp_cmd() on the CPU
+ * derived from the tag.  For an ABTS, the original exchange is located
+ * first so the work runs on the same CPU as the command being aborted.
+ */
+void fnic2_fcp_recv(struct fnic2 *fnic2, uint8_t *fp, int frame_len)
+{
+	struct fc_hdr *fchdr = (struct fc_hdr *)fp;
+	struct fnic2_cmd *tcmd;
+	int cpu;
+	bool found;
+	struct fnic2_cmd *tcmd_idx;
+	int tag_idx;
+	struct fc_fcp_cmnd *fc_cmnd;
+
+	/* Get a tcmd from free pool */
+	tcmd = fnic2_get_tcmd_from_pool(fnic2);
+	if (!tcmd) {
+		pr_err("No TCMD Available\n");
+		// TBD return BUSY
+		kfree(fp);
+		return;
+	}
+
+	/* Initial settings of the cmd */
+	tcmd->ox_id = ntohs(fchdr->ox_id);
+	tcmd->s_id = ntoh24(fchdr->s_id);
+	tcmd->rx_frame = fp;
+	tcmd->frame_len = frame_len;
+
+	/*
+	 * Setup tcmd timeout.
+	 * NOTE(review): assigning the callback through a (void *) cast
+	 * defeats type checking; prefer timer_setup()/mod_timer() --
+	 * confirm fnic2_recv_tcmd_timeout_intr takes a struct timer_list *.
+	 */
+	tcmd->io_timer.expires = jiffies + JIFFIES_PER_MINUTE;
+	tcmd->io_timer.function = (void *)fnic2_recv_tcmd_timeout_intr;
+
+	fchdr = (struct fc_hdr *)tcmd->rx_frame;
+
+	fc_cmnd = (struct fc_fcp_cmnd *)fchdr;
+
+	/* Only start the I/O timer if this tcmd is representing a R/W */
+	if (IS_SCSI_READ_CMD(fc_cmnd) || IS_SCSI_WRITE_CMD(fc_cmnd))
+		add_timer(&tcmd->io_timer);
+
+	/* Queue */
+	cpu = get_cpu_to_queue(tcmd->cmd_tag);
+
+	if (fchdr->r_ctl == FNIC2_FC_R_CTL_ABTS) {
+
+		/*
+		 * Locate the exchange being aborted so the ABTS is
+		 * processed on the same CPU as the original command.
+		 * NOTE(review): the pool scan is done without a lock --
+		 * confirm this cannot race with command free/reuse.
+		 */
+		tcmd->ox_id |= ABTS_MASK;
+		tcmd_idx = &fnic2->tcmd_pool[0];
+		found = 0;
+		for (tag_idx = 0; tag_idx < FNIC2_MAX_TCMDS; tag_idx++, tcmd_idx++) {
+			if ((ntohs(fchdr->ox_id) == tcmd_idx->ox_id) &&
+			   (tcmd_idx->s_id == ntoh24(fchdr->s_id))) {
+				cpu = get_cpu_to_queue(tcmd_idx->cmd_tag);
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			/* Unknown exchange: accept the ABTS and drop this tcmd */
+			pr_debug("Did not find I/O ox_id %x to ABORT\n", tcmd->ox_id);
+			fdls_send_ba_acc(&fnic2->lport, fchdr);
+			fnic2_free_tcmd(tcmd);
+			return;
+		}
+	}
+
+	pr_err("New cmd %pK tag %x\n", tcmd, tcmd->cmd_tag);
+
+	INIT_WORK(&tcmd->work, fnic2_process_fcp_cmd);
+	queue_work_on(cpu, fnic2_tcmd_wq, &tcmd->work);
+}
+
+/*
+ * fnic2_process_fcp_cmd - workqueue handler for a received FCP frame
+ * @work: embedded in the fnic2_cmd being processed
+ *
+ * Dispatches ABTS frames to tcmd_process_abts_req(), otherwise resolves
+ * the session from the frame's S_ID, validates the CDB, maps the FCP
+ * direction and task attributes to TCM values, and submits the command
+ * to LIO via target_submit_cmd().  On any error the tcmd is freed (a
+ * logout is sent when no session exists for the initiator).
+ */
+static void fnic2_process_fcp_cmd(struct work_struct *work)
+{
+	struct fnic2_cmd *tcmd =
+		container_of(work, struct fnic2_cmd, work);
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct fc_fcp_cmnd *fcp_cmd_req;
+	struct fc_hdr *fchdr;
+	struct fnic2_sess *sess;
+	uint32_t s_id;
+	int addl_len;
+	int task_attr = 0;
+	int flags = 0;
+	int ret;
+
+	fchdr = (struct fc_hdr *)tcmd->rx_frame;
+
+	if (fchdr->r_ctl == FNIC2_FC_R_CTL_ABTS) {
+		tcmd_process_abts_req(tcmd);
+		return;
+	}
+
+	/* Find the session that the request belongs to */
+	s_id = ntoh24(fchdr->s_id);
+	sess = fnic2_find_sess_s_id(fnic2, s_id);
+	if (!sess) {
+		/* We shouldn't be here */
+		pr_err("%s Could not find the login for: %x\n",
+			__func__, s_id);
+		fdls_send_logout(&fnic2->lport, fchdr);
+		goto err;
+	}
+
+	if ((try_to_del_timer_sync(&sess->sess_timer)) < 0)
+		pr_err("%s failed to delete sess %pK timer\n", __func__, sess);
+
+	/* Additional CDB length beyond the fixed 16 bytes is not supported */
+	fcp_cmd_req = (struct fc_fcp_cmnd *)fchdr;
+	addl_len = FC_CMND_ADDL_CDB_LEN(fcp_cmd_req->add_cdblen_flags);
+	if (addl_len > 0) {
+		pr_err("Addl. len CDB not supported ini: %x, ox_id: %x\n",
+			s_id, fchdr->ox_id);
+		goto err;
+	}
+
+	tcmd->sess = sess;
+	tcmd->data_len_req = ntohl(fcp_cmd_req->dl);
+	tcmd->se_cmd.tag = tcmd->cmd_tag;
+
+	/*
+	 * Check for FCP task management flags
+	 */
+	if (fcp_cmd_req->tm_flags) {
+	    fnic2_send_tm(tcmd);
+	    return;
+	}
+
+	if (ntohl(fcp_cmd_req->dl) > MAX_DATA_LENGTH) {
+		pr_err("DEBUG Unsupported length: %x\n", ntohl(fcp_cmd_req->dl));
+		goto err;
+	}
+
+	/* Initialize tcmd based on the request received */
+	switch (fcp_cmd_req->add_cdblen_flags &
+		(FC_CMND_REQ_RD | FC_CMND_REQ_WR)) {
+		case FC_CMND_REQ_NONE:
+			pr_debug("DEBUG neither read nor write recvd! ln: %d\n", __LINE__);
+			/* fallthrough -- treated as a read with no data */
+		case FC_CMND_REQ_RD:
+			tcmd->data_dir = DMA_FROM_DEVICE;
+		break;
+		case FC_CMND_REQ_WR:
+			tcmd->data_dir = DMA_TO_DEVICE;
+			tcmd->flags |= FNIC2_TCMD_WRITECMD;
+		break;
+		default:
+			pr_debug("DEBUG Bidirectional cmd recvd ln: %d\n", __LINE__);
+			/* Bi-directional not supported */
+			goto err;
+		break;
+	}
+
+	/* Map the FCP task attribute to its TCM equivalent */
+	switch (fcp_cmd_req->pri_ta & FC_CMND_TASK_ATTR) {
+		case FC_TA_SIMPLE_TAG:
+			task_attr = TCM_SIMPLE_TAG;
+		break;
+		case FC_TA_HEAD_OF_QUEUE:
+			task_attr = TCM_HEAD_TAG;
+		break;
+		case FC_TA_ORDERED:
+			task_attr =  TCM_ORDERED_TAG;
+		break;
+		case FC_TA_ACA:
+			task_attr = TCM_ACA_TAG;
+		break;
+		default:
+		break;
+	}
+
+	flags = TARGET_SCF_ACK_KREF;
+
+	pr_err("New Cmd tag: %x, is_write: %d, ox_id: %x, op %x len: %d\n",
+		tcmd->cmd_tag, tcmd->flags & FNIC2_TCMD_WRITECMD, tcmd->ox_id, fcp_cmd_req->cdb[0],
+		tcmd->data_len_req);
+
+	/* Submit */
+	ret = target_submit_cmd(&tcmd->se_cmd, sess->se_sess, fcp_cmd_req->cdb,
+		tcmd->sense_buf, scsilun_to_int(&fcp_cmd_req->fcp_lun),
+		tcmd->data_len_req, task_attr, tcmd->data_dir, flags);
+
+	if (ret) {
+		pr_err("target_submit_cmd returned failure %d\n",
+			ret);
+		goto err;
+	}
+
+	return;
+err:
+	pr_err("ERROR fn: %s, ln: %d\n", __func__, __LINE__);
+	fnic2_free_tcmd(tcmd);
+	return;
+}
+
+/*
+ * tcmd_process_abts_req - handle a received ABTS
+ * @abort_tcmd: tcmd representing the ABTS frame itself
+ *
+ * Looks the aborted exchange up in the session's LIO command list by
+ * OX_ID and starts the abort in firmware via fnic2_send_abort_to_fw();
+ * the two commands are cross-linked (killed_tcmd/abort_tcmd) so the
+ * completion path can finish both.  If no session or no matching
+ * command is found (or the command is already being aborted), a BA_ACC
+ * is sent and the ABTS tcmd is freed.
+ */
+static void tcmd_process_abts_req(struct fnic2_cmd *abort_tcmd)
+{
+	struct fc_hdr *fchdr = (struct fc_hdr *)abort_tcmd->rx_frame;
+	struct fnic2 *fnic2 = abort_tcmd->fnic2;
+	struct fnic2_sess *sess;
+	struct se_cmd *se_cmd;
+	struct fnic2_cmd *tcmd_loop;
+	uint32_t s_id;
+	unsigned long flags;
+	unsigned long abts_flags;
+	struct fc_fcp_cmnd *fcp_cmd_req;
+
+	pr_err("processing ABTS s_id %x ox_id %x\n",
+		ntoh24(fchdr->s_id), ntohs(fchdr->ox_id));
+
+	s_id = ntoh24(fchdr->s_id);
+	sess = fnic2_find_sess_s_id(fnic2, s_id);
+	if (!sess) {
+		/* We shouldn't be here */
+		WARN_ON(!sess);
+		goto abts_error;
+	}
+	pr_err("processing ABTS found session %pK\n",
+		sess);
+
+	if ((try_to_del_timer_sync(&sess->sess_timer)) < 0)
+		pr_err("%s failed to delete sess %pK timer\n", __func__, sess);
+
+	spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
+
+	list_for_each_entry(se_cmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
+		tcmd_loop = container_of(se_cmd, struct fnic2_cmd, se_cmd);
+		pr_err("scan tcmd ox_id %x\n", tcmd_loop->ox_id);
+		if (tcmd_loop->ox_id == ntohs(fchdr->ox_id)) {
+
+			fcp_cmd_req = (struct fc_fcp_cmnd *)tcmd_loop->rx_frame;
+			pr_err("ABORTING original tag: %x abort_tag %x\n",
+				tcmd_loop->cmd_tag, abort_tcmd->cmd_tag);
+			pr_err("se_cmd %pK se_sess %pK lun %llu\n",
+				&tcmd_loop->se_cmd, sess->se_sess,
+				(unsigned long long)scsilun_to_int(&fcp_cmd_req->fcp_lun));
+
+			/*
+			 * Drop the session command-list lock before taking
+			 * the per-command lock; the loop exits on all paths
+			 * past this point.
+			 */
+			spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
+
+			/* Delete I/O timer on original command if active */
+			if ((try_to_del_timer_sync(&tcmd_loop->io_timer)) < 0)
+				pr_err("Could not delete timer on tcmd tag %x\n", tcmd_loop->cmd_tag);
+
+			abort_tcmd->flags |= FNIC2_TCMD_IS_ABTS;
+
+			spin_lock_irqsave(&tcmd_loop->lock, abts_flags);
+
+			/* A second ABTS for the same exchange is ignored */
+			if (tcmd_loop->flags & FNIC2_TCMD_ABORTED) {
+				pr_err("tcmd %x already being aborted\n", tcmd_loop->abort_tag);
+				spin_unlock_irqrestore(&tcmd_loop->lock, abts_flags);
+				goto abts_error;
+			}
+
+			tcmd_loop->flags |= FNIC2_TCMD_ABORTED;
+
+			/* Cross-link the ABTS and the command it aborts */
+			tcmd_loop->abort_tag = abort_tcmd->cmd_tag;
+			abort_tcmd->killed_tcmd = tcmd_loop;
+			tcmd_loop->abort_tcmd = abort_tcmd;
+
+			spin_unlock_irqrestore(&tcmd_loop->lock, abts_flags);
+
+			fnic2_send_abort_to_fw(tcmd_loop);
+
+			return;
+		}
+	}
+
+	spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
+	pr_err("ABTS for ox_id %x NOT FOUND\n",
+		ntohs(fchdr->ox_id));
+
+abts_error:
+
+	fdls_send_ba_acc(&fnic2->lport, fchdr);
+	fnic2_free_tcmd(abort_tcmd);
+
+	return;
+}
+
+/*
+ * fnic2_send_abort_to_fw - post an FCPIO_TCMD_ABORT_CMD on the copy WQ
+ * @tcmd: the command being aborted (abort_tag already set by caller)
+ *
+ * Builds a temporary abort request and copies it into the next free
+ * copy-WQ descriptor.  Returns silently if allocation fails or no
+ * descriptor is available.
+ */
+void fnic2_send_abort_to_fw(struct fnic2_cmd *tcmd)
+{
+	struct fnic2 *fnic2 = tcmd->fnic2;
+
+	unsigned long flags;
+	struct fcpio_host_req *host_req;
+	struct fcpio_host_req *desc;
+	struct vnic_wq_copy *wq;
+
+	pr_err("tcmd %pK send abort to fw\n", tcmd);
+	pr_err("cmd_tag %x abort_tag %x\n", tcmd->cmd_tag, tcmd->abort_tag);
+
+	/*
+	 * Fill the structure to send it to fw.  The old WARN_ON(NULL)
+	 * followed by an unconditional dereference would crash on
+	 * allocation failure; bail out instead.
+	 */
+	host_req = kzalloc(sizeof(struct fcpio_host_req), GFP_KERNEL);
+	if (!host_req) {
+		pr_err("failed to allocate abort req for tag %x\n", tcmd->cmd_tag);
+		return;
+	}
+	host_req->hdr.type = FCPIO_TCMD_ABORT_CMD;
+	host_req->hdr.fcpio_tag = tcmd->abort_tag;
+	host_req->u.tabort.cmd_tag = tcmd->cmd_tag;
+
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+	/* Use the Copy WQ to send it to FW */
+	wq = &fnic2->wq_copy[0];
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic2, wq);
+
+	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+		pr_err("DEBUG ERROR no wq desc for free_cmd\n");
+		kfree(host_req);
+		spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+		return;
+	}
+
+	desc = vnic_wq_copy_next_desc(wq);
+	memcpy(desc, host_req, sizeof(struct fcpio_host_req));
+
+	vnic_wq_copy_post(wq);
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+
+	kfree(host_req);
+}
+
+/*
+ * fnic2_session_alloc_cb - target_alloc_session() callback
+ *
+ * Links the new session onto the adapter's session list and bumps the
+ * session count.  Always succeeds.
+ */
+static int fnic2_session_alloc_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *arg)
+{
+	struct fnic2_sess *sess = arg;
+	struct fnic2 *adapter = sess->fnic2;
+
+	list_add_tail(&sess->list, &adapter->lio.sess_list);
+	adapter->lio.sess_count++;
+	return 0;
+}
+
+/*
+ * fnic2_session_create - create a LIO session for a logged-in remote port
+ * @fnic2: adapter instance
+ * @rport: remote port that completed login
+ *
+ * Allocates a fnic2_sess, creates the backing se_session (the alloc
+ * callback links it onto fnic2->lio.sess_list) and initializes the
+ * session timer.  Returns 0 on success, -1 on any failure.
+ */
+int fnic2_session_create(struct fnic2 *fnic2, struct fnic2_rport *rport)
+{
+	struct fnic2_sess *sess;
+	struct fnic2_tpg *tpg = fnic2->lio.tpg;
+	char portname_initiator[36];	/* char, not uint8_t: fnic2_format_wwnn() takes char * */
+
+	sess = fnic2_find_session(fnic2, rport->wwpn);
+	if (sess) {
+		pr_err("%s: Session already found %llx\n",
+			__func__, rport->wwpn);
+		return -1;
+	}
+
+	fnic2_format_wwnn(portname_initiator, sizeof(portname_initiator), rport->wwpn);
+	pr_debug("Creating session for %s\n", portname_initiator);
+
+	if (tpg == NULL) {
+		pr_err("No tpg\n");
+		return -1;
+	}
+
+	sess = kzalloc(sizeof(struct fnic2_sess), GFP_KERNEL);
+	if (!sess) {
+		pr_err("%s: Unable to alloc memory for sess\n",
+			__func__);
+		return -1;
+	}
+
+	kref_init(&sess->kref); /* ref for table entry */
+	sess->rport = rport;
+	sess->lport = tpg->lport;
+	sess->fnic2 = fnic2;
+	INIT_LIST_HEAD(&sess->cmd_list);
+
+	sess->se_sess = target_alloc_session(&tpg->se_tpg, FNIC2_MAX_TCMDS,
+		sizeof(struct fnic2_cmd),
+		TARGET_PROT_NORMAL, portname_initiator,
+		sess, fnic2_session_alloc_cb);
+
+	if (IS_ERR(sess->se_sess)) {
+		pr_err("Session creating failed wwpn: %llx\n",
+			rport->wwpn);
+		kfree(sess);
+		return -1;
+	}
+
+	/* was passing an uninitialized local as timer flags (UB); flags are 0 */
+	timer_setup(&sess->sess_timer, fnic2_recv_sess_timer_intr, 0);
+
+	return 0;
+}
+
+/*
+ * fnic2_send_to_fw - post a data-phase request for @tcmd on the copy WQ
+ * @tcmd:     command whose data phase is being started
+ * @req_type: FCPIO_TCMD_SEND_DATA or FCPIO_TCMD_RECV_DATA
+ *
+ * Builds the host request (kept in tcmd->host_req so fnic2_free_tcmd()
+ * releases it) and copies it into the next copy-WQ descriptor.
+ * Returns 0 on success, -1 if allocation fails or no descriptor is free.
+ */
+int fnic2_send_to_fw(struct fnic2_cmd *tcmd, int req_type)
+{
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct vnic_wq_copy *wq;
+	struct fcpio_host_req *desc;
+	unsigned long flags;
+
+	pr_debug("sending tag %x to FW\n", tcmd->cmd_tag);
+
+	tcmd->host_req = fnic2_host_req_alloc_init(tcmd, req_type);
+	if (!tcmd->host_req)
+		return -1;
+
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+
+	/* Use the Copy WQ to send it to FW */
+	wq = &fnic2->wq_copy[0];
+
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0]) {
+		pr_debug("Freeing the copy wq desc avail: %d\n", vnic_wq_copy_desc_avail(wq));
+		free_wq_copy_descs(fnic2, wq);
+	}
+
+	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+		/* the dead WARN_ON(0) that used to sit here never fired */
+		pr_err("DEBUG %s FAILED No WQ_COPY desc\n", __func__);
+		spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+		return -1;
+	}
+
+	desc = vnic_wq_copy_next_desc(wq);
+	memcpy(desc, tcmd->host_req, sizeof(struct fcpio_host_req));
+
+	vnic_wq_copy_post(wq);
+
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+
+	return 0;
+}
+
+/*
+ * fnic2_host_req_alloc_init - build a FCPIO data request for @tcmd
+ * @tcmd:     command whose SGL is to be described to the firmware
+ * @req_type: FCPIO_TCMD_SEND_DATA or FCPIO_TCMD_RECV_DATA
+ *
+ * Allocates the host request and a 16-byte-aligned SG descriptor list,
+ * maps every non-empty scatterlist entry for DMA (recording the kmap
+ * cookies in tcmd->kmap_addr[]), then fills in the fcpio_tcmd_data
+ * payload including over/underflow residual flags.  Returns NULL on
+ * allocation failure without leaking either allocation.
+ *
+ * NOTE(review): the kmap_atomic() cookies are presumably released in
+ * fnic2_unmap_free_sgl() -- confirm, and confirm sgcnt can never exceed
+ * the 256-entry kmap_addr[] array.
+ */
+static struct fcpio_host_req *fnic2_host_req_alloc_init(struct fnic2_cmd *tcmd, int req_type)
+{
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct fcpio_host_req *host_req;
+	struct fcpio_tcmd_data *tdata;
+	int i, len, offset;
+	struct page *page = NULL;
+	void *page_addr;
+	struct host_sg_desc *desc_sge;
+	uint64_t page_addr_pa;
+	int remaining;
+	int j;
+	struct scatterlist *sg = NULL;
+	struct se_cmd *se_cmd = &tcmd->se_cmd;
+
+	host_req = kzalloc(sizeof(struct fcpio_host_req), GFP_KERNEL);
+	if (!host_req)
+		return NULL;
+
+	host_req->hdr.type = req_type;
+	host_req->hdr.fcpio_tag = tcmd->cmd_tag;
+
+	tdata = &host_req->u.tdata;
+	hton24(tdata->d_id, tcmd->sess->rport->fcid);
+	hton24(tdata->s_id, fnic2->lport.fcid);
+	tdata->ox_id = tcmd->ox_id;
+
+	tdata->data_len = tcmd->data_len_xfer;
+	tdata->mss = FNIC2_FCOE_MAX_PAYLOAD;
+
+	/* extra FNIC2_SGL_ALIGN bytes allow manual 16-byte alignment below */
+	tcmd->sg_desc_va_unaligned = kzalloc(FNIC2_SGL_SZ + FNIC2_SGL_ALIGN, GFP_KERNEL);
+	if (!tcmd->sg_desc_va_unaligned) {
+		kfree(host_req);	/* do not leak the request on SGL failure */
+		return NULL;
+	}
+
+	tcmd->sg_desc_va = (struct host_sg_desc *)BUF_ALIGN_16(tcmd->sg_desc_va_unaligned);
+
+	pr_debug("DEBUG fn: %s, ln: %d, tag: %d, va_unal: %pK, va: %pK\n",
+		__func__, __LINE__, tcmd->cmd_tag, tcmd->sg_desc_va_unaligned, tcmd->sg_desc_va);
+	pr_debug("DEBUG tag: %x, sgcnt: %d\n", tcmd->cmd_tag, tcmd->sgcnt);
+
+
+	desc_sge = &tcmd->sg_desc_va[0];
+
+	tcmd->sg_desc_count = tcmd->sgcnt;
+
+	i = 0; j = 0;
+	remaining = tcmd->se_cmd.data_length;
+	pr_debug("DEBUG datalenxfer: %d\n", remaining);
+
+	/* First SG entry: map and describe unless it is zero length */
+	sg = tcmd->se_cmd.t_data_sg;
+	len = sg->length;
+	offset = sg->offset;
+	page = sg_page(sg);
+	if (len == 0) {
+		pr_debug("DEBUG zero len first entry: %d\n", 0);
+	} else {
+		page_addr =  kmap_atomic(page + (offset >> PAGE_SHIFT));
+		tcmd->kmap_addr[j] = page_addr;
+
+		page_addr_pa = pci_map_single(fnic2->pdev, page_addr, len, tcmd->dma_direction);
+		desc_sge->addr = page_addr_pa;
+
+		desc_sge->len =  len;
+		pr_debug("DEBUG addr: %llx len: %d\n", desc_sge->addr, len);
+		pr_debug("DEBUG page %pK pa %llx sge len %x rmain: %d", page_addr, page_addr_pa, desc_sge->len, remaining);
+		desc_sge++;
+		j++;
+
+	}
+	remaining = remaining - len;
+
+	pr_debug("DEBUG remaining: %d\n", remaining);
+
+	/* Remaining SG entries, skipping zero-length ones */
+	while (remaining) {
+
+		sg = sg_next(sg);
+		if (sg == NULL) {
+			pr_debug("DEBUG no more entry: %d\n", i);
+			break;
+		}
+		len = sg->length;
+		offset = sg->offset;
+		page = sg_page(sg);
+		pr_debug("DEBUG parse sge i: %d, len: %d\n", i, len);
+		if (!len) {
+			i++;
+			continue;
+		}
+		page_addr =  kmap_atomic(page + (offset >> PAGE_SHIFT));
+		tcmd->kmap_addr[j] = page_addr;
+
+		page_addr_pa = pci_map_single(fnic2->pdev, page_addr, len, tcmd->dma_direction);
+		desc_sge->addr = page_addr_pa;
+
+		desc_sge->len =  len;
+		pr_debug("DEBUG page %pK pa %llx sge len %x rmain: %d", page_addr, page_addr_pa, desc_sge->len, remaining);
+		desc_sge++;
+
+		remaining = remaining - len;
+		j++; i++;
+
+	}
+
+	/* Map the descriptor list itself for the firmware */
+	tcmd->sg_desc_pa = pci_map_single(fnic2->pdev, tcmd->sg_desc_va,
+		sizeof(struct host_sg_desc) * tcmd->sgcnt,
+		PCI_DMA_TODEVICE);
+
+	pr_debug("DEBUG sg_desc %llx", tcmd->sg_desc_pa);
+
+	/* Propagate over/underflow residuals from LIO into the response IU */
+	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
+		pr_debug("DEBUG resid valid flag: %x, count: %d\n",
+			se_cmd->se_cmd_flags, se_cmd->residual_count);
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+			tdata->rsp_iu_flags |= FCP_RSP_FLAG_OVERFLOW;
+		else
+			tdata->rsp_iu_flags |= FCP_RSP_FLAG_UNDERFLOW;
+		tdata->resid = se_cmd->residual_count;
+	}
+
+	tdata->sgl_pa = tcmd->sg_desc_pa;
+	tdata->sgl_cnt = tcmd->sgcnt;
+	tdata->send_resp = (req_type == FCPIO_TCMD_SEND_DATA) ? 1 : 0;
+
+	return host_req;
+}
+
+/* FW finished sending read data; release the command back to LIO */
+static void fnic2_send_rd_data_cmpl(struct fnic2_cmd *tcmd)
+{
+	/* Notify LIO that the cmd is completed */
+	transport_generic_free_cmd(&tcmd->se_cmd, 0);
+}
+
+/*
+ * fnic2_recv_wr_data_cmpl - all write data for a command has arrived
+ * @tcmd: command whose data phase just completed
+ *
+ * Hands the command to LIO for execution now that the data is in place.
+ * (Removed a leftover "#if 0" test hack that randomly dropped completions.)
+ */
+static void fnic2_recv_wr_data_cmpl(struct fnic2_cmd *tcmd)
+{
+	pr_debug("DEBUG wr_data_cmpl tag: %x\n", tcmd->cmd_tag);
+
+	/* Notify LIO that the cmd is completed */
+	target_execute_cmd(&tcmd->se_cmd);
+}
+
+/*
+ * fnic2_send_tmr_resp - send the FCP response frame for a task management req
+ * @tcmd:   task management command context
+ * @status: SCSI status placed in the response
+ * @code:   FCP rsp_info response code reported to the initiator
+ *
+ * Builds the response from the fnic2_fcp_rsp template and hands it to the
+ * FCoE transmit path. May run in atomic context (GFP_ATOMIC allocation).
+ */
+void fnic2_send_tmr_resp(struct fnic2_cmd *tcmd, u32 status, u8 code)
+{
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct fnic2_sess *sess = tcmd->sess;
+	struct fnic2_rport *rport = sess->rport;
+	struct fc_fcp_rsp *fcp_rsp;
+	uint8_t s_id[3];
+	uint8_t d_id[3];
+	struct fc_hdr *fchdr;
+	struct se_cmd *se_cmd = &tcmd->se_cmd;
+	int addl_len = 0;
+	struct fcp_resp_rsp_info *info;
+
+	pr_err("SEND_TMR_RESP tcmd %pK fnic %pK sess %pK se_cmd%pK\n",
+		tcmd, fnic2, sess, se_cmd);
+
+	/* 2112 bytes = maximum FC frame payload; holds rsp hdr + rsp info */
+	fcp_rsp = kzalloc(2112, GFP_ATOMIC);
+	if (!fcp_rsp) {
+		/* was WARN_ON() followed by a NULL dereference */
+		pr_err("fnic2: TMR resp alloc failed, tag %x\n", tcmd->cmd_tag);
+		return;
+	}
+	fchdr = &fcp_rsp->fchdr;
+
+	hton24(s_id, fnic2->lport.fcid);
+	hton24(d_id, rport->fcid);
+
+	memcpy(fcp_rsp, &fnic2_fcp_rsp, sizeof(struct fc_fcp_rsp));
+	memcpy(fchdr->d_id, d_id, 3);
+	memcpy(fchdr->s_id, s_id, 3);
+	fchdr->ox_id = htons(tcmd->ox_id);
+	fchdr->rx_id = htons(tcmd->rx_id);
+
+	fcp_rsp->scsi_status = status;
+
+	/* append the FCP_RSP_INFO block carrying the TMR response code */
+	if (se_cmd->scsi_status == SAM_STAT_GOOD) {
+		fcp_rsp->flags |= FCP_RSP_LEN_VAL;
+		addl_len = sizeof(struct fcp_resp_rsp_info);
+		info = (struct fcp_resp_rsp_info *)(fcp_rsp + 1);
+		info->rsp_code = code;
+	}
+
+	fnic2_send_fcoe_frame(&fnic2->lport, fcp_rsp, sizeof(struct fc_fcp_rsp) + addl_len);
+	kfree(fcp_rsp);
+}
+
+/*
+ * fnic2_send_fcp_resp - send the FCP_RSP frame for a completed SCSI command
+ * @tcmd: command whose LIO status/sense is returned to the initiator
+ *
+ * Copies the template response header, fills in status, residual and sense
+ * data from the se_cmd, and hands the frame to the FCoE transmit path.
+ * May run in atomic context (GFP_ATOMIC allocation).
+ */
+void fnic2_send_fcp_resp(struct fnic2_cmd *tcmd)
+{
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct fnic2_sess *sess = tcmd->sess;
+	struct fnic2_rport *rport = sess->rport;
+	struct fc_fcp_rsp *fcp_rsp;
+	uint8_t s_id[3];
+	uint8_t d_id[3];
+	struct fc_hdr *fchdr;
+	struct se_cmd *se_cmd = &tcmd->se_cmd;
+	int sense_len = 0;
+
+	/* 2112 bytes = maximum FC frame payload; holds rsp hdr + sense */
+	fcp_rsp = kzalloc(2112, GFP_ATOMIC);
+	if (!fcp_rsp) {
+		/* was WARN_ON() followed by a NULL dereference */
+		pr_err("fnic2: FCP resp alloc failed, tag %x\n", tcmd->cmd_tag);
+		return;
+	}
+	fchdr = &fcp_rsp->fchdr;
+
+	hton24(s_id, fnic2->lport.fcid);
+	hton24(d_id, rport->fcid);
+
+	memcpy(fcp_rsp, &fnic2_fcp_rsp, sizeof(struct fc_fcp_rsp));
+	memcpy(fchdr->d_id, d_id, 3);
+	memcpy(fchdr->s_id, s_id, 3);
+	fchdr->ox_id = htons(tcmd->ox_id);
+	fchdr->rx_id = htons(tcmd->rx_id);
+
+	fcp_rsp->scsi_status = se_cmd->scsi_status;
+
+	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
+		pr_debug("DEBUG resid valid flag: %x, count: %d\n",
+			se_cmd->se_cmd_flags, se_cmd->residual_count);
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+			fcp_rsp->flags |= FCP_RSP_FLAG_OVERFLOW;
+		else
+			fcp_rsp->flags |= FCP_RSP_FLAG_UNDERFLOW;
+		fcp_rsp->resid = htonl(se_cmd->residual_count);
+	}
+
+	/* attach sense data for any non-GOOD status */
+	if (se_cmd->scsi_status != SAM_STAT_GOOD) {
+		sense_len = se_cmd->scsi_sense_length;
+		if (sense_len > 0) {
+			fcp_rsp->flags |= FCP_SNS_LEN_VAL;
+			fcp_rsp->sense_len = htonl(sense_len);
+			memcpy((fcp_rsp + 1), se_cmd->sense_buffer, sense_len);
+		}
+	}
+
+	fnic2_send_fcoe_frame(&fnic2->lport, fcp_rsp, sizeof(struct fc_fcp_rsp) + sense_len);
+	kfree(fcp_rsp);
+}
+
+/*
+ * fnic2_free_fw_res - ask the firmware to release its state for a tcmd
+ * @tcmd: command whose firmware resources (keyed by rx_id) are released
+ *
+ * Posts an FCPIO_TCMD_FREE_CMD descriptor on copy WQ 0 under its lock.
+ */
+void fnic2_free_fw_res(struct fnic2_cmd *tcmd)
+{
+	struct fcpio_host_req *host_req;
+	struct fcpio_host_req *desc;
+	struct fnic2 *fnic2 = tcmd->fnic2;
+	struct vnic_wq_copy *wq;
+	unsigned long flags;
+
+	/* Fill the structure to send it to fw */
+	host_req = kzalloc(sizeof(struct fcpio_host_req), GFP_KERNEL);
+	if (!host_req) {
+		/* was WARN_ON() followed by a NULL dereference */
+		pr_err("fnic2: free_cmd alloc failed, tag %x\n", tcmd->cmd_tag);
+		return;
+	}
+
+	host_req->hdr.type = FCPIO_TCMD_FREE_CMD;
+	/* FW uses rx_id for freeing write commands */
+	host_req->hdr.fcpio_tag = tcmd->rx_id;
+
+	spin_lock_irqsave(&fnic2->wq_copy_lock[0], flags);
+
+	/* Use the Copy WQ to send it to FW */
+	wq = &fnic2->wq_copy[0];
+
+	/* reclaim completed descriptors when the queue runs low */
+	if (vnic_wq_copy_desc_avail(wq) <= fnic2->wq_copy_desc_low[0])
+		free_wq_copy_descs(fnic2, wq);
+
+	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
+		pr_err("DEBUG ERROR no wq desc for free_cmd\n");
+		spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+		kfree(host_req);
+		return;
+	}
+
+	desc = vnic_wq_copy_next_desc(wq);
+	memcpy(desc, host_req, sizeof(struct fcpio_host_req));
+
+	pr_debug("Sent free req to FW\n");
+
+	vnic_wq_copy_post(wq);
+
+	spin_unlock_irqrestore(&fnic2->wq_copy_lock[0], flags);
+
+	kfree(host_req);
+}
+
+/*
+ * fnic2_unmap_free_sgl - undo the DMA mappings built for a command's SGL
+ *
+ * Unmaps the descriptor list itself, then each data-buffer entry, and
+ * finally frees the unaligned descriptor allocation.
+ *
+ * NOTE(review): the kmap_addr[] entries are created with kmap_atomic()
+ * at map time but released here with kunmap() — mismatched API. Confirm
+ * and convert the map side to kmap()/kunmap() (kunmap_atomic() would have
+ * to run in the same atomic context, which this teardown path is not).
+ */
+static void fnic2_unmap_free_sgl(struct fnic2 *fnic2, struct fnic2_cmd *tcmd)
+{
+	struct host_sg_desc *desc_sge;
+	int i;
+
+	/* unmap the SG descriptor table that was handed to the firmware */
+	pci_unmap_single(fnic2->pdev, tcmd->sg_desc_pa, sizeof(struct host_sg_desc) * tcmd->sgcnt,
+		PCI_DMA_TODEVICE);
+
+	desc_sge = &tcmd->sg_desc_va[0];
+
+	/* Now, each entries in the sglist pci_unmap and kunmap */
+	for (i = 0; i < tcmd->sgcnt; i++) {
+		pci_unmap_single(fnic2->pdev, desc_sge->addr, desc_sge->len, tcmd->dma_direction);
+		desc_sge++;
+		kunmap(tcmd->kmap_addr[i]);
+	}
+
+	/* free the descriptor list */
+	kfree(tcmd->sg_desc_va_unaligned);
+}
+
+/*
+ * fnic2_fcpio_tcmd_cmpl_handler - dispatch a firmware completion for a tcmd
+ * @work: work_struct embedded in the tcmd that completed
+ *
+ * Routes the completion to the proper handler based on the firmware
+ * descriptor header type stashed in tcmd->fw_desc.
+ * (Removed an always-false NULL check: &tcmd->fw_desc can never be NULL.)
+ */
+void fnic2_fcpio_tcmd_cmpl_handler(struct work_struct *work)
+{
+	struct fnic2_cmd *tcmd =
+		container_of(work, struct fnic2_cmd, work);
+	struct fcpio_fw_req *desc = &tcmd->fw_desc;
+
+	pr_debug("DEBUG FW completion tcmd: %pK, tag: %x, hdrtype: %d, hdrstatus: %x\n",
+		tcmd, tcmd->cmd_tag, desc->hdr.type, desc->hdr.status);
+
+	/* For now, only success cases */
+	if (desc->hdr.status != FCPIO_SUCCESS) {
+		pr_debug("ERROR hdrstatus not success: %d\n",
+			desc->hdr.status);
+	}
+
+	switch (desc->hdr.type) {
+	case FCPIO_TCMD_SEND_DATA:
+		fnic2_send_rd_data_cmpl(tcmd);
+		break;
+
+	case FCPIO_TCMD_RECV_DATA:
+		pr_debug("recv_data cmpl hdrtype: %d, tag: %x, rx_id: %x\nWrite completed ox_id %x\n",
+			desc->hdr.type, tcmd->cmd_tag, desc->u.trsp.rx_id,
+			tcmd->ox_id);
+
+		tcmd->rx_id = desc->u.trsp.rx_id;
+		fnic2_recv_wr_data_cmpl(tcmd);
+		break;
+
+	case FCPIO_TCMD_FREE_CMD:
+		pr_debug("free_cmd cmpl hdrtype: %d, tag: %x\n",
+			desc->hdr.type, tcmd->cmd_tag);
+		transport_generic_free_cmd(&tcmd->se_cmd, 0);
+		break;
+
+	case FCPIO_TCMD_ABORT_CMD:
+		pr_debug("abort_cmd cmpl hdrtype: %d, tag: %x\n",
+			desc->hdr.type, tcmd->cmd_tag);
+		fnic2_fw_abort_done(tcmd);
+		break;
+
+	case FCPIO_TCMD_TASK_MGMT:
+		pr_debug("task mgmt cmpl hdrtype: %d, tag: %x\n",
+			desc->hdr.type, tcmd->cmd_tag);
+		fnic2_complete_tm_rsp(tcmd);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/* Utilities */
+
+/*
+ * fnic2_find_tcmd - look a tcmd up in the per-fnic pool by tag
+ * @fnic2:   owning adapter instance
+ * @cmd_tag: index into tcmd_pool, must be < FNIC2_MAX_TCMDS
+ *
+ * Returns the pool entry, or NULL on an out-of-range tag (previously the
+ * code WARNed and then indexed out of bounds anyway).
+ */
+struct fnic2_cmd *fnic2_find_tcmd(struct fnic2 *fnic2, uint32_t cmd_tag)
+{
+	struct fnic2_cmd *tcmd;
+
+	pr_debug("DEBUG find tcmd: tag: %d\n", cmd_tag);
+
+	if (WARN_ON(cmd_tag >= FNIC2_MAX_TCMDS))
+		return NULL;
+
+	tcmd = &fnic2->tcmd_pool[cmd_tag];
+	pr_debug("DEBUG tcmd: %pK\n", tcmd);
+	return tcmd;
+}
+
+/*
+ * find_fnic2_wwpn - locate the fnic2 instance whose local port owns @wwpn
+ *
+ * Walks the global fnic2_list; returns the match or NULL if no local
+ * port carries that WWPN.
+ */
+struct fnic2 *find_fnic2_wwpn(uint64_t wwpn)
+{
+	struct fnic2 *iter;
+
+	list_for_each_entry(iter, &fnic2_list, list)
+		if (iter->lport.wwpn == wwpn)
+			return iter;
+
+	return NULL;
+}
+
+/* Find the LIO session whose remote port has FC address @s_id, or NULL. */
+struct fnic2_sess *fnic2_find_sess_s_id(struct fnic2 *fnic2, uint32_t s_id)
+{
+	struct fnic2_sess *cur;
+
+	list_for_each_entry(cur, &fnic2->lio.sess_list, list)
+		if (cur->rport->fcid == s_id)
+			return cur;
+
+	return NULL;
+}
+
+/* Find the session whose local port WWPN matches @wwpn, or NULL. */
+struct fnic2_sess *fnic2_find_session(struct fnic2 *fnic2, uint64_t wwpn)
+{
+	struct fnic2_sess *cur;
+
+	/* only sessions that still have a remote port are candidates */
+	list_for_each_entry(cur, &fnic2->lio.sess_list, list)
+		if (cur->rport && cur->lport->lport_wwpn == wwpn)
+			return cur;
+
+	return NULL;
+}
+
+/*
+ * Format a 64-bit WWN into @buf as colon-separated hex bytes.
+ *
+ * NOTE(review): put_unaligned_be64() stores the most significant byte in
+ * b[0], yet the bytes are printed b[7]..b[0] — least significant byte
+ * first. Confirm this reversed presentation is intended; conventional FC
+ * WWN formatting prints b[0]..b[7].
+ */
+static void fnic2_format_wwnn(char *buf, int len, uint64_t wwn)
+{
+	uint8_t b[8];
+
+	put_unaligned_be64(wwn, b);
+	snprintf(buf, len,
+		 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		 b[7], b[6], b[5], b[4], b[3], b[2], b[1], b[0]);
+}
+
+/*
+ * Handle Task Management Request: translate the FCP task management
+ * flag into a LIO TMR function code and submit it to the target core.
+ */
+static void fnic2_send_tm(struct fnic2_cmd *tcmd)
+{
+	struct fc_fcp_cmnd *cmnd = (struct fc_fcp_cmnd *)tcmd->rx_frame;
+	uint8_t tmr_func;
+	int ret;
+
+	pr_err("GOT TASK MANAGEMENT %x", cmnd->tm_flags);
+
+	switch (cmnd->tm_flags) {
+	case FCP_TMF_LUN_RESET:
+		tmr_func = TMR_LUN_RESET;
+		break;
+	case FCP_TMF_TGT_RESET:
+		tmr_func = TMR_TARGET_WARM_RESET;
+		break;
+	case FCP_TMF_CLR_TASK_SET:
+		tmr_func = TMR_CLEAR_TASK_SET;
+		break;
+	case FCP_TMF_ABT_TASK_SET:
+		tmr_func = TMR_ABORT_TASK_SET;
+		break;
+	case FCP_TMF_CLR_ACA:
+		tmr_func = TMR_CLEAR_ACA;
+		break;
+	default:
+		/* unknown TM function: reject it back to the initiator */
+		pr_err("invalid FCP tm_flags %x\n", cmnd->tm_flags);
+		fnic2_send_tmr_resp_and_free(tcmd, FCP_CMND_FIELDS_INVALID);
+		return;
+	}
+
+	ret = target_submit_tmr(&tcmd->se_cmd, tcmd->sess->se_sess,
+				tcmd->sense_buf,
+				scsilun_to_int(&cmnd->fcp_lun),
+				tcmd, tmr_func, GFP_KERNEL, tcmd->cmd_tag,
+				TARGET_SCF_ACK_KREF);
+	if (ret) {
+		pr_err("target_submit_tmr FAILED rc %d\n", ret);
+		fnic2_send_tmr_resp_and_free(tcmd, FCP_CMND_FIELDS_INVALID);
+	}
+}
+
+
+/*
+ * fnic2_recv_tcmd_timeout_intr - Process intr from tcmd timeout
+ *
+ * @timer - pointer to the expired io_timer embedded in the tcmd
+ *
+ * This function is called in an interrupt context when a tcmd
+ * exceeds the timeout value. It queues work on another
+ * function to clean up fnic2 resources.
+ */
+void fnic2_recv_tcmd_timeout_intr(struct timer_list *timer)
+{
+	struct fnic2_cmd *tcmd = container_of(timer, struct fnic2_cmd, io_timer);
+	int cpu;
+
+	pr_debug("Got interrupt on tag %x on fnic2 %d tcmds left: %d\n",
+		tcmd->cmd_tag, tcmd->fnic2->fnic2_num, tcmd->fnic2->freecmds);
+
+	/* Queue on proper cpu */
+	cpu = get_cpu_to_queue(tcmd->cmd_tag);
+
+	INIT_WORK(&tcmd->work, fnic2_process_tcmd_timeout);
+	queue_work_on(cpu, fnic2_tcmd_wq, &tcmd->work);
+}
+
+/*
+ * fnic2_process_tcmd_timeout - Process a tcmd that has timed out
+ *
+ * @work - work_struct that this function was queued with
+ *
+ * Free a tcmd that has been held by driver for the timeout period
+ * Create a dummy ABTS command and submit to LIO and FW
+ */
+static void fnic2_process_tcmd_timeout(struct work_struct *work)
+{
+	/* tcmd is original IO to be aborted, abort_tcmd is used as the ABTS cmd */
+	struct fnic2_cmd *tcmd, *abort_tcmd;
+	struct fnic2 *fnic2;
+	struct fnic2_sess *sess;
+	unsigned long flags;
+	unsigned long fnic2_flags;
+
+	tcmd = container_of(work, struct fnic2_cmd, work);
+
+	spin_lock_irqsave(&tcmd->lock, flags);
+
+	/* abort_tag == 0 means the IO completed before the timer work ran */
+	if (tcmd->abort_tag == 0) {
+		pr_err("tcmd tag %x freed before timeout\n", tcmd->cmd_tag);
+		spin_unlock_irqrestore(&tcmd->lock, flags);
+		return;
+	}
+
+	fnic2 = tcmd->fnic2;
+	abort_tcmd = fnic2_get_tcmd_from_pool(fnic2);
+
+	if (!abort_tcmd) {
+		pr_err("No TCMDs available! %s\n", __func__);
+		spin_unlock_irqrestore(&tcmd->lock, flags);
+		return;
+	}
+
+	abort_tcmd->flags |= FNIC2_TCMD_IS_ABTS;
+
+	pr_err("Got tcmd tag %x for timeout for io %x on fnic %d, tcmd->abort_tag %x ox_id %x\n",
+		abort_tcmd->cmd_tag, tcmd->cmd_tag, fnic2->fnic2_num,
+		tcmd->abort_tag, tcmd->ox_id);
+
+	if (tcmd->flags & FNIC2_TCMD_ABORTED) {
+		pr_err("tcmd tag %x already being freed not in timeout\n", tcmd->cmd_tag);
+		goto timeout_error;
+	}
+
+	tcmd->flags |= FNIC2_TCMD_ABORTED;
+
+	sess = fnic2_find_sess_s_id(fnic2, tcmd->s_id);
+	if (!sess) {
+		pr_err("No session found for %s\n", __func__);
+		goto timeout_error;
+	}
+
+	spin_lock_irqsave(&fnic2->fnic2_lock, fnic2_flags);
+
+	/* arm a LOGO for the session unless one is already pending */
+	if (!timer_pending(&sess->sess_timer)) {
+		fdls_construct_logo_req(&fnic2->lport, (struct fc_hdr *)tcmd->rx_frame, sess);
+		sess->sess_timer.expires = jiffies + (JIFFIES_PER_MINUTE * 5);
+		add_timer(&sess->sess_timer);
+	}
+
+	spin_unlock_irqrestore(&fnic2->fnic2_lock, fnic2_flags);
+
+	/* cross-link the original IO and the ABTS tcmd before firing at FW */
+	tcmd->abort_tag = abort_tcmd->cmd_tag;
+	abort_tcmd->killed_tcmd = tcmd;
+	tcmd->abort_tcmd = abort_tcmd;
+
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+
+	fnic2_send_abort_to_fw(tcmd);
+
+	return;
+
+timeout_error:
+	spin_unlock_irqrestore(&tcmd->lock, flags);
+	fnic2_free_tcmd(abort_tcmd);
+}
+
+/*
+ * fnic2_recv_sess_timer_intr - receive an interrupt on a dead session
+ *
+ * @timer - timer inside session that received interrupt
+ *
+ * Session has sat idle for extended period of time
+ * Send a logo and delete the session
+ */
+static void fnic2_recv_sess_timer_intr(struct timer_list *timer)
+{
+	struct fnic2_sess *sess = container_of(timer, struct fnic2_sess, sess_timer);
+	struct fnic2 *fnic2 = sess->fnic2;
+
+	pr_err("Got sess %pK timer interrupt\n", sess);
+
+	/* send the pre-built LOGO and tear down the remote port */
+	fnic2_send_fcoe_frame(&fnic2->lport, &sess->timer_logo_req,
+			      sizeof(struct fc_logo_req));
+
+	fdls_delete_rport(&fnic2->lport, sess->rport);
+}
diff --git a/drivers/staging/fnic2/src/fnic2_io.h b/drivers/staging/fnic2/src/fnic2_io.h
new file mode 100644
index 0000000..59cb97e
--- /dev/null
+++ b/drivers/staging/fnic2/src/fnic2_io.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC2_IO_H_
+#define _FNIC2_IO_H_
+
+#include <scsi/fc/fc_fcp.h>
+
+#define FNIC2_DFLT_SG_DESC_CNT	32
+#define FNIC2_MAX_SG_DESC_CNT	256	/* Maximum descriptors per sgl */
+
+/* Single scatter-gather element handed to firmware: 16 bytes */
+struct host_sg_desc {
+	__le64		addr;	/* DMA address of the data buffer */
+	__le32		len;	/* buffer length in bytes */
+	uint32_t	_resvd;	/* pad/reserved */
+};
+
+#endif /* _FNIC2_IO_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 07/10] staging: fnic2 add queue descriptors
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (5 preceding siblings ...)
  2018-04-05 21:21 ` [PATCH 06/10] staging: fnic2 add main frame processing Oliver Smith-Denny
@ 2018-04-05 21:21 ` Oliver Smith-Denny
  2018-04-05 21:22 ` [PATCH 08/10] staging: fnic2 add vnic queue interface Oliver Smith-Denny
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:21 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain queueing interfaces for software.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/cq_desc.h      |  74 +++++++++++++++
 drivers/staging/fnic2/src/cq_enet_desc.h | 158 +++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/cq_exch_desc.h | 104 ++++++++++++++++++++
 drivers/staging/fnic2/src/rq_enet_desc.h |  45 +++++++++
 drivers/staging/fnic2/src/wq_enet_desc.h |  69 ++++++++++++++
 5 files changed, 450 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/cq_desc.h
 create mode 100644 drivers/staging/fnic2/src/cq_enet_desc.h
 create mode 100644 drivers/staging/fnic2/src/cq_exch_desc.h
 create mode 100644 drivers/staging/fnic2/src/rq_enet_desc.h
 create mode 100644 drivers/staging/fnic2/src/wq_enet_desc.h

diff --git a/drivers/staging/fnic2/src/cq_desc.h b/drivers/staging/fnic2/src/cq_desc.h
new file mode 100644
index 0000000..638753d
--- /dev/null
+++ b/drivers/staging/fnic2/src/cq_desc.h
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_DESC_H_
+#define _CQ_DESC_H_
+
+/*
+ * Completion queue descriptor types
+ */
+enum cq_desc_types {
+	CQ_DESC_TYPE_RQ_ENET = 3,
+	CQ_DESC_TYPE_RQ_FCP = 4,
+};
+
+/* Completion queue descriptor: 16B
+ *
+ * All completion queues have this basic layout.  The
+ * type_specific area is unique for each completion
+ * queue type.
+ */
+struct cq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	uint8_t type_specific[11];
+	uint8_t type_color;
+};
+
+#define CQ_DESC_TYPE_BITS        4
+#define CQ_DESC_TYPE_MASK        ((1 << CQ_DESC_TYPE_BITS) - 1)
+#define CQ_DESC_COLOR_MASK       1
+#define CQ_DESC_COLOR_SHIFT      7
+#define CQ_DESC_Q_NUM_BITS       10
+#define CQ_DESC_Q_NUM_MASK       ((1 << CQ_DESC_Q_NUM_BITS) - 1)
+#define CQ_DESC_COMP_NDX_BITS    12
+#define CQ_DESC_COMP_NDX_MASK    ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
+
+/* Decode the common fields of a completion descriptor. */
+static inline void cq_desc_dec(const struct cq_desc *desc_arg, uint8_t *type,
+			       uint8_t *color, uint16_t *q_number,
+			       uint16_t *completed_index)
+{
+	const uint8_t type_color = desc_arg->type_color;
+
+	*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+	/*
+	 * The color bit must be read before any other descriptor field:
+	 * hardware guarantees it is the last byte written, so the read
+	 * barrier keeps the compiler/CPU from reordering the loads and
+	 * observing stale values.
+	 */
+	rmb();
+
+	*type = type_color & CQ_DESC_TYPE_MASK;
+	*q_number = le16_to_cpu(desc_arg->q_number) & CQ_DESC_Q_NUM_MASK;
+	*completed_index = le16_to_cpu(desc_arg->completed_index) &
+		CQ_DESC_COMP_NDX_MASK;
+}
+
+#endif /* _CQ_DESC_H_ */
diff --git a/drivers/staging/fnic2/src/cq_enet_desc.h b/drivers/staging/fnic2/src/cq_enet_desc.h
new file mode 100644
index 0000000..606cb89
--- /dev/null
+++ b/drivers/staging/fnic2/src/cq_enet_desc.h
@@ -0,0 +1,158 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_ENET_DESC_H_
+#define _CQ_ENET_DESC_H_
+
+#include "cq_desc.h"
+
+/* Ethernet completion queue descriptor: 16B */
+struct cq_enet_wq_desc {
+	__le16 completed_index;
+	__le16 q_number;
+	uint8_t reserved[11];
+	uint8_t type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 16B */
+struct cq_enet_rq_desc {
+	__le16 completed_index_flags;
+	__le16 q_number_rss_type_flags;
+	__le32 rss_hash;
+	__le16 bytes_written_flags;
+	__le16 vlan;
+	__le16 checksum_fcoe;
+	uint8_t flags;
+	uint8_t type_color;
+};
+
+#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT          (0x1 << 12)
+#define CQ_ENET_RQ_DESC_FLAGS_FCOE                  (0x1 << 13)
+#define CQ_ENET_RQ_DESC_FLAGS_EOP                   (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_SOP                   (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS               4
+#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
+	((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
+
+#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC         (0x1 << 14)
+
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS          14
+#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
+	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
+#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)
+
+#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
+#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
+#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
+	((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
+#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT              8
+
+#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK       (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK              (0x1 << 0)
+#define CQ_ENET_RQ_DESC_FLAGS_UDP                   (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR              (0x1 << 1)
+#define CQ_ENET_RQ_DESC_FLAGS_TCP                   (0x1 << 2)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK          (0x1 << 3)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV6                  (0x1 << 4)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4                  (0x1 << 5)
+#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT         (0x1 << 6)
+#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK                (0x1 << 7)
+
+/*
+ * cq_enet_rq_desc_dec - decode an Ethernet receive completion descriptor
+ *
+ * Splits every packed field of @desc out into the caller-supplied
+ * variables. FCoE-specific subfields are only meaningful when *fcoe is
+ * set; otherwise the checksum field carries the L4 checksum.
+ */
+static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, uint8_t *type,
+				       uint8_t *color, uint16_t *q_number,
+				       uint16_t *completed_index, uint8_t *ingress_port,
+				       uint8_t *fcoe, uint8_t *eop, uint8_t *sop,
+				       uint8_t *rss_type, uint8_t *csum_not_calc,
+				       uint32_t *rss_hash, uint16_t *bytes_written,
+				       uint8_t *packet_error, uint8_t *vlan_stripped,
+				       uint16_t *vlan, uint16_t *checksum, uint8_t *fcoe_sof,
+				       uint8_t *fcoe_fc_crc_ok, uint8_t *fcoe_enc_error,
+				       uint8_t *fcoe_eof,
+				       uint8_t *tcp_udp_csum_ok, uint8_t *udp, uint8_t *tcp,
+				       uint8_t *ipv4_csum_ok, uint8_t *ipv6, uint8_t *ipv4,
+				       uint8_t *ipv4_fragment, uint8_t *fcs_ok)
+{
+	uint16_t completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	uint16_t q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	uint16_t bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+	/* convert once; both the SOF and EOF subfields are read from it */
+	uint16_t checksum_fcoe = le16_to_cpu(desc->checksum_fcoe);
+
+	cq_desc_dec((struct cq_desc *)desc, type,
+		color, q_number, completed_index);
+
+	*ingress_port = (completed_index_flags &
+		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+		1 : 0;
+	*eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+		1 : 0;
+	*sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+		1 : 0;
+
+	*rss_type = (uint8_t)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+		CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+	*csum_not_calc = (q_number_rss_type_flags &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+	*rss_hash = le32_to_cpu(desc->rss_hash);
+
+	*bytes_written = bytes_written_flags &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_error = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+	*vlan_stripped = (bytes_written_flags &
+		CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+	*vlan = le16_to_cpu(desc->vlan);
+
+	if (*fcoe) {
+		*fcoe_sof = (uint8_t)(checksum_fcoe &
+			CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+		*fcoe_fc_crc_ok = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+		*fcoe_enc_error = (desc->flags &
+			CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+		/*
+		 * Fix: the EOF byte was previously extracted from the raw
+		 * __le16 field without le16_to_cpu(), yielding the wrong
+		 * byte on big-endian hosts.
+		 */
+		*fcoe_eof = (uint8_t)((checksum_fcoe >>
+			CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+			CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+		*checksum = 0;
+	} else {
+		*fcoe_sof = 0;
+		*fcoe_fc_crc_ok = 0;
+		*fcoe_enc_error = 0;
+		*fcoe_eof = 0;
+		*checksum = checksum_fcoe;
+	}
+
+	*tcp_udp_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+	*udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+	*tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+	*ipv4_csum_ok =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+	*ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+	*ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+	*ipv4_fragment =
+		(desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+	*fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/staging/fnic2/src/cq_exch_desc.h b/drivers/staging/fnic2/src/cq_exch_desc.h
new file mode 100644
index 0000000..bc7e005c
--- /dev/null
+++ b/drivers/staging/fnic2/src/cq_exch_desc.h
@@ -0,0 +1,104 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _CQ_EXCH_DESC_H_
+#define _CQ_EXCH_DESC_H_
+
+#include "cq_desc.h"
+
+#define CQ_EXCH_WQ_STATUS_BITS      2
+#define CQ_EXCH_WQ_STATUS_MASK      ((1 << CQ_EXCH_WQ_STATUS_BITS) - 1)
+
+struct cq_fcp_rq_desc {
+	uint16_t completed_index_eop_sop_prt;
+	uint16_t q_number;
+	uint16_t exchange_id;
+	uint16_t tmpl;
+	uint16_t bytes_written;
+	uint16_t vlan;
+	uint8_t  sof;
+	uint8_t  eof;
+	uint8_t  fcs_fer_fck;
+	uint8_t  type_color;
+};
+
+#define CQ_FCP_RQ_DESC_FLAGS_SOP		(1 << 15)
+#define CQ_FCP_RQ_DESC_FLAGS_EOP		(1 << 14)
+#define CQ_FCP_RQ_DESC_FLAGS_PRT		(1 << 12)
+#define CQ_FCP_RQ_DESC_TMPL_MASK		0x1f
+#define CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK	0x3fff
+#define CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT		14
+#define CQ_FCP_RQ_DESC_PACKET_ERR_MASK (1 << CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT	15
+#define CQ_FCP_RQ_DESC_VS_STRIPPED_MASK (1 << CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT)
+#define CQ_FCP_RQ_DESC_FC_CRC_OK_MASK		0x1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT		1
+#define CQ_FCP_RQ_DESC_FCOE_ERR_MASK (1 << CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT)
+#define CQ_FCP_RQ_DESC_FCS_OK_SHIFT		7
+#define CQ_FCP_RQ_DESC_FCS_OK_MASK (1 << CQ_FCP_RQ_DESC_FCS_OK_SHIFT)
+
+/*
+ * cq_fcp_rq_desc_dec - decode an FCP receive-queue completion descriptor
+ *
+ * Unpacks every field of @desc_ptr into the caller-supplied variables.
+ *
+ * NOTE(review): unlike cq_desc_dec()/cq_enet_rq_desc_dec(), the multi-byte
+ * fields here are declared uint16_t and read without le16_to_cpu();
+ * confirm whether this descriptor is really host-endian or whether the
+ * struct should use __le16 with conversions like its siblings.
+ */
+static inline void cq_fcp_rq_desc_dec(struct cq_fcp_rq_desc *desc_ptr,
+				      uint8_t  *type,
+				      uint8_t  *color,
+				      uint16_t *q_number,
+				      uint16_t *completed_index,
+				      uint8_t  *eop,
+				      uint8_t  *sop,
+				      uint8_t  *fck,
+				      uint16_t *exchange_id,
+				      uint16_t *tmpl,
+				      uint32_t *bytes_written,
+				      uint8_t  *sof,
+				      uint8_t  *eof,
+				      uint8_t  *ingress_port,
+				      uint8_t  *packet_err,
+				      uint8_t  *fcoe_err,
+				      uint8_t  *fcs_ok,
+				      uint8_t  *vlan_stripped,
+				      uint16_t *vlan)
+{
+	/* common header first (type/color/queue/index, with the rmb()) */
+	cq_desc_dec((struct cq_desc *)desc_ptr, type,
+		    color, q_number, completed_index);
+	*eop = (desc_ptr->completed_index_eop_sop_prt &
+		CQ_FCP_RQ_DESC_FLAGS_EOP) ? 1 : 0;
+	*sop = (desc_ptr->completed_index_eop_sop_prt &
+		CQ_FCP_RQ_DESC_FLAGS_SOP) ? 1 : 0;
+	*ingress_port =
+		(desc_ptr->completed_index_eop_sop_prt &
+		 CQ_FCP_RQ_DESC_FLAGS_PRT) ? 1 : 0;
+	*exchange_id = desc_ptr->exchange_id;
+	*tmpl = desc_ptr->tmpl & CQ_FCP_RQ_DESC_TMPL_MASK;
+	*bytes_written =
+		desc_ptr->bytes_written & CQ_FCP_RQ_DESC_BYTES_WRITTEN_MASK;
+	*packet_err =
+		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_PACKET_ERR_MASK) >>
+		CQ_FCP_RQ_DESC_PACKET_ERR_SHIFT;
+	*vlan_stripped =
+		(desc_ptr->bytes_written & CQ_FCP_RQ_DESC_VS_STRIPPED_MASK) >>
+		CQ_FCP_RQ_DESC_VS_STRIPPED_SHIFT;
+	*vlan = desc_ptr->vlan;
+	*sof = desc_ptr->sof;
+	*fck = desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FC_CRC_OK_MASK;
+	*fcoe_err = (desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCOE_ERR_MASK) >>
+		CQ_FCP_RQ_DESC_FCOE_ERR_SHIFT;
+	*eof = desc_ptr->eof;
+	*fcs_ok =
+		(desc_ptr->fcs_fer_fck & CQ_FCP_RQ_DESC_FCS_OK_MASK) >>
+		CQ_FCP_RQ_DESC_FCS_OK_SHIFT;
+}
+
+#endif /* _CQ_EXCH_DESC_H_ */
diff --git a/drivers/staging/fnic2/src/rq_enet_desc.h b/drivers/staging/fnic2/src/rq_enet_desc.h
new file mode 100644
index 0000000..c8d6e30
--- /dev/null
+++ b/drivers/staging/fnic2/src/rq_enet_desc.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _RQ_ENET_DESC_H_
+#define _RQ_ENET_DESC_H_
+
+/* Ethernet receive queue descriptor: 16B */
+struct rq_enet_desc {
+	__le64	address;
+	__le16	length_type;
+	uint8_t	reserved[6];
+};
+
+#define	RQ_ENET_TYPE_ONLY_SOP	0
+
+#define RQ_ENET_LEN_BITS	14
+#define RQ_ENET_LEN_MASK	((1 << RQ_ENET_LEN_BITS) - 1)
+#define RQ_ENET_TYPE_BITS	2
+#define RQ_ENET_TYPE_MASK	((1 << RQ_ENET_TYPE_BITS) - 1)
+
+/* Encode a receive-queue descriptor: address plus packed length/type. */
+static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+				    uint64_t address,
+				    uint8_t type,
+				    uint16_t length)
+{
+	uint16_t len_type;
+
+	/* low 14 bits carry the length, the next 2 bits the type */
+	len_type = (length & RQ_ENET_LEN_MASK) |
+		   ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS);
+
+	desc->address = cpu_to_le64(address);
+	desc->length_type = cpu_to_le16(len_type);
+}
+
+#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/staging/fnic2/src/wq_enet_desc.h b/drivers/staging/fnic2/src/wq_enet_desc.h
new file mode 100644
index 0000000..51384c6
--- /dev/null
+++ b/drivers/staging/fnic2/src/wq_enet_desc.h
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _WQ_ENET_DESC_H_
+#define _WQ_ENET_DESC_H_
+
+/* Ethernet work queue descriptor: 16B */
+struct wq_enet_desc {
+	__le64 address;			/* transmit buffer address */
+	__le16 length;			/* buffer length, low 14 bits valid */
+	__le16 mss_loopback;		/* mss (14 bits, shifted) | loopback bit */
+	__le16 header_length_flags;	/* hdr len (10 bits) | OM | EOP/CQ/FCoE/VLAN flags */
+	__le16 vlan_tag;		/* tag used when vlan_tag_insert flag is set */
+};
+
+/* Field widths, masks, and bit positions used by wq_enet_desc_enc() */
+enum wq_enet {
+	WQ_ENET_LEN_BITS			= 14,
+	WQ_ENET_LEN_MASK			= ((1 << WQ_ENET_LEN_BITS) - 1),
+	WQ_ENET_MSS_BITS			= 14,
+	WQ_ENET_MSS_MASK			= ((1 << WQ_ENET_MSS_BITS) - 1),
+	WQ_ENET_MSS_SHIFT			= 2,
+	WQ_ENET_LOOPBACK_SHIFT			= 1,
+	WQ_ENET_HDRLEN_BITS			= 10,
+	WQ_ENET_HDRLEN_MASK			= ((1 << WQ_ENET_HDRLEN_BITS) - 1),
+	WQ_ENET_FLAGS_OM_BITS			= 2,
+	WQ_ENET_FLAGS_OM_MASK			= ((1 << WQ_ENET_FLAGS_OM_BITS) - 1),
+	WQ_ENET_FLAGS_EOP_SHIFT			= 12,
+	WQ_ENET_FLAGS_CQ_ENTRY_SHIFT		= 13,
+	WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT		= 14,
+	WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT	= 15
+};
+
+/*
+ * wq_enet_desc_enc - pack a WQ descriptor into its little-endian layout
+ *
+ * Multi-bit fields are masked to their widths; the single-bit flags
+ * (eop, cq_entry, fcoe_encap, vlan_tag_insert, loopback) are masked to 1.
+ */
+static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
+				    uint64_t address, uint16_t length,
+				    uint16_t mss, uint16_t header_length,
+				    uint8_t offload_mode, uint8_t eop,
+				    uint8_t cq_entry, uint8_t fcoe_encap,
+				    uint8_t vlan_tag_insert, uint16_t vlan_tag,
+				    uint8_t loopback)
+{
+	desc->address = cpu_to_le64(address);
+	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
+	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
+		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
+	desc->header_length_flags = cpu_to_le16(
+		(header_length & WQ_ENET_HDRLEN_MASK) |
+		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
+		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
+		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
+		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
+		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
+	desc->vlan_tag = cpu_to_le16(vlan_tag);
+}
+
+#endif /* _WQ_ENET_DESC_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 08/10] staging: fnic2 add vnic queue interface
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (6 preceding siblings ...)
  2018-04-05 21:21 ` [PATCH 07/10] staging: fnic2 add queue descriptors Oliver Smith-Denny
@ 2018-04-05 21:22 ` Oliver Smith-Denny
  2018-04-05 21:23 ` [PATCH 09/10] staging: fnic2 add vnic handling Oliver Smith-Denny
  2018-04-05 21:24 ` [PATCH 10/10] staging: fnic2 add build and config Oliver Smith-Denny
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:22 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain low level queueing interfaces
for hardware.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/vnic_cq.c      |  85 ++++++++++++
 drivers/staging/fnic2/src/vnic_cq.h      | 120 ++++++++++++++++
 drivers/staging/fnic2/src/vnic_cq_copy.h |  61 ++++++++
 drivers/staging/fnic2/src/vnic_rq.c      | 196 ++++++++++++++++++++++++++
 drivers/staging/fnic2/src/vnic_rq.h      | 231 +++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/vnic_wq.c      | 183 ++++++++++++++++++++++++
 drivers/staging/fnic2/src/vnic_wq.h      | 174 +++++++++++++++++++++++
 drivers/staging/fnic2/src/vnic_wq_copy.c | 115 +++++++++++++++
 drivers/staging/fnic2/src/vnic_wq_copy.h | 127 +++++++++++++++++
 9 files changed, 1292 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/vnic_cq.c
 create mode 100644 drivers/staging/fnic2/src/vnic_cq.h
 create mode 100644 drivers/staging/fnic2/src/vnic_cq_copy.h
 create mode 100644 drivers/staging/fnic2/src/vnic_rq.c
 create mode 100644 drivers/staging/fnic2/src/vnic_rq.h
 create mode 100644 drivers/staging/fnic2/src/vnic_wq.c
 create mode 100644 drivers/staging/fnic2/src/vnic_wq.h
 create mode 100644 drivers/staging/fnic2/src/vnic_wq_copy.c
 create mode 100644 drivers/staging/fnic2/src/vnic_wq_copy.h

diff --git a/drivers/staging/fnic2/src/vnic_cq.c b/drivers/staging/fnic2/src/vnic_cq.c
new file mode 100644
index 0000000..ac78b1b
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_cq.c
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/* Release the CQ descriptor ring and detach from its control registers. */
+void vnic_cq_free(struct vnic_cq *cq)
+{
+	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
+
+	cq->ctrl = NULL;
+}
+
+/*
+ * vnic_cq_alloc - bind CQ @index to its device resource and allocate its ring
+ *
+ * Returns 0 on success, -EINVAL when the CQ control resource cannot be
+ * found, or the error from vnic_dev_alloc_desc_ring().
+ */
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	cq->index = index;
+	cq->vdev = vdev;
+
+	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
+	if (!cq->ctrl) {
+		pr_err("Failed to hook CQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/* Program the CQ control registers from the supplied configuration. */
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+		  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+		  unsigned int cq_tail_color, unsigned int interrupt_enable,
+		  unsigned int cq_entry_enable, unsigned int cq_message_enable,
+		  unsigned int interrupt_offset, uint64_t cq_message_addr)
+{
+	uint64_t paddr;
+
+	/* NOTE(review): VNIC_PADDR_TARGET tags the ring base address for the
+	 * adapter — confirm its semantics in vnic_dev.h.
+	 */
+	paddr = (uint64_t)cq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &cq->ctrl->ring_base);
+	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
+	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
+	iowrite32(color_enable, &cq->ctrl->color_enable);
+	iowrite32(cq_head, &cq->ctrl->cq_head);
+	iowrite32(cq_tail, &cq->ctrl->cq_tail);
+	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
+	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
+	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
+	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
+	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
+	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+}
+
+/*
+ * vnic_cq_clean - return the CQ to an empty, post-init state
+ *
+ * Resets software progress (to_clean, last_color), the hardware head/tail
+ * registers, and clears the descriptor ring contents.
+ */
+void vnic_cq_clean(struct vnic_cq *cq)
+{
+	cq->to_clean = 0;
+	cq->last_color = 0;
+
+	iowrite32(0, &cq->ctrl->cq_head);
+	iowrite32(0, &cq->ctrl->cq_tail);
+	iowrite32(1, &cq->ctrl->cq_tail_color);
+
+	vnic_dev_clear_desc_ring(&cq->ring);
+}
diff --git a/drivers/staging/fnic2/src/vnic_cq.h b/drivers/staging/fnic2/src/vnic_cq.h
new file mode 100644
index 0000000..010c044
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_cq.h
@@ -0,0 +1,120 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_H_
+#define _VNIC_CQ_H_
+
+#include "cq_desc.h"
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ * (keep this list in sync with the functions declared below)
+ */
+#define vnic_cq_service fnic2_cq_service
+#define vnic_cq_free fnic2_cq_free
+#define vnic_cq_alloc fnic2_cq_alloc
+#define vnic_cq_init fnic2_cq_init
+#define vnic_cq_clean fnic2_cq_clean
+
+/* Completion queue control */
+/* Memory-mapped hardware register layout (offsets noted); do not reorder. */
+struct vnic_cq_ctrl {
+	uint64_t	ring_base;		/* 0x00 */
+	uint32_t	ring_size;		/* 0x08 */
+	uint32_t	pad0;
+	uint32_t	flow_control_enable;	/* 0x10 */
+	uint32_t	pad1;
+	uint32_t	color_enable;		/* 0x18 */
+	uint32_t	pad2;
+	uint32_t	cq_head;		/* 0x20 */
+	uint32_t	pad3;
+	uint32_t	cq_tail;		/* 0x28 */
+	uint32_t	pad4;
+	uint32_t	cq_tail_color;		/* 0x30 */
+	uint32_t	pad5;
+	uint32_t	interrupt_enable;	/* 0x38 */
+	uint32_t	pad6;
+	uint32_t	cq_entry_enable;	/* 0x40 */
+	uint32_t	pad7;
+	uint32_t	cq_message_enable;	/* 0x48 */
+	uint32_t	pad8;
+	uint32_t	interrupt_offset;	/* 0x50 */
+	uint32_t	pad9;
+	uint64_t	cq_message_addr;	/* 0x58 */
+	uint32_t	pad10;
+};
+
+struct vnic_cq {
+	unsigned int		index;		/* CQ number within the vnic */
+	struct vnic_dev		*vdev;
+	struct vnic_cq_ctrl	__iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring	ring;
+	unsigned int		to_clean;	/* next descriptor index to service */
+	unsigned int		last_color;	/* color of already-serviced descriptors */
+};
+
+/*
+ * vnic_cq_service - process newly completed CQ descriptors
+ *
+ * A descriptor whose decoded color bit differs from cq->last_color is one
+ * the hardware has written since the last pass; software toggles last_color
+ * each time to_clean wraps the ring, keeping the comparison valid.
+ * Calls @q_service for each new descriptor, stopping early if it returns
+ * non-zero or once @work_to_do descriptors have been handled.
+ *
+ * Returns the number of descriptors serviced.
+ */
+static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
+					   unsigned int work_to_do,
+					   int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+					   uint8_t type, uint16_t q_number, uint16_t completed_index))
+{
+	struct cq_desc *cq_desc;
+	unsigned int work_done = 0;
+	uint16_t q_number, completed_index;
+	uint8_t type, color;
+
+	cq_desc = (struct cq_desc *)((uint8_t *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	cq_desc_dec(cq_desc, &type, &color,
+		&q_number, &completed_index);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq_desc, type,
+			q_number, completed_index))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		cq_desc = (struct cq_desc *)((uint8_t *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		cq_desc_dec(cq_desc, &type, &color,
+			&q_number, &completed_index);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+void vnic_cq_free(struct vnic_cq *cq);
+int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size);
+/* Parameter names match the definition in vnic_cq.c */
+void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
+		  unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
+		  unsigned int cq_tail_color, unsigned int interrupt_enable,
+		  unsigned int cq_entry_enable, unsigned int cq_message_enable,
+		  unsigned int interrupt_offset, uint64_t cq_message_addr);
+void vnic_cq_clean(struct vnic_cq *cq);
+
+#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_cq_copy.h b/drivers/staging/fnic2/src/vnic_cq_copy.h
new file mode 100644
index 0000000..2ddf0b5
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_cq_copy.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_CQ_COPY_H_
+#define _VNIC_CQ_COPY_H_
+
+#include "fcpio.h"
+
+/*
+ * vnic_cq_copy_service - service the copy-WQ completion queue
+ *
+ * Same color-toggle protocol as vnic_cq_service(), but descriptors are
+ * firmware requests (struct fcpio_fw_req) and only the color bit is decoded.
+ * Returns the number of descriptors handed to @q_service.
+ */
+static inline unsigned int vnic_cq_copy_service(struct vnic_cq *cq,
+						int (*q_service)(struct vnic_dev *vdev,
+						unsigned int index,
+						struct fcpio_fw_req *desc),
+						unsigned int work_to_do)
+
+{
+	struct fcpio_fw_req *desc;
+	unsigned int work_done = 0;
+	uint8_t color;
+
+	desc = (struct fcpio_fw_req *)((uint8_t *)cq->ring.descs +
+		cq->ring.desc_size * cq->to_clean);
+	fcpio_color_dec(desc, &color);
+
+	while (color != cq->last_color) {
+
+		if ((*q_service)(cq->vdev, cq->index, desc))
+			break;
+
+		cq->to_clean++;
+		if (cq->to_clean == cq->ring.desc_count) {
+			cq->to_clean = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		desc = (struct fcpio_fw_req *)((uint8_t *)cq->ring.descs +
+			cq->ring.desc_size * cq->to_clean);
+		fcpio_color_dec(desc, &color);
+
+		work_done++;
+		if (work_done >= work_to_do)
+			break;
+	}
+
+	return work_done;
+}
+
+#endif /* _VNIC_CQ_COPY_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_rq.c b/drivers/staging/fnic2/src/vnic_rq.c
new file mode 100644
index 0000000..1569091
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_rq.c
@@ -0,0 +1,196 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "vnic_dev.h"
+#include "vnic_rq.h"
+
+/*
+ * vnic_rq_alloc_bufs - allocate the per-descriptor bookkeeping entries
+ *
+ * vnic_rq_buf entries are allocated in blocks of VNIC_RQ_BUF_BLK_ENTRIES
+ * and chained into one circular list covering every ring descriptor.
+ *
+ * Returns 0 on success or -ENOMEM.  On failure, already-allocated blocks
+ * remain in rq->bufs[]; the caller (vnic_rq_alloc) releases them via
+ * vnic_rq_free().
+ */
+static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
+{
+	struct vnic_rq_buf *buf;
+	unsigned int i, j, count = rq->ring.desc_count;
+	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
+
+	for (i = 0; i < blks; i++) {
+		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!rq->bufs[i]) {
+			pr_err("Failed to alloc rq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = rq->bufs[i];
+		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (uint8_t *)rq->ring.descs +
+				rq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				/* last entry wraps back to the first */
+				buf->next = rq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
+				buf->next = rq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	rq->to_use = rq->to_clean = rq->bufs[0];
+	rq->buf_index = 0;
+
+	return 0;
+}
+
+/*
+ * vnic_rq_free - release the RQ descriptor ring and buffer blocks
+ *
+ * Safe after a partial vnic_rq_alloc_bufs() failure: unallocated bufs[]
+ * slots are NULL and kfree(NULL) is a no-op.
+ */
+void vnic_rq_free(struct vnic_rq *rq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = rq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &rq->ring);
+
+	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
+		kfree(rq->bufs[i]);
+		rq->bufs[i] = NULL;
+	}
+
+	rq->ctrl = NULL;
+}
+
+/*
+ * vnic_rq_alloc - bind RQ @index to its device resource and allocate state
+ *
+ * Disables the queue before touching the ring, then allocates the
+ * descriptor ring and the bookkeeping entries.  On bookkeeping failure the
+ * partially built state is torn down via vnic_rq_free().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	rq->index = index;
+	rq->vdev = vdev;
+
+	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
+	if (!rq->ctrl) {
+		pr_err("Failed to hook RQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_rq_disable(rq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_rq_alloc_bufs(rq);
+	if (err) {
+		vnic_rq_free(rq);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * vnic_rq_init - program the RQ control registers
+ *
+ * Unlike the WQ variant, the ring position is synchronized to the
+ * hardware's current fetch_index rather than reset to zero.
+ */
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+		  unsigned int error_interrupt_enable,
+		  unsigned int error_interrupt_offset)
+{
+	uint64_t paddr;
+	uint32_t fetch_index;
+
+	paddr = (uint64_t)rq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &rq->ctrl->ring_base);
+	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
+	iowrite32(cq_index, &rq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
+	iowrite32(0, &rq->ctrl->dropped_packet_count);
+	iowrite32(0, &rq->ctrl->error_status);
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+}
+
+/* Read the hardware error status register for this RQ. */
+unsigned int vnic_rq_error_status(struct vnic_rq *rq)
+{
+	return ioread32(&rq->ctrl->error_status);
+}
+
+/* Set the enable bit to start the receive queue. */
+void vnic_rq_enable(struct vnic_rq *rq)
+{
+	iowrite32(1, &rq->ctrl->enable);
+}
+
+/*
+ * vnic_rq_disable - stop the RQ and wait for hardware acknowledgement
+ *
+ * Polls the 'running' register for up to ~100us.
+ * Returns 0 once stopped, -ETIMEDOUT otherwise.
+ */
+int vnic_rq_disable(struct vnic_rq *rq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &rq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&rq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	pr_err("Failed to disable RQ[%d]\n", rq->index);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * vnic_rq_clean - reclaim every outstanding buffer and reset the ring
+ *
+ * Must only be called on a disabled RQ (enforced by the WARN_ON).
+ * Each posted buffer is handed to @buf_clean, then the software position
+ * is re-synchronized to the hardware fetch_index and the ring is cleared.
+ */
+void vnic_rq_clean(struct vnic_rq *rq,
+		   void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+{
+	struct vnic_rq_buf *buf;
+	uint32_t fetch_index;
+
+	WARN_ON(ioread32(&rq->ctrl->enable));
+
+	buf = rq->to_clean;
+
+	while (vnic_rq_desc_used(rq) > 0) {
+
+		(*buf_clean)(rq, buf);
+
+		buf = rq->to_clean = buf->next;
+		rq->ring.desc_avail++;
+	}
+
+	/* Use current fetch_index as the ring starting point */
+	fetch_index = ioread32(&rq->ctrl->fetch_index);
+	rq->to_use = rq->to_clean =
+		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
+			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
+	iowrite32(fetch_index, &rq->ctrl->posted_index);
+
+	rq->buf_index = 0;
+
+	vnic_dev_clear_desc_ring(&rq->ring);
+}
diff --git a/drivers/staging/fnic2/src/vnic_rq.h b/drivers/staging/fnic2/src/vnic_rq.h
new file mode 100644
index 0000000..4ffea9a
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_rq.h
@@ -0,0 +1,231 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RQ_H_
+#define _VNIC_RQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ * (keep this list in sync with the functions defined in this header)
+ */
+#define vnic_rq_desc_avail fnic2_rq_desc_avail
+#define vnic_rq_desc_used fnic2_rq_desc_used
+#define vnic_rq_next_desc fnic2_rq_next_desc
+#define vnic_rq_next_index fnic2_rq_next_index
+#define vnic_rq_next_buf_index fnic2_rq_next_buf_index
+#define vnic_rq_post fnic2_rq_post
+#define vnic_rq_posting_soon fnic2_rq_posting_soon
+#define vnic_rq_return_descs fnic2_rq_return_descs
+#define vnic_rq_service fnic2_rq_service
+#define vnic_rq_fill fnic2_rq_fill
+#define vnic_rq_free fnic2_rq_free
+#define vnic_rq_alloc fnic2_rq_alloc
+#define vnic_rq_init fnic2_rq_init
+#define vnic_rq_error_status fnic2_rq_error_status
+#define vnic_rq_enable fnic2_rq_enable
+#define vnic_rq_disable fnic2_rq_disable
+#define vnic_rq_clean fnic2_rq_clean
+
+/* Receive queue control */
+/* Memory-mapped hardware register layout (offsets noted); do not reorder. */
+struct vnic_rq_ctrl {
+	uint64_t ring_base;			/* 0x00 */
+	uint32_t ring_size;			/* 0x08 */
+	uint32_t pad0;
+	uint32_t posted_index;		/* 0x10 */
+	uint32_t pad1;
+	uint32_t cq_index;			/* 0x18 */
+	uint32_t pad2;
+	uint32_t enable;			/* 0x20 */
+	uint32_t pad3;
+	uint32_t running;			/* 0x28 */
+	uint32_t pad4;
+	uint32_t fetch_index;		/* 0x30 */
+	uint32_t pad5;
+	uint32_t error_interrupt_enable;	/* 0x38 */
+	uint32_t pad6;
+	uint32_t error_interrupt_offset;	/* 0x40 */
+	uint32_t pad7;
+	uint32_t error_status;		/* 0x48 */
+	uint32_t pad8;
+	uint32_t dropped_packet_count;	/* 0x50 */
+	uint32_t pad9;
+	uint32_t dropped_packet_count_rc;	/* 0x58 */
+	uint32_t pad10;
+};
+
+/* Break the vnic_rq_buf allocations into blocks of 64 entries */
+#define VNIC_RQ_BUF_BLK_ENTRIES 64
+#define VNIC_RQ_BUF_BLK_SZ \
+	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
+#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+
+/* desc_return value for vnic_rq_service(): give descriptor back to the ring */
+#define VNIC_RQ_RETURN_DESC 0
+
+/* doorbell batching mask used by vnic_rq_post(): ring every 16th descriptor */
+#ifndef VNIC_RQ_RETURN_RATE
+#define VNIC_RQ_RETURN_RATE             0xf     /* keep 2^n - 1 */
+#endif
+
+struct vnic_rq_buf {
+	struct vnic_rq_buf	*next;		/* circular list over all descriptors */
+	dma_addr_t		dma_addr;	/* device address of the data buffer */
+	void			*os_buf;	/* opaque OS buffer, owned by the caller */
+	unsigned int		os_buf_index;
+	unsigned int		len;
+	unsigned int		index;		/* descriptor index within the ring */
+	void			*desc;		/* pointer into ring.descs */
+};
+
+struct vnic_rq {
+	unsigned int			index;
+	struct vnic_dev			*vdev;
+	struct vnic_rq_ctrl __iomem	*ctrl;	/* memory-mapped */
+	struct vnic_dev_ring		ring;
+	struct vnic_rq_buf		*bufs[VNIC_RQ_BUF_BLKS_MAX];	/* bookkeeping blocks */
+	struct vnic_rq_buf		*to_use;	/* next slot to post to HW */
+	struct vnic_rq_buf		*to_clean;	/* next slot to reclaim */
+	void				*os_buf_head;
+	unsigned int			buf_index;
+	unsigned int			pkts_outstanding;
+};
+
+/* Number of descriptors software may still post. */
+static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
+{
+	/* how many does SW own? */
+	return rq->ring.desc_avail;
+}
+
+/*
+ * Number of descriptors currently posted to hardware.  NOTE(review): the
+ * "- 1" suggests one descriptor is held back (full vs empty ambiguity);
+ * confirm against the adapter spec.
+ */
+static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
+{
+	/* how many does HW own? */
+	return rq->ring.desc_count - rq->ring.desc_avail - 1;
+}
+
+/* Hardware descriptor for the next slot to be posted. */
+static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
+{
+	return rq->to_use->desc;
+}
+
+/* Ring index of the next slot to be posted. */
+static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
+{
+	return rq->to_use->index;
+}
+
+/* Return the current OS buffer counter, post-incrementing it. */
+static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
+{
+	return rq->buf_index++;
+}
+
+/*
+ * vnic_rq_post - hand one filled buffer to the hardware
+ *
+ * Records the buffer in the current slot, advances to_use, and writes the
+ * posted_index doorbell only every VNIC_RQ_RETURN_RATE + 1 descriptors to
+ * batch the MMIO writes.
+ */
+static inline void vnic_rq_post(struct vnic_rq *rq,
+				void *os_buf, unsigned int os_buf_index,
+				dma_addr_t dma_addr, unsigned int len)
+{
+	struct vnic_rq_buf *buf = rq->to_use;
+
+	buf->os_buf = os_buf;
+	buf->os_buf_index = os_buf_index;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	rq->to_use = buf;
+	rq->ring.desc_avail--;
+
+	/* Move the posted_index every nth descriptor
+	 */
+	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+		/* Adding write memory barrier prevents compiler and/or CPU
+		 * reordering, thus avoiding descriptor posting before
+		 * descriptor is initialized. Otherwise, hardware can read
+		 * stale descriptor fields.
+		 */
+		wmb();
+		iowrite32(buf->index, &rq->ctrl->posted_index);
+	}
+}
+
+/* True when the next vnic_rq_post() will ring the doorbell. */
+static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
+{
+	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
+}
+
+/* Give @count descriptors back to software ownership. */
+static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
+{
+	rq->ring.desc_avail += count;
+}
+
+/*
+ * vnic_rq_service - reclaim buffers up to and including @completed_index
+ *
+ * Walks from to_clean to the completing buffer, invoking @buf_service with
+ * skipped=1 for every buffer before the one that actually completed.
+ * When @desc_return is VNIC_RQ_RETURN_DESC, each descriptor is returned to
+ * the available pool as it is cleaned.
+ */
+static inline void vnic_rq_service(struct vnic_rq *rq,
+				   struct cq_desc *cq_desc, uint16_t completed_index,
+				   int desc_return, void (*buf_service)(struct vnic_rq *rq,
+				   struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+				   int skipped))
+{
+	struct vnic_rq_buf *buf;
+	int skipped;
+
+	buf = rq->to_clean;
+	while (1) {
+
+		skipped = (buf->index != completed_index);
+
+		(*buf_service)(rq, cq_desc, buf, skipped);
+
+		if (desc_return == VNIC_RQ_RETURN_DESC)
+			rq->ring.desc_avail++;
+
+		rq->to_clean = buf->next;
+
+		if (!skipped)
+			break;
+
+		buf = rq->to_clean;
+	}
+}
+
+/*
+ * vnic_rq_fill - post buffers until the ring is full
+ *
+ * Invokes @buf_fill while more than one descriptor remains available (one
+ * slot is left unused).  Stops on the first error, which is returned;
+ * returns 0 when the ring has been filled.
+ */
+static inline int vnic_rq_fill(struct vnic_rq *rq,
+			       int (*buf_fill)(struct vnic_rq *rq))
+{
+	int err;
+
+	while (vnic_rq_desc_avail(rq) > 1) {
+
+		err = (*buf_fill)(rq);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+void vnic_rq_free(struct vnic_rq *rq);
+int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size);
+void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
+		  unsigned int error_interrupt_enable,
+		  unsigned int error_interrupt_offset);
+unsigned int vnic_rq_error_status(struct vnic_rq *rq);
+void vnic_rq_enable(struct vnic_rq *rq);
+int vnic_rq_disable(struct vnic_rq *rq);
+void vnic_rq_clean(struct vnic_rq *rq,
+		   void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
+
+#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_wq.c b/drivers/staging/fnic2/src/vnic_wq.c
new file mode 100644
index 0000000..75138f0
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_wq.c
@@ -0,0 +1,183 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "vnic_dev.h"
+#include "vnic_wq.h"
+
+/*
+ * vnic_wq_alloc_bufs - allocate the per-descriptor bookkeeping entries
+ *
+ * vnic_wq_buf entries are allocated in blocks of VNIC_WQ_BUF_BLK_ENTRIES
+ * and chained into one circular list covering every ring descriptor.
+ *
+ * Returns 0 on success or -ENOMEM.  On failure, already-allocated blocks
+ * remain in wq->bufs[]; the caller (vnic_wq_alloc) releases them via
+ * vnic_wq_free().
+ */
+static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
+{
+	struct vnic_wq_buf *buf;
+	unsigned int i, j, count = wq->ring.desc_count;
+	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
+
+	for (i = 0; i < blks; i++) {
+		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
+		if (!wq->bufs[i]) {
+			pr_err("Failed to alloc wq_bufs\n");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < blks; i++) {
+		buf = wq->bufs[i];
+		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
+			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
+			buf->desc = (uint8_t *)wq->ring.descs +
+				wq->ring.desc_size * buf->index;
+			if (buf->index + 1 == count) {
+				/* last entry wraps back to the first */
+				buf->next = wq->bufs[0];
+				break;
+			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
+				buf->next = wq->bufs[i + 1];
+			} else {
+				buf->next = buf + 1;
+				buf++;
+			}
+		}
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	return 0;
+}
+
+/*
+ * vnic_wq_free - release the WQ descriptor ring and buffer blocks
+ *
+ * Safe after a partial vnic_wq_alloc_bufs() failure: unallocated bufs[]
+ * slots are NULL and kfree(NULL) is a no-op.
+ */
+void vnic_wq_free(struct vnic_wq *wq)
+{
+	struct vnic_dev *vdev;
+	unsigned int i;
+
+	vdev = wq->vdev;
+
+	vnic_dev_free_desc_ring(vdev, &wq->ring);
+
+	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
+		kfree(wq->bufs[i]);
+		wq->bufs[i] = NULL;
+	}
+
+	wq->ctrl = NULL;
+}
+
+/*
+ * vnic_wq_alloc - bind WQ @index to its device resource and allocate state
+ *
+ * Disables the queue before touching the ring, then allocates the
+ * descriptor ring and the bookkeeping entries.  On bookkeeping failure the
+ * partially built state is torn down via vnic_wq_free().
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		pr_err("Failed to hook WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	vnic_wq_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	err = vnic_wq_alloc_bufs(wq);
+	if (err) {
+		vnic_wq_free(wq);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * vnic_wq_init - program the WQ control registers
+ *
+ * Unlike the RQ variant, fetch_index and posted_index are reset to zero.
+ */
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+		  unsigned int error_interrupt_enable,
+		  unsigned int error_interrupt_offset)
+{
+	uint64_t paddr;
+
+	paddr = (uint64_t)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+	iowrite32(0, &wq->ctrl->error_status);
+}
+
+/* Read the hardware error status register for this WQ. */
+unsigned int vnic_wq_error_status(struct vnic_wq *wq)
+{
+	return ioread32(&wq->ctrl->error_status);
+}
+
+/* Set the enable bit to start the work queue. */
+void vnic_wq_enable(struct vnic_wq *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+/*
+ * vnic_wq_disable - stop the WQ and wait for hardware acknowledgement
+ *
+ * Polls the 'running' register for up to ~100us.
+ * Returns 0 once stopped, -ETIMEDOUT otherwise.
+ */
+int vnic_wq_disable(struct vnic_wq *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	pr_err("Failed to disable WQ[%d]\n", wq->index);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * vnic_wq_clean - reclaim every outstanding buffer and reset the ring
+ *
+ * Must only be called on a disabled WQ (enforced by the WARN_ON).
+ * Each in-flight buffer is handed to @buf_clean, then indices and the
+ * descriptor ring contents are reset to zero.
+ */
+void vnic_wq_clean(struct vnic_wq *wq,
+		   void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+{
+	struct vnic_wq_buf *buf;
+
+	WARN_ON(ioread32(&wq->ctrl->enable));
+
+	buf = wq->to_clean;
+
+	while (vnic_wq_desc_used(wq) > 0) {
+
+		(*buf_clean)(wq, buf);
+
+		buf = wq->to_clean = buf->next;
+		wq->ring.desc_avail++;
+	}
+
+	wq->to_use = wq->to_clean = wq->bufs[0];
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
diff --git a/drivers/staging/fnic2/src/vnic_wq.h b/drivers/staging/fnic2/src/vnic_wq.h
new file mode 100644
index 0000000..bff2f4d
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_wq.h
@@ -0,0 +1,174 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_H_
+#define _VNIC_WQ_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+#include "vnic_cq.h"
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_wq_desc_avail fnic2_wq_desc_avail
+#define vnic_wq_desc_used fnic2_wq_desc_used
+#define vnic_wq_next_desc fnic2_wq_next_desc
+#define vnic_wq_post fnic2_wq_post
+#define vnic_wq_service fnic2_wq_service
+#define vnic_wq_free fnic2_wq_free
+#define vnic_wq_alloc fnic2_wq_alloc
+#define vnic_wq_init fnic2_wq_init
+#define vnic_wq_error_status fnic2_wq_error_status
+#define vnic_wq_enable fnic2_wq_enable
+#define vnic_wq_disable fnic2_wq_disable
+#define vnic_wq_clean fnic2_wq_clean
+
+/* Work queue control registers, mapped directly over device iomem.
+ * The pad fields space each 32-bit register onto a 64-bit boundary
+ * (see the offset comments).
+ */
+struct vnic_wq_ctrl {
+	uint64_t ring_base;			/* 0x00 */
+	uint32_t ring_size;			/* 0x08 */
+	uint32_t pad0;
+	uint32_t posted_index;			/* 0x10 */
+	uint32_t pad1;
+	uint32_t cq_index;			/* 0x18 */
+	uint32_t pad2;
+	uint32_t enable;			/* 0x20 */
+	uint32_t pad3;
+	uint32_t running;			/* 0x28 */
+	uint32_t pad4;
+	uint32_t fetch_index;			/* 0x30 */
+	uint32_t pad5;
+	uint32_t dca_value;			/* 0x38 */
+	uint32_t pad6;
+	uint32_t error_interrupt_enable;	/* 0x40 */
+	uint32_t pad7;
+	uint32_t error_interrupt_offset;	/* 0x48 */
+	uint32_t pad8;
+	uint32_t error_status;			/* 0x50 */
+	uint32_t pad9;
+};
+
+/* Per-descriptor SW bookkeeping; linked into a circular list via next */
+struct vnic_wq_buf {
+	struct vnic_wq_buf	*next;
+	dma_addr_t		dma_addr;
+	void			*os_buf;
+	unsigned int		len;
+	unsigned int		index;
+	int			sop;
+	void			*desc;
+};
+
+/* Break the vnic_wq_buf allocations into blocks of 64 entries */
+#define VNIC_WQ_BUF_BLK_ENTRIES 64
+#define VNIC_WQ_BUF_BLK_SZ \
+	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+/* Work queue: control iomem, DMA descriptor ring, and buffer blocks.
+ * to_use/to_clean track the SW posting and completion cursors.
+ */
+struct vnic_wq {
+	unsigned int			index;
+	struct vnic_dev			*vdev;
+	struct vnic_wq_ctrl __iomem	*ctrl;	/* memory-mapped */
+	struct vnic_dev_ring		ring;
+	struct vnic_wq_buf		*bufs[VNIC_WQ_BUF_BLKS_MAX];
+	struct vnic_wq_buf		*to_use;
+	struct vnic_wq_buf		*to_clean;
+	unsigned int			pkts_outstanding;
+};
+
+static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+/* Descriptor at the current SW posting cursor */
+static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
+{
+	return wq->to_use->desc;
+}
+
+/* Record a posted fragment in the current buffer, advance the cursor,
+ * and on end-of-packet (eop) publish the new posted index to HW.  Note
+ * the index written is that of the *next* (first unposted) buffer.
+ */
+static inline void vnic_wq_post(struct vnic_wq *wq,
+				void *os_buf, dma_addr_t dma_addr,
+				unsigned int len, int sop, int eop)
+{
+	struct vnic_wq_buf *buf = wq->to_use;
+
+	buf->sop = sop;
+	buf->os_buf = eop ? os_buf : NULL;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	if (eop) {
+		/* Adding write memory barrier prevents compiler and/or CPU
+		 * reordering, thus avoiding descriptor posting before
+		 * descriptor is initialized. Otherwise, hardware can read
+		 * stale descriptor fields.
+		 */
+		wmb();
+		iowrite32(buf->index, &wq->ctrl->posted_index);
+	}
+	wq->to_use = buf;
+
+	wq->ring.desc_avail--;
+}
+
+/* Walk completed buffers from the clean cursor up to and including
+ * completed_index, invoking buf_service on each and returning the
+ * descriptors to SW ownership.
+ */
+static inline void vnic_wq_service(struct vnic_wq *wq,
+				   struct cq_desc *cq_desc, uint16_t completed_index,
+				   void (*buf_service)(struct vnic_wq *wq,
+				   struct cq_desc *cq_desc, struct vnic_wq_buf *buf))
+{
+	struct vnic_wq_buf *buf;
+
+	buf = wq->to_clean;
+	while (1) {
+
+		(*buf_service)(wq, cq_desc, buf);
+
+		wq->ring.desc_avail++;
+
+		wq->to_clean = buf->next;
+
+		if (buf->index == completed_index)
+			break;
+
+		buf = wq->to_clean;
+	}
+}
+
+void vnic_wq_free(struct vnic_wq *wq);
+int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
+		  unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+		  unsigned int error_interrupt_enable,
+		  unsigned int error_interrupt_offset);
+unsigned int vnic_wq_error_status(struct vnic_wq *wq);
+void vnic_wq_enable(struct vnic_wq *wq);
+int vnic_wq_disable(struct vnic_wq *wq);
+void vnic_wq_clean(struct vnic_wq *wq,
+		   void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_wq_copy.c b/drivers/staging/fnic2/src/vnic_wq_copy.c
new file mode 100644
index 0000000..605c693
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_wq_copy.c
@@ -0,0 +1,115 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_wq_copy.h"
+
+/* Start the copy WQ by setting its enable register */
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
+{
+	iowrite32(1, &wq->ctrl->enable);
+}
+
+/* Stop the copy WQ and poll (up to ~100us) until HW reports it is no
+ * longer running.  Returns 0 on success, -ETIMEDOUT if HW never
+ * acknowledges — same error code as vnic_wq_disable() for the same
+ * condition (was -ENODEV, which misrepresented a timeout).
+ */
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
+{
+	unsigned int wait;
+
+	iowrite32(0, &wq->ctrl->enable);
+
+	/* Wait for HW to ACK disable request */
+	for (wait = 0; wait < 100; wait++) {
+		if (!(ioread32(&wq->ctrl->running)))
+			return 0;
+		udelay(1);
+	}
+
+	pr_err("Failed to disable Copy WQ[%d], fetch index = %d, posted_index = %d\n",
+	       wq->index, ioread32(&wq->ctrl->fetch_index),
+	       ioread32(&wq->ctrl->posted_index));
+
+	return -ETIMEDOUT;
+}
+
+/* Drain any in-use descriptors through q_clean, reset the SW cursors,
+ * zero HW indexes and error status, and clear the descriptor ring.
+ * Caller must disable the WQ first (enforced by the WARN_ON below).
+ */
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+			void (*q_clean)(struct vnic_wq_copy *wq,
+			struct fcpio_host_req *wq_desc))
+{
+	WARN_ON(ioread32(&wq->ctrl->enable));
+
+	if (vnic_wq_copy_desc_in_use(wq))
+		vnic_wq_copy_service(wq, -1, q_clean);
+
+	wq->to_use_index = wq->to_clean_index = 0;
+
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(0, &wq->ctrl->error_status);
+
+	vnic_dev_clear_desc_ring(&wq->ring);
+}
+
+/* Release the descriptor ring and drop the mapped control handle. */
+void vnic_wq_copy_free(struct vnic_wq_copy *wq)
+{
+	vnic_dev_free_desc_ring(wq->vdev, &wq->ring);
+	wq->ctrl = NULL;
+}
+
+/* Hook the copy WQ's control registers and allocate its DMA descriptor
+ * ring.  Returns 0 on success, -EINVAL if the WQ resource is missing,
+ * or the ring-allocation error.
+ */
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+		       unsigned int index, unsigned int desc_count,
+		       unsigned int desc_size)
+{
+	int err;
+
+	wq->index = index;
+	wq->vdev = vdev;
+	wq->to_use_index = wq->to_clean_index = 0;
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
+	if (!wq->ctrl) {
+		pr_err("Failed to hook COPY WQ[%d] resource\n", index);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): disable return value is ignored here — presumably
+	 * best-effort quiesce before allocation; confirm intent.
+	 */
+	vnic_wq_copy_disable(wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/* Program the copy WQ control registers: ring base/size, reset both
+ * indexes, and bind the completion queue and error interrupt.
+ * NOTE(review): unlike vnic_wq_init(), error_status is not cleared
+ * here — confirm whether that is intentional.
+ */
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+		       unsigned int error_interrupt_enable,
+		       unsigned int error_interrupt_offset)
+{
+	uint64_t paddr;
+
+	paddr = (uint64_t)wq->ring.base_addr | VNIC_PADDR_TARGET;
+	writeq(paddr, &wq->ctrl->ring_base);
+	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
+	iowrite32(0, &wq->ctrl->fetch_index);
+	iowrite32(0, &wq->ctrl->posted_index);
+	iowrite32(cq_index, &wq->ctrl->cq_index);
+	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
+	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
+}
diff --git a/drivers/staging/fnic2/src/vnic_wq_copy.h b/drivers/staging/fnic2/src/vnic_wq_copy.h
new file mode 100644
index 0000000..2ef670c
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_wq_copy.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_WQ_COPY_H_
+#define _VNIC_WQ_COPY_H_
+
+#include <linux/pci.h>
+#include "vnic_wq.h"
+#include "fcpio.h"
+
+/* Copy work queue: like vnic_wq but indexed by position rather than
+ * by per-buffer bookkeeping; descriptors are fcpio_host_req entries.
+ */
+struct vnic_wq_copy {
+	unsigned int			index;
+	struct vnic_dev			*vdev;
+	struct vnic_wq_ctrl __iomem	*ctrl;	/* memory-mapped */
+	struct vnic_dev_ring		ring;
+	unsigned int			to_use_index;
+	unsigned int			to_clean_index;
+};
+
+/* Descriptors SW still owns */
+static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
+{
+	return wq->ring.desc_avail;
+}
+
+/* Descriptors currently owned by HW */
+static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
+{
+	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
+}
+
+/* Descriptor at the current SW posting cursor */
+static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
+{
+	struct fcpio_host_req *desc = wq->ring.descs;
+
+	return &desc[wq->to_use_index];
+}
+
+/* Advance the posting cursor (with wrap) and publish it to HW */
+static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
+{
+
+	((wq->to_use_index + 1) == wq->ring.desc_count) ?
+		(wq->to_use_index = 0) : (wq->to_use_index++);
+	wq->ring.desc_avail--;
+
+	/* Adding write memory barrier prevents compiler and/or CPU
+	 * reordering, thus avoiding descriptor posting before
+	 * descriptor is initialized. Otherwise, hardware can read
+	 * stale descriptor fields.
+	 */
+	wmb();
+
+	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
+}
+
+/* Return all descriptors up to and including index to SW ownership,
+ * accounting for ring wrap-around.
+ */
+static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, uint16_t index)
+{
+	unsigned int cnt;
+
+	if (wq->to_clean_index <= index)
+		cnt = (index - wq->to_clean_index) + 1;
+	else
+		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
+
+	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
+	wq->ring.desc_avail += cnt;
+
+}
+
+/* Service completed descriptors from the clean cursor up to and
+ * including completed_index, invoking q_service on each.  Passing
+ * completed_index == (uint16_t)-1 drains every in-use descriptor
+ * (used by vnic_wq_copy_clean).
+ */
+static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
+					uint16_t completed_index,
+					void (*q_service)(struct vnic_wq_copy *wq,
+					struct fcpio_host_req *wq_desc))
+{
+	struct fcpio_host_req *wq_desc = wq->ring.descs;
+	unsigned int curr_index;
+
+	while (1) {
+
+		if (q_service)
+			(*q_service)(wq, &wq_desc[wq->to_clean_index]);
+
+		wq->ring.desc_avail++;
+
+		curr_index = wq->to_clean_index;
+
+		/* increment the to-clean index so that we start
+		 * with an unprocessed index next time we enter the loop
+		 */
+		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
+			(wq->to_clean_index = 0) : (wq->to_clean_index++);
+
+		if (curr_index == completed_index)
+			break;
+
+		/* we have cleaned all the entries */
+		if ((completed_index == (uint16_t)-1) &&
+		    (wq->to_clean_index == wq->to_use_index))
+			break;
+	}
+}
+
+void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
+int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
+void vnic_wq_copy_free(struct vnic_wq_copy *wq);
+int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
+		       unsigned int index, unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
+		       unsigned int error_interrupt_enable,
+		       unsigned int error_interrupt_offset);
+void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
+			void (*q_clean)(struct vnic_wq_copy *wq,
+			struct fcpio_host_req *wq_desc));
+
+#endif /* _VNIC_WQ_COPY_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 09/10] staging: fnic2 add vnic handling
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (7 preceding siblings ...)
  2018-04-05 21:22 ` [PATCH 08/10] staging: fnic2 add vnic queue interface Oliver Smith-Denny
@ 2018-04-05 21:23 ` Oliver Smith-Denny
  2018-04-05 21:24 ` [PATCH 10/10] staging: fnic2 add build and config Oliver Smith-Denny
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:23 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain low level interfaces for managing
hardware and firmware features.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/fnic2/src/vnic_dev.c      | 701 ++++++++++++++++++++++++++++++
 drivers/staging/fnic2/src/vnic_dev.h      | 163 +++++++
 drivers/staging/fnic2/src/vnic_devcmd.h   | 349 +++++++++++++++
 drivers/staging/fnic2/src/vnic_intr.c     |  61 +++
 drivers/staging/fnic2/src/vnic_intr.h     | 116 +++++
 drivers/staging/fnic2/src/vnic_nic.h      |  78 ++++
 drivers/staging/fnic2/src/vnic_resource.h |  50 +++
 drivers/staging/fnic2/src/vnic_scsi.h     |  88 ++++
 drivers/staging/fnic2/src/vnic_stats.h    |  68 +++
 9 files changed, 1674 insertions(+)
 create mode 100644 drivers/staging/fnic2/src/vnic_dev.c
 create mode 100644 drivers/staging/fnic2/src/vnic_dev.h
 create mode 100644 drivers/staging/fnic2/src/vnic_devcmd.h
 create mode 100644 drivers/staging/fnic2/src/vnic_intr.c
 create mode 100644 drivers/staging/fnic2/src/vnic_intr.h
 create mode 100644 drivers/staging/fnic2/src/vnic_nic.h
 create mode 100644 drivers/staging/fnic2/src/vnic_resource.h
 create mode 100644 drivers/staging/fnic2/src/vnic_scsi.h
 create mode 100644 drivers/staging/fnic2/src/vnic_stats.h

diff --git a/drivers/staging/fnic2/src/vnic_dev.c b/drivers/staging/fnic2/src/vnic_dev.c
new file mode 100644
index 0000000..5e04d63
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_dev.c
@@ -0,0 +1,701 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/slab.h>
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+#include "vnic_dev.h"
+#include "vnic_stats.h"
+
+/* One discovered BAR0 resource: mapped address and entry count */
+struct vnic_res {
+	void __iomem	*vaddr;
+	unsigned int	count;
+};
+
+/* Per-adapter device context: discovered resources, devcmd channel,
+ * and DMA-coherent buffers shared with firmware (notify, stats,
+ * fw_info) together with their bus addresses.
+ */
+struct vnic_dev {
+	void				*priv;
+	struct pci_dev			*pdev;
+	struct vnic_res			res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode		intr_mode;
+	struct vnic_devcmd __iomem	*devcmd;
+	struct vnic_devcmd_notify	*notify;
+	struct vnic_devcmd_notify	notify_copy;
+	dma_addr_t			notify_pa;
+	uint32_t			*linkstatus;
+	dma_addr_t			linkstatus_pa;
+	struct vnic_stats		*stats;
+	dma_addr_t			stats_pa;
+	struct vnic_devcmd_fw_info	*fw_info;
+	dma_addr_t			fw_info_pa;
+};
+
+#define VNIC_MAX_RES_HDR_SIZE \
+	(sizeof(struct vnic_resource_header) + \
+	sizeof(struct vnic_resource) * RES_TYPE_MAX)
+#define VNIC_RES_STRIDE	128
+
+/* Opaque driver-private pointer registered via vnic_dev_register() */
+void *vnic_dev_priv(struct vnic_dev *vdev)
+{
+	return vdev->priv;
+}
+
+/* Parse the BAR0 resource table: validate the header magic/version,
+ * then record the mapped address and count of each recognized resource
+ * type into vdev->res[].  Returns 0 on success, -EINVAL on a bad or
+ * out-of-bounds table.
+ */
+static int vnic_dev_discover_res(struct vnic_dev *vdev,
+				 struct vnic_dev_bar *bar)
+{
+	struct vnic_resource_header __iomem *rh;
+	struct vnic_resource __iomem *r;
+	uint8_t type;
+
+	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
+		pr_err("vNIC BAR0 res hdr length error\n");
+		return -EINVAL;
+	}
+
+	rh = bar->vaddr;
+	if (!rh) {
+		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+		return -EINVAL;
+	}
+
+	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
+		ioread32(&rh->version) != VNIC_RES_VERSION) {
+		pr_err("vNIC BAR0 res magic/version error exp %lx/%lx curr %x/%x\n",
+			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			ioread32(&rh->magic), ioread32(&rh->version));
+		return -EINVAL;
+	}
+
+	/* resource entries follow immediately after the header */
+	r = (struct vnic_resource __iomem *)(rh + 1);
+
+	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
+
+		uint8_t bar_num = ioread8(&r->bar);
+		uint32_t bar_offset = ioread32(&r->bar_offset);
+		uint32_t count = ioread32(&r->count);
+		uint32_t len;
+
+		r++;
+
+		if (bar_num != 0)  /* only mapping in BAR0 resources */
+			continue;
+
+		switch (type) {
+		case RES_TYPE_WQ:
+		case RES_TYPE_RQ:
+		case RES_TYPE_CQ:
+		case RES_TYPE_INTR_CTRL:
+			/* each count is stride bytes long */
+			len = count * VNIC_RES_STRIDE;
+			if (len + bar_offset > bar->len) {
+				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+					type, bar_offset,
+					len,
+					bar->len);
+				return -EINVAL;
+			}
+			break;
+		case RES_TYPE_INTR_PBA_LEGACY:
+		case RES_TYPE_DEVCMD:
+			len = count;
+			break;
+		default:
+			continue;
+		}
+
+		vdev->res[type].count = count;
+		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
+	}
+
+	return 0;
+}
+
+/* Number of entries discovered for a resource type */
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+				    enum vnic_res_type type)
+{
+	return vdev->res[type].count;
+}
+
+/* Mapped address of the index'th instance of a resource type, or NULL
+ * if the type was not discovered.  Strided types (WQ/RQ/CQ/INTR_CTRL)
+ * are indexed; all others ignore index.
+ */
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+			       unsigned int index)
+{
+	if (!vdev->res[type].vaddr)
+		return NULL;
+
+	switch (type) {
+	case RES_TYPE_WQ:
+	case RES_TYPE_RQ:
+	case RES_TYPE_CQ:
+	case RES_TYPE_INTR_CTRL:
+		return (char __iomem *)vdev->res[type].vaddr +
+					index * VNIC_RES_STRIDE;
+	default:
+		return (char __iomem *)vdev->res[type].vaddr;
+	}
+}
+
+/* Fill in ring geometry (aligned count/size/total) and return the
+ * unaligned byte size the caller must allocate.
+ */
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+				     unsigned int desc_count,
+				     unsigned int desc_size)
+{
+	/* The base address of the desc rings must be 512 byte aligned.
+	 * Descriptor count is aligned to groups of 32 descriptors.  A
+	 * count of 0 means the maximum 4096 descriptors.  Descriptor
+	 * size is aligned to 16 bytes.
+	 */
+
+	unsigned int count_align = 32;
+	unsigned int desc_align = 16;
+
+	ring->base_align = 512;
+
+	if (desc_count == 0)
+		desc_count = 4096;
+
+	ring->desc_count = ALIGN(desc_count, count_align);
+
+	ring->desc_size = ALIGN(desc_size, desc_align);
+
+	ring->size = ring->desc_count * ring->desc_size;
+	ring->size_unaligned = ring->size + ring->base_align;
+
+	return ring->size_unaligned;
+}
+
+/* Zero the aligned portion of the descriptor ring */
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
+{
+	memset(ring->descs, 0, ring->size);
+}
+
+/* Allocate a DMA-coherent descriptor ring, align its base to
+ * ring->base_align, zero it, and mark all but one descriptor
+ * available.  Returns 0 or -ENOMEM.
+ */
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+			     unsigned int desc_count, unsigned int desc_size)
+{
+	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
+
+	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+		ring->size_unaligned,
+		&ring->base_addr_unaligned);
+
+	if (!ring->descs_unaligned) {
+		pr_err("Failed to allocate ring. size = %d, aborting\n",
+			(int)ring->size);
+		return -ENOMEM;
+	}
+
+	/* align the bus address, then offset the CPU address to match */
+	ring->base_addr = ALIGN(ring->base_addr_unaligned,
+		ring->base_align);
+	ring->descs = (uint8_t *)ring->descs_unaligned +
+		(ring->base_addr - ring->base_addr_unaligned);
+
+	vnic_dev_clear_desc_ring(ring);
+
+	ring->desc_avail = ring->desc_count - 1;
+
+	return 0;
+}
+
+/* Free the ring allocated by vnic_dev_alloc_desc_ring(); safe to call
+ * when the ring was never allocated (descs == NULL).
+ */
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+	if (ring->descs) {
+		pci_free_consistent(vdev->pdev,
+			ring->size_unaligned,
+			ring->descs_unaligned,
+			ring->base_addr_unaligned);
+		ring->descs = NULL;
+	}
+}
+
+/* Issue a devcmd to firmware through the mapped devcmd region.
+ * a0/a1 carry arguments in (for WRITE-direction commands) and results
+ * out (for READ-direction commands).  wait is the poll budget in units
+ * of 100us.  Returns 0, a negative host errno translated from the fw
+ * error code, -EBUSY if the channel is busy, or -ETIMEDOUT.
+ */
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+		 uint64_t *a0, uint64_t *a1, int wait)
+{
+	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
+	int delay;
+	uint32_t status;
+	/* convert from fw's version of error.h to host's version */
+	static const int dev_cmd_err[] = {
+		0,	/* ERR_SUCCESS */
+		EINVAL,	/* ERR_EINVAL */
+		EFAULT,	/* ERR_EFAULT */
+		EPERM,	/* ERR_EPERM */
+		EBUSY,  /* ERR_EBUSY */
+	};
+	int err;
+
+	status = ioread32(&devcmd->status);
+	if (status & STAT_BUSY) {
+		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+		return -EBUSY;
+	}
+
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
+		writeq(*a0, &devcmd->args[0]);
+		writeq(*a1, &devcmd->args[1]);
+		wmb();
+	}
+
+	iowrite32(cmd, &devcmd->cmd);
+
+	/* fire-and-forget commands complete without polling for status */
+	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+
+		udelay(100);
+
+		status = ioread32(&devcmd->status);
+		if (!(status & STAT_BUSY)) {
+
+			if (status & STAT_ERROR) {
+				uint64_t fw_err = readq(&devcmd->args[0]);
+
+				/* bounds-check: fw may report error codes
+				 * newer than this translation table
+				 */
+				if (fw_err < ARRAY_SIZE(dev_cmd_err))
+					err = dev_cmd_err[fw_err];
+				else
+					err = EINVAL;
+				pr_err("Error %d devcmd %d\n",
+					err, _CMD_N(cmd));
+				return -err;
+			}
+
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
+				rmb();
+				*a0 = readq(&devcmd->args[0]);
+				*a1 = readq(&devcmd->args[1]);
+			}
+
+			return 0;
+		}
+	}
+
+	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+	return -ETIMEDOUT;
+}
+
+/* Fetch firmware info from the device, caching the DMA buffer and
+ * result after the first call.  *fw_info always points at the cached
+ * buffer on return; the devcmd error (if any) is returned alongside.
+ */
+int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info)
+{
+	uint64_t a0, a1 = 0;
+	int wait = 1000;
+	int err = 0;
+
+	if (!vdev->fw_info) {
+		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_fw_info),
+			&vdev->fw_info_pa);
+		if (!vdev->fw_info)
+			return -ENOMEM;
+
+		a0 = vdev->fw_info_pa;
+
+		/* only get fw_info once and cache it */
+		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+	}
+
+	*fw_info = vdev->fw_info;
+
+	return err;
+}
+
+/* Read size bytes (1/2/4/8) at offset from the device spec area into
+ * *value.  Any other size hits BUG() — callers pass only literal sizes.
+ */
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
+		  void *value)
+{
+	uint64_t a0, a1;
+	int wait = 1000;
+	int err;
+
+	a0 = offset;
+	a1 = size;
+
+	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
+
+	switch (size) {
+	case 1:
+		*(uint8_t *)value = (uint8_t)a0;
+		break;
+	case 2:
+		*(uint16_t *)value = (uint16_t)a0;
+		break;
+	case 4:
+		*(uint32_t *)value = (uint32_t)a0;
+		break;
+	case 8:
+		*(uint64_t *)value = a0;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return err;
+}
+
+/* Ask firmware to zero its statistics counters */
+int vnic_dev_stats_clear(struct vnic_dev *vdev)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
+}
+
+/* DMA current statistics into a lazily-allocated coherent buffer and
+ * point *stats at it.  Returns 0, -ENOMEM, or the devcmd error.
+ */
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
+{
+	uint64_t a0, a1;
+	int wait = 1000;
+
+	if (!vdev->stats) {
+		vdev->stats = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_stats), &vdev->stats_pa);
+		if (!vdev->stats)
+			return -ENOMEM;
+	}
+
+	*stats = vdev->stats;
+	a0 = vdev->stats_pa;
+	a1 = sizeof(struct vnic_stats);
+
+	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
+}
+
+/* Thin wrappers around single firmware commands */
+int vnic_dev_close(struct vnic_dev *vdev)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
+}
+
+int vnic_dev_enable(struct vnic_dev *vdev)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_disable(struct vnic_dev *vdev)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
+}
+
+int vnic_dev_open(struct vnic_dev *vdev, int arg)
+{
+	uint64_t a0 = (uint32_t)arg, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
+}
+
+/* Poll CMD_OPEN completion: *done is set to 1 when fw reports the open
+ * finished (a0 == 0).  Returns the devcmd error, if any.
+ */
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+/* Request a firmware soft reset */
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+{
+	uint64_t a0 = (uint32_t)arg, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
+}
+
+/* Poll soft-reset completion; same contract as vnic_dev_open_done() */
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	*done = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	*done = (a0 == 0);
+
+	return 0;
+}
+
+/* Notify firmware that the host detected a hang.  a0/a1 are
+ * zero-initialized: the originals were uninitialized stack values,
+ * which vnic_dev_cmd() writes to hardware for WRITE-direction
+ * commands — reading them would be undefined behavior.
+ */
+int vnic_dev_hang_notify(struct vnic_dev *vdev)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
+}
+
+/* Fetch the adapter MAC address from firmware into mac_addr[ETH_ALEN].
+ * mac_addr is pre-zeroed so a failed devcmd leaves a defined value.
+ * a0/a1 are zero-initialized (they were uninitialized stack values
+ * passed by pointer into vnic_dev_cmd(), where reading them is UB).
+ */
+int vnic_dev_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err, i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = 0;
+
+	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+	if (err)
+		return err;
+
+	/* fw returns the address packed into the low bytes of a0 */
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = ((uint8_t *)&a0)[i];
+
+	return 0;
+}
+
+/* Set the RX packet filter from the given boolean flags.  Best-effort:
+ * a devcmd failure is logged, not returned.
+ */
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+			    int broadcast, int promisc, int allmulti)
+{
+	uint64_t a0, a1 = 0;
+	int wait = 1000;
+	int err;
+
+	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
+	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
+	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
+	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
+	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+
+	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't set packet filter\n");
+}
+
+/* Register a unicast/multicast address with the adapter.  The 6-byte
+ * address is packed into the low bytes of a0.  Best-effort: failure is
+ * logged, not returned.
+ */
+void vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((uint8_t *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't add addr %02x:%02x:%02x:%02x:%02x:%02x, %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+/* Unregister an address previously added with vnic_dev_add_addr() */
+void vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr)
+{
+	uint64_t a0 = 0, a1 = 0;
+	int wait = 1000;
+	int err;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		((uint8_t *)&a0)[i] = addr[i];
+
+	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+	if (err)
+		pr_err("Can't del addr %02x:%02x:%02x:%02x:%02x:%02x, %d\n",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
+			err);
+}
+
+/* Register the notify buffer with firmware: a0 carries its bus address,
+ * a1 packs the interrupt number (upper half) and buffer size (lower).
+ * The coherent buffer is allocated lazily and kept for the device's
+ * lifetime (freed in vnic_dev_unregister()).
+ */
+int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr)
+{
+	uint64_t a0, a1;
+	int wait = 1000;
+
+	if (!vdev->notify) {
+		vdev->notify = pci_alloc_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_notify),
+			&vdev->notify_pa);
+		if (!vdev->notify)
+			return -ENOMEM;
+	}
+
+	a0 = vdev->notify_pa;
+	a1 = ((uint64_t)intr << 32) & 0x0000ffff00000000ULL;
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+/* Deregister the notify buffer (paddr 0, intr -1); buffer itself is
+ * freed later in vnic_dev_unregister().
+ */
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	uint64_t a0, a1;
+	int wait = 1000;
+
+	a0 = 0;  /* paddr = 0 to unset notify buffer */
+	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
+	a1 += sizeof(struct vnic_devcmd_notify);
+
+	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+}
+
+/* Snapshot the fw-written notify buffer into notify_copy, retrying
+ * until the embedded checksum (word 0 == sum of remaining words)
+ * matches — i.e. until we read a consistent snapshot while fw may be
+ * concurrently updating it.  Returns 1 on success, 0 if no notify
+ * buffer is registered.
+ */
+static int vnic_dev_notify_ready(struct vnic_dev *vdev)
+{
+	uint32_t *words;
+	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
+	unsigned int i;
+	uint32_t csum;
+
+	if (!vdev->notify)
+		return 0;
+
+	do {
+		csum = 0;
+		memcpy(&vdev->notify_copy, vdev->notify,
+			sizeof(struct vnic_devcmd_notify));
+		words = (uint32_t *)&vdev->notify_copy;
+		for (i = 1; i < nwords; i++)
+			csum += words[i];
+	} while (csum != words[0]);
+
+	return 1;
+}
+
+/* Issue CMD_INIT with the given argument */
+int vnic_dev_init(struct vnic_dev *vdev, int arg)
+{
+	uint64_t a0 = (uint32_t)arg, a1 = 0;
+	int wait = 1000;
+
+	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
+}
+
+/* Set the default VLAN.
+ * NOTE(review): the value returned is vnic_dev_cmd()'s return code
+ * (0 or negative errno) cast to uint16_t, not the previous VLAN read
+ * back from a0 — confirm against firmware semantics for
+ * CMD_SET_DEFAULT_VLAN.
+ */
+uint16_t vnic_dev_set_default_vlan(struct vnic_dev *vdev, uint16_t new_default_vlan)
+{
+	uint64_t a0 = new_default_vlan, a1 = 0;
+	int wait = 1000;
+	int old_vlan = 0;
+
+	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+	return (uint16_t)old_vlan;
+}
+
+/* Getters below read fields from the last consistent notify snapshot;
+ * each returns 0 when no notify buffer is ready.
+ */
+int vnic_dev_link_status(struct vnic_dev *vdev)
+{
+	/* dedicated linkstatus buffer, when present, takes precedence */
+	if (vdev->linkstatus)
+		return *vdev->linkstatus;
+
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_state;
+}
+
+uint32_t vnic_dev_port_speed(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.port_speed;
+}
+
+uint32_t vnic_dev_msg_lvl(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.msglvl;
+}
+
+uint32_t vnic_dev_mtu(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.mtu;
+}
+
+uint32_t vnic_dev_link_down_cnt(struct vnic_dev *vdev)
+{
+	if (!vnic_dev_notify_ready(vdev))
+		return 0;
+
+	return vdev->notify_copy.link_down_cnt;
+}
+
+/* Record/report the interrupt mode (INTx/MSI/MSI-X) chosen by the
+ * caller; this only stores the value — it does not reconfigure HW.
+ */
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev, enum vnic_dev_intr_mode intr_mode)
+{
+	vdev->intr_mode = intr_mode;
+}
+
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
+	struct vnic_dev *vdev)
+{
+	return vdev->intr_mode;
+}
+
+/* Free all lazily-allocated coherent buffers (notify, linkstatus,
+ * stats, fw_info) and the vnic_dev itself.  NULL vdev is a no-op.
+ */
+void vnic_dev_unregister(struct vnic_dev *vdev)
+{
+	if (vdev) {
+		if (vdev->notify)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_notify),
+				vdev->notify,
+				vdev->notify_pa);
+		if (vdev->linkstatus)
+			pci_free_consistent(vdev->pdev,
+				sizeof(uint32_t),
+				vdev->linkstatus,
+				vdev->linkstatus_pa);
+		if (vdev->stats)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_stats),
+				vdev->stats, vdev->stats_pa);
+		if (vdev->fw_info)
+			pci_free_consistent(vdev->pdev,
+				sizeof(struct vnic_devcmd_fw_info),
+				vdev->fw_info, vdev->fw_info_pa);
+		kfree(vdev);
+	}
+}
+
+/* Create (or reuse) a vnic_dev, discover its BAR0 resources, and hook
+ * the devcmd region.  Returns the initialized vnic_dev, or NULL on
+ * failure (in which case any allocated vdev is freed).
+ */
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+				   void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
+{
+	if (!vdev) {
+		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
+		if (!vdev)
+			return NULL;
+	}
+
+	vdev->priv = priv;
+	vdev->pdev = pdev;
+
+	if (vnic_dev_discover_res(vdev, bar))
+		goto err_out;
+
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		goto err_out;
+
+	return vdev;
+
+err_out:
+	vnic_dev_unregister(vdev);
+	return NULL;
+}
diff --git a/drivers/staging/fnic2/src/vnic_dev.h b/drivers/staging/fnic2/src/vnic_dev.h
new file mode 100644
index 0000000..9543909
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_dev.h
@@ -0,0 +1,163 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEV_H_
+#define _VNIC_DEV_H_
+
+#include "vnic_resource.h"
+#include "vnic_devcmd.h"
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_dev_priv fnic2_dev_priv
+#define vnic_dev_get_res_count fnic2_dev_get_res_count
+#define vnic_dev_get_res fnic2_dev_get_res
+#define vnic_dev_desc_ring_size fnic2_dev_desc_ring_size
+#define vnic_dev_clear_desc_ring fnic2_dev_clear_desc_ring
+#define vnic_dev_alloc_desc_ring fnic2_dev_alloc_desc_ring
+#define vnic_dev_free_desc_ring fnic2_dev_free_desc_ring
+#define vnic_dev_cmd fnic2_dev_cmd
+#define vnic_dev_fw_info fnic2_dev_fw_info
+#define vnic_dev_spec fnic2_dev_spec
+#define vnic_dev_stats_clear fnic2_dev_stats_clear
+#define vnic_dev_stats_dump fnic2_dev_stats_dump
+#define vnic_dev_hang_notify fnic2_dev_hang_notify
+#define vnic_dev_packet_filter fnic2_dev_packet_filter
+#define vnic_dev_add_addr fnic2_dev_add_addr
+#define vnic_dev_del_addr fnic2_dev_del_addr
+#define vnic_dev_mac_addr fnic2_dev_mac_addr
+#define vnic_dev_notify_set fnic2_dev_notify_set
+#define vnic_dev_notify_unset fnic2_dev_notify_unset
+#define vnic_dev_link_status fnic2_dev_link_status
+#define vnic_dev_port_speed fnic2_dev_port_speed
+#define vnic_dev_msg_lvl fnic2_dev_msg_lvl
+#define vnic_dev_mtu fnic2_dev_mtu
+#define vnic_dev_link_down_cnt fnic2_dev_link_down_cnt
+#define vnic_dev_close fnic2_dev_close
+#define vnic_dev_enable fnic2_dev_enable
+#define vnic_dev_disable fnic2_dev_disable
+#define vnic_dev_open fnic2_dev_open
+#define vnic_dev_open_done fnic2_dev_open_done
+#define vnic_dev_init fnic2_dev_init
+#define vnic_dev_soft_reset fnic2_dev_soft_reset
+#define vnic_dev_soft_reset_done fnic2_dev_soft_reset_done
+#define vnic_dev_set_intr_mode fnic2_dev_set_intr_mode
+#define vnic_dev_get_intr_mode fnic2_dev_get_intr_mode
+#define vnic_dev_unregister fnic2_dev_unregister
+#define vnic_dev_register fnic2_dev_register
+
+#ifndef VNIC_PADDR_TARGET
+#define VNIC_PADDR_TARGET	0x0000000000000000ULL
+#endif
+
+#ifndef readq
+/*
+ * Fallback 64-bit MMIO accessors for platforms without native
+ * readq/writeq.  Each 64-bit access is emulated with two 32-bit
+ * accesses (low word written first), so the access is not atomic with
+ * respect to the device; callers must tolerate the split access.
+ */
+static inline uint64_t readq(void __iomem *reg)
+{
+	return ((uint64_t)readl(reg + 0x4UL) << 32) | (uint64_t)readl(reg);
+}
+
+static inline void writeq(uint64_t val, void __iomem *reg)
+{
+	writel(val & 0xffffffff, reg);
+	writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+enum vnic_dev_intr_mode {
+	VNIC_DEV_INTR_MODE_UNKNOWN,
+	VNIC_DEV_INTR_MODE_INTX,
+	VNIC_DEV_INTR_MODE_MSI,
+	VNIC_DEV_INTR_MODE_MSIX,
+};
+
+struct vnic_dev_bar {
+	void __iomem	*vaddr;
+	dma_addr_t	bus_addr;
+	unsigned long	len;
+};
+
+struct vnic_dev_ring {
+	void		*descs;
+	size_t		size;
+	dma_addr_t	base_addr;
+	size_t		base_align;
+	void		*descs_unaligned;
+	size_t		size_unaligned;
+	dma_addr_t	base_addr_unaligned;
+	unsigned int	desc_size;
+	unsigned int	desc_count;
+	unsigned int	desc_avail;
+};
+
+struct vnic_dev;
+struct vnic_stats;
+
+void *vnic_dev_priv(struct vnic_dev *vdev);
+unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
+				    enum vnic_res_type type);
+void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
+			       unsigned int index);
+unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
+				     unsigned int desc_count,
+				     unsigned int desc_size);
+void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+			     unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+			     struct vnic_dev_ring *ring);
+int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+		 uint64_t *a0, uint64_t *a1, int wait);
+int vnic_dev_fw_info(struct vnic_dev *vdev,
+		     struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
+		  unsigned int size, void *value);
+int vnic_dev_stats_clear(struct vnic_dev *vdev);
+int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
+int vnic_dev_hang_notify(struct vnic_dev *vdev);
+void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
+			    int broadcast, int promisc, int allmulti);
+void vnic_dev_add_addr(struct vnic_dev *vdev, uint8_t *addr);
+void vnic_dev_del_addr(struct vnic_dev *vdev, uint8_t *addr);
+int vnic_dev_mac_addr(struct vnic_dev *vdev, uint8_t *mac_addr);
+int vnic_dev_notify_set(struct vnic_dev *vdev, uint16_t intr);
+void vnic_dev_notify_unset(struct vnic_dev *vdev);
+int vnic_dev_link_status(struct vnic_dev *vdev);
+uint32_t vnic_dev_port_speed(struct vnic_dev *vdev);
+uint32_t vnic_dev_msg_lvl(struct vnic_dev *vdev);
+uint32_t vnic_dev_mtu(struct vnic_dev *vdev);
+uint32_t vnic_dev_link_down_cnt(struct vnic_dev *vdev);
+int vnic_dev_close(struct vnic_dev *vdev);
+int vnic_dev_enable(struct vnic_dev *vdev);
+int vnic_dev_disable(struct vnic_dev *vdev);
+int vnic_dev_open(struct vnic_dev *vdev, int arg);
+int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_init(struct vnic_dev *vdev, int arg);
+uint16_t vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+				uint16_t new_default_vlan);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
+void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
+			    enum vnic_dev_intr_mode intr_mode);
+enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+void vnic_dev_unregister(struct vnic_dev *vdev);
+struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
+				   void *priv, struct pci_dev *pdev,
+				   struct vnic_dev_bar *bar);
+
+#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_devcmd.h b/drivers/staging/fnic2/src/vnic_devcmd.h
new file mode 100644
index 0000000..1872523
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_devcmd.h
@@ -0,0 +1,349 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_DEVCMD_H_
+#define _VNIC_DEVCMD_H_
+
+#define _CMD_NBITS      14
+#define _CMD_VTYPEBITS	10
+#define _CMD_FLAGSBITS  6
+#define _CMD_DIRBITS	2
+
+#define _CMD_NMASK      ((1 << _CMD_NBITS)-1)
+#define _CMD_VTYPEMASK  ((1 << _CMD_VTYPEBITS)-1)
+#define _CMD_FLAGSMASK  ((1 << _CMD_FLAGSBITS)-1)
+#define _CMD_DIRMASK    ((1 << _CMD_DIRBITS)-1)
+
+#define _CMD_NSHIFT     0
+#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
+#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
+#define _CMD_DIRSHIFT   (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
+
+/*
+ * Direction bits (from host perspective).
+ */
+#define _CMD_DIR_NONE   0U
+#define _CMD_DIR_WRITE  1U
+#define _CMD_DIR_READ   2U
+#define _CMD_DIR_RW     (_CMD_DIR_WRITE | _CMD_DIR_READ)
+
+/*
+ * Flag bits.
+ */
+#define _CMD_FLAGS_NONE 0U
+#define _CMD_FLAGS_NOWAIT 1U
+
+/*
+ * vNIC type bits.
+ */
+#define _CMD_VTYPE_NONE  0U
+#define _CMD_VTYPE_ENET  1U
+#define _CMD_VTYPE_FC    2U
+#define _CMD_VTYPE_SCSI  4U
+#define _CMD_VTYPE_ALL   (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
+
+/*
+ * Used to create cmds..
+*/
+#define _CMDCF(dir, flags, vtype, nr)  \
+	(((dir)   << _CMD_DIRSHIFT) | \
+	((flags) << _CMD_FLAGSSHIFT) | \
+	((vtype) << _CMD_VTYPESHIFT) | \
+	((nr)    << _CMD_NSHIFT))
+#define _CMDC(dir, vtype, nr)    _CMDCF(dir, 0, vtype, nr)
+#define _CMDCNW(dir, vtype, nr)  _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
+
+/*
+ * Used to decode cmds..
+*/
+#define _CMD_DIR(cmd)            (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
+#define _CMD_FLAGS(cmd)          (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
+#define _CMD_VTYPE(cmd)          (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
+#define _CMD_N(cmd)              (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
+
+enum vnic_devcmd_cmd {
+	CMD_NONE                = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
+
+	/* mcpu fw info in mem: (uint64_t)a0=paddr to struct vnic_devcmd_fw_info */
+	CMD_MCPU_FW_INFO        = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+	/* dev-specific block member:
+	 *    in: (uint16_t)a0=offset,(uint8_t)a1=size
+	 *    out: a0=value */
+	CMD_DEV_SPEC            = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
+
+	/* stats clear */
+	CMD_STATS_CLEAR         = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
+
+	/* stats dump in mem: (uint64_t)a0=paddr to stats area,
+	 *                    (uint16_t)a1=sizeof stats area */
+	CMD_STATS_DUMP          = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
+
+	/* set Rx packet filter: (uint32_t)a0=filters (see CMD_PFILTER_*) */
+	CMD_PACKET_FILTER       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
+
+	/* hang detection notification */
+	CMD_HANG_NOTIFY         = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
+
+	/* MAC address in (u48)a0 */
+	CMD_MAC_ADDR            = _CMDC(_CMD_DIR_READ,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+
+	/* disable/enable promisc mode: (uint8_t)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_PROMISC_MODE        = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
+
+	/* disable/enable all-multi mode: (uint8_t)a0=0/1 */
+/***** XXX DEPRECATED *****/
+	CMD_ALLMULTI_MODE       = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
+
+	/* add addr from (u48)a0 */
+	CMD_ADDR_ADD            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
+
+	/* del addr from (u48)a0 */
+	CMD_ADDR_DEL            = _CMDCNW(_CMD_DIR_WRITE,
+					_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
+
+	/* add VLAN id in (uint16_t)a0 */
+	CMD_VLAN_ADD            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
+
+	/* del VLAN id in (uint16_t)a0 */
+	CMD_VLAN_DEL            = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
+
+	/* nic_cfg in (uint32_t)a0 */
+	CMD_NIC_CFG             = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
+	/* union vnic_rss_key in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */
+	CMD_RSS_KEY             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
+
+	/* union vnic_rss_cpu in mem: (uint64_t)a0=paddr, (uint16_t)a1=len */
+	CMD_RSS_CPU             = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
+
+	/* initiate softreset */
+	CMD_SOFT_RESET          = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
+
+	/* softreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_SOFT_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
+
+	/* set struct vnic_devcmd_notify buffer in mem:
+	 * in:
+	 *   (uint64_t)a0=paddr to notify (set paddr=0 to unset)
+	 *   (uint32_t)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
+	 *   (uint16_t)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
+	 * out:
+	 *   (uint32_t)a1 = effective size
+	 */
+	CMD_NOTIFY              = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
+
+	/* UNDI API: (uint64_t)a0=paddr to s_PXENV_UNDI_ struct,
+	 *           (uint8_t)a1=PXENV_UNDI_xxx */
+	CMD_UNDI                = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
+
+	/* initiate open sequence (uint32_t)a0=flags (see CMD_OPENF_*) */
+	CMD_OPEN		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
+
+	/* open status:
+	 *    out: a0=0 open complete, a0=1 open in progress */
+	CMD_OPEN_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
+
+	/* close vnic */
+	CMD_CLOSE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
+
+	/* initialize virtual link: (uint32_t)a0=flags (see CMD_INITF_*) */
+	CMD_INIT		= _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
+
+	/* variant of CMD_INIT, with provisioning info
+	 *     (uint64_t)a0=paddr of vnic_devcmd_provinfo
+	 *     (uint32_t)a1=sizeof provision info */
+	CMD_INIT_PROV_INFO	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
+
+	/* enable virtual link */
+	CMD_ENABLE		= _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
+
+	/* disable virtual link */
+	CMD_DISABLE		= _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
+
+	/* stats dump all vnics on uplink in mem: (uint64_t)a0=paddr (uint32_t)a1=uif */
+	CMD_STATS_DUMP_ALL	= _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
+
+	/* init status:
+	 *    out: a0=0 init complete, a0=1 init in progress
+	 *         if a0=0, a1=errno */
+	CMD_INIT_STATUS		= _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
+
+	/* INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct
+	 *            (uint8_t)a1=INT13_CMD_xxx */
+	CMD_INT13               = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
+
+	/* logical uplink enable/disable: (uint64_t)a0: 0/1=disable/enable */
+	CMD_LOGICAL_UPLINK      = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
+
+	/* undo initialize of virtual link */
+	CMD_DEINIT		= _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+	/* check fw capability of a cmd:
+	 * in:  (uint32_t)a0=cmd
+	 * out: (uint32_t)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+	CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+	/* persistent binding info
+	 * in:  (uint64_t)a0=paddr of arg
+	 *      (uint32_t)a1=CMD_PERBI_XXX */
+	CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+	/* Interrupt Assert Register functionality
+	 * in: (uint16_t)a0=interrupt number to assert
+	 */
+	CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+	/* initiate hangreset, like softreset after hang detected */
+	CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+	/* hangreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+	/*
+	 * Set hw ingress packet vlan rewrite mode:
+	 * in:  (uint32_t)a0=new vlan rewrite mode
+	 * out: (uint32_t)a0=old vlan rewrite mode */
+	CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+	/*
+	 * in:  (uint16_t)a0=bdf of target vnic
+	 *      (uint32_t)a1=cmd to proxy
+	 *      a2-a15=args to cmd in a1
+	 * out: (uint32_t)a0=status of proxied cmd
+	 *      a1-a15=out args of proxied cmd */
+	CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+	/*
+	 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+	 * or SR-IOV virtual vnic
+	 */
+	CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+	/*
+	 * For HPP toggle:
+	 * adapter-info-get
+	 * in:  (uint64_t)a0=physical address of buffer passed in from caller.
+	 *      (uint16_t)a1=size of buffer specified in a0.
+	 * out: (uint64_t)a0=physical address of buffer passed in from caller.
+	 *      (uint16_t)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+	 *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+	CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+	/*
+	 * INT13 API: (uint64_t)a0=paddr to vnic_int13_params struct
+	 *            (uint32_t)a1=INT13_CMD_xxx
+	 */
+	CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+	/*
+	 * Set default vlan:
+	 * in: (uint16_t)a0=new default vlan
+	 *     (uint16_t)a1=zero for overriding vlan with param a0,
+	 *             non-zero for resetting vlan to the default
+	 * out: (uint16_t)a0=old default vlan
+	 */
+	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
+};
+
+/* flags for CMD_OPEN */
+#define CMD_OPENF_OPROM		 0x1	/* open coming from option rom */
+#define CMD_OPENF_RQ_ENABLE_FILL 0x2    /* Driver that posts RQ buffers after Enable */
+
+/* flags for CMD_INIT */
+#define CMD_INITF_DEFAULT_MAC	0x1	/* init with default mac addr */
+
+/* flags for CMD_PACKET_FILTER */
+#define CMD_PFILTER_DIRECTED		0x01
+#define CMD_PFILTER_MULTICAST		0x02
+#define CMD_PFILTER_BROADCAST		0x04
+#define CMD_PFILTER_PROMISCUOUS		0x08
+#define CMD_PFILTER_ALL_MULTICAST	0x10
+
+enum vnic_devcmd_status {
+	STAT_NONE = 0,
+	STAT_BUSY = 1 << 0,	/* cmd in progress */
+	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
+};
+
+enum vnic_devcmd_error {
+	ERR_SUCCESS = 0,
+	ERR_EINVAL = 1,
+	ERR_EFAULT = 2,
+	ERR_EPERM = 3,
+	ERR_EBUSY = 4,
+	ERR_ECMDUNKNOWN = 5,
+	ERR_EBADSTATE = 6,
+	ERR_ENOMEM = 7,
+	ERR_ETIMEDOUT = 8,
+	ERR_ELINKDOWN = 9,
+};
+
+struct vnic_devcmd_fw_info {
+	char fw_version[32];
+	char fw_build[32];
+	char hw_version[32];
+	char hw_serial_number[32];
+};
+
+struct vnic_devcmd_notify {
+	uint32_t csum;		/* checksum over following words */
+
+	uint32_t link_state;		/* link up == 1 */
+	uint32_t port_speed;		/* effective port speed (rate limit) */
+	uint32_t mtu;		/* MTU */
+	uint32_t msglvl;		/* requested driver msg lvl */
+	uint32_t uif;		/* uplink interface */
+	uint32_t status;		/* status bits (see VNIC_STF_*) */
+	uint32_t error;		/* error code (see ERR_*) for first ERR */
+	uint32_t link_down_cnt;	/* running count of link down transitions */
+};
+#define VNIC_STF_FATAL_ERR	0x0001	/* fatal fw error */
+
+struct vnic_devcmd_provinfo {
+	uint8_t oui[3];
+	uint8_t type;
+	uint8_t data[0];
+};
+
+/*
+ * Writing cmd register causes STAT_BUSY to get set in status register.
+ * When cmd completes, STAT_BUSY will be cleared.
+ *
+ * If cmd completed successfully STAT_ERROR will be clear
+ * and args registers contain cmd-specific results.
+ *
+ * If cmd error, STAT_ERROR will be set and args[0] contains error code.
+ *
+ * status register is read-only.  While STAT_BUSY is set,
+ * all other register contents are read-only.
+ */
+
+/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
+#define VNIC_DEVCMD_NARGS 15
+struct vnic_devcmd {
+	uint32_t status;			/* RO */
+	uint32_t cmd;				/* RW */
+	uint64_t args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
+};
+
+#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_intr.c b/drivers/staging/fnic2/src/vnic_intr.c
new file mode 100644
index 0000000..6cdebf7
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_intr.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+
+/* Drop the mapping to this interrupt's control registers. */
+void vnic_intr_free(struct vnic_intr *intr)
+{
+	intr->ctrl = NULL;
+}
+
+/*
+ * Bind @intr to interrupt control resource @index of @vdev.
+ * Returns 0 on success, -EINVAL if the resource cannot be mapped.
+ */
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+		    unsigned int index)
+{
+	intr->vdev = vdev;
+	intr->index = index;
+	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
+	if (!intr->ctrl) {
+		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the coalescing parameters and mask-on-assertion behavior for
+ * this interrupt, then clear any pending credits.
+ */
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+		    unsigned int coalescing_type,
+		    unsigned int mask_on_assertion)
+{
+	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
+	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
+	iowrite32(0, &intr->ctrl->int_credits);
+}
+
+/* Clear any pending interrupt credits. */
+void vnic_intr_clean(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->int_credits);
+}
diff --git a/drivers/staging/fnic2/src/vnic_intr.h b/drivers/staging/fnic2/src/vnic_intr.h
new file mode 100644
index 0000000..d806465
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_intr.h
@@ -0,0 +1,116 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_INTR_H_
+#define _VNIC_INTR_H_
+
+#include <linux/pci.h>
+#include "vnic_dev.h"
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_intr_unmask fnic2_intr_unmask
+#define vnic_intr_mask fnic2_intr_mask
+#define vnic_intr_return_credits fnic2_intr_return_credits
+#define vnic_intr_credits fnic2_intr_credits
+#define vnic_intr_return_all_credits fnic2_intr_return_all_credits
+#define vnic_intr_legacy_pba fnic2_intr_legacy_pba
+#define vnic_intr_free fnic2_intr_free
+#define vnic_intr_alloc fnic2_intr_alloc
+#define vnic_intr_init fnic2_intr_init
+#define vnic_intr_clean fnic2_intr_clean
+
+#define VNIC_INTR_TIMER_MAX		0xffff
+
+/* Interrupt control */
+struct vnic_intr_ctrl {
+	uint32_t coalescing_timer;	/* 0x00 */
+	uint32_t pad0;
+	uint32_t coalescing_value;	/* 0x08 */
+	uint32_t pad1;
+	uint32_t coalescing_type;	/* 0x10 */
+	uint32_t pad2;
+	uint32_t mask_on_assertion;	/* 0x18 */
+	uint32_t pad3;
+	uint32_t mask;			/* 0x20 */
+	uint32_t pad4;
+	uint32_t int_credits;		/* 0x28 */
+	uint32_t pad5;
+	uint32_t int_credit_return;	/* 0x30 */
+	uint32_t pad6;
+};
+
+struct vnic_intr {
+	unsigned int			index;
+	struct vnic_dev			*vdev;
+	struct vnic_intr_ctrl __iomem	*ctrl;	/* memory-mapped */
+};
+
+/* Unmask (enable) the interrupt by writing 0 to the mask register. */
+static inline void vnic_intr_unmask(struct vnic_intr *intr)
+{
+	iowrite32(0, &intr->ctrl->mask);
+}
+
+/* Mask (disable) the interrupt by writing 1 to the mask register. */
+static inline void vnic_intr_mask(struct vnic_intr *intr)
+{
+	iowrite32(1, &intr->ctrl->mask);
+}
+
+/*
+ * Return @credits processed credits to the device.  The low 16 bits of
+ * the credit-return register carry the count; bit 16 requests an
+ * unmask and bit 17 requests a coalescing-timer reset.
+ */
+static inline void vnic_intr_return_credits(struct vnic_intr *intr,
+					    unsigned int credits,
+					    int unmask, int reset_timer)
+{
+#define VNIC_INTR_UNMASK_SHIFT		16
+#define VNIC_INTR_RESET_TIMER_SHIFT	17
+
+	uint32_t int_credit_return = (credits & 0xffff) |
+		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
+		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
+
+	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
+}
+
+/* Read the number of outstanding interrupt credits. */
+static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
+{
+	return ioread32(&intr->ctrl->int_credits);
+}
+
+/* Return every outstanding credit, unmasking and resetting the timer. */
+static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
+{
+	vnic_intr_return_credits(intr, vnic_intr_credits(intr),
+				 1 /* unmask */, 1 /* reset_timer */);
+}
+
+/* Read the legacy INTx pending-bit array; the read does not clear it. */
+static inline uint32_t vnic_intr_legacy_pba(uint32_t __iomem *legacy_pba)
+{
+	/* read PBA without clearing */
+	return ioread32(legacy_pba);
+}
+
+void vnic_intr_free(struct vnic_intr *intr);
+int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
+		    unsigned int index);
+void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+		    unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_clean(struct vnic_intr *intr);
+
+#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_nic.h b/drivers/staging/fnic2/src/vnic_nic.h
new file mode 100644
index 0000000..aa5794c
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_nic.h
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_NIC_H_
+#define _VNIC_NIC_H_
+
+/*
+ * These defines avoid symbol clash between fnic2 and enic (Cisco 10G Eth
+ * Driver) when both are built with CONFIG options =y
+ */
+#define vnic_set_nic_cfg fnic2_set_nic_cfg
+
+enum nic_cfg_rss {
+	NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD	= 0xffUL,
+	NIC_CFG_RSS_HASH_TYPE_MASK_FIELD	= 0xffUL,
+	NIC_CFG_RSS_HASH_TYPE_SHIFT		= 8,
+	NIC_CFG_RSS_HASH_BITS			= (7UL << 16),
+	NIC_CFG_RSS_HASH_BITS_MASK_FIELD	= 7UL,
+	NIC_CFG_RSS_HASH_BITS_SHIFT		= 16,
+	NIC_CFG_RSS_BASE_CPU			= (7UL << 19),
+	NIC_CFG_RSS_BASE_CPU_MASK_FIELD		= 7UL,
+	NIC_CFG_RSS_BASE_CPU_SHIFT		= 19,
+	NIC_CFG_RSS_ENABLE			= (1UL << 22),
+	NIC_CFG_RSS_ENABLE_MASK_FIELD		= 1UL,
+	NIC_CFG_RSS_ENABLE_SHIFT		= 22
+};
+
+enum nic_cfg_tso {
+	NIC_CFG_TSO_IPID_SPLIT_EN		= (1UL << 23),
+	NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD	= 1UL,
+	NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT		= 23
+};
+
+enum nic_cfg_ig_vlan_strip {
+	NIC_CFG_IG_VLAN_STRIP_EN		= (1UL << 24),
+	NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD	= 1UL,
+	NIC_CFG_IG_VLAN_STRIP_EN_SHIFT		= 24
+};
+
+/*
+ * Pack the RSS, TSO and ingress-VLAN-strip settings into the single
+ * 32-bit word consumed by CMD_NIC_CFG.  Each argument is masked to its
+ * field width and shifted into position.
+ */
+static inline void vnic_set_nic_cfg(uint32_t *nic_cfg,
+				    uint8_t rss_default_cpu,
+				    uint8_t rss_hash_type,
+				    uint8_t rss_hash_bits,
+				    uint8_t rss_base_cpu,
+				    uint8_t rss_enable,
+				    uint8_t tso_ipid_split_en,
+				    uint8_t ig_vlan_strip_en)
+{
+	uint32_t cfg;
+
+	cfg  = rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD;
+	cfg |= (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
+		<< NIC_CFG_RSS_HASH_TYPE_SHIFT;
+	cfg |= (rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
+		<< NIC_CFG_RSS_HASH_BITS_SHIFT;
+	cfg |= (rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
+		<< NIC_CFG_RSS_BASE_CPU_SHIFT;
+	cfg |= (rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
+		<< NIC_CFG_RSS_ENABLE_SHIFT;
+	cfg |= (tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
+		<< NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT;
+	cfg |= (ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
+		<< NIC_CFG_IG_VLAN_STRIP_EN_SHIFT;
+
+	*nic_cfg = cfg;
+}
+
+#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_resource.h b/drivers/staging/fnic2/src/vnic_resource.h
new file mode 100644
index 0000000..4e6a05d
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_resource.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_RESOURCE_H_
+#define _VNIC_RESOURCE_H_
+
+#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
+#define VNIC_RES_VERSION	0x00000000L
+
+/* vNIC resource types */
+enum vnic_res_type {
+	RES_TYPE_EOL,				/* End-of-list */
+	RES_TYPE_WQ,				/* Work queues */
+	RES_TYPE_RQ,				/* Receive queues */
+	RES_TYPE_CQ,				/* Completion queues */
+	RES_TYPE_INTR_CTRL		= 10,	/* Interrupt ctrl table */
+	RES_TYPE_INTR_PBA		= 12,	/* MSI/MSI-X PBA table */
+	RES_TYPE_INTR_PBA_LEGACY,		/* Legacy intr status */
+	RES_TYPE_DEVCMD = 16,			/* Device command region */
+	RES_TYPE_MAX = 18			/* Count of resource types */
+};
+
+struct vnic_resource_header {
+	uint32_t magic;
+	uint32_t version;
+};
+
+struct vnic_resource {
+	uint8_t		type;
+	uint8_t		bar;
+	uint8_t		pad[2];
+	uint32_t	bar_offset;
+	uint32_t	count;
+};
+
+#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_scsi.h b/drivers/staging/fnic2/src/vnic_scsi.h
new file mode 100644
index 0000000..49e2304
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_scsi.h
@@ -0,0 +1,88 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_SCSI_H_
+#define _VNIC_SCSI_H_
+
+#define VNIC_FNIC2_WQ_DESCS_MIN              64
+#define VNIC_FNIC2_WQ_DESCS_MAX              128
+
+#define VNIC_FNIC2_WQ_COPY_DESCS_MIN         64
+#define VNIC_FNIC2_WQ_COPY_DESCS_MAX         512
+
+#define VNIC_FNIC2_RQ_DESCS_MIN              64
+#define VNIC_FNIC2_RQ_DESCS_MAX              128
+
+#define VNIC_FNIC2_EDTOV_MIN                 1000
+#define VNIC_FNIC2_EDTOV_MAX                 255000
+
+#define VNIC_FNIC2_RATOV_MIN                 1000
+#define VNIC_FNIC2_RATOV_MAX                 255000
+
+#define VNIC_FNIC2_MAXDATAFIELDSIZE_MIN      256
+#define VNIC_FNIC2_MAXDATAFIELDSIZE_MAX      2112
+
+#define VNIC_FNIC2_FLOGI_RETRIES_MAX         0xffffffff
+
+#define VNIC_FNIC2_FLOGI_TIMEOUT_MIN         1000
+#define VNIC_FNIC2_FLOGI_TIMEOUT_MAX         255000
+
+#define VNIC_FNIC2_PLOGI_RETRIES_MAX         255
+
+#define VNIC_FNIC2_PLOGI_TIMEOUT_MIN         1000
+#define VNIC_FNIC2_PLOGI_TIMEOUT_MAX         255000
+
+#define VNIC_FNIC2_IO_THROTTLE_COUNT_MIN     1
+#define VNIC_FNIC2_IO_THROTTLE_COUNT_MAX     2048
+
+#define VNIC_FNIC2_LINK_DOWN_TIMEOUT_MAX     240000
+
+#define VNIC_FNIC2_PORT_DOWN_TIMEOUT_MAX     240000
+
+#define VNIC_FNIC2_PORT_DOWN_IO_RETRIES_MAX  255
+
+#define VNIC_FNIC2_LUNS_PER_TARGET_MIN       1
+#define VNIC_FNIC2_LUNS_PER_TARGET_MAX       1024
+
+/* Device-specific region: scsi configuration */
+struct vnic_fc_config {
+	uint64_t	node_wwn;
+	uint64_t	port_wwn;
+	uint32_t	flags;
+	uint32_t	wq_enet_desc_count;
+	uint32_t	wq_copy_desc_count;
+	uint32_t	rq_desc_count;
+	uint32_t	flogi_retries;
+	uint32_t	flogi_timeout;
+	uint32_t	plogi_retries;
+	uint32_t	plogi_timeout;
+	uint32_t	io_throttle_count;
+	uint32_t	link_down_timeout;
+	uint32_t	port_down_timeout;
+	uint32_t	port_down_io_retries;
+	uint32_t	luns_per_tgt;
+	uint16_t	maxdatafieldsize;
+	uint16_t	ed_tov;
+	uint16_t	ra_tov;
+	uint16_t	intr_timer;
+	uint8_t		intr_timer_type;
+};
+
+#define VFCF_FCP_SEQ_LVL_ERR	0x1	/* Enable FCP-2 Error Recovery */
+#define VFCF_FIP_CAPABLE	0x4	/* firmware can handle FIP */
+
+#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/staging/fnic2/src/vnic_stats.h b/drivers/staging/fnic2/src/vnic_stats.h
new file mode 100644
index 0000000..c608a20
--- /dev/null
+++ b/drivers/staging/fnic2/src/vnic_stats.h
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _VNIC_STATS_H_
+#define _VNIC_STATS_H_
+
+/* Tx statistics */
+struct vnic_tx_stats {
+	uint64_t tx_frames_ok;
+	uint64_t tx_unicast_frames_ok;
+	uint64_t tx_multicast_frames_ok;
+	uint64_t tx_broadcast_frames_ok;
+	uint64_t tx_bytes_ok;
+	uint64_t tx_unicast_bytes_ok;
+	uint64_t tx_multicast_bytes_ok;
+	uint64_t tx_broadcast_bytes_ok;
+	uint64_t tx_drops;
+	uint64_t tx_errors;
+	uint64_t tx_tso;
+	uint64_t rsvd[16];
+};
+
+/* Rx statistics */
+struct vnic_rx_stats {
+	uint64_t rx_frames_ok;
+	uint64_t rx_frames_total;
+	uint64_t rx_unicast_frames_ok;
+	uint64_t rx_multicast_frames_ok;
+	uint64_t rx_broadcast_frames_ok;
+	uint64_t rx_bytes_ok;
+	uint64_t rx_unicast_bytes_ok;
+	uint64_t rx_multicast_bytes_ok;
+	uint64_t rx_broadcast_bytes_ok;
+	uint64_t rx_drop;
+	uint64_t rx_no_bufs;
+	uint64_t rx_errors;
+	uint64_t rx_rss;
+	uint64_t rx_crc_errors;
+	uint64_t rx_frames_64;
+	uint64_t rx_frames_127;
+	uint64_t rx_frames_255;
+	uint64_t rx_frames_511;
+	uint64_t rx_frames_1023;
+	uint64_t rx_frames_1518;
+	uint64_t rx_frames_to_max;
+	uint64_t rsvd[16];
+};
+
+struct vnic_stats {
+	struct vnic_tx_stats tx;
+	struct vnic_rx_stats rx;
+};
+
+#endif /* _VNIC_STATS_H_ */
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH 10/10] staging: fnic2 add build and config
  2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
                   ` (8 preceding siblings ...)
  2018-04-05 21:23 ` [PATCH 09/10] staging: fnic2 add vnic handling Oliver Smith-Denny
@ 2018-04-05 21:24 ` Oliver Smith-Denny
  9 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-05 21:24 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

These files contain build and config files.

Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Signed-off-by: Anil Chintalapati <achintal@cisco.com>
Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
---
 drivers/staging/Kconfig            |  2 ++
 drivers/staging/Makefile           |  1 +
 drivers/staging/fnic2/Kconfig      |  6 ++++++
 drivers/staging/fnic2/LICENSE      | 13 +++++++++++++
 drivers/staging/fnic2/Makefile     |  3 +++
 drivers/staging/fnic2/TODO         | 14 ++++++++++++++
 drivers/staging/fnic2/src/Makefile | 28 ++++++++++++++++++++++++++++
 7 files changed, 67 insertions(+)
 create mode 100644 drivers/staging/fnic2/Kconfig
 create mode 100644 drivers/staging/fnic2/LICENSE
 create mode 100644 drivers/staging/fnic2/Makefile
 create mode 100644 drivers/staging/fnic2/TODO
 create mode 100644 drivers/staging/fnic2/src/Makefile

diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d5926f0..239e322 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -132,4 +132,6 @@ source "drivers/staging/mt7621-eth/Kconfig"
 
 source "drivers/staging/mt7621-dts/Kconfig"
 
+source "drivers/staging/fnic2/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 919753c..fdd9a63 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_SOC_MT7621)	+= mt7621-dma/
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-mmc/
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-eth/
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-dts/
+obj-$(CONFIG_FNIC2)		+= fnic2/
diff --git a/drivers/staging/fnic2/Kconfig b/drivers/staging/fnic2/Kconfig
new file mode 100644
index 0000000..3983c30
--- /dev/null
+++ b/drivers/staging/fnic2/Kconfig
@@ -0,0 +1,6 @@
+config FNIC2
+        tristate "FNIC2 - CISCO FCoE HBA Driver"
+        depends on PCI && X86 && TARGET_CORE
+        help
+         This enables support for the Cisco FNIC2 FCoE HBA Driver
+         with SCSI Target Mode.
diff --git a/drivers/staging/fnic2/LICENSE b/drivers/staging/fnic2/LICENSE
new file mode 100644
index 0000000..1889afb
--- /dev/null
+++ b/drivers/staging/fnic2/LICENSE
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+This program is free software; you may redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/drivers/staging/fnic2/Makefile b/drivers/staging/fnic2/Makefile
new file mode 100644
index 0000000..8f14039
--- /dev/null
+++ b/drivers/staging/fnic2/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2018 Cisco Systems, Inc.  All rights reserved.
+obj-$(CONFIG_FNIC2)	+= src/
diff --git a/drivers/staging/fnic2/TODO b/drivers/staging/fnic2/TODO
new file mode 100644
index 0000000..5d93ec3
--- /dev/null
+++ b/drivers/staging/fnic2/TODO
@@ -0,0 +1,14 @@
+* Add multiqueue support
+* Integrate vnic files with other drivers
+* Coding style changes (already ran smatch, sparse, checkpatch)
+* Run smatch/sparse/checkpatch + coccinelle again after further changes
+* Add NVME over FC support
+* Check per cmd locking to see if needed
+* Change statically allocated cmd pool to mempool
+* Add function headers
+
+Send patches to:
+	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+	Arulprabhu Ponnusamy <arulponn@cisco.com>
+	Gian Carlo Boffa <gcboffa@cisco.com>
+	Oliver Smith-Denny <osmithde@cisco.com>
diff --git a/drivers/staging/fnic2/src/Makefile b/drivers/staging/fnic2/src/Makefile
new file mode 100644
index 0000000..0d22ffc
--- /dev/null
+++ b/drivers/staging/fnic2/src/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2018 Cisco Systems, Inc.
+# Makefile_wrap
+# This file is prepended for out-of-kernel builds by ofc/release/*/Makefile
+
+#
+# Add -I for building a set of modules out-of-kernel where they have
+# their own set of include files for include/scsi, for example.
+#
+CPPFLAGS := $(SUBTREE_INCLUDE) $(CPPFLAGS)
+# end of Makefile_wrap
+obj-$(CONFIG_FNIC2) += fnic2.o
+
+fnic2-y := \
+        fnic2_isr.o \
+	fnic2_main.o \
+	fnic2_res.o \
+	vnic_cq.o \
+	vnic_dev.o \
+	vnic_intr.o \
+	vnic_rq.o \
+	vnic_wq_copy.o \
+	vnic_wq.o \
+        fdls_if.o \
+	fip.o \
+	fdls_disc.o \
+	fnic2_cmd.o	\
+	fnic2_lio.o
-- 
1.8.3.1

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-05 21:16 ` [PATCH 01/10] staging: fnic2 add initialization Oliver Smith-Denny
@ 2018-04-06  5:07   ` Greg Kroah-Hartman
  2018-04-06 22:00     ` Oliver Smith-Denny
  0 siblings, 1 reply; 19+ messages in thread
From: Greg Kroah-Hartman @ 2018-04-06  5:07 UTC (permalink / raw)
  To: Oliver Smith-Denny
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel

On Thu, Apr 05, 2018 at 02:16:45PM -0700, Oliver Smith-Denny wrote:
> These files contain module load and unload, global driver context,
> PCI registration, PCI probe and remove, and definitions of
> the fnic2 global context.
> 
> Signed-off-by: Oliver Smith-Denny <osmithde@cisco.com>
> Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
> Signed-off-by: Anil Chintalapati <achintal@cisco.com>
> Signed-off-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
> Signed-off-by: Gian Carlo Boffa <gcboffa@cisco.com>
> Co-Developed-by: Arulprabhu Ponnusamy <arulponn@cisco.com>
> Co-Developed-by: Gian Carlo Boffa <gcboffa@cisco.com>
> Co-Developed-by: Oliver Smith-Denny <osmithde@cisco.com>
> ---
>  drivers/staging/fnic2/src/fnic2.h      | 256 ++++++++++++
>  drivers/staging/fnic2/src/fnic2_main.c | 711 +++++++++++++++++++++++++++++++++
>  2 files changed, 967 insertions(+)
>  create mode 100644 drivers/staging/fnic2/src/fnic2.h
>  create mode 100644 drivers/staging/fnic2/src/fnic2_main.c

Why is this a drivers/staging/ driver at all?  What is keeping you from
getting this merged into the "proper" place in the kernel?

If you have a staging driver, you have to have a TODO file in the
directory listing what is keeping this in the staging section.

Also, one tiny thing to fix up:

> --- /dev/null
> +++ b/drivers/staging/fnic2/src/fnic2.h
> @@ -0,0 +1,256 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0

Please read the documentation on how to properly use SPDX tags on kernel
files.  This needs to be the first line of the file.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 02/10] staging: fnic2 add resource allocation
  2018-04-05 21:17 ` [PATCH 02/10] staging: fnic2 add resource allocation Oliver Smith-Denny
@ 2018-04-06  5:08   ` Greg Kroah-Hartman
  0 siblings, 0 replies; 19+ messages in thread
From: Greg Kroah-Hartman @ 2018-04-06  5:08 UTC (permalink / raw)
  To: Oliver Smith-Denny
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel

On Thu, Apr 05, 2018 at 02:17:52PM -0700, Oliver Smith-Denny wrote:
> --- /dev/null
> +++ b/drivers/staging/fnic2/src/fnic2_isr.c
> @@ -0,0 +1,324 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0
> + * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
> + *
> + * This program is free software; you may redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; version 2 of the License.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */

No need for the messy boilerplate text if you have a SPDX line, please
remove both of those paragraphs.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 03/10] staging: fnic2 add fip handling
  2018-04-05 21:18 ` [PATCH 03/10] staging: fnic2 add fip handling Oliver Smith-Denny
@ 2018-04-06  5:08   ` Greg Kroah-Hartman
  0 siblings, 0 replies; 19+ messages in thread
From: Greg Kroah-Hartman @ 2018-04-06  5:08 UTC (permalink / raw)
  To: Oliver Smith-Denny
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel

On Thu, Apr 05, 2018 at 02:18:37PM -0700, Oliver Smith-Denny wrote:
> --- /dev/null
> +++ b/drivers/staging/fnic2/src/fip.c
> @@ -0,0 +1,804 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0
> + * Copyright 2018 Cisco Systems, Inc.  All rights reserved.
> + *
> + * This program is free software; you may redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; version 2 of the License.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */
> +
> +/*! \file */

What is that line for?

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-06  5:07   ` Greg Kroah-Hartman
@ 2018-04-06 22:00     ` Oliver Smith-Denny
  2018-04-07  6:09       ` Greg Kroah-Hartman
  0 siblings, 1 reply; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-06 22:00 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel, Oliver Smith-Denny

On Fri, Apr 06, 2018 at 07:07:52AM +0200, Greg Kroah-Hartman wrote:
> Why is this a drivers/staging/ driver at all?  What is keeping you from
> getting this merged into the "proper" place in the kernel?
> 
> If you have a staging driver, you have to have a TODO file in the
> directory listing what is keeping this in the staging section.

Sorry Greg, we do have a TODO file in the directory, but it was
part of patch 10/10. I can move that to be part of patch 01/10.

We think that this driver is a drivers/staging driver because
there are some changes we want to make before submitting
the driver to the "proper" place. Specifically, we want to
change how we allocate memory (move from a static allocation
to a mempool scenario), which will require some other code
changes. Also, we want to investigate if we need to change
our locking schema. We think that making this driver
part of the drivers/staging community will allow interested
people to try the driver in its current state and offer up
ideas as to its continued development.

If you think that this driver doesn't belong in the
drivers/staging community, we are happy to explore getting
the driver fully ready on our side and getting it into the
"proper" place.

> Please read the documentation on how to properly use SPDX tags on kernel
> files.  This needs to be the first line of the file.

Thanks Greg, I fixed the SPDX tags and removed all the boilerplate,
including the LICENSE file. Also, I have updated the MAINTAINERS
file, which accidentally slipped by my first patch set. If you
think that drivers/staging is a good temporary home for this
driver, then I will send another patchset with the changes,
as well as the changes from your other replies to patch 02/10
and patch 03/10.

Thanks for your help,

Oliver Smith-Denny

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-06 22:00     ` Oliver Smith-Denny
@ 2018-04-07  6:09       ` Greg Kroah-Hartman
  2018-04-09 19:49         ` Oliver Smith-Denny
  2018-04-09 20:57         ` Martin K. Petersen
  0 siblings, 2 replies; 19+ messages in thread
From: Greg Kroah-Hartman @ 2018-04-07  6:09 UTC (permalink / raw)
  To: Oliver Smith-Denny
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel

On Fri, Apr 06, 2018 at 03:00:11PM -0700, Oliver Smith-Denny wrote:
> On Fri, Apr 06, 2018 at 07:07:52AM +0200, Greg Kroah-Hartman wrote:
> > Why is this a drivers/staging/ driver at all?  What is keeping you from
> > getting this merged into the "proper" place in the kernel?
> > 
> > If you have a staging driver, you have to have a TODO file in the
> > directory listing what is keeping this in the staging section.
> 
> Sorry Greg, we do have a TODO file in the directory, but it was
> part of patch 10/10. I can move that to be part of patch 01/10.

Ah, I missed that, sorry.

> We think that this driver is a drivers/staging driver because
> there are some changes we want to make before submitting
> the driver to the "proper" place. Specifically, we want to
> change how we allocate memory (move from a static allocation
> to a mempool scenario), which will require some other code
> changes.

Why not just take a week and do that?  I can't take any new patches
until 4.17-rc1 is out anyway.

> Also, we want to investigate if we need to change our locking schema.

Again, why not just take the time to do that?

> We think that making this driver part of the drivers/staging community
> will allow interested people to try the driver in its current state
> and offer up ideas as to its continued development.

Have you asked the scsi developers what they think about this?

And why not copy the staging developer mailing list on this patch
series?

> If you think that this driver doesn't belong in the
> drivers/staging community, we are happy to explore getting
> the driver fully ready on our side and getting it into the
> "proper" place.

I'll take anything in staging as long as it has the correct license and
builds, that's not an issue :)  But I do want to get agreement from the
SCSI maintainers that this is ok to have in this part of the kernel as
sometimes it can cause merge issues if there are core api changes.

> > Please read the documentation on how to properly use SPDX tags on kernel
> > files.  This needs to be the first line of the file.
> 
> Thanks Greg, I fixed the SPDX tags and removed all the boilerplate,
> including the LICENSE file. Also, I have updated the MAINTAINERS
> file, which accidentally slipped by my first patch set. If you
> think that drivers/staging is a good temporary home for this
> driver, then I will send another patchset with the changes,
> as well as the changes from your other replies to patch 02/10
> and patch 03/10.

A respin of this would be nice, and again, I can't do anything with it
until 4.17-rc1 is out.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-07  6:09       ` Greg Kroah-Hartman
@ 2018-04-09 19:49         ` Oliver Smith-Denny
  2018-04-09 20:57         ` Martin K. Petersen
  1 sibling, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-09 19:49 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Sesidhar Baddela, Gian Carlo Boffa, linux-scsi, target-devel,
	linux-kernel

On Sat, Apr 07, 2018 at 08:09:01AM +0200, Greg Kroah-Hartman wrote:
> Why not just take a week and do that?  I can't take any new patches
> until 4.17-rc1 is out anyway.

Thanks for your help in figuring out the best way forward
for this driver. We will work to finish the TODOs on our
end before 4.17-rc1 is out to see if we can have the driver
in a finished state to submit directly to the SCSI
maintainers. If staging still seems a viable option after
4.17-rc1 comes out, then I will resubmit the patch after
talking with the SCSI developers.
 
> And why not copy the staging developer mailing list on this patch
> series?

My bad, I missed that there was a staging developer mailing list. If
we continue down the staging route, I will CC them.
 
> I'll take anything in staging as long as it has the correct license and
> builds, that's not an issue :)  But I do want to get agreement from the
> SCSI maintainers that this is ok to have in this part of the kernel as
> sometimes it can cause merge issues if there are core api changes.

Sounds good. Thanks again for all your help.

Oliver Smith-Denny

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-07  6:09       ` Greg Kroah-Hartman
  2018-04-09 19:49         ` Oliver Smith-Denny
@ 2018-04-09 20:57         ` Martin K. Petersen
  2018-04-12 17:32           ` Oliver Smith-Denny
  1 sibling, 1 reply; 19+ messages in thread
From: Martin K. Petersen @ 2018-04-09 20:57 UTC (permalink / raw)
  To: Greg Kroah-Hartman
  Cc: Oliver Smith-Denny, Sesidhar Baddela, Gian Carlo Boffa,
	linux-scsi, target-devel, linux-kernel


Greg,

>> If you think that this driver doesn't belong in the drivers/staging
>> community, we are happy to explore getting the driver fully ready on
>> our side and getting it into the "proper" place.
>
> I'll take anything in staging as long as it has the correct license and
> builds, that's not an issue :)  But I do want to get agreement from the
> SCSI maintainers that this is ok to have in this part of the kernel as
> sometimes it can cause merge issues if there are core api changes.

This is the first I hear of fnic2.

My initial questions are: Why is a new driver necessary? Why can't the
existing fnic driver be extended? And if it can't, what can be shared
between the two drivers?

A good place to have that discussion would be on linux-scsi...

-- 
Martin K. Petersen	Oracle Linux Engineering

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH 01/10] staging: fnic2 add initialization
  2018-04-09 20:57         ` Martin K. Petersen
@ 2018-04-12 17:32           ` Oliver Smith-Denny
  0 siblings, 0 replies; 19+ messages in thread
From: Oliver Smith-Denny @ 2018-04-12 17:32 UTC (permalink / raw)
  To: Martin K. Petersen
  Cc: Greg Kroah-Hartman, Sesidhar Baddela, Gian Carlo Boffa,
	linux-scsi, target-devel, linux-kernel, Oliver Smith-Denny

On Mon, Apr 09, 2018 at 04:57:16PM -0400, Martin K. Petersen wrote:
>
> My initial questions are: Why is a new driver necessary? Why can't the
> existing fnic driver be extended? And if it can't, what can be shared
> between the two drivers?
> 
> A good place to have that discussion would be on linux-scsi...

Hi Martin,

Thanks for taking some time to discuss fnic2. linux-scsi is CC'ed to this
thread, for their input.

The main focus of the fnic2 driver is FCoE target mode support, NVMe over FC
initiator & target drivers, and Multi-queue support. We think these changes
need a new framework that we have developed in fnic2.

However, we do believe that a significant number of files and interfaces can
be shared between the two drivers. Specifically, all interaction with the
Cisco VNIC hardware can be shared. Also, there are some driver-specific
queueing interfaces that can be shared between the drivers. This will allow us
to share a significant number of files between fnic and fnic2. We are currently
looking at how the common files can be shared, which was one of our TODOs for
when we were looking at including this driver in the staging area.

Thanks,
Oliver Smith-Denny

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2018-04-12 17:32 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-05 21:15 [PATCH 00/10] staging: fnic2 Driver Introduction Oliver Smith-Denny
2018-04-05 21:16 ` [PATCH 01/10] staging: fnic2 add initialization Oliver Smith-Denny
2018-04-06  5:07   ` Greg Kroah-Hartman
2018-04-06 22:00     ` Oliver Smith-Denny
2018-04-07  6:09       ` Greg Kroah-Hartman
2018-04-09 19:49         ` Oliver Smith-Denny
2018-04-09 20:57         ` Martin K. Petersen
2018-04-12 17:32           ` Oliver Smith-Denny
2018-04-05 21:17 ` [PATCH 02/10] staging: fnic2 add resource allocation Oliver Smith-Denny
2018-04-06  5:08   ` Greg Kroah-Hartman
2018-04-05 21:18 ` [PATCH 03/10] staging: fnic2 add fip handling Oliver Smith-Denny
2018-04-06  5:08   ` Greg Kroah-Hartman
2018-04-05 21:19 ` [PATCH 04/10] staging: fnic2 add fdls system Oliver Smith-Denny
2018-04-05 21:20 ` [PATCH 05/10] staging: fnic2 add LIO interface Oliver Smith-Denny
2018-04-05 21:21 ` [PATCH 06/10] staging: fnic2 add main frame processing Oliver Smith-Denny
2018-04-05 21:21 ` [PATCH 07/10] staging: fnic2 add queue descriptors Oliver Smith-Denny
2018-04-05 21:22 ` [PATCH 08/10] staging: fnic2 add vnic queue interface Oliver Smith-Denny
2018-04-05 21:23 ` [PATCH 09/10] staging: fnic2 add vnic handling Oliver Smith-Denny
2018-04-05 21:24 ` [PATCH 10/10] staging: fnic2 add build and config Oliver Smith-Denny

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).