@@ -33,4 +33,4 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o qat_vf_mig.o
@@ -217,6 +217,17 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};
+/**
+ * struct adf_vfmig_ops - Device-specific VF live-migration operations
+ * @init_device: set up migration support for VF number @vf_nr
+ * @shutdown_device: tear down migration support for VF number @vf_nr
+ * @save_state: write the VF's device state into @buf, at most @buf_sz bytes
+ * @load_state: restore the VF's device state from @buf of @buf_sz bytes
+ * @suspend_device: suspend the VF (presumably quiesced before save_state —
+ *		    confirm against the implementing hw backends)
+ * @resume_device: resume the VF
+ *
+ * All int-returning ops return 0 on success or a negative errno on failure.
+ */
+struct adf_vfmig_ops {
+	int (*init_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+	void (*shutdown_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+	int (*save_state)(struct adf_accel_dev *accel_dev, u32 vf_nr,
+			  u8 *buf, u64 buf_sz);
+	int (*load_state)(struct adf_accel_dev *accel_dev, u32 vf_nr,
+			  u8 *buf, u64 buf_sz);
+	int (*suspend_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+	int (*resume_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -263,6 +274,7 @@ struct adf_hw_device_data {
struct adf_hw_csr_info csr_info;
struct adf_pfvf_ops pfvf_ops;
struct adf_dc_ops dc_ops;
+ struct adf_vfmig_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -309,6 +321,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
new file mode 100644
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/bug.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/qat/qat_vf_mig.h>
+#include "adf_common_drv.h"
+
+/**
+ * qat_vfmig_init_device() - Initialize migration support for a VF
+ * @pdev: PCI device to resolve to a QAT accel_dev (the PF, presumably —
+ *	  confirm with callers)
+ * @vf_nr: VF number the operation applies to
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * init_device op. WARNs once if the device does not implement the op.
+ *
+ * Return: 0 on success, -ENODEV if no accel_dev is bound to @pdev,
+ * -EINVAL if the op is not implemented, otherwise the op's return value.
+ */
+int qat_vfmig_init_device(struct pci_dev *pdev, u32 vf_nr)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->init_device))
+		return -EINVAL;
+
+	return GET_VFMIG_OPS(accel_dev)->init_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_init_device);
+
+/**
+ * qat_vfmig_shutdown_device() - Tear down migration support for a VF
+ * @pdev: PCI device to resolve to a QAT accel_dev
+ * @vf_nr: VF number the operation applies to
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * shutdown_device op. Logs and returns silently if no accel_dev is found;
+ * WARNs and returns if the op is not implemented.
+ */
+void qat_vfmig_shutdown_device(struct pci_dev *pdev, u32 vf_nr)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->shutdown_device))
+		return;
+
+	GET_VFMIG_OPS(accel_dev)->shutdown_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_shutdown_device);
+
+/**
+ * qat_vfmig_suspend_device() - Suspend a VF for migration
+ * @pdev: PCI device to resolve to a QAT accel_dev
+ * @vf_nr: VF number the operation applies to
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * suspend_device op.
+ *
+ * Return: 0 on success, -ENODEV if no accel_dev is bound to @pdev,
+ * -EINVAL if the op is not implemented, otherwise the op's return value.
+ */
+int qat_vfmig_suspend_device(struct pci_dev *pdev, u32 vf_nr)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->suspend_device))
+		return -EINVAL;
+
+	return GET_VFMIG_OPS(accel_dev)->suspend_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend_device);
+
+/**
+ * qat_vfmig_resume_device() - Resume a VF after migration
+ * @pdev: PCI device to resolve to a QAT accel_dev
+ * @vf_nr: VF number the operation applies to
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * resume_device op.
+ *
+ * Return: 0 on success, -ENODEV if no accel_dev is bound to @pdev,
+ * -EINVAL if the op is not implemented, otherwise the op's return value.
+ */
+int qat_vfmig_resume_device(struct pci_dev *pdev, u32 vf_nr)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->resume_device))
+		return -EINVAL;
+
+	return GET_VFMIG_OPS(accel_dev)->resume_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_resume_device);
+
+/**
+ * qat_vfmig_save_state() - Save a VF's device state
+ * @pdev: PCI device to resolve to a QAT accel_dev
+ * @vf_nr: VF number the operation applies to
+ * @buf: destination buffer for the saved state
+ * @buf_sz: size of @buf in bytes
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * save_state op, which fills @buf.
+ *
+ * Return: 0 on success, -ENODEV if no accel_dev is bound to @pdev,
+ * -EINVAL if the op is not implemented, otherwise the op's return value.
+ */
+int qat_vfmig_save_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->save_state))
+		return -EINVAL;
+
+	return GET_VFMIG_OPS(accel_dev)->save_state(accel_dev, vf_nr, buf, buf_sz);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_state);
+
+/**
+ * qat_vfmig_load_state() - Restore a VF's device state
+ * @pdev: PCI device to resolve to a QAT accel_dev
+ * @vf_nr: VF number the operation applies to
+ * @buf: buffer holding the previously saved state
+ * @buf_sz: size of the state in @buf, in bytes
+ *
+ * Resolves @pdev to its accel_dev and dispatches to the device-specific
+ * load_state op, which consumes @buf.
+ *
+ * Return: 0 on success, -ENODEV if no accel_dev is bound to @pdev,
+ * -EINVAL if the op is not implemented, otherwise the op's return value.
+ */
+int qat_vfmig_load_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -ENODEV;
+	}
+
+	if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->load_state))
+		return -EINVAL;
+
+	return GET_VFMIG_OPS(accel_dev)->load_state(accel_dev, vf_nr, buf, buf_sz);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_state);
new file mode 100644
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef QAT_VF_MIG_H_
+#define QAT_VF_MIG_H_
+
+/* Needed for u8/u32/u64 so this header is self-contained. */
+#include <linux/types.h>
+
+struct pci_dev;
+
+int qat_vfmig_init_device(struct pci_dev *pdev, u32 vf_nr);
+void qat_vfmig_shutdown_device(struct pci_dev *pdev, u32 vf_nr);
+int qat_vfmig_save_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz);
+int qat_vfmig_load_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz);
+int qat_vfmig_suspend_device(struct pci_dev *pdev, u32 vf_nr);
+int qat_vfmig_resume_device(struct pci_dev *pdev, u32 vf_nr);
+#endif /* QAT_VF_MIG_H_ */