*/
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
+ struct pci_dev *pdev = pf->pdev;
struct ice_vfs *vfs = &pf->vfs;
+ struct pci_dev *vfdev = NULL;
struct ice_vf *vf;
- u16 vf_id;
- int err;
+ u16 vf_pdev_id;
+ int err, pos;
lockdep_assert_held(&vfs->table_lock);
- for (vf_id = 0; vf_id < num_vfs; vf_id++) {
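+ /* All VFs spawned by this PF share a single PCI device ID, advertised
+ * in the PF's SR-IOV extended capability; read it so the VF pci_devs
+ * can be looked up below.
+ */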
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
+
+ for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf) {
err = -ENOMEM;
goto err_free_entries;
}
ice_initialize_vf_entry(vf);
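+ /* Walk PCI devices matching the VF device ID, resuming from the
+ * previous match, until we find the VF whose physfn points back at
+ * this PF.
+ */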
+ do {
+ vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
+ } while (vfdev && vfdev->physfn != pdev);
+ vf->vfdev = vfdev;
vf->vf_sw_id = pf->first_sw;
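+ /* Take an extra reference for vf->vfdev; it is put back when the VF
+ * entry is released.
+ */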
+ pci_dev_get(vfdev);
+
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
+ /* Each pci_get_device() call in the loop drops the reference taken on
+ * the previous vfdev, but the reference on the last iteration's vfdev
+ * is never dropped that way, so put it manually here. Each vf->vfdev
+ * keeps the single reference taken by pci_dev_get().
+ */
+ pci_dev_put(vfdev);
+
return 0;
err_free_entries:
ice_free_vf_entries(pf);
return err;
}

/**
* ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
+ * @pf: pointer to the PF structure
*
* Called when recovering from a PF FLR to restore interrupt capability to
* the VFs.
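+ *
+ * MSI state is restored from each VF's cached pci_dev (vf->vfdev) rather
+ * than by rescanning the PCI bus for VF devices.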
*/
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
+ struct ice_vf *vf;
+ u32 bkt;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
+ ice_for_each_vf(pf, bkt, vf)
+ pci_restore_msi_state(vf->vfdev);
}
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,