@@ -425,52 +425,6 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 	return err;
 }
 
-static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes,
-			   u32 req_nodes)
-{
-	struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
-	unsigned long min_gap;
-	unsigned long curr_gap;
-
-	/* Special shortcut when a single range is required */
-	if (req_nodes == 1) {
-		unsigned long last;
-
-		curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
-		while (curr) {
-			last = curr->last;
-			prev = curr;
-			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
-			if (prev != comb_start)
-				interval_tree_remove(prev, root);
-		}
-		comb_start->last = last;
-		return;
-	}
-
-	/* Combine ranges which have the smallest gap */
-	while (cur_nodes > req_nodes) {
-		prev = NULL;
-		min_gap = ULONG_MAX;
-		curr = interval_tree_iter_first(root, 0, ULONG_MAX);
-		while (curr) {
-			if (prev) {
-				curr_gap = curr->start - prev->last;
-				if (curr_gap < min_gap) {
-					min_gap = curr_gap;
-					comb_start = prev;
-					comb_end = curr;
-				}
-			}
-			prev = curr;
-			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
-		}
-		comb_start->last = comb_end->last;
-		interval_tree_remove(comb_end, root);
-		cur_nodes--;
-	}
-}
-
 static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
 				 struct mlx5vf_pci_core_device *mvdev,
 				 struct rb_root_cached *ranges, u32 nnodes)
@@ -493,7 +447,7 @@ static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev,
 	int i;
 
 	if (num_ranges > max_num_range) {
-		combine_ranges(ranges, nnodes, max_num_range);
+		vfio_combine_iova_ranges(ranges, nnodes, max_num_range);
 		num_ranges = max_num_range;
 	}
 
@@ -1293,6 +1293,54 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
 	return 0;
 }
 
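+/*
+ * Reduce the IOVA ranges in @root to at most @req_nodes nodes by
+ * repeatedly merging the pair of adjacent ranges separated by the
+ * smallest gap.  @cur_nodes is the number of nodes currently in @root.
+ */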
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+			      u32 req_nodes)
+{
+	struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
+	unsigned long min_gap;
+	unsigned long curr_gap;
+
+	/* Special shortcut when a single range is required */
+	if (req_nodes == 1) {
+		unsigned long last;
+
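+		/* Extend the first range over all nodes and drop the rest */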
+		comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);
+		curr = comb_start;
+		while (curr) {
+			last = curr->last;
+			prev = curr;
+			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+			if (prev != comb_start)
+				interval_tree_remove(prev, root);
+		}
+		comb_start->last = last;
+		return;
+	}
+
+	/* Combine ranges which have the smallest gap */
+	while (cur_nodes > req_nodes) {
+		prev = NULL;
+		min_gap = ULONG_MAX;
+		curr = interval_tree_iter_first(root, 0, ULONG_MAX);
+		while (curr) {
+			if (prev) {
+				curr_gap = curr->start - prev->last;
+				if (curr_gap < min_gap) {
+					min_gap = curr_gap;
+					comb_start = prev;
+					comb_end = curr;
+				}
+			}
+			prev = curr;
+			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
+		}
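+		/* Merge the closest pair and drop the later node */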
+		comb_start->last = comb_end->last;
+		interval_tree_remove(comb_end, root);
+		cur_nodes--;
+	}
+}
+EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);
+
 /* Ranges should fit into a single kernel page */
 #define LOG_MAX_RANGES \
 	(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))
@@ -196,6 +196,9 @@ int vfio_mig_get_next_state(struct vfio_device *device,
 			    enum vfio_device_mig_state new_fsm,
 			    enum vfio_device_mig_state *next_fsm);
 
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+			      u32 req_nodes);
+
 /*
  * External user API
  */