[pallet-broker] Force-unpool provisionally pooled regions before redispatching them #4081

Open · wants to merge 15 commits into base: master
Changes from 3 commits
18 changes: 0 additions & 18 deletions substrate/frame/broker/src/dispatchable_impls.rs
@@ -498,22 +498,4 @@ impl<T: Config> Pallet<T> {
let now = frame_system::Pallet::<T>::block_number();
Ok(Self::sale_price(&sale, now))
}

// Remove a region from on-demand pool contributions. Useful in cases where it was pooled
// provisionally and it is being redeployed (partition/interlace/assign).
//
// Takes both the region_id and (a reference to) the region as arguments to avoid another DB
// read. No-op for regions which have not been pooled.
fn force_unpool_region(region_id: RegionId, region: &RegionRecordOf<T>) {
// We don't care if this fails or not, just that it is removed if present. This is to
// account for the case where a region is pooled provisionally and redeployed.
if let Some(_) = InstaPoolContribution::<T>::take(region_id) {
Self::deposit_event(Event::<T>::RegionUnpooled { region_id });

// Account for this in `InstaPoolIo`.
let size = region_id.mask.count_ones() as i32;
InstaPoolIo::<T>::mutate(region_id.begin, |a| a.private.saturating_reduce(size));
InstaPoolIo::<T>::mutate(region.end, |a| a.private.saturating_accrue(size));
};
}
}
2 changes: 2 additions & 0 deletions substrate/frame/broker/src/lib.rs
@@ -367,6 +367,8 @@ pub mod pallet {
RegionUnpooled {
Contributor Author commented: This is the reason for the major bump, but since there is an event for a region being pooled we should also have one for it being unpooled. (See the sketch after this file's diff for how the Pooled/RegionUnpooled pair might be consumed.)

/// The Region which has been force-removed from the pool.
region_id: RegionId,
/// The timeslice at which the region was force-removed.
when: Timeslice,
},
/// Some historical Instantaneous Core Pool payment record has been initialized.
HistoryInitialized {
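To illustrate the symmetry raised in the comment above, a downstream consumer (an indexer, UI, or test) can now track pool membership from the event stream alone. The sketch below is illustrative only: `Event::Pooled` is assumed to carry at least a `region_id` (its definition is not part of this diff), and `track_pool_membership` plus the `Vec` tracker are placeholder names.

```rust
use pallet_broker::{Config, Event, RegionId};

// Sketch: maintain an off-chain view of currently pooled regions from events.
// Only `RegionUnpooled` is defined in this diff; `Pooled`'s exact fields are
// assumed, so it is matched loosely here.
fn track_pool_membership<T: Config>(pooled: &mut Vec<RegionId>, event: &Event<T>) {
    match event {
        Event::Pooled { region_id, .. } => {
            // A region entered the pool (finally or provisionally).
            pooled.push(*region_id);
        },
        Event::RegionUnpooled { region_id, when: _ } => {
            // The region was force-removed, so it no longer contributes to
            // `InstaPoolIo` from `when` onwards.
            pooled.retain(|r| r != region_id);
        },
        _ => {},
    }
}
```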
60 changes: 60 additions & 0 deletions substrate/frame/broker/src/tests.rs
@@ -510,6 +510,66 @@ fn instapool_payouts_cannot_be_duplicated_through_partition() {
});
}

#[test]
fn instapool_io_updated_correctly_by_force_unpool() {
TestExt::new().endow(1, 1000).execute_with(|| {
// We'll be calling get() on this a lot.
type Io = InstaPoolIo<Test>;

let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() };
assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item])));
assert_ok!(Broker::do_start_sales(100, 3));
advance_to(2);

// Buy core to add to pool.
let region_id = Broker::do_purchase(1, u64::max_value()).unwrap();

// Ensure InstaPoolIo corresponds to one full region provided by the system for this period.
let region = Regions::<Test>::get(&region_id).unwrap();
assert_eq!(Io::get(region_id.begin), PoolIoRecord { private: 0, system: 80 });
assert_eq!(Io::get(region.end), PoolIoRecord { private: 0, system: -80 });

// Add region to pool with Provisional finality.
assert_ok!(Broker::do_pool(region_id, None, 2, Provisional));
// Pool IO registers this region entering and exiting at the correct points.
assert_eq!(Io::get(region_id.begin), PoolIoRecord { private: 80, system: 80 });
assert_eq!(Io::get(region.end), PoolIoRecord { private: -80, system: -80 });

// Force unpool before the region begins.
Broker::force_unpool_region(region_id, &region);
System::assert_last_event(
Event::<Test>::RegionUnpooled { region_id, when: region_id.begin }.into(),
);
// Pool IO does not change now.
assert_eq!(Io::get(Broker::current_timeslice()), PoolIoRecord { private: 0, system: 0 });
// But changes at the point of the region beginning.
assert_eq!(Io::get(region_id.begin), PoolIoRecord { private: 0, system: 80 });

// Pool it again.
assert_ok!(Broker::do_pool(region_id, None, 2, Provisional));
assert_eq!(Io::get(region_id.begin), PoolIoRecord { private: 80, system: 80 });

// Advance to the timeslice after the region starts.
advance_sale_period();
let timeslice_period: u64 = <Test as Config>::TimeslicePeriod::get();
advance_to(System::block_number() + 1 * timeslice_period);
let current_timeslice = Broker::current_timeslice();

// Check the Io right now at key timeslices and then force unpool.
assert_eq!(Io::get(region.end), PoolIoRecord { private: -80, system: -80 });
assert_eq!(Io::get(current_timeslice), PoolIoRecord { private: 0, system: 0 });
Broker::force_unpool_region(region_id, &region);
System::assert_last_event(
Event::<Test>::RegionUnpooled { region_id, when: current_timeslice }.into(),
);

// Then ensure only system removed at the end of the region.
assert_eq!(Io::get(region.end), PoolIoRecord { private: 0, system: -80 });
// And private removed right now.
assert_eq!(Io::get(current_timeslice), PoolIoRecord { private: -80, system: 0 });
});
}

#[test]
fn instapool_payouts_cannot_be_duplicated_through_interlacing() {
TestExt::new().endow(1, 1000).execute_with(|| {
29 changes: 29 additions & 0 deletions substrate/frame/broker/src/utility_impls.rs
@@ -118,4 +118,33 @@ impl<T: Config> Pallet<T> {

Ok(Some((region_id, region)))
}

// Remove a region from on-demand pool contributions. Useful in cases where it was pooled
// provisionally and it is being redispatched (partition/interlace/assign).
//
// Takes both the region_id and (a reference to) the region as arguments to avoid another DB
// read. No-op for regions which have not been pooled.
pub(crate) fn force_unpool_region(region_id: RegionId, region: &RegionRecordOf<T>) {
// We don't care if this fails or not, just that it is removed if present. This is to
// account for the case where a region is pooled provisionally and redispatched.
if let Some(_) = InstaPoolContribution::<T>::take(region_id) {
let current_timeslice = Self::current_timeslice();
// Do no more for regions that have ended.
if region.end < current_timeslice {
return
};

// Account for this in `InstaPoolIo` from either the region begin or current timeslice
// if we are already part-way through the region.
let size = region_id.mask.count_ones() as i32;
let timeslice_removed_at = current_timeslice.max(region_id.begin);
InstaPoolIo::<T>::mutate(timeslice_removed_at, |a| a.private.saturating_reduce(size));
InstaPoolIo::<T>::mutate(region.end, |a| a.private.saturating_accrue(size));

Self::deposit_event(Event::<T>::RegionUnpooled {
region_id,
when: timeslice_removed_at,
});
};
}
}
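For context, the function is `pub(crate)` so that the redispatch paths can clear a provisional pool contribution before reusing a region, which is the behaviour the PR title describes. The call sites themselves (in `do_partition`, `do_interlace`, `do_assign`) are not shown in this view; the sketch below only illustrates the intended pattern, and `redispatch_sketch` is a placeholder name.

```rust
// Sketch only: the real call sites live in `do_partition` / `do_interlace` /
// `do_assign` and are not part of this view of the PR.
fn redispatch_sketch<T: Config>(region_id: RegionId, region: &RegionRecordOf<T>) {
    // Drop any provisional pool contribution first, so the region is not counted
    // in `InstaPoolIo` while it is simultaneously partitioned/interlaced/assigned.
    Pallet::<T>::force_unpool_region(region_id, region);

    // ... continue with the actual partition/interlace/assign logic ...
}
```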