@@ -270,6 +270,11 @@ struct hw_perf_event {
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
+
+	/*
+	 * True while hwc->sample_period holds attr->alternative_sample_period
+	 */
+ bool using_alternative_sample_period;
#endif
};
@@ -379,6 +379,7 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */
+#define PERF_ATTR_SIZE_VER9 144 /* add: alternative_sample_period */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -522,6 +523,8 @@ struct perf_event_attr {
__u64 sig_data;
__u64 config3; /* extension of config2 */
+
+ __u64 alternative_sample_period;
};
/*
@@ -4185,6 +4185,8 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bo
s64 period, sample_period;
s64 delta;
+ WARN_ON_ONCE(hwc->using_alternative_sample_period);
+
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
@@ -9806,6 +9808,7 @@ static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
+ struct hw_perf_event *hwc = &event->hw;
int events = atomic_read(&event->event_limit);
int ret = 0;
@@ -9822,6 +9825,26 @@ static int __perf_event_overflow(struct perf_event *event,
!bpf_overflow_handler(event, data, regs))
return ret;
+	/*
+	 * Toggle between the two sample periods on every overflow
+	 */
+ if (event->attr.alternative_sample_period) {
+ bool using_alt = hwc->using_alternative_sample_period;
+ u64 sample_period = (using_alt ? event->attr.sample_period
+ : event->attr.alternative_sample_period);
+
+ hwc->sample_period = sample_period;
+ hwc->using_alternative_sample_period = !using_alt;
+
+ if (local64_read(&hwc->period_left) > 0) {
+ event->pmu->stop(event, PERF_EF_UPDATE);
+
+ local64_set(&hwc->period_left, 0);
+
+ event->pmu->start(event, PERF_EF_RELOAD);
+ }
+ }
+
/*
* XXX event_limit might not quite work as expected on inherited
* events
@@ -12244,6 +12267,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);
+	/*
+	 * alternative_sample_period is incompatible with freq-based sampling
+	 */
+ if (attr->freq && attr->alternative_sample_period)
+ goto err_ns;
+
/*
* We do not support PERF_SAMPLE_READ on inherited events unless
* PERF_SAMPLE_TID is also selected, which allows inherited events to
@@ -12700,9 +12729,19 @@ SYSCALL_DEFINE5(perf_event_open,
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
+ if (attr.alternative_sample_period)
+ return -EINVAL;
} else {
if (attr.sample_period & (1ULL << 63))
return -EINVAL;
+ if (attr.alternative_sample_period) {
+ if (!attr.sample_period)
+ return -EINVAL;
+ if (attr.alternative_sample_period & (1ULL << 63))
+ return -EINVAL;
+ if (attr.alternative_sample_period == attr.sample_period)
+ attr.alternative_sample_period = 0;
+ }
}
/* Only privileged users can get physical addresses */