@@ -1471,6 +1471,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1478,6 +1479,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -77,6 +79,43 @@ static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
return false;
}
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the PTP clock value has already crossed 0.5 seconds it is too
+ * late to update the PPS threshold for this cycle, so update the
+ * threshold after the next one second boundary instead.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
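+ /* reprogram the threshold to 0.5 seconds and re-arm the timer to
+ * fire just after the next one second boundary
+ */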
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
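+ /* start_ns is the current PTP_CLOCK_HI value, i.e. the nanoseconds
+ * elapsed within this second; expire just past the next one second
+ * boundary (100 ns of slack)
+ */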
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
u64 sec, sec1, nsec;
@@ -275,6 +314,18 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi register rolls over to zero one clock cycle
+ * before it reaches the one second boundary, so program pps_lo_incr
+ * such that the PPS threshold comparison at the one second boundary
+ * still succeeds and the PPS edge toggles. After each one second
+ * boundary, the hrtimer handler is invoked and reprograms the PPS
+ * threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
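+ /* 0x1dcd6500 ns is 0.5 seconds; shorten the PPS low increment by
+ * one clock period (the value lives in the upper 32 bits)
+ */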
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@@ -301,7 +352,25 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
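+ /* On errata silicon the PPS threshold is maintained from an hrtimer:
+ * arm it from the current position within the second when external
+ * timestamping is enabled, cancel it when disabled.
+ */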
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -341,6 +410,11 @@ static int ptp_probe(struct pci_dev *pdev,
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
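+ /* The errata workaround reprograms PTP_PPS_THRESH_HI from an hrtimer
+ * callback around every one second boundary.
+ */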
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -365,6 +439,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
if (IS_ERR_OR_NULL(ptp))
return;

+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
@@ -432,6 +509,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
@@ -17,7 +17,10 @@ struct ptp {
void __iomem *reg_base;
u64 (*read_ptp_tstmp)(struct ptp *ptp);
spinlock_t ptp_lock; /* lock */
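+ /* hrtimer state for the CN10K PPS errata workaround */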
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
@@ -73,6 +73,23 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
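+/* Notify the AF driver that external timestamping has been turned on or
+ * off so it can arm or cancel the PPS errata hrtimer.
+ */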
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -189,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -198,6 +213,7 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
@@ -235,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
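+ /* forward the on/off state to the AF before starting or stopping
+ * the extts polling work
+ */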
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;