@@ -6,6 +6,21 @@
#include <net/ultraeth/uet_context.h>
#include <net/ultraeth/uet_pdc.h>
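
+/* Allocate a TX tunnel metadata dst for the PDC's IP address pair.
+ * The caller owns the returned reference.
+ */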
+struct metadata_dst *uet_pdc_dst(const struct uet_pdc_key *key, __be16 dport,
+ u8 tos)
+{
+ IP_TUNNEL_DECLARE_FLAGS(md_flags) = { };
+ struct metadata_dst *mdst;
+
+ mdst = __ip_tun_set_dst(key->src_ip, key->dst_ip, tos, 0, dport,
+ md_flags, 0, 0);
+ if (!mdst)
+ return NULL;
+ mdst->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
+
+ return mdst;
+}
+
static void uet_pdc_xmit(struct uet_pdc *pdc, struct sk_buff *skb)
{
skb->dev = pds_netdev(pdc->pds);
@@ -241,7 +256,6 @@ struct uet_pdc *uet_pdc_create(struct uet_pds *pds, u32 rx_base_psn, u8 state,
const struct uet_pdc_key *key, bool is_inbound)
{
struct uet_pdc *pdc, *pdc_ins = ERR_PTR(-ENOMEM);
- IP_TUNNEL_DECLARE_FLAGS(md_flags) = { };
int ret __maybe_unused;
switch (mode) {
@@ -287,8 +301,7 @@ struct uet_pdc *uet_pdc_create(struct uet_pds *pds, u32 rx_base_psn, u8 state,
if (!pdc->ack_bitmap)
goto err_ack_bitmap;
timer_setup(&pdc->rtx_timer, uet_pdc_rtx_timer_expired, 0);
- pdc->metadata = __ip_tun_set_dst(key->src_ip, key->dst_ip, tos, 0, dport,
- md_flags, 0, 0);
+ pdc->metadata = uet_pdc_dst(key, dport, tos);
if (!pdc->metadata)
goto err_tun_dst;
@@ -731,6 +744,19 @@ static void uet_pdc_rx_req_handle_ack(struct uet_pdc *pdc, unsigned int len,
}
}
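
+/* The PDS request type must match the PDC's delivery mode:
+ * RUD requests on RUD PDCs, ROD requests on ROD PDCs.
+ */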
+static bool uet_pdc_req_validate_mode(const struct uet_pdc *pdc,
+ const struct uet_pds_req_hdr *req)
+{
+ switch (uet_prologue_type(&req->prologue)) {
+ case UET_PDS_TYPE_RUD_REQ:
+ return pdc->mode == UET_PDC_MODE_RUD;
+ case UET_PDS_TYPE_ROD_REQ:
+ return pdc->mode == UET_PDC_MODE_ROD;
+ }
+
+ return false;
+}
+
int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
__be32 remote_fep_addr, __u8 tos)
{
@@ -743,6 +769,7 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
unsigned int len = skb->len;
bool first_ack = false;
enum mpr_pos psn_pos;
+ __u8 nack_code = 0;
int ret = -EINVAL;
spin_lock(&pdc->lock);
@@ -761,6 +788,11 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
if (unlikely(pdc->tx_busy))
goto err_dbg;
+ if (!uet_pdc_req_validate_mode(pdc, req)) {
+ drop_reason = "pdc mode doesn't match request";
+ nack_code = UET_PDS_NACK_PDC_MODE_MISMATCH;
+ goto err_dbg;
+ }
if (req_flags & UET_PDS_REQ_FLAG_RETX)
ack_flags |= UET_PDS_ACK_FLAG_RETX;
@@ -770,10 +802,15 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
switch (psn_pos) {
case UET_PDC_MPR_FUTURE:
drop_reason = "req psn is in a future MPR window";
+ if (req_flags & UET_PDS_REQ_FLAG_SYN)
+ nack_code = UET_PDS_NACK_INVALID_SYN;
+ else
+ nack_code = UET_PDS_NACK_PSN_OOR_WINDOW;
goto err_dbg;
case UET_PDC_MPR_PREV:
if ((int)(req_psn - pdc->rx_base_psn) < S16_MIN) {
drop_reason = "req psn is too far in the past";
+ nack_code = UET_PDS_NACK_PSN_OOR_WINDOW;
goto err_dbg;
}
uet_pdc_send_ses_ack(pdc, UET_SES_RSP_RC_NULL, ses_req->msg_id,
@@ -805,6 +842,7 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
if (!psn_bit_valid(psn_bit)) {
drop_reason = "req psn bit is invalid";
+ nack_code = UET_PDS_NACK_PSN_OOR_WINDOW;
goto err_dbg;
}
if (test_and_set_bit(psn_bit, pdc->rx_bitmap)) {
@@ -844,5 +882,40 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
pdc->state, pdc->dpdcid, pdc->spdcid,
be16_to_cpu(ses_req->msg_id), be32_to_cpu(req->psn),
be16_to_cpu(req->spdcid), be16_to_cpu(req->dpdcid));
+
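+ /* tell the sender why its request was dropped */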
+ if (nack_code)
+ uet_pds_send_nack(pdc->pds, &pdc->key,
+ pdc->metadata->u.tun_info.key.tp_dst, 0,
+ cpu_to_be16(pdc->spdcid),
+ cpu_to_be16(pdc->dpdcid),
+ nack_code, req->psn,
+ pds_req_to_nack_flags(req_flags));
goto out;
}
+
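+/* Process a received NACK: PSNs outside the current MPR window are
+ * ignored, fatal NACK codes tear the PDC down.
+ */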
+void uet_pdc_rx_nack(struct uet_pdc *pdc, struct sk_buff *skb)
+{
+ struct uet_pds_nack_hdr *nack = pds_nack_hdr(skb);
+ u32 nack_psn = be32_to_cpu(nack->nack_psn_pkt_id);
+
+ spin_lock(&pdc->lock);
netdev_dbg(pds_netdev(pdc->pds), "%s: NACK pdc: [ spdcid: %u dpdcid: %u rx_base_psn: %u ] nack header: [ nack_code: %u vendor_code: %u nack_psn: %u ]\n",
__func__, pdc->spdcid, pdc->dpdcid, pdc->rx_base_psn,
nack->nack_code, nack->vendor_code, nack_psn);
+ if (psn_mpr_pos(pdc->rx_base_psn, nack_psn) != UET_PDC_MPR_CUR)
+ goto out;
+ switch (nack->nack_code) {
+ /* PDC_FATAL codes */
+ case UET_PDS_NACK_CLOSING_IN_ERR:
+ case UET_PDS_NACK_INV_DPDCID:
+ case UET_PDS_NACK_NO_RESOURCE:
+ case UET_PDS_NACK_PDC_HDR_MISMATCH:
+ case UET_PDS_NACK_INVALID_SYN:
+ case UET_PDS_NACK_PDC_MODE_MISMATCH:
+ uet_pdc_destroy(pdc);
+ break;
+ }
+out:
+ spin_unlock(&pdc->lock);
+}
@@ -149,6 +149,46 @@ void uet_pds_clean_job(struct uet_pds *pds, u32 job_id)
rhashtable_walk_exit(&iter);
}
+static void uet_pds_build_nack(struct sk_buff *skb, __be16 spdcid, __be16 dpdcid,
+ u8 nack_code, __be32 nack_psn, u8 flags)
+{
+ struct uet_pds_nack_hdr *nack = skb_put(skb, sizeof(*nack));
+
+ uet_pdc_build_prologue(&nack->prologue, UET_PDS_TYPE_NACK,
+ UET_PDS_NEXT_HDR_NONE, flags);
+ nack->nack_code = nack_code;
+ nack->vendor_code = 0;
+ nack->nack_psn_pkt_id = nack_psn;
+ nack->spdcid = spdcid;
+ nack->dpdcid = dpdcid;
+ nack->payload = 0;
+}
+
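+/* Build and send a standalone NACK to the peer identified by @key,
+ * routed via a freshly allocated tunnel metadata dst.
+ */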
+void uet_pds_send_nack(struct uet_pds *pds, const struct uet_pdc_key *key,
+ __be16 dport, u8 tos, __be16 spdcid, __be16 dpdcid,
+ __u8 nack_code, __be32 nack_psn, __u8 flags)
+{
+ struct metadata_dst *mdst;
+ struct sk_buff *skb;
+
+ if (WARN_ON_ONCE(!key))
+ return;
+
+ skb = alloc_skb(sizeof(struct uet_pds_nack_hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->dev = pds_netdev(pds);
+ uet_pds_build_nack(skb, spdcid, dpdcid, nack_code, nack_psn, flags);
+ mdst = uet_pdc_dst(key, dport, tos);
+ if (!mdst) {
+ kfree_skb(skb);
+ return;
+ }
+ skb_dst_set(skb, &mdst->dst);
+ dev_queue_xmit(skb);
+}
+
static int uet_pds_rx_ack(struct uet_pds *pds, struct sk_buff *skb,
__be32 local_fep_addr, __be32 remote_fep_addr)
{
@@ -164,6 +204,20 @@ static int uet_pds_rx_ack(struct uet_pds *pds, struct sk_buff *skb,
return uet_pdc_rx_ack(pdc, skb, remote_fep_addr);
}
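
+/* Deliver a received NACK to the local PDC identified by the
+ * header's dpdcid, if one exists.
+ */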
+static void uet_pds_rx_nack(struct uet_pds *pds, struct sk_buff *skb)
+{
+ struct uet_pds_nack_hdr *nack = pds_nack_hdr(skb);
+ u16 pdcid = be16_to_cpu(nack->dpdcid);
+ struct uet_pdc *pdc;
+
+ pdc = rhashtable_lookup_fast(&pds->pdcid_hash, &pdcid,
+ uet_pds_pdcid_rht_params);
+ if (!pdc)
+ return;
+
+ uet_pdc_rx_nack(pdc, skb);
+}
+
static struct uet_pdc *uet_pds_new_pdc_rx(struct uet_pds *pds,
struct sk_buff *skb,
__be16 dport, u32 ack_gen_trigger,
@@ -201,21 +255,45 @@ static int uet_pds_rx_req(struct uet_pds *pds, struct sk_buff *skb,
/* new flow */
if (unlikely(!pdc)) {
struct uet_prologue_hdr *prologue = pds_prologue_hdr(skb);
+ __u8 req_flags = uet_prologue_flags(prologue);
struct uet_context *ctx;
struct uet_job *job;
- if (!(uet_prologue_flags(prologue) & UET_PDS_REQ_FLAG_SYN))
- if (!(uet_prologue_flags(prologue) & UET_PDS_REQ_FLAG_SYN))
+ if (!(req_flags & UET_PDS_REQ_FLAG_SYN)) {
+ uet_pds_send_nack(pds, &key, dport, 0, 0,
+ pds_req->spdcid,
+ UET_PDS_NACK_INV_DPDCID, pds_req->psn,
+ pds_req_to_nack_flags(req_flags));
return -EINVAL;
+ }
ctx = container_of(pds, struct uet_context, pds);
job = uet_job_find(&ctx->job_reg, key.job_id);
- if (!job)
+ if (!job) {
+ uet_pds_send_nack(pds, &key, dport, 0, 0,
+ pds_req->spdcid,
+ UET_PDS_NACK_NO_RESOURCE,
+ pds_req->psn,
+ pds_req_to_nack_flags(req_flags));
return -ENOENT;
+ }
fep = rcu_dereference(job->fep);
- if (!fep)
+ if (!fep) {
+ uet_pds_send_nack(pds, &key, dport, 0, 0,
+ pds_req->spdcid,
+ UET_PDS_NACK_NO_RESOURCE,
+ pds_req->psn,
+ pds_req_to_nack_flags(req_flags));
return -ECONNREFUSED;
- if (fep->addr.in_address.ip != local_fep_addr)
+ }
+ if (fep->addr.in_address.ip != local_fep_addr) {
+ uet_pds_send_nack(pds, &key, dport, 0, 0,
+ pds_req->spdcid,
+ UET_PDS_NACK_PDC_HDR_MISMATCH,
+ pds_req->psn,
+ pds_req_to_nack_flags(req_flags));
return -ENOENT;
+ }
pdc = uet_pds_new_pdc_rx(pds, skb, dport, fep->ack_gen_trigger,
fep->ack_gen_min_pkt_add, &key,
@@ -290,6 +368,15 @@ int uet_pds_rx(struct uet_pds *pds, struct sk_buff *skb, __be32 local_fep_addr,
ret = uet_pds_rx_req(pds, skb, local_fep_addr, remote_fep_addr,
dport, tos);
break;
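+ /* a NACK terminates the header chain and must be fully present */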
+ case UET_PDS_TYPE_NACK:
+ if (uet_prologue_next_hdr(prologue) != UET_PDS_NEXT_HDR_NONE)
+ break;
+ offset += sizeof(struct uet_pds_nack_hdr);
+ if (!pskb_may_pull(skb, offset))
+ break;
+ ret = 0;
+ uet_pds_rx_nack(pds, skb);
+ break;
default:
break;
}
@@ -120,6 +120,9 @@ int uet_pdc_rx_req(struct uet_pdc *pdc, struct sk_buff *skb,
int uet_pdc_rx_ack(struct uet_pdc *pdc, struct sk_buff *skb,
__be32 remote_fep_addr);
int uet_pdc_tx_req(struct uet_pdc *pdc, struct sk_buff *skb, u8 type);
+void uet_pdc_rx_nack(struct uet_pdc *pdc, struct sk_buff *skb);
+struct metadata_dst *uet_pdc_dst(const struct uet_pdc_key *key, __be16 dport,
+ u8 tos);
static inline void uet_pdc_build_prologue(struct uet_prologue_hdr *prologue,
u8 type, u8 next, u8 flags)
@@ -7,6 +7,7 @@
#include <linux/rhashtable.h>
#include <uapi/linux/ultraeth.h>
#include <linux/skbuff.h>
+#include <net/ultraeth/uet_pdc.h>
/**
* struct uet_pds - Packet Delivery Sublayer state structure
@@ -43,6 +44,10 @@ int uet_pds_rx(struct uet_pds *pds, struct sk_buff *skb, __be32 local_fep_addr,
int uet_pds_tx(struct uet_pds *pds, struct sk_buff *skb, __be32 local_fep_addr,
__be32 remote_fep_addr, __be16 dport, u32 job_id);
+void uet_pds_send_nack(struct uet_pds *pds, const struct uet_pdc_key *key,
+ __be16 dport, u8 tos, __be16 spdcid, __be16 dpdcid,
+ __u8 nack_code, __be32 nack_psn, __u8 flags);
+
static inline struct uet_prologue_hdr *pds_prologue_hdr(const struct sk_buff *skb)
{
return (struct uet_prologue_hdr *)skb_network_header(skb);
@@ -92,4 +97,9 @@ static inline __be16 pds_ses_rsp_hdr_pack(__u8 opcode, __u8 version, __u8 list,
(ses_rc & UET_SES_RSP_RC_MASK) <<
UET_SES_RSP_RC_SHIFT);
}
+
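+/* carry a request's RETX flag over into the NACK flags */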
+static inline __u8 pds_req_to_nack_flags(__u8 req_flags)
+{
+ return req_flags & UET_PDS_REQ_FLAG_RETX ? UET_PDS_NACK_FLAG_RETX : 0;
+}
#endif /* _UECON_PDS_H */
@@ -192,6 +192,61 @@ static inline __u8 uet_pds_ack_ext_cc_type(const struct uet_pds_ack_ext_hdr *ack
UET_PDS_ACK_EXT_CC_TYPE_MASK;
}
+/* NACK codes */
+enum {
+ UET_PDS_NACK_TRIMMED = 0x01,
+ UET_PDS_NACK_TRIMMED_LASTHOP = 0x02,
+ UET_PDS_NACK_TRIMMED_ACK = 0x03,
+ UET_PDS_NACK_NO_PDC_AVAIL = 0x04,
+ UET_PDS_NACK_NO_CCC_AVAIL = 0x05,
+ UET_PDS_NACK_NO_BITMAP = 0x06,
+ UET_PDS_NACK_NO_PKT_BUFFER = 0x07,
+ UET_PDS_NACK_NO_GTD_DEL_AVAIL = 0x08,
+ UET_PDS_NACK_NO_SES_MSG_AVAIL = 0x09,
+ UET_PDS_NACK_NO_RESOURCE = 0x0A,
+ UET_PDS_NACK_PSN_OOR_WINDOW = 0x0B,
+ UET_PDS_NACK_FIRST_ROD_OOO = 0x0C,
+ UET_PDS_NACK_ROD_OOO = 0x0D,
+ UET_PDS_NACK_INV_DPDCID = 0x0E,
+ UET_PDS_NACK_PDC_HDR_MISMATCH = 0x0F,
+ UET_PDS_NACK_CLOSING = 0x10,
+ UET_PDS_NACK_CLOSING_IN_ERR = 0x11,
+ UET_PDS_NACK_PKT_NOT_RCVD = 0x12,
+ UET_PDS_NACK_GTD_RESP_UNAVAIL = 0x13,
+ UET_PDS_NACK_ACK_WITH_DATA = 0x14,
+ UET_PDS_NACK_INVALID_SYN = 0x15,
+ UET_PDS_NACK_PDC_MODE_MISMATCH = 0x16,
+ UET_PDS_NACK_NEW_START_PSN = 0x17,
+ UET_PDS_NACK_RCVD_SES_PROCG = 0x18,
+ UET_PDS_NACK_UNEXP_EVENT = 0x19,
+ UET_PDS_NACK_RCVR_INFER_LOSS = 0x1A,
+ /* 0x1B - 0xFC reserved for UET */
+ UET_PDS_NACK_EXP_NACK_NORMAL = 0xFD,
+ UET_PDS_NACK_EXP_NACK_ERR = 0xFE,
+ UET_PDS_NACK_EXP_NACK_FATAL = 0xFF
+};
+
+/* NACK flags */
+enum {
+ UET_PDS_NACK_FLAG_RSV21 = (1 << 0),
+ UET_PDS_NACK_FLAG_RSV22 = (1 << 1),
+ UET_PDS_NACK_FLAG_RSV23 = (1 << 2),
+ UET_PDS_NACK_FLAG_NT = (1 << 3),
+ UET_PDS_NACK_FLAG_RETX = (1 << 4),
+ UET_PDS_NACK_FLAG_M = (1 << 5),
+ UET_PDS_NACK_FLAG_RSV = (1 << 6)
+};
+
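+/* on-wire PDS NACK header */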
+struct uet_pds_nack_hdr {
+ struct uet_prologue_hdr prologue;
+ __u8 nack_code;
+ __u8 vendor_code;
+ __be32 nack_psn_pkt_id;
+ __be16 spdcid;
+ __be16 dpdcid;
+ __be32 payload;
+} __attribute__ ((__packed__));
+
/* ses request op codes */
enum {
UET_SES_REQ_OP_NOOP = 0x00,