Fix multiple spelling mistakes in comments and identifiers in the
kselftests bpf modules.

Signed-off-by: Armin <Armin.Mahdilou@gmail.com>
---
 tools/testing/selftests/bpf/Makefile                        | 2 +-
 tools/testing/selftests/bpf/bench.c                         | 2 +-
 tools/testing/selftests/bpf/prog_tests/btf_dump.c           | 2 +-
 tools/testing/selftests/bpf/prog_tests/fd_array.c           | 2 +-
 tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c  | 2 +-
 tools/testing/selftests/bpf/prog_tests/reg_bounds.c         | 4 ++--
 tools/testing/selftests/bpf/progs/bpf_cc_cubic.c            | 2 +-
 tools/testing/selftests/bpf/progs/bpf_dctcp.c               | 2 +-
 .../testing/selftests/bpf/progs/freplace_unreliable_prog.c  | 2 +-
 tools/testing/selftests/bpf/progs/iters_state_safety.c      | 2 +-
 .../testing/selftests/bpf/progs/test_cls_redirect_dynptr.c  | 2 +-
 tools/testing/selftests/bpf/progs/test_tc_dtime.c           | 2 +-
 tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c   | 6 +++---
 tools/testing/selftests/bpf/progs/uretprobe_stack.c         | 4 ++--
 tools/testing/selftests/bpf/progs/verifier_loops1.c         | 2 +-
 tools/testing/selftests/bpf/progs/verifier_scalar_ids.c     | 2 +-
 tools/testing/selftests/bpf/test_lru_map.c                  | 4 ++--
 tools/testing/selftests/bpf/test_lwt_ip_encap.sh            | 2 +-
 tools/testing/selftests/bpf/test_sockmap.c                  | 2 +-
 tools/testing/selftests/bpf/verifier/calls.c                | 6 +++---
 tools/testing/selftests/bpf/xdping.c                        | 2 +-
 tools/testing/selftests/bpf/xsk.h                           | 4 ++--
 22 files changed, 30 insertions(+), 30 deletions(-)

@@ -392,7 +392,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
endif
-# vmlinux.h is first dumped to a temprorary file and then compared to
+# vmlinux.h is first dumped to a temporary file and then compared to
# the previous version. This helps to avoid unnecessary re-builds of
# $(TRUNNER_BPF_OBJS)
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
@@ -497,7 +497,7 @@ extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
-/* pure counting benchmarks to establish theoretical lmits */
+/* pure counting benchmarks to establish theoretical limits */
extern const struct bench bench_trig_usermode_count;
extern const struct bench bench_trig_syscall_count;
extern const struct bench bench_trig_kernel_count;
@@ -63,7 +63,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
/* tests with t->known_ptr_sz have no "long" or "unsigned long" type,
* so it's impossible to determine correct pointer size; but if they
- * do, it should be 8 regardless of host architecture, becaues BPF
+ * do, it should be 8 regardless of host architecture, because BPF
* target is always 64-bit
*/
if (!t->known_ptr_sz) {
@@ -293,7 +293,7 @@ static int get_btf_id_by_fd(int btf_fd, __u32 *id)
* 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1
* 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2
* 3) Close the btf_fd, now refcnt=1
- * Wait and check that BTF stil exists.
+ * Wait and check that BTF still exists.
*/
static void check_fd_array_cnt__referenced_btfs(void)
{
@@ -463,7 +463,7 @@ static bool skip_entry(char *name)
return false;
}
-/* Do comparision by ignoring '.llvm.<hash>' suffixes. */
+/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
const char *res1, *res2;
@@ -609,7 +609,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a, x.b);
*newy = range(t, y.a + 1, y.b);
} else if (x.a == x.b && x.b == y.b) {
- /* X is a constant matching rigth side of Y */
+ /* X is a constant matching right side of Y */
*newx = range(t, x.a, x.b);
*newy = range(t, y.a, y.b - 1);
} else if (y.a == y.b && x.a == y.a) {
@@ -617,7 +617,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newx = range(t, x.a + 1, x.b);
*newy = range(t, y.a, y.b);
} else if (y.a == y.b && x.b == y.b) {
- /* Y is a constant matching rigth side of X */
+ /* Y is a constant matching right side of X */
*newx = range(t, x.a, x.b - 1);
*newy = range(t, y.a, y.b);
} else {
@@ -101,7 +101,7 @@ static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
tp->snd_cwnd = pkts_in_flight + sndcnt;
}
-/* Decide wheather to run the increase function of congestion control. */
+/* Decide whether to run the increase function of congestion control. */
static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
if (tcp_sk(sk)->reordering > TCP_REORDERING)
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
-/* WARNING: This implemenation is not necessarily the same
+/* WARNING: This implementation is not necessarily the same
* as the tcp_dctcp.c. The purpose is mainly for testing
* the kernel BPF logic.
*/
@@ -7,7 +7,7 @@
SEC("freplace/btf_unreliable_kprobe")
/* context type is what BPF verifier expects for kprobe context, but target
- * program has `stuct whatever *ctx` argument, so freplace operation will be
+ * program has `struct whatever *ctx` argument, so freplace operation will be
* rejected with the following message:
*
* arg0 replace_btf_unreliable_kprobe(struct pt_regs *) doesn't match btf_unreliable_kprobe(struct whatever *)
@@ -345,7 +345,7 @@ int __naked read_from_iter_slot_fail(void)
"r3 = 1000;"
"call %[bpf_iter_num_new];"
- /* attemp to leak bpf_iter_num state */
+ /* attempt to leak bpf_iter_num state */
"r7 = *(u64 *)(r6 + 0);"
"r8 = *(u64 *)(r6 + 8);"
@@ -494,7 +494,7 @@ static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_header
*offset += sizeof(*next_hop);
- /* Skip the remainig next hops (may be zero). */
+ /* Skip the remaining next hops (may be zero). */
return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1);
}
@@ -21,7 +21,7 @@
* ns_src | ns_fwd | ns_dst
*
* ns_src and ns_dst: ENDHOST namespace
- * ns_fwd: Fowarding namespace
+ * ns_fwd: Forwarding namespace
*/
#define ctx_ptr(field) (void *)(long)(field)
@@ -10,14 +10,14 @@ char _license[] SEC("license") = "GPL";
SEC("uprobe.session")
__success
-int uprobe_sesison_return_0(struct pt_regs *ctx)
+int uprobe_session_return_0(struct pt_regs *ctx)
{
return 0;
}
SEC("uprobe.session")
__success
-int uprobe_sesison_return_1(struct pt_regs *ctx)
+int uprobe_session_return_1(struct pt_regs *ctx)
{
return 1;
}
@@ -25,7 +25,7 @@ int uprobe_sesison_return_1(struct pt_regs *ctx)
SEC("uprobe.session")
__failure
__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
-int uprobe_sesison_return_2(struct pt_regs *ctx)
+int uprobe_session_return_2(struct pt_regs *ctx)
{
return 2;
}
@@ -27,7 +27,7 @@ SEC("uprobe//proc/self/exe:target_1")
int BPF_UPROBE(uprobe_1)
{
-/* target_1 is recursive wit depth of 2, so we capture two separate
- * stack traces, depending on which occurence it is
+/* target_1 is recursive with depth of 2, so we capture two separate
+ * stack traces, depending on which occurrence it is
*/
static bool recur = false;
@@ -261,7 +261,7 @@ l0_%=: r2 += r1; \
SEC("xdp")
__success
-__naked void not_an_inifinite_loop(void)
+__naked void not_an_infinite_loop(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
@@ -349,7 +349,7 @@ __naked void precision_two_ids(void)
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
-/* check thar r0 and r6 have different IDs after 'if',
+/* check that r0 and r6 have different IDs after 'if',
* collect_linked_regs() can't tie more than 6 registers for a single insn.
*/
__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
@@ -306,7 +306,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
* Update 1 to tgt_free/2
* => The original 1 to tgt_free/2 will be removed due to
* the LRU shrink process
- * Re-insert 1 to tgt_free/2 again and do a lookup immeidately
+ * Re-insert 1 to tgt_free/2 again and do a lookup immediately
* Insert 1+tgt_free to tgt_free*3/2
* Insert 1+tgt_free*3/2 to tgt_free*5/2
* => Key 1+tgt_free to tgt_free*3/2
@@ -371,7 +371,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
}
/* Re-insert 1 to tgt_free/2 again and do a lookup
- * immeidately.
+ * immediately.
*/
end_key = 1 + batch_size;
value[0] = 4321;
@@ -164,7 +164,7 @@ setup()
ip -netns ${NS2} link set veth7 vrf red
fi
- # configure addesses: the top route (1-2-3-4)
+ # configure addresses: the top route (1-2-3-4)
ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
@@ -1372,7 +1372,7 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
} else
fprintf(stderr, "unknown test\n");
out:
- /* Detatch and zero all the maps */
+ /* Detach and zero all the maps */
bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS);
for (i = 0; i < ARRAY_SIZE(links); i++) {
@@ -1375,7 +1375,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
/* write into map value */
@@ -1439,7 +1439,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -1493,7 +1493,7 @@
/* second time with fp-16 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch secound map_value_ptr from the stack */
+ /* fetch second map_value_ptr from the stack */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
/* write into map value */
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
@@ -155,7 +155,7 @@ int main(int argc, char **argv)
}
if (!server) {
- /* Only supports IPv4; see hints initiailization above. */
+ /* Only supports IPv4; see hints initialization above. */
if (getaddrinfo(argv[optind], NULL, &hints, &a) || !a) {
fprintf(stderr, "Could not resolve %s\n", argv[optind]);
return 1;
@@ -94,7 +94,7 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
* cached_cons is r->size bigger than the real consumer pointer so
* that this addition can be avoided in the more frequently
- * executed code that computs free_entries in the beginning of
- * this function. Without this optimization it whould have been
+ * executed code that computes free_entries in the beginning of
+ * this function. Without this optimization it would have been
* free_entries = r->cached_prod - r->cached_cons + r->size.
*/
r->cached_cons = __atomic_load_n(r->consumer, __ATOMIC_ACQUIRE);