
Commit 3084b33

Merge pull request #743 from dgsudharsan/bgp_fixes_1
2 parents: f256202 + 47d1abd

3 files changed: +137 -0 lines changed
src/sonic-frr/patch/0081-bgpd-Optimize-evaluate-paths-for-a-peer-going-down.patch (+57)
@@ -0,0 +1,57 @@
From f63a4be085b28c5138b95d55681f2bfb38bdaf4f Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Fri, 24 Jan 2025 15:04:13 -0500
Subject: [PATCH] bgpd: Optimize evaluate paths for a peer going down

Currently when a directly connected peer is going down
BGP gets a call back for nexthop tracking in addition
the interface down events. On the interface down
event BGP goes through and sets up a per peer Q that
holds all the bgp path info's associated with that peer
and then it goes and processes this in the future. In
the meantime zebra is also at work and sends a nexthop
removal event to BGP as well. This triggers a complete
walk of all path info's associated with the bnc( which
happens to be all the path info's already scheduled
for removal here shortly). This evaluate paths
is not an inexpensive operation in addition the work
for handling this is already being done via the
peer down queue. Let's optimize the bnc handling
of evaluate paths and check to see if the peer is
still up to actually do the work here.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>

diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 3b6db31ea0..196cc00385 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -1258,6 +1258,25 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
 	}

 	LIST_FOREACH (path, &(bnc->paths), nh_thread) {
+		/*
+		 * Currently when a peer goes down, bgp immediately
+		 * sees this via the interface events( if it is directly
+		 * connected). And in this case it takes and puts on
+		 * a special peer queue all path info's associated with
+		 * but these items are not yet processed typically when
+		 * the nexthop is being handled here. Thus we end
+		 * up in a situation where the process Queue for BGP
+		 * is being asked to look at the same path info multiple
+		 * times. Let's just cut to the chase here and if
+		 * the bnc has a peer associated with it and the path info
+		 * being looked at uses that peer and the peer is no
+		 * longer established we know the path_info is being
+		 * handled elsewhere and we do not need to process
+		 * it here at all since the pathinfo is going away
+		 */
+		if (peer && path->peer == peer && !peer_established(peer->connection))
+			continue;
+
 		if (path->type == ZEBRA_ROUTE_BGP &&
 		    (path->sub_type == BGP_ROUTE_NORMAL ||
 		     path->sub_type == BGP_ROUTE_STATIC ||
--
2.43.2
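The commit message above reduces to one guard inside evaluate_paths(): if a path belongs to the bnc's peer and that peer is no longer established, the peer-down queue is already tearing the path down, so re-evaluating it here is wasted work. A minimal standalone C sketch of that idea follows; the toy types and names are hypothetical and are not FRR's structures.

/* Hypothetical toy types; this is not FRR code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_peer {
	const char *name;
	bool established;
};

struct toy_path {
	struct toy_peer *peer;
	const char *prefix;
	struct toy_path *next;
};

struct toy_bnc {
	struct toy_peer *peer;  /* peer this nexthop-cache entry belongs to (may be NULL) */
	struct toy_path *paths; /* path infos depending on this nexthop */
};

static void toy_evaluate_paths(struct toy_bnc *bnc)
{
	struct toy_peer *peer = bnc->peer;

	for (struct toy_path *path = bnc->paths; path; path = path->next) {
		/* Same shape as the guard in the patch: if the owning peer is
		 * already down, the peer-down queue will remove this path, so
		 * skip the (expensive) re-evaluation here. */
		if (peer && path->peer == peer && !peer->established)
			continue;

		printf("re-evaluating %s\n", path->prefix);
	}
}

int main(void)
{
	struct toy_peer p = { "peer1", false };            /* peer just went down */
	struct toy_path p2 = { &p, "10.0.2.0/24", NULL };
	struct toy_path p1 = { &p, "10.0.1.0/24", &p2 };
	struct toy_bnc bnc = { &p, &p1 };

	toy_evaluate_paths(&bnc); /* prints nothing: both paths are skipped */
	return 0;
}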
src/sonic-frr/patch/0082-Revert-bgpd-upon-if-event-evaluate-bnc-with-matching.patch (+78)
@@ -0,0 +1,78 @@
From 086c32eb5bf2ebfb4805f76219c1a3bc5dd9213e Mon Sep 17 00:00:00 2001
From: dgsudharsan <sudharsand@nvidia.com>
Date: Wed, 19 Feb 2025 17:24:39 +0000
Subject: [PATCH] Revert "bgpd: upon if event, evaluate bnc with matching
 nexthop"

This reverts commit 58592be57783a3b24e7351af2a5afc61299768df.

diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 196cc00385..78eb1a9183 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -751,10 +751,6 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
 				     struct interface *ifp, bool up)
 {
 	struct bgp_nexthop_cache *bnc;
-	struct nexthop *nhop;
-	uint16_t other_nh_count;
-	bool nhop_ll_found = false;
-	bool nhop_found = false;

 	if (ifp->ifindex == IFINDEX_INTERNAL) {
 		zlog_warn("%s: The interface %s ignored", __func__, ifp->name);
@@ -762,42 +758,9 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
 	}

 	frr_each (bgp_nexthop_cache, table, bnc) {
-		other_nh_count = 0;
-		nhop_ll_found = bnc->ifindex_ipv6_ll == ifp->ifindex;
-		for (nhop = bnc->nexthop; nhop; nhop = nhop->next) {
-			if (nhop->ifindex == bnc->ifindex_ipv6_ll)
-				continue;
-
-			if (nhop->ifindex != ifp->ifindex) {
-				other_nh_count++;
-				continue;
-			}
-			if (nhop->vrf_id != ifp->vrf->vrf_id) {
-				other_nh_count++;
-				continue;
-			}
-			nhop_found = true;
-		}
-
-		if (!nhop_found && !nhop_ll_found)
-			/* The event interface does not match the nexthop cache
-			 * entry */
-			continue;
-
-		if (!up && other_nh_count > 0)
-			/* Down event ignored in case of multiple next-hop
-			 * interfaces. The other might interfaces might be still
-			 * up. The cases where all interfaces are down or a bnc
-			 * is invalid are processed by a separate zebra rnh
-			 * messages.
-			 */
+		if (bnc->ifindex_ipv6_ll != ifp->ifindex)
 			continue;

-		if (!nhop_ll_found) {
-			evaluate_paths(bnc);
-			continue;
-		}
-
 		bnc->last_update = monotime(NULL);
 		bnc->change_flags = 0;

@@ -810,7 +773,6 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
 		if (up) {
 			SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
 			SET_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED);
-			/* change nexthop number only for ll */
 			bnc->nexthop_num = 1;
 		} else {
 			UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
--
2.43.2
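The net effect of this revert is that bgp_nht_ifp_table_handle() once again only touches nexthop-cache entries bound to the event interface through their IPv6 link-local ifindex; other nexthops are left to zebra's rnh messages. A small standalone sketch of that restored matching rule follows; the toy names are hypothetical and not FRR code.

/* Hypothetical toy names; this is not FRR code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_bnc {
	const char *nexthop;
	int ifindex_ipv6_ll; /* 0 when the nexthop is not an IPv6 link-local */
};

static void toy_ifp_table_handle(struct toy_bnc *table, int n, int event_ifindex, bool up)
{
	for (int i = 0; i < n; i++) {
		/* Restored check: only entries bound to the event interface via
		 * their IPv6 link-local ifindex are handled on interface events. */
		if (table[i].ifindex_ipv6_ll != event_ifindex)
			continue;

		printf("%s: nexthop marked %s\n", table[i].nexthop,
		       up ? "valid" : "invalid");
	}
}

int main(void)
{
	struct toy_bnc table[] = {
		{ "fe80::1", 3 },   /* link-local nexthop learned over ifindex 3 */
		{ "192.0.2.1", 0 }, /* global nexthop: left to zebra rnh updates */
	};

	toy_ifp_table_handle(table, 2, 3, false); /* interface with ifindex 3 goes down */
	return 0;
}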

src/sonic-frr/patch/series (+2)
@@ -59,3 +59,5 @@
 0077-frr-vtysh-dependencies-for-srv6-static-patches.patch
 0078-vtysh-de-conditionalize-and-reorder-install-node.patch
 0079-staticd-add-support-for-srv6.patch
+0081-bgpd-Optimize-evaluate-paths-for-a-peer-going-down.patch
+0082-Revert-bgpd-upon-if-event-evaluate-bnc-with-matching.patch
