|
| 1 | +From f63a4be085b28c5138b95d55681f2bfb38bdaf4f Mon Sep 17 00:00:00 2001 |
| 2 | +From: Donald Sharp <sharpd@nvidia.com> |
| 3 | +Date: Fri, 24 Jan 2025 15:04:13 -0500 |
| 4 | +Subject: [PATCH] bgpd: Optimize evaluate paths for a peer going down |
| 5 | + |
| 6 | +Currently when a directly connected peer is going down |
| 7 | +BGP gets a call back for nexthop tracking in addition to
| 8 | +the interface down events. On the interface down |
| 9 | +event BGP goes through and sets up a per-peer queue that
| 10 | +holds all the BGP path infos associated with that peer
| 11 | +and then it goes and processes this in the future. In |
| 12 | +the meantime zebra is also at work and sends a nexthop |
| 13 | +removal event to BGP as well. This triggers a complete |
| 14 | +walk of all path infos associated with the bnc (which
| 15 | +happens to be all the path infos already scheduled
| 16 | +for removal here shortly). This evaluate paths |
| 17 | +is not an inexpensive operation; in addition, the work
| 18 | +for handling this is already being done via the |
| 19 | +peer down queue. Let's optimize the bnc handling |
| 20 | +of evaluate paths and check to see if the peer is |
| 21 | +still up to actually do the work here. |
| 22 | + |
| 23 | +Signed-off-by: Donald Sharp <sharpd@nvidia.com> |
| 24 | + |
| 25 | +diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c |
| 26 | +index 3b6db31ea0..196cc00385 100644 |
| 27 | +--- a/bgpd/bgp_nht.c |
| 28 | ++++ b/bgpd/bgp_nht.c |
| 29 | +@@ -1258,6 +1258,25 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc) |
| 30 | + } |
| 31 | + |
| 32 | + LIST_FOREACH (path, &(bnc->paths), nh_thread) { |
| 33 | ++ /* |
| 34 | ++ * Currently when a peer goes down, bgp immediately |
| 35 | ++ * sees this via the interface events (if it is directly
| 36 | ++ * connected). And in this case it takes and puts on |
| 37 | ++ * a special peer queue all path infos associated with it,
| 38 | ++ * but these items are not yet processed typically when |
| 39 | ++ * the nexthop is being handled here. Thus we end |
| 40 | ++ * up in a situation where the process Queue for BGP |
| 41 | ++ * is being asked to look at the same path info multiple |
| 42 | ++ * times. Let's just cut to the chase here and if |
| 43 | ++ * the bnc has a peer associated with it and the path info |
| 44 | ++ * being looked at uses that peer and the peer is no |
| 45 | ++ * longer established we know the path_info is being |
| 46 | ++ * handled elsewhere and we do not need to process |
| 47 | ++ * it here at all since the pathinfo is going away |
| 48 | ++ */ |
| 49 | ++ if (peer && path->peer == peer && !peer_established(peer->connection)) |
| 50 | ++ continue; |
| 51 | ++ |
| 52 | + if (path->type == ZEBRA_ROUTE_BGP && |
| 53 | + (path->sub_type == BGP_ROUTE_NORMAL || |
| 54 | + path->sub_type == BGP_ROUTE_STATIC || |
| 55 | +-- |
| 56 | +2.43.2 |
| 57 | + |
0 commit comments