diff --git a/.azure-pipelines/docker-sonic-vs/start.sh b/.azure-pipelines/docker-sonic-vs/start.sh index f7dbde8dcf..337661fd09 100755 --- a/.azure-pipelines/docker-sonic-vs/start.sh +++ b/.azure-pipelines/docker-sonic-vs/start.sh @@ -75,6 +75,14 @@ elif [ "$HWSKU" == "DPU-2P" ]; then cp /usr/share/sonic/hwsku/sai_dpu_2p.profile /usr/share/sonic/hwsku/sai.profile fi +if [ "$BFDOFFLOAD" == "false" ]; then + if ! grep -q "SAI_VS_BFD_OFFLOAD_SUPPORTED=" /usr/share/sonic/hwsku/sai.profile; then + echo 'SAI_VS_BFD_OFFLOAD_SUPPORTED=false' >> /usr/share/sonic/hwsku/sai.profile + else + sed -i "s/SAI_VS_BFD_OFFLOAD_SUPPORTED.*/SAI_VS_BFD_OFFLOAD_SUPPORTED=false/g" /usr/share/sonic/hwsku/sai.profile + fi +fi + mkdir -p /etc/swss/config.d/ rm -f /var/run/rsyslogd.pid diff --git a/.gitignore b/.gitignore index 4e61e19163..e115801e48 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,8 @@ cfgmgr/sflowmgrd cfgmgr/macsecmgrd cfgmgr/coppmgrd cfgmgr/tunnelmgrd +cfgmgr/fabricmgrd +cfgmgr/stpmgrd fpmsyncd/fpmsyncd gearsyncd/gearsyncd mclagsyncd/mclagsyncd @@ -94,3 +96,4 @@ tests/tests.log tests/tests.trs tests/mock_tests/**/*log tests/mock_tests/**/*trs +orchagent/p4orch/tests/p4orch_tests diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index adc97e5734..a0764ab46c 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -1462,6 +1462,26 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons continue; } + // If cable len is 0m, remove lossless PG, keep lossy PG. 
+ if (cable_length == "0m" && portPg.lossless) + { + if (oldProfile.empty()) + { + SWSS_LOG_INFO("No lossless profile found for port %s when cable length is set to '0m'.", port.c_str()); + continue; + } + + if (m_bufferProfileLookup.find(oldProfile) != m_bufferProfileLookup.end()) + { + m_bufferProfileLookup[oldProfile].port_pgs.erase(key); + } + + updateBufferObjectToDb(key, oldProfile, false); + profilesToBeReleased.insert(oldProfile); + portPg.running_profile_name.clear(); + continue; + } + string threshold; // Calculate new headroom size if (portPg.static_configured) diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index acf1011d85..8456509da6 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -107,6 +107,8 @@ orchagent_SOURCES = \ response_publisher.cpp \ nvgreorch.cpp \ zmqorch.cpp \ + dash/dashenifwdorch.cpp \ + dash/dashenifwdinfo.cpp \ dash/dashorch.cpp \ dash/dashrouteorch.cpp \ dash/dashvnetorch.cpp \ diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 60ab40dca6..c0ccf08e5d 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -51,6 +51,8 @@ extern Directory gDirectory; const int TCP_PROTOCOL_NUM = 6; // TCP protocol number +#define MAC_EXACT_MATCH "ff:ff:ff:ff:ff:ff" + acl_rule_attr_lookup_t aclMatchLookup = { { MATCH_IN_PORTS, SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS }, @@ -79,6 +81,8 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_TUNNEL_VNI, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI }, { MATCH_INNER_ETHER_TYPE, SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE }, { MATCH_INNER_IP_PROTOCOL, SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL }, + { MATCH_INNER_SRC_MAC, SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_MAC }, + { MATCH_INNER_DST_MAC, SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_MAC }, { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, @@ -648,6 +652,7 @@ void AclRule::TunnelNH::clear() 
vxlan_orch->removeNextHopTunnel(tunnel_name, endpoint_ip, mac, vni); } + string AclTableType::getName() const { return m_name; @@ -908,6 +913,13 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) { matchData.data.booldata = (attr_name == "true"); } + else if (attr_name == MATCH_INNER_DST_MAC || attr_name == MATCH_INNER_SRC_MAC) + { + swss::MacAddress mac(attr_value); + swss::MacAddress mask(MAC_EXACT_MATCH); + memcpy(matchData.data.mac, mac.getMac(), sizeof(sai_mac_t)); + memcpy(matchData.mask.mac, mask.getMac(), sizeof(sai_mac_t)); + } else if (attr_name == MATCH_IN_PORTS) { auto ports = tokenize(attr_value, ','); diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index beb389260e..5cf3241e42 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -51,6 +51,8 @@ #define MATCH_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" #define MATCH_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define MATCH_INNER_SRC_MAC "INNER_SRC_MAC" +#define MATCH_INNER_DST_MAC "INNER_DST_MAC" #define MATCH_BTH_OPCODE "BTH_OPCODE" #define MATCH_AETH_SYNDROME "AETH_SYNDROME" #define MATCH_TUNNEL_TERM "TUNNEL_TERM" diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index c4d63e9788..7c04ffbf03 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -113,9 +113,11 @@ void BfdOrch::doTask(Consumer &consumer) SWSS_LOG_ENTER(); BgpGlobalStateOrch* bgp_global_state_orch = gDirectory.get(); bool tsa_enabled = false; + bool use_software_bfd = true; if (bgp_global_state_orch) { tsa_enabled = bgp_global_state_orch->getTsaState(); + use_software_bfd = bgp_global_state_orch->getSoftwareBfd(); } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -128,7 +130,8 @@ void BfdOrch::doTask(Consumer &consumer) if (op == SET_COMMAND) { - if (gMySwitchType == "dpu") { + if (use_software_bfd) + { //program entry in software BFD table m_stateSoftBfdSessionTable->set(createStateDBKey(key), data); it = 
consumer.m_toSync.erase(it); @@ -176,7 +179,8 @@ void BfdOrch::doTask(Consumer &consumer) } else if (op == DEL_COMMAND) { - if (gMySwitchType == "dpu") { + if (use_software_bfd) + { //delete entry from software BFD table m_stateSoftBfdSessionTable->del(createStateDBKey(key)); it = consumer.m_toSync.erase(it); @@ -704,6 +708,8 @@ BgpGlobalStateOrch::BgpGlobalStateOrch(DBConnector *db, string tableName): { SWSS_LOG_ENTER(); tsa_enabled = false; + bool ipv6 = true; + bfd_offload = (offload_supported(!ipv6) && offload_supported(ipv6)); } BgpGlobalStateOrch::~BgpGlobalStateOrch(void) @@ -716,6 +722,51 @@ bool BgpGlobalStateOrch::getTsaState() SWSS_LOG_ENTER(); return tsa_enabled; } + +bool BgpGlobalStateOrch::getSoftwareBfd() +{ + SWSS_LOG_ENTER(); + return !bfd_offload; +} + +bool BgpGlobalStateOrch::offload_supported(bool get_ipv6) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + attr.id = SAI_SWITCH_ATTR_SUPPORTED_IPV4_BFD_SESSION_OFFLOAD_TYPE; + if(get_ipv6) + { + attr.id = SAI_SWITCH_ATTR_SUPPORTED_IPV6_BFD_SESSION_OFFLOAD_TYPE; + } + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + attr.id, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query BFD offload capability"); + return false; + } + if (!capability.set_implemented) + { + SWSS_LOG_ERROR("BFD offload type not implemented"); + return false; + } + + uint32_t list[1] = { 1 }; + attr.value.u32list.count = 1; + attr.value.u32list.list = list; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if(status == SAI_STATUS_SUCCESS && attr.value.u32list.count > 0) + { + SWSS_LOG_INFO("BFD offload type: %d", attr.value.u32list.list[0]); + return (attr.value.u32list.list[0] != SAI_BFD_SESSION_OFFLOAD_TYPE_NONE); + } + SWSS_LOG_ERROR("Could not get supported BFD offload type, rv: %d", status); + return false; +} + void BgpGlobalStateOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); diff --git 
a/orchagent/bfdorch.h b/orchagent/bfdorch.h index fb44f08a76..4beb3272f8 100644 --- a/orchagent/bfdorch.h +++ b/orchagent/bfdorch.h @@ -54,9 +54,12 @@ class BgpGlobalStateOrch : public Orch BgpGlobalStateOrch(swss::DBConnector *db, std::string tableName); virtual ~BgpGlobalStateOrch(void); bool getTsaState(); + bool getSoftwareBfd(); private: bool tsa_enabled; + bool bfd_offload; + bool offload_supported(bool get_ipv6); }; #endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index bca1b9e4a6..172f94570d 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1858,6 +1858,7 @@ void BufferOrch::doTask() continue; consumer->drain(); } + gPortsOrch->flushCounters(); } void BufferOrch::doTask(Consumer &consumer) @@ -1921,4 +1922,6 @@ void BufferOrch::doTask(Consumer &consumer) { (this->*(m_bufferFlushHandlerMap[map_type_name]))(consumer); } + + gPortsOrch->flushCounters(); } diff --git a/orchagent/copporch.cpp b/orchagent/copporch.cpp index 6c6a7157e5..3295d40108 100644 --- a/orchagent/copporch.cpp +++ b/orchagent/copporch.cpp @@ -188,13 +188,17 @@ void CoppOrch::initDefaultTrapIds() attr.value.oid = m_trap_group_map[default_trap_group]; trap_id_attrs.push_back(attr); - /* Mellanox platform doesn't support trap priority setting */ - /* Marvell platform doesn't support trap priority. */ + /* + * Use a default trap priority > 0 to avoid undesirable packet trapping + * behavior on some platforms that use 0 as default SAI-internal priority. + * Note: Mellanox and Marvell platforms don't support trap priority setting. 
+ */ + char *platform = getenv("platform"); if (!platform || (!strstr(platform, MLNX_PLATFORM_SUBSTRING) && (!strstr(platform, MRVL_PRST_PLATFORM_SUBSTRING)))) { attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_PRIORITY; - attr.value.u32 = 0; + attr.value.u32 = 1; trap_id_attrs.push_back(attr); } diff --git a/orchagent/dash/dashenifwdinfo.cpp b/orchagent/dash/dashenifwdinfo.cpp new file mode 100644 index 0000000000..5f808c6108 --- /dev/null +++ b/orchagent/dash/dashenifwdinfo.cpp @@ -0,0 +1,466 @@ +#include "dashenifwdorch.h" + +using namespace swss; +using namespace std; + +const int EniAclRule::BASE_PRIORITY = 9996; +const vector EniAclRule::RULE_NAMES = { + "IN", + "OUT", + "IN_TERM", + "OUT_TERM" +}; + +unique_ptr EniNH::createNextHop(dpu_type_t type, const IpAddress& ip) +{ + if (type == dpu_type_t::LOCAL) + { + return unique_ptr(new LocalEniNH(ip)); + } + return unique_ptr(new RemoteEniNH(ip)); +} + + +void LocalEniNH::resolve(EniInfo& eni) +{ + auto& ctx = eni.getCtx(); + auto alias = ctx->getNbrAlias(endpoint_); + + NextHopKey nh(endpoint_, alias); + if (ctx->isNeighborResolved(nh)) + { + setStatus(endpoint_status_t::RESOLVED); + return ; + } + + ctx->resolveNeighbor(nh); + setStatus(endpoint_status_t::UNRESOLVED); +} + +string LocalEniNH::getRedirectVal() +{ + return endpoint_.to_string(); +} + + +void RemoteEniNH::resolve(EniInfo& eni) +{ + auto& ctx = eni.getCtx(); + auto vnet = eni.getVnet(); + + if (!ctx->findVnetTunnel(vnet, tunnel_name_)) + { + SWSS_LOG_ERROR("Couldn't find tunnel name for Vnet %s", vnet.c_str()); + setStatus(endpoint_status_t::UNRESOLVED); + return ; + } + + if (ctx->handleTunnelNH(tunnel_name_, endpoint_, true)) + { + setStatus(endpoint_status_t::RESOLVED); + } + else + { + setStatus(endpoint_status_t::UNRESOLVED); + } +} + +void RemoteEniNH::destroy(EniInfo& eni) +{ + auto& ctx = eni.getCtx(); + ctx->handleTunnelNH(tunnel_name_, endpoint_, false); +} + +string RemoteEniNH::getRedirectVal() +{ + return endpoint_.to_string() + "@" + 
tunnel_name_; +} + +void EniAclRule::setKey(EniInfo& eni) +{ + name_ = string(ENI_REDIRECT_TABLE) + ":" + eni.toKey() + "_" + EniAclRule::RULE_NAMES[type_]; +} + +update_type_t EniAclRule::processUpdate(EniInfo& eni) +{ + SWSS_LOG_ENTER(); + auto& ctx = eni.getCtx(); + IpAddress primary_endp; + dpu_type_t primary_type = LOCAL; + update_type_t update_type = PRIMARY_UPDATE; + uint64_t primary_id; + + if (type_ == rule_type_t::INBOUND_TERM || type_ == rule_type_t::OUTBOUND_TERM) + { + /* Tunnel term entries always use local endpoint regardless of primary id */ + if (!eni.findLocalEp(primary_id)) + { + SWSS_LOG_ERROR("No Local endpoint was found for Rule: %s", getKey().c_str()); + return update_type_t::INVALID; + } + } + else + { + primary_id = eni.getPrimaryId(); + } + + if (!ctx->dpu_info.getType(primary_id, primary_type)) + { + SWSS_LOG_ERROR("No primaryId in DPU Table %" PRIu64 "", primary_id); + return update_type_t::INVALID; + } + + if (primary_type == LOCAL) + { + ctx->dpu_info.getPaV4(primary_id, primary_endp); + } + else + { + ctx->dpu_info.getNpuV4(primary_id, primary_endp); + } + + if (nh_ == nullptr) + { + /* Create Request */ + update_type = update_type_t::CREATE; + } + else if (nh_->getType() != primary_type || nh_->getEp() != primary_endp) + { + /* primary endpoint is switched */ + update_type = update_type_t::PRIMARY_UPDATE; + SWSS_LOG_NOTICE("Endpoint IP for Rule %s updated from %s -> %s", getKey().c_str(), + nh_->getEp().to_string().c_str(), primary_endp.to_string().c_str()); + } + else if(nh_->getStatus() == RESOLVED) + { + /* No primary update and nexthop resolved, no update + Neigh Down on a existing local endpoint needs special handling */ + return update_type_t::IDEMPOTENT; + } + + if (update_type == update_type_t::PRIMARY_UPDATE || update_type == update_type_t::CREATE) + { + if (nh_ != nullptr) + { + nh_->destroy(eni); + } + nh_.reset(); + nh_ = EniNH::createNextHop(primary_type, primary_endp); + } + + /* Try to resolve the neighbor */ + 
nh_->resolve(eni); + return update_type; +} + +void EniAclRule::fire(EniInfo& eni) +{ + /* + Process an ENI update and handle the ACL rule accordingly + 1) See if the update is valid and infer if the nexthop is local or remote + 2) Create a NextHop object and if resolved, proceed with installing the ACL Rule + */ + SWSS_LOG_ENTER(); + + auto update_type = processUpdate(eni); + + if (update_type == update_type_t::INVALID || update_type == update_type_t::IDEMPOTENT) + { + if (update_type == update_type_t::INVALID) + { + setState(rule_state_t::FAILED); + } + return ; + } + + auto& ctx = eni.getCtx(); + auto key = getKey(); + + if (state_ == rule_state_t::INSTALLED && update_type == update_type_t::PRIMARY_UPDATE) + { + /* + Delete the complete rule before updating it, + ACLOrch Doesn't support incremental updates + */ + ctx->rule_table->del(key); + setState(rule_state_t::UNINSTALLED); + SWSS_LOG_NOTICE("EniFwd ACL Rule %s deleted", key.c_str()); + } + + if (nh_->getStatus() != endpoint_status_t::RESOLVED) + { + /* Wait until the endpoint is resolved */ + setState(rule_state_t::PENDING); + return ; + } + + vector fv_ = { + { RULE_PRIORITY, to_string(BASE_PRIORITY + static_cast(type_)) }, + { MATCH_DST_IP, ctx->getVip().to_string() }, + { getMacMatchDirection(eni), eni.getMac().to_string() }, + { ACTION_REDIRECT_ACTION, nh_->getRedirectVal() } + }; + + if (type_ == rule_type_t::INBOUND_TERM || type_ == rule_type_t::OUTBOUND_TERM) + { + fv_.push_back({MATCH_TUNNEL_TERM, "true"}); + } + + if (type_ == rule_type_t::OUTBOUND || type_ == rule_type_t::OUTBOUND_TERM) + { + fv_.push_back({MATCH_TUNNEL_VNI, to_string(eni.getOutVni())}); + } + + ctx->rule_table->set(key, fv_); + setState(INSTALLED); + SWSS_LOG_NOTICE("EniFwd ACL Rule %s installed", key.c_str()); +} + +string EniAclRule::getMacMatchDirection(EniInfo& eni) +{ + if (type_ == OUTBOUND || type_ == OUTBOUND_TERM) + { + return eni.getOutMacLookup(); + } + return MATCH_INNER_DST_MAC; +} + +void 
EniAclRule::destroy(EniInfo& eni) +{ + if (state_ == rule_state_t::INSTALLED) + { + auto key = getKey(); + auto& ctx = eni.getCtx(); + ctx->rule_table->del(key); + if (nh_ != nullptr) + { + nh_->destroy(eni); + } + nh_.reset(); + setState(rule_state_t::UNINSTALLED); + } +} + +void EniAclRule::setState(rule_state_t state) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_INFO("EniFwd ACL Rule: %s State Change %d -> %d", getKey().c_str(), state_, state); + state_ = state; +} + + +EniInfo::EniInfo(const string& mac_str, const string& vnet, const shared_ptr& ctx) : + mac_(mac_str), vnet_name_(vnet), ctx(ctx) +{ + formatMac(); +} + +string EniInfo::toKey() const +{ + return vnet_name_ + "_" + mac_key_; +} + +void EniInfo::fireRule(rule_type_t rule_type) +{ + auto rule_itr = rule_container_.find(rule_type); + if (rule_itr != rule_container_.end()) + { + rule_itr->second.fire(*this); + } +} + +void EniInfo::fireAllRules() +{ + for (auto& rule_tuple : rule_container_) + { + fireRule(rule_tuple.first); + } +} + +bool EniInfo::destroy(const Request& db_request) +{ + for (auto& rule_tuple : rule_container_) + { + rule_tuple.second.destroy(*this); + } + rule_container_.clear(); + return true; +} + +bool EniInfo::create(const Request& db_request) +{ + SWSS_LOG_ENTER(); + + auto updates = db_request.getAttrFieldNames(); + auto itr_ep_list = updates.find(ENI_FWD_VDPU_IDS); + auto itr_primary_id = updates.find(ENI_FWD_PRIMARY); + auto itr_out_vni = updates.find(ENI_FWD_OUT_VNI); + auto itr_out_mac_dir = updates.find(ENI_FWD_OUT_MAC_LOOKUP); + + /* Validation Checks */ + if (itr_ep_list == updates.end() || itr_primary_id == updates.end()) + { + SWSS_LOG_ERROR("Invalid DASH_ENI_FORWARD_TABLE request: No endpoint/primary"); + return false; + } + + ep_list_ = db_request.getAttrUintList(ENI_FWD_VDPU_IDS); + primary_id_ = db_request.getAttrUint(ENI_FWD_PRIMARY); + + uint64_t local_id; + bool tunn_term_allow = findLocalEp(local_id); + bool outbound_allow = false; + + /* Create Rules */ + 
rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::INBOUND), + forward_as_tuple(rule_type_t::INBOUND, *this)); + rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::OUTBOUND), + forward_as_tuple(rule_type_t::OUTBOUND, *this)); + + if (tunn_term_allow) + { + /* Create rules for tunnel termination if required */ + rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::INBOUND_TERM), + forward_as_tuple(rule_type_t::INBOUND_TERM, *this)); + rule_container_.emplace(piecewise_construct, + forward_as_tuple(rule_type_t::OUTBOUND_TERM), + forward_as_tuple(rule_type_t::OUTBOUND_TERM, *this)); + } + + /* Infer Direction to check MAC for outbound rules */ + if (itr_out_mac_dir == updates.end()) + { + outbound_mac_lookup_ = MATCH_INNER_SRC_MAC; + } + else + { + auto str = db_request.getAttrString(ENI_FWD_OUT_MAC_LOOKUP); + if (str == OUT_MAC_DIR) + { + outbound_mac_lookup_ = MATCH_INNER_DST_MAC; + } + else + { + outbound_mac_lookup_ = MATCH_INNER_SRC_MAC; + } + } + + /* Infer tunnel_vni for the outbound rules */ + if (itr_out_vni == updates.end()) + { + if (ctx->findVnetVni(vnet_name_, outbound_vni_)) + { + outbound_allow = true; + } + else + { + SWSS_LOG_ERROR("Invalid VNET: No VNI. 
Cannot install outbound rules: %s", toKey().c_str()); + } + } + else + { + outbound_vni_ = db_request.getAttrUint(ENI_FWD_OUT_VNI); + outbound_allow = true; + } + + fireRule(rule_type_t::INBOUND); + + if (tunn_term_allow) + { + fireRule(rule_type_t::INBOUND_TERM); + } + + if (outbound_allow) + { + fireRule(rule_type_t::OUTBOUND); + } + + if (tunn_term_allow && outbound_allow) + { + fireRule(rule_type_t::OUTBOUND_TERM); + } + + return true; +} + +bool EniInfo::update(const NeighborUpdate& nbr_update) +{ + if (nbr_update.add) + { + fireAllRules(); + } + else + { + /* + Neighbor Delete handling not supported yet + When this update comes, ACL rule must be deleted first, followed by the NEIGH object + */ + } + return true; +} + +bool EniInfo::update(const Request& db_request) +{ + SWSS_LOG_ENTER(); + + /* Only primary_id is expected to change after ENI is created */ + auto updates = db_request.getAttrFieldNames(); + auto itr_primary_id = updates.find(ENI_FWD_PRIMARY); + + /* Validation Checks */ + if (itr_primary_id == updates.end()) + { + throw logic_error("Invalid DASH_ENI_FORWARD_TABLE update: No primary idx"); + } + + if (getPrimaryId() == db_request.getAttrUint(ENI_FWD_PRIMARY)) + { + /* No update in the primary id, return true */ + return true; + } + + /* Update local primary id and fire the rules */ + primary_id_ = db_request.getAttrUint(ENI_FWD_PRIMARY); + fireAllRules(); + + return true; +} + +bool EniInfo::findLocalEp(uint64_t& local_endpoint) const +{ + /* Check if atleast one of the endpoints is local */ + bool found = false; + for (auto idx : ep_list_) + { + dpu_type_t val = dpu_type_t::EXTERNAL; + if (ctx->dpu_info.getType(idx, val) && val == dpu_type_t::LOCAL) + { + if (!found) + { + found = true; + local_endpoint = idx; + } + else + { + SWSS_LOG_WARN("Multiple Local Endpoints for the ENI %s found, proceeding with %" PRIu64 "" , + mac_.to_string().c_str(), local_endpoint); + } + } + } + return found; +} + +void EniInfo::formatMac() +{ + /* 
f4:93:9f:ef:c4:7e -> F4939FEFC47E */ + mac_key_.clear(); + auto mac_orig = mac_.to_string(); + for (char c : mac_orig) { + if (c != ':') { // Skip colons + mac_key_ += static_cast(toupper(c)); + } + } +} diff --git a/orchagent/dash/dashenifwdorch.cpp b/orchagent/dash/dashenifwdorch.cpp new file mode 100644 index 0000000000..ad8ae860ff --- /dev/null +++ b/orchagent/dash/dashenifwdorch.cpp @@ -0,0 +1,572 @@ +#include +#include +#include "dashenifwdorch.h" +#include "directory.h" + +extern Directory gDirectory; + +using namespace swss; +using namespace std; + +DashEniFwdOrch::DashEniFwdOrch(DBConnector* cfgDb, DBConnector* applDb, const std::string& tableName, NeighOrch* neighOrch) + : Orch2(applDb, tableName, request_), neighorch_(neighOrch) +{ + SWSS_LOG_ENTER(); + acl_table_type_ = make_unique(applDb, APP_ACL_TABLE_TYPE_TABLE_NAME); + acl_table_ = make_unique(applDb, APP_ACL_TABLE_TABLE_NAME); + ctx = make_shared(cfgDb, applDb); + if (neighorch_) + { + /* Listen to Neighbor events */ + neighorch_->attach(this); + } +} + +DashEniFwdOrch::~DashEniFwdOrch() +{ + if (neighorch_) + { + neighorch_->detach(this); + } +} + +void DashEniFwdOrch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + switch(type) { + case SUBJECT_TYPE_NEIGH_CHANGE: + { + NeighborUpdate *update = static_cast(cntx); + handleNeighUpdate(*update); + break; + } + default: + // Ignore the update + return; + } +} + +void DashEniFwdOrch::handleNeighUpdate(const NeighborUpdate& update) +{ + /* + Refresh ENI's that are hosted on the DPU with the corresponding Neighboo + */ + SWSS_LOG_ENTER(); + auto ipaddr = update.entry.ip_address; + auto dpu_id_itr = neigh_dpu_map_.find(ipaddr); + if (dpu_id_itr == neigh_dpu_map_.end()) + { + return ; + } + SWSS_LOG_NOTICE("Neighbor Update: %s, add: %d", ipaddr.to_string().c_str(), update.add); + + auto dpu_id = dpu_id_itr->second; + auto itr = dpu_eni_map_.lower_bound(dpu_id); + auto itr_end = dpu_eni_map_.upper_bound(dpu_id); + + while (itr != itr_end) + 
{ + /* Find the eni_itr */ + auto eni_itr = eni_container_.find(itr->second); + if (eni_itr != eni_container_.end()) + { + eni_itr->second.update(update); + } + itr++; + } +} + +void DashEniFwdOrch::initAclTableCfg() +{ + vector match_list = { + MATCH_TUNNEL_VNI, + MATCH_DST_IP, + MATCH_INNER_SRC_MAC, + MATCH_INNER_DST_MAC, + MATCH_TUNNEL_TERM + }; + + auto concat = [](const std::string &a, const std::string &b) { return a + "," + b; }; + + std::string matches = std::accumulate( + std::next(match_list.begin()), match_list.end(), match_list[0], + concat); + + string bpoint_types = string(BIND_POINT_TYPE_PORT) + "," + string(BIND_POINT_TYPE_PORTCHANNEL); + + vector fv_ = { + { ACL_TABLE_TYPE_MATCHES, matches}, + { ACL_TABLE_TYPE_ACTIONS, ACTION_REDIRECT_ACTION }, + { ACL_TABLE_TYPE_BPOINT_TYPES, bpoint_types} + }; + + acl_table_type_->set(ENI_REDIRECT_TABLE_TYPE, fv_); + + auto ports = ctx->getBindPoints(); + std::string ports_str; + + if (!ports.empty()) + { + ports_str = std::accumulate(std::next(ports.begin()), ports.end(), ports[0], concat); + } + + /* Write ACL Table */ + vector table_fv_ = { + { ACL_TABLE_DESCRIPTION, "Contains Rule for DASH ENI Based Forwarding"}, + { ACL_TABLE_TYPE, ENI_REDIRECT_TABLE_TYPE }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, ports_str } + }; + + acl_table_->set(ENI_REDIRECT_TABLE, table_fv_); +} + +void DashEniFwdOrch::initLocalEndpoints() +{ + auto ids = ctx->dpu_info.getIds(); + dpu_type_t primary_type = CLUSTER; + IpAddress local_endp; + for (auto id : ids) + { + if(ctx->dpu_info.getType(id, primary_type) && primary_type == dpu_type_t::LOCAL) + { + if(ctx->dpu_info.getPaV4(id, local_endp)) + { + neigh_dpu_map_.insert(make_pair(local_endp, id)); + SWSS_LOG_NOTICE("Local DPU endpoint detected %s", local_endp.to_string().c_str()); + + /* Try to resovle the neighbor */ + auto alias = ctx->getNbrAlias(local_endp); + NextHopKey nh(local_endp, alias); + + if (ctx->isNeighborResolved(nh)) + { + SWSS_LOG_WARN("Neighbor 
already populated.. Not Expected"); + } + ctx->resolveNeighbor(nh); + } + } + } +} + +void DashEniFwdOrch::handleEniDpuMapping(uint64_t id, MacAddress mac, bool add) +{ + /* Make sure id is local */ + dpu_type_t primary_type = CLUSTER; + if(ctx->dpu_info.getType(id, primary_type) && primary_type == dpu_type_t::LOCAL) + { + if (add) + { + dpu_eni_map_.insert(make_pair(id, mac)); + } + else + { + auto range = dpu_eni_map_.equal_range(id); + for (auto it = range.first; it != range.second; ++it) + { + if (it->second == mac) + { + dpu_eni_map_.erase(it); + break; + } + } + } + } +} + +void DashEniFwdOrch::lazyInit() +{ + if (ctx_initialized_) + { + return ; + } + /* + 1. DpuRegistry + 2. Other Orch ptrs + 3. Internal dpu-id mappings + 4. Write ACL Table Cfg + */ + ctx->initialize(); + ctx->populateDpuRegistry(); + initAclTableCfg(); + initLocalEndpoints(); + ctx_initialized_ = true; +} + +bool DashEniFwdOrch::addOperation(const Request& request) +{ + lazyInit(); + + bool new_eni = false; + auto vnet_name = request.getKeyString(0); + auto eni_id = request.getKeyMacAddress(1); + auto eni_itr = eni_container_.find(eni_id); + + if (eni_itr == eni_container_.end()) + { + new_eni = true; + eni_container_.emplace(std::piecewise_construct, + std::forward_as_tuple(eni_id), + std::forward_as_tuple(eni_id.to_string(), vnet_name, ctx)); + + eni_itr = eni_container_.find(eni_id); + } + + if (new_eni) + { + eni_itr->second.create(request); + uint64_t local_ep; + if (eni_itr->second.findLocalEp(local_ep)) + { + /* Add to the local map if the endpoint is found */ + handleEniDpuMapping(local_ep, eni_id, true); + } + } + else + { + eni_itr->second.update(request); + } + return true; +} + +bool DashEniFwdOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto vnet_name = request.getKeyString(0); + auto eni_id = request.getKeyMacAddress(1); + + auto eni_itr = eni_container_.find(eni_id); + + if (eni_itr == eni_container_.end()) + { + SWSS_LOG_ERROR("Invalid del request 
%s:%s", vnet_name.c_str(), eni_id.to_string().c_str()); + return true; + } + + bool result = eni_itr->second.destroy(request); + if (result) + { + uint64_t local_ep; + if (eni_itr->second.findLocalEp(local_ep)) + { + /* Add to the local map if the endpoint is found */ + handleEniDpuMapping(local_ep, eni_id, false); + } + } + eni_container_.erase(eni_id); + return true; +} + + +void DpuRegistry::populate(Table* dpuTable) +{ + SWSS_LOG_ENTER(); + std::vector keys; + dpuTable->getKeys(keys); + + for (auto key : keys) + { + try + { + std::vector values; + dpuTable->get(key, values); + + KeyOpFieldsValuesTuple kvo = { + key, SET_COMMAND, values + }; + processDpuTable(kvo); + } + catch(exception& e) + { + SWSS_LOG_ERROR("Failed to parse key:%s in the %s", key.c_str(), CFG_DPU_TABLE); + } + } + SWSS_LOG_INFO("DPU data read. %zu dpus found", dpus_.size()); +} + +void DpuRegistry::processDpuTable(const KeyOpFieldsValuesTuple& kvo) +{ + DpuData data; + + dpu_request_.clear(); + dpu_request_.parse(kvo); + + uint64_t key = dpu_request_.getKeyUint(0); + string type = dpu_request_.getAttrString(DPU_TYPE); + + dpus_ids_.push_back(key); + + if (type == "local") + { + data.type = dpu_type_t::LOCAL; + } + else + { + // External type is not suported + data.type = dpu_type_t::CLUSTER; + } + + data.pa_v4 = dpu_request_.getAttrIP(DPU_PA_V4); + data.npu_v4 = dpu_request_.getAttrIP(DPU_NPU_V4); + dpus_.insert({key, data}); +} + +std::vector DpuRegistry::getIds() +{ + return dpus_ids_; +} + +bool DpuRegistry::getType(uint64_t id, dpu_type_t& val) +{ + auto itr = dpus_.find(id); + if (itr == dpus_.end()) return false; + val = itr->second.type; + return true; +} + +bool DpuRegistry::getPaV4(uint64_t id, swss::IpAddress& val) +{ + auto itr = dpus_.find(id); + if (itr == dpus_.end()) return false; + val = itr->second.pa_v4; + return true; +} + +bool DpuRegistry::getNpuV4(uint64_t id, swss::IpAddress& val) +{ + auto itr = dpus_.find(id); + if (itr == dpus_.end()) return false; + val = 
itr->second.npu_v4; + return true; +} + +EniFwdCtxBase::EniFwdCtxBase(DBConnector* cfgDb, DBConnector* applDb) +{ + dpu_tbl_ = make_unique(cfgDb, CFG_DPU_TABLE); + port_tbl_ = make_unique
(cfgDb, CFG_PORT_TABLE_NAME); + vip_tbl_ = make_unique
(cfgDb, CFG_VIP_TABLE_TMP); + rule_table = make_unique(applDb, APP_ACL_RULE_TABLE_NAME); + vip_inferred_ = false; +} + +void EniFwdCtxBase::populateDpuRegistry() +{ + dpu_info.populate(dpu_tbl_.get()); +} + +std::set EniFwdCtxBase::findInternalPorts() +{ + std::vector all_ports; + std::set internal_ports; + port_tbl_->getKeys(all_ports); + for (auto& port : all_ports) + { + std::string val; + if (port_tbl_->hget(port, PORT_ROLE, val)) + { + if (val == PORT_ROLE_DPC) + { + internal_ports.insert(port); + } + } + } + return internal_ports; +} + +vector EniFwdCtxBase::getBindPoints() +{ + std::vector bpoints; + auto internal_ports = findInternalPorts(); + auto all_ports = getAllPorts(); + + std::set legitSet; + + /* Add Phy and Lag ports */ + for (auto &it: all_ports) + { + if (it.second.m_type == Port::PHY || it.second.m_type == Port::LAG) + { + legitSet.insert(it.first); + } + } + + /* Remove any Lag Members PHY's */ + for (auto &it: all_ports) + { + Port& port = it.second; + if (port.m_type == Port::LAG) + { + for (auto mem : port.m_members) + { + /* Remove any members that are part of a LAG */ + legitSet.erase(mem); + } + } + } + + /* Filter Internal ports */ + for (auto& port : legitSet) + { + if (internal_ports.find(port) == internal_ports.end()) + { + bpoints.push_back(port); + } + } + + return bpoints; +} + +string EniFwdCtxBase::getNbrAlias(const swss::IpAddress& nh_ip) +{ + auto itr = nh_alias_map_.find(nh_ip); + if (itr != nh_alias_map_.end()) + { + return itr->second; + } + + auto alias = this->getRouterIntfsAlias(nh_ip); + if (!alias.empty()) + { + nh_alias_map_.insert(std::pair(nh_ip, alias)); + } + return alias; +} + +bool EniFwdCtxBase::handleTunnelNH(const std::string& tunnel_name, swss::IpAddress endpoint, bool create) +{ + SWSS_LOG_ENTER(); + + auto nh_key = endpoint.to_string() + "@" + tunnel_name; + auto nh_itr = remote_nh_map_.find(nh_key); + + /* Delete Tunnel NH if ref_count = 0 */ + if (!create) + { + if (nh_itr != remote_nh_map_.end()) + { + 
if(!--nh_itr->second.first) + { + remote_nh_map_.erase(nh_key); + return removeNextHopTunnel(tunnel_name, endpoint); + } + } + return true; + } + + if (nh_itr == remote_nh_map_.end()) + { + /* Create a tunnel NH */ + auto nh_oid = createNextHopTunnel(tunnel_name, endpoint); + if (nh_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to create Tunnel Next Hop, name: %s. endpoint %s", tunnel_name.c_str(), + endpoint.to_string().c_str()); + } + remote_nh_map_.insert(make_pair(nh_key, make_pair(0, nh_oid))); + nh_itr = remote_nh_map_.find(nh_key); + } + nh_itr->second.first++; /* Increase the ref count, indicates number of rules using referencing this */ + return true; +} + +IpPrefix EniFwdCtxBase::getVip() +{ + SWSS_LOG_ENTER(); + + if (!vip_inferred_) + { + std::vector keys; + vip_tbl_->getKeys(keys); + if (keys.empty()) + { + SWSS_LOG_THROW("Invalid Config: VIP info not populated"); + } + + try + { + vip = IpPrefix(keys[0]); + SWSS_LOG_NOTICE("VIP found: %s", vip.to_string().c_str()); + } + catch (std::exception& e) + { + SWSS_LOG_THROW("VIP is not formatted correctly %s", keys[0].c_str()); + } + vip_inferred_ = true; + } + return vip; +} + + +void EniFwdCtx::initialize() +{ + portsorch_ = gDirectory.get(); + neighorch_ = gDirectory.get(); + intfsorch_ = gDirectory.get(); + vnetorch_ = gDirectory.get(); + vxlanorch_ = gDirectory.get(); + assert(portsorch_); + assert(neighorch_); + assert(intfsorch_); + assert(vnetorch_); + assert(vxlanorch_); +} + +bool EniFwdCtx::isNeighborResolved(const NextHopKey& nh) +{ + return neighorch_->isNeighborResolved(nh); +} + +void EniFwdCtx::resolveNeighbor(const NeighborEntry& nh) +{ + /* Neighorch already has the logic to handle the duplicate requests */ + neighorch_->resolveNeighbor(nh); +} + +string EniFwdCtx::getRouterIntfsAlias(const IpAddress &ip, const string &vrf_name) +{ + return intfsorch_->getRouterIntfsAlias(ip, vrf_name); +} + +bool EniFwdCtx::findVnetVni(const string& vnet_name, uint64_t& vni) +{ + if 
(vnetorch_->isVnetExists(vnet_name)) + { + vni = vnetorch_->getTypePtr(vnet_name)->getVni(); + return true; + } + return false; +} + +bool EniFwdCtx::findVnetTunnel(const string& vnet_name, string& tunnel) +{ + if (vnetorch_->isVnetExists(vnet_name)) + { + tunnel = vnetorch_->getTunnelName(vnet_name); + return true; + } + return false; +} + +std::map& EniFwdCtx::getAllPorts() +{ + return portsorch_->getAllPorts(); +} + +sai_object_id_t EniFwdCtx::createNextHopTunnel(string tunnel_name, IpAddress ip_addr) +{ + return safetyWrapper(vxlanorch_, &VxlanTunnelOrch::createNextHopTunnel, + (sai_object_id_t)SAI_NULL_OBJECT_ID, + (string)tunnel_name, ip_addr, + MacAddress(), + (uint32_t)0); +} + +bool EniFwdCtx::removeNextHopTunnel(string tunnel_name, IpAddress ip_addr) +{ + return safetyWrapper(vxlanorch_, &VxlanTunnelOrch::removeNextHopTunnel, + false, + (std::string)tunnel_name, ip_addr, + MacAddress(), + (uint32_t)0); +} diff --git a/orchagent/dash/dashenifwdorch.h b/orchagent/dash/dashenifwdorch.h new file mode 100644 index 0000000000..4ddba28a88 --- /dev/null +++ b/orchagent/dash/dashenifwdorch.h @@ -0,0 +1,392 @@ +#pragma once + +#include +#include +#include "producerstatetable.h" +#include "orch.h" +#include "portsorch.h" +#include "aclorch.h" +#include "neighorch.h" +#include "vnetorch.h" +#include "observer.h" +#include "request_parser.h" +#include +#include + +// TODO: remove after the schema is finalized and added to swss-common +#define CFG_VIP_TABLE_TMP "VIP_TABLE" + +#define ENI_REDIRECT_TABLE_TYPE "ENI_REDIRECT" +#define ENI_REDIRECT_TABLE "ENI" +#define ENI_FWD_VDPU_IDS "vdpu_ids" +#define ENI_FWD_PRIMARY "primary_vdpu" +#define ENI_FWD_OUT_VNI "outbound_vni" +#define ENI_FWD_OUT_MAC_LOOKUP "outbound_eni_mac_lookup" + +#define DPU_TYPE "type" +#define DPU_STATE "state" +#define DPU_PA_V4 "pa_ipv4" +#define DPU_PA_V6 "pa_ipv6" +#define DPU_NPU_V4 "npu_ipv4" +#define DPU_NPU_V6 "npu_ipv6" + +#define DPU_LOCAL "local" +#define OUT_MAC_DIR "dst" + + + +const 
request_description_t eni_dash_fwd_desc = { + { REQ_T_STRING, REQ_T_MAC_ADDRESS }, // VNET_NAME, ENI_ID + { + { ENI_FWD_VDPU_IDS, REQ_T_UINT_LIST }, // DPU ID's + { ENI_FWD_PRIMARY, REQ_T_UINT }, + { ENI_FWD_OUT_VNI, REQ_T_UINT }, + { ENI_FWD_OUT_MAC_LOOKUP, REQ_T_STRING }, + }, + { } +}; + +const request_description_t dpu_table_desc = { + { REQ_T_UINT }, // DPU_ID + { + { DPU_TYPE, REQ_T_STRING }, + { DPU_STATE, REQ_T_STRING }, + { DPU_PA_V4, REQ_T_IP }, + { DPU_PA_V6, REQ_T_IP }, + { DPU_NPU_V4, REQ_T_IP }, + { DPU_NPU_V6, REQ_T_IP }, + }, + { DPU_TYPE, DPU_PA_V4, DPU_NPU_V4 } +}; + +typedef enum +{ + LOCAL, + CLUSTER, + EXTERNAL +} dpu_type_t; + +typedef enum +{ + RESOLVED, + UNRESOLVED +} endpoint_status_t; + +typedef enum +{ + FAILED, + PENDING, + INSTALLED, + UNINSTALLED +} rule_state_t; + +typedef enum +{ + INVALID, + IDEMPOTENT, + CREATE, + PRIMARY_UPDATE /* Either NH update or primary endp change */ +} update_type_t; + +typedef enum +{ + INBOUND = 0, + OUTBOUND, + INBOUND_TERM, + OUTBOUND_TERM +} rule_type_t; + + +class DpuRegistry; +class EniNH; +class LocalEniNH; +class RemoteEniNH; +class EniAclRule; +class EniInfo; +class EniFwdCtxBase; +class EniFwdCtx; + + +class DashEniFwdOrch : public Orch2, public Observer +{ +public: + struct EniFwdRequest : public Request + { + EniFwdRequest() : Request(eni_dash_fwd_desc, ':') {} + }; + + DashEniFwdOrch(swss::DBConnector*, swss::DBConnector*, const std::string&, NeighOrch* neigh_orch_); + ~DashEniFwdOrch(); + + /* Refresh the ENIs based on NextHop status */ + void update(SubjectType, void *) override; + +protected: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + EniFwdRequest request_; + +private: + void lazyInit(); + void initAclTableCfg(); + void initLocalEndpoints(); + void handleNeighUpdate(const NeighborUpdate& update); + void handleEniDpuMapping(uint64_t id, MacAddress mac, bool add = true); + + /* multimap because Multiple ENIs can be mapped to 
the same DPU */ + std::multimap dpu_eni_map_; + /* Local Endpoint -> DPU mapping */ + std::map neigh_dpu_map_; + std::map eni_container_; + + bool ctx_initialized_ = false; + shared_ptr ctx; + unique_ptr acl_table_; + unique_ptr acl_table_type_; + NeighOrch* neighorch_; +}; + + +class DpuRegistry +{ +public: + struct DpuData + { + dpu_type_t type; + swss::IpAddress pa_v4; + swss::IpAddress npu_v4; + }; + + struct DpuRequest : public Request + { + DpuRequest() : Request(dpu_table_desc, '|' ) { } + }; + + void populate(Table*); + std::vector getIds(); + bool getType(uint64_t id, dpu_type_t& val); + bool getPaV4(uint64_t id, swss::IpAddress& val); + bool getNpuV4(uint64_t id, swss::IpAddress& val); + +private: + void processDpuTable(const KeyOpFieldsValuesTuple& ); + + std::vector dpus_ids_; + DpuRequest dpu_request_; + map dpus_; +}; + + +class EniNH +{ +public: + static std::unique_ptr createNextHop(dpu_type_t, const swss::IpAddress&); + + EniNH(const swss::IpAddress& ip) : endpoint_(ip) {} + void setStatus(endpoint_status_t status) {status_ = status;} + void setType(dpu_type_t type) {type_ = type;} + endpoint_status_t getStatus() {return status_;} + dpu_type_t getType() {return type_;} + swss::IpAddress getEp() {return endpoint_;} + + virtual void resolve(EniInfo& eni) = 0; + virtual void destroy(EniInfo& eni) = 0; + virtual string getRedirectVal() = 0; + +protected: + endpoint_status_t status_; + dpu_type_t type_; + swss::IpAddress endpoint_; +}; + + +class LocalEniNH : public EniNH +{ +public: + LocalEniNH(const swss::IpAddress& ip) : EniNH(ip) + { + setStatus(endpoint_status_t::UNRESOLVED); + setType(dpu_type_t::LOCAL); + } + void resolve(EniInfo& eni) override; + void destroy(EniInfo& eni) {} + string getRedirectVal() override; +}; + + +class RemoteEniNH : public EniNH +{ +public: + RemoteEniNH(const swss::IpAddress& ip) : EniNH(ip) + { + /* No BFD monitoring for Remote NH yet */ + setStatus(endpoint_status_t::UNRESOLVED); + setType(dpu_type_t::CLUSTER); + } + 
void resolve(EniInfo& eni) override; + void destroy(EniInfo& eni) override; + string getRedirectVal() override; + +private: + string tunnel_name_; +}; + + +class EniAclRule +{ +public: + static const int BASE_PRIORITY; + static const std::vector RULE_NAMES; + + EniAclRule(rule_type_t type, EniInfo& eni) : + type_(type), + state_(rule_state_t::PENDING) { setKey(eni); } + + void destroy(EniInfo&); + void fire(EniInfo&); + + update_type_t processUpdate(EniInfo& eni); + std::string getKey() {return name_; } + string getMacMatchDirection(EniInfo& eni); + void setState(rule_state_t state); + +private: + void setKey(EniInfo&); + std::unique_ptr nh_ = nullptr; + std::string name_; + rule_type_t type_; + rule_state_t state_; +}; + + +class EniInfo +{ +public: + friend class DashEniFwdOrch; /* Only orch is expected to call create/update/fire */ + + EniInfo(const std::string&, const std::string&, const shared_ptr&); + EniInfo(const EniInfo&) = delete; + EniInfo& operator=(const EniInfo&) = delete; + EniInfo(EniInfo&&) = delete; + EniInfo& operator=(EniInfo&&) = delete; + + string toKey() const; + std::shared_ptr& getCtx() {return ctx;} + bool findLocalEp(uint64_t&) const; + swss::MacAddress getMac() const { return mac_; } // Can only be set during object creation + std::vector getEpList() { return ep_list_; } + uint64_t getPrimaryId() const { return primary_id_; } + uint64_t getOutVni() const { return outbound_vni_; } + std::string getOutMacLookup() const { return outbound_mac_lookup_; } + std::string getVnet() const { return vnet_name_; } + +protected: + void formatMac(); + void fireRule(rule_type_t); + void fireAllRules(); + bool create(const Request&); + bool destroy(const Request&); + bool update(const Request& ); + bool update(const NeighborUpdate&); + + std::shared_ptr ctx; + std::map rule_container_; + std::vector ep_list_; + uint64_t primary_id_; + uint64_t outbound_vni_; + std::string outbound_mac_lookup_; + std::string vnet_name_; + swss::MacAddress mac_; + 
std::string mac_key_; // Formatted MAC key +}; + + +/* + Collection of API's used across DashEniFwdOrch +*/ +class EniFwdCtxBase +{ +public: + EniFwdCtxBase(DBConnector* cfgDb, DBConnector* applDb); + void populateDpuRegistry(); + std::vector getBindPoints(); + std::string getNbrAlias(const swss::IpAddress& ip); + bool handleTunnelNH(const std::string&, swss::IpAddress, bool); + swss::IpPrefix getVip(); + + virtual void initialize() = 0; + /* API's that call other orchagents */ + virtual std::map& getAllPorts() = 0; + virtual bool isNeighborResolved(const NextHopKey&) = 0; + virtual void resolveNeighbor(const NeighborEntry &) = 0; + virtual string getRouterIntfsAlias(const IpAddress &, const string & = "") = 0; + virtual bool findVnetVni(const std::string&, uint64_t& ) = 0; + virtual bool findVnetTunnel(const std::string&, string&) = 0; + virtual sai_object_id_t createNextHopTunnel(string, IpAddress) = 0; + virtual bool removeNextHopTunnel(string, IpAddress) = 0; + + DpuRegistry dpu_info; + unique_ptr rule_table; +protected: + std::set findInternalPorts(); + + /* RemoteNh Key -> {ref_count, sai oid} */ + std::map> remote_nh_map_; + /* Mapping between DPU Nbr and Alias */ + std::map nh_alias_map_; + + unique_ptr port_tbl_; + unique_ptr vip_tbl_; + unique_ptr dpu_tbl_; + + /* Only one vip is expected per T1 */ + swss::IpPrefix vip; + bool vip_inferred_; +}; + + +/* + Wrapper on API's that are throwable +*/ +template +R safetyWrapper(C* ptr, R (C::*func)(Args...), R defaultValue, Args&&... args) +{ + SWSS_LOG_ENTER(); + try + { + return (ptr->*func)(std::forward(args)...); + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Exception thrown.. 
%s", e.what()); + return defaultValue; + } +} + + +/* + Implements API's to access other orchagents +*/ +class EniFwdCtx : public EniFwdCtxBase +{ +public: + using EniFwdCtxBase::EniFwdCtxBase; + + /* Setup pointers to other orchagents */ + void initialize() override; + bool isNeighborResolved(const NextHopKey&) override; + void resolveNeighbor(const NeighborEntry&) override; + std::string getRouterIntfsAlias(const IpAddress &, const string & = "") override; + bool findVnetVni(const std::string&, uint64_t&) override; + bool findVnetTunnel(const std::string&, string&) override; + std::map& getAllPorts() override; + virtual sai_object_id_t createNextHopTunnel(string, IpAddress) override; + virtual bool removeNextHopTunnel(string, IpAddress) override; + +private: + PortsOrch* portsorch_; + NeighOrch* neighorch_; + IntfsOrch* intfsorch_; + VNetOrch* vnetorch_; + VxlanTunnelOrch* vxlanorch_; +}; diff --git a/orchagent/dash/dashorch.cpp b/orchagent/dash/dashorch.cpp index d9aac53ce4..ffe06c1d56 100644 --- a/orchagent/dash/dashorch.cpp +++ b/orchagent/dash/dashorch.cpp @@ -85,6 +85,11 @@ bool DashOrch::getRouteTypeActions(dash::route_type::RoutingType routing_type, d return true; } +bool DashOrch::hasApplianceEntry() +{ + return !appliance_entries_.empty(); +} + bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::appliance::Appliance &entry) { SWSS_LOG_ENTER(); @@ -94,6 +99,11 @@ bool DashOrch::addApplianceEntry(const string& appliance_id, const dash::applian SWSS_LOG_WARN("Appliance Entry already exists for %s", appliance_id.c_str()); return true; } + if (!appliance_entries_.empty()) + { + SWSS_LOG_ERROR("Appliance entry is a singleton and already exists"); + return false; + } uint32_t attr_count = 1; sai_attribute_t appliance_attr; diff --git a/orchagent/dash/dashorch.h b/orchagent/dash/dashorch.h index dfc9d7df65..ac607a27e9 100644 --- a/orchagent/dash/dashorch.h +++ b/orchagent/dash/dashorch.h @@ -53,6 +53,7 @@ class DashOrch : public ZmqOrch 
const EniEntry *getEni(const std::string &eni) const; bool getRouteTypeActions(dash::route_type::RoutingType routing_type, dash::route_type::RouteType& route_type); void handleFCStatusUpdate(bool is_enabled); + bool hasApplianceEntry(); private: ApplianceTable appliance_entries_; diff --git a/orchagent/dash/dashvnetorch.cpp b/orchagent/dash/dashvnetorch.cpp index 512b03396d..54051eb78a 100644 --- a/orchagent/dash/dashvnetorch.cpp +++ b/orchagent/dash/dashvnetorch.cpp @@ -56,6 +56,12 @@ bool DashVnetOrch::addVnet(const string& vnet_name, DashVnetBulkContext& ctxt) SWSS_LOG_WARN("Vnet already exists for %s", vnet_name.c_str()); return true; } + DashOrch* dash_orch = gDirectory.get(); + if (!dash_orch->hasApplianceEntry()) + { + SWSS_LOG_INFO("Retry as no appliance table entry found"); + return false; + } uint32_t attr_count = 1; auto& object_ids = ctxt.object_ids; diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index 98236be7d5..1993cc61b9 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -1205,6 +1205,12 @@ void FdbOrch::updatePortOperState(const PortOperStateUpdate& update) if (update.operStatus == SAI_PORT_OPER_STATUS_DOWN) { swss::Port p = update.port; + if (gMlagOrch->isMlagInterface(p.m_alias)) + { + SWSS_LOG_NOTICE("Ignoring fdb flush on MCLAG port:%s", p.m_alias.c_str()); + return; + } + if (p.m_bridge_port_id != SAI_NULL_OBJECT_ID) { flushFDBEntries(p.m_bridge_port_id, SAI_NULL_OBJECT_ID); diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index 635c757601..36fba9c7cf 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -5,7 +5,6 @@ #include "schema.h" #include "rediscommand.h" #include "logger.h" -#include "sai_serialize.h" #include @@ -20,14 +19,13 @@ using swss::ProducerTable; extern sai_switch_api_t *sai_switch_api; -extern sai_object_id_t gSwitchId; - const string FLEX_COUNTER_ENABLE("enable"); const 
string FLEX_COUNTER_DISABLE("disable"); const unordered_map FlexCounterManager::stats_mode_lookup = { { StatsMode::READ, STATS_MODE_READ }, + { StatsMode::READ_AND_CLEAR, STATS_MODE_READ_AND_CLEAR }, }; const unordered_map FlexCounterManager::status_lookup = @@ -42,6 +40,8 @@ const unordered_map FlexCounterManager::counter_id_field_lo { CounterType::SWITCH_DEBUG, SWITCH_DEBUG_COUNTER_ID_LIST }, { CounterType::PORT, PORT_COUNTER_ID_LIST }, { CounterType::QUEUE, QUEUE_COUNTER_ID_LIST }, + { CounterType::QUEUE_ATTR, QUEUE_ATTR_ID_LIST }, + { CounterType::PRIORITY_GROUP, PG_COUNTER_ID_LIST }, { CounterType::MACSEC_SA_ATTR, MACSEC_SA_ATTR_ID_LIST }, { CounterType::MACSEC_SA, MACSEC_SA_COUNTER_ID_LIST }, { CounterType::MACSEC_FLOW, MACSEC_FLOW_COUNTER_ID_LIST }, @@ -259,7 +259,7 @@ string FlexCounterManager::getFlexCounterTableKey( // serializeCounterStats turns a set of stats into a format suitable for FLEX_COUNTER_DB. string FlexCounterManager::serializeCounterStats( - const unordered_set& counter_stats) const + const unordered_set& counter_stats) { SWSS_LOG_ENTER(); diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h index b9e6e4c487..1ad566caaf 100644 --- a/orchagent/flex_counter/flex_counter_manager.h +++ b/orchagent/flex_counter/flex_counter_manager.h @@ -9,6 +9,9 @@ #include "producertable.h" #include "table.h" #include +#include +#include "sai_serialize.h" +#include "saihelper.h" extern "C" { #include "sai.h" @@ -16,13 +19,16 @@ extern "C" { enum class StatsMode { - READ + READ, + READ_AND_CLEAR }; enum class CounterType { PORT, QUEUE, + QUEUE_ATTR, + PRIORITY_GROUP, PORT_DEBUG, SWITCH_DEBUG, MACSEC_SA_ATTR, @@ -35,6 +41,10 @@ enum class CounterType ENI }; +extern bool gTraditionalFlexCounter; +extern sai_object_id_t gSwitchId; + +struct CachedObjects; // FlexCounterManager allows users to manage a group of flex counters. 
// // TODO: FlexCounterManager doesn't currently support the full range of @@ -42,6 +52,7 @@ enum class CounterType // counters and support for plugins needs to be added. class FlexCounterManager { + friend struct CachedObjects; public: FlexCounterManager( const std::string& group_name, @@ -69,12 +80,12 @@ class FlexCounterManager void enableFlexCounterGroup(); void disableFlexCounterGroup(); - void setCounterIdList( + virtual void setCounterIdList( const sai_object_id_t object_id, const CounterType counter_type, const std::unordered_set& counter_stats, const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID); - void clearCounterIdList(const sai_object_id_t object_id); + virtual void clearCounterIdList(const sai_object_id_t object_id); const std::string& getGroupName() const { @@ -99,12 +110,9 @@ class FlexCounterManager protected: void applyGroupConfiguration(); - private: std::string getFlexCounterTableKey( const std::string& group_name, const sai_object_id_t object_id) const; - std::string serializeCounterStats( - const std::unordered_set& counter_stats) const; std::string group_name; StatsMode stats_mode; @@ -114,11 +122,235 @@ class FlexCounterManager std::unordered_map installed_counters; bool is_gearbox; + static std::string serializeCounterStats( + const std::unordered_set& counter_stats); + static const std::unordered_map stats_mode_lookup; static const std::unordered_map status_lookup; static const std::unordered_map counter_id_field_lookup; }; +struct CachedObjects +{ + CounterType pending_counter_type; + sai_object_id_t pending_switch_id; + std::unordered_set pending_counter_stats; + std::unordered_set pending_sai_objects; + + bool try_cache(const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + sai_object_id_t switch_id) + { + if (pending_sai_objects.empty()) + { + pending_counter_type = counter_type; + pending_switch_id = switch_id; + // Just to avoid recreating counter IDs + if 
(pending_counter_stats != counter_stats) + { + pending_counter_stats = counter_stats; + } + } + else if (counter_type != pending_counter_type || + switch_id != pending_switch_id || + counter_stats != pending_counter_stats) + { + return false; + } + + cache(object_id); + + return true; + } + + bool is_cached(const sai_object_id_t object_id) + { + return pending_sai_objects.find(object_id) != pending_sai_objects.end(); + } + + void flush(const std::string &group_name) + { + if (pending_sai_objects.empty()) + { + return; + } + + auto counter_ids = FlexCounterManager::serializeCounterStats(pending_counter_stats); + auto counter_type_it = FlexCounterManager::counter_id_field_lookup.find(pending_counter_type); + + auto counter_keys = group_name + ":"; + for (const auto& oid: pending_sai_objects) + { + counter_keys += sai_serialize_object_id(oid) + ","; + } + counter_keys.pop_back(); + + startFlexCounterPolling(pending_switch_id, counter_keys, counter_ids, counter_type_it->second); + + pending_sai_objects.clear(); + } + + void cache(sai_object_id_t object_id) + { + pending_sai_objects.emplace(object_id); + } +}; + +class FlexCounterCachedManager : public FlexCounterManager +{ + public: + FlexCounterCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + virtual void flush() + { + } + + protected: + void flush(const std::string &group_name, struct CachedObjects &cached_objects) + { + cached_objects.flush(group_name); + } + + void setCounterIdList( + struct CachedObjects &cached_objects, + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + if (gTraditionalFlexCounter) + { + // Unable to cache an object and initialize in bulk in 
traditional flex counter mode + FlexCounterManager::setCounterIdList(object_id, counter_type, counter_stats, switch_id); + return; + } + + auto effective_switch_id = switch_id == SAI_NULL_OBJECT_ID ? gSwitchId : switch_id; + installed_counters[object_id] = effective_switch_id; + if (cached_objects.try_cache(object_id, counter_type, counter_stats, effective_switch_id)) + { + return; + } + else + { + flush(group_name, cached_objects); + cached_objects.cache(object_id); + } + } + + void clearCounterIdList( + struct CachedObjects &cached_objects, + const sai_object_id_t object_id) + { + auto search = cached_objects.pending_sai_objects.find(object_id); + if (search == cached_objects.pending_sai_objects.end()) + { + FlexCounterManager::clearCounterIdList(object_id); + } + else + { + installed_counters.erase(object_id); + cached_objects.pending_sai_objects.erase(search); + } + } +}; + +template +class FlexCounterTaggedCachedManager : public FlexCounterCachedManager +{ + public: + FlexCounterTaggedCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterCachedManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + void flush() + { + FlexCounterCachedManager::flush(group_name, cached_objects); + } + + virtual void setCounterIdList( + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + FlexCounterCachedManager::setCounterIdList(cached_objects, + object_id, + counter_type, + counter_stats); + } + + virtual void clearCounterIdList( + const sai_object_id_t object_id) + { + FlexCounterCachedManager::clearCounterIdList(cached_objects, object_id); + } + + private: + struct CachedObjects cached_objects; +}; + +template +class FlexCounterTaggedCachedManager::value>> : public 
FlexCounterCachedManager +{ + public: + FlexCounterTaggedCachedManager( + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")) : + FlexCounterCachedManager(group_name, stats_mode, polling_interval, enabled, fv_plugin) + { + } + + void flush() + { + for(auto &it : cached_objects) + { + FlexCounterCachedManager::flush(group_name, it.second); + } + } + + void setCounterIdList( + const sai_object_id_t object_id, + const CounterType counter_type, + const std::unordered_set& counter_stats, + const TagType tag, + const sai_object_id_t switch_id=SAI_NULL_OBJECT_ID) + { + FlexCounterCachedManager::setCounterIdList(cached_objects[tag], + object_id, + counter_type, + counter_stats); + } + + void clearCounterIdList( + const sai_object_id_t object_id, + const TagType tag) + { + FlexCounterCachedManager::clearCounterIdList(cached_objects[tag], object_id); + } + + private: + std::map cached_objects; +}; + class FlexManagerDirectory { public: diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index aefb475cf5..2ab009e727 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -271,6 +271,7 @@ void FlexCounterOrch::doTask(Consumer &consumer) } } + gPortsOrch->flushCounters(); setFlexCounterGroupOperation(flexCounterGroupMap[key], value); if (gPortsOrch && gPortsOrch->isGearboxEnabled()) diff --git a/orchagent/main.cpp b/orchagent/main.cpp index 5079250dea..2e84f4ea04 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -67,6 +67,7 @@ extern bool gIsNatSupported; #define HEART_BEAT_INTERVAL_MSECS_DEFAULT 10 * 1000 string gMySwitchType = ""; +string gMySwitchSubType = ""; int32_t gVoqMySwitchId = -1; int32_t gVoqMaxCores = 0; uint32_t gCfgSystemPorts = 0; @@ -166,7 +167,7 @@ void init_gearbox_phys(DBConnector *applDb) delete tmpGearboxTable; } -void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) 
+void getCfgSwitchType(DBConnector *cfgDb, string &switch_type, string &switch_sub_type) { Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); @@ -190,6 +191,16 @@ void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) //If configured switch type is none of the supported, assume regular switch switch_type = "switch"; } + + try + { + cfgDeviceMetaDataTable.hget("localhost", "subtype", switch_sub_type); + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error in parsing switch subtype: %s", e.what()); + } + } bool getSystemPortConfigList(DBConnector *cfgDb, DBConnector *appDb, vector &sysportcfglist) @@ -529,7 +540,7 @@ int main(int argc, char **argv) } // Get switch_type - getCfgSwitchType(&config_db, gMySwitchType); + getCfgSwitchType(&config_db, gMySwitchType, gMySwitchSubType); sai_attribute_t attr; vector attrs; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index b21e17a058..5b7f1c68f6 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -26,6 +26,7 @@ using namespace swss; extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; extern string gMySwitchType; +extern string gMySwitchSubType; extern void syncd_apply_view(); /* @@ -256,7 +257,9 @@ bool OrchDaemon::init() gDirectory.set(chassis_frontend_orch); gIntfsOrch = new IntfsOrch(m_applDb, APP_INTF_TABLE_NAME, vrf_orch, m_chassisAppDb); + gDirectory.set(gIntfsOrch); gNeighOrch = new NeighOrch(m_applDb, APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassisAppDb); + gDirectory.set(gNeighOrch); const int fgnhgorch_pri = 15; @@ -605,6 +608,13 @@ bool OrchDaemon::init() m_orchList.push_back(gFabricPortsOrch); } + if (gMySwitchSubType == "SmartSwitch") + { + DashEniFwdOrch *dash_eni_fwd_orch = new DashEniFwdOrch(m_configDb, m_applDb, APP_DASH_ENI_FORWARD_TABLE, gNeighOrch); + gDirectory.set(dash_eni_fwd_orch); + m_orchList.push_back(dash_eni_fwd_orch); + } + vector flex_counter_tables = { 
CFG_FLEX_COUNTER_TABLE_NAME }; diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index 32b793c55b..8cc75325db 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -48,6 +48,7 @@ #include "nvgreorch.h" #include "twamporch.h" #include "stporch.h" +#include "dash/dashenifwdorch.h" #include "dash/dashaclorch.h" #include "dash/dashorch.h" #include "dash/dashrouteorch.h" diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index 8857721643..08777ed913 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -8,9 +8,14 @@ extern "C" #include "portsorch.h" -#define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 -#define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 -#define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 +#define PORT_SPEED_LIST_DEFAULT_SIZE 16 +#define PORT_STATE_POLLING_SEC 5 +#define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 +#define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 +#define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 +#define QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) @@ -21,8 +26,12 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector PortsOrch::generateCounterStats(const string &type, bool gearbox) +template +std::unordered_set PortsOrch::generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)) { return {}; } diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 3cd8808097..9ef7f3a870 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -112,30 +112,28 @@ void PfcWdOrch::doTask(Consumer& consumer) break; } } + + if (m_pfcwdFlexCounterManager != nullptr) + { + 
m_pfcwdFlexCounterManager->flush(); + } } } template template -string PfcWdSwOrch::counterIdsToStr( - const vector ids, string (*convert)(T)) +unordered_set PfcWdSwOrch::counterIdsToStr( + const vector ids, string (*convert)(T)) { SWSS_LOG_ENTER(); - - string str; + unordered_set counterIdSet; for (const auto& i: ids) { - str += convert(i) + ","; - } - - // Remove trailing ',' - if (!str.empty()) - { - str.pop_back(); + counterIdSet.emplace(convert(i)); } - return str; + return counterIdSet; } template @@ -345,7 +343,7 @@ task_process_status PfcWdSwOrch::createEntry(const if (field == POLL_INTERVAL_FIELD) { - setFlexCounterGroupPollInterval(PFC_WD_FLEX_COUNTER_GROUP, value); + this->m_pfcwdFlexCounterManager->updateGroupPollingInterval(stoi(value)); } else if (field == BIG_RED_SWITCH_FIELD) { @@ -548,11 +546,8 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, if (!c_portStatIds.empty()) { - string key = getFlexCounterTableKey(sai_serialize_object_id(port.m_port_id)); - string str = counterIdsToStr(c_portStatIds, &sai_serialize_port_stat); - string filteredStr = filterPfcCounters(str, losslessTc); - - startFlexCounterPolling(gSwitchId, key, filteredStr, PORT_COUNTER_ID_LIST); + auto portStatIdSet = filterPfcCounters(counterIdsToStr(c_portStatIds, &sai_serialize_port_stat), losslessTc); + this->m_pfcwdFlexCounterManager->setCounterIdList(port.m_port_id, CounterType::PORT, portStatIdSet, SAI_OBJECT_TYPE_PORT); } for (auto i : losslessTc) @@ -577,14 +572,14 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, if (!c_queueStatIds.empty()) { - string str = counterIdsToStr(c_queueStatIds, sai_serialize_queue_stat); - startFlexCounterPolling(gSwitchId, key, str, QUEUE_COUNTER_ID_LIST); + auto queueStatIdSet = counterIdsToStr(c_queueStatIds, sai_serialize_queue_stat); + this->m_pfcwdFlexCounterManager->setCounterIdList(queueId, CounterType::QUEUE, queueStatIdSet, SAI_OBJECT_TYPE_QUEUE); } if (!c_queueAttrIds.empty()) { - string str = counterIdsToStr(c_queueAttrIds, 
sai_serialize_queue_attr); - startFlexCounterPolling(gSwitchId, key, str, QUEUE_ATTR_ID_LIST); + auto queueAttrIdSet = counterIdsToStr(c_queueAttrIds, sai_serialize_queue_attr); + (dynamic_cast(this->m_pfcwdFlexCounterManager.get()))->setCounterIdList(queueId, CounterType::QUEUE_ATTR, queueAttrIdSet); } // Create internal entry @@ -602,37 +597,31 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, } template -string PfcWdSwOrch::filterPfcCounters(string counters, set& losslessTc) +unordered_set PfcWdSwOrch::filterPfcCounters(const unordered_set &counters, set& losslessTc) { SWSS_LOG_ENTER(); - istringstream is(counters); - string counter; - string filterCounters; + unordered_set filterCounters; - while (getline(is, counter, ',')) + for (auto &counter : counters) { + //auto &counter = it.first; size_t index = 0; index = counter.find(SAI_PORT_STAT_PFC_PREFIX); if (index != 0) { - filterCounters = filterCounters + counter + ","; + filterCounters.emplace(counter); } else { uint8_t tc = (uint8_t)atoi(counter.substr(index + sizeof(SAI_PORT_STAT_PFC_PREFIX) - 1, 1).c_str()); if (losslessTc.count(tc)) { - filterCounters = filterCounters + counter + ","; + filterCounters.emplace(counter); } } } - if (!filterCounters.empty()) - { - filterCounters.pop_back(); - } - return filterCounters; } @@ -649,16 +638,12 @@ void PfcWdSwOrch::unregisterFromWdDb(const Port& po { SWSS_LOG_ENTER(); - string key = getFlexCounterTableKey(sai_serialize_object_id(port.m_port_id)); - stopFlexCounterPolling(gSwitchId, key); + this->m_pfcwdFlexCounterManager->clearCounterIdList(port.m_port_id, SAI_OBJECT_TYPE_PORT); for (uint8_t i = 0; i < PFC_WD_TC_MAX; i++) { sai_object_id_t queueId = port.m_queue_ids[i]; - key = getFlexCounterTableKey(sai_serialize_object_id(queueId)); - - // Unregister in syncd - stopFlexCounterPolling(gSwitchId, key); + this->m_pfcwdFlexCounterManager->clearCounterIdList(queueId, SAI_OBJECT_TYPE_QUEUE); auto entry = m_entryMap.find(queueId); if (entry != m_entryMap.end() && 
entry->second.handler != nullptr) @@ -722,11 +707,8 @@ PfcWdSwOrch::PfcWdSwOrch( SWSS_LOG_WARN("Lua scripts and polling interval for PFC watchdog were not set successfully"); } - setFlexCounterGroupParameter(PFC_WD_FLEX_COUNTER_GROUP, - pollIntervalStr, - STATS_MODE_READ, - QUEUE_PLUGIN_FIELD, - plugins); + this->m_pfcwdFlexCounterManager = make_shared>( + "PFC_WD", StatsMode::READ, m_pollInterval, true, make_pair(QUEUE_PLUGIN_FIELD, plugins)); auto consumer = new swss::NotificationConsumer( this->getCountersDb().get(), @@ -750,7 +732,6 @@ template PfcWdSwOrch::~PfcWdSwOrch(void) { SWSS_LOG_ENTER(); - delFlexCounterGroup(PFC_WD_FLEX_COUNTER_GROUP); } template diff --git a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 261c1e2c3d..dd7d46c1b1 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -64,6 +64,8 @@ class PfcWdOrch: public Orch protected: virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info="") = 0; string m_platform = ""; + shared_ptr> m_pfcwdFlexCounterManager; + private: shared_ptr m_countersDb = nullptr; @@ -117,13 +119,13 @@ class PfcWdSwOrch: public PfcWdOrch }; template - static string counterIdsToStr(const vector ids, string (*convert)(T)); + static unordered_set counterIdsToStr(const vector ids, string (*convert)(T)); bool registerInWdDb(const Port& port, uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action); void unregisterFromWdDb(const Port& port); void doTask(swss::NotificationConsumer &wdNotification); - string filterPfcCounters(string counters, set& losslessTc); + unordered_set filterPfcCounters(const unordered_set &counters, set& losslessTc); string getFlexCounterTableKey(string s); void disableBigRedSwitchMode(); diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 1a3a0018d7..b36a7ccd24 100644 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -78,6 +78,9 @@ extern event_handle_t g_events_handle; #define 
PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 #define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 #define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 +#define QUEUE_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_WATERMARK_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 60000 +#define PG_DROP_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 // types -------------------------------------------------------------------------------------------------------------- @@ -161,6 +164,14 @@ static map pt_timestamp_template { "template4", SAI_PORT_PATH_TRACING_TIMESTAMP_TYPE_20_27 } }; +static map sai_queue_type_string_map = +{ + {SAI_QUEUE_TYPE_ALL, "SAI_QUEUE_TYPE_ALL"}, + {SAI_QUEUE_TYPE_UNICAST, "SAI_QUEUE_TYPE_UNICAST"}, + {SAI_QUEUE_TYPE_MULTICAST, "SAI_QUEUE_TYPE_MULTICAST"}, + {SAI_QUEUE_TYPE_UNICAST_VOQ, "SAI_QUEUE_TYPE_UNICAST_VOQ"}, +}; + const vector port_stat_ids = { SAI_PORT_STAT_IF_IN_OCTETS, @@ -553,8 +564,21 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector(attr[0].value.s32); m_queueInfo[queue_id].index = attr[1].value.u8; + + if (sai_queue_type_string_map.find(m_queueInfo[queue_id].type) == sai_queue_type_string_map.end()) + { + SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id); + throw runtime_error("Got unsupported queue type"); + } } else { @@ -3326,26 +3356,8 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin SWSS_LOG_INFO("Fetched cached information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); } - switch (attr[0].value.s32) - { - case SAI_QUEUE_TYPE_ALL: - type = "SAI_QUEUE_TYPE_ALL"; - break; - case SAI_QUEUE_TYPE_UNICAST: - type = "SAI_QUEUE_TYPE_UNICAST"; - break; - case SAI_QUEUE_TYPE_MULTICAST: - type = "SAI_QUEUE_TYPE_MULTICAST"; - break; - case SAI_QUEUE_TYPE_UNICAST_VOQ: - type = "SAI_QUEUE_TYPE_UNICAST_VOQ"; - break; - default: - SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 
" queue", attr[0].value.s32, queue_id); - throw runtime_error("Got unsupported queue type"); - } - index = attr[1].value.u8; + queue_type = static_cast(attr[0].value.s32); return true; } @@ -3731,10 +3743,10 @@ bool PortsOrch::initPort(const PortConfig &port) If they are enabled, install the counters immediately */ if (flex_counters_orch->getPortCountersState()) { - auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + auto port_counter_stats = generateCounterStats(port_stat_ids, sai_serialize_port_stat); port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_counter_stats); - auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); + auto gbport_counter_stats = generateCounterStats(gbport_stat_ids, sai_serialize_port_stat); if (p.m_system_side_id) gb_port_stat_manager.setCounterIdList(p.m_system_side_id, CounterType::PORT, gbport_counter_stats, p.m_switch_id); @@ -3744,13 +3756,13 @@ bool PortsOrch::initPort(const PortConfig &port) } if (flex_counters_orch->getPortBufferDropCountersState()) { - auto port_buffer_drop_stats = generateCounterStats(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP); + auto port_buffer_drop_stats = generateCounterStats(port_buffer_drop_stat_ids, sai_serialize_port_stat); port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); } if (flex_counters_orch->getWredPortCountersState()) { - auto wred_port_stats = generateCounterStats(WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + auto wred_port_stats = generateCounterStats(wred_port_stat_ids, sai_serialize_port_stat); wred_port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, wred_port_stats); } @@ -4885,7 +4897,7 @@ void PortsOrch::doPortTask(Consumer &consumer) initializePortOperErrors(p); // Restore admin status if the port was brought down - if (admin_status != p.m_admin_state_up) + if (admin_status != p.m_admin_state_up && pCfg.admin_status.is_set == 
false) { pCfg.admin_status.is_set = true; pCfg.admin_status.value = admin_status; @@ -5827,6 +5839,7 @@ void PortsOrch::doTask(Consumer &consumer) else if (table_name == APP_PORT_TABLE_NAME) { doPortTask(consumer); + flushCounters(); } else if (table_name == APP_SEND_TO_INGRESS_PORT_TABLE_NAME) { @@ -7313,7 +7326,7 @@ bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) m_portList[lag.m_alias] = lag; - if (lag.m_bridge_port_id > 0) + if ((lag.m_bridge_port_id > 0)||(!lag.m_child_ports.empty())) { if (!setHostIntfsStripTag(port, SAI_HOSTIF_VLAN_TAG_KEEP)) { @@ -7361,7 +7374,7 @@ bool PortsOrch::removeLagMember(Port &lag, Port &port) lag.m_members.erase(port.m_alias); m_portList[lag.m_alias] = lag; - if (lag.m_bridge_port_id > 0) + if ((lag.m_bridge_port_id > 0)||(!lag.m_child_ports.empty())) { if (!setHostIntfsStripTag(port, SAI_HOSTIF_VLAN_TAG_STRIP)) { @@ -7597,7 +7610,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates const auto id = sai_serialize_object_id(queue_ids[queueIndex]); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -7607,7 +7620,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates { continue; } - queueTypeVector.emplace_back(id, queueType); + queueTypeVector.emplace_back(id, sai_queue_type_string_map[queueType]); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } @@ -7617,7 +7630,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates // Install a flex counter for this voq to track stats. Voq counters do // not have buffer queue config. So it does not get enabled through the // flexcounter orch logic. Always enabled voq counters. 
- addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true, queueType); queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); } else @@ -7630,7 +7643,7 @@ void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates // counter on voq systems. if (gMySwitchType == "voq") { - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); } @@ -7696,7 +7709,7 @@ void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueSt { for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -7705,12 +7718,12 @@ void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueSt continue; } // Install a flex counter for this queue to track stats - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } } } -void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq) +void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType) { std::unordered_set counter_stats; std::vector queue_ids; @@ -7732,7 +7745,7 @@ void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_ queue_ids = port.m_queue_ids; } - queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats, queueType); } @@ -7782,7 +7795,7 @@ void 
PortsOrch::addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCount for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -7790,28 +7803,15 @@ void PortsOrch::addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCount { continue; } - addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex, queueType); } } } -void PortsOrch::addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex) +void PortsOrch::addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, sai_queue_type_t queueType) { - const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - - /* add watermark queue counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - - string delimiter(""); - std::ostringstream counters_stream; - for (const auto& it: queueWatermarkStatIds) - { - counters_stream << delimiter << sai_serialize_queue_stat(it); - delimiter = comma; - } - auto &&counters_str = counters_stream.str(); - - startFlexCounterPolling(gSwitchId, key, counters_str, QUEUE_COUNTER_ID_LIST); + auto queue_counter_stats = generateCounterStats(queueWatermarkStatIds, sai_serialize_queue_stat); + queue_watermark_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, queue_counter_stats, queueType); } void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues, bool skip_host_tx_queue) @@ -7844,11 +7844,11 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues, b const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, 
queueRealIndex)) { - queueTypeVector.emplace_back(id, queueType); + queueTypeVector.emplace_back(id, sai_queue_type_string_map[queueType]); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } @@ -7859,18 +7859,18 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues, b if (flexCounterOrch->getQueueCountersState()) { // Install a flex counter for this queue to track stats - addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } if (flexCounterOrch->getQueueWatermarkCountersState()) { /* add watermark queue counters */ - addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex, queueType); } if (flexCounterOrch->getWredQueueCountersState()) { /* add wred queue counters */ - addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } } @@ -7911,7 +7911,7 @@ void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues, b m_queueTable->hdel("", name.str()); m_queuePortTable->hdel("", id); - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -7923,19 +7923,18 @@ void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues, b if (flexCounterOrch->getQueueCountersState()) { // Remove the flex counter for this queue - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); } if (flexCounterOrch->getQueueWatermarkCountersState()) { // Remove watermark queue counters - string key = getQueueWatermarkFlexCounterTableKey(id); - stopFlexCounterPolling(gSwitchId, key); + 
queue_watermark_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); } if (flexCounterOrch->getWredQueueCountersState()) { /* Remove wred queue counters */ - wred_queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + wred_queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex], queueType); } } @@ -8108,22 +8107,8 @@ void PortsOrch::addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounte void PortsOrch::addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) { - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - string delimiter = ""; - std::ostringstream ingress_pg_drop_packets_counters_stream; - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) - { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) - { - delimiter = comma; - } - } - auto &&counters_str = ingress_pg_drop_packets_counters_stream.str(); - startFlexCounterPolling(gSwitchId, key, counters_str, PG_COUNTER_ID_LIST); + auto pg_counter_stats = generateCounterStats(ingressPriorityGroupDropStatIds, sai_serialize_ingress_priority_group_stat); + pg_drop_stat_manager.setCounterIdList(port.m_priority_group_ids[pgIndex], CounterType::PRIORITY_GROUP, pg_counter_stats); } void PortsOrch::addPriorityGroupWatermarkFlexCounters(map pgsStateVector) @@ -8178,22 +8163,8 @@ void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, F void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) { - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - 
for (const auto& it: ingressPriorityGroupWatermarkStatIds) - { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - delimiter = comma; - } - - auto &&counters_str = counters_stream.str(); - - startFlexCounterPolling(gSwitchId, key, counters_str, PG_COUNTER_ID_LIST); + auto pg_counter_stats = generateCounterStats(ingressPriorityGroupWatermarkStatIds, sai_serialize_ingress_priority_group_stat); + pg_watermark_manager.setCounterIdList(port.m_priority_group_ids[pgIndex], CounterType::PRIORITY_GROUP, pg_counter_stats); } void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) @@ -8225,15 +8196,13 @@ void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) if (flexCounterOrch->getPgCountersState()) { // Remove dropped packets counters from flex_counter - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - stopFlexCounterPolling(gSwitchId, key); + pg_drop_stat_manager.clearCounterIdList(port.m_priority_group_ids[pgIndex]); } if (flexCounterOrch->getPgWatermarkCountersState()) { // Remove watermark counters from flex_counter - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - stopFlexCounterPolling(gSwitchId, key); + pg_watermark_manager.clearCounterIdList(port.m_priority_group_ids[pgIndex]); } } @@ -8247,8 +8216,8 @@ void PortsOrch::generatePortCounterMap() return; } - auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); - auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); + auto port_counter_stats = generateCounterStats(port_stat_ids, sai_serialize_port_stat); + auto gbport_counter_stats = generateCounterStats(gbport_stat_ids, sai_serialize_port_stat); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. 
@@ -8276,7 +8245,7 @@ void PortsOrch::generatePortBufferDropCounterMap() return; } - auto port_buffer_drop_stats = generateCounterStats(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP); + auto port_buffer_drop_stats = generateCounterStats(port_buffer_drop_stat_ids, sai_serialize_port_stat); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. @@ -8303,7 +8272,7 @@ void PortsOrch::generateWredPortCounterMap() return; } - auto wred_port_stats = generateCounterStats(WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + auto wred_port_stats = generateCounterStats(wred_port_stat_ids, sai_serialize_port_stat); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. @@ -8375,7 +8344,7 @@ void PortsOrch::addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQue for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { - string queueType; + sai_queue_type_t queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { @@ -8383,7 +8352,7 @@ void PortsOrch::addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQue { continue; } - addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + addWredQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false, queueType); } } } @@ -8394,7 +8363,7 @@ void PortsOrch::addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQue * Description: Sets the Stats list to be polled by the flexcounter **/ -void PortsOrch::addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq) +void PortsOrch::addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType) { std::unordered_set counter_stats; std::vector queue_ids; @@ -8412,7 
+8381,15 @@ void PortsOrch::addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, s queue_ids = port.m_queue_ids; } - wred_queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + wred_queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats, queueType); +} + +void PortsOrch::flushCounters() +{ + for (auto counter_manager : counter_managers) + { + counter_manager.get().flush(); + } } uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) @@ -9849,31 +9826,16 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) m_tableVoqSystemLagMemberTable->del(key); } -std::unordered_set PortsOrch::generateCounterStats(const string& type, bool gearbox) +template +std::unordered_set PortsOrch::generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)) { std::unordered_set counter_stats; - if (type == PORT_STAT_COUNTER_FLEX_COUNTER_GROUP) - { - auto& stat_ids = gearbox ? gbport_stat_ids : port_stat_ids; - for (const auto& it: stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } - } - else if (type == PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP) - { - for (const auto& it: port_buffer_drop_stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } - } - else if (type == WRED_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP) + + for (const auto& it:counterIds) { - for (const auto& it: wred_port_stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } + counter_stats.emplace(serializer(it)); } + return counter_stats; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 3d5eca0a81..6e54400d7e 100644 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -207,6 +207,8 @@ class PortsOrch : public Orch, public Subject void generateWredPortCounterMap(); void generateWredQueueCounterMap(); + void flushCounters(); + void refreshPortStatus(); bool removeAclTableGroup(const Port &p); @@ -283,11 +285,16 @@ class 
PortsOrch : public Orch, public Subject shared_ptr m_state_db; shared_ptr m_notificationsDb; - FlexCounterManager port_stat_manager; - FlexCounterManager port_buffer_drop_stat_manager; - FlexCounterManager queue_stat_manager; - FlexCounterManager wred_port_stat_manager; - FlexCounterManager wred_queue_stat_manager; + FlexCounterTaggedCachedManager port_stat_manager; + FlexCounterTaggedCachedManager port_buffer_drop_stat_manager; + FlexCounterTaggedCachedManager queue_stat_manager; + FlexCounterTaggedCachedManager queue_watermark_manager; + FlexCounterTaggedCachedManager pg_watermark_manager; + FlexCounterTaggedCachedManager pg_drop_stat_manager; + FlexCounterTaggedCachedManager wred_port_stat_manager; + FlexCounterTaggedCachedManager wred_queue_stat_manager; + + std::vector> counter_managers; FlexCounterManager gb_port_stat_manager; shared_ptr m_gb_counter_db; @@ -444,21 +451,21 @@ class PortsOrch : public Orch, public Subject bool getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds); task_process_status setPortAdvSpeeds(Port &port, std::set &speed_list); - bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); + bool getQueueTypeAndIndex(sai_object_id_t queue_id, sai_queue_type_t &type, uint8_t &index); bool m_isQueueMapGenerated = false; void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq); bool m_isQueueFlexCountersAdded = false; void addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); - void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq); + void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType); bool m_isQueueWatermarkFlexCountersAdded = false; void addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); - void addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex); + void 
addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, sai_queue_type_t queueType); bool m_isWredQueueCounterMapGenerated = false; void addWredQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); - void addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq); + void addWredQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq, sai_queue_type_t queueType); bool m_isPriorityGroupMapGenerated = false; void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); @@ -537,7 +544,9 @@ class PortsOrch : public Orch, public Subject unique_ptr m_lagIdAllocator; set m_macsecEnabledPorts; - std::unordered_set generateCounterStats(const string& type, bool gearbox = false); + template + std::unordered_set generateCounterStats(const vector &counterIds, std::string (*serializer)(const T)); + map m_queueInfo; /* Protoypes for Path tracing */ diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 21cb11c5db..fe78118c89 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -2088,6 +2088,25 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFiel return task_process_status::task_invalid_entry; } } + if (attr.id == SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP) + { + /* Query Port's UPDATE_DSCP Capability */ + bool rv = gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_PORT, SAI_PORT_ATTR_UPDATE_DSCP); + if (rv == true) + { + sai_attribute_t update_dscp_attr; + memset(&update_dscp_attr, 0, sizeof(update_dscp_attr)); + update_dscp_attr.id = SAI_PORT_ATTR_UPDATE_DSCP; + update_dscp_attr.value.booldata = false; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &update_dscp_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to reset UPDATE_DSCP attribute on port %s, rv:%d", + port_name.c_str(), status); + return 
task_process_status::task_invalid_entry; + } + } + } SWSS_LOG_INFO("Removed %s on port %s", mapRef.first.c_str(), port_name.c_str()); } @@ -2195,6 +2214,25 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFiel return task_process_status::task_invalid_entry; } } + if (attr.id == SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP) + { + /* Query Port's UPDATE_DSCP Capability */ + bool rv = gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_PORT, SAI_PORT_ATTR_UPDATE_DSCP); + if (rv == true) + { + sai_attribute_t update_dscp_attr; + memset(&update_dscp_attr, 0, sizeof(update_dscp_attr)); + update_dscp_attr.id = SAI_PORT_ATTR_UPDATE_DSCP; + update_dscp_attr.value.booldata = true; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &update_dscp_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set UPDATE_DSCP attribute on port %s, rv:%d", + port_name.c_str(), status); + return task_process_status::task_invalid_entry; + } + } + } SWSS_LOG_INFO("Applied %s to port %s", it->second.first.c_str(), port_name.c_str()); } diff --git a/orchagent/request_parser.cpp b/orchagent/request_parser.cpp index 70b4351119..8a40e135bd 100644 --- a/orchagent/request_parser.cpp +++ b/orchagent/request_parser.cpp @@ -71,10 +71,10 @@ void Request::parseKey(const KeyOpFieldsValuesTuple& request) key_items.push_back(full_key_.substr(key_item_start, full_key_.length())); /* - * Attempt to parse an IPv6 address only if the following conditions are met: + * Attempt to parse an IPv6/MAC address only if the following conditions are met: * - The key separator is ":" * - The above logic will already correctly parse IPv6 addresses using other key separators - * - Special consideration is only needed for ":" key separators since IPv6 addresses also use ":" as the field separator + * - Special consideration is only needed for ":" key separators since IPv6/MAC addresses also use ":" as the field separator * - The number of parsed key items 
exceeds the number of expected key items * - If we have too many key items and the last key item is supposed to be an IP or prefix, there is a chance that it was an * IPv6 address that got segmented during parsing @@ -85,7 +85,8 @@ void Request::parseKey(const KeyOpFieldsValuesTuple& request) */ if (key_separator_ == ':' and key_items.size() > number_of_key_items_ and - (request_description_.key_item_types.back() == REQ_T_IP or request_description_.key_item_types.back() == REQ_T_IP_PREFIX)) + (request_description_.key_item_types.back() == REQ_T_IP or request_description_.key_item_types.back() == REQ_T_IP_PREFIX + or request_description_.key_item_types.back() == REQ_T_MAC_ADDRESS)) { // Remove key_items so that key_items.size() is correct, then assemble the removed items into an IPv6 address std::vector ip_addr_groups(--key_items.begin() + number_of_key_items_, key_items.end()); diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index 314e3027db..02e449d108 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -1870,6 +1870,11 @@ bool RouteOrch::addRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) return false; } } + else if (m_neighOrch->isNextHopFlagSet(nexthop, NHFLAGS_IFDOWN)) + { + SWSS_LOG_INFO("Interface down for NH %s, skip this Route for programming", nexthop.to_string().c_str()); + return false; + } } /* For non-existent MPLS NH, check if IP neighbor NH exists */ else if (nexthop.isMplsNextHop() && diff --git a/tests/dash/dash_db.py b/tests/dash/dash_db.py index a26e0b5091..f84f28de5e 100644 --- a/tests/dash/dash_db.py +++ b/tests/dash/dash_db.py @@ -17,6 +17,7 @@ from google.protobuf.json_format import ParseDict from google.protobuf.message import Message +ASIC_DASH_APPLIANCE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_DASH_APPLIANCE" ASIC_DIRECTION_LOOKUP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY" ASIC_VIP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIP_ENTRY" ASIC_VNET_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VNET" @@ 
-154,6 +155,20 @@ def polling_function(): else: return None + def wait_for_asic_db_keys_(self, table_name, min_keys=1): + + def polling_function(): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + keys = table.get_keys() + return len(keys) >= min_keys, keys + + _, keys = wait_for_result(polling_function, failure_message=f"Found fewer than {min_keys} keys in ASIC_DB table {table_name}") + return keys + + def get_keys(self, table_name): + table = Table(self.dvs.get_asic_db().db_connection, table_name) + return table.get_keys() + def __init__(self, dvs): self.dvs = dvs self.app_dash_routing_type_table = ProducerStateTable( @@ -175,6 +190,8 @@ def __init__(self, dvs): self.app_dash_route_group_table = ProducerStateTable( self.dvs.get_app_db().db_connection, "DASH_ROUTE_GROUP_TABLE") + self.asic_dash_appliance_table = Table( + self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_DASH_APPLIANCE") self.asic_direction_lookup_table = Table( self.dvs.get_asic_db().db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_DIRECTION_LOOKUP_ENTRY") self.asic_vip_table = Table( diff --git a/tests/dash/test_dash_vnet.py b/tests/dash/test_dash_vnet.py index 8409db7ce6..d585269838 100644 --- a/tests/dash/test_dash_vnet.py +++ b/tests/dash/test_dash_vnet.py @@ -32,6 +32,19 @@ class TestDash(TestFlexCountersBase): def test_appliance(self, dash_db: DashDB): + # verify vnet creation before appliance is rejected + vnet = "Vnet151" + vni = "75651" + guid = "559c6ce8-26ab-5651-b946-ccc6e8f930b2" + pb = Vnet() + pb.vni = int(vni) + pb.guid.value = bytes.fromhex(uuid.UUID(guid).hex) + dash_db.create_vnet(vnet, {"pb": pb.SerializeToString()}) + keys = dash_db.get_keys(ASIC_VNET_TABLE) + time.sleep(2) + assert len(keys) == 0, "VNET wrongly pushed to SAI before DASH Appliance" + dash_db.remove_vnet(vnet) + self.appliance_id = "100" self.sip = "10.0.0.1" self.vm_vni = "4321" @@ -39,8 +52,13 @@ def test_appliance(self, dash_db: DashDB): pb = Appliance() pb.sip.ipv4 = 
socket.htonl(int(ipaddress.ip_address(self.sip))) pb.vm_vni = int(self.vm_vni) + pb.local_region_id = int(self.local_region_id) dash_db.create_appliance(self.appliance_id, {"pb": pb.SerializeToString()}) + dash_appl_keys = dash_db.wait_for_asic_db_keys(ASIC_DASH_APPLIANCE_TABLE) + dash_appl_attrs = dash_db.get_asic_db_entry(ASIC_DASH_APPLIANCE_TABLE, dash_appl_keys[0]) + assert_sai_attribute_exists("SAI_DASH_APPLIANCE_ATTR_LOCAL_REGION_ID", dash_appl_attrs, self.local_region_id) + direction_keys = dash_db.wait_for_asic_db_keys(ASIC_DIRECTION_LOOKUP_TABLE) dl_attrs = dash_db.get_asic_db_entry(ASIC_DIRECTION_LOOKUP_TABLE, direction_keys[0]) assert_sai_attribute_exists("SAI_DIRECTION_LOOKUP_ENTRY_ATTR_ACTION", dl_attrs, "SAI_DIRECTION_LOOKUP_ENTRY_ACTION_SET_OUTBOUND_DIRECTION") @@ -50,6 +68,19 @@ def test_appliance(self, dash_db: DashDB): vip_attrs = dash_db.get_asic_db_entry(ASIC_VIP_TABLE, vip_keys[0]) assert_sai_attribute_exists("SAI_VIP_ENTRY_ATTR_ACTION", vip_attrs, "SAI_VIP_ENTRY_ACTION_ACCEPT") + # verify duplicate appliance is not passed to SAI + dupl_appliance_id = "200" + pb = Appliance() + pb.sip.ipv4 = socket.htonl(int(ipaddress.ip_address("11.0.0.1"))) + pb.vm_vni = int(1111) + pb.local_region_id = int(self.local_region_id) + dash_db.create_appliance(dupl_appliance_id, {"pb": pb.SerializeToString()}) + time.sleep(2) + keys = dash_db.get_keys(ASIC_DASH_APPLIANCE_TABLE) + assert len(keys) == 1, "duplicate DASH Appliance entry wrongly pushed to SAI" + dash_db.remove_appliance(dupl_appliance_id) + + def test_vnet(self, dash_db: DashDB): self.vnet = "Vnet1" self.vni = "45654" diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 1abe482c4c..c485e091e2 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -62,6 +62,7 @@ tests_SOURCES = aclorch_ut.cpp \ switchorch_ut.cpp \ warmrestarthelper_ut.cpp \ neighorch_ut.cpp \ + dashenifwdorch_ut.cpp \ dashorch_ut.cpp \ twamporch_ut.cpp \ stporch_ut.cpp \ @@ -131,6 
+132,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/cfgmgr/portmgr.cpp \ $(top_srcdir)/cfgmgr/sflowmgr.cpp \ $(top_srcdir)/orchagent/zmqorch.cpp \ + $(top_srcdir)/orchagent/dash/dashenifwdorch.cpp \ + $(top_srcdir)/orchagent/dash/dashenifwdinfo.cpp \ $(top_srcdir)/orchagent/dash/dashaclorch.cpp \ $(top_srcdir)/orchagent/dash/dashorch.cpp \ $(top_srcdir)/orchagent/dash/dashaclgroupmgr.cpp \ diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index aa6cc3b370..380b3f9365 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -967,6 +967,30 @@ namespace aclorch_test return false; } } + else if (attr_name == MATCH_INNER_SRC_MAC || attr_name == MATCH_INNER_DST_MAC) + { + + auto it_field = rule_matches.find(attr_name == MATCH_INNER_SRC_MAC ? SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_MAC : + SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_MAC); + if (it_field == rule_matches.end()) + { + return false; + } + + if (attr_value != sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.data.mac)) + { + std::cerr << "MAC didn't match, Expected:" << attr_value << "\n" \ + << "Received: " << sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.data.mac) << "\n" ; + return false; + } + + if ("FF:FF:FF:FF:FF:FF" != sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.mask.mac)) + { + std::cerr << "MAC Mask didn't match, Expected: FF:FF:FF:FF:FF:FF\n" \ + << "Received: " << sai_serialize_mac(it_field->second.getSaiAttr().value.aclfield.mask.mac) << "\n" ; + return false; + } + } else { // unknown attr_name @@ -990,13 +1014,18 @@ namespace aclorch_test return false; } } - else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_SRC_IPV6) + else if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP || attr_name == MATCH_SRC_IPV6 + || attr_name == MATCH_INNER_DST_MAC || attr_name == MATCH_INNER_SRC_MAC) { if (!validateAclRuleMatch(acl_rule, attr_name, attr_value)) { return false; } 
} + else if (attr_name == RULE_PRIORITY) + { + continue; + } else { // unknown attr_name @@ -1960,4 +1989,71 @@ namespace aclorch_test // Restore sai_switch_api. sai_switch_api = old_sai_switch_api; } + + TEST_F(AclOrchTest, Match_Inner_Mac) + { + string aclTableTypeName = "MAC_MATCH_TABLE_TYPE"; + string aclTableName = "MAC_MATCH_TABLE"; + string aclRuleName = "MAC_MATCH_RULE0"; + + auto orch = createAclOrch(); + + auto matches = string(MATCH_INNER_DST_MAC) + comma + string(MATCH_INNER_SRC_MAC); + orch->doAclTableTypeTask( + deque( + { + { + aclTableTypeName, + SET_COMMAND, + { + { ACL_TABLE_TYPE_MATCHES, matches}, + { ACL_TABLE_TYPE_ACTIONS, ACTION_PACKET_ACTION } + } + } + }) + ); + + orch->doAclTableTask( + deque( + { + { + aclTableName, + SET_COMMAND, + { + { ACL_TABLE_TYPE, aclTableTypeName }, + { ACL_TABLE_STAGE, STAGE_INGRESS } + } + } + }) + ); + + ASSERT_TRUE(orch->getAclTable(aclTableName)); + + auto tableOid = orch->getTableById(aclTableName); + ASSERT_NE(tableOid, SAI_NULL_OBJECT_ID); + const auto &aclTables = orch->getAclTables(); + auto it_table = aclTables.find(tableOid); + ASSERT_NE(it_table, aclTables.end()); + + const auto &aclTableObject = it_table->second; + + auto kvfAclRule = deque({ + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { RULE_PRIORITY, "9999" }, + { MATCH_INNER_DST_MAC, "FF:EE:DD:CC:BB:AA" }, + { MATCH_INNER_SRC_MAC, "11:22:33:44:55:66" }, + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP } + } + } + }); + orch->doAclRuleTask(kvfAclRule); + + auto it_rule = aclTableObject.rules.find(aclRuleName); + ASSERT_NE(it_rule, aclTableObject.rules.end()); + ASSERT_TRUE(validateAclRuleByConfOp(*it_rule->second, kfvFieldsValues(kvfAclRule.front()))); +} + } // namespace nsAclOrchTest diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp index bd633db108..f18685548c 100644 --- a/tests/mock_tests/buffermgrdyn_ut.cpp +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -137,6 +137,11 @@ namespace 
buffermgrdyn_test {"size", "1024000"} }; + testBufferProfile["ingress_lossy_profile"] = { + {"dynamic_th", "7"}, + {"pool", "ingress_lossless_pool"}, + {"size", "0"} + }; testBufferProfile["ingress_lossless_profile"] = { {"dynamic_th", "7"}, {"pool", "ingress_lossless_pool"}, @@ -462,6 +467,46 @@ namespace buffermgrdyn_test } } + void VerifyPgExists(const string &port, const string &pg, bool shouldExist) + { + if (shouldExist) + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(pg) != m_dynamicBuffer->m_portPgLookup[port].end()) + << "PG " << pg << " should exist for port " << port; + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(pg) == m_dynamicBuffer->m_portPgLookup[port].end()) + << "PG " << pg << " should not exist for port " << port; + } + } + + void VerifyPgProfile(const string &port, const string &pg, const string &expectedProfile) + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup[port][pg].running_profile_name, expectedProfile) + << "PG " << pg << " should have profile " << expectedProfile; + } + + void VerifyPgProfileEmpty(const string &port, const string &pg) + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][pg].running_profile_name.empty()) + << "PG " << pg << " should have an empty profile"; + } + + void VerifyProfileExists(const string &profile, bool shouldExist) + { + if (shouldExist) + { + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(profile) != m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Profile " << profile << " should exist"; + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(profile) == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Profile " << profile << " should not exist"; + } + } + void TearDown() override { delete m_dynamicBuffer; @@ -522,8 +567,8 @@ namespace buffermgrdyn_test InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 
4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -647,7 +692,7 @@ namespace buffermgrdyn_test appBufferPoolTable.getKeys(keys); ASSERT_EQ(keys.size(), 3); ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -933,8 +978,8 @@ namespace buffermgrdyn_test InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); for (auto i : testBufferProfile) { CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); @@ -1267,8 +1312,8 @@ namespace buffermgrdyn_test ASSERT_EQ(keys.size(), 3); InitDefaultBufferProfile(); appBufferProfileTable.getKeys(keys); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); m_dynamicBuffer->m_bufferCompletelyInitialized = true; m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; @@ -1471,4 +1516,165 @@ namespace buffermgrdyn_test HandleTable(cableLengthTable); ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_READY); } + + /* + Purpose: To verify the behavior of the buffer mgr dynamic when the cable length is set to "0m". + Here set to 0m indicates no lossless profile will be created, can still create lossy profile. + Steps: + 1. Initialize default lossless parameters and MMU size + 2. Initialize port and verify initial state + 3. 
Set port initialization as done and process tasks + 4. Initialize buffer pools and verify + 5. Initialize buffer profiles and PGs with 5m cable length + 6. Verify PG configuration with 5m cable length + 7. Create a lossy PG and change cable length to 0m and verify lossy PG profile still there + 8. Verify that no 0m profile is created and existing profile is removed + 9. Verify that the running_profile_name is cleared for lossless PGs + 10. Verify that the 5m profile is removed + 11. Try to create a new lossless PG with 0m cable length + 12. Verify that the PG exists but has no profile assigned + 13. Change cable length back to 5m and verify profiles are restored correctly + 14. Verify that profiles are removed again when cable length is set back to 0m + 15. Additional verification of PG state + 16. MTU updates work correctly with non-zero cable length + 17. Create a lossy PG and change cable length to 0m + 18. Verify that lossy PG keeps its profile while lossless PGs have empty profiles + 19. Verify that lossless profiles are removed when cable length is set back to 0m + 20. Update cable length to 0m + 21. Verify that lossy PG keeps its profile while lossless PGs have empty profiles + */ + + TEST_F(BufferMgrDynTest, SkipProfileCreationForZeroCableLength) + { + vector fieldValues; + vector keys; + + // SETUP: Initialize the environment + // 1. Initialize default lossless parameters and MMU size + InitDefaultLosslessParameter(); + InitMmuSize(); + StartBufferManager(); + + // 2. Initialize port and verify initial state + InitPort(); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + // 3. Set port initialization as done and process tasks + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // 4. 
Initialize buffer pools and verify + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + + // 5. Initialize buffer profiles and PGs with 5m cable length + InitBufferPg("Ethernet0|3-4"); + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 4); + InitCableLength("Ethernet0", "5m"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + // 6. Verify PG configuration with 5m cable length + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + + // TEST CASE 1: No new lossless profile is created when cable length is "0m" + // 7. Create a lossy PG and change cable length to 0m and verify lossy PG profile still there + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + + // 8. Verify that no 0m profile is created and existing profile is removed + auto zeroMProfile = "pg_lossless_100000_0m_profile"; + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find(zeroMProfile) == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "No lossless profile should be created for 0m cable length"; + + // 9. Verify that the running_profile_name is cleared for lossless PGs + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name.empty()) + << "Running profile name should be empty for lossless PGs when cable length is 0m"; + + // 10. 
Verify that the 5m profile is removed + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.find("pg_lossless_100000_5m_profile") == m_dynamicBuffer->m_bufferProfileLookup.end()) + << "Previous lossless profile should be removed when cable length is 0m"; + + // TEST CASE 2: No new lossless PG is created when cable length is "0m" + // 11. Try to create a new lossless PG with 0m cable length + InitBufferPg("Ethernet0|6"); + + // 12. Verify that the PG exists but has no profile assigned + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"].find("Ethernet0:6") != m_dynamicBuffer->m_portPgLookup["Ethernet0"].end()) + << "PG should be created even with 0m cable length"; + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:6"].running_profile_name.empty()) + << "No profile should be assigned to lossless PG when cable length is 0m"; + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + + // TEST CASE 3: Profiles are restored when cable length is changed back to non-zero + // 13. Change cable length back to 5m + cableLengthTable.set("AZURE", {{"Ethernet0", "5m"}}); + HandleTable(cableLengthTable); + m_dynamicBuffer->doTask(); + + // 14. Verify that profiles are restored correctly + CheckPg("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_profile"); + CheckPg("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_profile"); + + // 15. Additional verification of PG state + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfile("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_profile"); + VerifyPgProfile("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_profile"); + + // TEST CASE 4: Profiles are removed again when cable length is set back to 0m + // 16. 
Change cable length back to 0m + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + m_dynamicBuffer->doTask(); + + // 17. Verify that profiles are removed but PGs remain + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyProfileExists("pg_lossless_100000_0m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_profile", false); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:3-4"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:6"); + + // TEST CASE 5: MTU updates work correctly with non-zero cable length + // 18. Change cable length to 5m and update MTU + cableLengthTable.set("AZURE", {{"Ethernet0", "5m"}}); + HandleTable(cableLengthTable); + portTable.set("Ethernet0", {{"mtu", "4096"}}); + HandleTable(portTable); + + // 19. Verify profiles are created correctly with new MTU + CheckPg("Ethernet0", "Ethernet0:3-4", "pg_lossless_100000_5m_mtu4096_profile"); + CheckPg("Ethernet0", "Ethernet0:6", "pg_lossless_100000_5m_mtu4096_profile"); + + // 20. Update cable length to 0m + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + cableLengthTable.set("AZURE", {{"Ethernet0", "0m"}}); + HandleTable(cableLengthTable); + + // 21. 
Verify that lossy PG keeps its profile while lossless PGs have empty profiles + VerifyPgExists("Ethernet0", "Ethernet0:0", true); + VerifyPgExists("Ethernet0", "Ethernet0:3-4", true); + VerifyPgExists("Ethernet0", "Ethernet0:6", true); + VerifyPgProfile("Ethernet0", "Ethernet0:0", "ingress_lossy_profile"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:3-4"); + VerifyPgProfileEmpty("Ethernet0", "Ethernet0:6"); + VerifyProfileExists("pg_lossless_100000_0m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_profile", false); + VerifyProfileExists("pg_lossless_100000_5m_mtu4096_profile", false); + } } diff --git a/tests/mock_tests/dashenifwdorch_ut.cpp b/tests/mock_tests/dashenifwdorch_ut.cpp new file mode 100644 index 0000000000..f3a157f87d --- /dev/null +++ b/tests/mock_tests/dashenifwdorch_ut.cpp @@ -0,0 +1,664 @@ +#include "mock_orch_test.h" +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "mock_table.h" +#define protected public +#define private public +#include "dash/dashenifwdorch.h" +#undef public +#undef protected + +using namespace ::testing; + +namespace dashenifwdorch_ut +{ + /* Mock API Calls to other orchagents */ + class MockEniFwdCtx : public EniFwdCtxBase { + public: + using EniFwdCtxBase::EniFwdCtxBase; + + void initialize() override {} + MOCK_METHOD(std::string, getRouterIntfsAlias, (const swss::IpAddress&, const string& vrf), (override)); + MOCK_METHOD(bool, isNeighborResolved, (const NextHopKey&), (override)); + MOCK_METHOD(void, resolveNeighbor, (const NextHopKey&), (override)); + MOCK_METHOD(bool, findVnetVni, (const std::string&, uint64_t&), (override)); + MOCK_METHOD(bool, findVnetTunnel, (const std::string&, std::string&), (override)); + MOCK_METHOD(sai_object_id_t, createNextHopTunnel, (std::string, IpAddress), (override)); + MOCK_METHOD(bool, removeNextHopTunnel, (std::string, IpAddress), (override)); + MOCK_METHOD((std::map&), getAllPorts, (), (override)); + }; + + class DashEniFwdOrchTest : public Test + { + 
public: + unique_ptr cfgDb; + unique_ptr applDb; + unique_ptr chassisApplDb; + unique_ptr
dpuTable; + unique_ptr
eniFwdTable; + unique_ptr
aclRuleTable; + unique_ptr eniOrch; + shared_ptr ctx; + + /* Test values */ + string alias_dpu = "Vlan1000"; + string test_vip = "10.2.0.1/32"; + string vnet_name = "Vnet_1000"; + string tunnel_name = "mock_tunnel"; + string test_mac = "aa:bb:cc:dd:ee:ff"; + string test_mac2 = "ff:ee:dd:cc:bb:aa"; + string test_mac_key = "AABBCCDDEEFF"; + string test_mac2_key = "FFEEDDCCBBAA"; + string local_pav4 = "10.0.0.1"; + string remote_pav4 = "10.0.0.2"; + string remote_2_pav4 = "10.0.0.3"; + string local_npuv4 = "20.0.0.1"; + string remote_npuv4 = "20.0.0.2"; + string remote_2_npuv4 = "20.0.0.3"; + + uint64_t test_vni = 1234; + uint64_t test_vni2 = 5678; + int BASE_PRIORITY = 9996; + + void populateDpuTable() + { + /* Add 1 local and 1 cluster DPU */ + dpuTable->set("1", + { + { DPU_TYPE, "local" }, + { DPU_PA_V4, local_pav4 }, + { DPU_NPU_V4, local_npuv4 }, + }, SET_COMMAND); + + dpuTable->set("2", + { + { DPU_TYPE, "cluster" }, + { DPU_PA_V4, remote_pav4 }, + { DPU_NPU_V4, remote_npuv4 }, + }, SET_COMMAND); + + dpuTable->set("3", + { + { DPU_TYPE, "cluster" }, + { DPU_PA_V4, remote_2_pav4 }, + { DPU_NPU_V4, remote_2_npuv4 }, + }, SET_COMMAND); + } + + void populateVip() + { + Table vipTable(cfgDb.get(), CFG_VIP_TABLE_TMP); + vipTable.set(test_vip, {{}}); + } + + void doDashEniFwdTableTask(DBConnector* applDb, const deque &entries) + { + auto consumer = unique_ptr(new Consumer( + new swss::ConsumerStateTable(applDb, APP_DASH_ENI_FORWARD_TABLE, 1, 1), + eniOrch.get(), APP_DASH_ENI_FORWARD_TABLE)); + + consumer->addToSync(entries); + eniOrch->doTask(*consumer); + } + + void checkKFV(Table* m_table, const std::string& key, const std::vector& expectedValues) { + std::string val; + for (const auto& fv : expectedValues) { + const std::string& field = fvField(fv); + const std::string& expectedVal = fvValue(fv); + EXPECT_TRUE(m_table->hget(key, field, val)) + << "Failed to retrieve field " << field << " from key " << key; + EXPECT_EQ(val, expectedVal) + << "Mismatch for field " << 
field << " for key " << key + << ": expected " << expectedVal << ", got " << val; + } + } + + void checkRuleUninstalled(string key) + { + std::string val; + EXPECT_FALSE(aclRuleTable->hget(key, MATCH_DST_IP, val)) + << key << ": Still Exist"; + } + + void SetUp() override { + testing_db::reset(); + cfgDb = make_unique("CONFIG_DB", 0); + applDb = make_unique("APPL_DB", 0); + chassisApplDb = make_unique("CHASSIS_APP_DB", 0); + /* Initialize tables */ + dpuTable = make_unique
(cfgDb.get(), CFG_DPU_TABLE); + eniFwdTable = make_unique
(applDb.get(), APP_DASH_ENI_FORWARD_TABLE); + aclRuleTable = make_unique
(applDb.get(), APP_ACL_RULE_TABLE_NAME); + /* Populate DPU Configuration */ + populateDpuTable(); + populateVip(); + eniOrch = make_unique(cfgDb.get(), applDb.get(), APP_DASH_ENI_FORWARD_TABLE, nullptr); + + /* Clear the default context and Patch with the Mock */ + ctx = make_shared(cfgDb.get(), applDb.get()); + eniOrch->ctx.reset(); + eniOrch->ctx = ctx; + eniOrch->ctx->populateDpuRegistry(); + eniOrch->ctx_initialized_ = true; + } + }; + + /* + Test getting the PA, NPU address of a DPU and dpuType + */ + TEST_F(DashEniFwdOrchTest, TestDpuRegistry) + { + dpu_type_t type = dpu_type_t::EXTERNAL; + swss::IpAddress pa_v4; + swss::IpAddress npu_v4; + + EniFwdCtx ctx(cfgDb.get(), applDb.get()); + ctx.populateDpuRegistry(); + + EXPECT_TRUE(ctx.dpu_info.getType(1, type)); + EXPECT_EQ(type, dpu_type_t::LOCAL); + EXPECT_TRUE(ctx.dpu_info.getType(2, type)); + EXPECT_EQ(type, dpu_type_t::CLUSTER); + + EXPECT_TRUE(ctx.dpu_info.getPaV4(1, pa_v4)); + EXPECT_EQ(pa_v4.to_string(), local_pav4); + EXPECT_TRUE(ctx.dpu_info.getPaV4(2, pa_v4)); + EXPECT_EQ(pa_v4.to_string(), remote_pav4); + + EXPECT_TRUE(ctx.dpu_info.getNpuV4(1, npu_v4)); + EXPECT_EQ(npu_v4.to_string(), local_npuv4); + EXPECT_TRUE(ctx.dpu_info.getNpuV4(2, npu_v4)); + EXPECT_EQ(npu_v4.to_string(), remote_npuv4); + + vector ids = {1, 2, 3}; + EXPECT_EQ(ctx.dpu_info.getIds(), ids); + } + + /* + VNI is provided by HaMgrd, Resolve Neighbor + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + /* Mock calls to intfsOrch and neighOrch + If neighbor is already resolved, resolveNeighbor is not called */ + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); /* Once per local endpoint */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(4).WillRepeatedly(Return(true)); + EXPECT_CALL(*ctx, resolveNeighbor(nh)).Times(0); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { 
+ { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "1" }, // Local endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni) } + } + } + } + ) + ); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN", { + { ACTION_REDIRECT_ACTION , local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + INBOUND) }, + { MATCH_INNER_DST_MAC, test_mac } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + OUTBOUND) }, + { MATCH_INNER_SRC_MAC, test_mac }, { MATCH_TUNNEL_VNI, to_string(test_vni) } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + INBOUND_TERM) }, + { MATCH_INNER_DST_MAC, test_mac }, + { MATCH_TUNNEL_TERM, "true"} + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + OUTBOUND_TERM) }, + { MATCH_INNER_SRC_MAC, test_mac }, { MATCH_TUNNEL_VNI, to_string(test_vni) }, + { MATCH_TUNNEL_TERM, "true"} + }); + } + + /* + Infer VNI by reading from the VnetOrch, resolved Neighbor + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor_NoVNI) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + + EXPECT_CALL(*ctx, findVnetVni(vnet_name, _)).Times(1) // Called once per ENI + .WillRepeatedly(DoAll( + SetArgReferee<1>(test_vni2), + Return(true) + )); + + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(4).WillRepeatedly(Return(true)); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { 
+ { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "1" }, // Local endpoint is the primary + // No Explicit VNI from the DB, should be inferred from the VNetOrch + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + OUTBOUND) }, + { MATCH_INNER_SRC_MAC, test_mac2 }, { MATCH_TUNNEL_VNI, to_string(test_vni2) } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_DST_IP, test_vip }, + { RULE_PRIORITY, to_string(BASE_PRIORITY + OUTBOUND_TERM) }, + { MATCH_INNER_SRC_MAC, test_mac2 }, { MATCH_TUNNEL_VNI, to_string(test_vni2) }, + { MATCH_TUNNEL_TERM, "true"} + }); + } + + /* + Verify MAC direction + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor_MacDirection) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(4).WillRepeatedly(Return(true)); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "1" }, // Local endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni2) }, + { ENI_FWD_OUT_MAC_LOOKUP, "dst" } + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT", { + { MATCH_INNER_DST_MAC, test_mac2 }, + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT_TERM", { + { MATCH_INNER_DST_MAC, test_mac2 }, { MATCH_TUNNEL_TERM, "true"} + }); + } + + + /* + VNI is provided by HaMgrd, UnResolved Neighbor + */ + TEST_F(DashEniFwdOrchTest, LocalNeighbor_Unresolved) + { + auto nh_ip = swss::IpAddress(local_pav4); + NextHopKey nh = {nh_ip, alias_dpu}; + /* 1 for 
initLocalEndpoints */ + EXPECT_CALL(*ctx, getRouterIntfsAlias(nh_ip, _)).WillOnce(Return(alias_dpu)); + + /* Neighbor is not resolved, 1 per rule + 1 for initLocalEndpoints */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(9).WillRepeatedly(Return(false)); + /* resolveNeighbor is called because the neigh is not resolved */ + EXPECT_CALL(*ctx, resolveNeighbor(nh)).Times(9); /* 1 per rule + 1 for initLocalEndpoints */ + + eniOrch->initLocalEndpoints(); + + /* Populate 2 ENI's */ + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "1" }, // Local endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni) } + } + }, + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "1" }, // Local endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni2) } + } + } + } + ) + ); + + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_IN"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_IN_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_OUT"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_OUT_TERM"); + + /* Neighbor is resolved, Trigger a nexthop update (1 for Neigh Update) * 4 for Types of Rules */ + EXPECT_CALL(*ctx, isNeighborResolved(nh)).Times(8).WillRepeatedly(Return(true)); + + NeighborEntry temp_entry = nh; + NeighborUpdate update = { temp_entry, MacAddress(), true }; + eniOrch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN", { + { ACTION_REDIRECT_ACTION, local_pav4 } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_TUNNEL_TERM, "true"} + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT", { 
+ { ACTION_REDIRECT_ACTION, local_pav4 }, + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 }, { MATCH_TUNNEL_TERM, "true"}, + { MATCH_INNER_SRC_MAC, test_mac2 }, + }); + } + + /* + Remote Endpoint + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor) + { + IpAddress remote_npuv4_ip = IpAddress(remote_npuv4); + EXPECT_CALL(*ctx, getRouterIntfsAlias(_, _)).WillOnce(Return(alias_dpu)); + /* calls to neighOrch expected for tunn termination entries */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(4).WillRepeatedly(Return(true)); + + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(4) // Once per non-tunnel term rules + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + + EXPECT_CALL(*ctx, createNextHopTunnel(tunnel_name, remote_npuv4_ip)).Times(1) + .WillRepeatedly(Return(0x400000000064d)); // Only once since same NH object will be re-used + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "2" }, // Remote endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni) } + } + }, + { + vnet_name + ":" + test_mac2, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "2" }, // Remote endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni2) } + } + } + } + ) + ); + + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN", { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name } + }); + /* Tunnel termiantion rule should be local endpoint */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 } + }); + + /* Rules for second ENI */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac2_key + "_OUT", { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name } + }); + + /* Check Ref 
count */ + ASSERT_TRUE(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name) != ctx->remote_nh_map_.end()); + EXPECT_EQ(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name)->second.first, 4); /* 4 rules using this NH */ + EXPECT_EQ(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name)->second.second, 0x400000000064d); + + EXPECT_CALL(*ctx, removeNextHopTunnel(tunnel_name, remote_npuv4_ip)).WillOnce(Return(true)); + + /* Delete all ENI's */ + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac2, + DEL_COMMAND, + { } + }, + { + vnet_name + ":" + test_mac, + DEL_COMMAND, + { } + } + } + ) + ); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_IN"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_IN_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_OUT"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac2_key + "_OUT_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_IN"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_IN_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_OUT"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_OUT_TERM"); + /* Check the tunnel is removed */ + ASSERT_TRUE(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name) == ctx->remote_nh_map_.end()); + } + + /* + Remote Endpoint with an update to switch to Local Endpoint + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor_SwitchToLocal) + { + IpAddress remote_npuv4_ip = IpAddress(remote_npuv4); + EXPECT_CALL(*ctx, getRouterIntfsAlias(_, _)).WillOnce(Return(alias_dpu)); + /* 2 calls made for tunnel termination rules */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(2) // Once per non-tunnel term rules + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + EXPECT_CALL(*ctx, 
createNextHopTunnel(tunnel_name, remote_npuv4_ip)).Times(1) + .WillRepeatedly(Return(0x400000000064d)); // Only once since same NH object will be re-used + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "1,2" }, + { ENI_FWD_PRIMARY, "2" }, // Remote endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni) } + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN", { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT", { + { ACTION_REDIRECT_ACTION, remote_npuv4 + "@" + tunnel_name } + }); + ASSERT_TRUE(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name) != ctx->remote_nh_map_.end()); + EXPECT_EQ(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name)->second.first, 2); /* 4 rules using this NH */ + EXPECT_EQ(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name)->second.second, 0x400000000064d); + + /* 2 calls will be made for non tunnel termination rules after primary switch */ + EXPECT_CALL(*ctx, isNeighborResolved(_)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(*ctx, removeNextHopTunnel(tunnel_name, remote_npuv4_ip)).WillOnce(Return(true)); + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { ENI_FWD_PRIMARY, "1" }, // Primary is Local now + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT", { + { ACTION_REDIRECT_ACTION, local_pav4 } + }); + /* Check ACL Rules */ + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT_TERM", { + { ACTION_REDIRECT_ACTION, local_pav4 } + }); + /* Check the tunnel is removed */ + ASSERT_TRUE(ctx->remote_nh_map_.find(remote_npuv4 + "@" + tunnel_name) == ctx->remote_nh_map_.end()); + } + + /* + T1 doesn't host the ENI, Both the enndpoints are Remote. 
+ No Tunnel Termination Rules expected + */ + TEST_F(DashEniFwdOrchTest, RemoteNeighbor_NoTunnelTerm) + { + IpAddress remote_npuv4_ip = IpAddress(remote_2_npuv4); + EXPECT_CALL(*ctx, findVnetTunnel(vnet_name, _)).Times(2) // Only two rules are created + .WillRepeatedly(DoAll( + SetArgReferee<1>(tunnel_name), + Return(true) + )); + + EXPECT_CALL(*ctx, createNextHopTunnel(tunnel_name, remote_npuv4_ip)).Times(1) + .WillRepeatedly(Return(0x400000000064d)); // Only once since same NH object will be re-used + + doDashEniFwdTableTask(applDb.get(), + deque( + { + { + vnet_name + ":" + test_mac, + SET_COMMAND, + { + { ENI_FWD_VDPU_IDS, "2,3" }, + { ENI_FWD_PRIMARY, "3" }, // Remote endpoint is the primary + { ENI_FWD_OUT_VNI, to_string(test_vni) } + } + } + } + ) + ); + + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_IN", { + { ACTION_REDIRECT_ACTION, remote_2_npuv4 + "@" + tunnel_name } + }); + checkKFV(aclRuleTable.get(), "ENI:" + vnet_name + "_" + test_mac_key+ "_OUT", { + { ACTION_REDIRECT_ACTION, remote_2_npuv4 + "@" + tunnel_name } + }); + + /* Tunnel termination rules are not installed */ + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_IN_TERM"); + checkRuleUninstalled("ENI:" + vnet_name + "_" + test_mac_key+ "_OUT_TERM"); + } + + /* + Test ACL Table and Table Type config + */ + TEST_F(DashEniFwdOrchTest, TestAclTableConfig) + { + /* Create a set of ports */ + std::map allPorts; + allPorts["Ethernet0"] = Port("Ethernet0", Port::PHY); + allPorts["Ethernet4"] = Port("Ethernet4", Port::PHY); + allPorts["Ethernet8"] = Port("Ethernet8", Port::PHY); + allPorts["Ethernet16"] = Port("Ethernet16", Port::PHY); + allPorts["PortChannel1011"] = Port("PortChannel1012", Port::LAG); + allPorts["PortChannel1012"] = Port("Ethernet16", Port::LAG); + allPorts["PortChannel1011"].m_members.insert("Ethernet8"); + allPorts["PortChannel1012"].m_members.insert("Ethernet16"); + EXPECT_CALL(*ctx, getAllPorts()).WillOnce(ReturnRef(allPorts)); + + Table 
aclTableType(applDb.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); + Table aclTable(applDb.get(), APP_ACL_TABLE_TABLE_NAME); + Table portTable(cfgDb.get(), CFG_PORT_TABLE_NAME); + + portTable.set("Ethernet0", + { + { "lanes", "0,1,2,3" } + }, SET_COMMAND); + + portTable.set("Ethernet4", + { + { "lanes", "4,5,6,7" }, + { PORT_ROLE, PORT_ROLE_DPC } + }, SET_COMMAND); + + eniOrch->initAclTableCfg(); + + checkKFV(&aclTableType, "ENI_REDIRECT", { + { ACL_TABLE_TYPE_MATCHES, "TUNNEL_VNI,DST_IP,INNER_SRC_MAC,INNER_DST_MAC,TUNNEL_TERM" }, + { ACL_TABLE_TYPE_ACTIONS, "REDIRECT_ACTION" }, + { ACL_TABLE_TYPE_BPOINT_TYPES, "PORT,PORTCHANNEL" } + }); + + checkKFV(&aclTable, "ENI", { + { ACL_TABLE_TYPE, "ENI_REDIRECT" }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, "Ethernet0,PortChannel1011,PortChannel1012" } + }); + } +} + +namespace mock_orch_test +{ + TEST_F(MockOrchTest, EniFwdCtx) + { + EniFwdCtx ctx(m_config_db.get(), m_app_db.get()); + ASSERT_NO_THROW(ctx.initialize()); + + NextHopKey nh(IpAddress("10.0.0.1"), "Ethernet0"); + ASSERT_NO_THROW(ctx.isNeighborResolved(nh)); + ASSERT_NO_THROW(ctx.resolveNeighbor(nh)); + ASSERT_NO_THROW(ctx.getRouterIntfsAlias(IpAddress("10.0.0.1"))); + + uint64_t vni; + ASSERT_NO_THROW(ctx.findVnetVni("Vnet_1000", vni)); + string tunnel; + ASSERT_NO_THROW(ctx.findVnetTunnel("Vnet_1000", tunnel)); + ASSERT_NO_THROW(ctx.getAllPorts()); + ASSERT_NO_THROW(ctx.createNextHopTunnel("tunnel0", IpAddress("10.0.0.1"))); + ASSERT_NO_THROW(ctx.removeNextHopTunnel("tunnel0", IpAddress("10.0.0.1"))); + } +} diff --git a/tests/mock_tests/flexcounter_ut.cpp b/tests/mock_tests/flexcounter_ut.cpp index 686b818984..0ff39b2fe3 100644 --- a/tests/mock_tests/flexcounter_ut.cpp +++ b/tests/mock_tests/flexcounter_ut.cpp @@ -43,6 +43,7 @@ namespace flexcounter_test mockOldSaiSetSwitchAttribute = old; } + uint32_t mockFlexCounterOperationCallCount; sai_status_t mockFlexCounterOperation(sai_object_id_t objectId, const sai_attribute_t *attr) { if (objectId != 
gSwitchId) @@ -53,23 +54,42 @@ namespace flexcounter_test auto *param = reinterpret_cast(attr->value.ptr); std::vector entries; auto serializedObjectId = sai_serialize_object_id(objectId); - std::string key((const char*)param->counter_key.list); + auto keys = tokenize(string((const char*)param->counter_key.list), ','); + bool first = true; + string groupName; + string key; - if (param->stats_mode.list != nullptr) + for(auto key : keys) { - entries.push_back({STATS_MODE_FIELD, (const char*)param->stats_mode.list}); - } + if (first) + { + groupName = tokenize(key, ':')[0]; + first = false; + } + else + { + key = groupName + ":" + key; + } - if (param->counter_ids.list != nullptr) - { - entries.push_back({(const char*)param->counter_field_name.list, (const char*)param->counter_ids.list}); - mockFlexCounterTable->set(key, entries); - } - else - { - mockFlexCounterTable->del(key); + if (param->stats_mode.list != nullptr) + { + entries.push_back({STATS_MODE_FIELD, (const char*)param->stats_mode.list}); + } + + if (param->counter_ids.list != nullptr) + { + entries.push_back({(const char*)param->counter_field_name.list, (const char*)param->counter_ids.list}); + mockFlexCounterTable->set(key, entries); + entries.clear(); + } + else + { + mockFlexCounterTable->del(key); + } } + mockFlexCounterOperationCallCount++; + return SAI_STATUS_SUCCESS; } @@ -127,6 +147,14 @@ namespace flexcounter_test if (table->get(key, fieldValues)) { + if (entries.size() == 1 && fieldValues.size() == 1 && fvField(entries[0]).find("COUNTER_ID_LIST") != std::string::npos) + { + auto counterIds = tokenize(fvValue(entries[0]), ','); + auto expectedCounterIds = tokenize(fvValue(fieldValues[0]), ','); + set counterIdSet(counterIds.begin(), counterIds.end()); + set expectedCounterSet(expectedCounterIds.begin(), expectedCounterIds.end()); + return (counterIdSet == expectedCounterSet); + } set fvSet(fieldValues.begin(), fieldValues.end()); set expectedSet(entries.begin(), entries.end()); @@ -188,6 +216,31 
@@ namespace flexcounter_test return _checkFlexCounterTableContent(mockFlexCounterTable, group + ":" + sai_serialize_object_id(oid), entries); } + void isNoPendingCounterObjects() + { + std::vector*> queueCounterManagers({ + &gPortsOrch->queue_stat_manager, + &gPortsOrch->queue_watermark_manager + }); + std::vector*> pgCounterManagers({ + &gPortsOrch->pg_drop_stat_manager, + &gPortsOrch->pg_watermark_manager + }); + + for (auto pgCounterManager : pgCounterManagers) + { + ASSERT_TRUE(pgCounterManager->cached_objects.pending_sai_objects.empty()); + } + + for (auto queueCounterManager : queueCounterManagers) + { + for (auto it : queueCounterManager->cached_objects) + { + ASSERT_TRUE(it.second.pending_sai_objects.empty()); + } + } + } + sai_switch_api_t ut_sai_switch_api; sai_switch_api_t *pold_sai_switch_api; @@ -448,12 +501,14 @@ namespace flexcounter_test { {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, {POLL_INTERVAL_FIELD, QUEUE_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"}, {QUEUE_PLUGIN_FIELD, ""} })); ASSERT_TRUE(checkFlexCounterGroup(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, { {STATS_MODE_FIELD, STATS_MODE_READ_AND_CLEAR}, {POLL_INTERVAL_FIELD, PG_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"}, {PG_PLUGIN_FIELD, ""} })); ASSERT_TRUE(checkFlexCounterGroup(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, @@ -467,6 +522,7 @@ namespace flexcounter_test { {STATS_MODE_FIELD, STATS_MODE_READ}, {POLL_INTERVAL_FIELD, PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS}, + {FLEX_COUNTER_STATUS_FIELD, "disable"} })); ASSERT_TRUE(checkFlexCounterGroup(RIF_STAT_COUNTER_FLEX_COUNTER_GROUP, { @@ -584,6 +640,8 @@ namespace flexcounter_test static_cast(flexCounterOrch)->doTask(); } + isNoPendingCounterObjects(); + ASSERT_TRUE(checkFlexCounterGroup(BUFFER_POOL_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, { {POLL_INTERVAL_FIELD, "60000"}, @@ -816,7 +874,7 @@ namespace flexcounter_test 
ASSERT_TRUE(checkFlexCounter(PFC_WD_FLEX_COUNTER_GROUP, firstPort.m_queue_ids[3], { - {QUEUE_COUNTER_ID_LIST, "SAI_QUEUE_STAT_PACKETS,SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES"}, + {QUEUE_COUNTER_ID_LIST, "SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES,SAI_QUEUE_STAT_PACKETS"}, {QUEUE_ATTR_ID_LIST, "SAI_QUEUE_ATTR_PAUSE_STATUS"} })); @@ -844,11 +902,25 @@ namespace flexcounter_test entries.clear(); static_cast(gBufferOrch)->doTask(); + isNoPendingCounterObjects(); + ASSERT_TRUE(checkFlexCounter(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid)); ASSERT_TRUE(checkFlexCounter(PG_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, pgOid)); ASSERT_TRUE(checkFlexCounter(QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid)); ASSERT_TRUE(checkFlexCounter(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, queueOid)); + if (!gTraditionalFlexCounter) + { + // Create and remove without flushing counters + auto oldMockFlexCounterCallCount = mockFlexCounterOperationCallCount; + gPortsOrch->createPortBufferQueueCounters(firstPort, "3"); + gPortsOrch->removePortBufferQueueCounters(firstPort, "3"); + gPortsOrch->createPortBufferPgCounters(firstPort, "3"); + gPortsOrch->removePortBufferPgCounters(firstPort, "3"); + ASSERT_EQ(oldMockFlexCounterCallCount, mockFlexCounterOperationCallCount); + isNoPendingCounterObjects(); + } + // Remove buffer profiles entries.push_back({"ingress_lossless_profile", "DEL", { {} }}); consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); diff --git a/tests/mock_tests/flowcounterrouteorch_ut.cpp b/tests/mock_tests/flowcounterrouteorch_ut.cpp index 28cd614694..089d681b31 100644 --- a/tests/mock_tests/flowcounterrouteorch_ut.cpp +++ b/tests/mock_tests/flowcounterrouteorch_ut.cpp @@ -246,6 +246,7 @@ namespace flowcounterrouteorch_test for (const auto &it : ports) { portTable.set(it.first, it.second); + portTable.set(it.first, {{ "oper_status", "up" }}); } // Set PortConfigDone @@ -412,4 +413,4 @@ namespace flowcounterrouteorch_test 
vrf_consumer->addToSync(entries); static_cast(gVrfOrch)->doTask(); } -} \ No newline at end of file +} diff --git a/tests/mock_tests/mock_orch_test.cpp b/tests/mock_tests/mock_orch_test.cpp index 15eb553ad1..c628e97cbd 100644 --- a/tests/mock_tests/mock_orch_test.cpp +++ b/tests/mock_tests/mock_orch_test.cpp @@ -263,6 +263,10 @@ void MockOrchTest::SetUp() gDirectory.set(m_VxlanTunnelOrch); ut_orch_list.push_back((Orch **)&m_VxlanTunnelOrch); + m_vnetOrch = new VNetOrch(m_app_db.get(), APP_VNET_TABLE_NAME); + gDirectory.set(m_vnetOrch); + ut_orch_list.push_back((Orch **)&m_vnetOrch); + ApplyInitialConfigs(); PostSetUp(); } diff --git a/tests/mock_tests/mock_orch_test.h b/tests/mock_tests/mock_orch_test.h index 86c5a36655..fa6e5faed8 100644 --- a/tests/mock_tests/mock_orch_test.h +++ b/tests/mock_tests/mock_orch_test.h @@ -50,6 +50,7 @@ namespace mock_orch_test MuxStateOrch *m_MuxStateOrch; FlexCounterOrch *m_FlexCounterOrch; VxlanTunnelOrch *m_VxlanTunnelOrch; + VNetOrch *m_vnetOrch; DashOrch *m_DashOrch; void PrepareSai(); diff --git a/tests/mock_tests/mock_orchagent_main.cpp b/tests/mock_tests/mock_orchagent_main.cpp index 96d86018fa..5f011eb6b8 100644 --- a/tests/mock_tests/mock_orchagent_main.cpp +++ b/tests/mock_tests/mock_orchagent_main.cpp @@ -13,6 +13,7 @@ MacAddress gMacAddress; MacAddress gVxlanMacAddress; string gMySwitchType = "switch"; +string gMySwitchSubType = "SmartSwitch"; int32_t gVoqMySwitchId = 0; string gMyHostName = "Linecard1"; string gMyAsicName = "Asic0"; diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 5ffda0dd41..e4c2c7fc6c 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -29,6 +29,7 @@ #include "nhgorch.h" #include "copporch.h" #include "twamporch.h" +#include "mlagorch.h" #define private public #include "stporch.h" #undef private @@ -64,6 +65,7 @@ extern AclOrch *gAclOrch; extern PolicerOrch *gPolicerOrch; extern TunnelDecapOrch 
*gTunneldecapOrch; extern StpOrch *gStpOrch; +extern MlagOrch *gMlagOrch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 2dfee93b2e..d6076d81d9 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -535,6 +535,14 @@ namespace portsorch_test ASSERT_EQ((gPfcwdOrch), nullptr); gPfcwdOrch = new PfcWdSwOrch(m_config_db.get(), pfc_wd_tables, portStatIds, queueStatIds, queueAttrIds, 100); + vector mlag_tables = { + { CFG_MCLAG_TABLE_NAME }, + { CFG_MCLAG_INTF_TABLE_NAME } + }; + + ASSERT_EQ(gMlagOrch, nullptr); + gMlagOrch = new MlagOrch(m_config_db.get(), mlag_tables); + } virtual void TearDown() override @@ -563,7 +571,8 @@ namespace portsorch_test gQosOrch = nullptr; delete gSwitchOrch; gSwitchOrch = nullptr; - + delete gMlagOrch; + gMlagOrch = nullptr; // clear orchs saved in directory gDirectory.m_values.clear(); } @@ -1307,17 +1316,16 @@ namespace portsorch_test ASSERT_NE(port.m_port_id, SAI_NULL_OBJECT_ID); // Get queue info - string type; + sai_queue_type_t type; uint8_t index; auto queue_id = port.m_queue_ids[0]; auto ut_sai_get_queue_attr_count = _sai_get_queue_attr_count; gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); - ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(type, SAI_QUEUE_TYPE_UNICAST); ASSERT_EQ(index, 0); - type = ""; index = 255; gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); - ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + ASSERT_EQ(type, SAI_QUEUE_TYPE_UNICAST); ASSERT_EQ(index, 0); ASSERT_EQ(++ut_sai_get_queue_attr_count, _sai_get_queue_attr_count); diff --git a/tests/request_parser_ut.cpp b/tests/request_parser_ut.cpp index 8409dc5b66..7ac44b12b7 100644 --- a/tests/request_parser_ut.cpp +++ b/tests/request_parser_ut.cpp @@ -1761,3 +1761,47 @@ TEST(request_parser, multipleValuesInvalidMac) FAIL() << "Expected std::invalid_argument, not other exception"; } } + +/* +Check MAC key 
+*/ +const request_description_t test_mac_key = { + { REQ_T_STRING, REQ_T_MAC_ADDRESS }, + { + { "f1", REQ_T_STRING } + }, + { } +}; + +class TestRequestMacKey: public Request +{ +public: + TestRequestMacKey() : Request(test_mac_key, ':') { } +}; + +TEST(request_parser, mac_key_parse_checker) +{ + KeyOpFieldsValuesTuple t {"Vnet_1000:ab:b1:ca:dd:e1:f2", "SET", + { } + }; + + try + { + TestRequestMacKey request; + + EXPECT_NO_THROW(request.parse(t)); + + EXPECT_STREQ(request.getOperation().c_str(), "SET"); + EXPECT_STREQ(request.getFullKey().c_str(), "Vnet_1000:ab:b1:ca:dd:e1:f2"); + EXPECT_EQ(request.getKeyString(0), "Vnet_1000"); + EXPECT_EQ(request.getKeyMacAddress(1), MacAddress("ab:b1:ca:dd:e1:f2")); + } + catch (const std::exception& e) + { + FAIL() << "Got unexpected exception " << e.what(); + } + catch (...) + { + FAIL() << "Got unexpected exception"; + } +} diff --git a/tests/test_copp.py b/tests/test_copp.py index 4163300a31..e7d86c41d7 100644 --- a/tests/test_copp.py +++ b/tests/test_copp.py @@ -232,6 +232,8 @@ def validate_trap_group(self, trap_oid, trap_group): queue = "" trap_action = "" trap_priority = "" + default_trap_queue = "0" + default_trap_prio = "1" for fv in trap_fvs: if fv[0] == "SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION": @@ -262,6 +264,11 @@ def validate_trap_group(self, trap_oid, trap_group): assert trap_group_oid != "oid:0x0" if keys == "queue": assert queue == trap_group[keys] + # default trap in copp config doesn't specify a trap priority + # this is instead set internally in swss + # confirm that default trap uses a priority 1 + if queue == default_trap_queue: + assert trap_priority == default_trap_prio else: assert 0 diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 9dc5a4ed53..ef1f67e9f7 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -424,6 +424,7 @@ def init_macsec( wpa.init_macsec_port(port_name) wpa.config_macsec_port(port_name, {"enable_protect": True}) wpa.config_macsec_port(port_name, {"enable_encrypt": 
True}) + wpa.config_macsec_port(port_name, {"send_sci": True}) wpa.config_macsec_port( port_name, { diff --git a/tests/test_mclag_fdb.py b/tests/test_mclag_fdb.py index 8252db8421..0801b15a1d 100644 --- a/tests/test_mclag_fdb.py +++ b/tests/test_mclag_fdb.py @@ -83,6 +83,19 @@ def how_many_entries_exist(db, table): tbl = swsscommon.Table(db, table) return len(tbl.getKeys()) +def create_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + fvs = swsscommon.FieldValuePairs([("if_type", "PortChannel")]) + key_string = domain_id + "|" + mclag_interface + tbl.set(key_string, fvs) + time.sleep(1) + +def remove_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + key_string = domain_id + "|" + mclag_interface + tbl._del(key_string) + time.sleep(1) + # Test-1 Verify basic config add @pytest.mark.dev_sanity @@ -564,8 +577,100 @@ def test_mclagFdb_remote_to_local_mac_move_ntf(dvs, testlog): dvs.pdb, "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", ) - -# Test-13 Verify cleanup of the basic config. + +# Test-13 Verify FDB table flush on MCLAG link down. +@pytest.mark.dev_sanity +def test_mclagFdb_flush_on_link_down(dvs, testlog): + dvs.setup_db() + + # Create PortChannel001. 
We do not use the pre-created portchannels + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + + tbl.set("PortChannel001", fvs) + time.sleep(1) + + # Create vlan + dvs.create_vlan("200") + + # Add vlan members + dvs.create_vlan_member("200", "PortChannel001") + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL_MEMBER") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set("PortChannel001|Ethernet0", fvs) + time.sleep(1) + + # set oper_status for PortChannels + ps = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + ps.set("PortChannel001", fvs) + time.sleep(1) + + create_mclag_interface(dvs, "4095", "PortChannel001") + + #Add MAC to FDB_TABLE on PortChannel001 + create_entry_pst( + dvs.pdb, + "FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + [ + ("port", "PortChannel001"), + ("type", "dynamic"), + ] + ) + + # check that the FDB entry inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry not inserted to ASIC" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC")] + ) + + assert ok, str(extra) + + # bring PortChannel down + dvs.servers[0].runcmd("ip link set down dev eth0") + time.sleep(1) + ps = swsscommon.ProducerStateTable(dvs.pdb, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "down")]) + ps.set("PortChannel001", fvs) + time.sleep(1) + + # check that the FDB entry was not deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry was deleted" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, 
"ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC")] + ) + assert ok, str(extra) + + # Restore eth0 up + dvs.servers[0].runcmd("ip link set up dev eth0") + time.sleep(1) + + remove_mclag_interface(dvs, "4095", "PortChannel001") + + delete_entry_pst( + dvs.pdb, + "FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + ) + + time.sleep(2) + # check that the FDB entry was deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 0, "The MCLAG static fdb entry not deleted" + + # remove PortChannel member + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL_MEMBER") + tbl._del("PortChannel001|Ethernet0") + time.sleep(1) + + # remove PortChannel + tbl = swsscommon.Table(dvs.cdb, "PORTCHANNEL") + tbl._del("PortChannel001") + time.sleep(2) + +# Test-14 Verify cleanup of the basic config. @pytest.mark.dev_sanity def test_mclagFdb_basic_config_del(dvs, testlog): diff --git a/tests/test_mirror_port_erspan.py b/tests/test_mirror_port_erspan.py index 61c0a17cd3..7b48d31e76 100644 --- a/tests/test_mirror_port_erspan.py +++ b/tests/test_mirror_port_erspan.py @@ -435,6 +435,13 @@ def test_PortMirrorDestMoveLag(self, dvs, testlog): # add route dvs.add_route("13.13.13.0/24", "200.0.0.1") self.dvs_mirror.verify_session_status(session) + + # As the Route pointing to Down NH Interface it will not get installed and Mirror State should not be changed. 
+ self.dvs_mirror.verify_session(dvs, session, asic_db=expected_asic_db, src_ports=src_asic_ports, direction="RX") + + # Now Make Port Channel oper up so route pointing to this gets installed + (exitcode, _) = dvs.runcmd("ip link set dev PortChannel080 carrier on") + assert exitcode == 0, "ip link set failed" expected_asic_db = {"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT": pmap.get("Ethernet32"), "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS": "12:10:08:06:04:02"} diff --git a/tests/test_port.py b/tests/test_port.py index feccb6917a..03199d8830 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -480,6 +480,59 @@ def test_PortLinkEventDamping(self, dvs, testlog): if fv[0] == "link_event_damping_algorithm": assert fv[1] == "disabled" + def test_PortAdminRestore(self, dvs, testlog): + appdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + ptbl = swsscommon.ProducerStateTable(appdb, "PORT_TABLE") + atbl = swsscommon.Table(asicdb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + + # Initialize Ethernet0 (admin_status, fec) = (up, rs) + fvs = swsscommon.FieldValuePairs([("admin_status", "up"), + ("fec", "rs")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_RS" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "true" + + # Verify pCfg.admin_status.is_set false by (fec) = (none) + fvs = swsscommon.FieldValuePairs([("fec", "none")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_NONE" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "true" + + # Verify pCfg.admin_status.is_set true by (admin_status, fec) = (down, rs) + fvs = 
swsscommon.FieldValuePairs([("admin_status", "down"), + ("fec", "rs")]) + ptbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_FEC_MODE": + assert fv[1] == "SAI_PORT_FEC_MODE_RS" + if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": + assert fv[1] == "false" # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 905b0dacaa..5fe3291cf2 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -69,9 +69,9 @@ def connect_dbs(self, dvs): self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) def create_tc_dscp_profile(self): - tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DSCP_MAP_TABLE_NAME) + tc_dscp_tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DSCP_MAP_TABLE_NAME) fvs = swsscommon.FieldValuePairs(list(TC_TO_DSCP_MAP.items())) - tbl.set(CFG_TC_TO_DSCP_MAP_KEY, fvs) + tc_dscp_tbl.set(CFG_TC_TO_DSCP_MAP_KEY, fvs) time.sleep(1) def find_tc_dscp_profile(self): @@ -96,6 +96,14 @@ def find_tc_dscp_profile(self): return (key, tc_dscp_map_raw) + def delete_tc_dscp_profile_on_all_ports(self): + tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) + ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() + for port in ports: + tbl._del(port) + + time.sleep(1) + def apply_tc_dscp_profile_on_all_ports(self): tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_TC_DSCP_MAP_FIELD, CFG_TC_TO_DSCP_MAP_KEY)]) @@ -139,6 +147,7 @@ def test_port_tc_dscp(self, dvs): port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) assert port_cnt == cnt + self.delete_tc_dscp_profile_on_all_ports() #Tests for TC-to-Dot1p qos map configuration class TestTcDot1p(object): diff --git 
a/tests/test_soft_bfd.py b/tests/test_soft_bfd.py index c0de637fcc..eb67cd1b14 100644 --- a/tests/test_soft_bfd.py +++ b/tests/test_soft_bfd.py @@ -4,6 +4,7 @@ #SOFT_BFD_STATE_TABLE = swsscommon.STATE_BFD_SOFTWARE_SESSION_TABLE_NAME SOFT_BFD_STATE_TABLE = "BFD_SOFTWARE_SESSION_TABLE" +DVS_ENV = ["BFDOFFLOAD=false"] class TestSoftBfd(object): def setup_db(self, dvs): @@ -12,8 +13,6 @@ def setup_db(self, dvs): self.sdb = dvs.get_state_db() self.cdb = dvs.get_config_db() - self.cdb.db_connection.hset('DEVICE_METADATA|localhost', "switch_type", "dpu") - #Restart swss to pick up new switch type dvs.stop_swss() dvs.start_swss() diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index 3c9edea5c6..9ff8ad63e6 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -588,6 +588,99 @@ def _test_sub_port_intf_creation(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) + def _test_sub_port_intf_creation_add_lag_member(self, dvs, sub_port_intf_name, vrf_name=None): + substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) + parent_port = self.get_parent_port(sub_port_intf_name) + vlan_id = substrs[1] + + assert parent_port.startswith(SUBINTF_LAG_PREFIX) + state_tbl_name = STATE_LAG_TABLE_NAME + phy_ports = self.LAG_MEMBERS_UNDER_TEST + old_lag_oids = self.get_oids(ASIC_LAG_TABLE) + + vrf_oid = self.default_vrf_oid + old_rif_oids = self.get_oids(ASIC_RIF_TABLE) + + self.set_parent_port_admin_status(dvs, parent_port, "up") + + parent_port_oid = self.get_newly_created_oid(ASIC_LAG_TABLE, old_lag_oids) + # Add lag members to test physical port host interface vlan tag attribute + self.add_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST[1:]) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, len(self.LAG_MEMBERS_UNDER_TEST[1:])) + if vrf_name: + self.create_vrf(vrf_name) + vrf_oid = self.get_newly_created_oid(ASIC_VIRTUAL_ROUTER_TABLE, [vrf_oid]) + 
self.create_sub_port_intf_profile(sub_port_intf_name, vrf_name) + self.add_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST[:1]) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, len(self.LAG_MEMBERS_UNDER_TEST)) + + # Verify that sub port interface state ok is pushed to STATE_DB by Intfmgrd + fv_dict = { + "state": "ok", + } + self.check_sub_port_intf_fvs(self.state_db, state_tbl_name, sub_port_intf_name, fv_dict) + + # Verify vrf name sub port interface bound to in STATE_DB INTERFACE_TABLE + fv_dict = { + "vrf": vrf_name if vrf_name else "", + } + self.check_sub_port_intf_fvs(self.state_db, STATE_INTERFACE_TABLE_NAME, sub_port_intf_name, fv_dict) + + # If bound to non-default vrf, verify sub port interface vrf binding in linux kernel, + # and parent port not bound to vrf + if vrf_name: + self.check_sub_port_intf_vrf_bind_kernel(dvs, sub_port_intf_name, vrf_name) + self.check_sub_port_intf_vrf_nobind_kernel(dvs, parent_port, vrf_name) + + # Verify that sub port interface configuration is synced to APPL_DB INTF_TABLE by Intfmgrd + fv_dict = { + ADMIN_STATUS: "up", + } + if vrf_name: + fv_dict[VRF_NAME if vrf_name.startswith(VRF_PREFIX) else VNET_NAME] = vrf_name + self.check_sub_port_intf_fvs(self.app_db, APP_INTF_TABLE_NAME, sub_port_intf_name, fv_dict) + + # Verify that a sub port router interface entry is created in ASIC_DB + fv_dict = { + "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_SUB_PORT", + "SAI_ROUTER_INTERFACE_ATTR_OUTER_VLAN_ID": "{}".format(vlan_id), + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE": "true", + "SAI_ROUTER_INTERFACE_ATTR_MTU": DEFAULT_MTU, + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": vrf_oid, + "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": parent_port_oid, + } + rif_oid = self.get_newly_created_oid(ASIC_RIF_TABLE, old_rif_oids) + self.check_sub_port_intf_fvs(self.asic_db, ASIC_RIF_TABLE, rif_oid, fv_dict) + + # Verify physical port host interface vlan tag attribute + 
fv_dict = { + "SAI_HOSTIF_ATTR_VLAN_TAG": "SAI_HOSTIF_VLAN_TAG_KEEP", + } + for phy_port in phy_ports: + hostif_oid = dvs.asicdb.hostifnamemap[phy_port] + self.check_sub_port_intf_fvs(self.asic_db, ASIC_HOSTIF_TABLE, hostif_oid, fv_dict) + + # Remove a sub port interface + self.remove_sub_port_intf_profile(sub_port_intf_name) + self.check_sub_port_intf_profile_removal(rif_oid) + + # Remove vrf if created + if vrf_name: + self.remove_vrf(vrf_name) + self.check_vrf_removal(vrf_oid) + if vrf_name.startswith(VNET_PREFIX): + self.remove_vxlan_tunnel(self.TUNNEL_UNDER_TEST) + self.app_db.wait_for_n_keys(ASIC_TUNNEL_TABLE, 0) + + # Remove lag members from lag parent port + self.remove_lag_members(parent_port, self.LAG_MEMBERS_UNDER_TEST) + self.asic_db.wait_for_n_keys(ASIC_LAG_MEMBER_TABLE, 0) + + # Remove lag + self.remove_lag(parent_port) + self.check_lag_removal(parent_port_oid) + def test_sub_port_intf_creation(self, dvs): self.connect_dbs(dvs) @@ -600,6 +693,8 @@ def test_sub_port_intf_creation(self, dvs): self._test_sub_port_intf_creation(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST, self.VNET_UNDER_TEST) self._test_sub_port_intf_creation(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, self.VNET_UNDER_TEST) + self._test_sub_port_intf_creation_add_lag_member(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST) + def _test_sub_port_intf_add_ip_addrs(self, dvs, sub_port_intf_name, vrf_name=None): substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) parent_port = self.get_parent_port(sub_port_intf_name)