/**************************************************************************** Copyright(c) 2019 by Aerospace C.Power (Chongqing) Microelectronics. ALL RIGHTS RESERVED. This Information is proprietary to Aerospace C.Power (Chongqing) Microelectronics and MAY NOT be copied by any method or incorporated into another program without the express written consent of Aerospace C.Power. This Information or any portion thereof remains the property of Aerospace C.Power. The Information contained herein is believed to be accurate and Aerospace C.Power assumes no responsibility or liability for its use in any way and conveys no license or title under any patent or copyright and makes no representation or warranty that this Information is free from patent or copyright infringement. ****************************************************************************/ /* os shim includes */ #include "os_types.h" #include "os_mem.h" /* common includes */ #include "iot_errno.h" #include "iot_module.h" #include "iot_dbglog_parser.h" #include "iot_dbglog_api.h" #include "iot_io.h" #include "iot_utils_api.h" /* public api includes */ #include "cvg_api.h" #include "plc_fr.h" #include "plc_const.h" #include "plc_mme_assoc.h" #include "mac_uni_cfg_api.h" #include "mac_vdev_api.h" /* cvg module internal includes */ #include "cvg.h" #include "cvg_prm.h" #include "cvg_prm_matm.h" #include "cvg_bitmap.h" #include "cvg_nwm.h" #include "cvg_rt.h" #include "cvg_app.h" #include "cvg_security.h" #if (PLC_SUPPORT_TEI_LOCK_RECYCLE) /* tei for a remote peer could be locked even the peer has left * the network for a while if the peer is using PLC_MAC_ADDR_TYPE_METER * mac address. this value defined the duration to keep the relation * after the peer left network. the unit is 1s. */ #define CVG_PRM_TEI_LOCK_DUR (60 * 60 * 24 * 2) #else /* tei for a remote peer could be locked even the peer has left * the network for a while if the peer is using PLC_MAC_ADDR_TYPE_METER * mac address. 
this value defined the duration to keep the relation * after the peer left network. the unit is 1s. */ #define CVG_PRM_TEI_LOCK_DUR (30 * 60) #endif /* define the duration of recycing, allow locked tei to be recycled when tei is * not enough. recyclable peer must meet the following conditions: * peer->last_delete_ts > CVG_PRM_TEI_LOCK_RECYCLE_DUR */ #define CVG_PRM_TEI_LOCK_RECYCLE_DUR (30 * 60) /* max tei of supporting 1k type sta */ #define CVG_PRM_SUPP_1K_MAX_TEI (1015) /* white list max count of supporting 1k type sta */ #define CVG_PRM_SUPP_1K_WL_MAX_CNT (1007) /* per vdev peer manamgent module descriptor */ typedef struct _cvg_prm_vdev { /* pointer of cvg vdev */ cvg_vdev_t *vdev; /* peer table */ cvg_peer_info_t peer[PLC_TEI_MAX_NUM]; /* bitmap to mark peers in use */ cvg_tei_map_t in_use; /* cco role peer info for different phases. CCO role device may support * multiple phases. Keep CCO role peer phase specific info here. phase * specific field is listed as below: * need_cal_tx * need_cal_rx * rx_snr * tx_snr * dis_rx * dis_mme_rx * prev_dis_rx * prev_dis_mme_rx * dis_only_rx * prev_bc_rx * bc_rx * tx_sr * rx_sr * tf_sr_valid * tf_sr * cco_tf_sr * * note the for beacon from CCO, we can get the phase info from fc. but * for discovery node list mme from CCO, there is no phase info. for * self device, we use some reserved bits to cary the phase info which is * beyond smart grid spec. * for other vendor device: * 1. discovery node list mme rx count won't be saved here but in the * peer table. * 2. discovery node list mme rx snr will be saved neither here nor in the * peer table. * 3. beacon rx count and rx snr will be saved both here and in the peer * table. * for our device: * 1. discovery node list mme rx count and rx snr will be saved both here * and in the peer table. * 2. beacon rx count and rx snr will be saved both here and in the peer * table. */ cvg_peer_info_t cco_peer[PLC_PHASE_CNT]; /* cco role peer info for different phases. 
flag to mark if we have not * received beacon from each phase of cco role peer for the new beacon * period. */ uint8_t cco_peer_new_bp[PLC_PHASE_CNT]; /* direct sub sta count of local device. this field is only valid if local * device is sta. cco device won't use it. instead, cco can get direct sub * sta cnt from level 1 device bitmap. */ uint16_t d_sub_sta_cnt; #if (PLC_SUPPORT_CCO_ROLE) /* start position to search the next free tei in the tei_assigned bitmap */ uint32_t tei_map_idx; /* total valid assoc request received */ uint32_t assoc_rx_cnt; /* total proxy change request accepted counter */ uint32_t proxy_chg_accept_cnt; /* total valid proxy change request received from the peer counter */ uint32_t proxy_chg_rx_cnt; /* total level 1 sta phase change detection counter */ uint32_t phase_chg_cnt; /* number of cco in the network */ uint32_t cco_cnt; /* number of pco in the network */ uint32_t pco_cnt; /* number of sta in the network */ uint32_t sta_cnt; /* first possible phase count of each phase in the whole network */ uint32_t phase1_cnt[PLC_PHASE_CNT]; /* second possible phase count of each phase in the whole network */ uint32_t phase2_cnt[PLC_PHASE_CNT]; /* third possible phase count of each phase in the whole network */ uint32_t phase3_cnt[PLC_PHASE_CNT]; /* bitmap of each level pco device */ cvg_tei_map_t pco_bms[PLC_MAX_RT_LEVEL]; /* bitmap of each level sta device */ cvg_tei_map_t sta_bms[PLC_MAX_RT_LEVEL]; #if HPLC_RF_SUPPORT /* bitmap of each level & link type pco device */ cvg_tei_map_t pco_link_bms[PLC_MAX_RT_LEVEL][CVG_PRM_LINK_BM_P_CNT]; /* bitmap of each level & link type sta device */ cvg_tei_map_t sta_link_bms[PLC_MAX_RT_LEVEL][CVG_PRM_LINK_BM_S_CNT]; /* number of each level & link type pco in the network */ uint16_t pco_link_cnt[PLC_MAX_RT_LEVEL][CVG_PRM_LINK_BM_P_CNT]; /* number of each link type sta in the network */ uint16_t sta_link_cnt[CVG_PRM_LINK_BM_S_CNT]; #endif /* pointer to memory pools, for PCOs store directly connected sub-sta * 
info. */ iot_mem_pool_t *pco_d_sub_sta_pool; /* bitmap to mark tei assigned */ cvg_tei_map_t tei_assigned; #endif #if (PLC_SUPPORT_ADDR_TO_TEI_MAP) /* mac address and tei mapping hash table */ cvg_matm_table_t table; #endif } cvg_prm_vdev_t; /* peer array index to tei conversion */ #define CVG_PRM_IDX_TO_TEI(__p_idx) ((tei_t)((__p_idx) + PLC_TEI_FIRST)) /* tei to peer array index conversion */ #define CVG_PRM_TEI_TO_IDX(__tei) ((__tei) - PLC_TEI_FIRST) /* peer array index to bitmap index conversion */ #define CVG_PRM_IDX_TO_BM(__p_idx) (CVG_TEI_TO_BM(CVG_PRM_IDX_TO_TEI(__p_idx))) /* bitmap index to peer array index conversion */ #define CVG_PRM_BM_TO_IDX(__bm) (CVG_PRM_TEI_TO_IDX(CVG_BM_TO_TEI(__bm))) #if (PLC_SUPPORT_ADDR_TO_TEI_MAP) cvg_peer_info_t *cvg_prm_get_peer_by_addr(cvg_vdev_t *vdev, uint8_t *addr) { tei_t tei = PLC_TEI_INVAL; cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer = NULL; cvg_matm_get_tei(&prm->table, addr, &tei); if (PLC_TEI_IS_VALID(tei) && cvg_tei_map_is_set(&prm->in_use, CVG_TEI_TO_BM(tei))) { peer = &prm->peer[CVG_PRM_TEI_TO_IDX(tei)]; } return peer; } void cvg_prm_set_peer_addr(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t *addr) { uint8_t tmp_addr[IOT_MAC_ADDR_LEN]; cvg_prm_vdev_t *prm = vdev->prm; tei_t tmp_tei; tei_t tei = CVG_PRM_IDX_TO_TEI(peer - prm->peer); if (vdev->role != PLC_DEV_ROLE_CCO) { /* for sta role device, new mac address should be not being used */ if (ERR_OK == cvg_matm_get_tei(&prm->table, addr, &tmp_tei)) { /* this is a fatal error, let's force assert to restart the * whole system. */ IOT_ASSERT(tmp_tei == tei); } } if (cvg_matm_get_addr(&prm->table, tmp_addr, tei) == ERR_OK) { /* mac address already exist */ if (iot_mac_addr_cmp(tmp_addr, addr) == 0) { /* mac address changed, how can it be? 
*/ #if CVG_PEER_DEBUG iot_printf("%s addr chg tei %lu orig " "%02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__, tei, tmp_addr[0], tmp_addr[1], tmp_addr[2], tmp_addr[3], tmp_addr[4], tmp_addr[5]); iot_printf("%s addr chg tei %lu new " "%02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__, tei, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_SET_PEER_ADDR_ORIG_ID, 7, tei, tmp_addr[0], tmp_addr[1], tmp_addr[2], tmp_addr[3], tmp_addr[4], tmp_addr[5]); iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_SET_PEER_ADDR_NEW_ID, 7, tei, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); #endif cvg_matm_del_ent_addr(&prm->table, tmp_addr); cvg_matm_add_ent(&prm->table, addr, tei); } } else { #if CVG_PEER_DEBUG iot_printf("%s tei %lu mac %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__, tei, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_SET_PEER_ADDR_ID, 7, tei, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); #endif cvg_matm_add_ent(&prm->table, addr, tei); } } uint32_t cvg_prm_get_peer_addr(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t *addr) { cvg_prm_vdev_t *prm = vdev->prm; tei_t tei = CVG_PRM_IDX_TO_TEI(peer - prm->peer); return cvg_matm_get_addr(&prm->table, addr, tei); } uint32_t cvg_prm_get_addr_by_tei(cvg_vdev_t *vdev, tei_t tei, uint8_t *addr) { cvg_prm_vdev_t *prm = vdev->prm; return cvg_matm_get_addr(&prm->table, addr, tei); } static inline void cvg_prm_matm_init(cvg_prm_vdev_t *prm) { cvg_matm_init_table(&prm->table); } static inline void cvg_prm_matm_del_ent(cvg_prm_vdev_t *prm, tei_t tei) { iot_printf("%s tei %d", __FUNCTION__, tei); cvg_matm_del_ent_tei(&prm->table, tei); } #else /* PLC_SUPPORT_ADDR_TO_TEI_MAP */ #define cvg_prm_matm_init(prm) #define cvg_prm_matm_del_ent(prm, peer) #endif /* PLC_SUPPORT_ADDR_TO_TEI_MAP */ #if (PLC_SUPPORT_CCO_ROLE) static void cvg_prm_calc_pco_his(cvg_peer_pco_his_t *pco, 
    uint32_t *pco_tf_sr, uint32_t *pco_tf_sr_max, uint32_t *pco_tf_sr_min)
{
    uint8_t idx, num;

    /* outputs: average, max and min traffic successful ratio over the
     * valid samples of the history ring buffer */
    *pco_tf_sr = 0;
    *pco_tf_sr_max = 0;
    *pco_tf_sr_min = 100;
    if (pco->tf_sr_cnt < CVG_PEER_TF_SR_HIS_DEPTH) {
        /* ring buffer not full yet: only the first tf_sr_idx entries hold
         * valid samples */
        num = pco->tf_sr_idx;
    } else {
        num = CVG_PEER_TF_SR_HIS_DEPTH;
    }
    for (idx = 0; idx < num; idx++) {
        if (*pco_tf_sr_max < pco->tf_sr[idx]) {
            *pco_tf_sr_max = pco->tf_sr[idx];
        }
        if (*pco_tf_sr_min > pco->tf_sr[idx]) {
            *pco_tf_sr_min = pco->tf_sr[idx];
        }
        *pco_tf_sr += pco->tf_sr[idx];
    }
    if (num) {
        /* average over the valid samples */
        *pco_tf_sr /= num;
    } else {
        /* no samples at all: report min as 0 instead of the initial 100 */
        *pco_tf_sr_min = 0;
    }
}

/* compare two pco history entries and return the better one.
 * preference order, as coded below:
 * 1. an entry whose pco is still present in the peer table wins;
 * 2. an entry with tf_sr samples wins over one without;
 * 3. higher average tf_sr wins;
 * 4. an entry whose minimum tf_sr is non-zero wins;
 * 5. higher (max + min) tf_sr wins, pco1 winning ties. */
static cvg_peer_pco_his_t *cvg_prm_cmp_pco_his(cvg_vdev_t *vdev,
    cvg_peer_pco_his_t *pco1, cvg_peer_pco_his_t *pco2)
{
    uint32_t pco1_tf_sr, pco1_tf_sr_max, pco1_tf_sr_min;
    uint32_t pco2_tf_sr, pco2_tf_sr_max, pco2_tf_sr_min;
    cvg_peer_info_t *proxy1, *proxy2;

    /* calculate pco 1 data */
    proxy1 = cvg_prm_get_peer_by_addr(vdev, pco1->addr);
    cvg_prm_calc_pco_his(pco1, &pco1_tf_sr, &pco1_tf_sr_max, &pco1_tf_sr_min);
    /* calculate pco 2 data */
    proxy2 = cvg_prm_get_peer_by_addr(vdev, pco2->addr);
    cvg_prm_calc_pco_his(pco2, &pco2_tf_sr, &pco2_tf_sr_max, &pco2_tf_sr_min);
    /* compare pco 1 and 2 */
    if (proxy1 == NULL && proxy2) {
        return pco2;
    } else if (proxy2 == NULL && proxy1) {
        return pco1;
    }
    if (pco2->tf_sr_cnt == 0) {
        return pco1;
    } else if (pco1->tf_sr_cnt == 0) {
        return pco2;
    }
    if (pco1_tf_sr < pco2_tf_sr) {
        return pco2;
    } else if (pco1_tf_sr > pco2_tf_sr) {
        return pco1;
    }
    if (pco1_tf_sr_min == 0 && pco2_tf_sr_min)
        return pco2;
    if (pco2_tf_sr_min == 0 && pco1_tf_sr_min)
        return pco1;
    if ((pco2_tf_sr_max + pco2_tf_sr_min) >
        (pco1_tf_sr_max + pco1_tf_sr_min)) {
        return pco2;
    } else {
        return pco1;
    }
}

/* return the best pco history entry of the peer, or NULL if the peer has
 * no valid history entry at all */
static cvg_peer_pco_his_t *cvg_prm_get_best_pco_his(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    uint8_t i;
    cvg_peer_pco_his_t *slot = NULL;

    (void)vdev;
    for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) {
        if (iot_mac_addr_valid(peer->pco_his[i].addr)) {
            if (slot == NULL) {
                slot = &peer->pco_his[i];
            } else {
                /* compare pco */
                slot =
cvg_prm_cmp_pco_his(vdev, slot, &peer->pco_his[i]); } } } return slot; } void cvg_prm_dump_pco_his(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { uint8_t i; uint32_t pco_tf_sr, pco_tf_sr_max, pco_tf_sr_min; cvg_peer_info_t *proxy; for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) { if (iot_mac_addr_valid(peer->pco_his[i].addr)) { proxy = cvg_prm_get_peer_by_addr(vdev, peer->pco_his[i].addr); if (proxy) { cvg_prm_calc_pco_his(&peer->pco_his[i], &pco_tf_sr, &pco_tf_sr_max, &pco_tf_sr_min); iot_printf("%s addr %02X:%02X:%02X:%02X:%02X:%02X, tei %lu, " "cnt %lu, tf_sr %lu, tf_sr_max %lu, tf_sr_min %lu, " "assoc_cnt %lu, is_rf %lu\n", __FUNCTION__, peer->pco_his[i].addr[0], peer->pco_his[i].addr[1], peer->pco_his[i].addr[2], peer->pco_his[i].addr[3], peer->pco_his[i].addr[4], peer->pco_his[i].addr[5], cvg_prm_get_peer_tei(vdev, proxy), peer->pco_his[i].tf_sr_cnt, pco_tf_sr, pco_tf_sr_max, pco_tf_sr_min, peer->pco_his[i].assoc_cnt, peer->pco_his[i].is_rf); } } } } uint8_t cvg_prm_is_pco_his_bad(cvg_vdev_t *vdev, cvg_peer_info_t *peer, cvg_peer_info_t *proxy, uint8_t link_type) { uint8_t i, bad = 0; uint8_t addr[IOT_MAC_ADDR_LEN]; uint8_t is_rf = (link_type == PLC_LINK_TYPE_RF) ? 
        1 : 0;
    cvg_peer_pco_his_t *slot = NULL;
    uint32_t pco_tf_sr, pco_tf_sr_max, pco_tf_sr_min, pco_tf_sr_th;

    IOT_ASSERT(ERR_OK == cvg_prm_get_peer_addr(vdev, proxy, addr));
    /* look up the history slot matching the proxy address and link type.
     * note: iot_mac_addr_cmp returns non-zero on a match here, consistent
     * with its other uses in this file */
    for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) {
        if (iot_mac_addr_valid(peer->pco_his[i].addr)) {
            if (iot_mac_addr_cmp(peer->pco_his[i].addr, addr) &&
                (peer->pco_his[i].is_rf == is_rf)) {
                slot = &peer->pco_his[i];
                break;
            }
        }
    }
    if (slot) {
        if (slot->assoc_cnt > 30) {
            /* too many assoc attempts through this pco
             * (30 presumably an empirical threshold -- TODO confirm) */
            bad = 1;
        } else if (slot->tf_sr_cnt) {
            cvg_prm_calc_pco_his(slot, &pco_tf_sr, &pco_tf_sr_max,
                &pco_tf_sr_min);
            /* rf and hplc links use different "bad" thresholds */
            if (is_rf) {
                pco_tf_sr_th = CVG_PEER_RF_TF_SR_BAD_TH;
            } else {
                pco_tf_sr_th = CVG_PEER_HPLC_TF_SR_BAD_TH;
            }
            if (pco_tf_sr_min == 0) {
                /* at least one sample had zero successful traffic */
                bad = 1;
            } else if (pco_tf_sr < pco_tf_sr_th) {
                bad = 1;
            }
        } else if (peer->last_seen_ts) {
            /* peer has been seen before but carries no tf_sr sample for
             * this pco: treat the history as bad */
            bad = 1;
        }
    }
    return bad;
}

/* reset the assoc attempt counter of the history slot matching the given
 * (proxy, link_type) pair, if such a slot exists */
static void cvg_prm_clear_pco_his_assoc(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer, cvg_peer_info_t *proxy, uint8_t link_type)
{
    uint8_t i;
    uint8_t addr[IOT_MAC_ADDR_LEN];
    uint8_t is_rf = (link_type == PLC_LINK_TYPE_RF) ? 1 : 0;
    cvg_peer_pco_his_t *slot = NULL;

    IOT_ASSERT(ERR_OK == cvg_prm_get_peer_addr(vdev, proxy, addr));
    for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) {
        if (iot_mac_addr_valid(peer->pco_his[i].addr)) {
            if (iot_mac_addr_cmp(peer->pco_his[i].addr, addr) &&
                (peer->pco_his[i].is_rf == is_rf)) {
                slot = &peer->pco_his[i];
                break;
            }
        }
    }
    if (slot) {
        slot->assoc_cnt = 0;
    }
}

/* add a pco history entry for the (proxy, link_type) pair if one does not
 * exist yet. when the history table is full, the current best entry is
 * dropped and the remaining entries shifted down to make room. */
void cvg_prm_add_pco_his(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    cvg_peer_info_t *proxy, uint8_t link_type)
{
    uint8_t i;
    uint8_t addr[IOT_MAC_ADDR_LEN];
    uint8_t is_rf = (link_type == PLC_LINK_TYPE_RF) ?
1 : 0; cvg_peer_pco_his_t *slot = NULL; cvg_peer_pco_his_t *free_slot = NULL; IOT_ASSERT(ERR_OK == cvg_prm_get_peer_addr(vdev, proxy, addr)); for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) { if (iot_mac_addr_valid(peer->pco_his[i].addr)) { if (iot_mac_addr_cmp(peer->pco_his[i].addr, addr) && (peer->pco_his[i].is_rf == is_rf)) { slot = &peer->pco_his[i]; break; } } else if (free_slot == NULL) { free_slot = &peer->pco_his[i]; } } if (slot == NULL) { if (free_slot == NULL) { /* delete the best PCO info */ free_slot = cvg_prm_get_best_pco_his(vdev, peer); if (free_slot) { for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) { if (&peer->pco_his[i] == free_slot) { break; } } for (; i < CVG_PEER_PCO_HIS_DEPTH - 1; i++) { os_mem_cpy(&peer->pco_his[i], &peer->pco_his[i + 1], sizeof(cvg_peer_pco_his_t)); } free_slot = &peer->pco_his[CVG_PEER_PCO_HIS_DEPTH - 1]; } } if (free_slot) { os_mem_set(free_slot, 0, sizeof(*free_slot)); iot_mac_addr_cpy(free_slot->addr, addr); free_slot->is_rf = is_rf; } } } void cvg_prm_update_pco_his(cvg_vdev_t *vdev, cvg_peer_info_t *peer, cvg_peer_info_t *proxy, uint8_t link_type, uint8_t from_assoc, uint8_t force_bad) { uint8_t i; uint8_t addr[IOT_MAC_ADDR_LEN]; uint8_t is_rf = (link_type == PLC_LINK_TYPE_RF) ? 
1 : 0; cvg_peer_pco_his_t *slot = NULL; IOT_ASSERT(ERR_OK == cvg_prm_get_peer_addr(vdev, proxy, addr)); for (i = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) { if (iot_mac_addr_valid(peer->pco_his[i].addr)) { if (iot_mac_addr_cmp(peer->pco_his[i].addr, addr) && (peer->pco_his[i].is_rf == is_rf)) { slot = &peer->pco_his[i]; break; } } } if (slot) { if (force_bad) { slot->tf_sr_idx = 0; slot->tf_sr_cnt = 0; return; } if (from_assoc) { iot_counter_inc(slot->assoc_cnt); return; } /* using the traffic successful ratio calculated from discover mme * for level 1 peers, and using the traffic successful ratio got from * successful ratio mme for other level peers */ if (is_rf && proxy->role == PLC_DEV_ROLE_CCO) { slot->tf_sr[slot->tf_sr_idx] = iot_calc_m_ratio((uint8_t)cvg_prm_get_peer_rf_tx_sr(peer), (uint8_t)cvg_prm_get_peer_rf_rx_sr(peer)); } else { slot->tf_sr[slot->tf_sr_idx] = iot_calc_m_ratio((uint8_t)peer->tx_sr, (uint8_t)peer->rx_sr); } slot->tf_sr_idx++; if (slot->tf_sr_idx >= CVG_PEER_TF_SR_HIS_DEPTH) { slot->tf_sr_idx = 0; } if (slot->tf_sr_cnt < CVG_PEER_TF_SR_HIS_DEPTH) { slot->tf_sr_cnt++; } } } #if HPLC_RF_SUPPORT static void cvg_prm_alloc_peer_rf_d_sub_sta(cvg_prm_vdev_t *prm, cvg_peer_info_t *peer) { peer->rf_d_sub_sta = iot_mem_pool_alloc(prm->pco_d_sub_sta_pool); IOT_ASSERT(peer->rf_d_sub_sta); cvg_tei_map_reset(peer->rf_d_sub_sta); peer->rf_d_sub_sta_cnt = 0; } static void cvg_prm_free_peer_rf_d_sub_sta(cvg_prm_vdev_t *prm, cvg_peer_info_t *peer) { if (peer->rf_d_sub_sta) { iot_mem_pool_free(prm->pco_d_sub_sta_pool, peer->rf_d_sub_sta); peer->rf_d_sub_sta = NULL; } peer->rf_d_sub_sta_cnt = 0; } static void cvg_prm_update_peer_sub_link_type(cvg_peer_info_t *peer) { uint32_t total_cnt; uint32_t rf_cnt; if (!peer->direct_sub_sta && !peer->rf_d_sub_sta) { peer->sub_link_type = CVG_PEER_SUB_LINK_INVALID; return; } /* total_cnt contain both hplc and rf devices. rf_cnt contain only rf * devices. 
*/ total_cnt = cvg_tei_map_cbs(peer->direct_sub_sta); rf_cnt = cvg_tei_map_cbs(peer->rf_d_sub_sta); peer->d_sub_sta_cnt = (uint16_t)total_cnt; peer->rf_d_sub_sta_cnt = (uint16_t)rf_cnt; if (total_cnt == 0) { IOT_ASSERT(rf_cnt == 0); /* bm is empty */ peer->sub_link_type = CVG_PEER_SUB_LINK_INVALID; } else if (rf_cnt == 0) { /* bm contain only hplc devices */ peer->sub_link_type = CVG_PEER_SUB_LINK_HPLC; } else if (total_cnt > rf_cnt) { /* bm contain hplc and rf devices */ peer->sub_link_type = CVG_PEER_SUB_LINK_DUAL; } else if (total_cnt == rf_cnt) { /* bm contain only rf devices */ peer->sub_link_type = CVG_PEER_SUB_LINK_RF; } else { IOT_ASSERT(0); } } static void cvg_prm_cco_clear_peer_link_type_bm(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { cvg_prm_vdev_t *prm = vdev->prm; tei_t p_tei; uint8_t i; if (peer->role == PLC_DEV_ROLE_CCO) return; if (peer->level) { p_tei = cvg_prm_get_peer_tei(vdev, peer); for (i = 0; i < CVG_PRM_LINK_BM_P_CNT; i++) { if (cvg_tei_map_is_set(&prm->pco_link_bms[peer->level - 1][i], CVG_TEI_TO_BM(p_tei))) { cvg_tei_map_clear(&prm->pco_link_bms[peer->level - 1][i], CVG_TEI_TO_BM(p_tei)); IOT_ASSERT(prm->pco_link_cnt[peer->level][i]); prm->pco_link_cnt[peer->level][i]--; IOT_ASSERT(prm->pco_link_cnt[0][i]); prm->pco_link_cnt[0][i]--; return; } } for (i = 0; i < CVG_PRM_LINK_BM_S_CNT; i++) { if (cvg_tei_map_is_set(&prm->sta_link_bms[peer->level - 1][i], CVG_TEI_TO_BM(p_tei))) { cvg_tei_map_clear(&prm->sta_link_bms[peer->level - 1][i], CVG_TEI_TO_BM(p_tei)); IOT_ASSERT(prm->sta_link_cnt[i]); prm->sta_link_cnt[i]--; return; } } } } static void cvg_prm_cco_set_peer_link_type_bm(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { cvg_prm_vdev_t *prm = vdev->prm; tei_t p_tei; uint8_t level = peer->level; if (vdev->role == PLC_DEV_ROLE_CCO) { p_tei = cvg_prm_get_peer_tei(vdev, peer); switch (peer->role) { case PLC_DEV_ROLE_PCO: { if (peer->comm_type == PLC_DEV_COMM_TYPE_HPLC) { cvg_tei_map_set(&prm->pco_link_bms[level - 1][ CVG_PRM_LINK_BM_P_HPLC], 
CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->pco_link_cnt[level][ CVG_PRM_LINK_BM_P_HPLC]); iot_counter_inc(prm->pco_link_cnt[0][ CVG_PRM_LINK_BM_P_HPLC]); } else if (peer->comm_type == PLC_DEV_COMM_TYPE_RF) { cvg_tei_map_set(&prm->pco_link_bms[level - 1][ CVG_PRM_LINK_BM_P_RF], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->pco_link_cnt[level][ CVG_PRM_LINK_BM_P_RF]); iot_counter_inc(prm->pco_link_cnt[0][ CVG_PRM_LINK_BM_P_RF]); } else if (peer->comm_type == PLC_DEV_COMM_TYPE_DUAL_MODE) { if (peer->sub_link_type == CVG_PEER_SUB_LINK_HPLC) { cvg_tei_map_set(&prm->pco_link_bms[level - 1][ CVG_PRM_LINK_BM_P_DUAL_HPLC], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->pco_link_cnt[level][ CVG_PRM_LINK_BM_P_DUAL_HPLC]); iot_counter_inc(prm->pco_link_cnt[0][ CVG_PRM_LINK_BM_P_DUAL_HPLC]); } else if (peer->sub_link_type == CVG_PEER_SUB_LINK_RF) { cvg_tei_map_set(&prm->pco_link_bms[level - 1][ CVG_PRM_LINK_BM_P_DUAL_RF], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->pco_link_cnt[level][ CVG_PRM_LINK_BM_P_DUAL_RF]); iot_counter_inc(prm->pco_link_cnt[0][ CVG_PRM_LINK_BM_P_DUAL_RF]); } else if (peer->sub_link_type == CVG_PEER_SUB_LINK_DUAL) { cvg_tei_map_set(&prm->pco_link_bms[level - 1][ CVG_PRM_LINK_BM_P_DUAL_DUAL], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->pco_link_cnt[level][ CVG_PRM_LINK_BM_P_DUAL_DUAL]); iot_counter_inc(prm->pco_link_cnt[0][ CVG_PRM_LINK_BM_P_DUAL_DUAL]); } } break; } case PLC_DEV_ROLE_STA: { if (peer->comm_type == PLC_DEV_COMM_TYPE_HPLC) { cvg_tei_map_set(&prm->sta_link_bms[level - 1][ CVG_PRM_LINK_BM_S_HPLC], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->sta_link_cnt[CVG_PRM_LINK_BM_S_HPLC]); } else if (peer->comm_type == PLC_DEV_COMM_TYPE_RF) { cvg_tei_map_set(&prm->sta_link_bms[level - 1][ CVG_PRM_LINK_BM_S_RF], CVG_TEI_TO_BM(p_tei)); iot_counter_inc(prm->sta_link_cnt[CVG_PRM_LINK_BM_S_RF]); } else if (peer->comm_type == PLC_DEV_COMM_TYPE_DUAL_MODE) { cvg_tei_map_set(&prm->sta_link_bms[level - 1][ CVG_PRM_LINK_BM_S_DUAL], CVG_TEI_TO_BM(p_tei)); 
                    iot_counter_inc(prm->sta_link_cnt[CVG_PRM_LINK_BM_S_DUAL]);
                }
                break;
            }
            case PLC_DEV_ROLE_CCO:
            default:
                break;
        }
    }
}

/* refresh the link type bitmap membership of the peer: clear any stale
 * entry first, then set the bit matching the peer's current
 * role/level/comm type */
static void cvg_prm_update_peer_link_type_bm(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_prm_cco_clear_peer_link_type_bm(vdev, peer);
    cvg_prm_cco_set_peer_link_type_bm(vdev, peer);
}

/* get the pco bitmap of the given level (1-based) and link type */
cvg_tei_map_t *cvg_prm_get_pco_link_bm(cvg_vdev_t *vdev, uint8_t level,
    uint8_t link)
{
    cvg_prm_vdev_t *prm = vdev->prm;

    IOT_ASSERT(level && level <= PLC_MAX_RT_LEVEL);
    IOT_ASSERT(link < CVG_PRM_LINK_BM_P_CNT);
    return &prm->pco_link_bms[level - 1][link];
}

/* get the sta bitmap of the given level (1-based) and link type */
cvg_tei_map_t *cvg_prm_get_sta_link_bm(cvg_vdev_t *vdev, uint8_t level,
    uint8_t link)
{
    cvg_prm_vdev_t *prm = vdev->prm;

    IOT_ASSERT(level && level <= PLC_MAX_RT_LEVEL);
    IOT_ASSERT(link < CVG_PRM_LINK_BM_S_CNT);
    return &prm->sta_link_bms[level - 1][link];
}

/* get the pco count of the given level and link type; index 0 of the
 * level dimension holds the network-wide total (see the set routine).
 * NOTE(review): the assert admits level == PLC_MAX_RT_LEVEL while the
 * first dimension of pco_link_cnt is PLC_MAX_RT_LEVEL -- confirm no
 * caller passes that boundary value, otherwise this reads one past the
 * array. */
uint32_t cvg_prm_get_pco_link_cnt(cvg_vdev_t *vdev, uint8_t level,
    uint8_t link)
{
    cvg_prm_vdev_t *prm = vdev->prm;

    IOT_ASSERT(level <= PLC_MAX_RT_LEVEL);
    IOT_ASSERT(link < CVG_PRM_LINK_BM_P_CNT);
    return prm->pco_link_cnt[level][link];
}

/* get the sta count of the given link type (counted across all levels) */
uint32_t cvg_prm_get_sta_link_cnt(cvg_vdev_t *vdev, uint8_t link)
{
    cvg_prm_vdev_t *prm = vdev->prm;

    IOT_ASSERT(link < CVG_PRM_LINK_BM_S_CNT);
    return prm->sta_link_cnt[link];
}

/* total count of pure-hplc peers: hplc pco (network-wide, level 0) plus
 * hplc sta */
uint16_t cvg_prm_get_peer_hplc_cnt(cvg_vdev_t *vdev)
{
    uint16_t rf_cnt;

    rf_cnt = (uint16_t)cvg_prm_get_pco_link_cnt(vdev, 0,
        CVG_PRM_LINK_BM_P_HPLC);
    rf_cnt += (uint16_t)cvg_prm_get_sta_link_cnt(vdev,
        CVG_PRM_LINK_BM_S_HPLC);
    return rf_cnt;
}

/* total count of peers involving rf capability: sums the rf and all
 * dual-mode pco categories (network-wide, level 0) plus rf and dual sta */
uint16_t cvg_prm_get_peer_rf_cnt(cvg_vdev_t *vdev)
{
    uint16_t rf_cnt;

    rf_cnt = (uint16_t)cvg_prm_get_pco_link_cnt(vdev, 0,
        CVG_PRM_LINK_BM_P_RF);
    rf_cnt += (uint16_t)cvg_prm_get_pco_link_cnt(vdev, 0,
        CVG_PRM_LINK_BM_P_DUAL_HPLC);
    rf_cnt += (uint16_t)cvg_prm_get_pco_link_cnt(vdev, 0,
        CVG_PRM_LINK_BM_P_DUAL_RF);
    rf_cnt += (uint16_t)cvg_prm_get_pco_link_cnt(vdev, 0,
        CVG_PRM_LINK_BM_P_DUAL_DUAL);
    rf_cnt += (uint16_t)cvg_prm_get_sta_link_cnt(vdev,
        CVG_PRM_LINK_BM_S_RF);
    rf_cnt += (uint16_t)cvg_prm_get_sta_link_cnt(vdev,
        CVG_PRM_LINK_BM_S_DUAL);
    return rf_cnt;
}

/* recursively accumulate pco link-type statistics of the peer and all of
 * its sub pco devices into stat[level][link type] */
void cvg_prm_cco_get_sub_pco_type_stat(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    uint16_t stat[][CVG_PRM_LINK_BM_P_CNT])
{
    cvg_peer_info_t *tmp_peer;
    tei_t tei;

    if (peer->comm_type == PLC_DEV_COMM_TYPE_HPLC) {
        stat[peer->level][CVG_PRM_LINK_BM_P_HPLC]++;
    } else if (peer->comm_type == PLC_DEV_COMM_TYPE_RF) {
        stat[peer->level][CVG_PRM_LINK_BM_P_RF]++;
    } else {
        /* dual-mode pco is further classified by its sub link type */
        if (peer->sub_link_type == CVG_PEER_SUB_LINK_HPLC) {
            stat[peer->level][CVG_PRM_LINK_BM_P_DUAL_HPLC]++;
        } else if (peer->sub_link_type == CVG_PEER_SUB_LINK_RF) {
            stat[peer->level][CVG_PRM_LINK_BM_P_DUAL_RF]++;
        } else {
            stat[peer->level][CVG_PRM_LINK_BM_P_DUAL_DUAL]++;
        }
    }
    if (peer->direct_sub_sta == NULL) {
        /* leaf: no directly connected sub sta to walk */
        return;
    }
    /* recurse into directly connected sub devices that hold pco role */
    CVG_TEI_MAP_LOOP_BEGIN(peer->direct_sub_sta, tei);
    tmp_peer = cvg_prm_get_peer_by_tei(vdev, tei);
    if (tmp_peer && (tmp_peer->role == PLC_DEV_ROLE_PCO)) {
        cvg_prm_cco_get_sub_pco_type_stat(vdev, tmp_peer, stat);
    }
    CVG_TEI_MAP_LOOP_END(peer->direct_sub_sta, tei);
}

/* copy the network-wide pco link-type counters into stat */
void cvg_prm_cco_get_nw_pco_type_stat(cvg_vdev_t *vdev,
    uint16_t stat[][CVG_PRM_LINK_BM_P_CNT])
{
    cvg_prm_vdev_t *prm = vdev->prm;

    os_mem_cpy(stat, prm->pco_link_cnt, sizeof(prm->pco_link_cnt));
}

/* sort the array of pco history pointers in place, best entry first,
 * using cvg_prm_cmp_pco_his as the ordering */
static void cvg_prm_pco_his_sort(cvg_vdev_t *vdev, cvg_peer_pco_his_t *p_his[],
    uint8_t cnt)
{
    uint8_t i, j, pos;
    cvg_peer_pco_his_t *tmp;

    /* sort the pco history from good to bad */
    i = cnt - 1;
    while (i) {
        pos = 0;
        for (j = 0; j < i; j++) {
            if (cvg_prm_cmp_pco_his(vdev, p_his[j], p_his[j + 1]) ==
                p_his[j + 1]) {
                tmp = p_his[j];
                p_his[j] = p_his[j + 1];
                p_his[j + 1] = tmp;
                pos = j;
            }
        }
        /* everything beyond the last swap position is already in order,
         * so shrink the unsorted range to it (optimized bubble sort) */
        i = pos;
    }
}

/* derive a hint link type for the peer from its pco history. collects the
 * valid history entries and, when enough exist, checks the best
 * CVG_PEER_HINT_LINK_HIS_CNT_TH of them; *link is set to rf only when all
 * of those are rf. returns ERR_OK when a hint was produced. */
uint8_t cvg_prm_pco_his_get_hint_link(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    uint8_t *link)
{
    uint8_t i, cnt;
    cvg_peer_pco_his_t *p_his[CVG_PEER_PCO_HIS_DEPTH];

    for (i = 0, cnt = 0; i < CVG_PEER_PCO_HIS_DEPTH; i++) {
        if (iot_mac_addr_valid(peer->pco_his[i].addr)) {
            p_his[cnt] = &peer->pco_his[i];
            cnt++;
        }
    }
    if (cnt < CVG_PEER_HINT_LINK_HIS_CNT_TH) {
        return ERR_FAIL;
    } else if (cnt >
CVG_PEER_HINT_LINK_HIS_CNT_TH) { cvg_prm_pco_his_sort(vdev, p_his, cnt); } for (i = 0; i < CVG_PEER_HINT_LINK_HIS_CNT_TH; i++) { if (!(p_his[i])->is_rf) { return ERR_FAIL; } } *link = PLC_LINK_TYPE_RF; return ERR_OK; } #else /* HPLC_RF_SUPPORT */ static void cvg_prm_update_peer_sub_link_type(cvg_peer_info_t *peer) { if (!peer->direct_sub_sta) { peer->sub_link_type = CVG_PEER_SUB_LINK_INVALID; return; } peer->d_sub_sta_cnt = (uint16_t)cvg_tei_map_cbs(peer->direct_sub_sta); if (peer->d_sub_sta_cnt) { /* bm contain only hplc devices */ peer->sub_link_type = CVG_PEER_SUB_LINK_HPLC; } else { /* bm is empty */ peer->sub_link_type = CVG_PEER_SUB_LINK_INVALID; } } #define cvg_prm_cco_set_peer_link_type_bm(vdev, peer) #define cvg_prm_cco_clear_peer_link_type_bm(vdev, peer) #define cvg_prm_update_peer_link_type_bm(vdev, peer) #define cvg_prm_alloc_peer_rf_d_sub_sta(prm, peer) #define cvg_prm_free_peer_rf_d_sub_sta(prm, peer) #endif /* HPLC_RF_SUPPORT */ void cvg_prm_set_peer_proxy(cvg_vdev_t *vdev, tei_t s_tei, tei_t p_tei, uint8_t link_type, uint8_t from_assoc) { uint8_t rpt_to_app = 0, link_type_chg = 0; cvg_peer_info_t *peer, *proxy; cvg_prm_vdev_t *prm = vdev->prm; cvg_tei_map_t *bm; peer = cvg_prm_get_peer_by_tei(vdev, s_tei); /* set uplink is active */ cvg_prm_set_peer_uplink_active(prm->vdev, peer); peer->prev_proxy = peer->proxy; if (peer->pco_link_type != link_type) { link_type_chg = 1; } if (PLC_TEI_IS_VALID(peer->proxy) && (peer->proxy != p_tei || link_type_chg)) { if (from_assoc == 0) { peer->last_proxy_chg_ts = (uint32_t)(os_boot_time64() / 1000); iot_counter_inc(peer->proxy_chg_accept_cnt); iot_counter_inc(prm->proxy_chg_accept_cnt); rpt_to_app = 1; } /* peer proxy changed. clear the old proxy info */ proxy = cvg_prm_get_peer_by_tei(vdev, cvg_prm_get_peer_proxy_tei(peer)); if (proxy && proxy->direct_sub_sta) { /* it's possible that proxy rejoined the network after cco kick * out it. 
for this case, cco may assign same tei to the proxy * while the sub sta info of the proxy won't be maintained. */ cvg_tei_map_clear(proxy->direct_sub_sta, CVG_TEI_TO_BM(s_tei)); bm = cvg_prm_get_peer_rf_d_sub_sta(proxy); if (bm) { cvg_tei_map_clear(bm, CVG_TEI_TO_BM(s_tei)); } if (cvg_tei_map_cbs(proxy->direct_sub_sta) == 0) { cvg_prm_set_peer_role(vdev, proxy, PLC_DEV_ROLE_STA); } cvg_prm_update_peer_sub_link_type(proxy); cvg_prm_update_peer_link_type_bm(vdev, proxy); } } if (peer->proxy != p_tei || link_type_chg) { /* clean up peer snr info if proxy changed */ cvg_prm_set_peer_tx_snr(peer, INVALID_SNR); peer->rx_snr = INVALID_SNR; } /* set new proxy info */ proxy = cvg_prm_get_peer_by_tei(vdev, p_tei); IOT_ASSERT(proxy); cvg_prm_set_peer_role(vdev, proxy, PLC_DEV_ROLE_PCO); cvg_prm_set_peer_active(vdev, proxy); cvg_tei_map_set(proxy->direct_sub_sta, CVG_TEI_TO_BM(s_tei)); bm = cvg_prm_get_peer_rf_d_sub_sta(proxy); if ((link_type == PLC_LINK_TYPE_RF) && bm) { cvg_tei_map_set(bm, CVG_TEI_TO_BM(s_tei)); } cvg_prm_set_peer_pco_link_type(peer, link_type); cvg_prm_update_peer_sub_link_type(proxy); cvg_prm_update_peer_link_type_bm(vdev, proxy); peer->proxy = p_tei; peer->pco_out_of_sync = 0; if (rpt_to_app) { cvg_app_bcast_sta_proxy_changed(vdev, peer); } } /* get bitmap of all sub sta of one peer */ void cvg_prm_get_sub_sta_bm(cvg_vdev_t *vdev, cvg_peer_info_t *proxy, cvg_tei_map_t *bm) { cvg_prm_vdev_t *prm; cvg_peer_info_t *sub_peer; tei_t start; prm = vdev->prm; /* sta role device has no sub sta */ IOT_ASSERT(cvg_prm_get_peer_role(proxy) == PLC_DEV_ROLE_PCO); cvg_tei_map_merge(bm, proxy->direct_sub_sta); /* the highest level proxy won't have sub sta with proxy role */ if (cvg_prm_get_peer_level(proxy) == (PLC_MAX_RT_LEVEL - 1)) goto out; CVG_TEI_MAP_LOOP_BEGIN(proxy->direct_sub_sta, start); sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)]; if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_PCO) cvg_prm_get_sub_sta_bm(vdev, sub_peer, bm); 
CVG_TEI_MAP_LOOP_END(proxy->direct_sub_sta, start); out: return; }

/* Serialize peer's directly-connected sub-station topology into a
 * route_info_t record at 'data'.
 * First pass emits all direct STA-role children (clearing their bits from a
 * local copy of the bitmap), second pass emits each remaining PCO-role child
 * followed by its own sub-sta TEI list. 'sub_bm' accumulates the union of
 * peer's direct children plus every proxy's sub-sta bitmap.
 * Returns the total number of bytes written into 'data' (asserted <= len). */
uint32_t cvg_prm_get_sub_sta(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    cvg_tei_map_t *sub_bm, uint8_t *data, uint32_t len)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    cvg_peer_info_t *sub_peer;
    uint32_t idx, b, s_idx, s_b, tmp_len = sizeof(route_info_t);
    route_info_t *info = (route_info_t *)data;
    sub_d_sta_t *d_sub_sta = (sub_d_sta_t *)info->rt_table;
    proxy_info_t *d_sub_proxy;
    tei_t start, start_proxy = PLC_TEI_INVAL, *sub_sta;
    cvg_tei_map_t bm;

    info->dc_proxy_cnt = 0;
    info->dc_sta_cnt = 0;
    info->route_table_size = 0;
    info->reserved = 0;
    /* work on a local copy so STA bits can be cleared during the first pass */
    os_mem_cpy(&bm, peer->direct_sub_sta, sizeof(bm));
    os_mem_cpy(sub_bm, &bm, sizeof(bm));
    idx = cvg_tei_map_ffs(&bm);
    if (idx == 0)
        goto out;
    start = CVG_BM_TO_TEI(idx);
    idx = start;
    b = idx & 0x07;
    idx >>= 3;
    /* find out all directly connected sub sta with sta role */
    for (; idx < CVG_TEI_MAP_BYTE_LEN; idx++) {
        for (; b < 8; b++) {
            if (bm.map[idx] & (1 << b)) {
                sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)];
                if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_STA) {
                    d_sub_sta[info->dc_sta_cnt].tei = start;
                    d_sub_sta[info->dc_sta_cnt].link =
                        cvg_prm_get_peer_pco_link_type(sub_peer);
                    info->dc_sta_cnt++;
                    tmp_len += sizeof(sub_d_sta_t);
                    info->route_table_size += sizeof(sub_d_sta_t);
                    /* consumed: leave only proxy-role bits for pass two */
                    bm.map[idx] &= ~((uint8_t)(1 << b));
                } else if (start_proxy == PLC_TEI_INVAL) {
                    /* remember the first proxy so pass two can start there */
                    start_proxy = start;
                }
            }
            start++;
        }
        b = 0;
    }
    if (start_proxy == PLC_TEI_INVAL)
        goto out;
    /* find out all directly connected sub sta with proxy role and it's
     * sub sta */
    idx = start_proxy;
    b = idx & 0x07;
    idx >>= 3;
    d_sub_proxy = (proxy_info_t *)(data + tmp_len);
    for (; idx < CVG_TEI_MAP_BYTE_LEN; idx++) {
        for (; b < 8; b++) {
            if (bm.map[idx] & (1 << b)) {
                cvg_tei_map_t sub_sta_bm;
                cvg_tei_map_reset(&sub_sta_bm);
                sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start_proxy)];
                IOT_ASSERT(cvg_prm_get_peer_role(sub_peer) ==
                    PLC_DEV_ROLE_PCO);
                d_sub_proxy->dc_proxy = start_proxy;
                d_sub_proxy->link =
                    cvg_prm_get_peer_pco_link_type(sub_peer);
                d_sub_proxy->sub_sta_cnt = 0;
                sub_sta = d_sub_proxy->sub_sta_tei;
                info->dc_proxy_cnt++;
                tmp_len += sizeof(*d_sub_proxy);
                info->route_table_size += sizeof(*d_sub_proxy);
                /* get bitmap of all sub sta of the proxy */
                cvg_prm_get_sub_sta_bm(vdev, sub_peer, &sub_sta_bm);
                s_idx = cvg_tei_map_ffs(&sub_sta_bm);
                cvg_tei_map_merge(sub_bm, &sub_sta_bm);
                IOT_ASSERT(s_idx);
                start = CVG_BM_TO_TEI(s_idx);
                s_idx = start;
                s_b = s_idx & 0x07;
                s_idx >>= 3;
                /* generate the tei list */
                for (; s_idx < CVG_TEI_MAP_BYTE_LEN; s_idx++) {
                    for (; s_b < 8; s_b++) {
                        if (sub_sta_bm.map[s_idx] & (1 << s_b)) {
                            sub_sta[d_sub_proxy->sub_sta_cnt++] = start;
                            tmp_len += sizeof(tei_t);
                            info->route_table_size += sizeof(tei_t);
                        }
                        start++;
                    }
                    s_b = 0;
                }
                /* next proxy record starts right after this one's TEI list */
                d_sub_proxy = (proxy_info_t *)(data + tmp_len);
            }
            start_proxy++;
        }
        b = 0;
    }
    IOT_ASSERT(tmp_len <= len);
out:
    return tmp_len;
}

/* Walk peer's proxy chain upward and report its relation to 'proxy'.
 * Returns 1 when 'peer' is (directly or transitively) behind 'proxy',
 * 0 when the chain ends without reaching 'proxy', and 2 when a proxy in
 * the chain no longer exists (already kicked out by CCO). */
uint8_t cvg_prm_is_sub_sta(cvg_vdev_t *vdev, cvg_peer_info_t *proxy,
    cvg_peer_info_t *peer)
{
    tei_t tmp = cvg_prm_get_peer_proxy_tei(peer);
    tei_t pt = cvg_prm_get_peer_tei(vdev, proxy);

    while (tmp != PLC_TEI_INVAL && tmp != pt) {
        peer = cvg_prm_get_peer_by_tei(vdev, tmp);
        if (peer) {
            /* it's possible that proxy in the chain already kicked out
             * by CCO.
             */
            tmp = cvg_prm_get_peer_proxy_tei(peer);
        } else {
            return 2;
        }
    }
    if (tmp == pt)
        return 1;
    else
        return 0;
}

/* Returns 1 when the peer is a single-phase device: not a 3-phase device
 * type and physically present on phase 1 only. */
uint8_t cvg_prm_is_single_phase(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    (void)vdev;
    IOT_ASSERT(peer);
    if (!cvg_nwm_is_3_phase_dev_type(peer->dev_type) && peer->phy_phase_1 &&
        !peer->phy_phase_2 && !peer->phy_phase_3) {
        return 1;
    }
    return 0;
}

/* Allocate a fresh peer slot: find the first unassigned TEI bit at or after
 * the rotating cursor 'tei_map_idx', reset the slot and mark it both in-use
 * and assigned. The searchable TEI range may be capped to
 * CVG_PRM_SUPP_1K_MAX_TEI for large-scale networks (see condition below).
 * Returns the new peer, or NULL when no TEI is available in range. */
cvg_peer_info_t *cvg_prm_create_peer(cvg_vdev_t *vdev,
    mme_vendor_info_t *vendor_info)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    cvg_peer_info_t *tmp = NULL;
    uint32_t idx = 0, in_use_idx = 0;
    uint16_t tei_max = PLC_TEI_LAST;
    uint16_t bm_size = sizeof(prm->tei_assigned);
    cvg_cfg_t *cfg = cvg_get_global_cfg();
    /* NOTE(review): cfg and vendor_info are (void)-cast yet used below; the
     * casts are redundant leftovers, harmless. */
    (void)cfg;
    (void)vendor_info;
    if (PLC_NETWORK_SCALE > CVG_PRM_SUPP_1K_MAX_TEI &&
        (cvg_sec_get_wl_entry_count(vdev) <= CVG_PRM_SUPP_1K_WL_MAX_CNT ||
        (vendor_info && vendor_info->vendor_mark == cfg->same_vendor_mark &&
        !vendor_info->psram_enable))) {
        tei_max = min(CVG_PRM_SUPP_1K_MAX_TEI, PLC_TEI_LAST);
        bm_size = ((tei_max + 8) / 8);
    }
    in_use_idx = cvg_tei_map_ffz_from(&prm->tei_assigned, bm_size,
        prm->tei_map_idx);
    if (in_use_idx) {
        IOT_ASSERT(!cvg_tei_map_is_set(&prm->in_use, in_use_idx));
        idx = CVG_PRM_BM_TO_IDX(in_use_idx);
        if (CVG_PRM_IDX_TO_TEI(idx) <= tei_max) {
            tmp = &prm->peer[idx];
            cvg_prm_reset_peer(vdev, tmp);
            /* set the bit only if peer created successfully */
            cvg_tei_map_set(&prm->in_use, in_use_idx);
            cvg_tei_map_set(&prm->tei_assigned, in_use_idx);
            prm->tei_map_idx = in_use_idx + 1;
        }
    }
    return tmp;
}

/* Look up a peer by MAC address, including TEI-locked (assigned but not
 * in-use) entries. Returns NULL when the address has no assigned TEI. */
cvg_peer_info_t *cvg_prm_get_peer_by_addr_locked(cvg_vdev_t *vdev,
    uint8_t *addr)
{
    tei_t tei = PLC_TEI_INVAL;
    cvg_prm_vdev_t *prm = vdev->prm;
    cvg_peer_info_t *peer = NULL;
    cvg_matm_get_tei(&prm->table, addr, &tei);
    if (PLC_TEI_IS_VALID(tei) &&
        cvg_tei_map_is_set(&prm->tei_assigned, CVG_TEI_TO_BM(tei))) {
        peer = &prm->peer[CVG_PRM_TEI_TO_IDX(tei)];
    }
    return peer;
}

/* Look up a peer by TEI, including TEI-locked entries (checks the
 * 'tei_assigned' map rather than 'in_use'). */
cvg_peer_info_t *cvg_prm_get_peer_by_tei_locked(cvg_vdev_t *vdev, tei_t tei)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    if (PLC_TEI_IS_VALID(tei) &&
        cvg_tei_map_is_set(&prm->tei_assigned, CVG_TEI_TO_BM(tei))) {
        return &prm->peer[CVG_PRM_TEI_TO_IDX(tei)];
    } else {
        return NULL;
    }
}

/* Recycle the longest-locked TEI when TEIs run short.
 * Scans entries that are assigned but not in-use, picks the one with the
 * oldest last_delete_ts, and frees it only if it has been locked for at
 * least CVG_PRM_TEI_LOCK_RECYCLE_DUR seconds.
 * Returns the freed (zeroed) peer slot, or NULL when nothing is eligible. */
cvg_peer_info_t *cvg_prm_get_recycle_peer_locked(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    cvg_tei_map_t bm;
    cvg_peer_info_t *peer, *tmp_peer = NULL;
    tei_t tei;
    uint32_t tmp_del_ts = 0;
    uint32_t curr_ts = (uint32_t)(os_boot_time64() / 1000);
    uint8_t tmp_addr[IOT_MAC_ADDR_LEN];
    if (!PLC_SUPPORT_TEI_LOCK || !PLC_SUPPORT_TEI_LOCK_RECYCLE) {
        return NULL;
    }
    /* recycle only makes sense when it triggers before the normal unlock */
    if (CVG_PRM_TEI_LOCK_RECYCLE_DUR >= CVG_PRM_TEI_LOCK_DUR) {
        return NULL;
    }
    /* locked set = assigned minus in-use */
    os_mem_cpy(&bm, &prm->tei_assigned, sizeof(bm));
    cvg_tei_map_exclude(&bm, &prm->in_use);
    CVG_TEI_MAP_LOOP_BEGIN(&bm, tei);
    peer = &prm->peer[CVG_PRM_TEI_TO_IDX(tei)];
    /* get longest locked peer */
    if (!tmp_peer || (tmp_del_ts > peer->last_delete_ts)) {
        tmp_peer = peer;
        tmp_del_ts = peer->last_delete_ts;
    }
    CVG_TEI_MAP_LOOP_END(&bm, tei);
    if (tmp_peer && (curr_ts - tmp_peer->last_delete_ts <
        CVG_PRM_TEI_LOCK_RECYCLE_DUR)) {
        return NULL;
    }
    if (tmp_peer) {
        tei = cvg_prm_get_peer_tei(vdev, tmp_peer);
        IOT_ASSERT(ERR_OK == cvg_matm_get_addr(&prm->table, tmp_addr, tei));
        iot_printf("%s tei %lu unlocked %02X:%02X:%02X:%02X:%02X:%02X\n",
            __FUNCTION__, tei, tmp_addr[0], tmp_addr[1], tmp_addr[2],
            tmp_addr[3], tmp_addr[4], tmp_addr[5]);
        /* delete locked peer info, note that other info of the peer has been
         * deleted in cvg_prm_delete_peer while the peer been leaving the
         * network.
         */
        cvg_prm_matm_del_ent(prm, tei);
        os_mem_set(tmp_peer, 0, sizeof(*tmp_peer));
    }
    return tmp_peer;
}

/* Simple counter accessor: number of PCO-role peers. */
uint32_t cvg_prm_get_pco_cnt(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    return prm->pco_cnt;
}

/* Simple counter accessor: number of STA-role peers. */
uint32_t cvg_prm_get_sta_cnt(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    return prm->sta_cnt;
}

/* Return the PCO bitmap for a 1-based route level. */
cvg_tei_map_t *cvg_prm_get_pco_bm(cvg_vdev_t *vdev, uint8_t level)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    IOT_ASSERT(level && level <= PLC_MAX_RT_LEVEL);
    return &prm->pco_bms[level - 1];
}

/* Return the STA bitmap for a 1-based route level. */
cvg_tei_map_t *cvg_prm_get_sta_bm(cvg_vdev_t *vdev, uint8_t level)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    IOT_ASSERT(level && level <= PLC_MAX_RT_LEVEL);
    return &prm->sta_bms[level - 1];
}

/* Device count whose first phase maps to 'phase' (1-based). */
uint32_t cvg_prm_get_phase1_cnt(cvg_vdev_t *vdev, uint8_t phase)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    IOT_ASSERT(phase && phase <= PLC_PHASE_CNT);
    return prm->phase1_cnt[phase - 1];
}

/* Device count whose second phase maps to 'phase' (1-based). */
uint32_t cvg_prm_get_phase2_cnt(cvg_vdev_t *vdev, uint8_t phase)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    IOT_ASSERT(phase && phase <= PLC_PHASE_CNT);
    return prm->phase2_cnt[phase - 1];
}

/* Device count whose third phase maps to 'phase' (1-based). */
uint32_t cvg_prm_get_phase3_cnt(cvg_vdev_t *vdev, uint8_t phase)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    IOT_ASSERT(phase && phase <= PLC_PHASE_CNT);
    return prm->phase3_cnt[phase - 1];
}

/* Return the 1-based phase carrying the most devices (defaults to
 * PLC_PHASE_A when all counts are zero). */
uint8_t cvg_prm_get_most_dev_phase(cvg_vdev_t *vdev)
{
    uint32_t tmp = 0;
    uint8_t i, phase = PLC_PHASE_A;
    cvg_prm_vdev_t *prm = vdev->prm;
    for (i = 0; i < PLC_PHASE_CNT; i++) {
        if (tmp < prm->phase1_cnt[i]) {
            phase = i + 1;
            tmp = prm->phase1_cnt[i];
        }
    }
    return phase;
}

/* Return the deepest bitmap index that still holds STA entries.
 * NOTE(review): the returned value is the 0-based sta_bms index, while
 * levels elsewhere in this module are 1-based -- confirm callers expect
 * this convention. */
uint8_t cvg_prm_get_topo_depth(cvg_vdev_t *vdev)
{
    uint8_t i;
    cvg_prm_vdev_t *prm = vdev->prm;
    for (i = (PLC_MAX_RT_LEVEL - 1); i > 0; i--) {
        if (cvg_tei_map_ffs(&prm->sta_bms[i])) {
            break;
        }
    }
    return i;
}

/* Record an association request from 'peer': stamp last/first assoc time,
 * mark the uplink active and bump per-peer and per-vdev rx counters. */
void cvg_prm_peer_assoc_notify(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    peer->last_assoc_ts = (uint32_t)(os_boot_time64() / 1000);
    if (cvg_prm_get_peer_first_assoc_ts(peer) == 0) {
        cvg_prm_set_peer_first_assoc_ts(peer, peer->last_assoc_ts);
    }
    /* set uplink is active */
    cvg_prm_set_peer_uplink_active(vdev, peer);
    iot_counter_inc(peer->assoc_rx_cnt);
    iot_counter_inc(prm->assoc_rx_cnt);
}

/* Count a received proxy-change request; the inc-then-dec pair makes the
 * counter saturate instead of wrapping to zero. */
void cvg_prm_peer_proxy_chg_rx_notify(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    (void)peer;
    prm->proxy_chg_rx_cnt++;
    if (prm->proxy_chg_rx_cnt == 0)
        prm->proxy_chg_rx_cnt--;
#if PLC_SUPPORT_PEER_STAT
    peer->proxy_chg_rx_cnt++;
    if (peer->proxy_chg_rx_cnt == 0)
        peer->proxy_chg_rx_cnt--;
#endif
}

/* Count a peer phase change (saturating, same wrap guard as above). */
void cvg_prm_peer_phase_chg_notify(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    (void)peer;
    cvg_prm_vdev_t *prm = vdev->prm;
    prm->phase_chg_cnt++;
    if (prm->phase_chg_cnt == 0)
        prm->phase_chg_cnt--;
}

/* Force-release one locked TEI: drop its addr<->tei mapping, zero the peer
 * slot and clear the assigned bit. No-op unless TEI locking is enabled and
 * the TEI is currently assigned. */
void cvg_prm_delete_locked_tei(cvg_vdev_t *vdev, tei_t sta_tei)
{
    uint8_t tmp_addr[IOT_MAC_ADDR_LEN];
    cvg_peer_info_t *tmp_peer;
    cvg_prm_vdev_t *prm = vdev->prm;
    if (!PLC_SUPPORT_TEI_LOCK)
        return;
    if (cvg_tei_map_is_set(&prm->tei_assigned, CVG_TEI_TO_BM(sta_tei))) {
        tmp_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(sta_tei)];
        IOT_ASSERT(ERR_OK == cvg_matm_get_addr(&prm->table, tmp_addr,
            sta_tei));
        iot_printf("%s tei %lu unlocked %02X:%02X:%02X:%02X:%02X:%02X\n",
            __FUNCTION__, sta_tei, tmp_addr[0], tmp_addr[1], tmp_addr[2],
            tmp_addr[3], tmp_addr[4], tmp_addr[5]);
        /* delete locked peer info, note that other info of the peer has been
         * deleted in cvg_prm_delete_peer while the peer been leaving the
         * network.
         */
        cvg_prm_matm_del_ent(prm, sta_tei);
        os_mem_set(tmp_peer, 0, sizeof(*tmp_peer));
        cvg_tei_map_clear(&prm->tei_assigned, CVG_TEI_TO_BM(sta_tei));
    }
}

/* Periodic sweep: release every locked TEI whose lock has been held longer
 * than CVG_PRM_TEI_LOCK_DUR seconds. */
void cvg_prm_clean_up_locked_tei_bm(cvg_vdev_t *vdev)
{
    uint32_t ts;
    uint8_t tmp_addr[IOT_MAC_ADDR_LEN];
    tei_t tmp_tei;
    cvg_tei_map_t bm;
    cvg_peer_info_t *tmp_peer;
    cvg_prm_vdev_t *prm = vdev->prm;
    if (!PLC_SUPPORT_TEI_LOCK)
        return;
    /* locked set = assigned minus in-use */
    os_mem_cpy(&bm, &prm->tei_assigned, sizeof(bm));
    cvg_tei_map_exclude(&bm, &prm->in_use);
    ts = (uint32_t)(os_boot_time64() / 1000);
    CVG_TEI_MAP_LOOP_BEGIN(&bm, tmp_tei);
    tmp_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(tmp_tei)];
    if ((ts - tmp_peer->last_delete_ts) >= CVG_PRM_TEI_LOCK_DUR) {
        IOT_ASSERT(ERR_OK == cvg_matm_get_addr(&prm->table, tmp_addr,
            tmp_tei));
        iot_printf("%s tei %lu unlocked %02X:%02X:%02X:%02X:%02X:%02X\n",
            __FUNCTION__, tmp_tei, tmp_addr[0], tmp_addr[1], tmp_addr[2],
            tmp_addr[3], tmp_addr[4], tmp_addr[5]);
        /* delete locked peer info, note that other info of the peer has been
         * deleted in cvg_prm_delete_peer while the peer been leaving the
         * network. */
        cvg_prm_matm_del_ent(prm, tmp_tei);
        os_mem_set(tmp_peer, 0, sizeof(*tmp_peer));
        cvg_tei_map_clear(&prm->tei_assigned, CVG_TEI_TO_BM(tmp_tei));
    }
    CVG_TEI_MAP_LOOP_END(&bm, tmp_tei);
}

/* note that this function is invoked outside CVG context, suppose this
 * function should only read some statistics variables from CVG layer.
 */
/* Dump CCO-side peer-management statistics to console and dbglog.
 * Level counts are packed two per uint32 (low/high 16 bits). */
static void cvg_prm_cco_status_dump(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    uint8_t i, j;
    uint32_t pco_cnt[8];
    uint32_t sta_cnt[8];
    /* NOTE(review): BUILD_BUG_ON conventionally breaks the build when its
     * condition is TRUE; as written these fire when the arrays ARE large
     * enough, i.e. the '>=' looks inverted relative to the usual
     * 'count < needed' guard -- confirm the polarity of the local
     * BUILD_BUG_ON definition. */
    BUILD_BUG_ON(IOT_ARRAY_CNT(pco_cnt) >= ((PLC_MAX_RT_LEVEL / 2) + 1));
    BUILD_BUG_ON(IOT_ARRAY_CNT(sta_cnt) >= ((PLC_MAX_RT_LEVEL / 2) + 1));
    iot_printf("%s assoc rx %lu, proxy chg rx %lu, "
        "proxy chg accept %lu, phase chg %lu, pco %lu, sta %lu, "
        "a %lu, b %lu, c %lu\n", __FUNCTION__, prm->assoc_rx_cnt,
        prm->proxy_chg_rx_cnt, prm->proxy_chg_accept_cnt, prm->phase_chg_cnt,
        prm->pco_cnt, prm->sta_cnt, prm->phase1_cnt[0], prm->phase1_cnt[1],
        prm->phase1_cnt[2]);
    iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR, CVG_DUMP_STATUS_PRM_ID,
        9, prm->assoc_rx_cnt, prm->proxy_chg_rx_cnt,
        prm->proxy_chg_accept_cnt, prm->phase_chg_cnt, prm->pco_cnt,
        prm->sta_cnt, prm->phase1_cnt[0], prm->phase1_cnt[1],
        prm->phase1_cnt[2]);
#if (!HPLC_RF_SUPPORT)
    os_mem_set(pco_cnt, 0, sizeof(pco_cnt));
    os_mem_set(sta_cnt, 0, sizeof(sta_cnt));
    /* pack counts of two adjacent levels into one word: even level in the
     * low 16 bits, odd level shifted left by 16 */
    for (i = 0, j = 0; i < PLC_MAX_RT_LEVEL; i++) {
        pco_cnt[j] |= (cvg_tei_map_cbs(&prm->pco_bms[i])) << ((i & 0x01) << 4);
        sta_cnt[j] |= (cvg_tei_map_cbs(&prm->sta_bms[i])) << ((i & 0x01) << 4);
        if ((i & 0x01) == 1)
            j++;
    }
    iot_printf("%s pco cnt %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
        __FUNCTION__, pco_cnt[0], pco_cnt[1], pco_cnt[2], pco_cnt[3],
        pco_cnt[4], pco_cnt[5], pco_cnt[6], pco_cnt[7]);
    iot_printf("%s sta cnt %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
        __FUNCTION__, sta_cnt[0], sta_cnt[1], sta_cnt[2], sta_cnt[3],
        sta_cnt[4], sta_cnt[5], sta_cnt[6], sta_cnt[7]);
    iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR,
        CVG_DUMP_STATUS_PRM_PCO_BM_ID, 8, pco_cnt[0], pco_cnt[1], pco_cnt[2],
        pco_cnt[3], pco_cnt[4], pco_cnt[5], pco_cnt[6], pco_cnt[7]);
    iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR,
        CVG_DUMP_STATUS_PRM_STA_BM_ID, 8, sta_cnt[0], sta_cnt[1], sta_cnt[2],
        sta_cnt[3], sta_cnt[4], sta_cnt[5], sta_cnt[6], sta_cnt[7]);
#else /* (!HPLC_RF_SUPPORT) */
    uint8_t k;
    /* same packing, but one pass per PCO link-type bitmap */
    for (k = 0; k < CVG_PRM_LINK_BM_P_CNT; k++) {
        os_mem_set(pco_cnt, 0, sizeof(pco_cnt));
        os_mem_set(sta_cnt, 0, sizeof(sta_cnt));
        for (i = 0, j = 0; i < PLC_MAX_RT_LEVEL; i++) {
            pco_cnt[j] |= (cvg_tei_map_cbs(&prm->pco_link_bms[i][k])) <<
                ((i & 0x01) << 4);
            if ((i & 0x01) == 1)
                j++;
        }
        iot_printf("%s pco[%lu] cnt %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
            __FUNCTION__, k, pco_cnt[0], pco_cnt[1], pco_cnt[2], pco_cnt[3],
            pco_cnt[4], pco_cnt[5], pco_cnt[6], pco_cnt[7]);
        iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR,
            CVG_DUMP_STATUS_PRM_PCO_LINK_BM_ID, 9, k, pco_cnt[0], pco_cnt[1],
            pco_cnt[2], pco_cnt[3], pco_cnt[4], pco_cnt[5], pco_cnt[6],
            pco_cnt[7]);
    }
    for (k = 0; k < CVG_PRM_LINK_BM_S_CNT; k++) {
        os_mem_set(pco_cnt, 0, sizeof(pco_cnt));
        os_mem_set(sta_cnt, 0, sizeof(sta_cnt));
        for (i = 0, j = 0; i < PLC_MAX_RT_LEVEL; i++) {
            sta_cnt[j] |= (cvg_tei_map_cbs(&prm->sta_link_bms[i][k])) <<
                ((i & 0x01) << 4);
            if ((i & 0x01) == 1)
                j++;
        }
        iot_printf("%s sta[%lu] cnt %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
            __FUNCTION__, k, sta_cnt[0], sta_cnt[1], sta_cnt[2], sta_cnt[3],
            sta_cnt[4], sta_cnt[5], sta_cnt[6], sta_cnt[7]);
        iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR,
            CVG_DUMP_STATUS_PRM_STA_LINK_BM_ID, 9, k, sta_cnt[0], sta_cnt[1],
            sta_cnt[2], sta_cnt[3], sta_cnt[4], sta_cnt[5], sta_cnt[6],
            sta_cnt[7]);
    }
#endif /* (!HPLC_RF_SUPPORT) */
}

/* Zero all CCO-side counters, per-phase counts and level bitmaps. */
static inline void cvg_prm_cco_init(cvg_prm_vdev_t *prm)
{
    uint8_t i;
    prm->tei_map_idx = 0;
    prm->assoc_rx_cnt = 0;
    prm->proxy_chg_accept_cnt = 0;
    prm->proxy_chg_rx_cnt = 0;
    prm->phase_chg_cnt = 0;
    prm->cco_cnt = 0;
    prm->pco_cnt = 0;
    prm->sta_cnt = 0;
#if HPLC_RF_SUPPORT
    os_mem_set(prm->pco_link_cnt, 0, sizeof(prm->pco_link_cnt));
    os_mem_set(prm->sta_link_cnt, 0, sizeof(prm->sta_link_cnt));
    os_mem_set(prm->pco_link_bms, 0, sizeof(prm->pco_link_bms));
    os_mem_set(prm->sta_link_bms, 0, sizeof(prm->sta_link_bms));
#endif
    for (i = 0; i < PLC_PHASE_CNT; i++) {
        prm->phase1_cnt[i] = 0;
        prm->phase2_cnt[i] = 0;
        prm->phase3_cnt[i] = 0;
    }
    for (i = 0; i < PLC_MAX_RT_LEVEL; i++) {
        cvg_tei_map_reset(&prm->pco_bms[i]);
        cvg_tei_map_reset(&prm->sta_bms[i]);
    }
    cvg_tei_map_reset(&prm->tei_assigned);
}

/* Create the pool of direct-sub-sta bitmaps used by PCO/CCO peers.
 * With RF support the pool is doubled (separate RF bitmaps).
 * Returns ERR_OK or ERR_NOMEM. */
static uint32_t cvg_prm_cco_init_vdev(cvg_prm_vdev_t *prm)
{
    uint32_t ret = ERR_OK;
#if HPLC_RF_SUPPORT
    ret = iot_mem_pool_new(PLC_CVG_PRM_MID, (CVG_PCO_MAX_CNT + 1) * 2,
        sizeof(cvg_tei_map_t), &prm->pco_d_sub_sta_pool, 0);
#else
    ret = iot_mem_pool_new(PLC_CVG_PRM_MID, (CVG_PCO_MAX_CNT + 1),
        sizeof(cvg_tei_map_t), &prm->pco_d_sub_sta_pool, 0);
#endif
    if (ret) {
        ret = ERR_NOMEM;
    }
    return ret;
}

/* Tear down the direct-sub-sta bitmap pool. */
static void cvg_prm_cco_deinit_vdev(cvg_prm_vdev_t *prm)
{
    if (prm) {
        iot_mem_pool_destroy(prm->pco_d_sub_sta_pool);
    }
    return;
}

/* Copy the bitmap of directly connected peers. On a CCO this is the self
 * peer's direct_sub_sta map; otherwise the vdev's in_use map. */
void cvg_prm_get_direct_peer_bm(cvg_vdev_t *vdev, cvg_tei_map_t *bm)
{
    if (vdev->role == PLC_DEV_ROLE_CCO) {
        cvg_peer_info_t *peer = cvg_nwm_get_self_peer(vdev);
        os_mem_cpy(bm, peer->direct_sub_sta, sizeof(*bm));
    } else {
        cvg_prm_vdev_t *prm = vdev->prm;
        os_mem_cpy(bm, &prm->in_use, sizeof(*bm));
    }
}

/* Number of directly connected sub stations (self peer's count on a CCO,
 * the vdev-level count otherwise). */
uint16_t cvg_prm_get_d_sub_sta_cnt(cvg_vdev_t *vdev)
{
    if (vdev->role == PLC_DEV_ROLE_CCO) {
        cvg_peer_info_t *peer = cvg_nwm_get_self_peer(vdev);
        return (uint16_t)peer->d_sub_sta_cnt;
    } else {
        cvg_prm_vdev_t *prm = vdev->prm;
        return prm->d_sub_sta_cnt;
    }
}

/* Recursively propagate RF hop count down 'peer's subtree: a child over an
 * RF link gets parent hop + 1, otherwise parent hop. */
void cvg_prm_update_peer_rf_hop_for_sub_sta(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_peer_info_t *sub_peer;
    tei_t start;
    uint8_t rf_hop = cvg_prm_get_peer_rf_hop(peer);
    uint8_t tmp;
    cvg_prm_vdev_t *prm = vdev->prm;
    if (cvg_prm_get_peer_role(peer) != PLC_DEV_ROLE_PCO)
        return;
    CVG_TEI_MAP_LOOP_BEGIN(peer->direct_sub_sta, start);
    sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)];
    if (cvg_prm_get_peer_pco_link_type(sub_peer) == PLC_LINK_TYPE_RF) {
        tmp = 1;
    } else {
        tmp = 0;
    }
    cvg_prm_set_peer_rf_hop(vdev, sub_peer, rf_hop + tmp);
    if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_PCO)
        cvg_prm_update_peer_rf_hop_for_sub_sta(vdev, sub_peer);
    CVG_TEI_MAP_LOOP_END(peer->direct_sub_sta, start);
}

/* Recursively propagate route level down 'peer's subtree: every direct
 * child gets parent level + 1. */
void cvg_prm_update_peer_level_for_sub_sta(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_peer_info_t *sub_peer;
    tei_t start;
    uint8_t level = cvg_prm_get_peer_level(peer) + 1;
    cvg_prm_vdev_t *prm = vdev->prm;
    if (cvg_prm_get_peer_role(peer) != PLC_DEV_ROLE_PCO)
        return;
    IOT_ASSERT(level && level <= PLC_MAX_RT_LEVEL);
    CVG_TEI_MAP_LOOP_BEGIN(peer->direct_sub_sta, start);
    sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)];
    cvg_prm_set_peer_level(vdev, sub_peer, level);
    if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_PCO)
        cvg_prm_update_peer_level_for_sub_sta(vdev, sub_peer);
    CVG_TEI_MAP_LOOP_END(peer->direct_sub_sta, start);
}

/* Recursively propagate 'peer's first phase to its whole subtree
 * (secondary phases forced to PLC_PHASE_ALL). */
void cvg_prm_update_peer_phase_for_sub_sta(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_peer_info_t *sub_peer;
    tei_t start;
    uint8_t phase = cvg_prm_get_peer_phase1(peer);
    cvg_prm_vdev_t *prm = vdev->prm;
    if (cvg_prm_get_peer_role(peer) != PLC_DEV_ROLE_PCO)
        return;
    CVG_TEI_MAP_LOOP_BEGIN(peer->direct_sub_sta, start);
    sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)];
    cvg_prm_set_peer_phase(vdev, sub_peer, phase, PLC_PHASE_ALL,
        PLC_PHASE_ALL);
    if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_PCO)
        cvg_prm_update_peer_phase_for_sub_sta(vdev, sub_peer);
    CVG_TEI_MAP_LOOP_END(peer->direct_sub_sta, start);
}

/* Compute the deepest route level reachable in 'peer's subtree, with an
 * early exit once PLC_MAX_RT_LEVEL is hit. A non-PCO peer just reports its
 * own level. */
uint8_t cvg_prm_get_sub_sta_max_level(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    cvg_peer_info_t *sub_peer;
    tei_t start;
    uint8_t max_level, level;
    cvg_prm_vdev_t *prm = vdev->prm;
    max_level = cvg_prm_get_peer_level(peer);
    if (cvg_prm_get_peer_role(peer) == PLC_DEV_ROLE_PCO) {
        max_level++;
        if (max_level == PLC_MAX_RT_LEVEL)
            goto out;
    } else {
        goto out;
    }
    CVG_TEI_MAP_LOOP_BEGIN(peer->direct_sub_sta, start);
    sub_peer = &prm->peer[CVG_PRM_TEI_TO_IDX(start)];
    if (cvg_prm_get_peer_role(sub_peer) == PLC_DEV_ROLE_PCO) {
        level = cvg_prm_get_sub_sta_max_level(vdev, sub_peer);
        if (max_level < level) {
            max_level = level;
            if (max_level == PLC_MAX_RT_LEVEL)
                goto out;
        }
    }
    CVG_TEI_MAP_LOOP_END(peer->direct_sub_sta, start);
out:
    return max_level;
}

/* Mark a peer as seen/active (CCO side). Handles offline->associated and
 * associating->associated transitions, broadcasts join/online events to the
 * app layer, maintains last/max leave duration and refreshes last_seen_ts.
 * Always clears the inactive counter. */
void cvg_prm_set_peer_active(cvg_vdev_t *vdev, cvg_peer_info_t *peer)
{
    uint32_t active_ts, last_dur, max_dur;
    cvg_peer_info_t *proxy;
    if (vdev->role == PLC_DEV_ROLE_CCO && peer->role != PLC_DEV_ROLE_CCO) {
        active_ts = (uint32_t)(os_boot_time64() / 1000);
        if (peer->state == CVG_PEER_S_OFFLINE) {
            /* move peer back to associated status */
            peer->state = CVG_PEER_S_ASSOCED;
            cvg_app_bcast_sta_online_info(vdev, peer);
            /* make sure the last_leave_ts is valid: non-zero */
            if (peer->last_leave_ts != 0) {
                last_dur = active_ts - peer->last_leave_ts;
                cvg_prm_set_peer_last_leave_dur(peer, last_dur);
                max_dur = cvg_prm_get_peer_max_leave_dur(peer);
                if (last_dur > max_dur) {
                    cvg_prm_set_peer_max_leave_dur(peer, last_dur);
                }
            }
            /* once the peer's state changes to CVG_PEER_S_ASSOCED, the
             * last_leave_ts should be set to invalid: zero */
            peer->last_leave_ts = 0;
        }
        if (peer->last_assoc_ts >= peer->last_seen_ts) {
            if (peer->state == CVG_PEER_S_ASSOCING) {
                /* this is the first detection after assoc response, let's
                 * assume the peer is really joined now and broadcast the
                 * message to app. */
                cvg_app_bcast_sta_join_info(vdev, peer);
                peer->state = CVG_PEER_S_ASSOCED;
                peer->last_seen_ts = active_ts;
                /* make sure the last_leave_ts is valid: non-zero */
                if (peer->last_leave_ts != 0) {
                    last_dur = active_ts - peer->last_leave_ts;
                    cvg_prm_set_peer_last_leave_dur(peer, last_dur);
                    max_dur = cvg_prm_get_peer_max_leave_dur(peer);
                    if (last_dur > max_dur) {
                        cvg_prm_set_peer_max_leave_dur(peer, last_dur);
                    }
                }
                proxy = cvg_prm_get_peer_by_tei(vdev,
                    cvg_prm_get_peer_proxy_tei(peer));
                if (proxy) {
                    /* add proxy into pco history */
                    cvg_prm_add_pco_his(vdev, peer, proxy,
                        cvg_prm_get_peer_pco_link_type(peer));
                    /* clear proxy assoc history */
                    cvg_prm_clear_pco_his_assoc(vdev, peer, proxy,
                        cvg_prm_get_peer_pco_link_type(peer));
                }
                /* once the peer's state changes to CVG_PEER_S_ASSOCED, the
                 * last_leave_ts should be set to be invalid: zero */
                peer->last_leave_ts = 0;
            }
        } else {
            peer->last_seen_ts = active_ts;
        }
    }
    cvg_prm_set_peer_inactive_cnt(peer, 0);
}

/* Store the peer's chip id (no-op unless PLC_SUPPORT_CHIP_ID). */
void cvg_prm_set_peer_id(cvg_peer_info_t *peer, mme_chip_id_t *p_id)
{
    if (!peer || !p_id) {
        return;
    }
#if PLC_SUPPORT_CHIP_ID
    os_mem_cpy(&peer->chip_mm_id, p_id, sizeof(mme_chip_id_t));
#endif
}

/* Read back the peer's chip id (no-op unless PLC_SUPPORT_CHIP_ID). */
void cvg_prm_get_peer_id(cvg_peer_info_t *peer, mme_chip_id_t *p_id)
{
    if (!peer || !p_id) {
        return;
    }
#if PLC_SUPPORT_CHIP_ID
    os_mem_cpy(p_id, &peer->chip_mm_id, sizeof(mme_chip_id_t));
#endif
}

/* Copy the cached hardware-transformer MAC address out of the peer. */
void cvg_prm_get_hw_tsfm_addr(cvg_peer_info_t *peer, uint8_t *addr)
{
    if (!peer || !addr) {
        return;
    }
#if PLC_SUPPORT_HW_TSFM_DETECT_CACHE
    os_mem_cpy(addr, peer->vendor_info.tsfm_addr, IOT_MAC_ADDR_LEN);
#endif
}

/* Invalidate the peer's cached hardware-transformer detection info. */
void cvg_prm_clear_peer_hw_tsfm(cvg_peer_info_t *peer)
{
    if (!peer) {
        return;
    }
#if PLC_SUPPORT_HW_TSFM_DETECT_CACHE
    os_mem_set(peer->vendor_info.tsfm_addr, 0, IOT_MAC_ADDR_LEN);
    peer->vendor_info.tsfm_valid = 0;
#endif
}

/* Copy vendor-specific MME fields (build version, info params and,
 * when enabled, transformer address) into the peer record. */
void cvg_prm_set_vendor_info(cvg_peer_info_t *peer,
    mme_vendor_info_t *vendor_info)
{
    if (!peer || !vendor_info) {
        return;
    }
    os_mem_cpy(&peer->vendor_info.build_ver, &vendor_info->build_ver,
        sizeof(peer->vendor_info.build_ver));
    os_mem_cpy(&peer->vendor_info.info_param, &vendor_info->info_param,
        sizeof(peer->vendor_info.info_param));
#if PLC_SUPPORT_HW_TSFM_DETECT_CACHE
    os_mem_cpy(peer->vendor_info.tsfm_addr, vendor_info->tsfm_addr,
        IOT_MAC_ADDR_LEN);
#endif
}

/* Copy the version-info MME fields into the peer record. */
void cvg_prm_set_ver_info(cvg_peer_info_t *peer, mme_ver_info_t *ver_info)
{
    if (!peer || !ver_info) {
        return;
    }
#if PLC_SUPPORT_VER_INFO_COMP
    peer->ver_info.boot_reason = ver_info->boot_reason;
    peer->ver_info.boot_ver = ver_info->boot_ver;
    peer->ver_info.chip_id = ver_info->chip_id;
#endif
    peer->ver_info.sw_ver = ver_info->sw_ver;
    peer->ver_info.build_time_y = ver_info->build_time_y;
    peer->ver_info.build_time_m = ver_info->build_time_m;
    peer->ver_info.build_time_d = ver_info->build_time_d;
    peer->ver_info.vendor_id = ver_info->vendor_id;
}

/* Reset every CCO-maintained field of a peer slot to its default and
 * return its direct-sub-sta bitmaps to the pool. */
static inline void cvg_prm_cco_reset_peer(cvg_prm_vdev_t *prm,
    cvg_peer_info_t *peer)
{
    peer->state = CVG_PEER_S_INVAL;
    peer->dev_type = PLC_DEV_TYPE_INVAL;
    peer->pco_link_type = PLC_LINK_TYPE_HPLC;
    peer->comm_type = PLC_DEV_COMM_TYPE_HPLC;
    peer->sub_link_type = CVG_PEER_SUB_LINK_INVALID;
    peer->proxy = PLC_TEI_INVAL;
    peer->prev_proxy = PLC_TEI_INVAL;
    peer->pco_out_of_sync = 0;
    peer->mac_addr_type = PLC_MAC_ADDR_TYPE_METER;
    peer->leave_unlock = 0;
    peer->p2p_sn = 0;
    peer->assoc_rnd = 0;
    peer->last_assoc_ts = 0;
    peer->last_proxy_chg_ts = 0;
    peer->last_seen_ts = 0;
    peer->last_delete_ts = 0;
    peer->assoc_rx_cnt = 0;
    peer->proxy_chg_accept_cnt = 0;
    peer->zc_no_phase_query_cnt = 0;
    peer->last_leave_ts = 0;
#if PLC_SUPPORT_PEER_STAT
    peer->first_assoc_ts = 0;
    peer->proxy_chg_rx_cnt = 0;
    peer->last_leave_dur = 0;
    peer->max_leave_dur = 0;
#endif
    peer->leave_cnt = 0;
    peer->phy_phase_1 = 0;
    peer->phy_phase_2 = 0;
    peer->phy_phase_3 = 0;
    peer->edge_type = CVG_ZC_CT_EDGE_INVALID;
    peer->zc_info_valid = 0;
    peer->zc_query_cnt = 0;
    peer->zc_phase_err_cnt = 0;
    peer->zc_3p_rpt_odd_flag = 0;
    peer->zc_3p_rpt_odd_cnt = 0;
    peer->opposite_phase = 0;
    peer->opposite_3p = 0;
    peer->hw_reset_cnt = 0;
    peer->sw_reset_cnt = 0;
    peer->hw_reset_flag = 0;
    peer->sw_reset_flag = 0;
    /* NOTE(review): opposite_3p is cleared a second time here (also a few
     * lines above) -- redundant but harmless. */
    peer->opposite_3p = 0;
    peer->opposite_3p_pos = 0;
    peer->uplink_inactive_cnt = 0;
    peer->tf_sr_send_inactive_cnt = 0;
    peer->inactive_cnt = 0;
    peer->bonding_flag = 0;
    peer->d_sub_sta_cnt = 0;
    if (peer->direct_sub_sta) {
        iot_mem_pool_free(prm->pco_d_sub_sta_pool, peer->direct_sub_sta);
        peer->direct_sub_sta = NULL;
    }
    cvg_prm_free_peer_rf_d_sub_sta(prm, peer);
    os_mem_set(&peer->ver_info, 0, sizeof(peer->ver_info));
    os_mem_set(&peer->vendor_info, 0, sizeof(peer->vendor_info));
    os_mem_set(&peer->msdu_rec, 0, sizeof(peer->msdu_rec));
#if CVG_NETWORK_NTB_DEBUG
    os_mem_set(&peer->zc_info, 0, sizeof(peer->zc_info));
#endif
#if PLC_SUPPORT_AUTH_DAK
    os_mem_set(&peer->auth_info, 0, sizeof(peer->auth_info));
#endif
    os_mem_set(&peer->pco_his, 0, sizeof(peer->pco_his));
}

/* Undo the role-dependent accounting for a peer: decrement the matching
 * role counter and, for PCO/CCO, return the direct-sub-sta bitmaps to the
 * pool. Underflow trips an assert. */
static void cvg_prm_cco_clear_peer_role(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    switch (peer->role) {
    case PLC_DEV_ROLE_PCO: {
        if (prm->pco_cnt == 0) {
            IOT_ASSERT(0);
        } else {
            prm->pco_cnt--;
            iot_mem_pool_free(prm->pco_d_sub_sta_pool, peer->direct_sub_sta);
            peer->direct_sub_sta = NULL;
            peer->d_sub_sta_cnt = 0;
            cvg_prm_free_peer_rf_d_sub_sta(prm, peer);
        }
        break;
    }
    case PLC_DEV_ROLE_STA: {
        if (prm->sta_cnt == 0)
            IOT_ASSERT(0);
        else
            prm->sta_cnt--;
        break;
    }
    case PLC_DEV_ROLE_CCO: {
        if (prm->cco_cnt == 0) {
            IOT_ASSERT(0);
        } else {
            prm->cco_cnt--;
            iot_mem_pool_free(prm->pco_d_sub_sta_pool, peer->direct_sub_sta);
            peer->direct_sub_sta = NULL;
            peer->d_sub_sta_cnt = 0;
            cvg_prm_free_peer_rf_d_sub_sta(prm, peer);
        }
        break;
    }
    default:
        break;
    }
}

/* Remove the peer's TEI bit from both level bitmaps of its current level
 * (only one of the two can actually be set for a given role). */
static void cvg_prm_cco_clear_peer_level(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    tei_t p_tei;
    if (peer->level) {
        p_tei = cvg_prm_get_peer_tei(vdev, peer);
        cvg_tei_map_clear(&prm->pco_bms[peer->level - 1],
            CVG_TEI_TO_BM(p_tei));
        cvg_tei_map_clear(&prm->sta_bms[peer->level - 1],
            CVG_TEI_TO_BM(p_tei));
    }
}

/* Decrement the per-phase device counters for each of the peer's phases. */
static void cvg_prm_cco_clear_peer_phase(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    if (peer->phase_1) {
        (prm->phase1_cnt[peer->phase_1 - 1])--;
    }
    if (peer->phase_2) {
        (prm->phase2_cnt[peer->phase_2 - 1])--;
    }
    if (peer->phase_3) {
        (prm->phase3_cnt[peer->phase_3 - 1])--;
    }
}

/* Switch a peer to a new role (CCO vdev only): clears the old role/level
 * accounting, then sets up the new role's counters, level bitmap bit and --
 * for PCO/CCO -- allocates a fresh direct-sub-sta bitmap from the pool. */
static void cvg_prm_cco_set_peer_role(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    uint8_t role)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    tei_t p_tei;
    if (vdev->role == PLC_DEV_ROLE_CCO) {
        cvg_prm_cco_clear_peer_role(vdev, peer);
        cvg_prm_cco_clear_peer_level(vdev, peer);
        p_tei = cvg_prm_get_peer_tei(vdev, peer);
        switch (role) {
        case PLC_DEV_ROLE_PCO: {
            peer->direct_sub_sta = iot_mem_pool_alloc(prm->pco_d_sub_sta_pool);
            IOT_ASSERT(peer->direct_sub_sta);
            cvg_tei_map_reset(peer->direct_sub_sta);
            peer->d_sub_sta_cnt = 0;
            cvg_prm_alloc_peer_rf_d_sub_sta(prm, peer);
            prm->pco_cnt++;
            if (peer->level) {
                cvg_tei_map_set(&prm->pco_bms[peer->level - 1],
                    CVG_TEI_TO_BM(p_tei));
            }
            break;
        }
        case PLC_DEV_ROLE_STA: {
            prm->sta_cnt++;
            if (peer->level) {
                cvg_tei_map_set(&prm->sta_bms[peer->level - 1],
                    CVG_TEI_TO_BM(p_tei));
            }
            break;
        }
        case PLC_DEV_ROLE_CCO: {
            prm->cco_cnt++;
            peer->direct_sub_sta = iot_mem_pool_alloc(prm->pco_d_sub_sta_pool);
            IOT_ASSERT(peer->direct_sub_sta);
            cvg_tei_map_reset(peer->direct_sub_sta);
            peer->d_sub_sta_cnt = 0;
            cvg_prm_alloc_peer_rf_d_sub_sta(prm, peer);
            IOT_ASSERT(prm->cco_cnt <= 1);
            break;
        }
        default:
            IOT_ASSERT(0);
            break;
        }
    }
}

/* Move a peer to a new 1-based route level (CCO vdev only): clears the old
 * level bits, then sets the bit in the bitmap matching the peer's role. */
static void cvg_prm_cco_set_peer_level(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer, uint8_t level)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    tei_t p_tei;
    if (level == 0 || level > PLC_MAX_RT_LEVEL) {
        IOT_ASSERT(0);
        return;
    }
    if (vdev->role == PLC_DEV_ROLE_CCO) {
        cvg_prm_cco_clear_peer_level(vdev, peer);
        p_tei = cvg_prm_get_peer_tei(vdev, peer);
        switch (peer->role) {
        case PLC_DEV_ROLE_PCO: {
            cvg_tei_map_set(&prm->pco_bms[level - 1], CVG_TEI_TO_BM(p_tei));
            break;
        }
        case PLC_DEV_ROLE_STA: {
            cvg_tei_map_set(&prm->sta_bms[level - 1], CVG_TEI_TO_BM(p_tei));
            break;
        }
        case PLC_DEV_ROLE_CCO: {
            /* CCO device level should always be 0 */
            IOT_ASSERT(0);
        }
        /* NOTE(review): no break above -- after the assert the CCO case
         * falls through into default; confirm this is intentional. */
        default:
            break;
        }
    }
}

/* Increment the per-phase device counters for the peer's phases. For a CCO
 * peer all phases are folded into phase1_cnt ("same weight"). */
static void cvg_prm_cco_set_peer_phase(cvg_vdev_t *vdev,
    cvg_peer_info_t *peer)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    if (peer->role != PLC_DEV_ROLE_CCO) {
        if (peer->phase_1) {
            (prm->phase1_cnt[peer->phase_1 - 1])++;
        }
        if (peer->phase_2) {
            (prm->phase2_cnt[peer->phase_2 - 1])++;
        }
        if (peer->phase_3) {
            (prm->phase3_cnt[peer->phase_3 - 1])++;
        }
    } else {
        /* for cco device, we assume all phases have same weight */
        if (peer->phase_1) {
            (prm->phase1_cnt[peer->phase_1 - 1])++;
        }
        if (peer->phase_2) {
            (prm->phase1_cnt[peer->phase_2 - 1])++;
        }
        if (peer->phase_3) {
            (prm->phase1_cnt[peer->phase_3 - 1])++;
        }
    }
}

/* Fully remove a peer from the CCO's bookkeeping: detach it from its
 * proxy's sub-sta bitmaps (possibly demoting the proxy back to STA role),
 * clear role/level/phase/link accounting, then either lock the TEI (meter
 * type address) or release it entirely. */
static inline void cvg_prm_cco_delete_peer(cvg_prm_vdev_t *prm,
    cvg_peer_info_t *peer)
{
    cvg_peer_info_t *proxy;
    cvg_tei_map_t *bm;
    tei_t s = cvg_prm_get_peer_tei(prm->vdev, peer);
    proxy = cvg_prm_get_peer_by_tei(prm->vdev,
        cvg_prm_get_peer_proxy_tei(peer));
    if (proxy && proxy->direct_sub_sta) {
        /* it's possible that
           proxy rejoined the network after cco kick
         * out it. for this case, cco may assign same tei to the proxy
         * while the sub sta info of the proxy won't be maintained. */
        cvg_tei_map_clear(proxy->direct_sub_sta, CVG_TEI_TO_BM(s));
        bm = cvg_prm_get_peer_rf_d_sub_sta(proxy);
        if (bm) {
            cvg_tei_map_clear(bm, CVG_TEI_TO_BM(s));
        }
        if (cvg_tei_map_cbs(proxy->direct_sub_sta) == 0) {
            /* move proxy from pco role back to sta role */
            cvg_prm_set_peer_role(prm->vdev, proxy, PLC_DEV_ROLE_STA);
        }
        cvg_prm_update_peer_sub_link_type(proxy);
        cvg_prm_update_peer_link_type_bm(prm->vdev, proxy);
    }
    cvg_prm_cco_clear_peer_link_type_bm(prm->vdev, peer);
    cvg_prm_cco_clear_peer_role(prm->vdev, peer);
    cvg_prm_cco_clear_peer_level(prm->vdev, peer);
    cvg_prm_cco_clear_peer_phase(prm->vdev, peer);
    IOT_ASSERT(cvg_tei_map_is_set(&prm->tei_assigned, CVG_TEI_TO_BM(s)));
    if (PLC_SUPPORT_TEI_LOCK &&
        peer->mac_addr_type == PLC_MAC_ADDR_TYPE_METER) {
        /* lock the tei for meter type address */
        peer->last_delete_ts = (uint32_t)(os_boot_time64() / 1000);
    } else {
        cvg_tei_map_clear(&prm->tei_assigned, CVG_TEI_TO_BM(s));
        /* delete mac address to tei mapping */
        cvg_prm_matm_del_ent(prm, s);
        os_mem_set(peer, 0, sizeof(*peer));
    }
}
#else /* PLC_SUPPORT_CCO_ROLE */
/* CCO role compiled out: the CCO-only helpers collapse to no-op macros. */
#define cvg_prm_cco_init(prm)
#define cvg_prm_cco_init_vdev(prm) (ERR_OK)
#define cvg_prm_cco_deinit_vdev(prm)
/* get bitmap of directly connected peers */
void cvg_prm_get_direct_peer_bm(cvg_vdev_t *vdev, cvg_tei_map_t *bm)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    os_mem_cpy(bm, &prm->in_use, sizeof(*bm));
}
/* Directly connected sub-station count (non-CCO build). */
uint16_t cvg_prm_get_d_sub_sta_cnt(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    return prm->d_sub_sta_cnt;
}
#define cvg_prm_cco_status_dump(vdev)
#define cvg_prm_update_peer_sub_link_type(peer)
#define cvg_prm_cco_set_peer_link_type_bm(vdev, peer)
#define cvg_prm_cco_clear_peer_link_type_bm(vdev, peer)
#define cvg_prm_update_peer_link_type_bm(vdev, peer)
#define cvg_prm_cco_reset_peer(prm, peer) \
    do {(void)prm; (void)peer;} while (0)
#define cvg_prm_cco_delete_peer(prm, peer) \
    do { (void)prm; (void)peer; } while (0)
#define cvg_prm_cco_set_peer_role(vdev, peer, role) \
    do { (void)vdev; (void)peer; (void)role; } while (0)
#define cvg_prm_cco_set_peer_level(vdev, peer, level) \
    do { (void)vdev; (void)peer; (void)level; } while (0)
#define cvg_prm_cco_clear_peer_phase(vdev, peer) \
    do { (void)vdev; (void)peer; } while (0)
#define cvg_prm_cco_set_peer_phase(vdev, peer) \
    do { (void)vdev; (void)peer; } while (0)
#endif /* PLC_SUPPORT_CCO_ROLE */
#if PLC_SUPPORT_STA_ROLE
/* STA-side peer removal: drop the candidate-proxy route entry, the
 * addr<->tei mapping, and zero the slot. */
static inline void cvg_prm_sta_delete_peer(cvg_prm_vdev_t *prm,
    cvg_peer_info_t *peer)
{
    tei_t tei;
    tei = cvg_prm_get_peer_tei(prm->vdev, peer);
    cvg_rt_rm_cand_proxy(prm->vdev, tei);
    /* delete mac address to tei mapping */
    cvg_prm_matm_del_ent(prm, tei);
    os_mem_set(peer, 0, sizeof(*peer));
}
/* note that this function is invoked outside CVG context, suppose this
 * function should only read some statistics variables from CVG layer. */
static void cvg_prm_sta_status_dump(cvg_vdev_t *vdev)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    cvg_peer_info_t *self, *proxy;
    uint32_t tf_sr = 0, rf_tf_sr = 0, inactive = 0;
    uint32_t level = 0, role = 0, phase = 0;
    int32_t rx_snr = INVALID_SNR, tx_snr = INVALID_SNR;
    self = cvg_nwm_get_self_peer(vdev);
    if (self) {
        /* pack role/level/phase/rf-hop/sub-sta-count into one word */
        level = self->level;
        role = self->role;
        phase = self->phase_1;
        role |= level << 4;
        role |= phase << 8;
        role |= cvg_prm_get_peer_rf_hop(self) << 12;
        role |= prm->d_sub_sta_cnt << 16;
    }
    proxy = cvg_nwm_get_proxy_peer(vdev);
    if (proxy) {
        /* pack the four HPLC success rates and four RF success rates */
        tf_sr = proxy->tf_sr;
        tf_sr |= proxy->cco_tf_sr << 8;
        tf_sr |= proxy->rx_sr << 16;
        tf_sr |= proxy->tx_sr << 24;
        rx_snr = proxy->rx_snr;
        tx_snr = cvg_prm_get_peer_tx_snr(proxy);
        rf_tf_sr = cvg_prm_get_peer_rf_tf_sr(proxy);
        rf_tf_sr |= cvg_prm_get_peer_rf_wi_rx_sr(proxy) << 8;
        rf_tf_sr |= cvg_prm_get_peer_rf_rx_sr(proxy) << 16;
        rf_tf_sr |= cvg_prm_get_peer_rf_tx_sr(proxy) << 24;
    }
    /* per-phase CCO inactive counters, one byte each */
    inactive = prm->cco_peer[0].hplc_inactive_cnt;
    inactive |= prm->cco_peer[1].hplc_inactive_cnt << 8;
    inactive |= prm->cco_peer[2].hplc_inactive_cnt << 16;
    iot_printf("%s proxy tx_snr %d rx_snr %d "
        "tx_sr-rx_sr-cco_tf_sr-tf_sr %08x "
        "rf_tx_sr-rf_rx_sr-rf_wi_rx_sr-rf_tf_sr %08x "
        "d_sub_sta_cnt-rfhop-phase-level-role %08x cco_rx_snr a %d "
        "b %d c %d cco_inactive_a-b-c %08x\n", __FUNCTION__, tx_snr, rx_snr,
        tf_sr, rf_tf_sr, role, prm->cco_peer[0].rx_snr,
        prm->cco_peer[1].rx_snr, prm->cco_peer[2].rx_snr, inactive);
    iot_dbglog_input(PLC_CVG_COMMON_MID, DBGLOG_ERR,
        CVG_DUMP_STATUS_PRM_STA_ID, 9, tx_snr, rx_snr, tf_sr, rf_tf_sr, role,
        prm->cco_peer[0].rx_snr, prm->cco_peer[1].rx_snr,
        prm->cco_peer[2].rx_snr, inactive);
}
#else /* PLC_SUPPORT_STA_ROLE */
#define cvg_prm_sta_delete_peer(prm, peer) \
    do { (void)prm; (void)peer; } while (0)
#define cvg_prm_sta_status_dump(vdev)
#endif /* PLC_SUPPORT_STA_ROLE */
#if HPLC_RF_SUPPORT
/* Set the peer's RF hop count (bounded by PLC_MAX_RT_LEVEL). */
void cvg_prm_set_peer_rf_hop(cvg_vdev_t *vdev, cvg_peer_info_t *peer,
    uint8_t rf_hop)
{
    (void)vdev;
    IOT_ASSERT(rf_hop <= PLC_MAX_RT_LEVEL);
    peer->rf_hop = rf_hop;
    /* todo: maybe route table changed, need handle */
}
#if HPLC_RF_DEV_SUPPORT
/* Increment the peer's RF dp counter, saturating at the bitmap width. */
void cvg_prm_set_peer_rf_dp_inc(cvg_peer_info_t *peer)
{
    if (!peer) {
        return;
    }
    if (peer->rf_dp_cnt < CVG_PRM_RF_RCV_BM_BIT_CNT) {
        peer->rf_dp_cnt++;
    }
}
/* Set the peer's RF dp counter; out-of-range values are ignored. */
void cvg_prm_set_peer_rf_dp_cnt(cvg_peer_info_t *peer, uint8_t cnt)
{
    if (!peer) {
        return;
    }
    if (cnt <= CVG_PRM_RF_RCV_BM_BIT_CNT) {
        peer->rf_dp_cnt = cnt;
    }
}
#endif /* HPLC_RF_DEV_SUPPORT */
#endif /* HPLC_RF_SUPPORT */
/* Copy the in-use peer bitmap. */
void cvg_prm_get_peer_bm(cvg_vdev_t *vdev, cvg_tei_map_t *bm)
{
    cvg_prm_vdev_t *prm = vdev->prm;
    os_mem_cpy(bm, &prm->in_use, sizeof(*bm));
}
/* Module-level init: only emits a dbglog marker; per-vdev state is set up
 * in cvg_prm_init_vdev(). */
uint32_t cvg_prm_init(cvg_global_t *glb, cvg_cfg_t *cfg)
{
    (void)glb;
    (void)cfg;
    iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_INIT_ID, 0);
    return 0;
}
/* reset member of rt vdev to default value */
static void __cvg_prm_reset_vdev(cvg_prm_vdev_t *prm, cvg_vdev_cfg_t *cfg)
{
    uint32_t i;
    (void)cfg;
    for (i = 0; i < PLC_TEI_MAX_NUM; i++)
        cvg_prm_reset_peer(prm->vdev, &prm->peer[i]);
    for (i
= 0; i < PLC_PHASE_CNT; i++) { cvg_prm_reset_peer(prm->vdev, &prm->cco_peer[i]); prm->cco_peer_new_bp[i] = 1; } prm->d_sub_sta_cnt = 0; cvg_tei_map_reset(&prm->in_use); cvg_prm_cco_init(prm); cvg_prm_matm_init(prm); } uint32_t cvg_prm_init_vdev(cvg_vdev_t *vdev, cvg_vdev_cfg_t *cfg) { uint32_t ret = 0; cvg_prm_vdev_t *prm; iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_INIT_VDEV_ID, 1, vdev->id); prm = os_mem_malloc(PLC_CVG_PRM_MID, sizeof(*prm)); if (prm == NULL) { ret = ERR_NOMEM; goto out; } ret = cvg_prm_cco_init_vdev(prm); if(ret) { goto err_handle1; } prm->vdev = vdev; __cvg_prm_reset_vdev(prm, cfg); vdev->prm = prm; goto out; err_handle1: os_mem_free(prm); out: return ret; } void cvg_prm_deinit_vdev(cvg_vdev_t *vdev) { cvg_prm_vdev_t *prm = vdev->prm; if (prm) { cvg_prm_cco_deinit_vdev(prm); os_mem_free(prm); vdev->prm = NULL; } return; } void cvg_prm_reset_vdev(cvg_vdev_t *vdev, cvg_vdev_cfg_t *cfg) { iot_dbglog_input(PLC_CVG_PRM_MID, DBGLOG_INFO_LVL_2, CVG_PRM_RESET_VDEV_ID, 1, vdev->id); __cvg_prm_reset_vdev(vdev->prm, cfg); } void cvg_prm_reset_peer(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { peer->role = PLC_DEV_ROLE_INVALID; peer->level = 0; peer->phase_1 = PLC_PHASE_ALL; peer->phase_2 = PLC_PHASE_ALL; peer->phase_3 = PLC_PHASE_ALL; peer->need_cal_tx = 0; peer->need_cal_rx = 0; peer->rx_snr = INVALID_SNR; cvg_prm_set_peer_tx_snr(peer, INVALID_SNR); peer->d_sub_sta = 0; peer->proxy_chg_cnt = 0; peer->same_vendor = 0; peer->prev_dis_mme_rx = 0; peer->prev_dis_rx = 0; peer->dis_mme_rx = 0; peer->dis_rx = 0; peer->dis_only_rx = 0; peer->bc_rx = 0; peer->prev_bc_rx = 0; peer->tx_sr = 100; peer->rx_sr = 100; peer->tf_sr_valid = 0; peer->tf_sr = 100; peer->cco_tf_sr = 0; peer->d_sub_delay_del = 0; cvg_prm_set_peer_hplc_inactive_cnt(peer, 0); cvg_prm_set_peer_rf_start_seq(peer, 0); cvg_prm_set_peer_rf_rcv_bm(peer, 0); cvg_prm_set_peer_rf_inactive_cnt(peer, 0); cvg_prm_set_peer_rf_dis_inactive_cnt(peer, 0); cvg_prm_set_peer_rf_dp_cnt(peer, 1); 
cvg_prm_set_peer_rf_hop(vdev, peer, 0); cvg_prm_set_peer_rf_rx_snr(peer, RF_INVALID_SNR); cvg_prm_set_peer_rf_tx_snr(peer, RF_INVALID_SNR); cvg_prm_set_peer_rf_rx_sr(peer, 100); cvg_prm_set_peer_rf_wi_rx_sr(peer, 100); cvg_prm_set_peer_rf_tx_sr(peer, 100); cvg_prm_set_peer_rf_tf_sr(peer, 100); cvg_prm_set_peer_rf_tf_sr_valid(peer, 0); cvg_prm_set_peer_rf_bc_dis_rx(peer, 0); cvg_prm_set_peer_rf_prev_bc_dis_rx(peer, 0); cvg_prm_set_peer_rf_seq_valid(peer, 0); cvg_prm_set_peer_rf_rx_rssi(peer, INV_RSSI_RF); cvg_prm_set_learnt_proxy(vdev, peer, PLC_TEI_INVAL); cvg_prm_cco_reset_peer(vdev->prm, peer); } cvg_peer_info_t *cvg_prm_create_peer_by_tei(cvg_vdev_t *vdev, tei_t tei) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *tmp = NULL; uint32_t idx, b; idx = CVG_PRM_TEI_TO_IDX(tei); b = CVG_TEI_TO_BM(tei); if (cvg_tei_map_is_set(&prm->in_use, b) == false) { tmp = &prm->peer[idx]; cvg_tei_map_set(&prm->in_use, b); cvg_prm_reset_peer(vdev, tmp); if (vdev->role != PLC_DEV_ROLE_CCO && tei == PLC_TEI_CCO) { for (b = 0; b < PLC_PHASE_CNT; b++) { cvg_prm_reset_peer(vdev, &prm->cco_peer[b]); prm->cco_peer[b].role = PLC_DEV_ROLE_CCO; cvg_prm_set_peer_phase(vdev, &prm->cco_peer[b], (uint8_t)(b + 1), PLC_PHASE_ALL, PLC_PHASE_ALL); prm->cco_peer_new_bp[b] = 1; } } } return tmp; } void cvg_prm_delete_peer(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { cvg_prm_vdev_t *prm = vdev->prm; uint32_t b, idx = peer - prm->peer; b = CVG_PRM_IDX_TO_BM(idx); if (cvg_tei_map_is_set(&prm->in_use, b)) { if (vdev->role == PLC_DEV_ROLE_CCO) cvg_prm_cco_delete_peer(prm, peer); else cvg_prm_sta_delete_peer(prm, peer); /* clean up related route entry. be careful, after this point, peer * data structure is not available anymore. 
*/ cvg_rt_rm_next_hop(vdev, CVG_PRM_IDX_TO_TEI(idx), 0); cvg_rt_rm_dest(vdev, CVG_PRM_IDX_TO_TEI(idx), 0); cvg_tei_map_clear(&prm->in_use, b); if (CVG_PRM_IDX_TO_TEI(idx) == PLC_TEI_CCO) { os_mem_set(prm->cco_peer, 0, sizeof(prm->cco_peer)); os_mem_set(prm->cco_peer_new_bp, 0, sizeof(prm->cco_peer_new_bp)); } } } void cvg_prm_delete_peer_by_tei(cvg_vdev_t *vdev, tei_t tei) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer = &prm->peer[CVG_PRM_TEI_TO_IDX(tei)]; /* make sure direct sub sta flag cleared before delete */ IOT_ASSERT(!cvg_prm_is_peer_d_sub(peer)); cvg_prm_delete_peer(vdev, peer); } cvg_peer_info_t *cvg_prm_get_peer_by_tei(cvg_vdev_t *vdev, tei_t tei) { cvg_prm_vdev_t *prm = vdev->prm; uint32_t idx = CVG_PRM_TEI_TO_IDX(tei); if (PLC_TEI_IS_VALID(tei) && cvg_tei_map_is_set(&prm->in_use, CVG_TEI_TO_BM(tei))) { return &prm->peer[idx]; } else { return NULL; } } tei_t cvg_prm_get_peer_tei(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { cvg_prm_vdev_t *prm = vdev->prm; return CVG_PRM_IDX_TO_TEI(peer - prm->peer); } uint16_t cvg_prm_get_dis_node_info(cvg_vdev_t *vdev, uint8_t *data, uint32_t *len, uint32_t max_len, uint8_t fix_bm_size) { uint32_t p_idx; uint16_t cnt = 0; tei_t tmp, local_tei, max_tei = PLC_TEI_INVAL; uint8_t *dis_cnt; cvg_prm_vdev_t *prm = vdev->prm; cvg_tei_map_t bm = { 0 }; uint16_t len_temp = 0; uint8_t i, scan_flag, max_dis_only_rx = 0, is_direct = 0; cvg_peer_info_t *peer = cvg_nwm_get_self_peer(vdev); cvg_peer_info_t *proxy; tei_t proxy_tei; uint8_t reason = 0; (void)peer; (void)proxy; (void)proxy_tei; cvg_prm_get_peer_bm(vdev, &bm); IOT_ASSERT((fix_bm_size == 0) || (fix_bm_size >= sizeof(cvg_tei_map_t))); IOT_ASSERT(*len >= (sizeof(cvg_tei_map_t) + PLC_TEI_MAX_NUM)); os_mem_set(data, 0, sizeof(cvg_tei_map_t)); if (fix_bm_size) { dis_cnt = data + fix_bm_size; } else { dis_cnt = data + sizeof(cvg_tei_map_t); } local_tei = cvg_nwm_get_local_tei(vdev); cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(local_tei)); if (!PLC_SUPPORT_CCO_DIS_MME_COMPRESS 
&& vdev->role == PLC_DEV_ROLE_CCO) { CVG_TEI_MAP_LOOP_BEGIN(&bm, tmp); p_idx = CVG_PRM_TEI_TO_IDX(tmp); if (prm->peer[p_idx].prev_dis_rx && prm->peer[p_idx].prev_dis_mme_rx) { /* received at least one mme from the peer. note that beacon rx * is not enough to identify if csma region is ok. */ cvg_tei_map_set((cvg_tei_map_t *)data, CVG_TEI_TO_BM(tmp)); dis_cnt[cnt] = (uint8_t)prm->peer[p_idx].prev_dis_rx; cnt++; max_tei = tmp; } CVG_TEI_MAP_LOOP_END(&bm, tmp); } else { #if PLC_SUPPORT_STA_ROLE /* put the direct PCO in dis node list mme */ if (vdev->role != PLC_DEV_ROLE_CCO) { proxy = cvg_nwm_get_proxy_peer(vdev); proxy_tei = cvg_prm_get_peer_tei(vdev, proxy); cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(proxy_tei)); if (proxy->prev_dis_rx && proxy->prev_dis_mme_rx) { len_temp = cnt + 1; if (!fix_bm_size) { len_temp += CVG_TEI_TO_BM(proxy_tei) >> 3; if (CVG_TEI_TO_BM(proxy_tei) & 0x7) { len_temp++; } } else { len_temp += fix_bm_size; } if (len_temp <= max_len) { cvg_tei_map_set((cvg_tei_map_t *)data, CVG_TEI_TO_BM(proxy_tei)); dis_cnt[proxy_tei - 1] = (uint8_t)proxy->prev_dis_rx; cnt++; max_tei = proxy_tei; } else { reason = 1; goto done; } } } #endif /* put the direct sub sta in dis node list mme */ CVG_TEI_MAP_LOOP_BEGIN(&bm, tmp); p_idx = CVG_PRM_TEI_TO_IDX(tmp); if (vdev->role == PLC_DEV_ROLE_CCO) { #if PLC_SUPPORT_CCO_ROLE if (cvg_tei_map_is_set(peer->direct_sub_sta, CVG_TEI_TO_BM(tmp))) { is_direct = 1; } else { is_direct = 0; } #else IOT_ASSERT(0); #endif } else { is_direct = cvg_prm_is_peer_d_sub(&prm->peer[p_idx]); } if (is_direct) { if (prm->peer[p_idx].prev_dis_rx && prm->peer[p_idx].prev_dis_mme_rx) { len_temp = cnt + 1; if (!fix_bm_size) { len_temp += CVG_TEI_TO_BM(max(tmp, max_tei)) >> 3; if (CVG_TEI_TO_BM(max(tmp, max_tei)) & 0x7) { len_temp++; } } else { len_temp += fix_bm_size; } if (len_temp <= max_len) { cvg_tei_map_set((cvg_tei_map_t *)data, CVG_TEI_TO_BM(tmp)); dis_cnt[tmp - 1] = (uint8_t)prm->peer[p_idx].prev_dis_rx; cnt++; max_tei = max(tmp, max_tei); } 
else { reason = 2; goto done; } } cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(tmp)); } CVG_TEI_MAP_LOOP_END(&bm, tmp); /* put the CCO/PCO in dis node list mme */ CVG_TEI_MAP_LOOP_BEGIN(&bm, tmp); p_idx = CVG_PRM_TEI_TO_IDX(tmp); if (prm->peer[p_idx].role == PLC_DEV_ROLE_CCO || prm->peer[p_idx].role == PLC_DEV_ROLE_PCO) { if (prm->peer[p_idx].prev_dis_rx && prm->peer[p_idx].prev_dis_mme_rx) { len_temp = cnt + 1; if (!fix_bm_size) { len_temp += CVG_TEI_TO_BM(max(tmp, max_tei)) >> 3; if (CVG_TEI_TO_BM(max(tmp, max_tei)) & 0x7) { len_temp++; } } else { len_temp += fix_bm_size; } if (len_temp <= max_len) { cvg_tei_map_set((cvg_tei_map_t *)data, CVG_TEI_TO_BM(tmp)); dis_cnt[tmp - 1] = (uint8_t)prm->peer[p_idx].prev_dis_rx; cnt++; max_tei = max(tmp, max_tei); } else { reason = 3; goto done; } } cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(tmp)); } else { if (prm->peer[p_idx].prev_dis_mme_rx) { max_dis_only_rx = max(prm->peer[p_idx].dis_only_rx, max_dis_only_rx); } else { /* if prev_dis_mme_rx has not been set, it means discovery * node list has not been received in last route period and * dis_only_rx has not been cleared and is expired. */ cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(tmp)); } } CVG_TEI_MAP_LOOP_END(&bm, tmp); /* screen nodes according to the number of discovery node list mme * received, node with more discovery node list mme received should * be put into the discovery node mme firstly, until the discovery * node list mme length exceeds 1PB. 
*/ for (i = max_dis_only_rx; i > 0; i--) { scan_flag = 0; CVG_TEI_MAP_LOOP_BEGIN(&bm, tmp); scan_flag = 1; p_idx = CVG_PRM_TEI_TO_IDX(tmp); if (prm->peer[p_idx].dis_only_rx >= i) { len_temp = cnt + 1; if (!fix_bm_size) { len_temp += CVG_TEI_TO_BM(max(tmp, max_tei)) >> 3; if (CVG_TEI_TO_BM(max(tmp, max_tei)) & 0x7) { len_temp++; } } else { len_temp += fix_bm_size; } if (len_temp <= max_len) { cvg_tei_map_set((cvg_tei_map_t *)data, CVG_TEI_TO_BM(tmp)); dis_cnt[tmp - 1] = (uint8_t)prm->peer[p_idx].prev_dis_rx; cnt++; max_tei = max(tmp, max_tei); cvg_tei_map_clear(&bm, CVG_TEI_TO_BM(tmp)); } else { reason = 4; goto done; } } CVG_TEI_MAP_LOOP_END(&bm, tmp); if (!scan_flag) { break; } } done: if (reason) { iot_printf("%s, reason %lu, max_len:%lu, len:%lu, dis_cnt:%lu\n", __FUNCTION__, reason, max_len, len_temp, cnt); } /* After the number of beacons and discovery node list mme received * by node has been written to the corresponding memory in tei order, * we should summarize the data to the contiguous memory block. 
*/ cnt = 0; CVG_TEI_MAP_LOOP_BEGIN((cvg_tei_map_t *)data, tmp); dis_cnt[cnt] = dis_cnt[tmp - 1]; cnt++; CVG_TEI_MAP_LOOP_END((cvg_tei_map_t *)data, tmp); } if (cnt) { *len = cnt; if (!fix_bm_size) { data += CVG_TEI_TO_BM(max_tei) >> 3; *len += CVG_TEI_TO_BM(max_tei) >> 3; if (CVG_TEI_TO_BM(max_tei) & 0x7) { data++; (*len)++; } os_mem_move(data, dis_cnt, cnt); } else { *len += fix_bm_size; } } else { if (!fix_bm_size) { *len = 0; } else { *len = fix_bm_size; } } return cnt; } void cvg_prm_set_peer_role(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t role) { (void)vdev; /* won't be able to change the role of CCO peer */ if (peer->role == PLC_DEV_ROLE_CCO) return; if (peer->role != role) { cvg_prm_cco_set_peer_role(vdev, peer, role); peer->role = role; } } void cvg_prm_set_peer_level(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t level) { (void)vdev; IOT_ASSERT(level <= PLC_MAX_RT_LEVEL); if (peer->level != level) { cvg_prm_cco_set_peer_level(vdev, peer, level); cvg_prm_cco_clear_peer_link_type_bm(vdev, peer); peer->level = level; cvg_prm_cco_set_peer_link_type_bm(vdev, peer); if (peer == cvg_nwm_get_self_peer(vdev)) { /* level of local device changed, update route entry accordingly */ cvg_rt_local_level_changed(vdev); } } } void cvg_prm_set_peer_phase(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t phase1, uint8_t phase2, uint8_t phase3) { uint8_t cleared = 0; IOT_ASSERT(phase1 <= PLC_PHASE_CNT); IOT_ASSERT(phase2 <= PLC_PHASE_CNT); IOT_ASSERT(phase3 <= PLC_PHASE_CNT); /* below rules need to be enforced: * 1. if phase 1 is PLC_PHASE_ALL, then phase 2 and phase 3 are unavailable. * 2. it phase 2 is PLC_PHASE_ALL, it means both phase 2 and phase 3 are * unavailable. * 3. if phase 3 is PLC_PHASE_ALL, then phase 3 is unavailable. * 4. if phase 2 is available, it should be different as phase 1. * 5. if phase 3 is available, it should be different as phase 1 and * phase 2. 
*/ if (phase1 != peer->phase_1) { cvg_prm_cco_clear_peer_phase(vdev, peer); cleared = 1; if (phase1 == PLC_PHASE_ALL) { /* apply rule 1 */ peer->phase_1 = PLC_PHASE_ALL; peer->phase_2 = PLC_PHASE_ALL; peer->phase_3 = PLC_PHASE_ALL; } else { peer->phase_1 = phase1; } } if (peer->phase_1 == PLC_PHASE_ALL) goto out; if (phase2 != peer->phase_2) { if (cleared == 0) { cvg_prm_cco_clear_peer_phase(vdev, peer); cleared = 1; } if (phase2 == PLC_PHASE_ALL || phase2 == peer->phase_1) { /* apply rule 2 & 4 */ peer->phase_2 = PLC_PHASE_ALL; peer->phase_3 = PLC_PHASE_ALL; } else { peer->phase_2 = phase2; } } if (peer->phase_2 == PLC_PHASE_ALL) goto out; if (phase3 != peer->phase_3) { if (cleared == 0) { cvg_prm_cco_clear_peer_phase(vdev, peer); cleared = 1; } if (phase3 == peer->phase_2 || phase3 == peer->phase_1) { /* apply rule 3 & 5 */ peer->phase_3 = PLC_PHASE_ALL; } else { peer->phase_3 = phase3; } } out: if (cleared) { cvg_prm_cco_set_peer_phase(vdev, peer); } return; } void cvg_prm_set_peer_d_sub(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { PLC_VDEV_CFG_D_SUB_STA_STRUCT tlv; cvg_prm_vdev_t *prm = vdev->prm; if (!cvg_prm_is_peer_d_sub(peer)) { peer->d_sub_sta = 1; /* reset delay deletion flag, it will be set true when direct sub sta * delay leaving network */ peer->d_sub_delay_del = 0; tlv.hdr.len = sizeof(tlv) - sizeof(tlv.hdr); // length of data field tlv.hdr.type = PLC_VDEV_CFG_ADD_D_SUB_STA; tlv.sub_sta = cvg_prm_get_peer_tei(vdev, peer); mac_set_vdev_cfg(PLC_PDEV_ID, vdev->mac_vdev_id, (cfg_data_tlv *)&tlv); prm->d_sub_sta_cnt++; IOT_ASSERT(prm->d_sub_sta_cnt <= PLC_TEI_MAX_NUM); } } void cvg_prm_clear_peer_d_sub(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { PLC_VDEV_CFG_D_SUB_STA_STRUCT tlv; cvg_prm_vdev_t *prm = vdev->prm; if (cvg_prm_is_peer_d_sub(peer)) { peer->d_sub_sta = 0; /* become non-direct sub sta, reset delay deletion flag */ peer->d_sub_delay_del = 0; tlv.hdr.len = sizeof(tlv) - sizeof(tlv.hdr); // length of data field tlv.hdr.type = 
PLC_VDEV_CFG_DELETE_D_SUB_STA; tlv.sub_sta = cvg_prm_get_peer_tei(vdev, peer); mac_set_vdev_cfg(PLC_PDEV_ID, vdev->mac_vdev_id, (cfg_data_tlv *)&tlv); IOT_ASSERT(prm->d_sub_sta_cnt); prm->d_sub_sta_cnt--; } } void cvg_prm_set_peer_cco_tf_sr(cvg_vdev_t *vdev, cvg_peer_info_t *peer, uint8_t cco_tf_sr) { (void)vdev; peer->cco_tf_sr = cco_tf_sr; } uint8_t cvg_prm_get_peer_to_cco_min_tf_sr(cvg_peer_info_t *peer) { if (peer->cco_tf_sr < peer->tf_sr) return (uint8_t)peer->cco_tf_sr; else return (uint8_t)peer->tf_sr; } cvg_peer_info_t *cvg_prm_get_cco_peer(cvg_vdev_t *vdev, uint8_t phase) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer; if (phase && phase <= PLC_PHASE_CNT) { peer = cvg_prm_get_peer_by_tei(vdev, PLC_TEI_CCO); if (peer && (peer->phase_1 == phase || peer->phase_2 == phase || peer->phase_3 == phase)) { return &prm->cco_peer[phase - 1]; } } return NULL; } uint8_t cvg_prm_get_cco_peer_new_bp(cvg_vdev_t *vdev, uint8_t phase) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer; if (phase && phase <= PLC_PHASE_CNT) { peer = cvg_prm_get_peer_by_tei(vdev, PLC_TEI_CCO); if (peer && (peer->phase_1 == phase || peer->phase_2 == phase || peer->phase_3 == phase)) { return prm->cco_peer_new_bp[phase - 1]; } } return 1; } void cvg_prm_set_cco_peer_new_bp(cvg_vdev_t *vdev, uint8_t phase) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer; if (phase && phase <= PLC_PHASE_CNT) { peer = cvg_prm_get_peer_by_tei(vdev, PLC_TEI_CCO); if (peer && (peer->phase_1 == phase || peer->phase_2 == phase || peer->phase_3 == phase)) { prm->cco_peer_new_bp[phase - 1] = 1; } } } void cvg_prm_clear_cco_peer_new_bp(cvg_vdev_t *vdev, uint8_t phase) { cvg_prm_vdev_t *prm = vdev->prm; cvg_peer_info_t *peer; if (phase && phase <= PLC_PHASE_CNT) { peer = cvg_prm_get_peer_by_tei(vdev, PLC_TEI_CCO); if (peer && (peer->phase_1 == phase || peer->phase_2 == phase || peer->phase_3 == phase)) { prm->cco_peer_new_bp[phase - 1] = 0; } } } void cvg_prm_copy_peer(cvg_peer_info_t *dst, 
cvg_peer_info_t *src, uint8_t copy_tf_sr) { /* NOTE: rf snr/rssi/tf_sr will not be copied */ dst->rx_snr = src->rx_snr; /* copy beacon rx flag */ dst->bc_rx = src->bc_rx; dst->prev_bc_rx = src->prev_bc_rx; cvg_prm_set_peer_tx_snr(dst, cvg_prm_get_peer_tx_snr(src)); if (copy_tf_sr) { dst->need_cal_rx = src->need_cal_rx; dst->need_cal_tx = src->need_cal_tx; dst->tx_sr = src->tx_sr; dst->rx_sr = src->rx_sr; dst->tf_sr_valid = src->tf_sr_valid; dst->tf_sr = src->tf_sr; dst->dis_rx = src->dis_rx; dst->dis_mme_rx = src->dis_mme_rx; dst->prev_dis_rx = src->prev_dis_rx; dst->prev_dis_mme_rx = src->prev_dis_mme_rx; dst->cco_tf_sr = src->cco_tf_sr; } } uint32_t cvg_prm_get_peer_cnt(cvg_vdev_t *vdev) { cvg_prm_vdev_t *prm = vdev->prm; return cvg_tei_map_cbs(&prm->in_use); } void cvg_prm_dump_peer(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { tei_t tei; if (peer->role == PLC_DEV_ROLE_CCO) { tei = PLC_TEI_CCO; } else { tei = cvg_prm_get_peer_tei(vdev, peer); } iot_printf( "%s tei %lu phase %lu tx_sr %lu rx_sr %lu tf_sr %lu " "cco_tf_sr %lu rx_snr %d tx_snr %d inactive_cnt %lu\n", __FUNCTION__, tei, cvg_prm_get_peer_phase1(peer), peer->tx_sr, peer->rx_sr, peer->tf_sr, peer->cco_tf_sr, peer->rx_snr, cvg_prm_get_peer_tx_snr(peer), peer->hplc_inactive_cnt); iot_dbglog_input(PLC_CVG_RT_MID, DBGLOG_INFO_LVL_2, CVG_PRM_DUMP_PEER_ID, 9, tei, cvg_prm_get_peer_phase1(peer), peer->tx_sr, peer->rx_sr, peer->tf_sr, peer->cco_tf_sr, peer->rx_snr, cvg_prm_get_peer_tx_snr(peer), peer->hplc_inactive_cnt); } /* note that this function is invoked outside CVG context, suppose this * function should only read some statistics variables from CVG layer. 
*/ void cvg_prm_status_dump(cvg_vdev_t *vdev) { if (vdev->role == PLC_DEV_ROLE_CCO) { cvg_prm_cco_status_dump(vdev); } else { cvg_prm_sta_status_dump(vdev); } } void cvg_prm_set_learnt_proxy(cvg_vdev_t *vdev, cvg_peer_info_t *peer, tei_t ptei) { (void)vdev; (void)peer; (void)ptei; #if PLC_SUPPORT_PROXY_LEARNING peer->learnt_proxy = ptei; #endif } tei_t cvg_prm_get_learnt_proxy(cvg_vdev_t *vdev, cvg_peer_info_t *peer) { (void)peer; if (vdev->role == PLC_DEV_ROLE_CCO) { #if PLC_SUPPORT_CCO_ROLE return cvg_prm_get_peer_proxy_tei(peer); #else IOT_ASSERT(0); return PLC_TEI_INVAL; #endif } else { #if PLC_SUPPORT_PROXY_LEARNING return (tei_t)peer->learnt_proxy; #else return PLC_TEI_INVAL; #endif } }