/****************************************************************************

Copyright(c) 2019 by Aerospace C.Power (Chongqing) Microelectronics. ALL RIGHTS RESERVED.

This Information is proprietary to Aerospace C.Power (Chongqing) Microelectronics and MAY NOT
be copied by any method or incorporated into another program without
the express written consent of Aerospace C.Power. This Information or any portion
thereof remains the property of Aerospace C.Power. The Information contained herein
is believed to be accurate and Aerospace C.Power assumes no responsibility or
liability for its use in any way and conveys no license or title under
any patent or copyright and makes no representation or warranty that this
Information is free from patent or copyright infringement.

****************************************************************************/

/* os shim includes */
#include "os_types.h"
#include "os_mem.h"
#include "os_timer.h"

/* common includes */
#include "iot_module.h"
#include "iot_errno.h"
#include "iot_utils.h"
#include "iot_io.h"
#include "iot_config.h"
#include "iot_dbglog_api.h"
#include "iot_dbglog_parser.h"

/* mac module internal includes */
#include "mac_vdev.h"
#include "mac_sched.h"
#include "mac_sched_hw.h"
#include "command_list.h"
#include "mac_desc_engine.h"
#include "mac_hwq_mgr.h"
#include "mac_pdev.h"
#include "mac_tx_hw.h"
#include "nn_cco.h"
#include "mac.h"
#include "mac_cert_test.h"
/* rf support */
#include "mac_rf_sched.h"

/* public api includes */
#include "plc_fr.h"
#include "mac_bcm_api.h"
#include "mac_dsr.h"
#include "mac_isr.h"
#include "phy_bb.h"
#include "mpdu_header.h"
#include "mac_check_spur_cco.h"

#if HW_PLATFORM != HW_PLATFORM_SIMU

/* csma region pkt tx may overlap with the next region due to a HW issue.
 * reserve some gap at the end of the csma region of each beacon period.
 * unit is 1ms
 */
#define MAC_SCHED_CSMA_GAP_MS           40

#else /* HW_PLATFORM != HW_PLATFORM_SIMU */

#define MAC_SCHED_CSMA_GAP_MS           0

#endif /* HW_PLATFORM != HW_PLATFORM_SIMU */

/* define the rx only slot duration for a CCO role device, the unit is 1ms. */
#define MAC_SCHED_CCO_RX_ONLY_SLOT_DUR  100

/* if there is a big hole between the current ntb and the start ntb of the
 * network beacon period, try to insert a csma region into the hole to improve
 * bandwidth utilization.
 * if the hole is equal to or larger than this value, the local device should
 * insert the csma region. the unit is 1 ntb.
 */
#define MAC_SCHED_INSERT_CSMA_TH        MAC_MS_TO_NTB(600)

/* define neighbor network bandwidth change detection threshold.
 * if the neighbor network start ntb moved by an amount equal to or larger
 * than this value, the local device should treat the neighbor network
 * bandwidth as changed. the unit is 1 ntb.
 */
#define MAC_SCHED_NN_BW_CHG_TH          MAC_MS_TO_NTB(2)

/* define neighbor network bandwidth safe guard threshold.
 * if the neighbor network start ntb is far away from the end of the current
 * protected region, the local device won't move the start ntb of the beacon
 * period after the current protected region as there is still enough time to
 * negotiate the bandwidth for the next beacon period. unit is 1 ntb.
 */
#define MAC_SCHED_NN_BW_GUARD           MAC_MS_TO_NTB(1000)

/* define neighbor network info time out value for neighbor networks which
 * can't see the local network. set this value to 10 seconds to pass cert test
 * case 3.2.9.2.
 * the unit is 1 ntb.
 */
#define MAC_SCHED_NN_NO_WATCH_TO_DUR    MAC_MS_TO_NTB(10 * 1000)

/* define the max allowed protected region.
 * if the protected region requested by a neighbor network is larger than this
 * value, the local device will consider the request as invalid. unit is 1ms.
 */
#define MAC_SCHED_NN_MAX_REGION         (5000)

/* define the neighbor network negotiation frame tx timer interval.
 * unit is 1ms.
 */
#define MAC_SCHED_NN_TX_INTERVAL        500

/* define the supported minimum duration for a command list.
 * the unit is 1ms.
 */
#define MAC_SCHED_CMD_LIST_DUR_MIN      500

/* define the maximum count of csma slots supported */
#define MAC_SCHED_CSMA_SPLIT_MAX        500

/* define the max count of received frames in which a neighbor does not watch
 * this network
 */
#define MAC_SCHED_NN_NOT_WATCH_MAX      16

/* length of the head of hw_sched_cmd_list_t */
#define MAC_SCHED_CMD_LIST_HEAD_LEN     (sizeof(hw_sched_cmd_list_t) - \
    sizeof(hw_sched_cmd_t) * HW_SCHED_CMD_MAX_CNT)
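/* Added note (an assumption about the struct layout, not stated in the
 * original sources): the subtraction above yields the byte length of
 * everything in hw_sched_cmd_list_t that precedes the cmd[] array, i.e. it is
 * expected to match offsetof(hw_sched_cmd_list_t, cmd) provided
 * cmd[HW_SCHED_CMD_MAX_CNT] is the trailing member. mac_sched_alloc_cmd_list()
 * relies on this when it resets only the head plus the commands that were
 * actually used.
 */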

/* neighbor network info */
typedef struct _mac_nn_info {
    /* network id of the neighbor network */
    uint32_t    nid             :24,
    /* flag to mark if the neighbor network is watching us */
                watched         :1,
    /* count of received frames in which we were not watched */
                not_watch_cnt   :7;
    /* latest nncco frame rx ntb from the neighbor network */
    uint64_t    rx_ntb;
    /* start ntb of next protected region */
    uint64_t    start_ntb;
    /* end ntb of next protected region */
    uint64_t    end_ntb;
    /* requested protected region length, unit is 1ms */
    uint16_t    region_dur;
    /* neighbor network self rf channel id */
    uint8_t     self_ch_id;
    /* neighbor network self rf option id */
    uint8_t     self_option;
} mac_nn_info_t;

/* scheduler context */
typedef struct _mac_sched_ctx {
    /* current command list queue in use */
    hw_sched_cmd_list_t *curr_hw_cmd_list;
    /* command list queue */
    hw_sched_cmd_list_t *hw_cmd_list[HW_SCHED_QUEUE_DEPTH];
    /* command list queue count */
    uint8_t hw_cmd_list_cnt;
    /* command list queue pos of the next free slot */
    uint8_t hw_cmd_list_pos;
#if PLC_SUPPORT_CCO_ROLE
    /* flag to mark if neighbor network negotiation frame tx is allowed */
    uint8_t allow_tx    :1,
    /* flag to mark if the next neighbor network negotiation frame tx needs to
     * carry all neighbor network NIDs watched by the local device.
     */
            bc_all      :1,
    /* flag to mark if NID conflict detected */
            nid_conflict:1,
    /* flag to mark if neighbour network negotiation is enabled,
     * default is enabled
     */
            nw_nego_en:  1,
    /* reserved for future */
            rsvd        :4;
    /* next watched neighbor network nid broadcast index, the local device
     * should try to broadcast all watched neighbor network nids in each
     * beacon period.
     */
    uint8_t nid_bc_idx;
    /* number of available nids in the prev_nid_list */
    uint8_t prev_nid_list_cnt;
    /* number of available nids in the curr_nid_list */
    uint8_t curr_nid_list_cnt;
    /* protected region length. unit is 1ms */
    uint16_t region_dur;
    /* start ntb (64bit) from which neighbor network negotiation frame tx is
     * allowed
     */
    uint64_t allow_start_ntb;
    /* end ntb (64bit) until which neighbor network negotiation frame tx is
     * allowed
     */
    uint64_t allow_end_ntb;
    /* heard network id list in previous beacon periods. */
    mac_nn_info_t prev_nid_list[PLC_MAX_NEIGHBOR_NETWORK];
    /* heard network id list in the current beacon period. note that networks
     * heard in previous beacon periods whose protected region has not expired
     * yet are also included.
     */
    mac_nn_info_t curr_nid_list[PLC_MAX_NEIGHBOR_NETWORK];
    /* neighbor network negotiation frame tx timer */
    timer_id_t tx_timer;
#endif
} mac_sched_ctx_t;

/* pack for the structures in the whole file */
#pragma pack(push)  /* save the pack status */
#pragma pack(1)     /* 1 byte align */
/* csma slot info */
typedef struct _mac_csma_slot {
    /* slot phase info */
    uint8_t phase       : 2,
    /* flag to mark if this slot is the last slot */
            last        : 1,
    /* reserved for future */
            rsvd        : 5;
} mac_csma_slot_t;
#pragma pack(pop) /* restore the pack status */

/* temp buffer to save split csma slot info */
static mac_csma_slot_t g_csma_slot[MAC_SCHED_CSMA_SPLIT_MAX];
/* slot count of each phase. the phase sequence will be affected by
 * g_slot_phase.
 */
static uint16_t g_slot_cnt[PLC_PHASE_CNT];
/* phase array to identify the phase sequence */
static uint16_t g_slot_phase[PLC_PHASE_CNT];
/* last slot duration of each phase as the last slot may not be a multiple
 * of the fragment length. the phase sequence is always A, B, C and won't be
 * affected by g_slot_phase.
 */
static uint16_t g_slot_last[PLC_PHASE_CNT];

#if MAC_SCHED_DEBUG

static void mac_sched_dump(hw_sched_cmd_list_t *cl, uint16_t cnt)
{
    uint16_t i;

    iot_printf("%s total %lu, recursive %lu stntb:%lu\n",
        __FUNCTION__, cnt, cl->recursive, cl->start_ntb);
    for (i = 0; i < cnt; i++) {
        if (cl->cmd[i].t_info.e.r_flag) {
            if (cl->cmd[i].t_info.e.s_flag) {
                iot_printf("rs phase - %lu, start offset %lu, "
                    "end offset - %lu, q_bm %x\n",
                cl->cmd[i].phase, cl->cmd[i].t_info.se.start_t,
                cl->cmd[i].t_info.se.end_t, cl->cmd[i].tx_q_en_bm);
            } else {
                if (cl->cmd[i].t_info.r.rf_flag) {
                    iot_printf("rf phase - %lu, end offset - %lu, q_bm %x\n",
                        cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
                        cl->cmd[i].tx_q_en_bm);
                }
                if (cl->cmd[i].t_info.r.re_flag) {
                    iot_printf("rl phase - %lu, end offset - %lu, q_bm %x\n",
                        cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
                        cl->cmd[i].tx_q_en_bm);
                }
                if (!cl->cmd[i].t_info.r.rf_flag &&
                    !cl->cmd[i].t_info.r.re_flag) {
                    iot_printf("rm phase - %lu, end offset - %lu, q_bm %x\n",
                        cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
                        cl->cmd[i].tx_q_en_bm);
                }
            }
        } else if (cl->cmd[i].t_info.e.s_flag) {
            iot_printf("st phase - %lu, start offset %lu, end offset - %lu, "
                "q_bm %x\n",
                cl->cmd[i].phase, cl->cmd[i].t_info.se.start_t,
                cl->cmd[i].t_info.se.end_t, cl->cmd[i].tx_q_en_bm);
        } else {
            iot_printf("e phase - %lu, end offset - %lu, q_bm %x\n",
                cl->cmd[i].phase, cl->cmd[i].t_info.e.end_t,
                cl->cmd[i].tx_q_en_bm);
        }
    }
    iot_printf("------------------------------------------------------\n");
}

#else /* MAC_SCHED_DEBUG */

#define mac_sched_dump(cl, cnt)

#endif /* MAC_SCHED_DEBUG */

#if MAC_SCHED_NN_DEBUG

void mac_sched_nn_dump(mac_vdev_t *vdev)
{
    uint8_t i;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;

    /* dump previous neighbor network info */
    iot_printf("%s prev ----------------------------\n", __FUNCTION__);
    for (i = 0; i < ctx->prev_nid_list_cnt; i++) {
        iot_printf("%s nid %lu, start_ntb %lu-%lu, end_ntb %lu-%lu\n",
            __FUNCTION__, ctx->prev_nid_list[i].nid,
            iot_uint64_higher32(ctx->prev_nid_list[i].start_ntb),
            iot_uint64_lower32(ctx->prev_nid_list[i].start_ntb),
            iot_uint64_higher32(ctx->prev_nid_list[i].end_ntb),
            iot_uint64_lower32(ctx->prev_nid_list[i].end_ntb));
    }
    iot_printf("%s current -------------------------\n", __FUNCTION__);
    for (i = 0; i < ctx->curr_nid_list_cnt; i++) {
        iot_printf("%s nid %lu, start_ntb %lu-%lu, end_ntb %lu-%lu\n",
            __FUNCTION__, ctx->curr_nid_list[i].nid,
            iot_uint64_higher32(ctx->curr_nid_list[i].start_ntb),
            iot_uint64_lower32(ctx->curr_nid_list[i].start_ntb),
            iot_uint64_higher32(ctx->curr_nid_list[i].end_ntb),
            iot_uint64_lower32(ctx->curr_nid_list[i].end_ntb));
    }
}

#else /* MAC_SCHED_NN_DEBUG */

#define mac_sched_nn_dump(vdev)

#endif /* MAC_SCHED_NN_DEBUG */

/* get wide band beacon queue bitmap according to the appointed phase */
static uint32_t mac_sched_get_wb_bc_q(mac_vdev_t *vdev, uint8_t phase)
{
    uint32_t ret = 0;
    mac_queue_ctxt_t *queue = &g_mac_pdev[vdev->ref_pdev_id]->hwq_hdl;

    switch (phase) {
    case PLC_PHASE_A:
    case PLC_PHASE_ALL: /* force A if all for HW limitation of TDMA Q */
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCN_A);
        break;
    case PLC_PHASE_B:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCN_B);
        break;
    case PLC_PHASE_C:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCN_C);
        break;
    default:
        IOT_ASSERT(0);
        break;
    }

    return ret;
}

/* get csma queue bitmap according to the appointed phase */
static uint32_t mac_sched_get_csma_q(mac_vdev_t *vdev, uint8_t phase)
{
    uint32_t ret = 0;
    mac_queue_ctxt_t *queue = &g_mac_pdev[vdev->ref_pdev_id]->hwq_hdl;

    switch (phase) {
    case PLC_PHASE_A:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_A_0);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_A_1);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_A_2);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_A_3);
        break;
    case PLC_PHASE_B:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_B_0);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_B_1);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_B_2);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_B_3);
        break;
    case PLC_PHASE_C:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_C_0);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_C_1);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_C_2);
        ret |= 1 << mac_q_get_hwqid(queue, MAC_QUE_CSMA_C_3);
        break;
    default:
        IOT_ASSERT(0);
        break;
    }

    return ret;
}

/* get tdma queue bitmap according to the appointed phase */
static uint32_t mac_sched_get_tdma_q(mac_vdev_t *vdev, uint8_t phase)
{
    uint32_t ret = 0;
    mac_queue_ctxt_t *queue = &g_mac_pdev[vdev->ref_pdev_id]->hwq_hdl;

    switch (phase) {
    case PLC_PHASE_A:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_TDMA_A);
        break;
    case PLC_PHASE_B:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_TDMA_B);
        break;
    case PLC_PHASE_C:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_TDMA_C);
        break;
    default:
        IOT_ASSERT(0);
        break;
    }

    return ret;
}

/* get dedicated csma queue bitmap according to the appointed phase */
static uint32_t mac_sched_get_dcsma_q(mac_vdev_t *vdev, uint8_t phase)
{
    uint32_t ret = 0;
    mac_queue_ctxt_t *queue = &g_mac_pdev[vdev->ref_pdev_id]->hwq_hdl;

    switch (phase) {
    case PLC_PHASE_A:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCSMA_A);
        break;
    case PLC_PHASE_B:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCSMA_B);
        break;
    case PLC_PHASE_C:
        ret = 1 << mac_q_get_hwqid(queue, MAC_QUE_BCSMA_C);
        break;
    default:
        IOT_ASSERT(0);
        break;
    }

    return ret;
}

static hw_sched_cmd_list_t *mac_sched_alloc_cmd_list(mac_vdev_t *vdev)
{
    uint32_t cl_reset_len;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    hw_sched_cmd_list_t *cl = NULL;

    IOT_ASSERT(ctx->hw_cmd_list_cnt <= HW_SCHED_QUEUE_DEPTH);

    if (ctx->hw_cmd_list_cnt < HW_SCHED_QUEUE_DEPTH) {
        /* free command list available, allocate a new one */
        if (ctx->hw_cmd_list[ctx->hw_cmd_list_pos] == NULL) {
            mac_desc_get(&g_mac_desc_eng, PLC_SCHED_CMD_LIST_POOL, (void **)&cl);
            ctx->hw_cmd_list[ctx->hw_cmd_list_pos] = cl;
        } else {
            cl = ctx->hw_cmd_list[ctx->hw_cmd_list_pos];
        }
        ctx->hw_cmd_list_pos++;
        if (ctx->hw_cmd_list_pos == HW_SCHED_QUEUE_DEPTH)
            ctx->hw_cmd_list_pos = 0;
        ctx->hw_cmd_list_cnt++;
    } else {
        /* check if any command list returned from HW */
        if (mac_sched_get_cmd_list_cnt(vdev) >= HW_SCHED_QUEUE_DEPTH) {
            //TODO: dbg for hw sched
            os_mem_cpy(ctx->hw_cmd_list[0]->cmd, ctx->hw_cmd_list[1], 20);
            IOT_ASSERT_DUMP(0, (uint32_t *)ctx->hw_cmd_list[0], 10);
        }
        /* re-use the previous command list returned from HW */
        cl = ctx->hw_cmd_list[ctx->hw_cmd_list_pos];
        ctx->hw_cmd_list_pos++;
        if (ctx->hw_cmd_list_pos == HW_SCHED_QUEUE_DEPTH)
            ctx->hw_cmd_list_pos = 0;
    }
    IOT_ASSERT(cl);
    cl_reset_len = MAC_SCHED_CMD_LIST_HEAD_LEN;
    cl_reset_len += min(cl->total_cnt + 1, HW_SCHED_CMD_MAX_CNT) *
        sizeof(hw_sched_cmd_t);
    os_mem_set(cl, 0, cl_reset_len);

#if MAC_SCHED_DEBUG

    iot_printf("%s allocate %p\n", __FUNCTION__, cl->cmd);

#endif

    return cl;
}

static void mac_sched_free_cmd_list(mac_vdev_t *vdev)
{
    mac_sched_ctx_t *ctx = vdev->sched_ctx;

    ctx->hw_cmd_list_cnt = 0;
    ctx->hw_cmd_list_pos = 0;
}

/* split csma slots according to the SG spec. the unit of frag is 1ms. */
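/* Worked example of the split below (illustrative numbers, not taken from the
 * spec text): with frag = 100 and slot durations A = 300ms, B = 100ms,
 * C = 200ms, the per-phase slot counts are A:3, B:1, C:2 (total 6) and
 * g_slot_last[] holds 100ms for each phase (remainder 0 plus one frag). After
 * sorting by ascending slot count the placement strides are B:6 and C:3,
 * while phase A (the last one) fills whatever is left, giving the interleaved
 * pattern B C A A C A with the "last" flag set on slot 0 (B), slot 4 (C) and
 * slot 5 (A).
 */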
static uint16_t mac_sched_split_csma(mac_bc_cmsa_si_t *csma, uint32_t frag)
{
    uint16_t g_slot_base[PLC_PHASE_CNT];
    uint16_t i, j, k, idx, b_switch, total_cnt = 0;
    uint8_t phase_cnt = 0;

    IOT_ASSERT(csma->phase_cnt <= PLC_PHASE_CNT);
    IOT_ASSERT(frag);

    os_mem_set(g_csma_slot, 0, sizeof(g_csma_slot));
    os_mem_set(g_slot_cnt, 0, sizeof(g_slot_cnt));
    os_mem_set(g_slot_phase, 0, sizeof(g_slot_phase));
    os_mem_set(g_slot_base, 0, sizeof(g_slot_base));
    os_mem_set(g_slot_last, 0, sizeof(g_slot_last));

    /* calculate slot cnt of each phase */
    for (i = 0; i < csma->phase_cnt; i++) {
        /* exclude 0 duration phase */
        if (csma->slot_dur[i]) {
            g_slot_cnt[phase_cnt] = (uint16_t)(csma->slot_dur[i] / frag);
            g_slot_last[csma->phase[i] - 1] =
                (uint16_t)(csma->slot_dur[i] % frag);
            g_slot_phase[phase_cnt] = csma->phase[i];
            if (g_slot_cnt[phase_cnt] == 0) {
                g_slot_cnt[phase_cnt] = 1;
            } else {
                g_slot_last[csma->phase[i] - 1] += (uint16_t)frag;
            }
            total_cnt += g_slot_cnt[phase_cnt];
            phase_cnt++;
        }
    }

    if (total_cnt == 0 || phase_cnt == 0)
        return 0;

    IOT_ASSERT(total_cnt <= MAC_SCHED_CSMA_SPLIT_MAX);

    /* sort from fewest to most slots */
    for (i = 0; i < phase_cnt; i++) {
        for (j = i + 1; j < phase_cnt; j++) {
            b_switch = 0;
            if (g_slot_cnt[i] > g_slot_cnt[j]) {
                b_switch = 1;
            } else if (g_slot_cnt[i] == g_slot_cnt[j]) {
                /* for phases with the same slot count, we should follow the
                 * A, B, C sequence according to the smart grid spec.
                 */
                if (g_slot_phase[j] < g_slot_phase[i]) {
                    b_switch = 1;
                }
            }
            if (b_switch) {
                k = g_slot_cnt[i];
                g_slot_cnt[i] = g_slot_cnt[j];
                g_slot_cnt[j] = k;
                k = g_slot_phase[i];
                g_slot_phase[i] = g_slot_phase[j];
                g_slot_phase[j] = k;
            }
        }
    }

    /* arrange slots for each phase */
    for (i = 0; i < phase_cnt; i++) {
        /* the last phase always tries to use any leftover slots */
        if (i == (phase_cnt - 1))
            g_slot_base[i] = 1;
        else
            g_slot_base[i] = total_cnt / g_slot_cnt[i];
        for (j = 0; j < g_slot_cnt[i]; j++) {
            idx = i + j * g_slot_base[i];
            if (g_csma_slot[idx].phase == 0) {
                /* free slot found */
                g_csma_slot[idx].phase = (uint8_t)g_slot_phase[i];
                if (j == (g_slot_cnt[i] - 1)) {
                    g_csma_slot[idx].last = 1;
                }
            } else {
                /* try to find the next free slot */
                for (k = idx; k < total_cnt; k++) {
                    if (g_csma_slot[k].phase == 0) {
                        g_csma_slot[k].phase = (uint8_t)g_slot_phase[i];
                        if (j == (g_slot_cnt[i] - 1)) {
                            g_csma_slot[k].last = 1;
                        }
                        break;
                    }
                }
                IOT_ASSERT(k < total_cnt);
            }
        }
    }

#if MAC_SCHED_DEBUG

    iot_printf("%s total %lu, frag %lu, phase %lu %lu, phase %lu %lu, "
        "phase %lu %lu-------\n",
        __FUNCTION__, total_cnt, frag, csma->phase[0], csma->slot_dur[0],
        csma->phase[1], csma->slot_dur[1], csma->phase[2], csma->slot_dur[2]);
    for (i = 0; i < total_cnt; i++) {
        switch (g_csma_slot[i].phase) {
        case PLC_PHASE_A:
            iot_printf("A");
            break;
        case PLC_PHASE_B:
            iot_printf("B");
            break;
        case PLC_PHASE_C:
            iot_printf("C");
            break;
        default:
            IOT_ASSERT(0);
            break;
        }
    }
    iot_printf("------------------------------------------------------\n");

#endif /* MAC_SCHED_DEBUG */

    return total_cnt;
}

/* load the command list into the HW scheduler; return 1 if the command list
 * is already done.
 */
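/* Added walk-through (illustrative numbers only; the real HW command depth is
 * HW_SHCED_CMD_DEPTH): if a list holds 20 commands and the HW can take 16 at a
 * time, the first call pushes commands 0..15 unless the duration left for
 * commands 16..19 would fall below MAC_SCHED_CMD_LIST_DUR_MIN, in which case
 * end_cmd is walked back until enough time remains; the next call pushes the
 * remainder and a further call returns 1 once next_idx reaches total_cnt.
 */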
static uint32_t mac_sched_load_cmd_list(mac_vdev_t *vdev,
    hw_sched_cmd_list_t *cl)
{
    uint32_t cnt, curr_dur, left_dur;
    hw_sched_cmd_t *start_cmd, *end_cmd, *last_cmd, *prev_cmd = NULL;

    if (cl->next_idx == cl->total_cnt) {
        /* current command list is done */
        return 1;
    }

    IOT_ASSERT(cl->next_idx < cl->total_cnt);

    /* set start ntb of next beacon period */
    mac_sched_set_bp_start_ntb(vdev, cl->start_ntb);

    /* calculate how many commands can be pushed into the HW scheduler */
    cnt = cl->total_cnt - cl->next_idx;
    cnt = min(cnt, HW_SHCED_CMD_DEPTH);
    start_cmd = cl->cmd + cl->next_idx;
    if (cl->next_idx) {
        prev_cmd = start_cmd - 1;
    }
    end_cmd = start_cmd + (cnt - 1);
    last_cmd = cl->cmd + (cl->total_cnt - 1);
    /* push commands into the HW */
    mac_sched_set_bp_cmd_list(vdev, start_cmd, (uint16_t)cnt);

    if (cl->recursive) {
        /* for a recursive command list, no fragmentation is required */
        IOT_ASSERT(cl->total_cnt <= HW_SHCED_CMD_DEPTH);
        /* set command list duration */
        mac_sched_set_bp_dur(vdev, (uint16_t)start_cmd->t_info.se.end_t);
#if MAC_SCHED_DEBUG
        iot_printf("%s %lu start offset %lu, end offset %lu\n", __FUNCTION__,
            cl->total_cnt, start_cmd->t_info.se.start_t,
            start_cmd->t_info.se.end_t);
#endif
    } else {
        if (prev_cmd) {
            curr_dur = end_cmd->t_info.e.end_t - prev_cmd->t_info.e.end_t;
        } else {
            curr_dur = end_cmd->t_info.e.end_t;
        }
        if (last_cmd != end_cmd) {
            /* reduce the current command list duration to make sure enough
             * duration is left for the last command list.
             */
            left_dur = last_cmd->t_info.e.end_t - end_cmd->t_info.e.end_t;
            while (left_dur < MAC_SCHED_CMD_LIST_DUR_MIN) {
                end_cmd--;
                cnt--;
                IOT_ASSERT(end_cmd > start_cmd);
                left_dur = last_cmd->t_info.e.end_t - end_cmd->t_info.e.end_t;
            }
            if (prev_cmd) {
                curr_dur = end_cmd->t_info.e.end_t - prev_cmd->t_info.e.end_t;
            } else {
                curr_dur = end_cmd->t_info.e.end_t;
            }
        }
        if (prev_cmd) {
            IOT_ASSERT(curr_dur >= MAC_SCHED_CMD_LIST_DUR_MIN);
        }
        /* set command list duration */
        mac_sched_set_bp_dur(vdev, (uint16_t)end_cmd->t_info.e.end_t);
#if MAC_SCHED_DEBUG
        iot_printf("%s %lu start offset %lu, end offset %lu\n", __FUNCTION__,
            cl->total_cnt, prev_cmd ? prev_cmd->t_info.e.end_t : 0,
            end_cmd->t_info.e.end_t);
#endif
    }

    /* trigger new beacon period */
    mac_sched_trigger_bp(vdev);

    cl->next_idx += cnt;

    return 0;
}

void mac_sched_cco_snr_rx(mac_vdev_t *vdev, uint32_t nid, int8_t snr,
    uint8_t rf_id, uint8_t rf_option, uint8_t is_rf, uint8_t band_id)
{
    mac_cco_snr_rpt_t *rpt;
    iot_pkt_t *pkt;

    if (!vdev->start_cfg.mac_cco_snr_rx_func) {
        goto out;
    }
    pkt = iot_pkt_alloc(sizeof(*rpt), PLC_MAC_SCHED_MID);
    if (!pkt) {
        goto out;
    }
    rpt = (mac_cco_snr_rpt_t *)iot_pkt_data(pkt);
    rpt->nid = nid;
    rpt->snr = snr;
    rpt->rf_channel = rf_id;
    rpt->rf_option = rf_option;
    rpt->is_rf = is_rf;
    rpt->band_id = band_id;
    vdev->start_cfg.mac_cco_snr_rx_func(vdev->start_cfg.mac_callback_arg, pkt);
out:
    return;
}

#if (PLC_SUPPORT_CCO_ROLE)

void mac_sched_nn_report_nid(mac_vdev_t *vdev)
{
    iot_pkt_t *buf;
    uint8_t i;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    mac_nid_list_rpt_t *rpt;

    if (ctx->curr_nid_list_cnt == 0)
        return;

    buf = iot_pkt_alloc(sizeof(*rpt) +
        (sizeof(rpt->nid_list[0]) * ctx->curr_nid_list_cnt), PLC_MAC_SCHED_MID);
    if (!buf)
        return;

    rpt = (mac_nid_list_rpt_t *)iot_pkt_data(buf);
    rpt->nid_cnt = ctx->curr_nid_list_cnt;
    for (i = 0; i < ctx->curr_nid_list_cnt; i++) {
        rpt->nid_list[i].nid = ctx->curr_nid_list[i].nid;
        rpt->nid_list[i].sp_flag = !(ctx->curr_nid_list[i].watched);
        rpt->nid_list[i].bandwidth = ctx->curr_nid_list[i].region_dur;
        rpt->nid_list[i].rf_channel = ctx->curr_nid_list[i].self_ch_id;
        rpt->nid_list[i].rf_option = ctx->curr_nid_list[i].self_option;
    }
    vdev->start_cfg.mac_nid_rpt_func(vdev->start_cfg.mac_callback_arg, buf);
}

/* @brief mac_sched_nn_calc_start_ntb() - calculate start ntb according to
 * latest neighbor network info
 * @param vdev:             pointer to mac vdev.
 * @param start_ntb64:      planned start ntb 64bit version.
 * @param region_dur:       required protected region length. unit is 1ms.
 * @param latest_end_ntb64: end ntb 64bit version of current conflict region.
 * @retval calculated start ntb 64bit version.
 */
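/* Back-off rules applied below when the planned region overlaps a neighbor
 * (summary of the code, with a made-up example for clarity): rule 1 - the
 * neighbor does not watch us; rule 2 - the neighbor's region began earlier
 * and ends before ours does; rule 3 - both regions end at roughly the same
 * time and the neighbor's NID is smaller than ours. In every case the planned
 * start is moved just past the neighbor's end_ntb plus MAC_SCHED_NN_BW_CHG_TH
 * and the whole list is scanned again. For example, a planned region covering
 * roughly [1000, 1500] ms against an unwatched neighbor occupying
 * [1200, 1800] ms is pushed to start right after 1800 ms and then re-checked
 * against the remaining neighbors.
 */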
static uint64_t mac_sched_nn_calc_start_ntb(mac_vdev_t *vdev,
    uint64_t start_ntb64, uint16_t region_dur, uint64_t *latest_end_ntb64)
{
    uint8_t i, confict_found, nid_list_cnt, overlapped;
    uint32_t ntb_dur;
    uint64_t new_start_ntb, new_end_ntb;
    mac_nn_info_t *nn_info, *nid_list;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    nid_t nid;

    new_start_ntb = start_ntb64;
    ntb_dur = MAC_MS_TO_NTB(region_dur);
    new_end_ntb = new_start_ntb + ntb_dur;

#if MAC_SCHED_NN_DEBUG
    iot_printf("%s next_start_ntb %lu-%lu, next_end_ntb %lu-%lu\n",
        __FUNCTION__, iot_uint64_higher32(new_start_ntb),
        iot_uint64_lower32(new_start_ntb), iot_uint64_higher32(new_end_ntb),
        iot_uint64_lower32(new_end_ntb));
#endif

    *latest_end_ntb64 = 0;
    nid_list_cnt = ctx->curr_nid_list_cnt;
    nid_list = ctx->curr_nid_list;
again:
    confict_found = 0;
    /* compare all networks to find out the bandwidth allocation */
    for (i = 0; i < nid_list_cnt; i++) {
        if (nid_list[i].nid == 0 || nid_list[i].region_dur == 0)
            continue;
        /* found existing slot */
        nn_info = &nid_list[i];
        if ((nn_info->start_ntb - MAC_SCHED_NN_BW_CHG_TH) <= new_start_ntb
            &&
            (nn_info->end_ntb - MAC_SCHED_NN_BW_CHG_TH) <= new_start_ntb) {
            /* no overlapped region */
            overlapped = 0;
        } else if ((nn_info->start_ntb + MAC_SCHED_NN_BW_CHG_TH)
            >= new_end_ntb &&
            (nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH) >= new_end_ntb) {
            /* no overlapped region */
            overlapped = 0;
        } else {
            overlapped = 1;
        }
        if (overlapped == 0) {
            if ((nn_info->end_ntb > new_end_ntb) &&
                (nn_info->end_ntb - new_end_ntb) < MAC_SCHED_NN_BW_GUARD) {
                if (*latest_end_ntb64 < nn_info->end_ntb) {
                    *latest_end_ntb64 = nn_info->end_ntb;
                }
            }
            continue;
        } else {
            if (*latest_end_ntb64 < nn_info->end_ntb) {
                *latest_end_ntb64 = nn_info->end_ntb;
            }
        }
        /* if the code reaches here, an overlapped protected region was
         * detected.
         */
        if (nn_info->watched == 0) {
            /* rule 1, if the remote network can't see us, we should back
             * off.
             */
#if MAC_SCHED_NN_DEBUG
            iot_printf("%s rule 1 applied, nn_start_ntb %lu-%lu, "
                "nn_end_ntb %lu-%lu\n", __FUNCTION__,
                iot_uint64_higher32(nn_info->start_ntb),
                iot_uint64_lower32(nn_info->start_ntb),
                iot_uint64_higher32(nn_info->end_ntb),
                iot_uint64_lower32(nn_info->end_ntb));
#endif
            new_start_ntb = nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH;
            new_end_ntb = new_start_ntb + ntb_dur;
            confict_found = 1;
            continue;
        }
        if (((nn_info->start_ntb + MAC_SCHED_NN_BW_CHG_TH)
            <= new_start_ntb) &&
            ((nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH)
            <= new_end_ntb)) {
            /* rule 2, if the remote network beacon period ends first, we
             * should back off.
             */
#if MAC_SCHED_NN_DEBUG
            iot_printf("%s rule 2 applied, nn_start_ntb %lu-%lu, "
                "nn_end_ntb %lu-%lu\n", __FUNCTION__,
                iot_uint64_higher32(nn_info->start_ntb),
                iot_uint64_lower32(nn_info->start_ntb),
                iot_uint64_higher32(nn_info->end_ntb),
                iot_uint64_lower32(nn_info->end_ntb));
#endif
            new_start_ntb = nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH;
            new_end_ntb = new_start_ntb + ntb_dur;
            confict_found = 1;
            continue;
        } else if ((nn_info->start_ntb - MAC_SCHED_NN_BW_CHG_TH)
            < new_start_ntb) {
            /* the two networks end at the same time */
            if ((ERR_OK == vdev_get_nid(vdev, &nid)) \
                && (PLC_NID_INVALID != nid)) {
                if (nn_info->nid < nid) {
                    /* rule 3, if the remote network nid is smaller than ours,
                     * we should back off.
                     */
#if MAC_SCHED_NN_DEBUG
                    iot_printf("%s rule 3 applied, nn_start_ntb %lu-%lu, "
                        "nn_end_ntb %lu-%lu\n", __FUNCTION__,
                        iot_uint64_higher32(nn_info->start_ntb),
                        iot_uint64_lower32(nn_info->start_ntb),
                        iot_uint64_higher32(nn_info->end_ntb),
                        iot_uint64_lower32(nn_info->end_ntb));
#endif
                    new_start_ntb = nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH;
                    new_end_ntb = new_start_ntb + ntb_dur;
                    confict_found = 1;
                    continue;
                }
            }
        }
    }

    if (confict_found)
        goto again;

    *latest_end_ntb64 += MAC_SCHED_NN_BW_CHG_TH;
#if MAC_SCHED_NN_DEBUG
    if (start_ntb64 != new_start_ntb) {
        iot_printf("%s start_ntb %lu-%lu, new_start_ntb %lu-%lu, "
            "latest_end_ntb %lu-%lu\n",
            __FUNCTION__,
            iot_uint64_higher32(start_ntb64),
            iot_uint64_lower32(start_ntb64),
            iot_uint64_higher32(new_start_ntb),
            iot_uint64_lower32(new_start_ntb),
            iot_uint64_higher32(*latest_end_ntb64),
            iot_uint64_lower32(*latest_end_ntb64));
    }
#endif

    return new_start_ntb;
}

static void mac_sched_nn_update(mac_vdev_t *vdev)
{
    uint8_t i, j, remove;
    uint64_t curr_ntb64;
    mac_nn_info_t *nn_info;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;

    mac_sched_nn_dump(vdev);

    /* save neighbor network info watched in the current beacon period */
    os_mem_cpy(ctx->prev_nid_list, ctx->curr_nid_list,
        sizeof(ctx->curr_nid_list));
    ctx->prev_nid_list_cnt = ctx->curr_nid_list_cnt;

    curr_ntb64 = mac_sched_get_ntb64(vdev);
    /* clean up current neighbor network info */
    i = 0;
    while (i < ctx->curr_nid_list_cnt) {
        nn_info = &ctx->curr_nid_list[i];
        remove = 0;
        if (nn_info->watched) {
            if (curr_ntb64 >= nn_info->end_ntb ||
                (nn_info->end_ntb + MAC_SCHED_NN_BW_CHG_TH) <=
                vdev->bcn_ctx.cco.next_start_ntb64) {
                /* the neighbor network protected region is invalid or there is
                 * no overlapped region; remove the network from the current
                 * neighbor network list
                 */
                remove = 1;
            }
#if HW_PLATFORM != HW_PLATFORM_SIMU
        } else {
            /* for a neighbor network which can't see us, keep it for
             * a while to pass cert case 3.2.9.2.
             */
            if ((curr_ntb64 - nn_info->rx_ntb) > MAC_SCHED_NN_NO_WATCH_TO_DUR) {
                remove = 1;
            }
#endif /* HW_PLATFORM != HW_PLATFORM_SIMU */
        }
        if (remove) {
            j = ctx->curr_nid_list_cnt - 1;
            if (j > i) {
                os_mem_cpy(nn_info, &ctx->curr_nid_list[j], sizeof(*nn_info));
            }
            os_mem_set(&ctx->curr_nid_list[j], 0, sizeof(*nn_info));
            ctx->curr_nid_list_cnt--;
        } else {
            i++;
        }
    }

    /* reset the neighbor network info broadcast index to 0 for the new
     * beacon period.
     */
    ctx->nid_bc_idx = 0;
}

static void mac_sched_nn_tx_internal(mac_vdev_t *vdev, uint32_t nid)
{
    uint64_t curr_ntb;
    uint32_t dur;
    uint8_t cert_mode = mac_get_cert_test_flag();
    mac_sched_ctx_t *ctx = vdev->sched_ctx;

    if (PLC_SUPPORT_NEIGHBOR_NW_NEGO &&
        ctx->nw_nego_en &&
        ctx->allow_tx &&
        ctx->nid_conflict == 0) {
        curr_ntb = mac_sched_get_ntb64(vdev);
        if (curr_ntb <= ctx->allow_start_ntb ||
            curr_ntb >= ctx->allow_end_ntb)
            return;
        /* calculate protected zone duration */
        dur = ctx->region_dur;
        curr_ntb = vdev->bcn_ctx.cco.next_start_ntb64;
        uint64_t last_ntb = vdev->bcn_ctx.cco.curr_start_ntb64;

        /* fix gw hplc cert test bed case 3.2.9.5: cco nncco tx is too
         * frequent, and the test bed SOF TX conflicts with the NNCCO frame
         * when using the kl3 hardware device.
         */
        if (cert_mode && !vdev->sta_joined) {
            /* if currently transmitting on a single phase or in cert mode,
             * we tx 3 packets separately, one by one
             */
            if (PLC_SUPPORT_CCO_TX_3_PHASE_SLOT) {
                /* still need to queue 3 packets here to fulfill cert test
                 * case 3.2.9.1.
                 */
                mac_tx_nncco(vdev->ref_pdev_id,
                    vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                    HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                    PLC_PHASE_A);
                mac_tx_nncco(vdev->ref_pdev_id,
                    vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                    HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                    PLC_PHASE_A);
                mac_tx_nncco(vdev->ref_pdev_id,
                    vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                    HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                    PLC_PHASE_A);
            } else {
                mac_tx_nncco(vdev->ref_pdev_id,
                    vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                    HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                    vdev->l_phase1);
                if (vdev->l_phase2) {
                    mac_tx_nncco(vdev->ref_pdev_id,
                        vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                        HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                        vdev->l_phase2);
                }
                if (vdev->l_phase3) {
                    mac_tx_nncco(vdev->ref_pdev_id,
                        vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO,
                        HW_DESC_TX_PORT_PLC, 3, dur, last_ntb, curr_ntb, nid,
                        vdev->l_phase3);
                }
            }
        } else {
            phase_t tdphase = PLC_PHASE_ALL;
            if (vdev->nncco_tx_3phase && !cert_mode) {
                tdphase = (phase_t)vdev->nncco_td_tx_phase;
                if (tdphase == PLC_PHASE_A) {
                    if (vdev->l_phase2) {
                        vdev->nncco_td_tx_phase = PLC_PHASE_B;
                    } else if (vdev->l_phase3) {
                        vdev->nncco_td_tx_phase = PLC_PHASE_C;
                    }
                } else if (tdphase == PLC_PHASE_B) {
                    if (vdev->l_phase3) {
                        vdev->nncco_td_tx_phase = PLC_PHASE_C;
                    } else {
                        vdev->nncco_td_tx_phase = PLC_PHASE_A;
                    }
                } else {
                    vdev->nncco_td_tx_phase = PLC_PHASE_A;
                }
            }
            /* if transmitting on all 3 phases and not in cert mode,
             * we send the nncco on all 3 phases simultaneously,
             * so just one packet is enough.
             */
            mac_tx_nncco(vdev->ref_pdev_id,
                vdev->bcn_ctx.fc.nid, FC_DELIM_NNCCO, HW_DESC_TX_PORT_PLC,
                3, dur, last_ntb, curr_ntb, nid, tdphase);
        }
    }
}

static void mac_sched_nn_tx_start(mac_vdev_t *vdev, uint64_t start_ntb,
    uint64_t end_ntb)
{
    uint64_t tmp_ntb;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    if (ctx->allow_tx == 0) {
        ctx->allow_tx = 1;
        ctx->bc_all = 1;
        ctx->allow_start_ntb = start_ntb;
        ctx->allow_end_ntb = end_ntb;
        tmp_ntb = mac_sched_get_ntb64(vdev);
        if (start_ntb > tmp_ntb) {
#if MAC_SCHED_TIME_DISORDER_DEBUG
            uint64_t tmp_ntb_pt = tmp_ntb;
            tmp_ntb = start_ntb - tmp_ntb;
            if (iot_uint64_higher32(tmp_ntb) != 0) {
                iot_printf("%s, start_ntb: %lu-%lu, tmp_ntb: %lu-%lu\n",
                    __FUNCTION__,
                    iot_uint64_higher32(start_ntb),
                    iot_uint64_lower32(start_ntb),
                    iot_uint64_higher32(tmp_ntb_pt),
                    iot_uint64_lower32(tmp_ntb_pt));
                IOT_ASSERT(0);
            }
#else
            tmp_ntb = start_ntb - tmp_ntb;
            IOT_ASSERT(iot_uint64_higher32(tmp_ntb) == 0);
#endif
            /* add one more ms to avoid a 0 interval timer */
            os_start_timer(ctx->tx_timer,
                MAC_NTB_TO_MS(iot_uint64_lower32(tmp_ntb) + 1));
        } else {
            os_start_timer(ctx->tx_timer, 1);
        }
    }
}

static void mac_sched_nn_tx_stop(mac_vdev_t *vdev)
{
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    if (ctx->allow_tx) {
        os_stop_timer(ctx->tx_timer);
        ctx->allow_tx = 0;
    }
}

static void mac_sched_tx_timer_func(timer_id_t timer_id, void *arg)
{
    (void)timer_id;
    mac_msg_t *msg;

    msg = mac_alloc_msg();
    if (msg) {
        msg->type = MAC_MSG_TYPE_TIMER;
        msg->id = MAC_MSG_ID_SCHED_TX;
        msg->data1 = (uint32_t)arg;
        mac_queue_msg(msg, MAC_MSG_QUEUE_HP);
    } else {
        /* delaying the timer handling might be a better choice */
        IOT_ASSERT(0);
    }
}

static uint64_t mac_sched_nn_new_bp_start(mac_vdev_t *vdev,
    uint64_t start_ntb64, uint64_t *next_start_ntb64,
    uint16_t protect_region_dur, uint16_t csma_offset)
{
    uint32_t ntb_dur;
    uint64_t new_start_ntb64, end_ntb64;
    mac_sched_ctx_t *ctx = vdev->sched_ctx;

    new_start_ntb64 = start_ntb64;
    mac_pdev_t *pdev = get_pdev_ptr(PLC_PDEV_ID);
    new_start_ntb64 = mac_cco_check_spur_get_intvl(&pdev->mac_check_spur_ctxt, \
        new_start_ntb64);

    if (!PLC_SUPPORT_NEIGHBOR_NW_NEGO || !ctx->nw_nego_en) {
        *next_start_ntb64 = new_start_ntb64 +
            MAC_MS_TO_NTB(vdev->bcn_ctx.time_slot.bc_period);
        goto out;
    }

    ntb_dur = MAC_MS_TO_NTB(ctx->region_dur);
    if (ntb_dur == 0) {
        /* if the protected region length is zero, this may be the first beacon
         * period. assume the next protected region is the same as the current
         * protected region.
         */
        ntb_dur = MAC_MS_TO_NTB(vdev->bcn_ctx.time_slot.protected_region_dur);
        if (ntb_dur == 0) {
            *next_start_ntb64 = new_start_ntb64 +
                MAC_MS_TO_NTB(vdev->bcn_ctx.time_slot.bc_period);
            goto out;
        }
    }

    new_start_ntb64 = mac_sched_nn_calc_start_ntb(vdev, new_start_ntb64,
        (uint16_t)MAC_NTB_TO_MS(ntb_dur), &end_ntb64);

    *next_start_ntb64 = new_start_ntb64 +
                MAC_MS_TO_NTB(vdev->bcn_ctx.time_slot.bc_period);
    if (end_ntb64 > *next_start_ntb64)
        *next_start_ntb64 = end_ntb64;

    /* init new protected region length */
    ctx->region_dur = protect_region_dur;
    mac_sched_nn_update(vdev);
    /* start the tx timer for the nncco frame */
    if (vdev->bcn_ctx.cco.started) {
#if MAC_SCHED_TIME_DISORDER_DEBUG
        iot_printf("%s, start_ntb: %lu-%lu, csma_offset:%lu\n", __FUNCTION__,
            iot_uint64_higher32(new_start_ntb64),
            iot_uint64_lower32(new_start_ntb64),
            csma_offset);
#endif
        mac_sched_nn_tx_start(vdev,
            new_start_ntb64 + MAC_MS_TO_NTB(csma_offset),
            (*next_start_ntb64 - MAC_MS_TO_NTB(MAC_BP_AHEAD_ALERT_DUR)));
    }
out:
    //if (new_start_ntb64 != start_ntb64) {
        iot_printf("%s start_ntb: %lu-%lu, new_start_ntb %lu-%lu, "
            "next_start_ntb %lu-%lu\n", __FUNCTION__,
            iot_uint64_higher32(start_ntb64),
            iot_uint64_lower32(start_ntb64),
            iot_uint64_higher32(new_start_ntb64),
            iot_uint64_lower32(new_start_ntb64),
            iot_uint64_higher32(*next_start_ntb64),
            iot_uint64_lower32(*next_start_ntb64));
    //}
    return new_start_ntb64;
}

#if HW_PLATFORM == HW_PLATFORM_SIMU

static uint8_t mac_sched_extract_nn_info(mac_vdev_t *vdev,\
    mac_nn_info_t *nn_info, void *fc, uint32_t rx_ntb)
{
    uint8_t need_check = 0;
    uint64_t ntb64;
    nid_t nid;

    nncco_fc_info_t nn_fc_info = { 0 };
    mac_get_nncco_sw_info_from_fc(fc, &nn_fc_info);
    if (ERR_OK == vdev_get_nid(vdev, &nid)) {
        if (nn_fc_info.sw_receive_nid == nid) {
            nn_info->watched = 1;
        }
    }

    if (nn_fc_info.sw_duration) {
        nn_info->region_dur = (uint16_t)nn_fc_info.sw_duration;
        /* for the simulator case, only 32bit ntb is supported */
        ntb64 = nn_fc_info.start_ntb_l32;
        if (ntb64 > nn_info->start_ntb) {
            if ((ntb64 - MAC_SCHED_NN_BW_CHG_TH) >= nn_info->start_ntb)
                need_check = 1;
        } else {
            if ((ntb64 + MAC_SCHED_NN_BW_CHG_TH) <= nn_info->start_ntb)
                need_check = 1;
        }
        nn_info->rx_ntb = rx_ntb;
        nn_info->start_ntb = ntb64;
        nn_info->end_ntb = nn_info->start_ntb +
            MAC_MS_TO_NTB(nn_info->region_dur);
    } else {
        nn_info->region_dur = 0;
    }

    return need_check;
}

uint32_t mac_sched_get_nidmap(mac_vdev_t *vdev)
{
    (void)vdev;
    return 0;
}

#else /* HW_PLATFORM == HW_PLATFORM_SIMU */

/* convert a passed ntb32 value to an ntb64 value. the ntb32 value must be no
 * more than 0xFFFFFFFF ntb in the past relative to the current ntb.
 */
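/* Example of the wrap-around handling below (illustrative values): if the
 * current 64bit ntb is 0x00000002_00000010 and the captured ntb32 is
 * 0xFFFFFFF0, the low half (0x10) is smaller than ntb32, so the counter must
 * have wrapped since the capture; the upper half is decremented and the
 * result becomes 0x00000001_FFFFFFF0, i.e. a time slightly in the past as
 * expected.
 */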
uint64_t mac_sched_ntb_32_to_64(mac_vdev_t *vdev, uint32_t ntb32)
{
    uint64_t ntb64;
    uint32_t l_ntb, h_ntb;
    ntb64 = mac_sched_get_ntb64(vdev);
    l_ntb = iot_uint64_lower32(ntb64);
    h_ntb = iot_uint64_higher32(ntb64);
    if (l_ntb < ntb32) {
        /* wrap around happened, move h_ntb back */
        IOT_ASSERT(h_ntb);
        h_ntb--;
    }
    ntb64 = h_ntb;
    ntb64 <<= 32;
    ntb64 |= ntb32;

    return ntb64;
}

static uint8_t mac_sched_extract_nn_info(mac_vdev_t *vdev,\
    mac_nn_info_t *nn_info, void *fc, uint32_t rx_ntb)
{
    uint8_t need_check = 0;
    uint64_t ntb64, rx_ntb64;
    nid_t nid;

    nncco_fc_info_t nn_fc_info = { 0 };
    mac_get_nncco_info_from_fc(fc, &nn_fc_info);

    uint32_t dur = nn_fc_info.duration;
    uint32_t start_offset = nn_fc_info.sbandoffset;
    vdev_get_nid(vdev, &nid);
    nn_info->self_ch_id = nn_fc_info.self_rf_channel;
    nn_info->self_option = nn_fc_info.self_rf_option;

    uint32_t rlt = mac_nncco_nid_compare(nn_fc_info.receive_nid, nid);
    if (!rlt) {
        /* NOTE: not_watch_cnt is only 7 bits, so it must not exceed 127 */
        if (++nn_info->not_watch_cnt > MAC_SCHED_NN_NOT_WATCH_MAX) {
            iot_printf("%s, nid:0x%x not see us\n",
                __FUNCTION__, nn_fc_info.nid);
            nn_info->not_watch_cnt = 0;
            nn_info->watched = 0;
        }
    } else {
        nn_info->not_watch_cnt = 0;
        nn_info->watched = rlt;
    }

    if (dur &&
        dur < MAC_SCHED_NN_MAX_REGION) {
        nn_info->region_dur = (uint16_t)dur;
        rx_ntb64 = mac_sched_ntb_32_to_64(vdev, rx_ntb);
        ntb64 = rx_ntb64 + MAC_MS_TO_NTB(start_offset);
        if (ntb64 > nn_info->start_ntb) {
            if ((ntb64 - MAC_SCHED_NN_BW_CHG_TH) >= nn_info->start_ntb)
                need_check = 1;
        } else {
            if ((ntb64 + MAC_SCHED_NN_BW_CHG_TH) <= nn_info->start_ntb)
                need_check = 1;
        }
        nn_info->rx_ntb = rx_ntb64;
        nn_info->start_ntb = ntb64;
        nn_info->end_ntb = nn_info->start_ntb +
            MAC_MS_TO_NTB(nn_info->region_dur);
    } else {
        nn_info->region_dur = 0;
    }
    return need_check;
}

#if SUPPORT_SOUTHERN_POWER_GRID
uint32_t mac_sched_get_nidmap(mac_vdev_t *vdev)
{
    mac_sched_ctx_t *ctx = vdev->sched_ctx;
    uint32_t nid_bitmap = 0;
    mac_nn_info_t *nn_info = NULL;
    for (uint32_t i = 0; i < ctx->curr_nid_list_cnt; i++) {
        nn_info = &ctx->curr_nid_list[i];
        nid_bitmap |= (1 << (nn_info->nid - 1));
    }
    return nid_bitmap;
}
#else
uint32_t mac_sched_get_nidmap(mac_vdev_t *vdev)
{
    (void)vdev;
    return 0;
}
#endif

#endif /* HW_PLATFORM == HW_PLATFORM_SIMU */

 | |
| void mac_sched_nn_rx(mac_pdev_t *pdev, void *fc, uint32_t ntb,
 | |
|     int8_t snr, uint8_t is_rf, uint8_t band_id)
 | |
| {
 | |
|     uint8_t i, need_check = 0;
 | |
|     uint64_t ntb64, tmp_ntb64;
 | |
|     mac_vdev_t *vdev = pdev->vdev[0];
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     mac_nn_info_t *nn_info = NULL;
 | |
| 
 | |
|     uint32_t proto = PHY_PROTO_TYPE_GET();
 | |
|     uint32_t nid = mac_get_nid_from_fc(proto, fc);
 | |
|     nid_t vnid;
 | |
| 
 | |
|     if (mac_vdev_cfg_get_node_role(vdev) != PLC_DEV_ROLE_CCO) {
 | |
|         /* todo: get rf info by mac_sched_extract_nn_info */
 | |
|         mac_sched_cco_snr_rx(vdev, nid, snr,
 | |
|             mac_get_nncco_self_ch_id(proto, fc),
 | |
|             mac_get_nncco_self_option(proto, fc), is_rf, band_id);
 | |
|         return;
 | |
|     }
 | |
| 
 | |
| #if MAC_SCHED_NN_DEBUG
 | |
|     mac_debug_nncco_info(fc, ntb);
 | |
| #endif /* MAC_SCHED_NN_DEBUG */
 | |
| 
 | |
|     if (nid == 0)
 | |
|         return;
 | |
| 
 | |
|     vdev_get_nid(vdev, &vnid);
 | |
|     if (nid == vnid) {
 | |
|         ctx->nid_conflict = 1;
 | |
|     } else if ((snr < -3) && !mac_get_cert_test_flag()) {
 | |
|         /* ignore network with low snr, reduce unnecessary negotiation */
 | |
|         return;
 | |
|     }
 | |
| 
 | |
|     for (i = 0; i < ctx->curr_nid_list_cnt; i++) {
 | |
|         if (ctx->curr_nid_list[i].nid == nid) {
 | |
|             /* found existing slot */
 | |
|             nn_info = &ctx->curr_nid_list[i];
 | |
|             break;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     if (i == ctx->curr_nid_list_cnt) {
 | |
|         if (ctx->curr_nid_list_cnt < PLC_MAX_NEIGHBOR_NETWORK) {
 | |
|             nn_info = &ctx->curr_nid_list[ctx->curr_nid_list_cnt++];
 | |
|             nn_info->nid = nid;
 | |
|         } else {
 | |
|             goto out;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     if (nn_info) {
 | |
|         need_check = mac_sched_extract_nn_info(vdev, nn_info, fc, ntb);
 | |
|     }
 | |
| 
 | |
|     if (PLC_SUPPORT_NEIGHBOR_NW_NEGO && ctx->nw_nego_en && need_check
 | |
|         && ctx->region_dur) {
 | |
|         tmp_ntb64 = mac_sched_nn_calc_start_ntb(vdev,
 | |
|             vdev->bcn_ctx.cco.next_start_ntb64, ctx->region_dur, &ntb64);
 | |
|         if (tmp_ntb64 != vdev->bcn_ctx.cco.next_start_ntb64) {
 | |
|             vdev->bcn_ctx.cco.next_start_ntb64 = tmp_ntb64;
 | |
|             iot_printf("%s conflict detected nid:0x%x,"
 | |
|                     "next_bcn:%lu-%lu\n", __FUNCTION__, nid,
 | |
|                     iot_uint64_higher32(tmp_ntb64),
 | |
|                     iot_uint64_lower32(tmp_ntb64));
 | |
|             /* local network bandwidth allocation changed, notify
 | |
|             * neighbor networks as soon as possible.
 | |
|             */
 | |
|             mac_sched_nn_update(vdev);
 | |
|             mac_sched_nn_tx_internal(vdev, nn_info->nid);
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     if ((nid == vnid) || ((RF_CHANNEL_ID_INVALID != nn_info->self_ch_id)
 | |
|         && (RF_OPTION_INVALID != nn_info->self_option)
 | |
|         && (nn_info->self_ch_id == mac_rf_get_self_channel())
 | |
|         && (nn_info->self_option == mac_rf_get_self_option()))) {
 | |
|         /* NID or channel id conflict detected, report to cvg immediately */
 | |
|         mac_sched_nn_report_nid(vdev);
 | |
|     }
 | |
| out:
 | |
|     return;
 | |
| }
 | |
| 
 | |
| void mac_sched_nn_tx(mac_vdev_t *vdev)
 | |
| {
 | |
|     uint8_t i;
 | |
|     uint32_t nid;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
| 
 | |
|     if (PLC_SUPPORT_NEIGHBOR_NW_NEGO && ctx->nw_nego_en && ctx->allow_tx) {
 | |
|         os_start_timer(ctx->tx_timer, MAC_SCHED_NN_TX_INTERVAL);
 | |
|         mac_sched_nn_update(vdev);
 | |
|         if (ctx->bc_all) {
 | |
|             ctx->bc_all = 0;
 | |
|             if (ctx->curr_nid_list_cnt) {
 | |
|                 for (i = 0; i < ctx->curr_nid_list_cnt; i++) {
 | |
|                     nid = ctx->curr_nid_list[i].nid;
 | |
|                     mac_sched_nn_tx_internal(vdev, nid);
 | |
|                 }
 | |
|             } else {
 | |
|                 mac_sched_nn_tx_internal(vdev, 0);
 | |
|             }
 | |
|         } else {
 | |
|             if (ctx->nid_bc_idx < ctx->curr_nid_list_cnt) {
 | |
|                 nid = ctx->curr_nid_list[ctx->nid_bc_idx++].nid;
 | |
|             } else if (ctx->curr_nid_list_cnt) {
 | |
|                 ctx->nid_bc_idx = 0;
 | |
|                 nid = ctx->curr_nid_list[ctx->nid_bc_idx++].nid;
 | |
|             } else {
 | |
|                 ctx->nid_bc_idx = 0;
 | |
|                 nid = 0;
 | |
|             }
 | |
|             mac_sched_nn_tx_internal(vdev, nid);
 | |
|         }
 | |
|     }
 | |
| }
 | |
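| /* Illustrative tx pattern for the broadcast logic above: with
|  * curr_nid_list = {5, 9, 12} and bc_all cleared, successive tx timer
|  * expirations send one negotiation frame per interval, cycling through
|  * nid 5 -> 9 -> 12 -> 5 -> ...; with an empty list the frame goes out with
|  * nid 0. When bc_all is set, a frame is sent to every known neighbor nid in
|  * a single pass.
|  */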
| 
 | |
| static void mac_sched_cco_init(mac_vdev_t *vdev)
 | |
| {
 | |
|     uint32_t id;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
| 
 | |
|     if (mac_vdev_cfg_get_node_role(vdev) == PLC_DEV_ROLE_CCO) {
 | |
|         id = (vdev->ref_pdev_id << 8) | vdev->vdev_id;
 | |
|         ctx->tx_timer = os_create_timer(PLC_MAC_SCHED_MID, false,
 | |
|             mac_sched_tx_timer_func, (void *)id);
 | |
|         ctx->nw_nego_en = 1;
 | |
|     }
 | |
| }
 | |
| 
 | |
| static void mac_sched_cco_stop(mac_vdev_t *vdev)
 | |
| {
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
| 
 | |
|     if (mac_vdev_cfg_get_node_role(vdev) == PLC_DEV_ROLE_CCO) {
 | |
|         mac_sched_nn_tx_stop(vdev);
 | |
|         ctx->nid_conflict = 0;
 | |
|     }
 | |
| }
 | |
| 
 | |
| /* cco role device needs to take care of both phase and band config */
 | |
| uint64_t mac_sched_cco_set(mac_vdev_t *vdev, mac_bc_time_slot_t *ts,
 | |
|     uint64_t start_ntb64, uint64_t *next_start_ntb64)
 | |
| {
 | |
|     uint16_t i, cnt = 0;
 | |
|     uint64_t tmp_ntb;
 | |
|     uint32_t tmp_cnt;
 | |
|     uint16_t csma_slot_cnt, csma_frag, start_offset = 0;
 | |
|     uint16_t csma_offset, end_offset;
 | |
|     hw_sched_cmd_list_t *cl = NULL;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_t *cmd, *prev_cmd;
 | |
|     uint32_t proto = PHY_PROTO_TYPE_GET();
 | |
| 
 | |
|     cl = mac_sched_alloc_cmd_list(vdev);
 | |
|     IOT_ASSERT(cl);
 | |
|     ctx->curr_hw_cmd_list = cl;
 | |
|     cmd = cl->cmd;
 | |
| 
 | |
|     //TODO: dbg for hw sched cmd
 | |
|     cl->alloc_ntb = mac_sched_get_ntb(vdev);
 | |
|     cl->caller = 1;
 | |
| 
 | |
|     /* prepare central beacon slot */
 | |
|     for (i = 0; i < ts->cco_bc_cnt; i++) {
 | |
|         if (i < PLC_PHASE_CNT) {
 | |
|             cmd->phase = i + 1;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_wb_bc_q(vdev, (uint8_t)cmd->phase);
 | |
|             start_offset += ts->bc_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         } else {
 | |
|             cmd->phase = PLC_PHASE_ALL;
 | |
|             cmd->tx_q_en_bm = 0;
 | |
|             start_offset += ts->bc_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         }
 | |
|     }
 | |
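|     /* Worked example (illustrative values): with cco_bc_cnt = 3 and
|      * bc_slot_dur = 20 ms, the three commands above get end_t = 20, 40 and
|      * 60, i.e. each hw sched command stores the cumulative offset from the
|      * start of the beacon period rather than its own duration.
|      */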
| 
 | |
|     /* prepare proxy and discovery beacon slots */
 | |
|     if (ts->non_cco_bc_info.bc_cnt) {
 | |
|         cmd->phase = ts->non_cco_bc_info.phase[0];
 | |
|         start_offset += ts->bc_slot_dur;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         for (i = 1; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|             if (cmd->phase != ts->non_cco_bc_info.phase[i]) {
 | |
|                 /* a new time slot required */
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = ts->non_cco_bc_info.phase[i];
 | |
|             }
 | |
|             start_offset += ts->bc_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|         }
 | |
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|     }
 | |
| 
 | |
|     /* prepare tdma slot here (tdma is placed behind csma only for SPG) */
 | |
|     if ((proto != PLC_PROTO_TYPE_SPG) && ts->tdma_slot_dur) {
 | |
|         /* prepare tdma for cco device A, B, C phase */
 | |
|         for (i = 0; i < ts->cco_bc_cnt && i < PLC_PHASE_CNT; i++) {
 | |
|             cmd->phase = i + 1;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_tdma_q(vdev, (uint8_t)cmd->phase);
 | |
|             start_offset += ts->tdma_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         }
 | |
|         /* prepare tdma for pco and sta device */
 | |
|         if (ts->non_cco_bc_info.bc_cnt) {
 | |
|             cmd->phase = ts->non_cco_bc_info.phase[0];
 | |
|             start_offset += ts->tdma_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             for (i = 1; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|                 if (cmd->phase != ts->non_cco_bc_info.phase[i]) {
 | |
|                     /* a new time slot required */
 | |
|                     cnt++;
 | |
|                     if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                         IOT_ASSERT(0);
 | |
|                     cmd++;
 | |
|                     cmd->phase = ts->non_cco_bc_info.phase[i];
 | |
|                 }
 | |
|                 start_offset += ts->tdma_slot_dur;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|             }
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     csma_offset = start_offset;
 | |
|     /* prepare csma slot */
 | |
|     csma_frag = ts->csma_slot_frag_s;
 | |
|     csma_slot_cnt = mac_sched_split_csma(&ts->csma_info, ts->csma_slot_frag_s);
 | |
|     if (csma_slot_cnt) {
 | |
|         cmd->phase = g_csma_slot[0].phase;
 | |
|         if (PLC_SUPPORT_CCO_TX_3_PHASE_SLOT) {
 | |
|             /* always enable PHASE A HWQ as the cco always sends packets in
 | |
|              * 3 physical phases simultaneously.
 | |
|              */
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|         } else {
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, (uint8_t)cmd->phase);
 | |
|         }
 | |
|         if (g_csma_slot[0].last) {
 | |
|             start_offset += g_slot_last[g_csma_slot[0].phase - 1];
 | |
|         } else {
 | |
|             start_offset += csma_frag;
 | |
|         }
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         for (i = 1; i < csma_slot_cnt; i++) {
 | |
|             if (cmd->phase != g_csma_slot[i].phase) {
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = g_csma_slot[i].phase;
 | |
|                 if (PLC_SUPPORT_CCO_TX_3_PHASE_SLOT) {
 | |
|                     /* always enable PHASE A HWQ as the cco always sends packets in
 | |
|                      * 3 physical phases simultaneously.
 | |
|                      */
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|                 } else {
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                         (uint8_t)cmd->phase);
 | |
|                 }
 | |
|             }
 | |
|             if (g_csma_slot[i].last) {
 | |
|                 start_offset += g_slot_last[g_csma_slot[i].phase - 1];
 | |
|             } else {
 | |
|                 start_offset += csma_frag;
 | |
|             }
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|         }
 | |
|         if (MAC_SCHED_CSMA_GAP_MS) {
 | |
|             prev_cmd = cmd - 1;
 | |
|             if ((cmd->t_info.e.end_t - prev_cmd->t_info.e.end_t) >
 | |
|                 MAC_SCHED_CSMA_GAP_MS) {
 | |
|                 /* last slot has enough room to reserve the gap */
 | |
|                 cmd->t_info.e.end_t -= MAC_SCHED_CSMA_GAP_MS;
 | |
|                 prev_cmd = cmd;
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|                 cmd->phase = prev_cmd->phase;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|             } else {
 | |
|                 /* last slot does not have enough room to reserve the gap, disable tx
 | |
|                  * for the last slot.
 | |
|                  */
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|             }
 | |
|         }
 | |
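|         /* Worked example for the gap handling above (illustrative, assuming
|          * MAC_SCHED_CSMA_GAP_MS is 40 ms): if the last csma command spans
|          * 400..480 ms it is shortened to end at 440 ms and a silent command
|          * (tx_q_en_bm = 0) covering 440..480 ms is appended; if it spans only
|          * 450..480 ms, tx is simply disabled on it instead.
|          */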
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|     }
 | |
| 
 | |
|     /* prepare tdma slot. tdma is behind csma for SPG */
 | |
|     if ((proto == PLC_PROTO_TYPE_SPG) && ts->tdma_slot_dur) {
 | |
|         /* prepare tdma for cco device A, B, C phase */
 | |
|         for (i = 0; i < ts->cco_bc_cnt && i < PLC_PHASE_CNT; i++) {
 | |
|             cmd->phase = i + 1;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_tdma_q(vdev, (uint8_t)cmd->phase);
 | |
|             start_offset += ts->tdma_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         }
 | |
|         /* prepare tdma for pco and sta device */
 | |
|         if (ts->non_cco_bc_info.bc_cnt) {
 | |
|             cmd->phase = ts->non_cco_bc_info.phase[0];
 | |
|             start_offset += ts->tdma_slot_dur;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             for (i = 1; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|                 if (cmd->phase != ts->non_cco_bc_info.phase[i]) {
 | |
|                     /* a new time slot required */
 | |
|                     cnt++;
 | |
|                     if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                         IOT_ASSERT(0);
 | |
|                     cmd++;
 | |
|                     cmd->phase = ts->non_cco_bc_info.phase[i];
 | |
|                 }
 | |
|                 start_offset += ts->tdma_slot_dur;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|             }
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     /* prepare dedicated csma slot */
 | |
|     csma_slot_cnt = mac_sched_split_csma(&ts->d_csma_info,
 | |
|         ts->csma_slot_frag_s);
 | |
|     if (csma_slot_cnt) {
 | |
|         cmd->phase = g_csma_slot[0].phase;
 | |
|         if (PLC_SUPPORT_CCO_TX_3_PHASE_SLOT) {
 | |
|             /* always enable PHASE A HWQ as the cco always sends packets in
 | |
|              * 3 physical phases simultaneously.
 | |
|              */
 | |
|             cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev, PLC_PHASE_A);
 | |
|         } else {
 | |
|             cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev, (uint8_t)cmd->phase);
 | |
|         }
 | |
|         if (g_csma_slot[0].last) {
 | |
|             start_offset += g_slot_last[g_csma_slot[0].phase - 1];
 | |
|         } else {
 | |
|             start_offset += csma_frag;
 | |
|         }
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         for (i = 1; i < csma_slot_cnt; i++) {
 | |
|             if (cmd->phase != g_csma_slot[i].phase) {
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = g_csma_slot[i].phase;
 | |
|                 if (PLC_SUPPORT_CCO_TX_3_PHASE_SLOT) {
 | |
|                     /* always enable PHASE A HWQ as the cco always sends packets in
 | |
|                      * 3 physical phases simultaneously.
 | |
|                      */
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev, PLC_PHASE_A);
 | |
|                 } else {
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev,
 | |
|                         (uint8_t)cmd->phase);
 | |
|                 }
 | |
|             }
 | |
|             if (g_csma_slot[i].last) {
 | |
|                 start_offset += g_slot_last[g_csma_slot[i].phase - 1];
 | |
|             } else {
 | |
|                 start_offset += csma_frag;
 | |
|             }
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|         }
 | |
|         if (MAC_SCHED_CSMA_GAP_MS) {
 | |
|             prev_cmd = cmd - 1;
 | |
|             if ((cmd->t_info.e.end_t - prev_cmd->t_info.e.end_t) >
 | |
|                 MAC_SCHED_CSMA_GAP_MS) {
 | |
|                 /* last slot has enough room to reserve the gap */
 | |
|                 cmd->t_info.e.end_t -= MAC_SCHED_CSMA_GAP_MS;
 | |
|                 prev_cmd = cmd;
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|                 cmd->phase = prev_cmd->phase;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|             } else {
 | |
|                 /* last slot does not have enough room to reserve the gap, disable tx
 | |
|                  * for the last slot.
 | |
|                  */
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|             }
 | |
|         }
 | |
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|     }
 | |
| 
 | |
|     start_ntb64 = mac_sched_nn_new_bp_start(vdev, start_ntb64,
 | |
|         next_start_ntb64, ts->protected_region_dur, csma_offset);
 | |
|     cl->start_ntb = iot_uint64_lower32(start_ntb64);
 | |
|     cl->start_ntb_h = iot_uint64_higher32(start_ntb64);
 | |
|     tmp_ntb = *next_start_ntb64 - start_ntb64;
 | |
|     IOT_ASSERT(iot_uint64_higher32(tmp_ntb) == 0);
 | |
|     end_offset = (uint16_t)MAC_NTB_TO_MS(iot_uint64_lower32(tmp_ntb));
 | |
|     while (end_offset > start_offset) {
 | |
|         /* there is a hole from the end of the current bp to the start of the next
 | |
|          * bp, fill in rx only slot for the hole to:
 | |
|          * 1. delay the next beacon end alert to delay the next bp start ntb
 | |
|          * final calculation to avoid protected region conflict.
 | |
|          * 2. switch among different phases to increase the possibility of
 | |
|          * receiving neighbor network negotiation frames from other networks.
 | |
|          */
 | |
|         start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|         if (start_offset > end_offset)
 | |
|             start_offset = end_offset;
 | |
|         cmd->phase = vdev->l_phase1;
 | |
|         cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, (uint8_t)cmd->phase);
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         cnt++;
 | |
|         cmd++;
 | |
|         if (vdev->l_phase2 && end_offset > start_offset) {
 | |
|             start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|             if (start_offset > end_offset)
 | |
|                 start_offset = end_offset;
 | |
|             cmd->phase = vdev->l_phase2;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, (uint8_t)cmd->phase);
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         }
 | |
|         if (vdev->l_phase3 && end_offset > start_offset) {
 | |
|             start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|             if (start_offset > end_offset)
 | |
|                 start_offset = end_offset;
 | |
|             cmd->phase = vdev->l_phase3;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, (uint8_t)cmd->phase);
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             cmd++;
 | |
|         }
 | |
|     }
 | |
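|     /* Worked example for the hole filling above (illustrative, assuming
|      * MAC_SCHED_CCO_RX_ONLY_SLOT_DUR is 100 ms): with a 250 ms gap to the
|      * next bp start, the loop appends filler slots ending 100, 200 and
|      * 250 ms into the gap (the last one is clipped to end_offset), rotating
|      * through l_phase1/l_phase2/l_phase3 (when configured) so every phase
|      * gets listening time.
|      */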
| 
 | |
|     tmp_ntb = mac_sched_get_ntb64(vdev);
 | |
|     if ((tmp_ntb + MAC_SCHED_INSERT_CSMA_TH) < start_ntb64) {
 | |
|         /* there is a hole between current ntb and start ntb of the new beacon
 | |
|          * period. let's insert csma region to improve the bandwidth
 | |
|          * utilization.
 | |
|          */
 | |
|         tmp_cnt = (uint32_t)(MAC_NTB_TO_MS(start_ntb64 - tmp_ntb) /
 | |
|             MAC_SCHED_CCO_RX_ONLY_SLOT_DUR);
 | |
|         /* make sure command list memory is enough */
 | |
|         tmp_cnt = min(tmp_cnt, ((uint32_t)HW_SCHED_CMD_MAX_CNT - cnt));
 | |
|         /* make sure the command end offset won't overflow */
 | |
|         tmp_cnt = min(tmp_cnt,
 | |
|             (0x7FFF - start_offset) / (uint32_t)MAC_SCHED_CCO_RX_ONLY_SLOT_DUR);
 | |
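|         /* Worked example of the two caps above (illustrative, assuming a
|          * 100 ms slot): a 1.2 s hole yields tmp_cnt = 12, which is then
|          * limited by the free command entries (HW_SCHED_CMD_MAX_CNT - cnt)
|          * and by how many slots still fit before a command end offset would
|          * exceed 0x7FFF.
|          */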
|         if (tmp_cnt) {
 | |
|             iot_printf("%s insert csma %lu into hole from %lu to %lu\n",
 | |
|                 __FUNCTION__, tmp_cnt, iot_uint64_lower32(tmp_ntb),
 | |
|                 iot_uint64_lower32(start_ntb64));
 | |
|             /* insert as many csma commands into the hole as possible */
 | |
|             os_mem_move(&cl->cmd[tmp_cnt], &cl->cmd[0],
 | |
|                 sizeof(cl->cmd[0]) * cnt);
 | |
|             cmd = cl->cmd;
 | |
|             start_offset = 0;
 | |
|             i = 0;
 | |
|             while (i < tmp_cnt) {
 | |
|                 start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|                 cmd->phase = vdev->l_phase1;
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                     (uint8_t)cmd->phase);
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 cnt++;
 | |
|                 cmd++;
 | |
|                 i++;
 | |
| 
 | |
|                 if (vdev->l_phase2 && i < tmp_cnt) {
 | |
|                     start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|                     cmd->phase = vdev->l_phase2;
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                         (uint8_t)cmd->phase);
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
|                     cnt++;
 | |
|                     cmd++;
 | |
|                     i++;
 | |
|                 }
 | |
|                 if (vdev->l_phase3 && i < tmp_cnt) {
 | |
|                     start_offset += MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
 | |
|                     cmd->phase = vdev->l_phase3;
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                         (uint8_t)cmd->phase);
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
|                     cnt++;
 | |
|                     cmd++;
 | |
|                     i++;
 | |
|                 }
 | |
|             }
 | |
|             cmd--;
 | |
|             /* disable tx for the last csma command before beacon region.
 | |
|              * see MAC_SCHED_CSMA_GAP_MS for more info.
 | |
|              */
 | |
|             cmd->tx_q_en_bm = 0;
 | |
|             cmd++;
 | |
|             /* fix the end offset of following commands */
 | |
|             for (; i < cnt; i++) {
 | |
|                 cmd->t_info.e.end_t += start_offset;
 | |
|                 cmd++;
 | |
|             }
 | |
|             cl->start_ntb -=
 | |
|                 (MAC_MS_TO_NTB(MAC_SCHED_CCO_RX_ONLY_SLOT_DUR) * tmp_cnt);
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     cl->total_cnt = cnt;
 | |
|     mac_sched_dump(cl, cnt);
 | |
| 
 | |
|     mac_sched_load_cmd_list(vdev, cl);
 | |
|     mac_sched_enable_bp(vdev, 1);
 | |
| 
 | |
|     return start_ntb64;
 | |
| }
 | |
| 
 | |
| /* cco role device needs to take care of both phase and band config */
 | |
| void mac_sched_cco_set_htbus(mac_vdev_t *vdev, mac_bc_htbus_time_slot_t *ts,
 | |
|     uint64_t start_ntb64)
 | |
| {
 | |
|     uint16_t cnt = 0;
 | |
|     uint16_t start_offset = 0;
 | |
|     uint8_t rsp_slot_cnt;
 | |
|     hw_sched_cmd_list_t *cl = NULL;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_t *cmd;
 | |
| 
 | |
|     cl = mac_sched_alloc_cmd_list(vdev);
 | |
|     IOT_ASSERT(cl);
 | |
|     ctx->curr_hw_cmd_list = cl;
 | |
|     cmd = cl->cmd;
 | |
| 
 | |
|     cl->alloc_ntb = mac_sched_get_ntb(vdev);
 | |
|     cl->caller = 1;
 | |
| 
 | |
|     cl->start_ntb = iot_uint64_lower32(start_ntb64);
 | |
|     cl->start_ntb_h = iot_uint64_higher32(start_ntb64);
 | |
| 
 | |
|     /* prepare request command slot */
 | |
|     if (ts->cco_slot) {
 | |
|         cmd->phase = vdev->l_phase1;
 | |
|         start_offset += ts->cco_slot;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         cmd->tx_q_en_bm = mac_sched_get_wb_bc_q(vdev, (uint8_t)cmd->phase);
 | |
|         cnt++;
 | |
|         cmd++;
 | |
|     }
 | |
|     /* prepare response command slot */
 | |
|     rsp_slot_cnt = (uint8_t)iot_bitmap_cbs(ts->rsp_bm, sizeof(ts->rsp_bm));
 | |
|     if (rsp_slot_cnt) {
 | |
|         cmd->phase = vdev->l_phase1;
 | |
|         start_offset += ts->sta_slot * rsp_slot_cnt;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         cnt++;
 | |
|     }
 | |
| 
 | |
|     cl->total_cnt = cnt;
 | |
|     mac_sched_dump(cl, cnt);
 | |
|     mac_sched_load_cmd_list(vdev, cl);
 | |
|     mac_sched_enable_bp(vdev, 1);
 | |
| }
 | |
| 
 | |
| void mac_sched_cco_set_nw_nego(mac_vdev_t *vdev, uint8_t enable)
 | |
| {
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
| 
 | |
|     ctx->nw_nego_en = !!enable;
 | |
| }
 | |
| 
 | |
| void mac_sched_cco_bc_alert(mac_vdev_t *vdev)
 | |
| {
 | |
|     uint64_t tmp;
 | |
|     uint8_t cco_started = 0;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     mac_rf_vdev_t *rf_vdev = get_rf_vdev_ptr(vdev->ref_pdev_id,
 | |
|         RF_PDEV_ID, vdev->rf_vdev_id);
 | |
| 
 | |
|     if (vdev->is_up) {
 | |
|         IOT_ASSERT(ctx->curr_hw_cmd_list);
 | |
|         /* load next portion of the current command list */
 | |
|         if (mac_sched_load_cmd_list(vdev, ctx->curr_hw_cmd_list)) {
 | |
|             mac_pdev_t *pdev = get_pdev_ptr(vdev->ref_pdev_id);
 | |
|             mac_cco_check_spur_start_alert(&pdev->mac_check_spur_ctxt);
 | |
|             /* check bcsma pending */
 | |
|             mac_tx_flush_bcsma_pending_queue(&pdev->hwq_hdl);
 | |
|             /* current command list is done */
 | |
|             if (vdev->bcn_ctx.cco.started) {
 | |
|                 cco_started = 1;
 | |
|                 vdev->start_cfg.mac_bp_end_alert_func(
 | |
|                     vdev->start_cfg.mac_callback_arg);
 | |
|                 mac_sched_nn_tx_stop(vdev);
 | |
|             } else {
 | |
|                 IOT_ASSERT(ctx->curr_hw_cmd_list->next_idx ==
 | |
|                     ctx->curr_hw_cmd_list->total_cnt);
 | |
|                 /* local device is not beaconing, put device in RX only mode */
 | |
|                 tmp = mac_sched_get_ntb64(vdev) + \
 | |
|                     MAC_MS_TO_NTB(MAC_BP_AHEAD_ALERT_DUR);
 | |
|                 tmp = mac_cco_check_spur_get_intvl(&pdev->mac_check_spur_ctxt, \
 | |
|                     tmp);
 | |
| 
 | |
|                 mac_sched_set_csma_only(vdev, tmp, 0);
 | |
|             }
 | |
|             mac_sched_nn_report_nid(vdev);
 | |
|             /* rf alert cmdlist application */
 | |
|             mac_rf_sched_cco_bc_alert(rf_vdev, cco_started);
 | |
|         } else {
 | |
|             /* make sure HW scheduler started */
 | |
|             mac_sched_enable_bp(vdev, 1);
 | |
|         }
 | |
|     }
 | |
| }
 | |
| 
 | |
| void mac_dump_sched_dbg()
 | |
| {
 | |
|     mac_pdev_t *pdev_t = get_pdev_ptr(PLC_PDEV_ID);
 | |
|     mac_vdev_t *vdev = pdev_t->vdev[0];
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_list_t *cl = ctx->curr_hw_cmd_list;
 | |
|     if (cl == NULL) {
 | |
|         return;
 | |
|     }
 | |
| 
 | |
|     uint32_t i;
 | |
|     uint32_t cnt = cl->total_cnt;
 | |
| 
 | |
|     iot_printf("%s total %lu, recusive %lu ----------------------\n",
 | |
|         __FUNCTION__, cnt, cl->recursive);
 | |
|     /* only the first few (tdma) commands are of interest here */
 | |
|     for (i = 0; i < min(3, cnt); i++) {
 | |
|         if (cl->cmd[i].t_info.e.r_flag) {
 | |
|             if (cl->cmd[i].t_info.e.s_flag) {
 | |
|                 iot_printf("rs phase - %lu, start offset %lu, "
 | |
|                     "end offset - %lu, q_bm %x\n",
 | |
|                 cl->cmd[i].phase, cl->cmd[i].t_info.se.start_t,
 | |
|                 cl->cmd[i].t_info.se.end_t, cl->cmd[i].tx_q_en_bm);
 | |
|             } else {
 | |
|                 if (cl->cmd[i].t_info.r.rf_flag) {
 | |
|                     iot_printf("rf phase - %lu, end offset - %lu, q_bm %x\n",
 | |
|                         cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
 | |
|                         cl->cmd[i].tx_q_en_bm);
 | |
|                 }
 | |
|                 if (cl->cmd[i].t_info.r.re_flag) {
 | |
|                     iot_printf("rl phase - %lu, end offset - %lu, q_bm %x\n",
 | |
|                         cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
 | |
|                         cl->cmd[i].tx_q_en_bm);
 | |
|                 }
 | |
|                 if (!cl->cmd[i].t_info.r.rf_flag &&
 | |
|                     !cl->cmd[i].t_info.r.re_flag) {
 | |
|                     iot_printf("rm phase - %lu, end offset - %lu, q_bm %x\n",
 | |
|                         cl->cmd[i].phase, cl->cmd[i].t_info.r.end_t,
 | |
|                         cl->cmd[i].tx_q_en_bm);
 | |
|                 }
 | |
|             }
 | |
|         } else if (cl->cmd[i].t_info.e.s_flag) {
 | |
|             iot_printf("st phase - %lu, start offset %lu, end offset - %lu, "
 | |
|                 "q_bm %x\n",
 | |
|                 cl->cmd[i].phase, cl->cmd[i].t_info.se.start_t,
 | |
|                 cl->cmd[i].t_info.se.end_t, cl->cmd[i].tx_q_en_bm);
 | |
|         } else {
 | |
|             iot_printf("e phase - %lu, end offset - %lu, q_bm %x\n",
 | |
|                 cl->cmd[i].phase, cl->cmd[i].t_info.e.end_t,
 | |
|                 cl->cmd[i].tx_q_en_bm);
 | |
|         }
 | |
|     }
 | |
|     iot_printf("------------------------------------------------------\n");
 | |
| }
 | |
| 
 | |
| uint64_t mac_sched_get_saved_bp_start_ntb64(mac_vdev_t *vdev)
 | |
| {
 | |
|     IOT_ASSERT(vdev);
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_list_t *cl = ctx->curr_hw_cmd_list;
 | |
|     IOT_ASSERT(cl);
 | |
|     uint64_t tmp = cl->start_ntb_h;
 | |
|     tmp <<= 32;
 | |
|     tmp |= cl->start_ntb;
 | |
|     return tmp;
 | |
| }
 | |
| 
 | |
| #else /* PLC_SUPPORT_CCO_ROLE */
 | |
| 
 | |
| void mac_sched_nn_rx(mac_pdev_t *pdev, void *fc, uint32_t ntb, int8_t snr,
 | |
|     uint8_t is_rf, uint8_t band_id)
 | |
| {
 | |
|     uint32_t proto = PHY_PROTO_TYPE_GET();
 | |
|     uint32_t nid = mac_get_nid_from_fc(proto, fc);
 | |
|     mac_vdev_t *vdev = pdev->vdev[0];
 | |
|     (void)ntb;
 | |
| 
 | |
|     mac_sched_cco_snr_rx(vdev, nid, snr,
 | |
|         mac_get_nncco_self_ch_id(proto, fc),
 | |
|         mac_get_nncco_self_option(proto, fc), is_rf, band_id);
 | |
| }
 | |
| 
 | |
| void mac_sched_nn_tx(mac_vdev_t *vdev)
 | |
| {
 | |
|     (void)vdev;
 | |
| }
 | |
| 
 | |
| #define mac_sched_cco_init(vdev)
 | |
| 
 | |
| #define mac_sched_cco_stop(vdev)
 | |
| 
 | |
| /* cco role device needs to take care of both phase and band config */
 | |
| uint64_t mac_sched_cco_set(mac_vdev_t *vdev, mac_bc_time_slot_t *ts,
 | |
|     uint64_t start_ntb64, uint64_t *next_start_ntb64)
 | |
| {
 | |
|     (void)vdev;
 | |
|     (void)ts;
 | |
|     (void)next_start_ntb64;
 | |
|     IOT_ASSERT(0);
 | |
|     return start_ntb64;
 | |
| }
 | |
| 
 | |
| void mac_sched_cco_bc_alert(mac_vdev_t *vdev)
 | |
| {
 | |
|     (void)vdev;
 | |
|     /* for STA role only builds, this should never be triggered */
 | |
|     IOT_ASSERT(0);
 | |
| }
 | |
| 
 | |
| void mac_sched_cco_set_nw_nego(mac_vdev_t *vdev, uint8_t enable)
 | |
| {
 | |
|     (void)vdev;
 | |
|     (void)enable;
 | |
| }
 | |
| 
 | |
| void mac_dump_sched_dbg()
 | |
| {
 | |
|     return;
 | |
| }
 | |
| 
 | |
| uint64_t mac_sched_get_saved_bp_start_ntb64(mac_vdev_t *vdev)
 | |
| {
 | |
|     (void)vdev;
 | |
|     return 0;
 | |
| }
 | |
| 
 | |
| #endif /* PLC_SUPPORT_CCO_ROLE */
 | |
| 
 | |
| /* sta device needs to take care of band config */
 | |
| void mac_sched_sta_set_htbus(mac_vdev_t *vdev, mac_bc_htbus_time_slot_t *ts,
 | |
|     uint32_t start_ntb)
 | |
| {
 | |
|     tei_t dst_tei, dev_tei;
 | |
|     uint16_t cnt = 0;
 | |
|     uint8_t new_cmd_required = 0;
 | |
|     uint16_t start_offset = 0;
 | |
|     hw_sched_cmd_list_t *cl = NULL;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_t *cmd;
 | |
|     const uint8_t local_phase = vdev->l_phase1;
 | |
|     cl = mac_sched_alloc_cmd_list(vdev);
 | |
|     IOT_ASSERT(cl);
 | |
|     ctx->curr_hw_cmd_list = cl;
 | |
|     cl->start_ntb = start_ntb;
 | |
|     cmd = cl->cmd;
 | |
|     cmd->phase = local_phase;
 | |
| 
 | |
|     cl->alloc_ntb = mac_sched_get_ntb(vdev);
 | |
|     cl->caller = 2;
 | |
| 
 | |
|     /* prepare request command slot */
 | |
|     if (ts->cco_slot) {
 | |
|         start_offset += ts->cco_slot;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         new_cmd_required = 1;
 | |
|     }
 | |
|     dev_tei = vdev_get_tei(vdev);
 | |
|     if (!iot_bitmap_is_set(ts->rsp_bm, sizeof(ts->rsp_bm), dev_tei + 1)) {
 | |
|         start_offset += ts->sta_slot *
 | |
|             (uint16_t)iot_bitmap_cbs(ts->rsp_bm, sizeof(ts->rsp_bm));
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         new_cmd_required = 1;
 | |
|     } else {
 | |
|         dst_tei = (tei_t)iot_bitmap_ffs_and_c(ts->rsp_bm, sizeof(ts->rsp_bm));
 | |
|         while (dst_tei) {
 | |
|             dst_tei -= 1;
 | |
|             if (dst_tei == dev_tei) {
 | |
|                 if (new_cmd_required) {
 | |
|                     cnt++;
 | |
|                     IOT_ASSERT(cnt < HW_SCHED_CMD_MAX_CNT);
 | |
|                     cmd++;
 | |
|                     cmd->phase = local_phase;
 | |
|                 }
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_wb_bc_q(vdev, local_phase);
 | |
|                 start_offset += ts->sta_slot;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 cnt++;
 | |
|                 IOT_ASSERT(cnt < HW_SCHED_CMD_MAX_CNT);
 | |
|                 cmd++;
 | |
|                 cmd->phase = local_phase;
 | |
|                 new_cmd_required = 0;
 | |
|             } else {
 | |
|                 start_offset += ts->sta_slot;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 new_cmd_required = 1;
 | |
|             }
 | |
|             dst_tei = (tei_t)iot_bitmap_ffs_and_c(
 | |
|                 ts->rsp_bm, sizeof(ts->rsp_bm));
 | |
|         }
 | |
|     }
 | |
|     if (new_cmd_required) {
 | |
|         cnt++;
 | |
|         IOT_ASSERT(cnt < HW_SCHED_CMD_MAX_CNT);
 | |
|     }
 | |
| 
 | |
|     cl->total_cnt = cnt;
 | |
|     mac_sched_dump(cl, cnt);
 | |
| 
 | |
|     mac_sched_load_cmd_list(vdev, cl);
 | |
| 
 | |
|     mac_sched_enable_bp(vdev, 1);
 | |
| }
 | |
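| /* Illustrative walk-through of the response slot handling above, assuming
|  * iot_bitmap_ffs_and_c() returns the 1-based index of the lowest set bit and
|  * clears it: with rsp_bm bits {3, 6, 8} set, the responding teis are
|  * {2, 5, 7}. A device with tei 5 folds the tei 2 slot into the preceding rx
|  * command, opens a tx command (wb bc queue) for its own slot, then covers
|  * the tei 7 slot with one more rx command.
|  */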
| 
 | |
| /* sta device needs to take care of band config */
 | |
| void mac_sched_sta_set(mac_vdev_t *vdev, mac_bc_time_slot_t *ts,
 | |
|     uint32_t start_ntb, uint8_t phase_a_tx, uint8_t phase_b_tx,
 | |
|     uint8_t phase_c_tx, uint8_t need_early_stop)
 | |
| {
 | |
|     uint16_t i, cnt = 0;
 | |
|     uint8_t new_cmd_required = 0, allowed_tx_phase = 0;
 | |
|     uint16_t csma_slot_cnt, csma_frag, min_csma_frag, start_offset = 0;
 | |
|     hw_sched_cmd_list_t *cl = NULL;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_t *cmd, *prev_cmd;
 | |
|     const uint8_t local_phase = vdev->l_phase1;
 | |
|     uint32_t proto = PHY_PROTO_TYPE_GET();
 | |
|     tei_t self_tei = vdev_get_tei(vdev);
 | |
|     /* clear flag every time  */
 | |
|     vdev->bcsma_slot_exist = 0;
 | |
|     /* check whether the dcsma lid is LID_BCSMA_FB_DETECT */
 | |
|     vdev->fb_lid_exist = (ts->d_csma_lid == LID_BCSMA_FB_DETECT);
 | |
| 
 | |
|     if (phase_a_tx) {
 | |
|         allowed_tx_phase |= 1 << PLC_PHASE_A;
 | |
|     }
 | |
|     if (phase_b_tx) {
 | |
|         allowed_tx_phase |= 1 << PLC_PHASE_B;
 | |
|     }
 | |
|     if (phase_c_tx) {
 | |
|         allowed_tx_phase |= 1 << PLC_PHASE_C;
 | |
|     }
 | |
| 
 | |
|     cl = mac_sched_alloc_cmd_list(vdev);
 | |
|     IOT_ASSERT(cl);
 | |
|     ctx->curr_hw_cmd_list = cl;
 | |
|     cl->start_ntb = start_ntb;
 | |
|     cmd = cl->cmd;
 | |
|     cmd->phase = local_phase;
 | |
| 
 | |
|     //TODO: dbg for hw sched
 | |
|     cl->alloc_ntb = mac_sched_get_ntb(vdev);
 | |
|     cl->caller = 2;
 | |
| 
 | |
|     /* prepare central beacon slot */
 | |
|     if (ts->bc_slot_dur) {
 | |
|         start_offset += ts->bc_slot_dur * ts->cco_bc_cnt;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         new_cmd_required = 1;
 | |
|     }
 | |
| 
 | |
|     /* prepare proxy and discovery beacon slots */
 | |
|     if (ts->non_cco_bc_info.tei_valid && (self_tei != PLC_TEI_INVAL)) {
 | |
|         for (i = 0; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|             uint16_t tx_flag = ts->non_cco_bc_info.sta[i].tx_flag;
 | |
|             if (ts->non_cco_bc_info.sta[i].tei == self_tei &&
 | |
|                 BEACON_TX_ONLY_RF != tx_flag) {
 | |
|                 /* a new time slot required for local beacon tx */
 | |
|                 if (new_cmd_required) {
 | |
|                     cnt++;
 | |
|                     if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                         IOT_ASSERT(0);
 | |
|                     cmd++;
 | |
|                     cmd->phase = local_phase;
 | |
|                 }
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_wb_bc_q(vdev, local_phase);
 | |
|                 start_offset += ts->bc_slot_dur;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
| 
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = local_phase;
 | |
|                 new_cmd_required = 0;
 | |
|             } else {
 | |
|                 start_offset += ts->bc_slot_dur;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 new_cmd_required = 1;
 | |
|             }
 | |
|         }
 | |
|     } else if (ts->non_cco_bc_info.bc_cnt) {
 | |
|         start_offset += ts->bc_slot_dur * ts->non_cco_bc_info.bc_cnt;
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         new_cmd_required = 1;
 | |
|     }
 | |
| 
 | |
|     if (new_cmd_required) {
 | |
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|         cmd->phase = local_phase;
 | |
|         new_cmd_required = 0;
 | |
|     }
 | |
| 
 | |
|     if (proto != PLC_PROTO_TYPE_SPG) {
 | |
|         /* prepare tdma slot here (tdma is placed behind csma only for SPG) */
 | |
|         if (ts->tdma_slot_dur) {
 | |
|             cmd->phase = local_phase;
 | |
|             /* prepare tdma for cco device A, B, C phase */
 | |
|             if (ts->bc_slot_dur) {
 | |
|                 start_offset += ts->tdma_slot_dur * ts->cco_bc_cnt;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 new_cmd_required = 1;
 | |
|             }
 | |
|             /* prepare tdma for pco and sta device */
 | |
|             for (i = 0; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|                 if (ts->non_cco_bc_info.sta[i].tei == self_tei) {
 | |
|                     /* a new time slot required for local beacon tx */
 | |
|                     if (new_cmd_required) {
 | |
|                         cnt++;
 | |
|                         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                             IOT_ASSERT(0);
 | |
|                         cmd++;
 | |
|                         cmd->phase = local_phase;
 | |
|                     }
 | |
|                     /* enable tx for the phase of the local device, note that we always
 | |
|                      * use phase a hwq for sta role device.
 | |
|                      */
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_tdma_q(vdev, PLC_PHASE_A);
 | |
|                     start_offset += ts->tdma_slot_dur;
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
| 
 | |
|                     cnt++;
 | |
|                     if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                         IOT_ASSERT(0);
 | |
|                     cmd++;
 | |
|                     cmd->phase = local_phase;
 | |
|                     new_cmd_required = 0;
 | |
|                 } else {
 | |
|                     start_offset += ts->tdma_slot_dur;
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
|                     new_cmd_required = 1;
 | |
|                 }
 | |
|             }
 | |
|         }
 | |
| 
 | |
|         if (new_cmd_required) {
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|             cmd->phase = local_phase;
 | |
|             new_cmd_required = 0;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     /* prepare csma slot */
 | |
|     /* align the minimum time slot size to 10 ms */
 | |
|     min_csma_frag = (uint16_t)(iot_ceil(ts->bc_period / HW_SCHED_CMD_MAX_CNT,
 | |
|         10) * 10);
 | |
|     csma_frag = max(ts->csma_slot_frag_s, min_csma_frag);
 | |
|     csma_slot_cnt = mac_sched_split_csma(&ts->csma_info, csma_frag);
 | |
|     if (csma_slot_cnt) {
 | |
|         if (allowed_tx_phase & (1 << g_csma_slot[0].phase)) {
 | |
|             /* enable tx for the allowed phase, note that we always
 | |
|              * use phase a hwq for sta role device.
 | |
|              */
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|         }
 | |
|         cmd->phase = g_csma_slot[0].phase;
 | |
|         if (g_csma_slot[0].last) {
 | |
|             start_offset += g_slot_last[g_csma_slot[0].phase - 1];
 | |
|         } else {
 | |
|             start_offset += csma_frag;
 | |
|         }
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         for (i = 1; i < csma_slot_cnt; i++) {
 | |
|             if (g_csma_slot[i].phase != g_csma_slot[i - 1].phase) {
 | |
|                 /* a new slot required for local device tx */
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = g_csma_slot[i].phase;
 | |
|                 if (allowed_tx_phase & (1 << g_csma_slot[i].phase)) {
 | |
|                     /* enable tx for the allowed phase. note that we
 | |
|                      * always use phase a hwq for sta role device.
 | |
|                      */
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|                 }
 | |
|             }
 | |
|             if (g_csma_slot[i].last) {
 | |
|                 start_offset += g_slot_last[g_csma_slot[i].phase - 1];
 | |
|             } else {
 | |
|                 start_offset += csma_frag;
 | |
|             }
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|         }
 | |
|         if (cmd->tx_q_en_bm && MAC_SCHED_CSMA_GAP_MS) {
 | |
|             prev_cmd = cmd - 1;
 | |
|             if ((cmd->t_info.e.end_t - prev_cmd->t_info.e.end_t) >
 | |
|                 MAC_SCHED_CSMA_GAP_MS) {
 | |
|                 /* last slot has enough room to reserve the gap */
 | |
|                 cmd->t_info.e.end_t -= MAC_SCHED_CSMA_GAP_MS;
 | |
|                 prev_cmd = cmd;
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|                 cmd->phase = prev_cmd->phase;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|             } else {
 | |
|                 /* last slot does not have enough room to reserve the gap, disable tx
 | |
|                  * for the last slot.
 | |
|                  */
 | |
|                 cmd->tx_q_en_bm = 0;
 | |
|             }
 | |
|         }
 | |
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|     }
 | |
| 
 | |
|     if (proto == PLC_PROTO_TYPE_SPG) {
 | |
|         /* prepare tdma slot. tdma is behind csma for SPG */
 | |
|         if (ts->tdma_slot_dur) {
 | |
|             cmd->phase = local_phase;
 | |
|             /* prepare tdma for cco device A, B, C phase */
 | |
|             if (ts->bc_slot_dur) {
 | |
|                 start_offset += ts->tdma_slot_dur * ts->cco_bc_cnt;
 | |
|                 cmd->t_info.e.end_t = start_offset;
 | |
|                 new_cmd_required = 1;
 | |
|             }
 | |
|             /* prepare tdma for pco and sta device */
 | |
|             for (i = 0; i < ts->non_cco_bc_info.bc_cnt; i++) {
 | |
|                 if (ts->non_cco_bc_info.sta[i].tei == self_tei) {
 | |
|                     /* a new time slot required for local beacon tx */
 | |
|                     if (new_cmd_required) {
 | |
|                         cnt++;
 | |
|                         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                             IOT_ASSERT(0);
 | |
|                         cmd++;
 | |
|                         cmd->phase = local_phase;
 | |
|                     }
 | |
|                     /* enable tx for the phase of the local device, note that we
 | |
|                      * always use phase a hwq for sta role device.
 | |
|                      */
 | |
|                     cmd->tx_q_en_bm = mac_sched_get_tdma_q(vdev, PLC_PHASE_A);
 | |
|                     start_offset += ts->tdma_slot_dur;
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
| 
 | |
|                     cnt++;
 | |
|                     if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                         IOT_ASSERT(0);
 | |
|                     cmd++;
 | |
|                     cmd->phase = local_phase;
 | |
|                     new_cmd_required = 0;
 | |
|                 } else {
 | |
|                     start_offset += ts->tdma_slot_dur;
 | |
|                     cmd->t_info.e.end_t = start_offset;
 | |
|                     new_cmd_required = 1;
 | |
|                 }
 | |
|             }
 | |
|         }
 | |
| 
 | |
|         if (new_cmd_required) {
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|             cmd->phase = local_phase;
 | |
|             new_cmd_required = 0;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     /* prepare dedicated csma slot */
 | |
|     csma_slot_cnt = mac_sched_split_csma(&ts->d_csma_info, csma_frag);
 | |
|     if (csma_slot_cnt) {
 | |
|         /* NOTE: for SUPPORT_SOUTHERN_POWER_GRID builds with a dcsma slot:
 | |
|          *       As long as there is a detection slot and
 | |
|          *       lid == LID_BCSMA_START, sta allocates the slot,
 | |
|          *       whether it is its own phase or not.
 | |
|          */
 | |
|         if ((allowed_tx_phase & (1 << g_csma_slot[0].phase)) ||
 | |
|             (SUPPORT_SOUTHERN_POWER_GRID &&
 | |
|             (ts->d_csma_lid == LID_BCSMA_FB_DETECT))) {
 | |
|             /* enable tx for the allowed phase, note that we always
 | |
|              * use phase a hwq for sta role device.
 | |
|              */
 | |
|             if (ts->d_csma_lid < LID_BCSMA_START) {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|             } else {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev, PLC_PHASE_A);
 | |
|                 vdev->bcsma_slot_exist = 1;
 | |
|             }
 | |
|         }
 | |
|         cmd->phase = g_csma_slot[0].phase;
 | |
|         if (g_csma_slot[0].last) {
 | |
|             start_offset += g_slot_last[g_csma_slot[0].phase - 1];
 | |
|         } else {
 | |
|             start_offset += csma_frag;
 | |
|         }
 | |
|         cmd->t_info.e.end_t = start_offset;
 | |
|         for (i = 1; i < csma_slot_cnt; i++) {
 | |
|             if (g_csma_slot[i].phase != g_csma_slot[i - 1].phase) {
 | |
|                 /* a new slot required for local device tx */
 | |
|                 cnt++;
 | |
|                 if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                     IOT_ASSERT(0);
 | |
|                 cmd++;
 | |
|                 cmd->phase = g_csma_slot[i].phase;
 | |
|                 if (allowed_tx_phase & (1 << g_csma_slot[i].phase)) {
 | |
|                     /* enable tx for the allowed phase. note that we
 | |
|                      * always use phase a hwq for sta role device.
 | |
|                      */
 | |
|                     if (ts->d_csma_lid < LID_BCSMA_START) {
 | |
|                         cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                             PLC_PHASE_A);
 | |
|                     } else {
 | |
|                         cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev,
 | |
|                             PLC_PHASE_A);
 | |
|                         vdev->bcsma_slot_exist = 1;
 | |
|                     }
 | |
|                 }
 | |
|             }
 | |
|             if (g_csma_slot[i].last) {
 | |
|                 start_offset += g_slot_last[g_csma_slot[i].phase - 1];
 | |
|             } else {
 | |
|                 start_offset += csma_frag;
 | |
|             }
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|         }
 | |
|         cnt++;
 | |
|         if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|             IOT_ASSERT(0);
 | |
|         cmd++;
 | |
|     }
 | |
| 
 | |
|     /* check cmd list */
 | |
|     if (start_offset != ts->bc_period) {
 | |
|         iot_printf("%s, cfg err, cfg ntb:%lu, bc pb:%lu\n",
 | |
|             __FUNCTION__, start_offset, ts->bc_period);
 | |
|         if (start_offset + MAC_SCHED_CSMA_GAP_MS < (uint16_t)ts->bc_period) {
 | |
|             start_offset = (uint16_t)ts->bc_period;
 | |
|             cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev, PLC_PHASE_A);
 | |
|             cmd->phase = local_phase;
 | |
|             cmd->t_info.e.end_t = start_offset - MAC_SCHED_CSMA_GAP_MS;
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|             /* last cmd */
 | |
|             cmd->tx_q_en_bm = 0;
 | |
|             cmd->phase = local_phase;
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cnt++;
 | |
|             if (cnt >= HW_SCHED_CMD_MAX_CNT)
 | |
|                 IOT_ASSERT(0);
 | |
|             cmd++;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     cl->total_cnt = cnt;
 | |
| 
 | |
|     if (need_early_stop) {
 | |
|         cl->cmd[cl->total_cnt - 1].t_info.r.end_t -=
 | |
|             MAC_EARLY_STOP_CMDLIST_T_MS;
 | |
|     }
 | |
| 
 | |
|     mac_sched_dump(cl, cnt);
 | |
| 
 | |
|     mac_sched_load_cmd_list(vdev, cl);
 | |
| 
 | |
|     mac_sched_enable_bp(vdev, 1);
 | |
| }
 | |
| 
 | |
| void mac_sched_sta_bc_alert(mac_vdev_t *vdev)
 | |
| {
 | |
|     uint64_t tmp;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     mac_rf_vdev_t *rf_vdev = get_rf_vdev_ptr(vdev->ref_pdev_id,
 | |
|         RF_PDEV_ID, vdev->rf_vdev_id);
 | |
|     mac_pdev_t *pdev;
 | |
| 
 | |
|     /* load next portion of the current command list */
 | |
|     if (mac_sched_load_cmd_list(vdev, ctx->curr_hw_cmd_list)) {
 | |
|         /* near the end of the current bp, let's try to reuse the previous beacon
 | |
|          * period time slot to follow HW behavior if beacon period is
 | |
|          * available. otherwise, put device in RX only mode for next bp.
 | |
|          * sta role device will configure the exact schedule for the next bp
 | |
|          * after it receives the real beacon.
 | |
|          */
 | |
|         //TODO: dbg for hw sched
 | |
| #if (PLC_MAC_TX_DEBUG_LOG >= PLC_MAC_LOG_LEVEL_1)
 | |
|         iot_printf("mac hw sch list cnt = %d, ts:%d, reuse:%d\n",
 | |
|             mac_sched_get_cmd_list_cnt(vdev),
 | |
|             vdev->bcn_ctx.sta.allow_reuse_ts,
 | |
|             vdev->bcn_ctx.time_slot.allow_reuse);
 | |
| #endif
 | |
|         uint32_t bp;
 | |
|         hw_sched_cmd_list_t *cl = ctx->curr_hw_cmd_list;
 | |
|         IOT_ASSERT(cl);
 | |
|         tmp = cl->start_ntb_h;
 | |
|         tmp <<= 32;
 | |
|         tmp |= cl->start_ntb;
 | |
| 
 | |
|         if (cl->recursive) {
 | |
|             bp = cl->cmd[0].t_info.se.end_t;
 | |
|         } else {
 | |
|             bp = cl->cmd[cl->total_cnt - 1].t_info.r.end_t;
 | |
|         }
 | |
| 
 | |
|         tmp += MAC_MS_TO_NTB(bp);
 | |
|         vdev->bcn_ctx.time_slot.bp_start_ntb = iot_uint64_lower32(tmp);
 | |
| 
 | |
|         if (vdev->bcn_ctx.sta.allow_reuse_ts &&
 | |
|             vdev->bcn_ctx.time_slot.allow_reuse) {
 | |
|             /* flush tdma */
 | |
|             pdev = get_pdev_ptr(vdev->ref_pdev_id);
 | |
|             mac_tx_flush_all_tdma_queue(&pdev->hwq_hdl);
 | |
| 
 | |
|             /* for time slot reused case, we only enable tx for local logical
 | |
|              * phase.
 | |
|              */
 | |
|             switch (vdev->l_phase1) {
 | |
|             case PLC_PHASE_A:
 | |
|                 vdev->bcn_ctx.sta.phase_a_tx = 1;
 | |
|                 break;
 | |
|             case PLC_PHASE_B:
 | |
|                 vdev->bcn_ctx.sta.phase_b_tx = 1;
 | |
|                 break;
 | |
|             case PLC_PHASE_C:
 | |
|                 vdev->bcn_ctx.sta.phase_c_tx = 1;
 | |
|                 break;
 | |
|             default:
 | |
|                 break;
 | |
|             }
 | |
|             mac_sched_sta_set(vdev, &vdev->bcn_ctx.time_slot,
 | |
|                 vdev->bcn_ctx.time_slot.bp_start_ntb,
 | |
|                 vdev->bcn_ctx.sta.phase_a_tx, vdev->bcn_ctx.sta.phase_b_tx,
 | |
|                 vdev->bcn_ctx.sta.phase_c_tx, 0);
 | |
|         } else {
 | |
|             /* TODO: get real bcn period start ntb */
 | |
|             mac_sched_set_csma_only(vdev, tmp,
 | |
|                 (uint8_t)vdev->bcn_ctx.time_slot.allow_reuse);
 | |
|         }
 | |
| 
 | |
|         /* rf alert cmdlist application */
 | |
|         mac_rf_sched_sta_bc_alert(rf_vdev, &vdev->bcn_ctx.time_slot,
 | |
|             tmp,
 | |
|             vdev->bcn_ctx.sta.allow_reuse_ts,
 | |
|             vdev->bcn_ctx.time_slot.allow_reuse);
 | |
|     } else {
 | |
|         /* make sure HW scheduler started */
 | |
|         mac_sched_enable_bp(vdev, 1);
 | |
|     }
 | |
| }
 | |
| 
 | |
| void mac_sched_stop(mac_vdev_t *vdev)
 | |
| {
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
| 
 | |
|     /* stop scheduler */
 | |
|     mac_sched_enable_bp(vdev, 0);
 | |
|     mac_pm_check_freq();
 | |
|     /* clean up command list */
 | |
|     mac_sched_free_cmd_list(vdev);
 | |
|     ctx->curr_hw_cmd_list = NULL;
 | |
|     /* clear pending isr */
 | |
|     mac_isr_clear(MAC_ISR_BC_ALERT_ID);
 | |
|     /* make sure there is no beacon period end ahead alert spur as
 | |
|      * we rely on it to switch status. after HW scheduler disabled
 | |
|      * and enabled again, it's possible there is a pending bp end
 | |
|      * ahead alert dsr in between.
 | |
|      */
 | |
|     mac_dsr_clear(MAC_DSR_BC_ALERT_ID);
 | |
|     mac_sched_cco_stop(vdev);
 | |
| }
 | |
| 
 | |
| /* set scheduler to work in csma only mode */
 | |
| static void mac_sched_set_csma_only_intern(mac_vdev_t *vdev,
 | |
|     mac_bc_cmsa_si_t *csma, uint32_t sched_cnt, uint64_t start_ntb,
 | |
|     uint8_t enable_tx)
 | |
| {
 | |
|     uint8_t i;
 | |
|     uint32_t tmp;
 | |
|     uint16_t start_offset, cnt;
 | |
|     hw_sched_cmd_list_t *cl = NULL;
 | |
|     mac_sched_ctx_t *ctx = vdev->sched_ctx;
 | |
|     hw_sched_cmd_t *cmd;
 | |
|     /* clear flag every time  */
 | |
|     vdev->bcsma_slot_exist = 0;
 | |
|     /* rx only, LID_BCSMA_FB_DETECT does not exist */
 | |
|     vdev->fb_lid_exist = 0;
 | |
| 
 | |
|     cl = mac_sched_alloc_cmd_list(vdev);
 | |
|     ctx->curr_hw_cmd_list = cl;
 | |
|     IOT_ASSERT(cl);
 | |
|     IOT_ASSERT(csma->phase_cnt);
 | |
|     IOT_ASSERT(sched_cnt);
 | |
|     cl->start_ntb = iot_uint64_lower32(start_ntb);
 | |
|     cl->start_ntb_h = iot_uint64_higher32(start_ntb);
 | |
|     cmd = cl->cmd;
 | |
|     cnt = 0;
 | |
|     start_offset = 0;
 | |
| 
 | |
|     //TODO: dbg for hw sched
 | |
|     cl->alloc_ntb = mac_sched_get_ntb(vdev);
 | |
|     cl->caller = 3;
 | |
| 
 | |
|     if (sched_cnt > 1) {
 | |
|         /* create recursive start command */
 | |
|         cmd->t_info.se.s_flag = 1;
 | |
|         cmd->t_info.se.r_flag = 1;
 | |
|         cmd->t_info.se.start_t = 0;
 | |
|         cmd++;
 | |
|         cnt++;
 | |
|         /* mark recursive first */
 | |
|         cmd->t_info.r.rf_flag = 1;
 | |
|         for (i = 0; i < csma->phase_cnt; i++) {
 | |
|             cmd->phase = csma->phase[i];
 | |
|             if (enable_tx) {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                     (uint8_t)cmd->phase);
 | |
|             }
 | |
| #if PLC_SUPPORT_DBG_PKT_MODE
 | |
|             /* only for STA/PCO devices that do not block dbg pkt scheduling
 | |
|              * in the rx only period
 | |
|              */
 | |
|             else if (!vdev_get_block_dbg_pkt_4_rx_only(vdev) && \
 | |
|                 ((PLC_DBG_PKT_MODE_DEF_PHASE == PLC_PHASE_ALL) \
 | |
|                     || (cmd->phase == PLC_DBG_PKT_MODE_DEF_PHASE))) {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev,
 | |
|                     (uint8_t)cmd->phase);
 | |
|                 vdev->bcsma_slot_exist = 1;
 | |
|             }
 | |
| #endif
 | |
|             cmd->t_info.r.r_flag = 1;
 | |
|             start_offset += (uint16_t)csma->slot_dur[i];
 | |
|             /* in recursive mode the end field holds a duration; the same
 | |
|              * field holds an offset in non-recursive mode
 | |
|              */
 | |
|             cmd->t_info.r.end_t = csma->slot_dur[i];
 | |
|             cmd++;
 | |
|             cnt++;
 | |
|         }
 | |
|         /* mark recursive end */
 | |
|         cmd--;
 | |
|         cmd->t_info.r.re_flag = 1;
 | |
|         /* fill the end time of the recursive start command */
 | |
|         tmp = start_offset * sched_cnt;
 | |
|         /* make sure no wrap around */
 | |
|         IOT_ASSERT(tmp <= 0x7FFF);
 | |
|         start_offset = (uint16_t)tmp;
 | |
|         cl->cmd->t_info.se.end_t = start_offset;
 | |
|         cl->recursive = 1;
 | |
|     } else {
 | |
|         for (i = 0; i < csma->phase_cnt; i++) {
 | |
|             cmd->phase = csma->phase[i];
 | |
|             if (enable_tx) {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_csma_q(vdev,
 | |
|                     (uint8_t)cmd->phase);
 | |
|             }
 | |
| #if PLC_SUPPORT_DBG_PKT_MODE
 | |
|             /* if and only if it's STA/PCO dev, and not blocking dbg pkt sch
 | |
|              * in rx only period
 | |
|              */
 | |
|             else if (!vdev_get_block_dbg_pkt_4_rx_only(vdev) && \
 | |
|                 ((PLC_DBG_PKT_MODE_DEF_PHASE == PLC_PHASE_ALL) \
 | |
|                     || (cmd->phase == PLC_DBG_PKT_MODE_DEF_PHASE))) {
 | |
|                 cmd->tx_q_en_bm = mac_sched_get_dcsma_q(vdev,
 | |
|                     (uint8_t)cmd->phase);
 | |
|                 vdev->bcsma_slot_exist = 1;
 | |
|             }
 | |
| #endif
 | |
|             start_offset += (uint16_t)csma->slot_dur[i];
 | |
|             cmd->t_info.e.end_t = start_offset;
 | |
|             cmd++;
 | |
|             cnt++;
 | |
|         }
 | |
|     }
 | |
| 
 | |
|     mac_sched_dump(cl, cnt);
 | |
| 
 | |
|     /* for csma only mode, make sure cmd list is short enough */
 | |
|     IOT_ASSERT(cnt < HW_SHCED_CMD_DEPTH);
 | |
|     cl->total_cnt = cnt;
 | |
|     /* load command list of next beacon period */
 | |
|     mac_sched_load_cmd_list(vdev, cl);
 | |
|     /* start HW scheduler */
 | |
|     mac_sched_enable_bp(vdev, 1);
 | |
| }
 | |
| 
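/* build the csma only schedule for this device and hand it to the HW
 * scheduler. a cco that supports more than one phase rotates across its
 * supported phases, dwelling MAC_SCHED_CCO_RX_ONLY_SLOT_DUR (100 ms) on
 * each phase and repeating the rotation 10 times, e.g. with 3 phases that
 * is 3 * 100 ms * 10 = 3000 ms in total. a single phase cco or a sta uses
 * one slot of MAC_CSMA_ONLY_PEIROD_MS with no repetition.
 */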
void mac_sched_set_csma_only(mac_vdev_t *vdev, uint64_t start_ntb,
    uint8_t enable_tx)
{
    mac_bc_cmsa_si_t csma;
    uint32_t sched_cnt;

    /* config HW scheduler to RX only mode */
    if (mac_vdev_cfg_get_node_role(vdev) == PLC_DEV_ROLE_CCO &&
        vdev->mac_vdev_cfg.p_phase_cnt > 1) {
        /* let a multi-phase cco device dwell on each supported phase evenly */
        csma.phase_cnt = 0;
        if (vdev->l_phase1) {
            csma.phase[csma.phase_cnt] = vdev->l_phase1;
            csma.slot_dur[csma.phase_cnt] = MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
            csma.phase_cnt++;
        }
        if (vdev->l_phase2) {
            csma.phase[csma.phase_cnt] = vdev->l_phase2;
            csma.slot_dur[csma.phase_cnt] = MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
            csma.phase_cnt++;
        }
        if (vdev->l_phase3) {
            csma.phase[csma.phase_cnt] = vdev->l_phase3;
            csma.slot_dur[csma.phase_cnt] = MAC_SCHED_CCO_RX_ONLY_SLOT_DUR;
            csma.phase_cnt++;
        }
        /* switch phase every 100 ms and repeat the whole sequence 10 times,
         * i.e. phase_cnt * 100 * 10 ms = MAC_CSMA_ONLY_PEIROD_MS in total.
         */
        sched_cnt = 10;
    } else {
        /* for a single-phase cco device or a sta device */
        csma.phase_cnt = 1;
        if (vdev->l_phase1) {
            csma.phase[0] = vdev->l_phase1;
        } else {
            csma.phase[0] = PLC_PHASE_A;
        }
        csma.slot_dur[0] = MAC_CSMA_ONLY_PEIROD_MS;
        /* no phase switching and no repetition required */
        sched_cnt = 1;
    }
    mac_sched_set_csma_only_intern(vdev, &csma, sched_cnt, start_ntb,
        enable_tx);
}

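/* allocate the per-vdev scheduler context and initialize the cco side
 * scheduler state.
 */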
void mac_sched_init(mac_vdev_t *vdev)
{
    vdev->sched_ctx = os_mem_malloc(PLC_MAC_SCHED_MID, sizeof(mac_sched_ctx_t));
    IOT_ASSERT(vdev->sched_ctx);

    mac_sched_cco_init(vdev);
}