/****************************************************************************
Copyright(c) 2019 by Aerospace C.Power (Chongqing) Microelectronics. ALL RIGHTS RESERVED.
This Information is proprietary to Aerospace C.Power (Chongqing) Microelectronics and MAY NOT
be copied by any method or incorporated into another program without
the express written consent of Aerospace C.Power. This Information or any portion
thereof remains the property of Aerospace C.Power. The Information contained herein
is believed to be accurate and Aerospace C.Power assumes no responsibility or
liability for its use in any way and conveys no license or title under
any patent or copyright and makes no representation or warranty that this
Information is free from patent or copyright infringement.
****************************************************************************/

#include "iot_config.h"
 | 
						|
#include "os_types.h"
 | 
						|
#include "iot_bitops.h"
 | 
						|
#include "bb_cpu_utils.h"
 | 
						|
#include "bb_cpu_fsm.h"
 | 
						|
#include "bb_cpu_config.h"
 | 
						|
#include "bb_init.h"
 | 
						|
#include "bb_rf_cfg.h"
 | 
						|
#include "bb_cpu_mac_isr.h"
 | 
						|
#include "bb_cpu_mac_init.h"
 | 
						|
#include "bb_rf_cfg.h"
 | 
						|
#include "bb_rf_hw_tbl.h"
 | 
						|
#include "mac_sched_hw.h"
 | 
						|
#include "plc_mpdu_header.h"
 | 
						|
#include "mpdu_frame.h"
 | 
						|
#include "phy_rf_chn.h"
 | 
						|
#include "rf_tx_mpdu_desc.h"
 | 
						|
#include "rf_rx_mpdu_desc.h"
 | 
						|
#include "mac_cmn_hw.h"
 | 
						|
#include "rfplc_reg_base.h"
 | 
						|
#include "plc_const.h"
 | 
						|
#include "rf_spi_api.h"
 | 
						|
#include "iot_clock.h"
 | 
						|
#include "bb_cpu_timer.h"
 | 
						|
#include "gp_timer.h"
 | 
						|
#include "mac_rf_hwq_mgr.h"
 | 
						|
#include "mac_rf_txq_hw.h"
 | 
						|
#include "bb_cpu_hw_ring.h"
 | 
						|
#include "rf_mac_int.h"
 | 
						|
#include "mac_rf_common_hw.h"
 | 
						|
#include "plc_mac_cfg.h"
 | 
						|
#include "bb_cpu_utils.h"
 | 
						|
#include "hw_war.h"
 | 
						|
#include "mac_reset.h"
 | 
						|
#include "plc_protocol.h"
 | 
						|
#include "plc_beacon.h"
 | 
						|
 | 
						|
void bb_cpu_rx_sm(uint32_t event_id);
void bb_cpu_global_sm(uint32_t event_id);

#if ENA_RF_MULTI_CSMA_HWQ_WAR

/* NOTE: the csma txq currently supports only 4 hwqs */
rf_tx_mpdu_start simu_mpdu_tbl[MAX_ENABLE_CSMA_HWQ] = { 0 };

#endif

typedef struct _bb_cpu_fsm {
    /* bb cpu event signal */
    uint32_t    bb_cpu_event;
    /* bb cpu global finite state machine */
    uint32_t    bb_cpu_glb_fsm;
    /* bb cpu tx finite state machine */
    uint32_t    bb_cpu_tx_fsm;
    /* bb cpu rx finite state machine */
    uint32_t    bb_cpu_rx_fsm;
    /* bb cpu reset finite state machine */
    uint32_t    bb_cpu_rst_fsm;
    /* bb cpu record of the previous glb fsm state */
    uint32_t    bb_cpu_prev_glb_fsm;
    /* bb cpu record of the glb fsm state before the previous one */
    uint32_t    bb_cpu_more_prev_glb_fsm;
    /* record tx dtei, used by rx sack */
    uint16_t    wait_sack_tei;
    /* the flag that indicates the reset reason.
     * 0: invalid reason. 1: stop schedule. 2: rx abort.
     */
    uint8_t     rst_reason;
    /* bb cpu record hwqid */
    uint8_t     bb_cpu_hwqid;
    /* bb cpu record event signal */
    uint32_t    bb_cpu_evt_tmp;
    /* point to current tx mpdu desc */
    uint32_t    bb_cpu_tx_mpdu_t;
    /* indicate whether the bb cpu isr is valid */
    uint8_t     bb_cpu_isr_vaild;
    /* record bb cpu option */
    uint8_t     bb_cpu_option;
    /* record the channel id to be set by bbcpu */
    uint8_t     bb_cpu_channel_id;
    /* record bb cpu vcs timer working */
    uint8_t     bb_cpu_vcs_working;
    /* record the channel frequency to be set by bbcpu, unit: hz */
    uint32_t    bb_cpu_channel_freq;
    /* record bb cpu rx buf */
    uint8_t     *bb_cpu_rx_buf;
    /* record bb cpu vcs timer start local ntb */
    uint32_t    bb_cpu_vcs_ntb;
    /* record bb cpu vcs timer length, unit: us */
    uint32_t    bb_cpu_vcs_len;
    /* record bb cpu enter rx flag */
    uint8_t     bb_cpu_rx_flag;
    /* record current wphy tx power, unit: dBm */
    int8_t      bb_cpu_tx_pwr;
    /* record bb cpu set tx cfg step1 */
    uint8_t     bb_cpu_txcfg1_vld;
    /* record bb cpu set proto */
    uint8_t     bb_cpu_proto;
} bb_cpu_fsm_t;

bb_cpu_fsm_t glb_fsm_ctxt = { 0 };

/* get parameter */
#define BB_CPU_GET(_STR, _ITEM)   _STR._ITEM
/* set parameter */
#define BB_CPU_SET(_STR, _ITEM, VALUE)   (_STR._ITEM = VALUE)

/* bb cpu get global fsm */
#define BB_CPU_GET_GLB_FSM()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_glb_fsm)
/* bb cpu get the previous glb state */
#define BB_CPU_GET_PREV_STATE()    BB_CPU_GET(glb_fsm_ctxt, bb_cpu_prev_glb_fsm)
/* bb cpu get the glb state before the previous one */
#define BB_CPU_GET_MORE_PREV_STATE() \
    BB_CPU_GET(glb_fsm_ctxt, bb_cpu_more_prev_glb_fsm)
/* bb cpu set global fsm */
#define BB_CPU_SET_GLB_FSM(X)      \
    do { \
        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_more_prev_glb_fsm, \
                   (BB_CPU_GET_PREV_STATE())); \
        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_prev_glb_fsm, (BB_CPU_GET_GLB_FSM())); \
        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_glb_fsm, X); \
    } while (0);
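/*
 * Usage sketch: BB_CPU_SET_GLB_FSM() keeps a two-deep history of the global
 * state, so the previous and the one-before-previous states stay queryable
 * after a transition, e.g. (the state name below is an illustrative
 * placeholder; the real state ids come from the fsm headers):
 *
 *     BB_CPU_SET_GLB_FSM(NEW_STATE);
 *     prev = BB_CPU_GET_PREV_STATE();        // state before this call
 *     older = BB_CPU_GET_MORE_PREV_STATE();  // the one before that
 */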
/* bb cpu get tx fsm */
#define BB_CPU_GET_TX_FSM()        BB_CPU_GET(glb_fsm_ctxt, bb_cpu_tx_fsm)
/* bb cpu set tx fsm */
#define BB_CPU_SET_TX_FSM(X)       BB_CPU_SET(glb_fsm_ctxt, bb_cpu_tx_fsm, X)
/* bb cpu get rx fsm */
#define BB_CPU_GET_RX_FSM()        BB_CPU_GET(glb_fsm_ctxt, bb_cpu_rx_fsm)
/* bb cpu set rx fsm */
#define BB_CPU_SET_RX_FSM(X)       BB_CPU_SET(glb_fsm_ctxt, bb_cpu_rx_fsm, X)
/* bb cpu get reset fsm */
#define BB_CPU_GET_RST_FSM()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_rst_fsm)
/* bb cpu set reset fsm */
#define BB_CPU_SET_RST_FSM(X)      BB_CPU_SET(glb_fsm_ctxt, bb_cpu_rst_fsm, X)
/* bb cpu get tx dtei, used by rx sack */
#define BB_CPU_GET_TXDTEI()        BB_CPU_GET(glb_fsm_ctxt, wait_sack_tei)
/* bb cpu set tx dtei, used by rx sack */
#define BB_CPU_SET_TXDTEI(X)       BB_CPU_SET(glb_fsm_ctxt, wait_sack_tei, X)
/* bb cpu get reset reason */
#define BB_CPU_GET_RST_RS()        BB_CPU_GET(glb_fsm_ctxt, rst_reason)
/* bb cpu set reset reason */
#define BB_CPU_SET_RST_RS(X)       BB_CPU_SET(glb_fsm_ctxt, rst_reason, X)
/* bb cpu get tx mpdu */
#define BB_CPU_GET_TX_MPDU()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_tx_mpdu_t)
/* bb cpu set tx mpdu */
#define BB_CPU_SET_TX_MPDU(X)      BB_CPU_SET(glb_fsm_ctxt, bb_cpu_tx_mpdu_t, X)
/* bb cpu get hwqid */
#define BB_CPU_GET_HWQID()         BB_CPU_GET(glb_fsm_ctxt, bb_cpu_hwqid)
/* bb cpu set hwqid */
#define BB_CPU_SET_HWQID(X)        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_hwqid, X)
/* bb cpu get isr valid flag */
#define BB_CPU_GET_ISR_VAILD()     BB_CPU_GET(glb_fsm_ctxt, bb_cpu_isr_vaild)
/* bb cpu set isr valid flag */
#define BB_CPU_SET_ISR_VAILD(X)    BB_CPU_SET(glb_fsm_ctxt, bb_cpu_isr_vaild, X)
/* bb cpu get the channel frequency to be set */
#define BB_CPU_GET_CHANNEL_FREQ()  BB_CPU_GET(glb_fsm_ctxt, bb_cpu_channel_freq)
/* bb cpu get the channel id to be set */
#define BB_CPU_GET_CHANNEL_ID()    BB_CPU_GET(glb_fsm_ctxt, bb_cpu_channel_id)
/* bb cpu set the channel id and frequency to be used */
#define BB_CPU_SET_CHANNEL(id, freq)      \
    do { \
        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_channel_id, id); \
        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_channel_freq, freq); \
    } while (0);
/* bb cpu get option */
#define BB_CPU_GET_OPTION()        BB_CPU_GET(glb_fsm_ctxt, bb_cpu_option)
/* bb cpu set option */
#define BB_CPU_SET_OPTION(X)       BB_CPU_SET(glb_fsm_ctxt, bb_cpu_option, X)
/* bb cpu get rx buf */
#define BB_CPU_GET_RX_BUF()        BB_CPU_GET(glb_fsm_ctxt, bb_cpu_rx_buf)
/* bb cpu set rx buf */
#define BB_CPU_SET_RX_BUF(X)       BB_CPU_SET(glb_fsm_ctxt, bb_cpu_rx_buf, X)
/* bb cpu get VCS timer working */
#define BB_CPU_GET_VCS_WORKING()   BB_CPU_GET(glb_fsm_ctxt, bb_cpu_vcs_working)
/* bb cpu set VCS timer working */
#define BB_CPU_SET_VCS_WORKING(X)     \
    BB_CPU_SET(glb_fsm_ctxt, bb_cpu_vcs_working, X)
/* bb cpu get VCS timer start ntb */
#define BB_CPU_GET_VCS_NTB()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_vcs_ntb)
/* bb cpu set VCS timer start ntb */
#define BB_CPU_SET_VCS_NTB(X)      BB_CPU_SET(glb_fsm_ctxt, bb_cpu_vcs_ntb, X)
/* bb cpu get VCS timer length */
#define BB_CPU_GET_VCS_LEN()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_vcs_len)
/* bb cpu set VCS timer length */
#define BB_CPU_SET_VCS_LEN(X)      BB_CPU_SET(glb_fsm_ctxt, bb_cpu_vcs_len, X)
/* bb cpu get rx flag */
#define BB_CPU_GET_RX_FLAG()       BB_CPU_GET(glb_fsm_ctxt, bb_cpu_rx_flag)
/* bb cpu set rx flag */
#define BB_CPU_SET_RX_FLAG(X)      BB_CPU_SET(glb_fsm_ctxt, bb_cpu_rx_flag, X)
/* bb cpu get current wphy tx power */
#define BB_CPU_GET_CUR_TX_PWR()    BB_CPU_GET(glb_fsm_ctxt, bb_cpu_tx_pwr)
/* bb cpu set current wphy tx power */
#define BB_CPU_SET_CUR_TX_PWR(X)   BB_CPU_SET(glb_fsm_ctxt, bb_cpu_tx_pwr, (X))
/* bb cpu get whether txcfg step1 is valid */
#define BB_CPU_GET_TXCFG1_VLD()    BB_CPU_GET(glb_fsm_ctxt, bb_cpu_txcfg1_vld)
/* bb cpu set txcfg step1 valid */
#define BB_CPU_SET_TXCFG1_VLD(X)   \
    BB_CPU_SET(glb_fsm_ctxt, bb_cpu_txcfg1_vld, (X))
/* bb cpu get proto */
#define BB_CPU_GET_PROTO()         BB_CPU_GET(glb_fsm_ctxt, bb_cpu_proto)
/* bb cpu set proto */
#define BB_CPU_SET_PROTO(X)        BB_CPU_SET(glb_fsm_ctxt, bb_cpu_proto, (X))

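/* Multi-CSMA-HWQ workaround (ENA_RF_MULTI_CSMA_HWQ_WAR). With all four csma
 * hwqs enabled, walk each queue's current descriptor: a queue parked on a
 * simulated mpdu (tx_status == NULL) or on a dummy descriptor that has a
 * real mpdu queued behind it gets force-disabled, its csma configuration
 * restored and is re-enabled at the real traffic. If at least one queue
 * carries real traffic, the queues that only hold dummies are re-parked on
 * their simu_mpdu_tbl[] entry with a blocking configuration so they do not
 * contend while empty.
 */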
static void bb_cpu_csma_check_txq()
{
#if ENA_RF_MULTI_CSMA_HWQ_WAR
    uint32_t hwq_id;
    uint8_t dummy_hwq_map = 0;
    rf_tx_mpdu_start *mpdu = NULL;
    rf_tx_mpdu_start *next = NULL;

    /* debug mode does not need this WAR */
    if (mac_rf_txq_is_dbg_mode()) {
        return;
    }

    /* check whether all csma hwqs are enabled */
    for (hwq_id = MAC_RF_QUE_CSMA_0; hwq_id < MAX_ENABLE_CSMA_HWQ; hwq_id++) {
        if (!mac_rf_txq_is_enable(hwq_id)) {
            bb_cpu_printf("need 4 csma hwq enable!\n");
            return;
        }
    }
    uint8_t vaild_mpdu = 0;
    /* walk each hwq and check whether its current pointer is a dummy */
    for (hwq_id = MAC_RF_QUE_CSMA_0; hwq_id < MAX_ENABLE_CSMA_HWQ; hwq_id++) {
        mpdu = (rf_tx_mpdu_start *)bb_cpu_mac_get_hwq_cur_ptr(hwq_id);
        IOT_ASSERT(mpdu);
        /* current descriptor is an mpdu */
        if (mpdu->desc_type == DESC_TYPE_TX_MPDU_START) {
            /* simulated mpdu */
            if (mpdu->tx_status == NULL) {
                next = mpdu->next;
                IOT_ASSERT(next);
                /* next is a dummy */
                if (next->desc_type != DESC_TYPE_TX_MPDU_START) {
                    next = next->next;
                    /* check whether a valid mpdu exists behind the dummy */
                    if (next && next->desc_type == DESC_TYPE_TX_MPDU_START) {
                        mac_rf_txq_force_disable(hwq_id);
                        /* recover csma configuration */
                        mac_rf_txq_cfg_by_hwqid(hwq_id);
                        /* enable hwq */
                        mac_rf_txq_enable(hwq_id, mpdu->next);
                        vaild_mpdu++;
                    }
                }
            } else {
                /* recover csma configuration */
                mac_rf_txq_cfg_by_hwqid(hwq_id);
                vaild_mpdu++;
            }
        } else {
            /* current pointer is a dummy and the next is a valid mpdu */
            next = mpdu->next;
            if (next && next->desc_type == DESC_TYPE_TX_MPDU_START &&
                next->tx_status != NULL) {
                /* recover csma configuration */
                mac_rf_txq_cfg_by_hwqid(hwq_id);
                vaild_mpdu++;
            } else {
                dummy_hwq_map |= 1 << hwq_id;
            }
        }
    }

    if (vaild_mpdu) {
        for (hwq_id = MAC_RF_QUE_CSMA_0;
            dummy_hwq_map && hwq_id < MAX_ENABLE_CSMA_HWQ; hwq_id++) {
            if (dummy_hwq_map & (1 << hwq_id)) {
                mpdu = (rf_tx_mpdu_start *)bb_cpu_mac_get_hwq_cur_ptr(hwq_id);
                mac_rf_txq_force_disable(hwq_id);
                rf_tx_mpdu_start *simu_mpdu = &simu_mpdu_tbl[hwq_id];
                IOT_ASSERT(simu_mpdu);
                simu_mpdu->next = mpdu;
                mac_rf_txq_cfg_block_by_hwqid(hwq_id);
                mac_rf_txq_enable(hwq_id, simu_mpdu);
                dummy_hwq_map &= ~(0x1 << hwq_id);
            }
        }
    }
#endif
}

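/* Companion to bb_cpu_csma_check_txq(): after a tx on a csma hwq, check
 * whether that queue has run out of real mpdus (current mpdu followed only
 * by a dummy). If so, and at least one other csma hwq is not parked on a
 * simulated mpdu, park this queue on its simu_mpdu_tbl[] entry with a
 * blocking configuration.
 */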
static void bb_cpu_csma_check_next_mpdu()
{
#if ENA_RF_MULTI_CSMA_HWQ_WAR
    uint8_t allhwq_is_simu = 1;
    rf_tx_mpdu_start *mpdu = NULL;
    rf_tx_mpdu_start *tmp_mpdu = NULL;
    rf_tx_mpdu_start *next = NULL;

    /* debug mode does not need this WAR */
    if (mac_rf_txq_is_dbg_mode()) {
        return;
    }

    uint32_t hwq_id = BB_CPU_GET_HWQID();
    if (hwq_id >= MAC_RF_QUE_CSMA_0 && hwq_id < MAX_ENABLE_CSMA_HWQ) {
        mpdu = (rf_tx_mpdu_start *)bb_cpu_mac_get_hwq_cur_ptr(hwq_id);
        IOT_ASSERT(mpdu->desc_type == DESC_TYPE_TX_MPDU_START);
        next = mpdu->next;
        IOT_ASSERT(next->desc_type != DESC_TYPE_TX_MPDU_START);
        next = next->next;
        if (next == NULL) {
            for (uint8_t i = MAC_RF_QUE_CSMA_0; i < MAX_ENABLE_CSMA_HWQ; i++) {
                if (i == hwq_id) {
                    continue;
                }
                tmp_mpdu = (rf_tx_mpdu_start *)bb_cpu_mac_get_hwq_cur_ptr(i);
                if (!(tmp_mpdu->desc_type == DESC_TYPE_TX_MPDU_START &&
                    tmp_mpdu->tx_status == NULL)) {
                    allhwq_is_simu = 0;
                    break;
                }
            }
            /* at least one of them is not a simulated mpdu */
            if (!allhwq_is_simu) {
                mac_rf_txq_force_disable(hwq_id);
                rf_tx_mpdu_start *simu_mpdu = &simu_mpdu_tbl[hwq_id];
                simu_mpdu->next = mpdu->next;
                mac_rf_txq_cfg_block_by_hwqid(hwq_id);
                mac_rf_txq_enable(hwq_id, simu_mpdu);
            }
        }
    }
#endif
}

static void bb_cpu_rx_reset()
{
    /* resetting rx requires stopping the rx timers first */
    bb_rf_rx_timer_en(0);
    bb_cpu_mac_rx_timer_en(0);
    bb_rf_rx_reset();
    //bb_rf_jesd_reset();
    /* set rx flag = 0 */
    BB_CPU_SET_RX_FLAG(0);
}

static void bb_cpu_tx_reset()
{
    /* resetting tx requires stopping the tx timers first */
    bb_rf_tx_timer_en(0);
    bb_cpu_mac_tx_timer_en(0);
    bb_rf_tx_reset();
    //bb_rf_jesd_reset();
}

static void bb_cpu_rf_reset()
{
    bb_cpu_rx_reset();
    bb_cpu_tx_reset();
}

static void bb_cpu_set_isr_vaild(uint8_t is_vaild)
{
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* config whether the isr is valid */
    BB_CPU_SET_ISR_VAILD(is_vaild);

    /* enable irq */
    bb_cpu_mac_isr_start();
}

void bb_cpu_set_isr_vaild_from_isr(uint8_t is_vaild)
{
    /* config whether the isr is valid */
    BB_CPU_SET_ISR_VAILD(is_vaild);
}

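/* After a tx completes, poll for up to GOTO_NEXT_PTR_TIMEOUT (about 100us)
 * until the hwq's current pointer moves off the mpdu that was just sent.
 * If the hardware never advances, either force-disable the queue and
 * re-enable it at the next descriptor (ENA_RF_FORCE_RESET_HWQ) or assert.
 */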
static void bb_cpu_judge_goto_next_ptr()
{
#define GOTO_NEXT_PTR_TIMEOUT  2500  // 100us
    uint32_t hwq_id, judge, start_ntb, cur_ntb, time_span;
    /* get the hwq that just finished tx */
    hwq_id = BB_CPU_GET_HWQID();

    /* wait for the hwq current pointer to move on */
    start_ntb = mac_sched_get_lts();
    do {
        cur_ntb = mac_sched_get_lts();
        time_span = cur_ntb - start_ntb;
        judge = (bb_cpu_mac_get_hwq_cur_ptr(hwq_id) == BB_CPU_GET_TX_MPDU());
    } while (judge && time_span < GOTO_NEXT_PTR_TIMEOUT);

    /* check once more */
    judge = (bb_cpu_mac_get_hwq_cur_ptr(hwq_id) == BB_CPU_GET_TX_MPDU());
    if (judge) {
        bb_cpu_printf("forcestophwq:%d, mpdu:0x%x\n",
            hwq_id, BB_CPU_GET_TX_MPDU());
#if ENA_RF_FORCE_RESET_HWQ
        rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)BB_CPU_GET_TX_MPDU();
        rf_tx_mpdu_start *next = mpdu->next;
        IOT_ASSERT(next);
        mac_rf_txq_force_disable(hwq_id);
        mac_rf_txq_enable(hwq_id, next);
#else
        IOT_ASSERT(0);
#endif
    }
}

static void bb_cpu_notify_sw_tx_done()
{
    uint32_t hwq_id;
    /* notify main cpu tx done */
    hwq_id = BB_CPU_GET_HWQID();
    /* clear the recorded hwqid */
    BB_CPU_SET_HWQID(MAX_MAC_RF_TXQ_NUM);
    IOT_ASSERT(hwq_id < MAX_MAC_RF_TXQ_NUM);
    bb_cpu_mac_set_share_irq_to_maincpu(hwq_id + RF_MAC_INT_HWQ_BIT_OFFSET);
    bb_cpu_mac_set_sw_irq_to_maincpu(RF_MAC_SW_ISR_TX_MPDU_COMPLETE);
}

#if HPLC_RF_ASYNC_TX

#if HPLC_RF_ASYNC_TX == 1

/* return 1: early stop occurred */
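/* HPLC_RF_ASYNC_TX == 1: hand-shake the pcs busy state with the hplc side
 * through shared registers. When asserting busy, raise a software irq
 * towards the main cpu and poll for up to about 1ms until the hplc side
 * acknowledges, bailing out early (return 1) if a reset event arrives in
 * the meantime.
 */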
uint32_t bb_cpu_set_hplc_pcs_sts(uint32_t is_busy)
{
#define BB_CPU_SET_HPLC_PCS_TIMEOUT     25000  // 1ms
    uint32_t start_ntb, cur_ntb, time_span, early_stop_judge;

    if (mac_rf_get_hplc_pcs_reg() == is_busy) {
        return 0;
    }

    /* notify hplc to set pcs busy */
    mac_rf_set_hplc_pcs_reg(is_busy);
    if (is_busy) {
        /* clear hplc pcs status */
        mac_rf_set_hplc_pcs_sts_reg(0);
    }
    bb_cpu_mac_set_sw_irq_to_maincpu(RF_MAC_SW_ISR_SET_HPLC_PCS);

    if (is_busy) {
        /* wait for the hplc side to acknowledge */
        start_ntb = mac_sched_get_lts();
        do {
            cur_ntb = mac_sched_get_lts();
            time_span = cur_ntb - start_ntb;
            early_stop_judge = !!(glb_fsm_ctxt.bb_cpu_event &
                (1 << BB_CPU_EVENT_RST_ID));
        } while (!early_stop_judge && !mac_rf_get_hplc_pcs_sts_reg() &&
            time_span < BB_CPU_SET_HPLC_PCS_TIMEOUT);

        /* early stop occurred */
        if (early_stop_judge) {
            bb_cpu_printf("early stop occurred!\n");
            return 1;
        }

        /* check once more */
        if (!mac_rf_get_hplc_pcs_sts_reg()) {
            bb_cpu_printf("set hplc pcs fail:0x%x\n",
                glb_fsm_ctxt.bb_cpu_event);
            IOT_ASSERT(0);
        }
    }

    return 0;
}

#elif HPLC_RF_ASYNC_TX == 2

uint32_t bb_cpu_set_hplc_pcs_sts(uint32_t is_busy)
{
    uint32_t ret;

    if (is_busy) {
        ret = mac_set_pcs_busy_from_isr(1, 1);
    } else {
        ret = mac_set_pcs_busy_from_isr(0, 0);
    }
    return ret;
}

#else

#error "config HPLC_RF_ASYNC_TX error"

#endif

uint32_t bb_cpu_async_tx_check()
{
    uint32_t ret = 0;

    if (!mac_rf_get_hplc_rf_async_tx()) {
        return 0;
    }

    /* if the current cmd is csma and not in cert mode,
     * check whether hplc is transmitting
     */
    if (bb_cpu_mac_judge_cur_cmd_need_tx_csma() &&
        !mac_rf_get_cert_flag()) {
        /* if hplc is transmitting, the wmac returns;
         * otherwise the wmac transmits the rf packet.
         */
        if (mac_get_phy_txrx_sts() == MAC_PHY_TX_STS) {
            /* set rf mac tx done */
            bb_cpu_mac_set_tx_done();
            ret = 1;
        } else {
            /* notify hplc to set pcs busy */
            ret = bb_cpu_set_hplc_pcs_sts(1);
            if (ret || mac_get_phy_txrx_sts() == MAC_PHY_TX_STS) {
                bb_cpu_printf("set hplc pcs status fail\n");
                /* set rf mac tx done */
                bb_cpu_mac_set_tx_done();
                /* free hplc pcs */
                bb_cpu_set_hplc_pcs_sts(0);
                ret = 1;
            }
        }
    }

    return ret;
}

#else /* HPLC_RF_ASYNC_TX */

uint32_t bb_cpu_set_hplc_pcs_sts(uint32_t is_busy)
{
    (void)is_busy;
    return 0;
}

uint32_t bb_cpu_async_tx_check()
{
    return 0;
}

#endif /* HPLC_RF_ASYNC_TX */

/* bb_cpu triggers the mac to handle the given reason */
static void bb_cpu_trigger_mac(uint32_t reason)
{
    switch (reason) {
    case BB_CPU_TRIGGER_MAC_TX_DONE:
    {
        /* inter-packet interval cifs */
        bb_cpu_mac_inter_pkt_interval(RF_MAC_CIFS_US);
        /* check whether the next mpdu is valid */
        bb_cpu_csma_check_next_mpdu();
        /* set rf mac tx done */
        bb_cpu_mac_set_tx_done();
        /* release the hplc pcs busy state */
        bb_cpu_set_hplc_pcs_sts(0);
        /* make sure the hwq advances to the next mpdu */
        bb_cpu_judge_goto_next_ptr();
        /* notify main cpu tx done */
        bb_cpu_notify_sw_tx_done();
        break;
    }
    case BB_CPU_TRIGGER_MAC_RX_SACK_DONE:
    {
        /* notify main cpu tx done */
        bb_cpu_notify_sw_tx_done();
        break;
    }
    case BB_CPU_TRIGGER_MAC_RX_DONE:
    {
        /* set rf mac rx done */
        bb_cpu_mac_set_rx_done();
        BB_CPU_SET_RX_FLAG(0);
        break;
    }
    case BB_CPU_TRIGGER_MAC_NEED_RX_SACK:
    {
        uint32_t interval = RF_MAC_RX_RIFS_US +
            phy_rf_get_g_stf_ltf_fl() + phy_rf_get_g_sig_fl() +
            phy_rf_get_g_phr_fl(phy_rf_get_g_tx_sack_mcs()) + RF_MAC_CIFS_US;
        if (mac_rf_get_cert_flag()) {
            /* for certification mode, add 1ms as margin */
            interval += RF_MAC_RX_SACK_CERT_MARGIN;
        }
        /* inter-packet interval while waiting for the rx sack */
        bb_cpu_mac_inter_pkt_interval(interval);
        /* check whether the next mpdu is valid */
        bb_cpu_csma_check_next_mpdu();
        /* set rf mac tx done */
        bb_cpu_mac_set_tx_done();
        /* release the hplc pcs busy state */
        bb_cpu_set_hplc_pcs_sts(0);
        break;
    }
    case BB_CPU_TRIGGER_MAC_STOP_SCHE_COMPLETE:
    {
        /* set rf mac sched stop done */
        uint32_t ret_fsm = bb_cpu_mac_set_sched_stop_done(1);
        if (ret_fsm) {
            /* try again */
            bb_cpu_mac_set_sched_stop_done(2);
        }
        /* notify plc cpu that stopping the sched succeeded */
        mac_rf_set_stop_sched_sts_success();
        break;
    }
    case BB_CPU_TRIGGER_MAC_RX_ABORT_COMPLETE:
    {
        /* set rf mac rx abort done */
        bb_cpu_mac_set_rx_abort_done();
        break;
    }
    case BB_CPU_TRIGGER_MAC_TX_ABORT_COMPLETE:
    {
        /* set rf mac tx abort done */
        bb_cpu_mac_set_tx_abort_done();
        break;
    }
    case BB_CPU_TRIGGER_MAC_BBCPU_IS_READY:
    {
        /* set rf mac csma is ready */
        bb_cpu_mac_set_csma_is_ready();
        break;
    }
    case BB_CPU_TRIGGER_MAC_CMSLIST_DONE:
    {
        bb_cpu_set_isr_vaild_from_isr(1);
        /* set rf mac cmdlist done */
        bb_cpu_mac_set_cmdlist_done();
        /* in some cases the cmdlist done arrives before the early stop.
         * no further early stop processing is done, but early stop must
         * still be reported as successful.
         */
        mac_rf_set_stop_sched_sts_success();
        /* notify plc cpu cmdlist done */
        bb_cpu_mac_set_sw_irq_to_maincpu(RF_MAC_SW_ISR_CMDLIST_BBCPU_DONE);
        break;
    }
    default:
        bb_cpu_printf("trg mac rsn:%d\n", reason);
        IOT_ASSERT(0);
        break;
    }
    return;
}

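/* Write the actual tx start ntb back into the descriptor. For beacon and
 * SOF frames the ntb goes into the mpdu's tx_status (first_try_ts); for
 * SACK frames there is currently no descriptor field for it (see the TODO
 * below).
 */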
static void bb_cpu_backfill_txntb_to_desc(uint32_t *phr, uint32_t tx_ntb)
{
    bb_cpu_rf_fc_t fc_msg = {0};
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);

    if (fc_msg.delimiter == FC_DELIM_BEACON ||
        fc_msg.delimiter == FC_DELIM_SOF) {
        rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)BB_CPU_GET_TX_MPDU();
        rf_tx_mpdu_end *tx_end = mpdu->tx_status;
        tx_end->first_try_ts = tx_ntb;
    } else if (fc_msg.delimiter == FC_DELIM_SACK) {
        // TODO: maybe add sack tx ntb in rx desc.
    }
}

/* bb_cpu triggers the bb to handle the given reason */
void bb_cpu_trigger_bb(uint32_t reason)
{
    if (BB_CPU_TRIGGER_BB_TX == reason) {
        uint32_t tx_ntb;
        uint32_t phr[4] = { 0 };
        bb_rf_get_tx_phr(phr);
        tx_ntb = bb_cpu_mac_trigger_tx(phr);
        bb_cpu_backfill_txntb_to_desc(phr, tx_ntb);
    } else if (BB_CPU_TRIGGER_BB_RX == reason) {
        bb_cpu_mac_trigger_rx();
    } else {
        bb_cpu_printf("trg bb rsn:%d\n", reason);
        IOT_ASSERT(0);
    }
}

void bb_cpu_set_event(uint32_t event_id)
{
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* set the event bit */
    glb_fsm_ctxt.bb_cpu_event |= 1 << event_id;

    /* enable irq */
    bb_cpu_mac_isr_start();
}

static void bb_cpu_clr_evt(uint32_t event_id)
{
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* clear the event bit */
    glb_fsm_ctxt.bb_cpu_event &= ~(0x1 << event_id);
    glb_fsm_ctxt.bb_cpu_evt_tmp &= ~(0x1 << event_id);

    /* enable irq */
    bb_cpu_mac_isr_start();
}

static void bb_cpu_clr_multi_evt(uint32_t event_ids)
{
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* clear the event bits */
    glb_fsm_ctxt.bb_cpu_event &= ~event_ids;
    glb_fsm_ctxt.bb_cpu_evt_tmp &= ~event_ids;

    /* enable irq */
    bb_cpu_mac_isr_start();
}

void bb_cpu_set_event_from_isr(uint32_t event_id)
{
    /* set the event bit */
    if (BB_CPU_GET_ISR_VAILD() ||
        // TODO: need to check the necessity of these events.
        BB_CPU_EVENT_TX_COMP_ID == event_id ||
        BB_CPU_EVENT_RX_PLD_START_ID == event_id ||
        BB_CPU_EVENT_TX_TIMEOUT_ID == event_id ||
        BB_CPU_EVENT_RX_TIMEOUT_ID == event_id ||
        BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID == event_id ||
        BB_CPU_EVENT_RST_TIMEOUT_ID == event_id ||
        BB_CPU_EVENT_SYNC_SPI_ID == event_id ||
        BB_CPU_EVENT_TX_TONE_ID == event_id ||
        BB_CPU_EVENT_TX_CAL_UPDATE_ID == event_id) {
        glb_fsm_ctxt.bb_cpu_event |= 1 << event_id;
    }

    if (BB_CPU_GET_ISR_VAILD() && (BB_CPU_EVENT_RST_ID == event_id ||
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id)) {
        bb_cpu_set_isr_vaild_from_isr(0);
    }
}

static uint32_t bb_cpu_get_event()
{
    uint32_t event_id;
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* get the pending events */
    event_id = glb_fsm_ctxt.bb_cpu_event;
    /* clear the pending events */
    glb_fsm_ctxt.bb_cpu_event = 0;

    /* enable irq */
    bb_cpu_mac_isr_start();

    return event_id;
}

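/* Busy-wait until at least one event bit is set or time_to_wait_us has
 * elapsed (gp timer 0 is assumed to count in microseconds here), then return
 * and clear the pending event mask. The loop carries a wrap-around guard for
 * the 32-bit timer value.
 */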
static uint32_t bb_cpu_wait_event(uint32_t time_to_wait_us)
{
    uint32_t cur_time;
    int64_t time_span = 0;
    uint32_t start_ts = gp_timer_get_current_val(0);
    do {
        cur_time = gp_timer_get_current_val(0);
        time_span = cur_time - start_ts;
        /* wrap around */
        if (time_span < 0) {
            time_span = (0x100000000LL) - start_ts + cur_time;
        }
    } while ((time_span < time_to_wait_us) && !glb_fsm_ctxt.bb_cpu_event);

    return bb_cpu_get_event();
}

/* bb cpu (re)arm a timer to wait for a timeout */
static void bb_cpu_timer_restart(uint32_t timer_id, uint32_t time_us)
{
    bb_cpu_timer_stop(timer_id);
    bb_cpu_timer_set(timer_id, time_us);
    bb_cpu_timer_start(timer_id);
}

static void bb_cpu_set_vcs_timer(uint32_t time_us)
{
    /* disable irq */
    bb_cpu_mac_isr_stop();

    /* record vcs timer length */
    BB_CPU_SET_VCS_LEN(time_us);
    bb_cpu_timer_restart(TIMER_OF_VCS, time_us);
    /* record vcs timer start ntb */
    BB_CPU_SET_VCS_NTB(mac_sched_get_lts());
    /* record vcs timer working */
    BB_CPU_SET_VCS_WORKING(1);

    /* enable irq */
    bb_cpu_mac_isr_start();
}

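/* ISR-context variant: when the VCS timer is already running and is_force
 * is not set, the timer is only restarted if the requested time_us exceeds
 * the remaining time; otherwise it is restarted unconditionally with
 * time_us.
 */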
void bb_cpu_set_vcs_timer_from_isr(uint32_t time_us, uint32_t is_force)
{
    uint32_t cur_ntb, dlt_us, time_remain;
    if (BB_CPU_GET_VCS_WORKING() && !is_force) {
        cur_ntb = mac_sched_get_lts();
        dlt_us = (cur_ntb - BB_CPU_GET_VCS_NTB())/25000;
        if (dlt_us > BB_CPU_GET_VCS_LEN()) {
            IOT_ASSERT(0);
        }
        time_remain = BB_CPU_GET_VCS_LEN() - dlt_us;
        if (time_us > time_remain) {
            BB_CPU_SET_VCS_LEN(time_us);
            bb_cpu_timer_restart(TIMER_OF_VCS, time_us);
            /* record vcs timer start ntb */
            BB_CPU_SET_VCS_NTB(mac_sched_get_lts());
            BB_CPU_SET_VCS_WORKING(1);
        }
    } else {
        BB_CPU_SET_VCS_LEN(time_us);
        bb_cpu_timer_restart(TIMER_OF_VCS, time_us);
        /* record vcs timer start ntb */
        BB_CPU_SET_VCS_NTB(mac_sched_get_lts());
        BB_CPU_SET_VCS_WORKING(1);
    }
}

void bb_cpu_stop_vcs_working_from_isr()
{
    /* pull vcs down */
    bb_cpu_mac_set_vcs_sts_from_isr(0);
    BB_CPU_SET_VCS_WORKING(0);
}

static void bb_cpu_rxabort_cmdlistdone_set_vcs(uint32_t time_us)
{
    uint32_t cur_ntb, dlt_us, time_remain;

    /* disable irq */
    bb_cpu_mac_isr_stop();

    if (BB_CPU_GET_VCS_WORKING()) {
        cur_ntb = mac_sched_get_lts();
        dlt_us = (cur_ntb - BB_CPU_GET_VCS_NTB())/25000;
        if (dlt_us > BB_CPU_GET_VCS_LEN() &&
            BB_CPU_VCS_MARGIN > BB_CPU_GET_VCS_LEN()) {
            bb_cpu_printf("special set vcs err dltus:%lu, vcslen:%lu\n",
                dlt_us, BB_CPU_GET_VCS_LEN());
            IOT_ASSERT(0);
        }
        if (dlt_us < (BB_CPU_GET_VCS_LEN() - BB_CPU_VCS_MARGIN)) {
            time_remain = BB_CPU_GET_VCS_LEN() - dlt_us - BB_CPU_VCS_MARGIN;
            BB_CPU_SET_VCS_LEN(time_remain);
            bb_cpu_timer_restart(TIMER_OF_VCS, time_remain);
        } else {
            /* record vcs timer length */
            BB_CPU_SET_VCS_LEN(time_us);
            bb_cpu_timer_restart(TIMER_OF_VCS, time_us);
        }
        /* record vcs timer start ntb */
        BB_CPU_SET_VCS_NTB(mac_sched_get_lts());
        BB_CPU_SET_VCS_WORKING(1);
    } else {
        /* the vcs timer has already timed out */
    }

    /* enable irq */
    bb_cpu_mac_isr_start();
}

static void bb_cpu_stop_and_clr_tx_timer()
{
    bb_cpu_timer_stop(TIMER_OF_TX);
    bb_cpu_clr_evt(BB_CPU_EVENT_TX_TIMEOUT_ID);
}

static void bb_cpu_stop_and_clr_rx_timer()
{
    bb_cpu_timer_stop(TIMER_OF_RX);
    bb_cpu_clr_evt(BB_CPU_EVENT_RX_TIMEOUT_ID);
}

static void bb_cpu_stop_and_clr_rst_timer()
{
    bb_cpu_timer_stop(TIMER_OF_RST);
    bb_cpu_clr_evt(BB_CPU_EVENT_RST_TIMEOUT_ID);
}

static void bb_cpu_set_rst_reason_by_evt(uint32_t event_id)
{
    if (BB_CPU_EVENT_RX_ABORT_ID == event_id) {
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_RX_ABORT);
    } else if (BB_CPU_EVENT_TX_ABORT_ID == event_id) {
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_TX_ABORT);
    } else if (BB_CPU_EVENT_RST_ID == event_id) {
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_STOP_SCHE);
    } else if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_CMDLIST_DONE);
    } else {
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
    }
}

static void bb_cpu_update_tx_pwr(void)
{
    int8_t target_pwr;

    if (ERR_OK != mac_rf_get_target_tx_power(&target_pwr)) {
        return;
    }

    if (target_pwr != BB_CPU_GET_CUR_TX_PWR()) {
        if (0 == bb_rf_set_pa(1, (int32_t)target_pwr)) {
            bb_cpu_printf("set tx pwr %d to %d\n",
                BB_CPU_GET_CUR_TX_PWR(), target_pwr);
            BB_CPU_SET_CUR_TX_PWR(target_pwr);
        }
    }
}

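/* Move the tx fsm to tx_state. When entering IDLE, sanity-check that no
 * unexpected events remain pending in bb_cpu_evt_tmp (what counts as
 * expected differs between a normal entry and a reset entry), stop the
 * wait-sack timer on the reset path, and always stop and clear the tx
 * timer.
 */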
static void bb_cpu_set_tx_fsm(uint32_t tx_state, uint32_t reason)
{
    uint32_t bb_evt = 0;
    if (BB_CPU_TX_STATE_IDLE == tx_state) {
        bb_evt = glb_fsm_ctxt.bb_cpu_evt_tmp;
        if (BB_CPU_ENTER_IDLE_NOR == reason) {
            /* tx abort cannot be cleared
             * because a tx abort done reply is required.
             */
            if (bb_evt & (~(1 << BB_CPU_EVENT_TX_ABORT_ID))) {
                bb_cpu_printf("tx fsm:%d, evt:0x%x\n",
                    BB_CPU_GET_TX_FSM(), bb_evt);
                IOT_ASSERT(0);
            }
        } else if (BB_CPU_ENTER_IDLE_RST == reason) {
            if (bb_evt & (~(1 << BB_CPU_EVENT_TX_COMP_ID))) {
                bb_cpu_printf("tx rst fsm:%d, tmpevt:0x%x\n",
                    BB_CPU_GET_TX_FSM(), bb_evt);
                IOT_ASSERT(0);
            }
            bb_cpu_timer_stop(TIMER_OF_WAIT_SACK);
            /* keep only the tx complete bit; it cannot be cleared here */
            glb_fsm_ctxt.bb_cpu_evt_tmp &= (1 << BB_CPU_EVENT_TX_COMP_ID);
        } else {
            IOT_ASSERT(0);
        }
        bb_cpu_stop_and_clr_tx_timer();
    }

    BB_CPU_SET_TX_FSM(tx_state);
}

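/* Move the rx fsm to rx_state. When entering IDLE, sanity-check the pending
 * bits in bb_cpu_evt_tmp: a few combinations are tolerated (a wait-sack
 * timeout while listening, a tx complete while waiting for the sack tx to
 * finish), anything else asserts. The wait-sack timer is stopped on the
 * reset path and the rx timer is always stopped and cleared.
 */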
static void bb_cpu_set_rx_fsm(uint32_t rx_state, uint32_t reason)
{
    uint32_t bb_evt = 0;
    if (BB_CPU_RX_STATE_IDLE == rx_state) {
        bb_evt = glb_fsm_ctxt.bb_cpu_evt_tmp;
        if (BB_CPU_ENTER_IDLE_NOR == reason) {
            if (bb_evt) {
                if ((BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_LISTENING ||
                    BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_WAIT_PHR ||
                    /* rx sack enters the rx complete state directly */
                    BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_RX_COMPLETE) &&
                    !(bb_evt & (~(1 << BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID)))) {
                    /* listening state and a wait-sack timeout
                     * happened; nothing to do
                     */
                } else {
                    bb_cpu_printf("rx fsm:%d, evt:0x%x\n",
                        BB_CPU_GET_RX_FSM(), bb_evt);
                    IOT_ASSERT(0);
                }
            }
        } else if (BB_CPU_ENTER_IDLE_RST == reason) {
            if (bb_evt) {
                if (BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_WAIT_SACK_TX_COMPLETE
                    && !(bb_evt & (~(1 << BB_CPU_EVENT_TX_COMP_ID)))) {
                    /* waiting for the sack tx to complete and a
                     * tx complete happened; nothing to do
                     */
                } else {
                    if (bb_evt & (~(1 << BB_CPU_EVENT_RX_PLD_START_ID))) {
                        bb_cpu_printf("rx rst fsm:%d, tmpevt:0x%x\n",
                            BB_CPU_GET_RX_FSM(), bb_evt);
                        IOT_ASSERT(0);
                    }
                }
            }
            bb_cpu_timer_stop(TIMER_OF_WAIT_SACK);
        } else {
            IOT_ASSERT(0);
        }
        bb_cpu_stop_and_clr_rx_timer();
    }
    BB_CPU_SET_RX_FSM(rx_state);
}

static uint32_t bb_cpu_get_option()
{
    if (BB_CPU_GET_OPTION() >= PHY_RF_OPTION_MAX) {
        /* not initialized */
        IOT_ASSERT(0);
    }
    return BB_CPU_GET_OPTION();
}

extern uint8_t bb_cpu_buf[520];

static void bb_cpu_send_data_to_bb(rf_tx_mpdu_start *mpdu)
{
    uint32_t pb_size;
    uint32_t proto = BB_CPU_GET_PROTO();
    bb_cpu_rf_fc_t fc_msg = {0};
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), &mpdu->phr0, &fc_msg);
    pb_size = phy_rf_get_pbsz(mpdu->blkz);
    if (fc_msg.delimiter == FC_DELIM_BEACON) {
        /* cpu write data */
        bb_rf_write_data_to_bb(pb_size, (uint8_t *)mpdu->pb->pb_buf_addr);
    } else if (fc_msg.delimiter == FC_DELIM_SOF) {
#if SUPPORT_SMART_GRID
        if (PLC_PROTO_TYPE_SG == proto) {
            bb_cpu_buf[0] = mpdu->pb->sof_pb_header;
            os_mem_cpy((uint8_t *)&bb_cpu_buf[1],
                (uint8_t *)mpdu->pb->pb_buf_addr,
                pb_size - SG_SOF_PB_HDR_CRC_LEN);
        } else
#endif
#if SUPPORT_SOUTHERN_POWER_GRID
        if (PLC_PROTO_TYPE_SPG == proto) {
            spg_sof_pb_hdr_t *pb_hdr = (spg_sof_pb_hdr_t *)bb_cpu_buf;
            pb_hdr->seq = mpdu->pb->sof_pb_header;
            pb_hdr->resv = 0;
            os_mem_cpy((uint8_t *)&bb_cpu_buf[4],
                (uint8_t *)mpdu->pb->pb_buf_addr,
                pb_size - SPG_SOF_PB_HDR_LEN);
        }
#endif
        {
        }
        /* cpu write data */
        bb_rf_write_data_to_bb(pb_size, (uint8_t *)bb_cpu_buf);
    }
}

/* align the standard beacon header channel and option info */
static void bb_cpu_align_bcn_hdr_ch_op_info(uint32_t delimiter,
    uint32_t pb_buf_addr)
{
    if (delimiter != FC_DELIM_BEACON || mac_rf_get_cert_mode()) {
        return;
    }

    switch (BB_CPU_GET_PROTO()) {
#if SUPPORT_SMART_GRID
    case PLC_PROTO_TYPE_SG:
    {
        beacon_payload_fixed_header_t *fix_header =
            (beacon_payload_fixed_header_t *)pb_buf_addr;
        if (!fix_header->simple_beacon) {
            beacon_payload_std_header_t *std_header =
            (beacon_payload_std_header_t *)pb_buf_addr;
            std_header->rf_channel = BB_CPU_GET_CHANNEL_ID();
            std_header->rf_option = BB_CPU_GET_OPTION();
        }
        break;
    }
#endif
#if SUPPORT_SOUTHERN_POWER_GRID
    case PLC_PROTO_TYPE_SPG:
    {
        spg_beacon_payload_fixed_header_t *fix_header =
            (spg_beacon_payload_fixed_header_t *)pb_buf_addr;
        if (!fix_header->simple_beacon) {
            spg_beacon_payload_std_header_t *std_header =
            (spg_beacon_payload_std_header_t *)pb_buf_addr;
            std_header->rf_channel = BB_CPU_GET_CHANNEL_ID();
            std_header->rf_option = BB_CPU_GET_OPTION();
        }
        break;
    }
#endif
    default:
        break;
    }
}

/* config bb to tx */
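/* Sequence: optionally reset the tx path and run switch step 1 (skipped
 * when txcfg step 1 is already valid), run switch step 2, refresh the
 * current channel when a beacon/SOF is about to change it, compensate the
 * carrier frequency by the stored ppm, update tx power and iq phase, patch
 * the real channel/option into the beacon header, start the payload dma
 * when needed, enable crc32 only for SG beacons, record the dtei expected
 * to answer with a sack, and finally program the phr and the phr/payload
 * hw info.
 */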
static void bb_cpu_config_bb_tx(bb_rf_hw_info_t *phr_info,
    bb_rf_hw_info_t *pld_info, void *phr, uint32_t pb_buf,
    uint32_t pb_size, uint32_t pb_num, uint32_t need_sack, uint32_t crc32_en)
{
    uint32_t set_if = 0;
    uint8_t tx_i_p, tx_q_p;
    if (!BB_CPU_GET_TXCFG1_VLD()) {
        /* reset tx before each tx configuration */
        bb_cpu_tx_reset();
        bb_rf_jesd_reset();

        /* bb rf tx switch1 */
        bb_rf_tx_switch_step1();
    }
    /* bb rf tx switch2 */
    bb_rf_tx_switch_step2();
    /* clear the txcfg1 valid flag */
    BB_CPU_SET_TXCFG1_VLD(0);
    bb_cpu_rf_fc_t fc_msg = {0};
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
    /* the channel id changed and a sof or beacon is being sent.
     * a tx sack does not update the current channel.
     */
    if (BB_CPU_GET_CHANNEL_FREQ() != mac_rf_get_cur_channel_freq() &&
        (fc_msg.delimiter == FC_DELIM_BEACON ||
        fc_msg.delimiter == FC_DELIM_SOF)) {
        mac_rf_set_cur_channel_freq(BB_CPU_GET_CHANNEL_FREQ());
        mac_rf_set_cur_channel_id(BB_CPU_GET_CHANNEL_ID());
    }
    int64_t ppmhz = ((int64_t)mac_rf_get_wphy_ppm() *
        BB_CPU_GET_CHANNEL_FREQ()) >> 26;
    set_if = mac_rf_get_cur_channel_freq() - (int32_t)ppmhz;
    IOT_ASSERT(set_if);
    bb_cpu_update_tx_pwr();
    /* update tx iq phase */
    mac_rf_get_tx_iqp_cali(&tx_i_p, &tx_q_p);
    bb_rf_set_tx_iq_phase(tx_i_p, tx_q_p);
    /* tx config */
    bb_rf_tx_set_freq_and_filter(BB_CPU_GET_OPTION(), set_if);
    /* clear tx info */
    bb_rf_clear_tx_info();

    /* make sure that the channel in the beacon header is
     * the same as the real channel
     */
    bb_cpu_align_bcn_hdr_ch_op_info(fc_msg.delimiter, pb_buf);

    if (pb_num && BB_CPU_TXRX_USE_DMA) {
        /* start dma */
        bb_cpu_dma_start(RF_PHY_TX_DMA_BASEADDR, pb_buf, pb_size * pb_num);
    }

    /* only the beacon needs the crc32 calculation */
    if (crc32_en && fc_msg.delimiter == FC_DELIM_BEACON &&
        BB_CPU_GET_PROTO() == PLC_PROTO_TYPE_SG) {
        bb_rf_set_crc32_en(1);
    } else {
        bb_rf_set_crc32_en(0);
    }

    /* set the dtei that must answer with a sack.
     * dtei = 0 : no sack needed;
     * dtei != 0 : wait for the sack from this dtei.
     */
    if (fc_msg.delimiter == FC_DELIM_SOF && need_sack) {
        BB_CPU_SET_TXDTEI(fc_msg.dst_tei);
    } else {
        BB_CPU_SET_TXDTEI(PLC_TEI_INVAL);
    }

    /* set tx phy header */
    bb_rf_set_tx_phr((uint32_t *)phr);
    /* config tx phy header info */
    bb_rf_cfg_tx_phr_info(phr_info);
    /* config tx payload info */
    bb_rf_cfg_tx_pld_info(pld_info, pb_num);
    return;
}

static void bb_cpu_tx_mpdu_start(rf_tx_mpdu_start *mpdu)
{
    bb_rf_hw_info_t *phr_info = NULL;
    bb_rf_hw_info_t *pld_info = NULL;
    rf_tx_pb_start *pb = NULL;
    uint32_t pbaddr = 0;
    uint32_t pb_size = 0;
    uint32_t pb_mum = 0;
    uint32_t need_ack = 0;
    uint32_t crc32_en = 0;
    phr_info = bb_rf_get_phr_hw_info(mpdu->option, mpdu->phr_mcs);

    bb_cpu_rf_fc_t fc_msg = {0};
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), &mpdu->phr0, &fc_msg);

    if (fc_msg.delimiter == FC_DELIM_BEACON ||
        fc_msg.delimiter == FC_DELIM_SOF) {
        pld_info = bb_rf_get_pld_hw_info(mpdu->option,
            mpdu->blkz, mpdu->pld_mcs);
        pb_size = phy_rf_get_pbsz(mpdu->blkz);

        /* maybe multiple pbs */
        // pb_mum = mpdu->pb_num;
        pb_mum = 1;

        pb = mpdu->pb;
        pbaddr = pb->pb_buf_addr;

        need_ack = mpdu->need_ack;
        crc32_en = mpdu->crc32_en;
    }
    bb_cpu_config_bb_tx(phr_info, pld_info, (void *)&mpdu->phr0,
        pbaddr, pb_size, pb_mum, need_ack, crc32_en);
}

static void bb_cpu_tx_phr_start(uint32_t *phr, uint32_t phr_mcs)
{
    bb_rf_hw_info_t *phr_info;
    phr_info = bb_rf_get_phr_hw_info(bb_cpu_get_option(), phr_mcs);

    bb_cpu_config_bb_tx(phr_info, NULL, (void *)phr,
        0, 0, 0, 0, 0);
}

static void bb_cpu_backfill_tx_desc(uint32_t timeout)
{
    rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)BB_CPU_GET_TX_MPDU();
    rf_tx_mpdu_end *tx_end = mpdu->tx_status;
    /* notify hw tx done */
    mpdu->notify_hw_tx_done = 1;
    /* notify sw tx done */
    tx_end->tx_done = 1;

    // TODO: need to backfill the tx desc
    if (timeout) {
        bb_cpu_backfill_txntb_to_desc(&mpdu->phr0, 0);
    } else {
    }
    return;
}

static void bb_cpu_backfill_tx_desc_sack(uint32_t timeout)
{
    rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)BB_CPU_GET_TX_MPDU();
    rf_tx_mpdu_end *tx_end = mpdu->tx_status;
    tx_end->rx_sack_ok = 0;
    if (timeout) {
        /* notify sw that the tx did not complete in time */
        tx_end->tx_ok = 0;
    } else {
        /* notify sw that the tx succeeded */
        tx_end->tx_ok = 1;

        uint32_t phr[4] = { 0 };
        /* get phy header */
        bb_rf_get_rx_phr(phr);

        bb_cpu_rf_fc_t fc_msg = {0};
        bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);

        if (fc_msg.delimiter == FC_DELIM_SACK) {
            tx_end->rx_sack_ok = !fc_msg.result_in_sack;
        }
    }
    return;
}

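/* Build the SACK frame control from the just-received SOF: keep the network
 * id, address the SACK back to the SOF's source tei, report the payload
 * crc24 result (plus the rx snr for SG), then hand the assembled phr to
 * bb_cpu_tx_phr_start() for a header-only tx.
 */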
static void bb_cpu_prepare_tx_sack(uint32_t phr_mcs, uint32_t rx_snr)
{
    uint32_t sack_phr_tbl[4] = { 0 };
    uint32_t phr[4] = { 0 };
    bb_rf_get_rx_phr(phr);

    switch (BB_CPU_GET_PROTO()) {
#if SUPPORT_SMART_GRID
    case PLC_PROTO_TYPE_SG:
    {
        frame_control_t *sack_phr = (frame_control_t *)sack_phr_tbl;
        frame_control_t *phr_t = (frame_control_t *)phr;
        IOT_ASSERT(FC_DELIM_SOF == phr_t->delimiter_type);

        sack_phr->delimiter_type = FC_DELIM_SACK;
        sack_phr->network_type = 0;
        sack_phr->nid = phr_t->nid;

        sack_phr->vf.rf_sack.rx_result = !!bb_rf_get_pld_crc24_err();
        sack_phr->vf.rf_sack.resv0 = 0;
        sack_phr->vf.rf_sack.stei = phr_t->vf.rf_sof.dst_tei;
        sack_phr->vf.rf_sack.dtei = phr_t->vf.rf_sof.src_tei;
        sack_phr->vf.rf_sack.resv1 = 0;
        sack_phr->vf.rf_sack.snr = rx_snr;
        sack_phr->vf.rf_sack.load = 0;
        sack_phr->vf.rf_sack.resv2 = 0;
        sack_phr->vf.rf_sack.ext_deli = 0;
        sack_phr->vf.rf_sack.version = SG_STANDARD_VERSION;
        break;
    }
#endif
#if SUPPORT_SOUTHERN_POWER_GRID
    case PLC_PROTO_TYPE_SPG:
    {
        spg_frame_control_t *sack_phr = (spg_frame_control_t *)sack_phr_tbl;
        spg_frame_control_t *phr_t = (spg_frame_control_t *)phr;
        IOT_ASSERT(FC_DELIM_SOF == phr_t->delimiter_type);

        sack_phr->delimiter_type = FC_DELIM_SACK;
        sack_phr->access_ind = phr_t->access_ind;
        sack_phr->snid = phr_t->snid;

        sack_phr->vf.rf_sack.rx_result = !!bb_rf_get_pld_crc24_err();
        sack_phr->vf.rf_sack.resv0 = 0;
        sack_phr->vf.rf_sack.dtei = phr_t->vf.rf_sof.src_tei;
        sack_phr->vf.rf_sack.resv1 = 0;
        sack_phr->vf.rf_sack.resv2 = 0;
        sack_phr->vf.rf_sack.resv3 = 0;
        sack_phr->vf.rf_sack.ext_deli = 0;
        sack_phr->vf.rf_sack.version = SPG_STANDARD_VERSION;
        break;
    }
#endif
    default:
        IOT_ASSERT(0);
    }

    /* config bb */
    bb_cpu_tx_phr_start(sack_phr_tbl, phr_mcs);
}

static uint32_t bb_cpu_get_sig_and_cfg_bb()
 | 
						|
{
 | 
						|
    uint32_t ret = 1;
 | 
						|
    /* TODO: maybe add timeout */
 | 
						|
    while (!bb_rf_get_rx_sig_is_ready());
 | 
						|
    if (!bb_rf_get_rx_sig_is_err()) {
 | 
						|
        bb_rf_hw_info_t *phr_info = NULL;
 | 
						|
        uint32_t option = bb_cpu_get_option();
 | 
						|
        uint32_t phr_mcs_id = bb_rf_get_rx_sig_info();
 | 
						|
        if (phr_mcs_id >= PHY_MCS_MAX) {
 | 
						|
            bb_cpu_printf("rx phrmcs err:%d\n", phr_mcs_id);
 | 
						|
            phr_mcs_id = PHY_MCS_MAX - 1;
 | 
						|
        }
 | 
						|
        phr_info = bb_rf_get_phr_hw_info(option, phr_mcs_id);
 | 
						|
        /* config phy header info */
 | 
						|
        bb_rf_cfg_rx_phr_info(phr_info);
 | 
						|
        /* set rx state continue */
 | 
						|
        bb_rf_set_rx_state_cont(1);
 | 
						|
        ret = 0;
 | 
						|
    }
 | 
						|
    return ret;
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_calu_tx_fl(void *phr, uint32_t phr_mcs)
 | 
						|
{
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
 | 
						|
    uint32_t frame_len = 0;
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
        fc_msg.delimiter == FC_DELIM_SOF) {
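        /* Airtime of a frame that carries a payload: preamble (STF/LTF) +
         * SIG + PHR (at phr_mcs) + PSDU at the MCS and PB size advertised
         * in the frame control.
         */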
 | 
						|
        frame_len = phy_rf_get_g_stf_ltf_fl() + phy_rf_get_g_sig_fl() +
 | 
						|
            phy_rf_get_g_phr_fl(phr_mcs) +
 | 
						|
            phy_rf_get_g_psdu_fl(fc_msg.rf_mcs, fc_msg.rf_pb_sz_idx);
 | 
						|
    } else if (fc_msg.delimiter == FC_DELIM_SACK) {
 | 
						|
        /* tx sack use fixed phr mcs */
 | 
						|
        frame_len = phy_rf_get_g_stf_ltf_fl() + phy_rf_get_g_sig_fl() +
 | 
						|
            phy_rf_get_g_phr_fl(phy_rf_get_g_tx_sack_mcs());
 | 
						|
    } else if (fc_msg.delimiter == FC_DELIM_RF_TEST) {
 | 
						|
        frame_len = phy_rf_get_g_stf_ltf_fl() + phy_rf_get_g_sig_fl() +
 | 
						|
            phy_rf_get_g_phr_fl(phr_mcs);
 | 
						|
    } else {
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
 | 
						|
    /* The actual transmit time is slightly longer than the computed frame
     * length, so add a 1 ms margin to prevent the timeout interrupt and the
     * tx done interrupt from firing simultaneously.
     */
 | 
						|
    return frame_len + BB_CPU_TX_TIMEOUT_MARGIN;
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_calu_rx_fl_from_phr(uint32_t *phr)
 | 
						|
{
 | 
						|
    uint32_t frame_len = 0;
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON) {
 | 
						|
        frame_len = phy_rf_get_g_psdu_fl(fc_msg.rf_mcs, fc_msg.rf_pb_sz_idx) +
 | 
						|
            RF_MAC_BIFS_US;
 | 
						|
    } else if (fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
        frame_len = phy_rf_get_g_psdu_fl(fc_msg.rf_mcs, fc_msg.rf_pb_sz_idx);
 | 
						|
        /* calculate fl */
 | 
						|
        if (fc_msg.nid != 0 &&
 | 
						|
            fc_msg.dst_tei != 0 &&
 | 
						|
            fc_msg.dst_tei != 0xFFF) {
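            /* Unicast SOF: extend the rx window to also cover the RIFS
             * turnaround, the responder's SACK airtime (preamble + SIG +
             * PHR only) and the trailing CIFS.
             */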
 | 
						|
            /* TODO: some rule to select phr mcs */
 | 
						|
            uint32_t sack_len = phy_rf_get_g_stf_ltf_fl() +
 | 
						|
                phy_rf_get_g_sig_fl() +
 | 
						|
                phy_rf_get_g_phr_fl(phy_rf_get_g_tx_sack_mcs());
 | 
						|
            frame_len += sack_len + RF_MAC_CIFS_US + RF_MAC_RX_RIFS_US;
 | 
						|
        } else {
 | 
						|
            frame_len += RF_MAC_CIFS_US;
 | 
						|
        }
 | 
						|
    }
 | 
						|
 | 
						|
    return frame_len;
 | 
						|
}
 | 
						|
 | 
						|
static bb_rf_hw_info_t * bb_cpu_get_hw_info(void *phr)
 | 
						|
{
 | 
						|
    bb_rf_hw_info_t *pld_info = NULL;
 | 
						|
    uint32_t option = bb_cpu_get_option();
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
        fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
        pld_info = bb_rf_get_pld_hw_info(option, fc_msg.rf_pb_sz_idx,
 | 
						|
            fc_msg.rf_mcs);
 | 
						|
    }
 | 
						|
 | 
						|
    return pld_info;
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_get_pb_size(void *phr)
 | 
						|
{
 | 
						|
    uint32_t pb_size = 0;
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
        fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
        pb_size = phy_rf_get_pbsz(fc_msg.rf_pb_sz_idx);
 | 
						|
    }
 | 
						|
 | 
						|
    return pb_size;
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_cfg_rx_pld_info_to_bb(uint32_t *phr, uint8_t *rx_buf)
 | 
						|
{
 | 
						|
    uint32_t need_rx_pld = 0;
 | 
						|
    bb_rf_hw_info_t *pld_info = NULL;
 | 
						|
    uint32_t pb_size = 0, pb_num = 1;
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
        fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
        pld_info = bb_cpu_get_hw_info(phr);
 | 
						|
        IOT_ASSERT(pld_info != NULL);
 | 
						|
 | 
						|
        need_rx_pld = 1;
 | 
						|
 | 
						|
        pb_size = bb_cpu_get_pb_size(phr);
 | 
						|
        IOT_ASSERT(pb_size);
 | 
						|
 | 
						|
        /* clear rx info */
 | 
						|
        bb_rf_clear_rx_info();
 | 
						|
 | 
						|
        IOT_ASSERT(rx_buf);
 | 
						|
        if (BB_CPU_TXRX_USE_DMA) {
 | 
						|
            /* start dma */
 | 
						|
            bb_cpu_dma_start(RF_PHY_RX_DMA_BASEADDR, (uint32_t)rx_buf,
 | 
						|
                pb_size * pb_num);
 | 
						|
        } else {
 | 
						|
            IOT_ASSERT(BB_CPU_GET_RX_BUF() == NULL);
 | 
						|
            /* use copy */
 | 
						|
            BB_CPU_SET_RX_BUF(rx_buf);
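            /* Without DMA the buffer pointer is only stashed here; the
             * payload is copied out of the bb by bb_cpu_get_data_from_bb()
             * once BB_CPU_EVENT_RX_PLD_START_ID arrives.
             */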
 | 
						|
        }
 | 
						|
 | 
						|
        /* need to check crc32 or not */
 | 
						|
        if (bb_cpu_mac_get_rx_crc32_check() &&
 | 
						|
            fc_msg.delimiter == FC_DELIM_BEACON &&
 | 
						|
            BB_CPU_GET_PROTO() == PLC_PROTO_TYPE_SG) {
 | 
						|
            bb_rf_set_crc32_en(1);
 | 
						|
        } else {
 | 
						|
            bb_rf_set_crc32_en(0);
 | 
						|
        }
 | 
						|
        /* config rx payload info */
 | 
						|
        bb_rf_cfg_rx_pld_info(pld_info, pb_num);
 | 
						|
        /* set rx state continue */
 | 
						|
        bb_rf_set_rx_state_cont(1);
 | 
						|
        /* set rx dec continue */
 | 
						|
        bb_rf_set_dec_cont(1);
 | 
						|
    }
 | 
						|
 | 
						|
    return need_rx_pld;
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_tx_hdl_idle_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    uint32_t frame_len = 0, hwqid;
 | 
						|
    if (BB_CPU_EVENT_MAC_TX_FILL_INFO_ID == event_id) {
 | 
						|
        if (BB_CPU_GET_TXDTEI()) {
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* record hwqid */
 | 
						|
        hwqid = bb_cpu_mac_get_cur_hwqid();
 | 
						|
        BB_CPU_SET_HWQID(hwqid);
 | 
						|
 | 
						|
        rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)bb_cpu_mac_get_txq_ptr();
 | 
						|
        if (mpdu->desc_type) {
 | 
						|
            bb_cpu_printf("rftxdummy err, hwq:%d, csma:%d, tdma:%d\n",
 | 
						|
                hwqid, bb_cpu_mac_judge_cur_cmd_need_tx_csma(),
 | 
						|
                bb_cpu_mac_judge_cur_cmd_need_tx_tdma());
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* store tx mpdu addr */
 | 
						|
        BB_CPU_SET_TX_MPDU(bb_cpu_mac_get_txq_ptr());
 | 
						|
        /* config bb */
 | 
						|
        bb_cpu_tx_mpdu_start(mpdu);
 | 
						|
        /* bb cpu set tx to wait complete state */
 | 
						|
        bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_WAIT_COMPLETE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu trigger bb to tx */
 | 
						|
        bb_cpu_trigger_bb(BB_CPU_TRIGGER_BB_TX);
 | 
						|
        /* frame length from mpdu */
 | 
						|
        frame_len = bb_cpu_calu_tx_fl((void *)&mpdu->phr0, mpdu->phr_mcs);
 | 
						|
        /* config timer and wait timeout */
 | 
						|
        bb_cpu_timer_restart(TIMER_OF_TX, frame_len);
 | 
						|
        if (!BB_CPU_TXRX_USE_DMA) {
 | 
						|
            /* send data to bb replace dma */
 | 
						|
            bb_cpu_send_data_to_bb(mpdu);
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("txdile fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        /* BB_CPU_EVENT_RX_ABORT_ID will not appear in tx state */
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_tx_hdl_wait_complete_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    uint32_t time = 0;
 | 
						|
    /* stop tx timer */
 | 
						|
    bb_cpu_stop_and_clr_tx_timer();
 | 
						|
    if (event_id == BB_CPU_EVENT_TX_TIMEOUT_ID) {
 | 
						|
        bb_cpu_backfill_tx_desc(1);
 | 
						|
        if (BB_CPU_GET_TXDTEI()) {
 | 
						|
            BB_CPU_SET_TXDTEI(PLC_TEI_INVAL);
 | 
						|
            bb_cpu_backfill_tx_desc_sack(1);
 | 
						|
        }
 | 
						|
        /* timeout reset bb/rf */
 | 
						|
        bb_cpu_tx_reset();
 | 
						|
        /* clear tx complete event */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_TX_COMP_ID);
 | 
						|
        /* bb cpu set tx to idle state */
 | 
						|
        bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac tx done */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_TX_DONE);
 | 
						|
    } else if (event_id == BB_CPU_EVENT_TX_COMP_ID) {
 | 
						|
        /* tx ok with pld cnt increase */
 | 
						|
        mac_rf_tx_ok_with_pld_cnt_inc();
 | 
						|
        if (BB_CPU_GET_TXDTEI()) {
 | 
						|
            // need wait sack
 | 
						|
            /* backfill tx desc */
 | 
						|
            bb_cpu_backfill_tx_desc(0);
 | 
						|
            /* bb cpu set tx to idle state */
 | 
						|
            bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* bb cpu trigger mac need rx sack */
 | 
						|
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_NEED_RX_SACK);
 | 
						|
            /* config timer and wait timeout, time = wait sig + wait phr */
 | 
						|
            time = RF_MAC_RX_RIFS_US + phy_rf_get_g_stf_ltf_fl() +
 | 
						|
                phy_rf_get_g_sig_fl() +
 | 
						|
                phy_rf_get_g_phr_fl(phy_rf_get_g_tx_sack_mcs());
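            /* The wait window is the responder turnaround (RIFS) plus the
             * full airtime of a SACK at the fixed SACK PHR MCS; a SACK
             * carries no payload, so there is no PSDU term.
             */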
 | 
						|
            if (mac_rf_get_cert_flag()) {
 | 
						|
                /* for certification mode, add 1ms as margin */
 | 
						|
                time += RF_MAC_RX_SACK_CERT_MARGIN;
 | 
						|
            }
 | 
						|
            bb_cpu_timer_restart(TIMER_OF_WAIT_SACK, time);
 | 
						|
        } else {
 | 
						|
            /* backfill tx desc */
 | 
						|
            bb_cpu_backfill_tx_desc(0);
 | 
						|
            /* bb cpu set tx to idle state */
 | 
						|
            bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* bb cpu trigger mac tx done */
 | 
						|
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_TX_DONE);
 | 
						|
        }
 | 
						|
    } else if (event_id == BB_CPU_EVENT_RST_ID ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_TX_ABORT_ID == event_id) {
 | 
						|
        if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* bb cpu set tx to idle state */
 | 
						|
        bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter wait sack tx done */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_WAIT_TX_DONE_ID);
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("txwaitcomp fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        /* BB_CPU_EVENT_RX_ABORT_ID will not appear in tx state */
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_tx_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
#if BB_CPU_DEBUG_PRINT
 | 
						|
   if (event_id != BB_CPU_EVENT_MAC_TX_FILL_INFO_ID) {
 | 
						|
        bb_cpu_printf("tx fsm:%d, evt:%d\n", BB_CPU_GET_TX_FSM(), event_id);
 | 
						|
    }
 | 
						|
#endif
 | 
						|
    switch (BB_CPU_GET_TX_FSM()) {
 | 
						|
    case BB_CPU_TX_STATE_IDLE:
 | 
						|
    {
 | 
						|
        bb_cpu_tx_hdl_idle_sm(event_id);
 | 
						|
        break;
 | 
						|
    }
 | 
						|
    case BB_CPU_TX_STATE_WAIT_COMPLETE:
 | 
						|
    {
 | 
						|
        bb_cpu_tx_hdl_wait_complete_sm(event_id);
 | 
						|
        break;
 | 
						|
    }
 | 
						|
    default:
 | 
						|
        break;
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_config_start()
 | 
						|
{
 | 
						|
    /* if bb is rx state, do not init rx again */
 | 
						|
    if (BB_CPU_GET_RX_FLAG()) {
 | 
						|
        return;
 | 
						|
    }
 | 
						|
 | 
						|
    /* need handle sleep or not */
 | 
						|
    if (mac_rf_is_pa_disable()) {
 | 
						|
        mac_rf_disable_pa(0);
 | 
						|
        /* ip request delay 1ms */
 | 
						|
        iot_delay_us(1000);
 | 
						|
        bb_rf_jesd_reset();
 | 
						|
        bb_rf_init(BB_CPU_GET_OPTION(), 1);
 | 
						|
    } else {
 | 
						|
        bb_rf_jesd_reset();
 | 
						|
    }
 | 
						|
    /* clear tx iq phase */
 | 
						|
    bb_rf_set_tx_iq_phase(0, 0);
 | 
						|
    /* reset rx before each rx configuration */
 | 
						|
    bb_cpu_rx_reset();
 | 
						|
    /* clear sig */
 | 
						|
    bb_cpu_clr_evt(BB_CPU_EVENT_RX_SIG_ID);
 | 
						|
    uint32_t set_if = 0;
 | 
						|
    /* The channel id changed and we are not waiting to tx a sack.
     * While waiting to tx a sack, do not update the current channel.
     */
 | 
						|
    if (BB_CPU_GET_CHANNEL_FREQ() != mac_rf_get_cur_channel_freq() &&
 | 
						|
        BB_CPU_GET_TXDTEI() == PLC_TEI_INVAL) {
 | 
						|
        mac_rf_set_cur_channel_freq(BB_CPU_GET_CHANNEL_FREQ());
 | 
						|
        mac_rf_set_cur_channel_id(BB_CPU_GET_CHANNEL_ID());
 | 
						|
    }
 | 
						|
    int64_t ppmhz = ((int64_t)mac_rf_get_wphy_ppm() *
 | 
						|
        BB_CPU_GET_CHANNEL_FREQ()) >> 26;
 | 
						|
    set_if = mac_rf_get_cur_channel_freq() - (int32_t)ppmhz;
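    /* Sketch of the assumed arithmetic (the Q26 scaling is inferred from
     * the >> 26): if mac_rf_get_wphy_ppm() returns the fractional clock
     * error in Q26 fixed point, ppmhz = error * channel_freq is the offset
     * in Hz, e.g. a 10 ppm error (~671 in Q26) on a 470 MHz channel gives
     * roughly 4.7 kHz, which is subtracted from the programmed IF.
     */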
 | 
						|
    IOT_ASSERT(set_if);
 | 
						|
    /* rx config */
 | 
						|
    bb_rf_rx_cfg(BB_CPU_GET_OPTION(), set_if);
 | 
						|
 | 
						|
    /* bb cpu trigger bb to tx */
 | 
						|
    bb_cpu_trigger_bb(BB_CPU_TRIGGER_BB_RX);
 | 
						|
    BB_CPU_SET_RX_FLAG(1);
 | 
						|
    /* clear txcfg1 valid */
 | 
						|
    BB_CPU_SET_TXCFG1_VLD(0);
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_get_data_from_bb(uint32_t *phr)
 | 
						|
{
 | 
						|
    uint32_t pb_size;
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
        fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
        pb_size = bb_cpu_get_pb_size(phr);
 | 
						|
        /* cpu read data */
 | 
						|
        bb_rf_read_data_from_bb(pb_size, BB_CPU_GET_RX_BUF());
 | 
						|
        BB_CPU_SET_RX_BUF(NULL);
 | 
						|
        while (!bb_rf_get_pld_crc_rdy());
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_rx_judge_need_tx_sack()
 | 
						|
{
 | 
						|
    uint32_t need_sack = 0;
 | 
						|
    uint32_t phr[4] = { 0 };
 | 
						|
    if (BB_CPU_GET_TXDTEI()) {
 | 
						|
        return need_sack;
 | 
						|
    }
 | 
						|
    /* get phy header */
 | 
						|
    bb_rf_get_rx_phr(phr);
 | 
						|
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_SOF) {
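        /* A SACK is owed only for a unicast SOF addressed to this station:
         * nid and dst_tei must be non-zero, dst_tei must not be the
         * broadcast TEI (0xFFF), and both must match the local hw nid/tei.
         */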
 | 
						|
        if (fc_msg.nid != 0 &&
 | 
						|
            fc_msg.dst_tei != 0 &&
 | 
						|
            fc_msg.dst_tei != 0xFFF &&
 | 
						|
            mac_get_hw_nid() == fc_msg.nid &&
 | 
						|
            mac_get_hw_tei() == fc_msg.dst_tei) {
 | 
						|
            need_sack = 1;
 | 
						|
        }
 | 
						|
    }
 | 
						|
 | 
						|
    return need_sack;
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_cpu_rx_judge_is_mine_sack()
 | 
						|
{
 | 
						|
    uint32_t is_mine_sack = 0;
 | 
						|
    uint32_t phr[4] = { 0 };
 | 
						|
    if (BB_CPU_GET_TXDTEI() == PLC_TEI_INVAL) {
 | 
						|
        return is_mine_sack;
 | 
						|
    }
 | 
						|
    /* get phy header */
 | 
						|
    bb_rf_get_rx_phr(phr);
 | 
						|
 | 
						|
    bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
    bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
 | 
						|
    if (fc_msg.delimiter == FC_DELIM_SACK) {
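        /* The SACK is ours only if its nid and dtei match this station; for
         * the SG protocol the SACK's source TEI must additionally match the
         * TEI we transmitted to (the recorded tx dtei).
         */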
 | 
						|
        if (mac_get_hw_nid() == fc_msg.nid &&
 | 
						|
            mac_get_hw_tei() == fc_msg.dst_tei &&
 | 
						|
            ((BB_CPU_GET_PROTO() == PLC_PROTO_TYPE_SG) ?
 | 
						|
            (BB_CPU_GET_TXDTEI() == fc_msg.src_tei) : 1)) {
 | 
						|
            is_mine_sack = 1;
 | 
						|
        }
 | 
						|
    }
 | 
						|
    return is_mine_sack;
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_self_sack(uint32_t timeout)
 | 
						|
{
 | 
						|
    /* the pending tx dtei (need sack) flag must be set here */
 | 
						|
    IOT_ASSERT(BB_CPU_GET_TXDTEI());
 | 
						|
    /* stop wait sack timer */
 | 
						|
    bb_cpu_timer_stop(TIMER_OF_WAIT_SACK);
 | 
						|
    bb_cpu_clr_evt(BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID);
 | 
						|
    /* clear tx need sack flag */
 | 
						|
    BB_CPU_SET_TXDTEI(PLC_TEI_INVAL);
 | 
						|
    bb_cpu_backfill_tx_desc_sack(timeout);
 | 
						|
    /* bb cpu trigger mac rx sack done */
 | 
						|
    bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_SACK_DONE);
 | 
						|
}
 | 
						|
 | 
						|
static uint32_t bb_rf_get_vaild_rx_phr(uint32_t *phr)
 | 
						|
{
 | 
						|
    uint32_t ret = 1;
 | 
						|
    while (!bb_rf_get_phr_crc_rdy());
 | 
						|
    bb_rf_get_rx_phr(phr);
 | 
						|
    if (!bb_rf_get_phr_crc24_err()) {
 | 
						|
        bb_cpu_rf_fc_t fc_msg = {0};
 | 
						|
        bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
 | 
						|
        if (fc_msg.delimiter == FC_DELIM_BEACON ||
 | 
						|
            fc_msg.delimiter == FC_DELIM_SOF) {
 | 
						|
            if (fc_msg.rf_mcs < PHY_MCS_MAX &&
 | 
						|
                fc_msg.rf_pb_sz_idx < BLOCK_SIZE_MAX) {
 | 
						|
                ret = 0;
 | 
						|
            }
 | 
						|
        } else {
 | 
						|
            ret = 0;
 | 
						|
        }
 | 
						|
    }
 | 
						|
    return ret;
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_listening_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    uint32_t ret = 0;
 | 
						|
    if (BB_CPU_EVENT_RX_SIG_ID == event_id) {
 | 
						|
        /* get sig info and config bb to rx phy header */
 | 
						|
        ret = bb_cpu_get_sig_and_cfg_bb();
 | 
						|
        if (!ret) {
 | 
						|
            /* sig ok cnt increase */
 | 
						|
            mac_rf_rx_sig_ok_cnt_inc();
 | 
						|
            bb_cpu_clr_evt(BB_CPU_EVENT_PS_IDLE_ID);
 | 
						|
            /* bb cpu rx enter wait phy header state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_WAIT_PHR, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* config timer and wait timeout, rx timeout */
 | 
						|
            bb_cpu_timer_restart(TIMER_OF_RX, RF_MAC_EIFS_US);
 | 
						|
        } else {
 | 
						|
#if BB_CPU_RX_EXCEPTION_NOTIFY_PLC_CPU
 | 
						|
            /* sig err, get ring id */
 | 
						|
            bb_cpu_hw_ring_select(NULL);
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_SIG_ERR);
 | 
						|
            /* set rx ring complete, fill desc */
 | 
						|
            bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
            /* set ring rx done */
 | 
						|
            bb_cpu_hw_ring_rx_done_set();
 | 
						|
#endif
 | 
						|
            /* sig error cnt increase */
 | 
						|
            mac_rf_rx_sig_err_cnt_inc();
 | 
						|
            /* timeout reset bb/rf */
 | 
						|
            bb_cpu_rx_reset();
 | 
						|
            /* clear out specific unhandled interrupts */
 | 
						|
            bb_cpu_clr_multi_evt((1 << BB_CPU_EVENT_RX_PHR_ID) |
 | 
						|
                (1 << BB_CPU_EVENT_PS_IDLE_ID));
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* bb cpu trigger mac rx done */
 | 
						|
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
        }
 | 
						|
    } else if (BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID == event_id) {
 | 
						|
        bb_cpu_rx_hdl_self_sack(1);
 | 
						|
    } else if (BB_CPU_EVENT_RST_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_RX_ABORT_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
 | 
						|
        if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            bb_cpu_printf("rxlisten cmdlist done err\n");
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        if (BB_CPU_EVENT_RX_ABORT_ID == event_id &&
 | 
						|
            bb_cpu_mac_judge_cur_cmd_need_tx_csma()) {
 | 
						|
            bb_cpu_printf("rxlisten rx abort but tx csma\n");
 | 
						|
            /* csma should not appear rx abort event */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* in some cases, early stop and rx sig trigger together,
         * and rx sig must be cleared after early stop is processed
         */
        bb_cpu_clr_multi_evt((1 << BB_CPU_EVENT_RX_SIG_ID) |
        /* in some cases, early stop and rx abort trigger together,
         * and rx abort must be cleared after early stop is processed
         */
            (1 << BB_CPU_EVENT_RX_ABORT_ID) |
        /* in some cases, early stop and backoff timeout trigger together,
         * and backoff timeout must be cleared after early stop is processed
         */
            (1 << BB_CPU_EVENT_BACKOFF_TIMEOUT_ID) |
        /* in some cases, early stop and wait rx sack timeout trigger together,
         * and wait rx sack timeout must be cleared after early stop is
         * processed
         */
            (1 << BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID) |
        /* in some cases, early stop and set power save idle trigger together,
         * and set power save idle must be cleared after early stop is
         * processed
         */
            (1 << BB_CPU_EVENT_PS_IDLE_ID));
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter reset */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_ID);
 | 
						|
    } else if (BB_CPU_EVENT_BACKOFF_TIMEOUT_ID == event_id) {
 | 
						|
        /* timeout reset bb/rf */
 | 
						|
        bb_cpu_rf_reset();
 | 
						|
        /* config tx cfg in advance on csma */
 | 
						|
        bb_rf_jesd_reset();
 | 
						|
        /* bb rf tx switch1 */
 | 
						|
        bb_rf_tx_switch_step1();
 | 
						|
        BB_CPU_SET_TXCFG1_VLD(1);
 | 
						|
        /* clear out specific unhandled interrupts */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_SIG_ID);
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_PHR_ID);
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac backoff complete, cpu2 is ready */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_BBCPU_IS_READY);
 | 
						|
    } else if (BB_CPU_EVENT_CSMA_TX_CHECK_ID == event_id) {
 | 
						|
        /* update hwq mpdu pointer */
 | 
						|
        bb_cpu_csma_check_txq();
 | 
						|
    } else if (BB_CPU_EVENT_SET_CHANNEL_ID == event_id) {
 | 
						|
        /* if rx listening state, restart rx with new config */
 | 
						|
        uint32_t bb_evt = glb_fsm_ctxt.bb_cpu_evt_tmp;
 | 
						|
        if (!(bb_evt & ((1 << BB_CPU_EVENT_RST_ID) |
 | 
						|
            (1 << BB_CPU_EVENT_CMDLIST_DONE_ID) |
 | 
						|
            (1 << BB_CPU_EVENT_RX_ABORT_ID) |
 | 
						|
            (1 << BB_CPU_EVENT_RX_SIG_ID) |
 | 
						|
            (1 << BB_CPU_EVENT_BACKOFF_TIMEOUT_ID)))) {
 | 
						|
            if (!mac_rf_is_pa_disable()) {
 | 
						|
                /* trigger bb to rx */
 | 
						|
                bb_cpu_rx_config_start();
 | 
						|
            }
 | 
						|
        }
 | 
						|
    } else if (BB_CPU_EVENT_PS_IDLE_ID == event_id) {
 | 
						|
        if (mac_rf_get_bb_ps_idle()) {
 | 
						|
            /* if rx listening state, restart rx with new config */
 | 
						|
            uint32_t bb_evt = glb_fsm_ctxt.bb_cpu_evt_tmp;
 | 
						|
            if (!(bb_evt & ((1 << BB_CPU_EVENT_RST_ID) |
 | 
						|
                (1 << BB_CPU_EVENT_CMDLIST_DONE_ID) |
 | 
						|
                (1 << BB_CPU_EVENT_RX_ABORT_ID) |
 | 
						|
                (1 << BB_CPU_EVENT_RX_SIG_ID) |
 | 
						|
                (1 << BB_CPU_EVENT_BACKOFF_TIMEOUT_ID)))) {
 | 
						|
                /* reset rf */
 | 
						|
                bb_cpu_rf_reset();
 | 
						|
                mac_rf_disable_pa(1);
 | 
						|
                /* clear sig */
 | 
						|
                bb_cpu_clr_evt(BB_CPU_EVENT_RX_SIG_ID);
 | 
						|
            }
 | 
						|
        } else {
 | 
						|
            /* trigger bb to rx */
 | 
						|
            bb_cpu_rx_config_start();
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("rxlisten fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_update_snr_rssi_info()
 | 
						|
{
 | 
						|
#if HPLC_BBCPU_CALC_SNR_RSSI
 | 
						|
    int8_t snr, rssi;
 | 
						|
    uint8_t rx_gain;
 | 
						|
    uint16_t raw_snr;
 | 
						|
    bb_rf_get_snr_rssi(BB_CPU_GET_OPTION(), &snr, &rssi, &rx_gain, &raw_snr);
 | 
						|
    /* update rx snr */
 | 
						|
    bb_cpu_hw_ring_snr_set(snr);
 | 
						|
    bb_cpu_hw_ring_raw_snr_set(raw_snr);
 | 
						|
    /* update rx rssi */
 | 
						|
    bb_cpu_hw_ring_rssi_set(rssi);
 | 
						|
    /* update rx gain */
 | 
						|
    bb_cpu_hw_ring_rx_gain_set(rx_gain);
 | 
						|
#else /* HPLC_BBCPU_CALC_SNR_RSSI */
 | 
						|
    bb_cpu_hw_ring_raw_snr_rssi_set(bb_rf_get_raw_snr_rssi_reg1(),
 | 
						|
        bb_rf_get_raw_snr_rssi_reg2(), bb_rf_get_raw_snr_rssi_reg3());
 | 
						|
#endif /* HPLC_BBCPU_CALC_SNR_RSSI */
 | 
						|
    /* update rx ppmhz */
 | 
						|
    bb_cpu_hw_ring_rx_ppmhz_set(bb_rf_get_evaluate_ppm_hz());
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_wait_phr_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    uint32_t ret = 0;
 | 
						|
    uint32_t phr[4] = { 0 };
 | 
						|
    uint32_t rx_fl = 0;
 | 
						|
    uint8_t *rx_buf;
 | 
						|
    /* BB_CPU_EVENT_RX_PLD_START_ID is regarded as
 | 
						|
     * BB_CPU_EVENT_RX_TIMEOUT_ID in wait_phr state.
 | 
						|
     */
 | 
						|
    if ((event_id == BB_CPU_EVENT_RX_TIMEOUT_ID) ||
 | 
						|
        (BB_CPU_EVENT_RX_PLD_START_ID == event_id)) {
 | 
						|
#if BB_CPU_RX_EXCEPTION_NOTIFY_PLC_CPU
 | 
						|
        /* phy header err, get ring id */
 | 
						|
        bb_cpu_hw_ring_select(NULL);
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_PHR_TIMEOUT);
 | 
						|
        /* set rx ring complete, fill desc */
 | 
						|
        bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
#endif
 | 
						|
        /* timeout reset bb/rf */
 | 
						|
        bb_cpu_rx_reset();
 | 
						|
        /* clear out specific unhandled interrupts */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_PHR_ID);
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac rx done */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
    } else if (BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID == event_id) {
 | 
						|
        bb_cpu_rx_hdl_self_sack(1);
 | 
						|
    } else if (event_id == BB_CPU_EVENT_RX_PHR_ID) {
 | 
						|
        /* get phy header */
 | 
						|
        ret = bb_rf_get_vaild_rx_phr(phr);
 | 
						|
        if (!ret) {
 | 
						|
            /* update rx phr to hw rx ring */
 | 
						|
            rx_buf = bb_cpu_hw_ring_select(phr);
 | 
						|
            /* get phy header info and config bb to rx pld */
 | 
						|
            if (bb_cpu_cfg_rx_pld_info_to_bb(phr, rx_buf)) {
 | 
						|
                /* phr ok with pld cnt increase */
 | 
						|
                mac_rf_rx_phr_ok_with_pld_cnt_inc();
 | 
						|
                /* calculate frame length */
 | 
						|
                rx_fl = bb_cpu_calu_rx_fl_from_phr(phr);
 | 
						|
                IOT_ASSERT(rx_fl != 0);
 | 
						|
                /* set frame length */
 | 
						|
                bb_cpu_set_vcs_timer(rx_fl + BB_CPU_VCS_MARGIN);
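                /* Hold virtual carrier sense for the expected remaining
                 * frame duration plus a small margin, so the medium is
                 * treated as busy until the exchange should have finished.
                 */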
 | 
						|
                /* config timer and wait timeout, rx timeout */
 | 
						|
                bb_cpu_timer_restart(TIMER_OF_RX, rx_fl);
 | 
						|
                /* need wait pld, bb cpu rx enters wait payload state */
 | 
						|
                bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_WAIT_PLD,
 | 
						|
                    BB_CPU_ENTER_IDLE_NOR);
 | 
						|
                /* update rx snr/rssi/rx_gain/ppm */
 | 
						|
                bb_cpu_rx_update_snr_rssi_info();
 | 
						|
            } else {
 | 
						|
                /* update rx snr/rssi/rx_gain/ppm */
 | 
						|
                bb_cpu_rx_update_snr_rssi_info();
 | 
						|
                /* timeout reset bb/rf */
 | 
						|
                bb_cpu_rx_reset();
 | 
						|
                /* phr ok without pld cnt increase */
 | 
						|
                mac_rf_rx_phr_ok_without_pld_cnt_inc();
 | 
						|
                /* only a phy header was received, set cifs */
 | 
						|
                bb_cpu_set_vcs_timer(RF_MAC_CIFS_US);
 | 
						|
                bb_cpu_stop_and_clr_rx_timer();
 | 
						|
                /* bb cpu rx enters rx complete state */
 | 
						|
                bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_RX_COMPLETE,
 | 
						|
                    BB_CPU_ENTER_IDLE_NOR);
 | 
						|
                /* set event to rx backfill desc */
 | 
						|
                bb_cpu_rx_sm(BB_CPU_EVENT_RX_BACKFILL_DESC_ID);
 | 
						|
            }
 | 
						|
        } else {
 | 
						|
#if BB_CPU_RX_EXCEPTION_NOTIFY_PLC_CPU
 | 
						|
            /* phy header err, get ring id */
 | 
						|
            bb_cpu_hw_ring_select(NULL);
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_PHR_ERR);
 | 
						|
            /* update rx snr/rssi/rx_gain/ppm */
 | 
						|
            bb_cpu_rx_update_snr_rssi_info();
 | 
						|
            /* set rx ring complete, fill desc */
 | 
						|
            bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
            /* set ring rx done */
 | 
						|
            bb_cpu_hw_ring_rx_done_set();
 | 
						|
#endif
 | 
						|
            /* phr error cnt increase */
 | 
						|
            mac_rf_rx_phr_err_cnt_inc();
 | 
						|
            /* timeout reset bb/rf */
 | 
						|
            bb_cpu_rx_reset();
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* bb cpu trigger mac rx done */
 | 
						|
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
        }
 | 
						|
    } else if (BB_CPU_EVENT_RST_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
 | 
						|
        if ((BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            bb_cpu_printf("rxwaitphr cmdlist done err\n");
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
#if BB_CPU_RX_EXCEPTION_NOTIFY_PLC_CPU
 | 
						|
        /* phy header err, get ring id */
 | 
						|
        bb_cpu_hw_ring_select(NULL);
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PHR_RESET);
 | 
						|
        /* set rx ring complete, fill desc */
 | 
						|
        bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
#endif
 | 
						|
        /* in some cases, early stop and rx phr trigger together,
         * and rx phr must be cleared after early stop is processed
         */
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_PHR_ID);
        /* in some cases, early stop and wait sack timeout trigger together,
         * and wait sack timeout must be cleared after early stop is processed
         */
        bb_cpu_clr_evt(BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID);
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter reset */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_ID);
 | 
						|
    } else if (BB_CPU_EVENT_RX_ABORT_ID == event_id) {
 | 
						|
        if (bb_cpu_mac_judge_cur_cmd_need_tx_tdma()) {
 | 
						|
#if BB_CPU_RX_EXCEPTION_NOTIFY_PLC_CPU
 | 
						|
            /* phy header err, get ring id */
 | 
						|
            bb_cpu_hw_ring_select(NULL);
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PHR_RX_ABORT);
 | 
						|
            /* set rx ring complete, fill desc */
 | 
						|
            bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
            /* set ring rx done */
 | 
						|
            bb_cpu_hw_ring_rx_done_set();
 | 
						|
#endif
 | 
						|
            /* in some cases, rx abort and rx phr trigger together,
             * and rx phr must be cleared after rx abort is processed
             */
            bb_cpu_clr_evt(BB_CPU_EVENT_RX_PHR_ID);
            /* in some cases, rx abort and wait sack timeout trigger together,
             * and wait sack timeout must be cleared after rx abort is
             * processed
             */
            bb_cpu_clr_evt(BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID);
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* set reset reason */
 | 
						|
            bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
            /* send event to global sm, enter reset */
 | 
						|
            bb_cpu_global_sm(BB_CPU_EVENT_RST_ID);
 | 
						|
        } else {
 | 
						|
            bb_cpu_printf("rxwaitphr rx abort but tx csma\n");
 | 
						|
            /* csma should not appear rx abort event */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("rxwaitphr fsm:%d, evt:%d, vcs:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id, bb_cpu_mac_get_vcs_sts());
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_add_pld_cnt()
 | 
						|
{
 | 
						|
    if (bb_rf_get_pld_crc24_err()) {
 | 
						|
        mac_rf_rx_pld_err_cnt_inc();
 | 
						|
    } else {
 | 
						|
        mac_rf_rx_pld_ok_cnt_inc();
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_wait_pld_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    /* stop rx timer */
 | 
						|
    bb_cpu_stop_and_clr_rx_timer();
 | 
						|
    if (event_id == BB_CPU_EVENT_RX_TIMEOUT_ID) {
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PLD_TIMEOUT);
 | 
						|
        /* set rx ring complete, fill desc */
 | 
						|
        bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
        /* timeout reset bb/rf */
 | 
						|
        bb_cpu_rx_reset();
 | 
						|
        BB_CPU_SET_RX_BUF(NULL);
 | 
						|
        /* clear out specific unhandled interrupts */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_PLD_START_ID);
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac rx done */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
        /* rx done set vcs timer BB_CPU_VCS_INTERVAL */
 | 
						|
        bb_cpu_set_vcs_timer(BB_CPU_VCS_INTERVAL);
 | 
						|
    } else if (event_id == BB_CPU_EVENT_RX_PLD_START_ID) {
 | 
						|
        if (!BB_CPU_TXRX_USE_DMA) {
 | 
						|
            uint32_t phr[4] = { 0 };
 | 
						|
            /* get phy header */
 | 
						|
            bb_rf_get_rx_phr(phr);
 | 
						|
            /* start move data */
 | 
						|
            bb_cpu_get_data_from_bb(phr);
 | 
						|
        }
 | 
						|
        /* add pld cnt */
 | 
						|
        bb_cpu_add_pld_cnt();
 | 
						|
        /* bb cpu rx enters rx complete state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_RX_COMPLETE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* set event to rx backfill desc */
 | 
						|
        bb_cpu_rx_sm(BB_CPU_EVENT_RX_BACKFILL_DESC_ID);
 | 
						|
    } else if (BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID == event_id) {
 | 
						|
        bb_cpu_rx_hdl_self_sack(1);
 | 
						|
    } else if (BB_CPU_EVENT_RST_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
 | 
						|
        if ((BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            bb_cpu_printf("rxwaitpld cmdlist done err\n");
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* in some cases, reset/cmdlist done and wait sack timeout trigger
         * together, and wait sack timeout must be cleared after the
         * reset/cmdlist done event is processed
         */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID);
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PLD_RESET);
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter wait rx complete;
         * this state needs to wait for BB_CPU_EVENT_RX_PLD_START_ID.
         */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_WAIT_RX_COMPLETE_ID);
 | 
						|
    } else if (BB_CPU_EVENT_RX_ABORT_ID == event_id) {
 | 
						|
        if (bb_cpu_mac_judge_cur_cmd_need_tx_tdma()) {
 | 
						|
            /* in some cases, rx abort and wait sack timeout trigger together,
             * and wait sack timeout must be cleared after rx abort is
             * processed
             */
 | 
						|
            bb_cpu_clr_evt(BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID);
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PLD_RX_ABORT);
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* set reset reason */
 | 
						|
            bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
            /* send event to global sm, enter wait rx complete;
             * this state needs to wait for BB_CPU_EVENT_RX_PLD_START_ID.
             */
 | 
						|
            bb_cpu_global_sm(BB_CPU_EVENT_RST_WAIT_RX_COMPLETE_ID);
 | 
						|
        } else {
 | 
						|
            bb_cpu_printf("rxwaitpld rx abort but tx csma\n");
 | 
						|
            /* csma should not appear rx abort event */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("rxwaitpld fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_rx_complete_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    if (event_id == BB_CPU_EVENT_RX_BACKFILL_DESC_ID) {
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_RX_SUCCESS);
 | 
						|
        /* set rx ring complete, fill desc */
 | 
						|
        bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
        /* if we are still waiting for an rx sack for our own tx,
         * do not tx a sack here
         */
 | 
						|
        if (bb_cpu_rx_judge_need_tx_sack()) {
 | 
						|
            // need tx sack
 | 
						|
            /* prepare tx sack */
 | 
						|
            bb_cpu_prepare_tx_sack(phy_rf_get_g_tx_sack_mcs(), INVALID_SNR);
 | 
						|
            uint32_t span_us = (mac_sched_get_lts() -
 | 
						|
                bb_cpu_mac_get_rx_pld_start_local_ntb()) / 25;
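            /* Assuming the local NTB counter ticks at 25 MHz, dividing the
             * tick delta by 25 converts it to microseconds; the delay below
             * then pads the time since rx payload start up to the required
             * tx RIFS before the SACK goes out.
             */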
 | 
						|
            if (span_us < RF_MAC_TX_RIFS_US) {
 | 
						|
                iot_delay_us(RF_MAC_TX_RIFS_US - span_us);
 | 
						|
            }
 | 
						|
            /* bb cpu trigger bb tx */
 | 
						|
            bb_cpu_trigger_bb(BB_CPU_TRIGGER_BB_TX);
 | 
						|
            /* bb cpu set rx to wait sack tx complete state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_WAIT_SACK_TX_COMPLETE,
 | 
						|
                BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        } else {
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* set ring rx done */
 | 
						|
            bb_cpu_hw_ring_rx_done_set();
 | 
						|
            /* bb cpu trigger mac rx done */
 | 
						|
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
            /* rx done set vcs timer BB_CPU_VCS_INTERVAL */
 | 
						|
            bb_cpu_set_vcs_timer(BB_CPU_VCS_INTERVAL);
 | 
						|
            if (bb_cpu_rx_judge_is_mine_sack()) {
 | 
						|
                bb_cpu_rx_hdl_self_sack(0);
 | 
						|
            }
 | 
						|
        }
 | 
						|
    } else if (BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID == event_id) {
 | 
						|
        bb_cpu_rx_hdl_self_sack(1);
 | 
						|
    } else if (BB_CPU_EVENT_RST_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
 | 
						|
        if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            bb_cpu_printf("rxcomp cmdlist done err\n");
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_COMPLETE_RESET);
 | 
						|
        /* set rx ring complete, fill desc */
 | 
						|
        bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
        /* bb cpu set rx to end state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter reset */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_ID);
 | 
						|
    } else if (BB_CPU_EVENT_RX_ABORT_ID == event_id) {
 | 
						|
        if (bb_cpu_mac_judge_cur_cmd_need_tx_tdma()) {
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_COMPLETE_RX_ABORT);
 | 
						|
            /* set rx ring complete, fill desc */
 | 
						|
            bb_cpu_hw_ring_rx_complete_cfg_desc();
 | 
						|
            /* set ring rx done */
 | 
						|
            bb_cpu_hw_ring_rx_done_set();
 | 
						|
            /* bb cpu set rx to end state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* set reset reason */
 | 
						|
            bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
            /* send event to global sm, enter reset */
 | 
						|
            bb_cpu_global_sm(BB_CPU_EVENT_RST_ID);
 | 
						|
        } else {
 | 
						|
            bb_cpu_printf("rxcomp rx abort but tx csma\n");
 | 
						|
            /* csma should not appear rx abort event */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("rxcomp fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}
 | 
						|
 | 
						|
static void bb_cpu_rx_hdl_wait_sack_tx_complete_sm(uint32_t event_id)
 | 
						|
{
 | 
						|
    /* stop rx timer */
 | 
						|
    bb_cpu_stop_and_clr_rx_timer();
 | 
						|
    if (event_id == BB_CPU_EVENT_RX_TIMEOUT_ID) {
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_tx_sack_status(BB_CPU_RX_RING_TX_SACK_TIMEOUT);
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
        /* timeout reset bb/rf */
 | 
						|
        bb_cpu_rx_reset();
 | 
						|
        /* clear out specific unhandled interrupts */
 | 
						|
        bb_cpu_clr_evt(BB_CPU_EVENT_TX_COMP_ID);
 | 
						|
        /* bb cpu set rx to idle state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac rx done */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
        /* rx done set vcs timer BB_CPU_VCS_INTERVAL */
 | 
						|
        bb_cpu_set_vcs_timer(BB_CPU_VCS_INTERVAL);
 | 
						|
    } else if (event_id == BB_CPU_EVENT_TX_COMP_ID) {
 | 
						|
        /* tx ok cnt increase */
 | 
						|
        mac_rf_get_tx_ok_without_pld_cnt();
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_tx_sack_status(BB_CPU_RX_RING_TX_SACK_SUCCESS);
 | 
						|
        /* set ring rx done */
 | 
						|
        bb_cpu_hw_ring_rx_done_set();
 | 
						|
        /* bb cpu set rx to idle state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* bb cpu trigger mac rx done */
 | 
						|
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
 | 
						|
        /* rx done set vcs timer BB_CPU_VCS_INTERVAL */
 | 
						|
        bb_cpu_set_vcs_timer(BB_CPU_VCS_INTERVAL);
 | 
						|
    } else if (BB_CPU_EVENT_RST_ID == event_id ||
 | 
						|
        BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
 | 
						|
        if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id &&
 | 
						|
            bb_cpu_mac_judge_next_cmd_is_vld()) {
 | 
						|
            bb_cpu_printf("rxwaitsack cmdlist done err\n");
 | 
						|
            /* the next cmd must be invalid when the cmdlist is done,
             * otherwise something is wrong.
             */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
        /* set rx ring status */
 | 
						|
        bb_cpu_hw_ring_set_tx_sack_status(BB_CPU_RX_RING_TX_SACK_RESET);
 | 
						|
        /* bb cpu set rx to idle state */
 | 
						|
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
        /* bb cpu set to idle state */
 | 
						|
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
        /* set reset reason */
 | 
						|
        bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
        /* send event to global sm, enter wait sack tx done */
 | 
						|
        bb_cpu_global_sm(BB_CPU_EVENT_RST_WAIT_TX_DONE_ID);
 | 
						|
    } else if (BB_CPU_EVENT_RX_ABORT_ID == event_id) {
 | 
						|
        if (bb_cpu_mac_judge_cur_cmd_need_tx_tdma()) {
 | 
						|
            /* set rx ring status */
 | 
						|
            bb_cpu_hw_ring_set_tx_sack_status(BB_CPU_RX_RING_TX_SACK_RX_ABORT);
 | 
						|
            /* bb cpu set rx to idle state */
 | 
						|
            bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_RST);
 | 
						|
            /* bb cpu set to idle state */
 | 
						|
            BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
 | 
						|
            /* set reset reason */
 | 
						|
            bb_cpu_set_rst_reason_by_evt(event_id);
 | 
						|
            /* send event to global sm, enter wait sack tx done */
 | 
						|
            bb_cpu_global_sm(BB_CPU_EVENT_RST_WAIT_TX_DONE_ID);
 | 
						|
        } else {
 | 
						|
            bb_cpu_printf("rxwaitsack rx abort but tx csma\n");
 | 
						|
            /* csma should not appear rx abort event */
 | 
						|
            IOT_ASSERT(0);
 | 
						|
        }
 | 
						|
    } else {
 | 
						|
        bb_cpu_printf("rxwaitsack fsm:%d, evt:%d\n",
 | 
						|
            BB_CPU_GET_RST_FSM(), event_id);
 | 
						|
        IOT_ASSERT(0);
 | 
						|
    }
 | 
						|
}

void bb_cpu_rx_sm(uint32_t event_id)
{
#if BB_CPU_DEBUG_PRINT
    if (BB_CPU_EVENT_BACKOFF_TIMEOUT_ID != event_id &&
        BB_CPU_EVENT_RX_ABORT_ID != event_id) {
        bb_cpu_printf("rx fsm:%d, evt:%d\n",
            BB_CPU_GET_RX_FSM(), event_id);
    }
#endif
    switch (BB_CPU_GET_RX_FSM()) {
    case BB_CPU_RX_STATE_LISTENING:
    {
        bb_cpu_rx_hdl_listening_sm(event_id);
        break;
    }
    case BB_CPU_RX_STATE_WAIT_PHR:
    {
        bb_cpu_rx_hdl_wait_phr_sm(event_id);
        break;
    }
    case BB_CPU_RX_STATE_WAIT_PLD:
    {
        bb_cpu_rx_hdl_wait_pld_sm(event_id);
        break;
    }
    case BB_CPU_RX_STATE_RX_COMPLETE:
    {
        bb_cpu_rx_hdl_rx_complete_sm(event_id);
        break;
    }
    case BB_CPU_RX_STATE_WAIT_SACK_TX_COMPLETE:
    {
        bb_cpu_rx_hdl_wait_sack_tx_complete_sm(event_id);
        break;
    }
    default:
        break;
    }
}
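
/* bb_cpu_rx_sm() only dispatches on the current RX sub-state; all event
 * handling lives in the bb_cpu_rx_hdl_*_sm() handlers above. An event that
 * arrives in an RX state with no case here (for example BB_CPU_RX_STATE_IDLE)
 * is silently ignored by the default branch.
 */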

static void bb_cpu_rst_trigger_mac(uint32_t rst_reason)
{
    if (BB_CPU_TO_RST_IS_RX_ABORT == rst_reason) {
        /* clear the reason */
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
        /* bb cpu trigger mac rx abort complete */
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_ABORT_COMPLETE);
    } else if (BB_CPU_TO_RST_IS_TX_ABORT == rst_reason) {
        /* clear the reason */
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
        /* bb cpu trigger mac tx abort complete */
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_TX_ABORT_COMPLETE);
    } else if (BB_CPU_TO_RST_IS_STOP_SCHE == rst_reason) {
        /* clear the reason */
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
        /* bb cpu trigger mac stop schedule complete */
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_STOP_SCHE_COMPLETE);
    } else if (BB_CPU_TO_RST_IS_CMDLIST_DONE == rst_reason) {
        /* clear the reason */
        BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
        /* bb cpu trigger mac cmdlist done complete */
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_CMSLIST_DONE);
    } else {
        bb_cpu_printf("rst trg mac fsm:%d, rsn:%d\n",
            BB_CPU_GET_RST_FSM(), rst_reason);
        IOT_ASSERT(0);
    }
}
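
/* Each valid reset reason is consumed (set back to BB_CPU_TO_RST_IS_INVALID)
 * and reported to the MAC with the matching trigger:
 *   BB_CPU_TO_RST_IS_RX_ABORT     -> BB_CPU_TRIGGER_MAC_RX_ABORT_COMPLETE
 *   BB_CPU_TO_RST_IS_TX_ABORT     -> BB_CPU_TRIGGER_MAC_TX_ABORT_COMPLETE
 *   BB_CPU_TO_RST_IS_STOP_SCHE    -> BB_CPU_TRIGGER_MAC_STOP_SCHE_COMPLETE
 *   BB_CPU_TO_RST_IS_CMDLIST_DONE -> BB_CPU_TRIGGER_MAC_CMSLIST_DONE
 * Any other reason is treated as a programming error and asserts.
 */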

static void bb_cpu_rst_hdl_idle_sm(uint32_t event_id)
{
    uint32_t phr[4] = { 0 };
    uint32_t frame_len = 0, prev_state = 0;
    if (BB_CPU_EVENT_RST_ID == event_id) {
        bb_cpu_rf_reset();
        /* in some cases early stop and rx start trigger together,
         * and rx start must be cleared after early stop is processed
         */
        bb_cpu_clr_multi_evt((1 << BB_CPU_EVENT_MAC_RX_START_ID) |
        /* in some cases rx abort and rx sig trigger together,
         * and rx sig must be cleared after rx abort is processed
         */
            (1 << BB_CPU_EVENT_RX_SIG_ID) |
        /* in some cases rx abort and rx phr trigger together,
         * and rx phr must be cleared after rx abort is processed
         */
            (1 << BB_CPU_EVENT_RX_PHR_ID) |
        /* in some cases early stop and tx start trigger together,
         * and tx start must be cleared after early stop is processed
         */
            (1 << BB_CPU_EVENT_MAC_TX_START_ID) |
        /* in some cases early stop and rx abort trigger together,
         * and rx abort must be cleared after early stop is processed
         */
            (1 << BB_CPU_EVENT_RX_ABORT_ID) |
        /* in some cases early stop and set power save idle trigger together,
         * and set power save idle must be cleared after early stop is processed
         */
            (1 << BB_CPU_EVENT_PS_IDLE_ID));
        if (glb_fsm_ctxt.bb_cpu_evt_tmp) {
            bb_cpu_printf("rstidle evt:0x%x\n",
                glb_fsm_ctxt.bb_cpu_evt_tmp);
            IOT_ASSERT(0);
        }
        /* check whether a sack is still needed.
         * maybe backfill desc tx ok = 0 and trigger mac
         */
        if (BB_CPU_GET_TXDTEI()) {
            bb_cpu_rx_hdl_self_sack(1);
        }
        /* bb cpu set reset to idle state */
        BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_IDLE);
        /* bb cpu set to idle state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
        bb_cpu_rst_trigger_mac(BB_CPU_GET_RST_RS());
    } else if (BB_CPU_EVENT_RST_WAIT_TX_DONE_ID == event_id) {
        BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_WAIT_TX_COMPLETE);
        bb_rf_get_tx_phr(phr);
        bb_cpu_rf_fc_t fc_msg = {0};
        bb_cpu_mac_get_msg_from_phr(BB_CPU_GET_PROTO(), phr, &fc_msg);
        if (fc_msg.delimiter == FC_DELIM_SACK) {
            frame_len = phy_rf_get_g_stf_ltf_fl() + phy_rf_get_g_sig_fl() +
                phy_rf_get_g_phr_fl(phy_rf_get_g_tx_sack_mcs());
        } else {
            rf_tx_mpdu_start *mpdu = (rf_tx_mpdu_start *)BB_CPU_GET_TX_MPDU();
            /* comes from the phr */
            frame_len = bb_cpu_calu_tx_fl(phr, mpdu->phr_mcs);
        }
        /* if cco early stop/cmdlist done/rx abort, reset immediately */
        if (mac_get_hw_role() || (BB_CPU_GET_RST_RS() ==
            BB_CPU_TO_RST_IS_STOP_SCHE)) {
            if (BB_CPU_GET_PREV_STATE() == BB_CPU_STATE_IDLE) {
                prev_state = BB_CPU_GET_MORE_PREV_STATE();
            } else {
                prev_state = BB_CPU_GET_PREV_STATE();
            }
            if (prev_state == BB_CPU_STATE_RX) {
                frame_len = BB_CPU_RESET_RX_IMME;
            }
        }
        /* config timer and wait for timeout */
        bb_cpu_timer_restart(TIMER_OF_RST, frame_len);
    } else if (BB_CPU_EVENT_RST_WAIT_RX_COMPLETE_ID == event_id) {
        BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_WAIT_RX_PLD_START);
        bb_rf_get_rx_phr(phr);
        /* comes from the phr */
        frame_len = bb_cpu_calu_rx_fl_from_phr(phr);
        /* if cco early stop/cmdlist done/rx abort, reset immediately */
        if (mac_get_hw_role() || (BB_CPU_GET_RST_RS() ==
            BB_CPU_TO_RST_IS_STOP_SCHE)) {
            frame_len = BB_CPU_RESET_RX_IMME;
        }
        /* config timer and wait for timeout */
        bb_cpu_timer_restart(TIMER_OF_RST, frame_len);
    } else {
        bb_cpu_printf("rstidle fsm:%d, evt:%d\n",
            BB_CPU_GET_RST_FSM(), event_id);
        IOT_ASSERT(0);
    }
}
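
/* The reset timer armed above bounds how long the reset FSM waits for the
 * in-flight frame: for a pending SACK transmission only the STF/LTF + SIG +
 * PHR durations are summed, for other TX frames the full length is derived
 * from the PHR and its MCS, and in the RX-wait path the length comes from the
 * received PHR. When mac_get_hw_role() is non-zero (the CCO case referenced
 * in the comments) or the reason is stop-schedule, BB_CPU_RESET_RX_IMME is
 * used instead so the reset takes effect immediately; in the TX-wait path
 * this shortcut is only applied when the previous global state was RX.
 */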

static void bb_cpu_rst_hdl_wait_tx_complete_sm(uint32_t event_id)
{
    uint32_t prev_state = 0;
    if (BB_CPU_EVENT_TX_COMP_ID == event_id ||
        BB_CPU_EVENT_RST_TIMEOUT_ID == event_id) {
        /* stop reset timer */
        bb_cpu_stop_and_clr_rst_timer();
        if (BB_CPU_GET_PREV_STATE() == BB_CPU_STATE_IDLE) {
            prev_state = BB_CPU_GET_MORE_PREV_STATE();
        } else {
            prev_state = BB_CPU_GET_PREV_STATE();
        }
        if (prev_state == BB_CPU_STATE_RX) {
            if (BB_CPU_EVENT_TX_COMP_ID == event_id) {
                /* tx ok without pld cnt increase */
                mac_rf_tx_ok_without_pld_cnt_inc();
                /* set rx ring status */
                bb_cpu_hw_ring_set_tx_sack_status(BB_CPU_RX_RING_TX_SACK_SUCCESS);
            } else {
                /* set rx ring status */
                bb_cpu_hw_ring_set_tx_sack_status(
                    BB_CPU_RX_RING_TX_SACK_RESET_TIMEOUT);
            }
            /* set ring rx done */
            bb_cpu_hw_ring_rx_done_set();
            /* bb cpu trigger mac rx done */
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
            /* rx done, set vcs timer to BB_CPU_VCS_INTERVAL */
            bb_cpu_rxabort_cmdlistdone_set_vcs(BB_CPU_VCS_INTERVAL);
        } else if (prev_state == BB_CPU_STATE_TX) {
            if (BB_CPU_EVENT_TX_COMP_ID == event_id) {
                /* tx ok with pld cnt increase */
                mac_rf_tx_ok_with_pld_cnt_inc();
            }
            /* check whether a sack is still needed.
             * maybe backfill desc tx ok = 0 and trigger mac
             */
            if (BB_CPU_GET_TXDTEI()) {
                BB_CPU_SET_TXDTEI(PLC_TEI_INVAL);
                bb_cpu_backfill_tx_desc_sack(1);
            }
            bb_cpu_backfill_tx_desc(0);
            /* bb cpu trigger mac tx done */
            bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_TX_DONE);
        } else {
            bb_cpu_printf("rstwaittxcomp err, prev fsm:%d\n",
                BB_CPU_GET_PREV_STATE());
            IOT_ASSERT(0);
        }
        /* bb cpu set reset to idle state */
        BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_IDLE);
        /* bb cpu set to idle state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
        /* reset bb rf */
        bb_cpu_tx_reset();
        /* clear tx complete evt */
        bb_cpu_clr_evt(BB_CPU_EVENT_TX_COMP_ID);
        bb_cpu_rst_trigger_mac(BB_CPU_GET_RST_RS());
    } else if (BB_CPU_EVENT_RST_ID == event_id) {
        /* the reason here can only be BB_CPU_TO_RST_IS_TX_ABORT or
         * BB_CPU_TO_RST_IS_CMDLIST_DONE, otherwise something is wrong.
         */
        /* set reset reason to early stop */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_TX_ABORT ||
            BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_CMDLIST_DONE) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_RST_ID);
        } else {
            bb_cpu_printf("rstwaittxcomp earlystop err rsn:%d\n",
                BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
    } else if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
        /* the reason here can only be BB_CPU_TO_RST_IS_TX_ABORT or
         * BB_CPU_TO_RST_IS_STOP_SCHE, otherwise something is wrong.
         */
        /* set reset reason to cmdlist done */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_TX_ABORT) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_CMDLIST_DONE_ID);
        } else if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_STOP_SCHE) {
            /* stop schedule, keep reason */
        } else {
            bb_cpu_printf("rstwaittxcomp cmdlistdone err rsn:%d\n",
                BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
    } else {
        bb_cpu_printf("rstwaittxcomp fsm:%d, evt:%d\n",
            BB_CPU_GET_RST_FSM(), event_id);
        IOT_ASSERT(0);
    }
}
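
/* Whether the completed (or timed-out) transmission was a SACK sent on behalf
 * of a received frame or a normal data frame is recovered from the previous
 * global state: BB_CPU_STATE_RX means the pending item is the RX ring entry,
 * so rx done is reported to the MAC; BB_CPU_STATE_TX means the TX descriptor
 * is backfilled and tx done is reported instead.
 */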

static void bb_cpu_rst_hdl_wait_rx_pld_start_sm(uint32_t event_id)
{
    if (BB_CPU_EVENT_RX_PLD_START_ID == event_id ||
        BB_CPU_EVENT_RST_TIMEOUT_ID == event_id) {
        if (BB_CPU_EVENT_RX_PLD_START_ID == event_id) {
            if (!BB_CPU_TXRX_USE_DMA) {
                uint32_t phr[4] = { 0 };
                /* get phy header */
                bb_rf_get_rx_phr(phr);
                /* start moving data */
                bb_cpu_get_data_from_bb(phr);
            }
            /* add pld cnt */
            bb_cpu_add_pld_cnt();
            /* set rx ring status */
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_RX_SUCCESS);
        } else {
            BB_CPU_SET_RX_BUF(NULL);
            /* set rx ring status */
            bb_cpu_hw_ring_set_rx_status(BB_CPU_RX_RING_WAIT_PLD_RESET_TIMEOUT);
        }

        /* check whether a sack is still needed.
         * maybe backfill desc tx ok = 0 and trigger mac
         */
        if (BB_CPU_GET_TXDTEI()) {
            bb_cpu_rx_hdl_self_sack(1);
        }

        /* stop reset timer */
        bb_cpu_stop_and_clr_rst_timer();
        /* set rx ring complete, fill desc */
        bb_cpu_hw_ring_rx_complete_cfg_desc();
        /* set ring rx done */
        bb_cpu_hw_ring_rx_done_set();
        /* bb cpu trigger mac rx done */
        bb_cpu_trigger_mac(BB_CPU_TRIGGER_MAC_RX_DONE);
        /* rx done, set vcs timer to BB_CPU_VCS_INTERVAL */
        bb_cpu_rxabort_cmdlistdone_set_vcs(BB_CPU_VCS_INTERVAL);
        /* reset bb rf */
        bb_cpu_rf_reset();
        /* clear rx pld start evt */
        bb_cpu_clr_evt(BB_CPU_EVENT_RX_PLD_START_ID);
        /* bb cpu set reset to idle state */
        BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_IDLE);
        /* bb cpu set to idle state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
        bb_cpu_rst_trigger_mac(BB_CPU_GET_RST_RS());
    } else if (BB_CPU_EVENT_RST_ID == event_id) {
        /* the reason here can only be BB_CPU_TO_RST_IS_RX_ABORT or
         * BB_CPU_TO_RST_IS_CMDLIST_DONE, otherwise something is wrong.
         */
        /* set reset reason to early stop */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_RX_ABORT ||
            BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_CMDLIST_DONE) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_RST_ID);
        } else {
            bb_cpu_printf("waitrxpld earlystop err rsn:%d\n",
                BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
    } else if (BB_CPU_EVENT_CMDLIST_DONE_ID == event_id) {
        /* the reason here can only be BB_CPU_TO_RST_IS_RX_ABORT or
         * BB_CPU_TO_RST_IS_STOP_SCHE, otherwise something is wrong.
         */
        /* set reset reason to cmdlist done */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_RX_ABORT) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_CMDLIST_DONE_ID);
        } else if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_STOP_SCHE) {
            /* stop schedule, keep reason */
        } else {
            bb_cpu_printf("waitrxpld cmdlistdone err rsn:%d\n",
                BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
    } else {
        bb_cpu_printf("waitrxpld fsm:%d, evt:%d\n",
            BB_CPU_GET_RST_FSM(), event_id);
        IOT_ASSERT(0);
    }
}

static void bb_cpu_reset_sm(uint32_t event_id)
{
#if BB_CPU_DEBUG_PRINT
    bb_cpu_printf("rst fsm:%d, evt:%d\n", BB_CPU_GET_RST_FSM(), event_id);
#endif
    switch (BB_CPU_GET_RST_FSM()) {
    case BB_CPU_RST_STATE_IDLE:
    {
        bb_cpu_rst_hdl_idle_sm(event_id);
        break;
    }
    case BB_CPU_RST_STATE_WAIT_TX_COMPLETE:
    {
        bb_cpu_rst_hdl_wait_tx_complete_sm(event_id);
        break;
    }
    case BB_CPU_RST_STATE_WAIT_RX_PLD_START:
    {
        bb_cpu_rst_hdl_wait_rx_pld_start_sm(event_id);
        break;
    }
    default:
        break;
    }
}
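
/* Reset sub-FSM flow as implemented by the handlers above: from
 * BB_CPU_RST_STATE_IDLE an immediate reset (BB_CPU_EVENT_RST_ID) finishes in
 * place, while BB_CPU_EVENT_RST_WAIT_TX_DONE_ID and
 * BB_CPU_EVENT_RST_WAIT_RX_COMPLETE_ID move to WAIT_TX_COMPLETE and
 * WAIT_RX_PLD_START respectively. Both waiting states return to IDLE either
 * on the awaited event or on the reset timer timeout, and finally report the
 * reset reason to the MAC via bb_cpu_rst_trigger_mac().
 */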

static void bb_cpu_set_channel()
{
    uint32_t channel_id = mac_rf_get_tobe_set_channel_id();
    uint32_t channel_freq = phy_rf_get_channel_freq_by_id(
        BB_CPU_GET_OPTION(), channel_id);
    BB_CPU_SET_CHANNEL(channel_id, channel_freq);
    mac_rf_set_channel_success();
}
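
/* Channel changes are a request/acknowledge handshake with the MAC: the MAC
 * publishes the target channel id, bb_cpu_set_channel() resolves the carrier
 * frequency for the current option, latches both into the BB CPU context and
 * acknowledges with mac_rf_set_channel_success(). It is invoked from the
 * BB_CPU_EVENT_BB_INIT_ID path in bb_cpu_idle_sm() and from
 * BB_CPU_EVENT_SET_CHANNEL_ID in bb_cpu_global_sm(); in the latter case an
 * ongoing RX listening session is restarted so the new channel takes effect.
 */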

static void bb_cpu_idle_sm(uint32_t event_id)
{
    uint32_t option, proto;
#if BB_CPU_DEBUG_PRINT
    if (event_id != BB_CPU_EVENT_MAC_TX_START_ID) {
        bb_cpu_printf("idle evt:%d\n", event_id);
    }
#endif
    switch (event_id) {
    case BB_CPU_EVENT_MAC_TX_START_ID:
    {
#if ENA_RF_MULTI_CSMA_HWQ_WAR
        /* if the mpdu is a simulated one, end the tx process here.
         * this usually happens together with cmd start.
         */
        uint32_t hwqid = bb_cpu_mac_get_cur_hwqid();
        if (hwqid >= MAC_RF_QUE_CSMA_0 && hwqid < MAX_ENABLE_CSMA_HWQ) {
            if ((uint32_t)&simu_mpdu_tbl[hwqid] == bb_cpu_mac_get_txq_ptr()) {
                /* set rf mac tx done */
                bb_cpu_mac_set_tx_done();
                return;
            }
        }
#endif
        /* hplc and rf asynchronous tx */
        if (bb_cpu_async_tx_check()) {
            return;
        }
        /* increase tx start count */
        mac_rf_tx_start_cnt_inc();
        /* bb cpu enter tx state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_TX);
        /* set event to fill tx info */
        bb_cpu_tx_sm(BB_CPU_EVENT_MAC_TX_FILL_INFO_ID);
        break;
    }
    case BB_CPU_EVENT_MAC_RX_START_ID:
    {
        /* update txq before rx */
        bb_cpu_csma_check_txq();
        /* bb cpu enter rx state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_RX);
        /* bb cpu rx enter rx listening state */
        bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_LISTENING, BB_CPU_ENTER_IDLE_NOR);
        if (!mac_rf_get_bb_ps_idle()) {
            /* trigger bb to rx */
            bb_cpu_rx_config_start();
        }
        break;
    }
    case BB_CPU_EVENT_WAIT_SACK_TIMEOUT_ID:
    {
        bb_cpu_rx_hdl_self_sack(1);
        break;
    }
    case BB_CPU_EVENT_CMDLIST_DONE_ID:
    {
        if (BB_CPU_GET_RST_RS() != BB_CPU_TO_RST_IS_INVALID) {
            bb_cpu_printf("idle cmdlistdone err rsn:%d\n", BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
        /* NOTE: when cmdlist done happens, wmac cannot trigger the rx start
         *       interrupt, so the fsm may still be in idle state when cmdlist
         *       done occurs. the cmdlist done event must be handled here too.
         */
        bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_CMDLIST_DONE_ID);
        /* bb cpu enter reset state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_RST);
        /* forward the event to the reset sm */
        bb_cpu_reset_sm(BB_CPU_EVENT_RST_ID);
        break;
    }
    case BB_CPU_EVENT_RST_ID:
    case BB_CPU_EVENT_RST_WAIT_TX_DONE_ID:
    case BB_CPU_EVENT_RST_WAIT_RX_COMPLETE_ID:
    {
        /* set reset reason when a reset event is triggered in idle state */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_INVALID) {
            IOT_ASSERT(BB_CPU_EVENT_RST_ID == event_id);
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_RST_ID);
        }
        /* bb cpu enter reset state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_RST);
        /* forward the event to the reset sm */
        bb_cpu_reset_sm(event_id);
        break;
    }
    case BB_CPU_EVENT_BB_INIT_ID:
    {
        option = mac_rf_get_option();
        IOT_ASSERT(option < PHY_RF_OPTION_MAX);
        bb_rf_jesd_reset();
        bb_rf_init(option, 0);
        BB_CPU_SET_OPTION(option);
        /* init proto */
        proto = mac_rf_get_proto();
        IOT_ASSERT(proto <= PLC_PROTO_TYPE_RAWDATA);
        BB_CPU_SET_PROTO(proto);
        /* fl init */
        phy_rf_fl_init(option);
        /* set channel */
        bb_cpu_set_channel();
        mac_rf_set_cur_channel_id(BB_CPU_GET_CHANNEL_ID());
        mac_rf_set_cur_channel_freq(BB_CPU_GET_CHANNEL_FREQ());
        /* set option success */
        mac_rf_set_option_success();
        break;
    }
    case BB_CPU_EVENT_TX_ABORT_ID:
    {
        /* while tx complete is being processed, tx abort may be triggered.
         * the fsm is then already in idle state when tx abort is processed,
         * so it needs to be handled here as well.
         */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_INVALID) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_TX_ABORT_ID);
        } else {
            bb_cpu_printf("idle tx abort, rsn:%d\n", BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
        /* bb cpu enter reset state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_RST);
        /* forward the event to the reset sm */
        bb_cpu_reset_sm(BB_CPU_EVENT_RST_ID);
        break;
    }
    case BB_CPU_EVENT_RX_ABORT_ID:
    {
        /* while an rx error (sig err/phr err) is being processed, rx abort
         * may be triggered. the fsm is then already in idle state when rx
         * abort is processed, so it needs to be handled here as well.
         */
        if (BB_CPU_GET_RST_RS() == BB_CPU_TO_RST_IS_INVALID) {
            /* set reset reason */
            bb_cpu_set_rst_reason_by_evt(BB_CPU_EVENT_RX_ABORT_ID);
        } else {
            bb_cpu_printf("idle rx abort, rsn:%d\n", BB_CPU_GET_RST_RS());
            IOT_ASSERT(0);
        }
        /* bb cpu enter reset state */
        BB_CPU_SET_GLB_FSM(BB_CPU_STATE_RST);
        /* forward the event to the reset sm */
        bb_cpu_reset_sm(BB_CPU_EVENT_RST_ID);
        break;
    }
    case BB_CPU_EVENT_BACKOFF_TIMEOUT_ID:
    {
        /* rf mac backoff timer cannot be stopped by sw */
        break;
    }
    default:
        bb_cpu_printf("idle err evt:%d\n", event_id);
        IOT_ASSERT(0);
        break;
    }
}

void bb_cpu_global_sm(uint32_t event_id)
{
    /* MP mode requests (rf spi access, tx calibration, tx tone) are handled
     * directly here, before the fsm dispatch below
     */
    uint32_t on_off, rf_option, freq, tone_num, tone_att;
    if (BB_CPU_EVENT_SYNC_SPI_ID == event_id) {
        uint16_t spi_addr, spi_value;
        mac_rf_get_rf_spi(&spi_addr, &spi_value);
        if (mac_rf_get_rf_spi_rw_cfg()) {
            /* write */
            rf_spi_write(spi_addr, spi_value);
        } else {
            /* read */
            uint16_t value = rf_spi_read(spi_addr);
            mac_rf_set_rf_spi(spi_addr, value);
        }
        mac_rf_set_rf_spi_status(1);
        return;
    }

    /* MP mode: tx calibration update */
    if (BB_CPU_EVENT_TX_CAL_UPDATE_ID == event_id) {
        mac_rf_get_tx_tone_para(&on_off, &rf_option, &tone_num, &tone_att);
        bb_rf_update_cali(rf_option);
        mac_rf_set_tx_cal_update_status(1);
        return;
    }

    if (BB_CPU_EVENT_TX_TONE_ID == event_id) {
        mac_rf_get_tx_tone_para(&on_off, &rf_option, &tone_num, &tone_att);
        freq = mac_rf_get_tx_tone_freq();
        bb_cpu_printf("txtone of:%d, opt:%d, tnum:%d, frq:%lu\n",
            on_off, rf_option, tone_num, freq);
        bb_cpu_rf_reset();
        bb_rf_jesd_reset();
        /* cfg data unsigned */
        rf_spi_write(30, 0x101);
        /* tx config */
        bb_rf_tx_cfg(rf_option, freq);
        bb_cpu_update_tx_pwr();
        mac_rf_tx_tone(on_off, tone_num, tone_att);
        mac_rf_set_tx_tone_status(1);
        return;
    }

    if (BB_CPU_EVENT_SET_CHANNEL_ID == event_id) {
        bb_cpu_printf("glb fsm:%d, evt:%d\n", BB_CPU_GET_GLB_FSM(), event_id);
        bb_cpu_set_channel();
        /* if the current fsm is rx listening, rx must be restarted with the
         * new config, so fall through to the fsm dispatch below
         */
        if (!(BB_CPU_GET_GLB_FSM() == BB_CPU_STATE_RX &&
            BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_LISTENING)) {
            return;
        }
    }

    /* process BB_CPU_EVENT_CSMA_TX_CHECK_ID only in the rx listening fsm,
     * otherwise ignore this event.
     */
    if (BB_CPU_EVENT_CSMA_TX_CHECK_ID == event_id &&
        !(BB_CPU_GET_GLB_FSM() == BB_CPU_STATE_RX &&
        BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_LISTENING)) {
        bb_cpu_printf("glb fsm:%d, rxfsm:%d, evt:%d\n", BB_CPU_GET_GLB_FSM(),
            BB_CPU_GET_RX_FSM(), event_id);
        return;
    }

    /* only needs a response in rx listening state */
    if (BB_CPU_EVENT_PS_IDLE_ID == event_id &&
        !(BB_CPU_GET_GLB_FSM() == BB_CPU_STATE_RX &&
        BB_CPU_GET_RX_FSM() == BB_CPU_RX_STATE_LISTENING)) {
        bb_cpu_printf("glb fsm:%d, evt:%d\n", BB_CPU_GET_GLB_FSM(), event_id);
        return;
    }

    switch (BB_CPU_GET_GLB_FSM()) {
    case BB_CPU_STATE_IDLE:
    {
        bb_cpu_idle_sm(event_id);
        break;
    }
    case BB_CPU_STATE_TX:
    {
        bb_cpu_tx_sm(event_id);
        break;
    }
    case BB_CPU_STATE_RX:
    {
        bb_cpu_rx_sm(event_id);
        break;
    }
    case BB_CPU_STATE_RST:
    {
        bb_cpu_reset_sm(event_id);
        break;
    }
    default:
        IOT_ASSERT(0);
        break;
    }
}
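
/* bb_cpu_global_sm() is the single entry point for events: bb_cpu_fsm_main()
 * feeds it decoded event ids from the pending-event word, and several handlers
 * re-enter it directly (for example the wait-sack path posts
 * BB_CPU_EVENT_RST_WAIT_TX_DONE_ID back into it) so that a state transition
 * and its follow-up event are processed in one pass.
 */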

void bb_cpu_fsm_main()
{
    uint8_t i, j;
    uint8_t *bb_cpu_evt;

    for (;;) {
        /* wait for a valid event */
        glb_fsm_ctxt.bb_cpu_evt_tmp = bb_cpu_wait_event(BB_MAX_TIME);
        bb_cpu_evt = (uint8_t *)&glb_fsm_ctxt.bb_cpu_evt_tmp;

        i = 0;
        while (glb_fsm_ctxt.bb_cpu_evt_tmp) {
            while (*bb_cpu_evt) {
                j = iot_bitops_ffs(*bb_cpu_evt) - 1;
                glb_fsm_ctxt.bb_cpu_evt_tmp &= ~(0x1 << (i + j));
                bb_cpu_global_sm(i + j);
            }
            bb_cpu_evt++;
            i += 8;
        }
    }
}
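
/* The loop above walks the pending-event word one byte at a time:
 * iot_bitops_ffs() returns the 1-based index of the lowest set bit in the
 * current byte, that bit is cleared from the shared word, and the absolute
 * bit position (byte offset * 8 + bit index) is the event id passed to
 * bb_cpu_global_sm(). A rough standalone sketch of the same decoding,
 * assuming iot_bitops_ffs() accepts a plain byte value with the same 1-based
 * convention:
 *
 *     uint32_t pending = evt_word;
 *     for (uint32_t base = 0; pending != 0; base += 8) {
 *         uint8_t byte = (uint8_t)(pending >> base);
 *         while (byte) {
 *             uint32_t id = base + iot_bitops_ffs(byte) - 1;
 *             pending &= ~(1UL << id);
 *             byte &= (uint8_t)(byte - 1);   // clear lowest set bit
 *             bb_cpu_global_sm(id);
 *         }
 *     }
 *
 * The current event's bit is cleared from glb_fsm_ctxt.bb_cpu_evt_tmp before
 * dispatch, which is what allows bb_cpu_rst_hdl_idle_sm() to assert that the
 * word is empty while it handles a reset.
 */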

static void bb_cpu_simu_mpdu_init()
{
#if ENA_RF_MULTI_CSMA_HWQ_WAR
    for (uint8_t i = 0; i < MAX_ENABLE_CSMA_HWQ; i++) {
        simu_mpdu_tbl[i].notify_hw_tx_done = 0;
        simu_mpdu_tbl[i].desc_type = DESC_TYPE_TX_MPDU_START;
        simu_mpdu_tbl[i].tx_fl = 0xffffff;
        simu_mpdu_tbl[i].next = NULL;
        /* simulated mpdu flag */
        simu_mpdu_tbl[i].tx_status = NULL;
    }
#endif
}
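
/* The simulated MPDU table backs the ENA_RF_MULTI_CSMA_HWQ_WAR workaround:
 * each enabled CSMA hardware queue gets a placeholder TX descriptor with an
 * all-ones frame length (0xffffff) and a NULL tx_status marker. When a MAC tx
 * start points a CSMA queue at one of these placeholders, bb_cpu_idle_sm()
 * simply reports tx done instead of starting a real transmission.
 */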

void bb_cpu_fsm_init()
{
    int8_t default_power;
    BB_CPU_SET_GLB_FSM(BB_CPU_STATE_IDLE);
    bb_cpu_set_tx_fsm(BB_CPU_TX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
    bb_cpu_set_rx_fsm(BB_CPU_RX_STATE_IDLE, BB_CPU_ENTER_IDLE_NOR);
    BB_CPU_SET_RST_FSM(BB_CPU_RST_STATE_IDLE);
    BB_CPU_SET_RST_RS(BB_CPU_TO_RST_IS_INVALID);
    BB_CPU_SET_TXDTEI(PLC_TEI_INVAL);
    BB_CPU_SET_HWQID(MAX_MAC_RF_TXQ_NUM);
    BB_CPU_SET_RX_BUF(NULL);
    /* set vcs to idle */
    bb_cpu_mac_set_vcs_sts_from_isr(0);
    /* mark isr as valid */
    bb_cpu_set_isr_vaild(1);
    /* mark option as not yet configured */
    BB_CPU_SET_OPTION(PHY_RF_OPTION_MAX);
    bb_cpu_simu_mpdu_init();
    phy_rf_get_power(NULL, NULL, &default_power, NULL);
    BB_CPU_SET_CUR_TX_PWR(default_power);
    /* clear txcfg1 valid flag */
    BB_CPU_SET_TXCFG1_VLD(0);
}

int8_t bb_cpu_get_tx_pwr()
{
    return BB_CPU_GET_CUR_TX_PWR();
}

uint32_t bb_cpu_get_proto()
{
    return BB_CPU_GET_PROTO();
}