Diffstat (limited to 'drivers/net/wireless/realtek/rtw89/core.c')
-rw-r--r-- | drivers/net/wireless/realtek/rtw89/core.c | 684
1 file changed, 641 insertions(+), 43 deletions(-)
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 57590f5577a3..917b2adede61 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -2,6 +2,7 @@
 /* Copyright(c) 2019-2020  Realtek Corporation
  */
 
 #include <linux/ip.h>
+#include <linux/sort.h>
 #include <linux/udp.h>
 
 #include "cam.h"
@@ -272,17 +273,18 @@ rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
 	return NULL;
 }
 
-bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
+bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate)
 {
-	struct ieee80211_rate rate;
+	const struct ieee80211_rate *rate;
 
-	if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
-		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
+	if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) {
+		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+			    "invalid legacy rate %d\n", legacy_rate);
 		return false;
 	}
 
-	rate = rtw89_bitrates[rpt_rate];
-	*bitrate = rate.bitrate;
+	rate = &rtw89_bitrates[legacy_rate];
+	*bitrate = rate->bitrate;
 
 	return true;
 }
@@ -697,6 +699,66 @@ static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
 	desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
 }
 
+u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+	switch (qsel) {
+	default:
+		rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
+		fallthrough;
+	case RTW89_TX_QSEL_BE_0:
+	case RTW89_TX_QSEL_BE_1:
+	case RTW89_TX_QSEL_BE_2:
+	case RTW89_TX_QSEL_BE_3:
+		return RTW89_TXCH_ACH0;
+	case RTW89_TX_QSEL_BK_0:
+	case RTW89_TX_QSEL_BK_1:
+	case RTW89_TX_QSEL_BK_2:
+	case RTW89_TX_QSEL_BK_3:
+		return RTW89_TXCH_ACH1;
+	case RTW89_TX_QSEL_VI_0:
+	case RTW89_TX_QSEL_VI_1:
+	case RTW89_TX_QSEL_VI_2:
+	case RTW89_TX_QSEL_VI_3:
+		return RTW89_TXCH_ACH2;
+	case RTW89_TX_QSEL_VO_0:
+	case RTW89_TX_QSEL_VO_1:
+	case RTW89_TX_QSEL_VO_2:
+	case RTW89_TX_QSEL_VO_3:
+		return RTW89_TXCH_ACH3;
+	case RTW89_TX_QSEL_B0_MGMT:
+		return RTW89_TXCH_CH8;
+	case RTW89_TX_QSEL_B0_HI:
+		return RTW89_TXCH_CH9;
+	case RTW89_TX_QSEL_B1_MGMT:
+		return RTW89_TXCH_CH10;
+	case RTW89_TX_QSEL_B1_HI:
+		return RTW89_TXCH_CH11;
+	}
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma);
+
+u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel)
+{
+	switch (qsel) {
+	default:
+		rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel);
+		fallthrough;
+	case RTW89_TX_QSEL_BE_0:
+	case RTW89_TX_QSEL_BK_0:
+		return RTW89_TXCH_ACH0;
+	case RTW89_TX_QSEL_VI_0:
+	case RTW89_TX_QSEL_VO_0:
+		return RTW89_TXCH_ACH2;
+	case RTW89_TX_QSEL_B0_MGMT:
+	case RTW89_TX_QSEL_B0_HI:
+		return RTW89_TXCH_CH8;
+	case RTW89_TX_QSEL_B1_MGMT:
+	case RTW89_TX_QSEL_B1_HI:
+		return RTW89_TXCH_CH10;
+	}
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1);
+
 static void
 rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
			       struct rtw89_core_tx_request *tx_req)
@@ -710,7 +772,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
 	u8 qsel, ch_dma;
 
 	qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
-	ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+	ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
 
 	desc_info->qsel = qsel;
 	desc_info->ch_dma = ch_dma;
@@ -927,7 +989,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
 	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 	tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
 	qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
-	ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+	ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
 
 	desc_info->ch_dma = ch_dma;
 	desc_info->tid_indicate = tid_indicate;
@@ -1073,42 +1135,46 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
 	}
 }
 
+static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+						tx_wait_work.work);
+
+	rtw89_tx_wait_list_clear(rtwdev);
+}
+
 void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
 {
 	u8 ch_dma;
 
-	ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+	ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
 
 	rtw89_hci_tx_kick_off(rtwdev, ch_dma);
 }
 
 int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
-				    int qsel, unsigned int timeout)
+				    struct rtw89_tx_wait_info *wait, int qsel,
+				    unsigned int timeout)
 {
-	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
-	struct rtw89_tx_wait_info *wait;
 	unsigned long time_left;
 	int ret = 0;
 
-	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
-	if (!wait) {
-		rtw89_core_tx_kick_off(rtwdev, qsel);
-		return 0;
-	}
-
-	init_completion(&wait->completion);
-	rcu_assign_pointer(skb_data->wait, wait);
+	lockdep_assert_wiphy(rtwdev->hw->wiphy);
 
 	rtw89_core_tx_kick_off(rtwdev, qsel);
 	time_left = wait_for_completion_timeout(&wait->completion,
						msecs_to_jiffies(timeout));
-	if (time_left == 0)
-		ret = -ETIMEDOUT;
-	else if (!wait->tx_done)
-		ret = -EAGAIN;
 
-	rcu_assign_pointer(skb_data->wait, NULL);
-	kfree_rcu(wait, rcu_head);
+	if (time_left == 0) {
+		ret = -ETIMEDOUT;
+		list_add_tail(&wait->list, &rtwdev->tx_waits);
+		wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work,
+					 RTW89_TX_WAIT_WORK_TIMEOUT);
+	} else {
+		if (!wait->tx_done)
+			ret = -EAGAIN;
+
+		rtw89_tx_wait_release(wait);
+	}
 
 	return ret;
 }
@@ -1157,10 +1223,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
 static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
-				    struct sk_buff *skb, int *qsel, bool sw_mld)
+				    struct sk_buff *skb, int *qsel, bool sw_mld,
+				    struct rtw89_tx_wait_info *wait)
 {
 	struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
 	struct rtw89_core_tx_request tx_req = {};
 	int ret;
@@ -1177,6 +1245,8 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
 	rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
 	rtw89_core_tx_wake(rtwdev, &tx_req);
 
+	rcu_assign_pointer(skb_data->wait, wait);
+
 	ret = rtw89_hci_tx_write(rtwdev, &tx_req);
 	if (ret) {
 		rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -1213,7 +1283,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
 		}
 	}
 
-	return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
+	return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
+					NULL);
 }
 
 static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
@@ -1835,6 +1906,10 @@ static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
 
 	tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
 	phy_ppdu->rpl_avg = tmp_rpl >> 1;
+
+	if (!phy_ppdu->hdr_2_en)
+		phy_ppdu->rx_path_en =
+			le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN);
 }
 
 static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
@@ -2221,6 +2296,435 @@ static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
 	WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
 }
 
+static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2)
+{
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th;
+	u32 tbtt_diff_th = bcn_track->tbtt_diff_th;
+
+	if (tbtt2 > tbtt1)
+		swap(tbtt1, tbtt2);
+
+	if (tbtt1 - tbtt2 > tbtt_diff_th)
+		return tbtt1;
+	else if (tbtt2 > close_bcn_intvl_th)
+		return tbtt2;
+	else if (tbtt1 > close_bcn_intvl_th)
+		return tbtt1;
+	else
+		return tbtt2;
+}
+
+static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev,
+				      struct rtw89_vif_link *rtwvif_link)
+{
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+	u32 offset = bcn_track->tbtt_offset;
+
+	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+		const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+		const struct rtw89_port_reg *p = mac->port_base;
+		u32 bcnspc, val;
+
+		bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link,
+						p->bcn_space, B_AX_BCN_SPACE_MASK);
+		val = bcnspc - (offset / 1024);
+		val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) |
+		      B_AX_TBTT_SHIFT_OFST_SIGN;
+
+		rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
+					B_AX_TBTT_SHIFT_OFST_MASK, val);
+
+		return;
+	}
+
+	rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset);
+}
+
+static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev,
+					 struct rtw89_vif_link *rtwvif_link)
+{
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	u32 *tbtt_us = bcn_stat->tbtt_us;
+	u32 offset = tbtt_us[0];
+	u8 i;
+
+	for (i = 1; i < RTW89_BCN_TRACK_STAT_NR; i++)
+		offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset);
+
+	if (bcn_track->tbtt_offset == offset)
+		return;
+
+	bcn_track->tbtt_offset = offset;
+	rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link);
+}
+
+static int cmp_u16(const void *a, const void *b)
+{
+	return *(const u16 *)a - *(const u16 *)b;
+}
+
+static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int)
+{
+	if (tbtt < offset)
+		return beacon_int - offset + tbtt;
+
+	return tbtt - offset;
+}
+
+static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	u16 offset_tu = bcn_track->tbtt_offset / 1024;
+	u16 *tbtt_tu = bcn_stat->tbtt_tu;
+	u16 *drift = bcn_stat->drift;
+	u8 i;
+
+	bcn_stat->tbtt_tu_min = U16_MAX;
+	bcn_stat->tbtt_tu_max = 0;
+	for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+		drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu,
+						 bcn_track->beacon_int);
+
+		bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]);
+		bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]);
+	}
+
+	sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL);
+}
+
+static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+	u16 lower_bound, upper_bound, outlier_count = 0;
+	u16 *drift = bcn_stat->drift;
+	u16 *bins = bcn_dist->bins;
+	u16 q1, q3, iqr, tmp;
+	u8 i;
+
+	BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0);
+
+	memset(bcn_dist, 0, sizeof(*bcn_dist));
+
+	bcn_dist->min = drift[0];
+	bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1];
+
+	tmp = RTW89_BCN_TRACK_STAT_NR / 4;
+	q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+	tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4;
+	q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+	iqr = q3 - q1;
+	tmp = (3 * iqr) / 2;
+
+	if (bcn_dist->min <= 5)
+		lower_bound = bcn_dist->min;
+	else if (q1 > tmp)
+		lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+	else
+		lower_bound = 0;
+
+	upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+
+	for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+		u16 tbtt = bcn_stat->tbtt_tu[i];
+		u16 min = bcn_stat->tbtt_tu_min;
+		u8 bin_idx;
+
+		/* histogram */
+		bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH,
+			      RTW89_BCN_TRACK_MAX_BIN_NUM - 1);
+		bins[bin_idx]++;
+
+		/* boxplot outlier */
+		if (drift[i] < lower_bound || drift[i] > upper_bound)
+			outlier_count++;
+	}
+
+	bcn_dist->outlier_count = outlier_count;
+	bcn_dist->lower_bound = lower_bound;
+	bcn_dist->upper_bound = upper_bound;
+}
+
+static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold)
+{
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m;
+	u16 *drift = bcn_stat->drift;
+	int index = -1;
+	u8 count = 0;
+
+	while (l <= r) {
+		m = l + (r - l) / 2;
+
+		if (drift[m] <= threshold) {
+			index = m;
+			l = m + 1;
+		} else {
+			r = m - 1;
+		}
+	}
+
+	count = (index == -1) ? 0 : (index + 1);
+
+	return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR;
+}
+
+static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target)
+{
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+	u16 tbtt_tu_max = bcn_stat->tbtt_tu_max;
+	u16 upper, lower = bcn_stat->tbtt_tu_min;
+	u8 i, count = 0;
+
+	for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
+		upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
+		if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
+			upper = max(upper, tbtt_tu_max);
+
+		count += bcn_dist->bins[i];
+		if (count > target)
+			break;
+
+		lower = upper + 1;
+	}
+
+	return upper;
+}
+
+static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev,
+				 const struct rtw89_chan *chan)
+{
+#define RTW89_SYMBOL_TIME_2GHZ 192
+#define RTW89_SYMBOL_TIME_5GHZ 20
+#define RTW89_SYMBOL_TIME_6GHZ 20
+	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+	u16 bitrate, val;
+
+	if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate))
+		return 0;
+
+	val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate;
+
+	switch (chan->band_type) {
+	default:
+	case RTW89_BAND_2G:
+		val += RTW89_SYMBOL_TIME_2GHZ;
+		break;
+	case RTW89_BAND_5G:
+		val += RTW89_SYMBOL_TIME_5GHZ;
+		break;
+	case RTW89_BAND_6G:
+		val += RTW89_SYMBOL_TIME_6GHZ;
+		break;
+	}
+
+	/* convert to millisecond */
+	return DIV_ROUND_UP(val, 1000);
+}
+
+static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev,
+				   struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5
+#define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */
+#define RTW89_BCN_TRACK_STRONG_RSSI 80
+	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+	u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th;
+	u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th;
+	u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+	u16 target_bcn_th = bcn_track->target_bcn_th;
+	u16 low_bcn_th = bcn_track->low_bcn_th;
+	u16 med_bcn_th = bcn_track->med_bcn_th;
+	u16 beacon_int = bcn_track->beacon_int;
+	u16 bcn_timeout;
+
+	if (pkt_stat->beacon_nr < low_bcn_th) {
+		bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT;
+		goto out;
+	}
+
+	if (bcn_dist->outlier_count >= outlier_high_bcn_th) {
+		bcn_timeout = bcn_dist->max;
+		goto out;
+	}
+
+	if (pkt_stat->beacon_nr < med_bcn_th) {
+		if (bcn_dist->outlier_count > outlier_low_bcn_th)
+			bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2;
+		else
+			bcn_timeout = bcn_dist->upper_bound +
+				      RTW89_BCN_TRACK_EXTEND_TIMEOUT;
+
+		goto out;
+	}
+
+	if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) {
+		if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) {
+			/* ideal case */
+			bcn_timeout = 0;
+		} else {
+			u16 offset_tu = bcn_track->tbtt_offset / 1024;
+			u16 upper_bound;
+
+			upper_bound =
+				rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th);
+			bcn_timeout =
+				_rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int);
+		}
+
+		goto out;
+	}
+
+	bcn_timeout = bcn_stat->drift[target_bcn_th];
+
+out:
+	bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan);
+}
+
+static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev,
+				     struct rtw89_vif_link *rtwvif_link)
+{
+	rtw89_bcn_calc_drift(rtwdev);
+	rtw89_bcn_calc_distribution(rtwdev);
+	rtw89_bcn_calc_timeout(rtwdev, rtwvif_link);
+}
+
+static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	struct rtw89_vif_link *rtwvif_link;
+	struct rtw89_vif *rtwvif;
+	unsigned int link_id;
+
+	if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+		return;
+
+	if (!rtwdev->lps_enabled)
+		return;
+
+	if (!bcn_track->is_data_ready)
+		return;
+
+	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+			if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION ||
+			      rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT))
+				continue;
+
+			rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link);
+			rtw89_bcn_update_timeout(rtwdev, rtwvif_link);
+		}
+	}
+}
+
+static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+
+	if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+		return true;
+
+	return bcn_track->is_data_ready;
+}
+
+static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev,
+				       struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_MED_BCN 70
+#define RTW89_BCN_TRACK_LOW_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD);
+	struct ieee80211_bss_conf *bss_conf;
+	u32 beacons_in_period;
+	u32 bcn_intvl_us;
+	u16 beacon_int;
+	u8 dtim;
+
+	rcu_read_lock();
+	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+	beacon_int = bss_conf->beacon_int;
+	dtim = bss_conf->dtim_period;
+	rcu_read_unlock();
+
+	beacons_in_period = period / beacon_int / dtim;
+	bcn_intvl_us = ieee80211_tu_to_usec(beacon_int);
+
+	bcn_track->low_bcn_th =
+		(beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT;
+	bcn_track->med_bcn_th =
+		(beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT;
+	bcn_track->outlier_low_bcn_th =
+		(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT;
+	bcn_track->outlier_high_bcn_th =
+		(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT;
+	bcn_track->target_bcn_th =
+		(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT;
+
+	bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3);
+	bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT;
+	bcn_track->beacon_int = beacon_int;
+	bcn_track->dtim = dtim;
+}
+
+static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev)
+{
+	memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat));
+	memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track));
+}
+
+static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
+				  struct ieee80211_bss_conf *bss_conf,
+				  struct sk_buff *skb)
+{
+#define RTW89_APPEND_TSF_2GHZ 384
+#define RTW89_APPEND_TSF_5GHZ 52
+#define RTW89_APPEND_TSF_6GHZ 52
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+	struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+	u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int);
+	u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp);
+	u8 wp, num = bcn_stat->num;
+	u16 append;
+
+	if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+		return;
+
+	switch (rx_status->band) {
+	default:
+	case NL80211_BAND_2GHZ:
+		append = RTW89_APPEND_TSF_2GHZ;
+		break;
+	case NL80211_BAND_5GHZ:
+		append = RTW89_APPEND_TSF_5GHZ;
+		break;
+	case NL80211_BAND_6GHZ:
+		append = RTW89_APPEND_TSF_6GHZ;
+		break;
+	}
+
+	wp = bcn_stat->wp;
+	div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]);
+	bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024;
+	bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR;
+	bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR);
+	bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR;
+}
+
 static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
 {
@@ -2237,6 +2741,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
 	struct ieee80211_bss_conf *bss_conf;
 	struct rtw89_vif_link *rtwvif_link;
 	const u8 *bssid = iter_data->bssid;
+	const u8 *target_bssid;
 
 	if (rtwdev->scanning &&
	    (ieee80211_is_beacon(hdr->frame_control) ||
@@ -2258,7 +2763,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
 		goto out;
 	}
 
-	if (!ether_addr_equal(bss_conf->bssid, bssid))
+	target_bssid = ieee80211_is_beacon(hdr->frame_control) &&
+		       bss_conf->nontransmitted ?
+		       bss_conf->transmitter_bssid : bss_conf->bssid;
+	if (!ether_addr_equal(target_bssid, bssid))
 		goto out;
 
 	if (is_mld) {
@@ -2272,7 +2780,6 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
 			rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
 			rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
 		}
-		pkt_stat->beacon_nr++;
 
 		if (phy_ppdu) {
 			ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
@@ -2280,7 +2787,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
 				rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
 		}
 
+		pkt_stat->beacon_nr++;
 		pkt_stat->beacon_rate = desc_info->data_rate;
+		pkt_stat->beacon_len = skb->len;
+
+		rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb);
 	}
 
 	if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -3226,7 +3737,7 @@ static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
 	u8 qsel, ch_dma;
 
 	qsel = rtw89_core_get_qsel(rtwdev, tid);
-	ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+	ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
 
 	return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
 }
@@ -3411,6 +3922,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
 	int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
 	struct rtw89_sta_link *rtwsta_link;
+	struct rtw89_tx_wait_info *wait;
 	struct ieee80211_sta *sta;
 	struct ieee80211_hdr *hdr;
 	struct rtw89_sta *rtwsta;
@@ -3420,6 +3932,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
 	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
 		return 0;
 
+	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+	if (!wait)
+		return -ENOMEM;
+
+	init_completion(&wait->completion);
+
 	rcu_read_lock();
 	sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
 	if (!sta) {
@@ -3434,6 +3952,8 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
 		goto out;
 	}
 
+	wait->skb = skb;
+
 	hdr = (struct ieee80211_hdr *)skb->data;
 	if (ps)
 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -3441,10 +3961,12 @@
 	rtwsta_link = rtwsta->links[rtwvif_link->link_id];
 	if (unlikely(!rtwsta_link)) {
 		ret = -ENOLINK;
+		dev_kfree_skb_any(skb);
 		goto out;
 	}
 
-	ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
+	ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
+				       wait);
 	if (ret) {
 		rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
 		dev_kfree_skb_any(skb);
@@ -3453,10 +3975,11 @@
 
 	rcu_read_unlock();
 
-	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
+	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
					       timeout);
 out:
 	rcu_read_unlock();
+	kfree(wait);
 
 	return ret;
 }
@@ -3697,6 +4220,9 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
		      vif->type == NL80211_IFTYPE_P2P_CLIENT))
			continue;
 
+		if (!rtw89_core_bcn_track_can_lps(rtwdev))
+			continue;
+
		rtw89_enter_lps(rtwdev, rtwvif, true);
	}
 }
@@ -3883,6 +4409,7 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
		rtw89_btc_ntfy_wl_sta(rtwdev);
	}
	rtw89_mac_bf_monitor_track(rtwdev);
+	rtw89_core_bcn_track(rtwdev);
	rtw89_phy_stat_track(rtwdev);
	rtw89_phy_env_monitor_track(rtwdev);
	rtw89_phy_dig(rtwdev);
@@ -4129,8 +4656,10 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
 
	rtw89_assoc_link_clr(rtwsta_link);
 
-	if (vif->type == NL80211_IFTYPE_STATION)
+	if (vif->type == NL80211_IFTYPE_STATION) {
		rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+		rtw89_core_bcn_track_reset(rtwdev);
+	}
 
	if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
		rtw89_p2p_noa_once_deinit(rtwvif_link);
@@ -4271,6 +4800,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
			      BTC_ROLE_MSTS_STA_CONN_END);
	rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
	rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
+	rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link);
 
	ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
	if (ret) {
@@ -4829,39 +5359,96 @@ void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
	}
 }
 
-int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
+struct rtw89_wait_response *
+rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
 {
-	struct completion *cmpl = &wait->completion;
-	unsigned long time_left;
+	struct rtw89_wait_response *prep;
	unsigned int cur;
 
+	/* use -EPERM _iff_ telling eval side not to make any changes */
+
	cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
	if (cur != RTW89_WAIT_COND_IDLE)
-		return -EBUSY;
+		return ERR_PTR(-EPERM);
+
+	prep = kzalloc(sizeof(*prep), GFP_KERNEL);
+	if (!prep)
+		return ERR_PTR(-ENOMEM);
+
+	init_completion(&prep->completion);
+
+	rcu_assign_pointer(wait->resp, prep);
+
+	return prep;
+}
+
+int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
+			     struct rtw89_wait_response *prep, int err)
+{
+	unsigned long time_left;
+
+	if (IS_ERR(prep)) {
+		err = err ?: PTR_ERR(prep);
+
+		/* special error case: no permission to reset anything */
+		if (PTR_ERR(prep) == -EPERM)
+			return err;
 
-	time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+		goto reset;
+	}
+
+	if (err)
+		goto cleanup;
+
+	time_left = wait_for_completion_timeout(&prep->completion,
+						RTW89_WAIT_FOR_COND_TIMEOUT);
	if (time_left == 0) {
-		atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
-		return -ETIMEDOUT;
+		err = -ETIMEDOUT;
+		goto cleanup;
	}
 
+	wait->data = prep->data;
+
+cleanup:
+	rcu_assign_pointer(wait->resp, NULL);
+	kfree_rcu(prep, rcu_head);
+
+reset:
+	atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
+
+	if (err)
+		return err;
+
	if (wait->data.err)
		return -EFAULT;
 
	return 0;
 }
 
+static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp,
+				     const struct rtw89_completion_data *data)
+{
+	resp->data = *data;
+	complete(&resp->completion);
+}
+
 void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
			 const struct rtw89_completion_data *data)
 {
+	struct rtw89_wait_response *resp;
	unsigned int cur;
 
+	guard(rcu)();
+
+	resp = rcu_dereference(wait->resp);
+	if (!resp)
+		return;
+
	cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
	if (cur != cond)
		return;
 
-	wait->data = *data;
-	complete(&wait->completion);
+	rtw89_complete_cond_resp(resp, data);
 }
 
 void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
@@ -4908,6 +5495,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
 {
	int ret;
 
+	rtw89_phy_init_bb_afe(rtwdev);
+
	ret = rtw89_mac_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -4952,6 +5541,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
	rtw89_fw_h2c_init_ba_cam(rtwdev);
+	rtw89_tas_fw_timer_enable(rtwdev, true);
 
	return 0;
 }
@@ -4967,6 +5557,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;
 
+	rtw89_tas_fw_timer_enable(rtwdev, false);
	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
 
	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4978,6 +5569,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
	wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
	wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
+	wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
	wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
@@ -5203,6 +5795,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
		INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
	}
	INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
+	INIT_LIST_HEAD(&rtwdev->tx_waits);
	INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -5214,6 +5807,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
	wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
	wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
	wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
+	wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
	wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
@@ -5813,6 +6407,7 @@ int rtw89_core_register(struct rtw89_dev *rtwdev)
		return ret;
	}
 
+	rtw89_phy_dm_init_data(rtwdev);
	rtw89_debugfs_init(rtwdev);
 
	return 0;
@@ -5863,6 +6458,9 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
		ops->cancel_remain_on_channel = NULL;
	}
 
+	if (!chip->support_noise)
+		ops->get_survey = NULL;
+
	driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
	hw = ieee80211_alloc_hw(driver_data_size, ops);
	if (!hw)
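
The hunk at -4829,39 splits the old rtw89_wait_for_cond() into a prep/eval pair. A minimal caller sketch of the expected prep, send-command, eval flow, assuming a hypothetical rtw89_fw_h2c_example() sender that is not part of this patch:

	/* Sketch only: rtw89_fw_h2c_example() is a placeholder for the H2C
	 * command a real caller would issue between prep and eval.
	 */
	static int rtw89_example_wait_user(struct rtw89_dev *rtwdev,
					   struct rtw89_wait_info *wait,
					   unsigned int cond)
	{
		struct rtw89_wait_response *prep;
		int err;

		prep = rtw89_wait_for_cond_prep(wait, cond);
		/* prep may be an ERR_PTR(); eval folds -EPERM/-ENOMEM into its return */

		err = IS_ERR(prep) ? 0 : rtw89_fw_h2c_example(rtwdev);

		/* eval waits for rtw89_complete_cond(), handles the timeout, frees
		 * the response via kfree_rcu() and resets the wait back to
		 * RTW89_WAIT_COND_IDLE as needed.
		 */
		return rtw89_wait_for_cond_eval(wait, prep, err);
	}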
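rtw89_bcn_calc_distribution() derives its outlier bounds from quartiles of the sorted drift samples, keeping Q1/Q3 scaled by RTW89_BCN_TRACK_SCALE_FACTOR so the two-sample midpoints are not truncated. A standalone sketch of the same 1.5 * IQR boxplot bound, assuming a scale factor of 10 and a plain sorted array instead of the driver structures (the patch additionally pins the lower bound to the minimum drift when that minimum is already <= 5, which is omitted here):

	#include <stdint.h>

	#define SCALE	10	/* assumed stand-in for RTW89_BCN_TRACK_SCALE_FACTOR */

	/* n must be a multiple of 4 and the array must already be sorted,
	 * mirroring the BUILD_BUG_ON() and the sort() call in the patch.
	 */
	static void boxplot_bounds(const uint16_t *sorted, unsigned int n,
				   uint16_t *lower, uint16_t *upper)
	{
		uint16_t q1, q3, iqr, whisker;
		unsigned int i;

		i = n / 4;
		q1 = ((sorted[i] + sorted[i - 1]) * SCALE) / 2;
		i = (n * 3) / 4;
		q3 = ((sorted[i] + sorted[i - 1]) * SCALE) / 2;

		iqr = q3 - q1;
		whisker = (3 * iqr) / 2;	/* 1.5 * IQR, in scaled units */

		*lower = (q1 > whisker) ? (q1 - whisker) / SCALE : 0;
		*upper = (q3 + whisker) / SCALE;
	}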