developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 1 | From: Felix Fietkau <nbd@nbd.name> |
| 2 | Date: Sat, 6 Feb 2021 16:33:14 +0100 |
| 3 | Subject: [PATCH] mac80211: minstrel_ht: rework rate downgrade code and |
| 4 | max_prob rate selection |
| 5 | |
| 6 | The current fallback code for fast rate switching on potentially failing rates |
| 7 | is triggering too often if there is some strong noise on the channel. This can |
| 8 | lead to wild fluctuations in the rate selection. |
| 9 | Additionally, switching down to max_prob_rate can create a significant drop |
| 10 | in throughput, especially when using only 2 spatial streams, because max_prob_rate |
| 11 | is limited to using fewer streams than the max_tp rates. |
| 12 | In order to improve throughput without reducing reliability too much, use the |
| 13 | rate downgrade code for the max_prob_rate only, and allow the non-downgraded |
| 14 | max_prob_rate to use as many spatial streams as the max_tp rates. |
| 15 | |
| 16 | Signed-off-by: Felix Fietkau <nbd@nbd.name> |
| 17 | --- |
| 18 | |
| 19 | --- a/net/mac80211/rc80211_minstrel_ht.c |
| 20 | +++ b/net/mac80211/rc80211_minstrel_ht.c |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 21 | @@ -580,6 +580,14 @@ minstrel_ht_set_best_prob_rate(struct mi |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 22 | int cur_tp_avg, cur_group, cur_idx; |
| 23 | int max_gpr_group, max_gpr_idx; |
| 24 | int max_gpr_tp_avg, max_gpr_prob; |
| 25 | + int min_dur; |
| 26 | + |
| 27 | + min_dur = max(minstrel_get_duration(mi->max_tp_rate[0]), |
| 28 | + minstrel_get_duration(mi->max_tp_rate[1])); |
| 29 | + |
| 30 | + /* make the rate at least 18% slower than max tp rates */ |
| 31 | + if (minstrel_get_duration(index) <= min_dur * 19 / 16) |
| 32 | + return; |
| 33 | |
| 34 | cur_group = MI_RATE_GROUP(index); |
| 35 | cur_idx = MI_RATE_IDX(index); |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 36 | @@ -601,11 +609,6 @@ minstrel_ht_set_best_prob_rate(struct mi |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 37 | !minstrel_ht_is_legacy_group(max_tp_group)) |
| 38 | return; |
| 39 | |
| 40 | - /* skip rates faster than max tp rate with lower prob */ |
| 41 | - if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) && |
| 42 | - mrs->prob_avg < max_tp_prob) |
| 43 | - return; |
| 44 | - |
| 45 | max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate); |
| 46 | max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate); |
| 47 | max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg; |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 48 | @@ -663,40 +666,6 @@ minstrel_ht_assign_best_tp_rates(struct |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 49 | |
| 50 | } |
| 51 | |
| 52 | -/* |
| 53 | - * Try to increase robustness of max_prob rate by decrease number of |
| 54 | - * streams if possible. |
| 55 | - */ |
| 56 | -static inline void |
| 57 | -minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) |
| 58 | -{ |
| 59 | - struct minstrel_mcs_group_data *mg; |
| 60 | - int tmp_max_streams, group, tmp_idx, tmp_prob; |
| 61 | - int tmp_tp = 0; |
| 62 | - |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 63 | - if (!mi->sta->deflink.ht_cap.ht_supported) |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 64 | - return; |
| 65 | - |
| 66 | - group = MI_RATE_GROUP(mi->max_tp_rate[0]); |
| 67 | - tmp_max_streams = minstrel_mcs_groups[group].streams; |
| 68 | - for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { |
| 69 | - mg = &mi->groups[group]; |
| 70 | - if (!mi->supported[group] || group == MINSTREL_CCK_GROUP) |
| 71 | - continue; |
| 72 | - |
| 73 | - tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate); |
| 74 | - tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg; |
| 75 | - |
| 76 | - if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) && |
| 77 | - (minstrel_mcs_groups[group].streams < tmp_max_streams)) { |
| 78 | - mi->max_prob_rate = mg->max_group_prob_rate; |
| 79 | - tmp_tp = minstrel_ht_get_tp_avg(mi, group, |
| 80 | - tmp_idx, |
| 81 | - tmp_prob); |
| 82 | - } |
| 83 | - } |
| 84 | -} |
| 85 | - |
| 86 | static u16 |
| 87 | __minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi, |
| 88 | enum minstrel_sample_type type) |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 89 | @@ -1176,8 +1145,6 @@ minstrel_ht_update_stats(struct minstrel |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 90 | |
| 91 | mi->max_prob_rate = tmp_max_prob_rate; |
| 92 | |
| 93 | - /* Try to increase robustness of max_prob_rate*/ |
| 94 | - minstrel_ht_prob_rate_reduce_streams(mi); |
| 95 | minstrel_ht_refill_sample_rates(mi); |
| 96 | |
| 97 | #ifdef CPTCFG_MAC80211_DEBUGFS |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 98 | @@ -1256,7 +1223,7 @@ minstrel_ht_ri_txstat_valid(struct minst |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 99 | } |
| 100 | |
| 101 | static void |
| 102 | -minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary) |
| 103 | +minstrel_downgrade_prob_rate(struct minstrel_ht_sta *mi, u16 *idx) |
| 104 | { |
| 105 | int group, orig_group; |
| 106 | |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 107 | @@ -1271,11 +1238,7 @@ minstrel_downgrade_rate(struct minstrel_ |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 108 | minstrel_mcs_groups[orig_group].streams) |
| 109 | continue; |
| 110 | |
| 111 | - if (primary) |
| 112 | - *idx = mi->groups[group].max_group_tp_rate[0]; |
| 113 | - else |
| 114 | - *idx = mi->groups[group].max_group_tp_rate[1]; |
| 115 | - break; |
| 116 | + *idx = mi->groups[group].max_group_prob_rate; |
| 117 | } |
| 118 | } |
| 119 | |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 120 | @@ -1286,7 +1249,7 @@ minstrel_ht_tx_status(void *priv, struct |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 121 | struct ieee80211_tx_info *info = st->info; |
| 122 | struct minstrel_ht_sta *mi = priv_sta; |
| 123 | struct ieee80211_tx_rate *ar = info->status.rates; |
| 124 | - struct minstrel_rate_stats *rate, *rate2; |
| 125 | + struct minstrel_rate_stats *rate; |
| 126 | struct minstrel_priv *mp = priv; |
| 127 | u32 update_interval = mp->update_interval; |
| 128 | bool last, update = false; |
developer | 79e690d | 2022-12-13 17:05:25 +0800 | [diff] [blame] | 129 | @@ -1354,18 +1317,13 @@ minstrel_ht_tx_status(void *priv, struct |
developer | 6a1998b | 2022-12-08 18:09:45 +0800 | [diff] [blame] | 130 | /* |
| 131 | * check for sudden death of spatial multiplexing, |
| 132 | * downgrade to a lower number of streams if necessary. |
| 133 | + * only do this for the max_prob_rate to prevent spurious |
| 134 | + * rate fluctuations when the link changes suddenly |
| 135 | */ |
| 136 | - rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]); |
| 137 | + rate = minstrel_get_ratestats(mi, mi->max_prob_rate); |
| 138 | if (rate->attempts > 30 && |
| 139 | rate->success < rate->attempts / 4) { |
| 140 | - minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true); |
| 141 | - update = true; |
| 142 | - } |
| 143 | - |
| 144 | - rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]); |
| 145 | - if (rate2->attempts > 30 && |
| 146 | - rate2->success < rate2->attempts / 4) { |
| 147 | - minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false); |
| 148 | + minstrel_downgrade_prob_rate(mi, &mi->max_prob_rate); |
| 149 | update = true; |
| 150 | } |
| 151 | } |