Source-code library — Linux drivers/net/wireless/intel/iwlwifi/mvm/ (open-source operating system, version 6.17.9). File dated 2025-10-24, size 183 kB.

Source file: mac80211.c — Language: C

 
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */

#include <linux/kernel.h>
#include <linux/fips.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>

#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
#include "iwl-nvm-utils.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"
#include "time-sync.h"

/* Interface concurrency limits: one managed station, one of
 * (optionally AP |) P2P-client | P2P-GO, and one P2P device.
 * @ap is OR'ed into the second limit so the same template serves
 * both combination tables below.
 */
#define IWL_MVM_LIMITS(ap)     \
 {       \
  .max = 1,     \
  .types = BIT(NL80211_IFTYPE_STATION),  \
 },       \
 {       \
  .max = 1,     \
  .types = ap |     \
    BIT(NL80211_IFTYPE_P2P_CLIENT) | \
    BIT(NL80211_IFTYPE_P2P_GO),  \
 },       \
 {       \
  .max = 1,     \
  .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
 }

/* limits without AP among the interface types */
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
 IWL_MVM_LIMITS(0)
};

/* limits including AP interfaces */
static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
 IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};

/* Advertised combinations: up to 3 interfaces in both cases;
 * 2 different channels without AP, only 1 channel when AP is used.
 */
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
 {
  .num_different_channels = 2,
  .max_interfaces = 3,
  .limits = iwl_mvm_limits,
  .n_limits = ARRAY_SIZE(iwl_mvm_limits),
 },
 {
  .num_different_channels = 1,
  .max_interfaces = 3,
  .limits = iwl_mvm_limits_ap,
  .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap),
 },
};

/* Peer-measurement (FTM) capabilities advertised to userspace via
 * cfg80211; installed as hw->wiphy->pmsr_capa only when the firmware
 * reports IWL_UCODE_TLV_CAPA_FTM_CALIBRATED (see setup_register below).
 */
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
 .max_peers = IWL_TOF_MAX_APS,
 .report_ap_tsf = 1,
 .randomize_mac_addr = 1,

 .ftm = {
  .supported = 1,
  .asap = 1,
  .non_asap = 1,
  .request_lci = 1,
  .request_civicloc = 1,
  .trigger_based = 1,
  .non_trigger_based = 1,
  .max_bursts_exponent = -1, /* all supported */
  .max_ftms_per_burst = 0, /* no limits */
  .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
         BIT(NL80211_CHAN_WIDTH_20) |
         BIT(NL80211_CHAN_WIDTH_40) |
         BIT(NL80211_CHAN_WIDTH_80) |
         BIT(NL80211_CHAN_WIDTH_160),
  .preambles = BIT(NL80211_PREAMBLE_LEGACY) |
        BIT(NL80211_PREAMBLE_HT) |
        BIT(NL80211_PREAMBLE_VHT) |
        BIT(NL80211_PREAMBLE_HE),
 },
};

static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
     enum set_key_cmd cmd,
     struct ieee80211_vif *vif,
     struct ieee80211_sta *sta,
     struct ieee80211_key_conf *key);

/* Clear all PHY contexts and re-assign each one its hardware index. */
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
 int idx;

 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));

 for (idx = 0; idx < NUM_PHY_CTX; idx++) {
  mvm->phy_ctxts[idx].ref = 0;
  mvm->phy_ctxts[idx].id = idx;
 }
}

/*
 * iwl_mvm_get_regdomain - build a regdomain from a firmware MCC update
 * @wiphy: the wiphy to fetch the regdomain for
 * @alpha2: country code to ask the firmware about ("ZZ" = current)
 * @src_id: source that triggered the MCC change
 * @changed: if non-NULL, set to true when the channel profile changed
 *
 * Sends an MCC update command to the firmware and parses the response
 * into an ieee80211_regdomain. Caller must hold mvm->mutex and owns
 * (must kfree) the returned regdomain. On failure the return value is
 * NULL or ERR_PTR-encoded — callers use IS_ERR_OR_NULL().
 */
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        const char *alpha2,
        enum iwl_mcc_source src_id,
        bool *changed)
{
 struct ieee80211_regdomain *regd = NULL;
 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mcc_update_resp_v8 *resp;
 u8 resp_ver;

 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

 lockdep_assert_held(&mvm->mutex);

 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
 if (IS_ERR_OR_NULL(resp)) {
  IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
         PTR_ERR_OR_ZERO(resp));
  /* don't kfree() an ERR_PTR below */
  resp = NULL;
  goto out;
 }

 if (changed) {
  u32 status = le32_to_cpu(resp->status);

  *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
       status == MCC_RESP_ILLEGAL);
 }
 resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
        MCC_UPDATE_CMD, 0);
 IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);

 regd = iwl_parse_nvm_mcc_info(mvm->trans,
          __le32_to_cpu(resp->n_channels),
          resp->channels,
          __le16_to_cpu(resp->mcc),
          __le16_to_cpu(resp->geo_info),
          le32_to_cpu(resp->cap), resp_ver);
 /* Store the return source id */
 src_id = resp->source_id;
 if (IS_ERR_OR_NULL(regd)) {
  IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
         PTR_ERR_OR_ZERO(regd));
  goto out;
 }

 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
        regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
 mvm->lar_regdom_set = true;
 mvm->mcc_src = src_id;

 /* propagate the new country code to the MEI layer as well */
 iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));

out:
 kfree(resp);
 return regd;
}

/* Re-query the current regdomain from the firmware and, if it changed,
 * push the new one to the regulatory core. No-op without LAR support.
 */
void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
{
 struct ieee80211_regdomain *regd;
 bool changed;

 if (!iwl_mvm_is_lar_supported(mvm))
  return;

 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
 if (IS_ERR_OR_NULL(regd))
  return;

 /* only bother the regulatory core when something changed */
 if (changed)
  regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

 kfree(regd);
}

/* Ask the firmware for the currently applied regdomain; "ZZ" is the
 * wildcard country code meaning "whatever is in effect right now".
 */
struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
         bool *changed)
{
 enum iwl_mcc_source src;

 src = iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_GET_CURRENT :
       MCC_SOURCE_OLD_FW;

 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", src, changed);
}

/*
 * iwl_mvm_init_fw_regd - replay the stored regdomain to the firmware
 * @mvm: the iwl_mvm operating mode data
 * @force_regd_sync: push the regdomain to cfg80211 even if unchanged
 *
 * Re-applies the wiphy's current regulatory domain (e.g. after firmware
 * restart). Returns -ENOENT when no regdomain was ever set, -EIO when
 * the firmware update fails, otherwise the result of the cfg80211 sync
 * (or 0 when no sync was needed).
 */
int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync)
{
 enum iwl_mcc_source used_src;
 struct ieee80211_regdomain *regd;
 int ret;
 bool changed;
 const struct ieee80211_regdomain *r =
   wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd);

 if (!r)
  return -ENOENT;

 /* save the last source in case we overwrite it below */
 used_src = mvm->mcc_src;
 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
  /* Notify the firmware we support wifi location updates */
  regd = iwl_mvm_get_current_regdomain(mvm, NULL);
  if (!IS_ERR_OR_NULL(regd))
   kfree(regd);
 }

 /* Now set our last stored MCC and source */
 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
         &changed);
 if (IS_ERR_OR_NULL(regd))
  return -EIO;

 /* update cfg80211 if the regdomain was changed or the caller
  * explicitly asked to update the regdomain
  */

 if (changed || force_regd_sync)
  ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
 else
  ret = 0;

 kfree(regd);
 return ret;
}

/* Extended-capability bytes for HE stations.
 * Each capability added here should also be added to
 * tm_if_types_ext_capa_sta below.
 */
static const u8 he_if_types_ext_capa_sta[] = {
  [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
  [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
  [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
        WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
  [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
};

/* Superset of he_if_types_ext_capa_sta that additionally advertises
 * timing measurement and TWT requester support.
 */
static const u8 tm_if_types_ext_capa_sta[] = {
  [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
  [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT |
        WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT,
  [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
        WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
  [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
  [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};

/* Additional interface types for which extended capabilities are
 * specified separately
 */


/* EMLSR capabilities: supported, with 32us padding and 64us transition
 * delay encoded into the respective EML capability fields.
 */
#define IWL_MVM_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \
     IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \
     __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
     IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
     __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
#define IWL_MVM_MLD_CAPA_OPS (FIELD_PREP_CONST( \
   IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
   IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
   IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)

/* NOTE: setup_register selects a sub-range of this array by pointer
 * arithmetic (entry 0 = HE capa, entry 1 = timing-measurement capa),
 * so the order of entries matters.
 */
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
 {
  .iftype = NL80211_IFTYPE_STATION,
  .extended_capabilities = he_if_types_ext_capa_sta,
  .extended_capabilities_mask = he_if_types_ext_capa_sta,
  .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
  /* relevant only if EHT is supported */
  .eml_capabilities = IWL_MVM_EMLSR_CAPA,
  .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
 },
 {
  .iftype = NL80211_IFTYPE_STATION,
  .extended_capabilities = tm_if_types_ext_capa_sta,
  .extended_capabilities_mask = tm_if_types_ext_capa_sta,
  .extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
  /* relevant only if EHT is supported */
  .eml_capabilities = IWL_MVM_EMLSR_CAPA,
  .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
 },
};

/* mac80211 .get_antenna callback: report the valid TX/RX antenna masks. */
int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
      u32 *tx_ant, u32 *rx_ant)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

 *rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
 *tx_ant = iwl_mvm_get_valid_tx_ant(mvm);

 return 0;
}

/* mac80211 .set_antenna callback: override the TX/RX antenna masks.
 * Only allowed on device families where this was actually validated.
 */
int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
      u32 rx_ant)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

 /* This has been tested on those devices only */
 switch (mvm->trans->mac_cfg->device_family) {
 case IWL_DEVICE_FAMILY_9000:
 case IWL_DEVICE_FAMILY_22000:
 case IWL_DEVICE_FAMILY_AX210:
  break;
 default:
  return -EOPNOTSUPP;
 }

 if (!mvm->nvm_data)
  return -EBUSY;

 /* mac80211 ensures the device is not started,
  * so the firmware cannot be running
  */
 mvm->set_tx_ant = tx_ant;
 mvm->set_rx_ant = rx_ant;

 iwl_reinit_cab(mvm->trans, mvm->nvm_data, tx_ant, rx_ant, mvm->fw);

 return 0;
}

/*
 * iwl_mvm_mac_setup_register - advertise capabilities and register with
 * mac80211
 * @mvm: the iwl_mvm operating mode data
 *
 * Translates NVM/firmware capabilities (ciphers, HE/EHT/MLO, scan, FTM,
 * WoWLAN, TDLS, regulatory, ...) into ieee80211_hw and wiphy flags and
 * features, then registers the hardware with mac80211.
 *
 * Returns 0 on success or a negative error code.
 */
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
 struct ieee80211_hw *hw = mvm->hw;
 int num_mac, ret, i;
 static const u32 mvm_ciphers[] = {
  WLAN_CIPHER_SUITE_WEP40,
  WLAN_CIPHER_SUITE_WEP104,
  WLAN_CIPHER_SUITE_TKIP,
  WLAN_CIPHER_SUITE_CCMP,
 };
#ifdef CONFIG_PM_SLEEP
 bool unified = fw_has_capa(&mvm->fw->ucode_capa,
       IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
#endif
 u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
 u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);

 /* Tell mac80211 our characteristics */
 ieee80211_hw_set(hw, SIGNAL_DBM);
 ieee80211_hw_set(hw, SPECTRUM_MGMT);
 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 ieee80211_hw_set(hw, SUPPORTS_PS);
 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 ieee80211_hw_set(hw, CONNECTION_MONITOR);
 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
 ieee80211_hw_set(hw, STA_MMPDU_TXQ);

 /* Set this early since we need to have it for the check below */
 if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
     !iwlwifi_mod_params.disable_11ax &&
     !iwlwifi_mod_params.disable_11be) {
  hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
  /* we handle this already earlier, but need it for MLO */
  ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
 }

 /* With MLD FW API, it tracks timing by itself,
  * no need for any timing from the host
  */

 if (!mvm->mld_api_is_used)
  ieee80211_hw_set(hw, TIMING_BEACON_ONLY);

 /*
  * On older devices, enabling TX A-MSDU occasionally leads to
  * something getting messed up, the command read from the FIFO
  * gets out of sync and isn't a TX command, so that we have an
  * assert EDC.
  *
  * It's not clear where the bug is, but since we didn't used to
  * support A-MSDU until moving the mac80211 iTXQs, just leave it
  * for older devices. We also don't see this issue on any newer
  * devices.
  */

 if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
  ieee80211_hw_set(hw, TX_AMSDU);
 ieee80211_hw_set(hw, TX_FRAG_LIST);

 if (iwl_mvm_has_tlc_offload(mvm)) {
  ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
  ieee80211_hw_set(hw, HAS_RATE_CONTROL);
 }

 /* We want to use the mac80211's reorder buffer for 9000 */
 if (iwl_mvm_has_new_rx_api(mvm) &&
     mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_9000)
  ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
  ieee80211_hw_set(hw, AP_LINK_PS);
 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
  /*
   * we absolutely need this for the new TX API since that comes
   * with many more queues than the current code can deal with
   * for station powersave
   */

  return -EINVAL;
 }

 if (mvm->trans->info.num_rxqs > 1)
  ieee80211_hw_set(hw, USES_RSS);

 if (mvm->trans->info.max_skb_frags)
  hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;

 hw->queues = IEEE80211_NUM_ACS;
 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
        IEEE80211_RADIOTAP_MCS_HAVE_STBC;
 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
  IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;

 hw->radiotap_timestamp.units_pos =
  IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
  IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
 /* this is the case for CCK frames, it's better (only 8) for OFDM */
 hw->radiotap_timestamp.accuracy = 22;

 if (!iwl_mvm_has_tlc_offload(mvm))
  hw->rate_control_algorithm = RS_NAME;

 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 hw->max_tx_fragments = mvm->trans->info.max_skb_frags;

 /* base ciphers plus up to 6 conditionally-added ones below */
 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
 hw->wiphy->cipher_suites = mvm->ciphers;

 if (iwl_mvm_has_new_rx_api(mvm)) {
  mvm->ciphers[hw->wiphy->n_cipher_suites] =
   WLAN_CIPHER_SUITE_GCMP;
  hw->wiphy->n_cipher_suites++;
  mvm->ciphers[hw->wiphy->n_cipher_suites] =
   WLAN_CIPHER_SUITE_GCMP_256;
  hw->wiphy->n_cipher_suites++;
 }

 if (iwlwifi_mod_params.swcrypto)
  IWL_ERR(mvm,
   "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n");
 if (!iwlwifi_mod_params.bt_coex_active)
  IWL_ERR(mvm,
   "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");

 if (!fips_enabled)
  ieee80211_hw_set(hw, MFP_CAPABLE);

 mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
 hw->wiphy->n_cipher_suites++;
 if (iwl_mvm_has_new_rx_api(mvm)) {
  mvm->ciphers[hw->wiphy->n_cipher_suites] =
   WLAN_CIPHER_SUITE_BIP_GMAC_128;
  hw->wiphy->n_cipher_suites++;
  mvm->ciphers[hw->wiphy->n_cipher_suites] =
   WLAN_CIPHER_SUITE_BIP_GMAC_256;
  hw->wiphy->n_cipher_suites++;
 }

 wiphy_ext_feature_set(hw->wiphy,
         NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
 wiphy_ext_feature_set(hw->wiphy,
         NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
  hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
 }

 /*
  * beacon protection must be handled by firmware,
  * so cannot be done with fips_enabled
  */

 if (!fips_enabled && sec_key_ver &&
     fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT))
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_BEACON_PROTECTION);
 else if (!fips_enabled &&
   fw_has_capa(&mvm->fw->ucode_capa,
        IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
  hw->wiphy->hw_timestamp_max_peers = 1;

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);

 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
 hw->wiphy->features |=
  NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
  NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
  NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 hw->chanctx_data_size = sizeof(u16);
 hw->txq_data_size = sizeof(struct iwl_mvm_txq);

 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
  BIT(NL80211_IFTYPE_P2P_CLIENT) |
  BIT(NL80211_IFTYPE_AP) |
  BIT(NL80211_IFTYPE_P2P_GO) |
  BIT(NL80211_IFTYPE_P2P_DEVICE) |
  BIT(NL80211_IFTYPE_ADHOC);

 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

 /* The new Tx API does not allow to pass the key or keyid of a MPDU to
  * the hw, preventing us to control which key(id) to use per MPDU.
  * Till that's fixed we can't use Extended Key ID for the newer cards.
  */

 if (!iwl_mvm_has_new_tx_api(mvm))
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_EXT_KEY_ID);
 hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;

 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
 if (iwl_mvm_is_lar_supported(mvm))
  hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
 else
  hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
            REGULATORY_DISABLE_BEACON_HINTS;

 if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_DFS_CONCURRENT);

 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
 hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;

 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
 hw->wiphy->n_iface_combinations =
  ARRAY_SIZE(iwl_mvm_iface_combinations);

 hw->wiphy->max_remain_on_channel_duration = 10000;
 hw->max_listen_interval = IWL_MVM_CONN_LISTEN_INTERVAL;

 /* Extract MAC address */
 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
 hw->wiphy->addresses = mvm->addresses;
 hw->wiphy->n_addresses = 1;

 /* Extract additional MAC addresses if available */
 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
  min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;

 /* derive the extra addresses by incrementing the last octet */
 for (i = 1; i < num_mac; i++) {
  memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
         ETH_ALEN);
  mvm->addresses[i].addr[5]++;
  hw->wiphy->n_addresses++;
 }

 iwl_mvm_reset_phy_ctxts(mvm);

 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);

 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
 BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
       IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));

 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
  mvm->max_scans = IWL_MAX_UMAC_SCANS;
 else
  mvm->max_scans = IWL_MAX_LMAC_SCANS;

 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
  hw->wiphy->bands[NL80211_BAND_2GHZ] =
   &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
  hw->wiphy->bands[NL80211_BAND_5GHZ] =
   &mvm->nvm_data->bands[NL80211_BAND_5GHZ];

  if (fw_has_capa(&mvm->fw->ucode_capa,
    IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
      fw_has_api(&mvm->fw->ucode_capa,
          IWL_UCODE_TLV_API_LQ_SS_PARAMS))
   hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
    IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 }
 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) &&
     mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels)
  hw->wiphy->bands[NL80211_BAND_6GHZ] =
   &mvm->nvm_data->bands[NL80211_BAND_6GHZ];

 hw->wiphy->hw_version = mvm->trans->info.hw_id;

 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
  hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
 else
  hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

 hw->wiphy->max_sched_scan_reqs = 1;
 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
 hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw);
 /* we create the 802.11 header and zero length SSID IE. */
 hw->wiphy->max_sched_scan_ie_len =
  SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;

 /*
  * the firmware uses u8 for num of iterations, but 0xff is saved for
  * infinite loop, so the maximum number of iterations is actually 254.
  */

 hw->wiphy->max_sched_scan_plan_iterations = 254;

 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
          NL80211_FEATURE_LOW_PRIORITY_SCAN |
          NL80211_FEATURE_P2P_GO_OPPPS |
          NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
          NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

 /* when firmware supports RLC/SMPS offload, do not set these
  * driver features, since it's no longer supported by driver.
  */

 if (!iwl_mvm_has_rlc_offload(mvm))
  hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS |
           NL80211_FEATURE_DYNAMIC_SMPS;

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
  hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
  hw->wiphy->features |= NL80211_FEATURE_QUIET;

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
  hw->wiphy->features |=
   NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
  hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

 if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
      IWL_FW_CMD_VER_UNKNOWN) >= 3)
  hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;

 if (fw_has_api(&mvm->fw->ucode_capa,
         IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_SCAN_START_TIME);
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_BSS_PARENT_TSF);
 }

 if (iwl_mvm_is_oce_supported(mvm)) {
  u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);

  wiphy_ext_feature_set(hw->wiphy,
   NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
  wiphy_ext_feature_set(hw->wiphy,
   NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
  wiphy_ext_feature_set(hw->wiphy,
   NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);

  /* Old firmware also supports probe deferral and suppression */
  if (scan_ver < 15)
   wiphy_ext_feature_set(hw->wiphy,
           NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
 }

 /* start with no per-iftype extended capabilities; filled in below */
 hw->wiphy->iftype_ext_capab = NULL;
 hw->wiphy->num_iftype_ext_capab = 0;

 if (mvm->nvm_data->sku_cap_11ax_enable &&
     !iwlwifi_mod_params.disable_11ax) {
  /* advertise only the first (HE) entry of the array for now */
  hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa;
  hw->wiphy->num_iftype_ext_capab =
   ARRAY_SIZE(add_iftypes_ext_capa) - 1;

  ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
  ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
 }

 if (iwl_fw_lookup_cmd_ver(mvm->fw,
      WIDE_ID(DATA_PATH_GROUP,
       WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
      IWL_FW_CMD_VER_UNKNOWN) >= 1) {
  IWL_DEBUG_INFO(mvm->trans, "Timing measurement supported\n");

  /* switch to the timing-measurement (second) array entry */
  if (!hw->wiphy->iftype_ext_capab) {
   hw->wiphy->num_iftype_ext_capab = 1;
   hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa +
    ARRAY_SIZE(add_iftypes_ext_capa) - 1;
  } else {
   hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + 1;
  }
 }

 if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
         TOF_RANGE_REQ_CMD),
      IWL_FW_CMD_VER_UNKNOWN) >= 11) {
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);

  if (fw_has_capa(&mvm->fw->ucode_capa,
    IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
   wiphy_ext_feature_set(hw->wiphy,
           NL80211_EXT_FEATURE_SECURE_LTF);
 }

 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
 if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
     device_can_wakeup(mvm->trans->dev) && !fips_enabled) {
  mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
         WIPHY_WOWLAN_DISCONNECT |
         WIPHY_WOWLAN_EAP_IDENTITY_REQ |
         WIPHY_WOWLAN_RFKILL_RELEASE |
         WIPHY_WOWLAN_NET_DETECT;
  mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
         WIPHY_WOWLAN_GTK_REKEY_FAILURE |
         WIPHY_WOWLAN_4WAY_HANDSHAKE;

  mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
  mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
  mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
  mvm->wowlan.max_nd_match_sets =
   iwl_umac_scan_get_max_profiles(mvm->fw);
  hw->wiphy->wowlan = &mvm->wowlan;
 }
#endif

 ret = iwl_mvm_leds_init(mvm);
 if (ret)
  return ret;

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
  IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
  hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
  ieee80211_hw_set(hw, TDLS_WIDER_BW);
 }

 if (fw_has_capa(&mvm->fw->ucode_capa,
   IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
  IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
  hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 }

 hw->netdev_features |= mvm->trans->mac_cfg->base->features;
 if (!iwl_mvm_is_csum_supported(mvm))
  hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;

 if (mvm->cfg->vht_mu_mimo_supported)
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);

 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
  wiphy_ext_feature_set(hw->wiphy,
          NL80211_EXT_FEATURE_PROTECTED_TWT);

 iwl_mvm_vendor_cmds_register(mvm);

 hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
 hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);

 ret = ieee80211_register_hw(mvm->hw);
 if (ret) {
  /* undo the LED setup on registration failure */
  iwl_mvm_leds_exit(mvm);
 }

 return ret;
}

/* Transmit one skb via the station or non-station path; on failure the
 * skb is handed back to mac80211 to be freed and accounted.
 */
static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
      struct ieee80211_sta *sta)
{
 int ret;

 if (sta)
  ret = iwl_mvm_tx_skb_sta(mvm, skb, sta);
 else
  ret = iwl_mvm_tx_skb_non_sta(mvm, skb);

 if (ret)
  ieee80211_free_txskb(mvm->hw, skb);
}

/*
 * iwl_mvm_mac_tx - mac80211 .tx callback
 *
 * Drops the frame on RF-kill or invalid off-channel TX, strips the
 * station pointer for management frames (they go out like broadcast),
 * and for MLO translates MLD addresses in management frames to the
 * per-link addresses before handing the skb to the TX path.
 */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
      struct ieee80211_tx_control *control, struct sk_buff *skb)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct ieee80211_sta *sta = control->sta;
 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 struct ieee80211_hdr *hdr = (void *)skb->data;
 bool offchannel = IEEE80211_SKB_CB(skb)->flags &
  IEEE80211_TX_CTL_TX_OFFCHAN;
 u32 link_id = u32_get_bits(info->control.flags,
       IEEE80211_TX_CTRL_MLO_LINK);
 struct ieee80211_sta *tmp_sta = sta;

 if (iwl_mvm_is_radio_killed(mvm)) {
  IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
  goto drop;
 }

 /* off-channel TX only makes sense while a ROC session is active */
 if (offchannel &&
     !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
     !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
  goto drop;

 /*
  * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs
  * so we treat the others as broadcast
  */

 if (ieee80211_is_mgmt(hdr->frame_control))
  sta = NULL;

 /* this shouldn't even happen: just drop */
 if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
     !offchannel)
  goto drop;

 if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED &&
     !ieee80211_is_probe_resp(hdr->frame_control)) {
  /* translate MLD addresses to LINK addresses */
  struct ieee80211_link_sta *link_sta =
   rcu_dereference(tmp_sta->link[link_id]);
  struct ieee80211_bss_conf *link_conf =
   rcu_dereference(info->control.vif->link_conf[link_id]);
  struct ieee80211_mgmt *mgmt;

  if (WARN_ON(!link_sta || !link_conf))
   goto drop;

  /* if sta is NULL, the frame is a management frame */
  mgmt = (void *)hdr;
  memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
  memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
  memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
 }

 iwl_mvm_tx_skb(mvm, skb, sta);
 return;
 drop:
 ieee80211_free_txskb(hw, skb);
}

/*
 * iwl_mvm_mac_itxq_xmit - drain a mac80211 iTXQ
 *
 * Dequeues frames from the given TXQ and pushes them to the TX path,
 * unless the queue is stopped (full/redirect/CSA) or the device is in
 * D3. Concurrent callers are collapsed via the tx_request counter so
 * only one thread drains at a time, without losing a racing request.
 */
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 struct sk_buff *skb = NULL;

 /*
  * No need for threads to be pending here, they can leave the first
  * taker all the work.
  *
  * mvmtxq->tx_request logic:
  *
  * If 0, no one is currently TXing, set to 1 to indicate current thread
  * will now start TX and other threads should quit.
  *
  * If 1, another thread is currently TXing, set to 2 to indicate to
  * that thread that there was another request. Since that request may
  * have raced with the check whether the queue is empty, the TXing
  * thread should check the queue's status one more time before leaving.
  * This check is done in order to not leave any TX hanging in the queue
  * until the next TX invocation (which may not even happen).
  *
  * If 2, another thread is currently TXing, and it will already double
  * check the queue, so do nothing.
  */

 if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
  return;

 rcu_read_lock();
 do {
  while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
     &mvmtxq->state) &&
         !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
     &mvmtxq->state) &&
         !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA,
     &mvmtxq->state) &&
         !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
   skb = ieee80211_tx_dequeue(hw, txq);

   if (!skb) {
    if (txq->sta)
     IWL_DEBUG_TX(mvm,
           "TXQ of sta %pM tid %d is now empty\n",
           txq->sta->addr,
           txq->tid);
    break;
   }

   iwl_mvm_tx_skb(mvm, skb, txq->sta);
  }
 } while (atomic_dec_return(&mvmtxq->tx_request));
 rcu_read_unlock();
}

/*
 * iwl_mvm_mac_wake_tx_queue - mac80211 .wake_tx_queue callback
 *
 * If the TXQ is already allocated (READY) or has no station, drain it
 * immediately. Otherwise queue it for the add_stream worker, which
 * allocates the hardware queue and drains it afterwards.
 */
void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
          struct ieee80211_txq *txq)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

 if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
     !txq->sta) {
  iwl_mvm_mac_itxq_xmit(hw, txq);
  return;
 }

 /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
  * to handle any packets we leave on the txq now
  */


 spin_lock_bh(&mvm->add_stream_lock);
 /* The list is being deleted only after the queue is fully allocated. */
 if (list_empty(&mvmtxq->list) &&
     /* recheck under lock */
     !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
  list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
  schedule_work(&mvm->add_stream_wk);
 }
 spin_unlock_bh(&mvm->add_stream_lock);
}

/*
 * Collect a firmware debug dump for a block-ack event, but only when the
 * trigger's TID bitmap (_tid_bm, little-endian in the firmware TLV)
 * covers _tid. _fmt... is the printf-style description recorded with the
 * collected dump.
 */
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)  \
 do {        \
  if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))  \
   break;      \
  iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
 } while (0)

/*
 * Feed an A-MPDU state change into the firmware debug trigger machinery.
 *
 * If a BA debug trigger TLV is configured for this vif, collect a firmware
 * dump for TX-agg start/stop and RX-agg start/stop events on TIDs covered
 * by the trigger's per-event TID bitmap. Other actions are ignored.
 */
static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
       struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
       enum ieee80211_ampdu_mlme_action action)
{
 struct iwl_fw_dbg_trigger_tlv *trig;
 struct iwl_fw_dbg_trigger_ba *ba_trig;

 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
         FW_DBG_TRIGGER_BA);
 if (!trig)
  return;

 ba_trig = (void *)trig->data;

 switch (action) {
 case IEEE80211_AMPDU_TX_OPERATIONAL: {
  struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
  struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

  CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
     "TX AGG START: MAC %pM tid %d ssn %d\n",
     sta->addr, tid, tid_data->ssn);
  break;
  }
 case IEEE80211_AMPDU_TX_STOP_CONT:
  CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
     "TX AGG STOP: MAC %pM tid %d\n",
     sta->addr, tid);
  break;
 case IEEE80211_AMPDU_RX_START:
  CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
     "RX AGG START: MAC %pM tid %d ssn %d\n",
     sta->addr, tid, rx_ba_ssn);
  break;
 case IEEE80211_AMPDU_RX_STOP:
  CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
     "RX AGG STOP: MAC %pM tid %d\n",
     sta->addr, tid);
  break;
 default:
  break;
 }
}

int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
        struct ieee80211_vif *vif,
        struct ieee80211_ampdu_params *params)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 int ret;
 struct ieee80211_sta *sta = params->sta;
 enum ieee80211_ampdu_mlme_action action = params->action;
 u16 tid = params->tid;
 u16 *ssn = ¶ms->ssn;
 u16 buf_size = params->buf_size;
 bool amsdu = params->amsdu;
 u16 timeout = params->timeout;

 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
       sta->addr, tid, action);

 if (!(mvm->nvm_data->sku_cap_11n_enable))
  return -EACCES;

 mutex_lock(&mvm->mutex);

 switch (action) {
 case IEEE80211_AMPDU_RX_START:
  if (iwl_mvm_vif_from_mac80211(vif)->deflink.ap_sta_id ==
      iwl_mvm_sta_from_mac80211(sta)->deflink.sta_id) {
   struct iwl_mvm_vif *mvmvif;
   u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
   struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];

   mdata->opened_rx_ba_sessions = true;
   mvmvif = iwl_mvm_vif_from_mac80211(vif);
   cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
  }
  if (!iwl_enable_rx_ampdu()) {
   ret = -EINVAL;
   break;
  }
  ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
      timeout);
  break;
 case IEEE80211_AMPDU_RX_STOP:
  ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
      timeout);
  break;
 case IEEE80211_AMPDU_TX_START:
  if (!iwl_enable_tx_ampdu()) {
   ret = -EINVAL;
   break;
  }
  ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
  break;
 case IEEE80211_AMPDU_TX_STOP_CONT:
  ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
  break;
 case IEEE80211_AMPDU_TX_STOP_FLUSH:
 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
  ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
  break;
 case IEEE80211_AMPDU_TX_OPERATIONAL:
  ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
           buf_size, amsdu);
  break;
 default:
  WARN_ON_ONCE(1);
  ret = -EINVAL;
  break;
 }

 if (!ret) {
  u16 rx_ba_ssn = 0;

  if (action == IEEE80211_AMPDU_RX_START)
   rx_ba_ssn = *ssn;

  iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
         rx_ba_ssn, action);
 }
 mutex_unlock(&mvm->mutex);

 return ret;
}

/*
 * Interface iterator used when cleaning up for a HW restart: reset all
 * driver-internal per-vif state (time events, ROC, ESR/EMLSR, per-link
 * firmware IDs and PHY contexts) so it matches the just-reset firmware,
 * and drop the cached probe response data. Runs with mvm->mutex held
 * (see the rcu_dereference_protected() below).
 */
static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
         struct ieee80211_vif *vif)
{
 struct iwl_mvm *mvm = data;
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 struct iwl_probe_resp_data *probe_data;
 unsigned int link_id;

 mvmvif->uploaded = false;

 spin_lock_bh(&mvm->time_event_lock);
 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
 spin_unlock_bh(&mvm->time_event_lock);

 /* no ROC activity in progress anymore */
 mvmvif->roc_activity = ROC_NUM_ACTIVITIES;

 mvmvif->bf_enabled = false;
 mvmvif->ba_enabled = false;
 mvmvif->ap_sta = NULL;

 mvmvif->esr_active = false;
 vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;

 /* invalidate all per-link firmware state */
 for_each_mvm_vif_valid_link(mvmvif, link_id) {
  mvmvif->link[link_id]->ap_sta_id = IWL_INVALID_STA;
  mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
  mvmvif->link[link_id]->phy_ctxt = NULL;
  mvmvif->link[link_id]->active = 0;
  mvmvif->link[link_id]->igtk = NULL;
  memset(&mvmvif->link[link_id]->bf_data, 0,
         sizeof(mvmvif->link[link_id]->bf_data));
 }

 probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
            lockdep_is_held(&mvm->mutex));
 if (probe_data)
  kfree_rcu(probe_data, rcu_head);
 RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}

/*
 * Station iterator used during restart cleanup: for MLO stations only,
 * free driver link-STA data for any link that mac80211 no longer has
 * active, so stale state doesn't survive the restart.
 */
static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
{
 struct iwl_mvm *mvm = data;
 struct iwl_mvm_sta *mvm_sta;
 struct ieee80211_vif *vif;
 int link_id;

 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 vif = mvm_sta->vif;

 /* non-MLO stations have nothing to clean up here */
 if (!sta->valid_links)
  return;

 for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
  struct iwl_mvm_link_sta *mvm_link_sta;

  mvm_link_sta =
   rcu_dereference_check(mvm_sta->link[link_id],
           lockdep_is_held(&mvm->mutex));
  if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
   /*
 * We have a link STA but the link is inactive in
 * mac80211. This will happen if we failed to
 * deactivate the link but mac80211 roll back the
 * deactivation of the link.
 * Delete the stale data to avoid issues later on.
 */

   iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
        link_id);
  }
 }
}

/*
 * Bring driver state back to a known baseline after the firmware has
 * crashed/reset and before it is loaded again as part of a HW restart:
 * stop the device, expire any ROC, reset per-vif and per-sta state via
 * the iterators above, and clear cached firmware-related bookkeeping.
 */
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
 iwl_mvm_stop_device(mvm);

 mvm->cur_aid = 0;

 mvm->scan_status = 0;
 mvm->ps_disabled = false;
 mvm->rfkill_safe_init_done = false;

 /* just in case one was running */
 iwl_mvm_cleanup_roc_te(mvm);
 ieee80211_remain_on_channel_expired(mvm->hw);

 iwl_mvm_ftm_restart(mvm);

 /*
 * cleanup all interfaces, even inactive ones, as some might have
 * gone down during the HW restart
 */

 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

 /* cleanup stations as links may be gone after restart */
 ieee80211_iterate_stations_atomic(mvm->hw,
       iwl_mvm_cleanup_sta_iterator, mvm);

 mvm->p2p_device_vif = NULL;

 iwl_mvm_reset_phy_ctxts(mvm);
 /* firmware lost all keys/BT state with the reset */
 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

 ieee80211_wake_queues(mvm->hw);

 mvm->rx_ba_sessions = 0;
 mvm->fwrt.dump.conf = FW_DBG_INVALID;
 mvm->monitor_on = false;
#ifdef CONFIG_IWLWIFI_DEBUGFS
 mvm->beacon_inject_active = false;
#endif

 /* keep statistics ticking */
 iwl_mvm_accu_radio_stats(mvm);
}

/*
 * Bring the mac up: take NIC ownership from the MEI/CSME entity, try the
 * fast-resume shortcut when available (CONFIG_PM_SLEEP only), perform
 * HW-restart cleanup when one was requested, and finally load the
 * firmware via iwl_mvm_up() when needed. Returns 0 on success.
 * Caller must hold mvm->mutex.
 */
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
 bool fast_resume = false;
 int ret;

 lockdep_assert_held(&mvm->mutex);

 ret = iwl_mvm_mei_get_ownership(mvm);
 if (ret)
  return ret;

 if (mvm->mei_nvm_data) {
  /* We got the NIC, we can now free the MEI NVM data */
  kfree(mvm->mei_nvm_data);
  mvm->mei_nvm_data = NULL;

  /*
 * We can't free the nvm_data we allocated based on the SAP
 * data because we registered to cfg80211 with the channels
 * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data
 * just in order to be able free it later.
 * NULLify nvm_data so that we will read the NVM from the
 * firmware this time.
 */

  mvm->temp_nvm_data = mvm->nvm_data;
  mvm->nvm_data = NULL;
 }

#ifdef CONFIG_PM_SLEEP
 /* fast_resume will be cleared by iwl_mvm_fast_resume */
 fast_resume = mvm->fast_resume;

 if (fast_resume) {
  iwl_mvm_mei_device_state(mvm, true);
  ret = iwl_mvm_fast_resume(mvm);
  if (ret) {
   iwl_mvm_stop_device(mvm);
   /* iwl_mvm_up() will be called further down */
  } else {
   /*
 * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
 * mac_down() so that debugfs will stop honoring
 * requests after we flush all the workers.
 * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
 * now that we are back. This is a bit abusing the
 * flag since the firmware wasn't really ever stopped,
 * but this still serves the purpose.
 */

   set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  }
 }
#endif /* CONFIG_PM_SLEEP */

 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
  /*
 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
 * so later code will - from now on - see that we're doing it.
 */

  set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
  clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
  /* Clean up some internal and mac80211 state on restart */
  iwl_mvm_restart_cleanup(mvm);
 }

 /* we also want to load the firmware if fast_resume failed */
 if (!fast_resume || ret)
  ret = iwl_mvm_up(mvm);

 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
          NULL);
 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
          NULL);

 mvm->last_reset_or_resume_time_jiffies = jiffies;

 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
  /* Something went wrong - we need to finish some cleanup
 * that normally iwl_mvm_mac_restart_complete() below
 * would do.
 */

  clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 }

 return ret;
}

/*
 * mac80211 .start callback: start the mac, retrying __iwl_mvm_mac_start()
 * up to IWL_MAX_INIT_RETRY times on -ETIMEDOUT, but only when firmware
 * restart is enabled and we are not already in an error-flow restart.
 */
int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 int ret;
 int retry, max_retry = 0;

 mutex_lock(&mvm->mutex);

 /* we are starting the mac not in error flow, and restart is enabled */
 if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
     iwlwifi_mod_params.fw_restart)
  max_retry = IWL_MAX_INIT_RETRY;

 for (retry = 0; retry <= max_retry; retry++) {
  ret = __iwl_mvm_mac_start(mvm);
  /* only a firmware-load timeout is worth retrying */
  if (ret != -ETIMEDOUT)
   break;

  IWL_ERR(mvm, "mac start retry %d\n", retry);
 }

 mutex_unlock(&mvm->mutex);

 iwl_mvm_mei_set_sw_rfkill_state(mvm);

 return ret;
}

/*
 * Called once mac80211 finished reconfiguring everything after a HW
 * restart: clear the restart flag, restore quotas, tell the firmware
 * recovery is over, drop TDLS peers (their seqno/PN state is lost so
 * they must reconnect), and kick the add-stream worker for any txqs
 * that queued up while the restart was in progress.
 */
static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
 int ret;

 guard(mvm)(mvm);

 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

 ret = iwl_mvm_update_quotas(mvm, true, NULL);
 if (ret)
  IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
   ret);

 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY);

 /*
 * If we have TDLS peers, remove them. We don't know the last seqno/PN
 * of packets the FW sent out, so we must reconnect.
 */

 iwl_mvm_teardown_tdls_peers(mvm);

 IWL_INFO(mvm, "restart completed\n");
 iwl_trans_finish_sw_reset(mvm->trans);

 /* no need to lock, adding in parallel would schedule too */
 if (!list_empty(&mvm->add_stream_txqs))
  schedule_work(&mvm->add_stream_wk);
}

/*
 * mac80211 .reconfig_complete callback. Only the restart case needs
 * driver work; resume reconfiguration needs nothing here.
 */
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
       enum ieee80211_reconfig_type reconfig_type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
		iwl_mvm_restart_complete(mvm);
	/* IEEE80211_RECONFIG_TYPE_SUSPEND: nothing to do */
}

/*
 * Stop the mac: tear down firmware-related driver state and either stop
 * the device or, for @suspend on 22000+ devices, fast-suspend it so a
 * later start can fast-resume. Caller must hold mvm->mutex.
 */
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
{
 lockdep_assert_held(&mvm->mutex);

 iwl_mvm_ftm_initiator_smooth_stop(mvm);

 /* firmware counters are obviously reset now, but we shouldn't
 * partially track so also clear the fw_reset_accu counters.
 */

 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

 /* async_handlers_wk is now blocked */

 if (!iwl_mvm_has_new_station_api(mvm->fw))
  iwl_mvm_rm_aux_sta(mvm);

 if (suspend &&
     mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
  iwl_mvm_fast_suspend(mvm);
  /* From this point on, we won't touch the device */
  iwl_mvm_mei_device_state(mvm, false);
 } else {
  iwl_mvm_stop_device(mvm);
 }

 iwl_mvm_async_handlers_purge(mvm);
 /* async_handlers_list is empty and will stay empty: HW is stopped */

 /*
 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
 * hw (as restart_complete() won't be called in this case) and mac80211
 * won't execute the restart.
 * But make sure to cleanup interfaces that have gone down before/during
 * HW restart was requested.
 */

 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
     test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
          &mvm->status))
  ieee80211_iterate_interfaces(mvm->hw, 0,
          iwl_mvm_cleanup_iterator, mvm);

 /* We shouldn't have any UIDs still set.  Loop over all the UIDs to
 * make sure there's nothing left there and warn if any is found.
 */

 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
  int i;

  for (i = 0; i < mvm->max_scans; i++) {
   if (WARN_ONCE(mvm->scan_uid_status[i],
          "UMAC scan UID %d status was not cleaned\n",
          i))
    mvm->scan_uid_status[i] = 0;
  }
 }
}

/*
 * mac80211 .stop callback: quiesce all workers in a carefully ordered
 * sequence (scan stop, worker flushes, firmware-running bit clear,
 * delayed-work cancels), then stop the mac under the mutex. The order
 * matters - see the inline comments.
 */
void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

 /* Stop internal MLO scan, if running */
 mutex_lock(&mvm->mutex);
 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
 mutex_unlock(&mvm->mutex);

 wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
 wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
 flush_work(&mvm->async_handlers_wk);
 flush_work(&mvm->add_stream_wk);

 /*
 * Lock and clear the firmware running bit here already, so that
 * new commands coming in elsewhere, e.g. from debugfs, will not
 * be able to proceed. This is important here because one of those
 * debugfs files causes the firmware dump to be triggered, and if we
 * don't stop debugfs accesses before canceling that it could be
 * retriggered after we flush it but before we've cleared the bit.
 */

 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);

 /*
 * The work item could be running or queued if the
 * ROC time event stops just as we get here.
 */

 flush_work(&mvm->roc_done_wk);

 iwl_mvm_mei_set_sw_rfkill_state(mvm);

 mutex_lock(&mvm->mutex);
 __iwl_mvm_mac_stop(mvm, suspend);
 mutex_unlock(&mvm->mutex);

 /*
 * The worker might have been waiting for the mutex, let it run and
 * discover that its list is now empty.
 */

 cancel_work_sync(&mvm->async_handlers_wk);
 wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}

struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
 u16 i;

 lockdep_assert_held(&mvm->mutex);

 for (i = 0; i < NUM_PHY_CTX; i++)
  if (!mvm->phy_ctxts[i].ref)
   return &mvm->phy_ctxts[i];

 IWL_ERR(mvm, "No available PHY context\n");
 return NULL;
}

/*
 * Send a TX power restriction for the link/MAC behind @link_conf.
 *
 * @tx_power is in dBm; IWL_DEFAULT_MAX_TX_POWER means "no restriction"
 * (translated to IWL_DEV_MAX_TX_POWER), otherwise the firmware expects
 * 1/8 dBm units. The command layout and addressing (MAC id vs firmware
 * link id) depend on the REDUCE_TX_POWER_CMD version the firmware
 * advertises.
 *
 * Fix: zero-initialize cmd_v9_v10 - only its common part is filled in,
 * so without the initializer the versioned tail bytes covered by @len
 * would be sent to the firmware as uninitialized stack data.
 */
int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
			 struct ieee80211_bss_conf *link_conf,
			 s16 tx_power)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(link_conf->vif);
	u32 mac_id = mvmvif->id;
	int len;
	/* versions <= 8 address the restriction by MAC id */
	struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
		.common.link_id = cpu_to_le32(mac_id),
	};
	struct iwl_dev_tx_power_cmd cmd_v9_v10 = {};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
	u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
		IWL_DEV_MAX_TX_POWER : 8 * tx_power;
	void *cmd_data = &cmd;

	cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);

	if (cmd_ver > 8) {
		u32 link_id;

		if (WARN_ON(!mvmvif->link[link_conf->link_id]))
			return -ENODEV;

		link_id = mvmvif->link[link_conf->link_id]->fw_link_id;

		/* Those fields sit on the same place for v9 and v10 */
		cmd_v9_v10.common.set_mode =
			cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK);
		cmd_v9_v10.common.link_id = cpu_to_le32(link_id);
		cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
		cmd_data = &cmd_v9_v10;
	}

	/* pick the payload length matching the firmware command version */
	if (cmd_ver == 10)
		len = sizeof(cmd_v9_v10.v10);
	else if (cmd_ver == 9)
		len = sizeof(cmd_v9_v10.v9);
	else if (cmd_ver == 8)
		len = sizeof(cmd.v8);
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v3);

	/* all structs have the same common part, add its length */
	len += sizeof(cmd.common);

	if (cmd_ver < 9)
		len += sizeof(cmd.per_band);

	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
}

static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
{
 struct ieee80211_hw *hw = data;
 int i;

 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
  struct iwl_mvm_txq *mvmtxq =
   iwl_mvm_txq_from_mac80211(sta->txq[i]);

  clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
  iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]);
 }
}

/*
 * Finalize a channel switch. For a client vif: unblock TX towards the
 * AP station, refresh the MAC context and, on firmware without the
 * channel-switch command, re-enable beacon filtering and end session
 * protection. For an AP vif that blocked TX during CSA: release the
 * stopped queues and flush pending frames. Returns -EIO when the CSA
 * failed (csa_failed), or a negative error from the steps above.
 */
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
    struct ieee80211_vif *vif,
    struct ieee80211_bss_conf *link_conf)
{
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 int ret;

 mutex_lock(&mvm->mutex);

 if (vif->type == NL80211_IFTYPE_STATION) {
  struct iwl_mvm_sta *mvmsta;
  unsigned int link_id = link_conf->link_id;
  u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id;

  mvmvif->csa_bcn_pending = false;
  mvmvif->csa_blocks_tx = false;
  mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);

  if (WARN_ON(!mvmsta)) {
   ret = -EIO;
   goto out_unlock;
  }

  /* TX to the AP was disabled during the switch - re-enable it */
  iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
  if (mvm->mld_api_is_used)
   iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
  else
   iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

  if (!fw_has_capa(&mvm->fw->ucode_capa,
     IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
   ret = iwl_mvm_enable_beacon_filter(mvm, vif);
   if (ret)
    goto out_unlock;

   iwl_mvm_stop_session_protection(mvm, vif);
  }
 } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) {
  struct iwl_mvm_txq *mvmtxq =
   iwl_mvm_txq_from_mac80211(vif->txq);

  clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);

  /* flush the vif queue and every station queue (BHs off) */
  local_bh_disable();
  iwl_mvm_mac_itxq_xmit(hw, vif->txq);
  ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw);
  local_bh_enable();

  mvmvif->csa_blocks_tx = false;
 }

 mvmvif->ps_disabled = false;

 ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
 if (mvmvif->csa_failed)
  ret = -EIO;
 mutex_unlock(&mvm->mutex);

 return ret;
}

/*
 * mac80211 .abort_channel_switch callback: cancel the CSA time event in
 * the firmware (or remove the CSA period on older firmware), mark the
 * CSA as failed, and run the post-channel-switch cleanup. With newer
 * firmware that sends CHANNEL_SWITCH_ERROR_NOTIF this is a no-op, since
 * the firmware drives the cancellation itself.
 */
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
      struct ieee80211_vif *vif,
      struct ieee80211_bss_conf *link_conf)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 struct iwl_chan_switch_te_cmd cmd = {
  .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
         mvmvif->color)),
  .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
 };

 /*
 * In the new flow since FW is in charge of the timing,
 * if driver has canceled the channel switch he will receive the
 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it
 */

 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
        CHANNEL_SWITCH_ERROR_NOTIF, 0))
  return;

 IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);

 mutex_lock(&mvm->mutex);
 if (!fw_has_capa(&mvm->fw->ucode_capa,
    IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
  iwl_mvm_remove_csa_period(mvm, vif);
 else
  WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
          WIDE_ID(MAC_CONF_GROUP,
           CHANNEL_SWITCH_TIME_EVENT_CMD),
          0, sizeof(cmd), &cmd));
 mvmvif->csa_failed = true;
 mutex_unlock(&mvm->mutex);

 /* If we're here, we can't support MLD */
 iwl_mvm_post_channel_switch(hw, vif, &vif->bss_conf);
}

/*
 * Delayed work used when a channel switch must be abandoned: report the
 * switch as failed to mac80211, which triggers a disconnect and clears
 * the CSA state.
 */
void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
{
	struct iwl_mvm_vif *mvmvif =
		container_of(wk, struct iwl_mvm_vif, csa_work.work);
	struct ieee80211_vif *vif =
		container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

	ieee80211_chswitch_done(vif, false, 0);
}

/*
 * For a 160/320 MHz chandef, return the index (0-based) of the 80 MHz
 * sub-band that contains the control (primary) channel; 0 for any other
 * bandwidth.
 */
static u8
iwl_mvm_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
{
	int width_mhz;
	int data_low_edge;
	int ctrl_low_edge;

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_320:
		width_mhz = 320;
		break;
	case NL80211_CHAN_WIDTH_160:
		width_mhz = 160;
		break;
	default:
		return 0;
	}

	/* the data portion is centred on center_freq1 and spans width_mhz */
	data_low_edge = chandef->center_freq1 - width_mhz / 2;
	/* the control channel is 20 MHz wide, so its low edge is centre - 10 */
	ctrl_low_edge = chandef->chan->center_freq - 10;

	return (ctrl_low_edge - data_low_edge) / 80;
}

/*
 * Allocate the internal broadcast and multicast stations for an AP/IBSS
 * vif. Returns 0 on success or a negative error.
 * Caller must hold mvm->mutex.
 */
static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
      struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int err;

	lockdep_assert_held(&mvm->mutex);

	err = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (err) {
		IWL_ERR(mvm, "Failed to allocate bcast sta\n");
		return err;
	}

	/*
	 * The multicast station only owns the mcast queue, which shouldn't
	 * be in the TFD mask anyway - hence the queue mask of 0.
	 */
	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.mcast_sta, 0,
					vif->type, IWL_STA_MULTICAST);
}

static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
     struct wiphy_work *wk)
{
 struct iwl_mvm_vif *mvmvif =
  container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
 struct iwl_mvm *mvm = mvmvif->mvm;
 struct ieee80211_vif *vif =
  container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

 guard(mvm)(mvm);
 iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
}

static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
 struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
        mlo_int_scan_wk.work);
 struct ieee80211_vif *vif =
  container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

 guard(mvm)(mvmvif->mvm);
 iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
}

static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
 struct iwl_mvm_vif *mvmvif =
  container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
 struct iwl_mvm *mvm = mvmvif->mvm;
 struct ieee80211_vif *vif =
  container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

 guard(mvm)(mvm);
 iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
}

static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy,
         struct wiphy_work *wk)
{
 struct iwl_mvm_vif *mvmvif =
  container_of(wk, struct iwl_mvm_vif,
        unblock_esr_tmp_non_bss_wk.work);
 struct iwl_mvm *mvm = mvmvif->mvm;
 struct ieee80211_vif *vif =
  container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

 mutex_lock(&mvm->mutex);
 iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
 mutex_unlock(&mvm->mutex);
}

/*
 * One-time initialization of the per-vif work items and beacon-energy
 * state. Skipped during a HW restart: the vif (and possibly pending
 * work items) survive the restart, so they must not be re-initialized.
 * Caller must hold mvm->mutex.
 */
void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
{
 lockdep_assert_held(&mvm->mutex);

 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
  return;

 mvmvif->deflink.average_beacon_energy = 0;

 INIT_DELAYED_WORK(&mvmvif->csa_work,
     iwl_mvm_channel_switch_disconnect_wk);

 wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
    iwl_mvm_prevent_esr_done_wk);

 wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
    iwl_mvm_mlo_int_scan_wk);

 wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
   iwl_mvm_unblock_esr_tpt);

 wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk,
    iwl_mvm_unblock_esr_tmp_non_bss);
}

/*
 * mac80211 .add_interface callback: initialize the driver vif state,
 * allocate a MAC context, and - for non-AP/IBSS types - add the MAC to
 * the firmware along with power/beacon-filter setup. AP and IBSS vifs
 * only allocate resources here; their firmware MAC is added later in
 * the start_ap() flow (see the big comment below). On success for
 * AP/IBSS, the broadcast/multicast stations are also allocated.
 * Returns 0 on success or a negative error, unwinding via the goto
 * labels at the bottom.
 */
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
         struct ieee80211_vif *vif)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 int ret;
 int i;

 mutex_lock(&mvm->mutex);

 iwl_mvm_mac_init_mvmvif(mvm, mvmvif);

 mvmvif->mvm = mvm;

 /* the first link always points to the default one */
 mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
 mvmvif->deflink.active = 0;
 mvmvif->link[0] = &mvmvif->deflink;

 vif->driver_flags = IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;

 iwl_mvm_set_link_fw_id(mvm, vif, &vif->bss_conf);

 /*
 * Not much to do here. The stack will not allow interface
 * types or combinations that we didn't advertise, so we
 * don't really have to check the types.
 */


 /* make sure that beacon statistics don't go backwards with FW reset */
 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
  for_each_mvm_vif_valid_link(mvmvif, i)
   mvmvif->link[i]->beacon_stats.accu_num_beacons +=
    mvmvif->link[i]->beacon_stats.num_beacons;

 /* Allocate resources for the MAC context, and add it to the fw  */
 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
 if (ret)
  goto out;

 rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);

 /*
 * The AP binding flow can be done only after the beacon
 * template is configured (which happens only in the mac80211
 * start_ap() flow), and adding the broadcast station can happen
 * only after the binding.
 * In addition, since modifying the MAC before adding a bcast
 * station is not allowed by the FW, delay the adding of MAC context to
 * the point where we can also add the bcast station.
 * In short: there's not much we can do at this point, other than
 * allocating resources :)
 */

 if (vif->type == NL80211_IFTYPE_AP ||
     vif->type == NL80211_IFTYPE_ADHOC) {
  if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
   iwl_mvm_vif_dbgfs_add_link(mvm, vif);
  ret = 0;
  goto out;
 }

 mvmvif->features |= hw->netdev_features;

 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
 if (ret)
  goto out_unlock;

 ret = iwl_mvm_power_update_mac(mvm);
 if (ret)
  goto out_remove_mac;

 /* beacon filtering */
 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
 if (ret)
  goto out_remove_mac;

 /* only one (non-P2P station) vif may own beacon filtering / CQM */
 if (!mvm->bf_allowed_vif &&
     vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
  mvm->bf_allowed_vif = mvmvif;
  vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
         IEEE80211_VIF_SUPPORTS_CQM_RSSI;
 }

 if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
  mvm->p2p_device_vif = vif;

 iwl_mvm_tcm_add_vif(mvm, vif);

 if (vif->type == NL80211_IFTYPE_MONITOR) {
  mvm->monitor_on = true;
  mvm->monitor_p80 =
   iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
 }

 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
  iwl_mvm_vif_dbgfs_add_link(mvm, vif);

 /* let the CSME/MEI entity know about the first station vif */
 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
     vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
     !mvm->csme_vif && mvm->mei_registered) {
  iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
  iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
  mvm->csme_vif = vif;
 }

out:
 if (!ret && (vif->type == NL80211_IFTYPE_AP ||
       vif->type == NL80211_IFTYPE_ADHOC))
  ret = iwl_mvm_alloc_bcast_mcast_sta(mvm, vif);

 goto out_unlock;

 out_remove_mac:
 mvmvif->deflink.phy_ctxt = NULL;
 iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_unlock:
 mutex_unlock(&mvm->mutex);

 return ret;
}

/*
 * Cancel/flush all per-vif work items before the interface is removed,
 * so none of them runs against a vanishing vif. For a P2P device, also
 * flush the ROC worker, which empties the OFFCHANNEL queue.
 */
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
     struct ieee80211_vif *vif)
{
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
  /*
 * Flush the ROC worker which will flush the OFFCHANNEL queue.
 * We assume here that all the packets sent to the OFFCHANNEL
 * queue are sent in ROC session.
 */

  flush_work(&mvm->roc_done_wk);
 }

 wiphy_delayed_work_cancel(mvm->hw->wiphy,
      &mvmvif->prevent_esr_done_wk);

 wiphy_delayed_work_cancel(mvm->hw->wiphy,
      &mvmvif->mlo_int_scan_wk);

 wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
 wiphy_delayed_work_cancel(mvm->hw->wiphy,
      &mvmvif->unblock_esr_tmp_non_bss_wk);

 cancel_delayed_work_sync(&mvmvif->csa_work);
}

/*
 * mac80211 .remove_interface callback: quiesce per-vif work, detach the
 * vif from CSME/beacon-filter/FTM bookkeeping, and remove the firmware
 * MAC context. AP/IBSS vifs skip the MAC removal here (done in the
 * stop_ap flow) but still free their broadcast/multicast stations.
 */
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
      struct ieee80211_vif *vif)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 struct iwl_probe_resp_data *probe_data;

 iwl_mvm_prepare_mac_removal(mvm, vif);

 if (!(vif->type == NL80211_IFTYPE_AP ||
       vif->type == NL80211_IFTYPE_ADHOC))
  iwl_mvm_tcm_rm_vif(mvm, vif);

 mutex_lock(&mvm->mutex);

 if (vif == mvm->csme_vif) {
  iwl_mei_set_netdev(NULL);
  mvm->csme_vif = NULL;
 }

 probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
            lockdep_is_held(&mvm->mutex));
 RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
 if (probe_data)
  kfree_rcu(probe_data, rcu_head);

 if (mvm->bf_allowed_vif == mvmvif) {
  mvm->bf_allowed_vif = NULL;
  vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
           IEEE80211_VIF_SUPPORTS_CQM_RSSI);
 }

 if (vif->bss_conf.ftm_responder)
  memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));

 iwl_mvm_vif_dbgfs_rm_link(mvm, vif);

 /*
 * For AP/GO interface, the tear down of the resources allocated to the
 * interface is handled as part of the stop_ap flow.
 */

 if (vif->type == NL80211_IFTYPE_AP ||
     vif->type == NL80211_IFTYPE_ADHOC)
  goto out;

 iwl_mvm_power_update_mac(mvm);

 /* Before the interface removal, mac80211 would cancel the ROC, and the
 * ROC worker would be scheduled if needed. The worker would be flushed
 * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
 * binding etc. so nothing needs to be done here.
 */

 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
  if (mvmvif->deflink.phy_ctxt) {
   iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
   mvmvif->deflink.phy_ctxt = NULL;
  }
  mvm->p2p_device_vif = NULL;
 }

 iwl_mvm_mac_ctxt_remove(mvm, vif);

 RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);

 if (vif->type == NL80211_IFTYPE_MONITOR)
  mvm->monitor_on = false;

out:
 if (vif->type == NL80211_IFTYPE_AP ||
     vif->type == NL80211_IFTYPE_ADHOC) {
  iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta);
  iwl_mvm_dealloc_bcast_sta(mvm, vif);
 }

 mutex_unlock(&mvm->mutex);
}

/* Context for iwl_mvm_mc_iface_iterator(). */
struct iwl_mvm_mc_iter_data {
 struct iwl_mvm *mvm;
 int port_id; /* next multicast port ID; incremented per matching vif */
};

/*
 * Per-interface iterator sending the current multicast filter list to
 * the firmware: one asynchronous MCAST_FILTER_CMD per associated
 * station vif, each assigned its own port ID from the iterator context.
 */
static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
          struct ieee80211_vif *vif)
{
 struct iwl_mvm_mc_iter_data *data = _data;
 struct iwl_mvm *mvm = data->mvm;
 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
 struct iwl_host_cmd hcmd = {
  .id = MCAST_FILTER_CMD,
  .flags = CMD_ASYNC,
  .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
 };
 int ret, len;

 /* if we don't have free ports, mcast frames will be dropped */
 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
  return;

 if (vif->type != NL80211_IFTYPE_STATION ||
     !vif->cfg.assoc)
  return;

 cmd->port_id = data->port_id++;
 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 /* command length must be padded to a 4-byte boundary */
 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

 hcmd.len[0] = len;
 hcmd.data[0] = cmd;

 ret = iwl_mvm_send_cmd(mvm, &hcmd);
 if (ret)
  IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}

/*
 * Push the currently-stored multicast filter (mvm->mcast_filter_cmd) to
 * the firmware for every associated station vif, then synchronize with
 * an echo command. Caller must hold mvm->mutex.
 */
static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
{
 struct iwl_mvm_mc_iter_data iter_data = {
  .mvm = mvm,
 };
 int ret;

 lockdep_assert_held(&mvm->mutex);

 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
  return;

 ieee80211_iterate_active_interfaces_atomic(
  mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
  iwl_mvm_mc_iface_iterator, &iter_data);

 /*
 * Send a (synchronous) echo command so that we wait for the
 * multiple asynchronous MCAST_FILTER_CMD commands sent by
 * the interface iterator. Otherwise, we might get here over
 * and over again (by userspace just sending a lot of these)
 * and the CPU can send them faster than the firmware can
 * process them.
 * Note that the CPU is still faster - but with this we'll
 * actually send fewer commands overall because the CPU will
 * not schedule the work in mac80211 as frequently if it's
 * still running when rescheduled (possibly multiple times).
 */

 ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
 if (ret)
  IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
}

/*
 * mac80211 .prepare_multicast callback: build an iwl_mcast_filter_cmd
 * from the hardware multicast address list. Falls back to pass-all when
 * the list exceeds MAX_MCAST_FILTERING_ADDRESSES or pass-all is forced
 * by configuration. The returned u64 is really a pointer to the
 * allocated command (consumed later by iwl_mvm_configure_filter()),
 * or 0 on allocation failure.
 */
u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
         struct netdev_hw_addr_list *mc_list)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 struct iwl_mcast_filter_cmd *cmd;
 struct netdev_hw_addr *addr;
 int addr_count;
 bool pass_all;
 int len;

 addr_count = netdev_hw_addr_list_count(mc_list);
 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
     IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
 if (pass_all)
  addr_count = 0;

 /* command length must be padded to a 4-byte boundary */
 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
 cmd = kzalloc(len, GFP_ATOMIC);
 if (!cmd)
  return 0;

 if (pass_all) {
  cmd->pass_all = 1;
  return (u64)(unsigned long)cmd;
 }

 netdev_hw_addr_list_for_each(addr, mc_list) {
  IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
       cmd->count, addr->addr);
  memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
         addr->addr, ETH_ALEN);
  cmd->count++;
 }

 return (u64)(unsigned long)cmd;
}

/*
 * mac80211 configure_filter callback: install the multicast filter
 * command prepared by iwl_mvm_prepare_multicast() (passed via
 * @multicast) and push it to the firmware.  We take ownership of the
 * buffer here.  *total_flags is cleared since we implement no other
 * RX filters at this level.
 */
void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
			      unsigned int changed_flags,
			      unsigned int *total_flags, u64 multicast)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;

	guard(mvm)(mvm);

	/* the new configuration supersedes the previous one */
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = cmd;

	if (cmd) {
		if (changed_flags & FIF_ALLMULTI)
			cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);

		/* an explicit address list is meaningless in pass-all mode */
		if (cmd->pass_all)
			cmd->count = 0;

		iwl_mvm_recalc_multicast(mvm);
	}

	*total_flags = 0;
}

static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
     struct ieee80211_vif *vif,
     unsigned int filter_flags,
     unsigned int changed_flags)
{
 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

 /* We support only filter for probe requests */
 if (!(changed_flags & FIF_PROBE_REQ))
  return;

 /* Supported only for p2p client interfaces */
 if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
     !vif->p2p)
  return;

 guard(mvm)(mvm);
 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}

int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
 struct iwl_mu_group_mgmt_cmd cmd = {};

 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
        WLAN_MEMBERSHIP_LEN);
 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
        WLAN_USER_POSITION_LEN);

 return iwl_mvm_send_cmd_pdu(mvm,
        WIDE_ID(DATA_PATH_GROUP,
         UPDATE_MU_GROUPS_CMD),
        0, sizeof(cmd), &cmd);
}

/*
 * Interface iterator for MU-MIMO group notifications: forward the
 * firmware-provided group data to mac80211 for the interface that
 * owns the MU-MIMO configuration.
 *
 * NOTE: the source as extracted read "(u8 *)¬if->..." - an HTML
 * mojibake of "&not" inside "&notif" - which is not valid C; the
 * address-of expressions are restored here.
 */
static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	struct iwl_mu_group_mgmt_notif *notif = _data;

	/* only the MU-MIMO owner interface consumes the notification */
	if (!vif->bss_conf.mu_mimo_owner)
		return;

	/*
	 * MU-MIMO Group Id action frame is little endian. We treat
	 * the data received from firmware as if it came from the
	 * action frame, so no conversion is needed.
	 */
	ieee80211_update_mu_groups(vif, 0,
				   (u8 *)&notif->membership_status,
				   (u8 *)&notif->user_position);
}

/*
 * Handle a MU-MIMO group management notification from the firmware:
 * fan it out to every active interface so the owner can update
 * mac80211's group state.
 */
void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
			       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_mu_mimo_iface_iterator,
						   notif);
}

static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
{
 u8 byte_num = ppe_pos_bit / 8;
 u8 bit_num = ppe_pos_bit % 8;
 u8 residue_bits;
 u8 res;

 if (bit_num <= 5)
  return (ppe[byte_num] >> bit_num) &
         (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);

 /*
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=96 H=93 G=94

¤ Dauer der Verarbeitung: 0.10 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.