/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: ieee80211_input.c 2610 2007-07-25 15:26:38Z mrenzmann $
*/
#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
/*
* IEEE 802.11 input handling.
*/
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/version.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <net/iw_handler.h> /* wireless_send_event(..) */
#include <linux/wireless.h> /* SIOCGIWTHRSPY */
#include <linux/if_arp.h> /* ARPHRD_ETHER */
#include <linux/jiffies.h>
#include "net80211/if_llc.h"
#include "net80211/if_ethersubr.h"
#include "net80211/if_media.h"
#include "net80211/ieee80211_var.h"
#include "net80211/ieee80211_linux.h"
#include "net80211/ieee80211_dot11_msg.h"
#include "net80211/ieee80211_tpc.h"
#include "net80211/ieee80211_tdls.h"
#include "net80211/ieee80211_mlme_statistics.h"
#include "qtn/wlan_ioctl.h"
#include "qtn/qtn_global.h"
#include "qtn_logging.h"
#include <qdrv/qdrv_debug.h>
#include <qtn/shared_params.h>
#include <qtn/hardware_revision.h>
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#include <linux/if_bridge.h>
#include <linux/net/bridge/br_public.h>
#endif
#if defined(CONFIG_QTN_BSA_SUPPORT)
#include "net80211/ieee80211_bsa.h"
#endif
extern u_int16_t ht_rate_table_20MHz_800[];
extern u_int16_t ht_rate_table_40MHz_800[];
#ifdef IEEE80211_DEBUG
/*
* Decide if a received management frame should be
* printed when debugging is enabled. This filters some
* of the less interesting frames that come frequently
* (e.g. beacons).
*/
static __inline int
doprint(struct ieee80211vap *vap, int subtype)
{
switch (subtype) {
case IEEE80211_FC0_SUBTYPE_BEACON:
return (vap->iv_ic->ic_flags & IEEE80211_F_SCAN);
case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
return (vap->iv_opmode == IEEE80211_M_IBSS);
}
return 1;
}
/*
* Emit a debug message about discarding a frame or information
* element. One format is for extracting the mac address from
 * the frame header; the other is for when a frame header is not
 * available or not appropriate.
*/
#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_frame(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_ie(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
} while (0)
static const u_int8_t *ieee80211_getbssid(struct ieee80211vap *,
const struct ieee80211_frame *);
static void ieee80211_discard_frame(struct ieee80211vap *,
const struct ieee80211_frame *, const char *, const char *, ...);
static void ieee80211_discard_ie(struct ieee80211vap *,
const struct ieee80211_frame *, const char *, const char *, ...);
static void ieee80211_discard_mac(struct ieee80211vap *,
const u_int8_t mac[IEEE80211_ADDR_LEN], const char *,
const char *, ...);
#else
#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...)
#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...)
#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...)
#endif /* IEEE80211_DEBUG */
static struct sk_buff *ieee80211_defrag(struct ieee80211_node *,
struct sk_buff *, int);
static void ieee80211_deliver_data(struct ieee80211_node *, struct sk_buff *);
static struct sk_buff *ieee80211_decap(struct ieee80211vap *,
struct sk_buff *, int);
static void ieee80211_send_error(struct ieee80211_node *, const u_int8_t *,
int, int);
static void ieee80211_recv_pspoll(struct ieee80211_node *, struct sk_buff *);
static int accept_data_frame(struct ieee80211vap *, struct ieee80211_node *,
struct ieee80211_key *, struct sk_buff *, struct ether_header *);
static void forward_mgmt_to_app(struct ieee80211vap *vap, int subtype, struct sk_buff *skb,
struct ieee80211_frame *wh);
static void forward_mgmt_to_app_for_further_processing(struct ieee80211vap *vap,
int subtype, struct sk_buff *skb, struct ieee80211_frame *wh);
#ifdef USE_HEADERLEN_RESV
static __be16 ath_eth_type_trans(struct sk_buff *, struct net_device *);
#endif
static void ieee80211_recv_action_tdls(struct ieee80211_node *ni, struct sk_buff *skb,
struct ieee80211_action *ia, int ieee80211_header, int rssi);
static void ieee80211_recv_action_vht(struct ieee80211_node *ni,
struct ieee80211_action *ia,
int subtype,
struct ieee80211_frame *wh,
u_int8_t *frm,
u_int8_t *efrm);
static void ieee80211_recv_action_wnm(struct ieee80211_node *ni,
struct ieee80211_action *ia,
int subtype,
struct ieee80211_frame *wh,
u_int8_t *efrm);
/**
 * Given a node and the RSSI value of a frame just received from it, this
 * function checks whether to raise an iwspy event because the node is being
 * spied on and the RSSI falls outside the active threshold range.
*
* @param vap: VAP
* @param ni: sender node
* @param rssi: RSSI value of received frame
*/
static void
iwspy_event(struct ieee80211vap *vap, struct ieee80211_node *ni, u_int rssi)
{
if (vap->iv_spy.thr_low && vap->iv_spy.num && ni && (rssi <
vap->iv_spy.thr_low || rssi > vap->iv_spy.thr_high)) {
int i;
for (i = 0; i < vap->iv_spy.num; i++) {
if (IEEE80211_ADDR_EQ(ni->ni_macaddr,
&(vap->iv_spy.mac[i * IEEE80211_ADDR_LEN]))) {
union iwreq_data wrq;
struct iw_thrspy thr;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG,
"%s: we spy %s, threshold is active "
"and rssi exceeds it -> raise an iwspy"
" event\n", __func__, ether_sprintf(
ni->ni_macaddr));
memset(&wrq, 0, sizeof(wrq));
wrq.data.length = 1;
memset(&thr, 0, sizeof(struct iw_thrspy));
memcpy(thr.addr.sa_data, ni->ni_macaddr,
IEEE80211_ADDR_LEN);
thr.addr.sa_family = ARPHRD_ETHER;
set_quality(&thr.qual, rssi, vap->iv_ic->ic_channoise);
set_quality(&thr.low, vap->iv_spy.thr_low, vap->iv_ic->ic_channoise);
set_quality(&thr.high, vap->iv_spy.thr_high, vap->iv_ic->ic_channoise);
wireless_send_event(vap->iv_dev,
SIOCGIWTHRSPY, &wrq, (char*) &thr);
break;
}
}
}
}
static inline int
ieee80211_tdls_status_mismatch(struct ieee80211_node *ni)
{
if (IEEE80211_NODE_IS_TDLS_INACTIVE(ni) ||
IEEE80211_NODE_IS_TDLS_IDLE(ni))
return 1;
return 0;
}
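/*
 * Inspect a data frame delivered via the TQE path for encapsulated TDLS
 * handling. 802.11 management payloads of the TDLS SNAP type are passed
 * to the TDLS action handler on a STA vap and dropped on an AP vap with
 * HS2.0 or the L2 external filter enabled; ordinary data received while
 * the TDLS link is inactive or idle triggers a teardown event.
 * Returns 1 if the caller should drop the frame, 0 otherwise.
 */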
int ieee80211_tdls_tqe_path_check(struct ieee80211_node *ni,
struct sk_buff *skb, int rssi, uint16_t ether_type)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_action *ia;
uint8_t *payload_type;
struct ether_header *eh = (struct ether_header *) skb->data;
if (ether_type == __constant_htons(ETHERTYPE_80211MGT)) {
payload_type = (uint8_t*)(eh + 1);
if ( *payload_type == IEEE80211_SNAP_TYPE_TDLS) {
if (vap->iv_opmode == IEEE80211_M_STA) {
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
"TDLS %s: got 802.11 management over data, type=%u ptr=%p (%p)\n",
__func__, *payload_type, payload_type, eh);
ia = (struct ieee80211_action *)(payload_type + 1);
ieee80211_recv_action_tdls(ni, skb, ia, 0, rssi);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP && (vap->hs20_enable || g_l2_ext_filter)) {
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
"%s: dropping TDLS frame: HS2.0 or L2 external filter enabled\n", __func__);
return 1;
}
} else {
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
"TDLS %s: unsupported type %u\n",
__func__, *payload_type);
vap->iv_stats.is_rx_mgtdiscard++;
}
} else if (ieee80211_tdls_status_mismatch(ni)) {
enum ieee80211_tdls_operation operation;
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
"TDLS %s: data not allowed before tdls link is ready, peer status: %u\n",
__func__, ni->tdls_status);
vap->iv_stats.is_rx_tdls_stsmismatch++;
operation = IEEE80211_TDLS_TEARDOWN;
ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
return 1;
}
return 0;
}
EXPORT_SYMBOL(ieee80211_tdls_tqe_path_check);
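/*
 * Return 1 if this is a STA vap and the frame is long enough and carries
 * an LLC/SNAP header with the 802.11 management ether type (i.e. an
 * encapsulated management frame), 0 otherwise.
 */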
static int ieee80211_action_frame_check(struct ieee80211vap *vap,
struct sk_buff *skb, struct llc *llc, int min_len)
{
int ret = 0;
if ((vap->iv_opmode == IEEE80211_M_STA) &&
(skb->len >= min_len) &&
(llc->llc_dsap == LLC_SNAP_LSAP) &&
(llc->llc_ssap == LLC_SNAP_LSAP) &&
(llc->llc_control == LLC_UI) &&
(llc->llc_snap.org_code[0] == 0) &&
(llc->llc_snap.org_code[1] == 0) &&
(llc->llc_snap.org_code[2] == 0) &&
(llc->llc_un.type_snap.ether_type ==
htons(ETHERTYPE_80211MGT))) {
ret = 1;
}
return ret;
}
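/*
 * Check a data frame received via the mailbox path for a TDLS action
 * frame encapsulated behind the LLC/SNAP header and dispatch it to the
 * TDLS action handler; plain data received while the TDLS link is not
 * yet ready triggers a teardown event.
 */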
static void ieee80211_tdls_mailbox_path_check(struct ieee80211_node *ni,
struct sk_buff *skb, struct llc *llc, int rssi, int min_len)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_action *ia;
uint8_t *payload_type;
min_len += sizeof(*payload_type) + sizeof(*ia);
if (ieee80211_action_frame_check(vap, skb, llc, min_len)) {
if (unlikely(ieee80211_msg(vap, IEEE80211_MSG_TDLS) &&
ieee80211_tdls_msg(vap, IEEE80211_TDLS_MSG_DBG))) {
ieee80211_dump_pkt(vap->iv_ic, skb->data, min_len, -1, rssi);
}
payload_type = (u_int8_t *)llc + LLC_SNAPFRAMELEN;
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_DBG,
"TDLS %s: got 802.11 management over data, type=%u ptr=%p (%p)\n",
__func__, *payload_type, payload_type, llc);
if (*payload_type == IEEE80211_SNAP_TYPE_TDLS) {
ia = (struct ieee80211_action *)(payload_type + 1);
ieee80211_recv_action_tdls(ni, skb, ia, 1, rssi);
} else {
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
"TDLS %s: unsupported type %u\n",
__func__, *payload_type);
vap->iv_stats.is_rx_mgtdiscard++;
}
} else if (ieee80211_tdls_status_mismatch(ni)) {
enum ieee80211_tdls_operation operation;
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, IEEE80211_TDLS_MSG_WARN,
"TDLS %s: data not allowed before tdls link is ready, peer status: %u\n",
__func__, ni->tdls_status);
vap->iv_stats.is_rx_tdls_stsmismatch++;
operation = IEEE80211_TDLS_TEARDOWN;
ieee80211_tdls_send_event(ni, IEEE80211_EVENT_TDLS, &operation);
}
}
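/*
 * Return 1 if the management frame is a public action TDLS discovery
 * response, 0 otherwise.
 */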
static int
ieee80211_is_tdls_disc_resp(struct sk_buff *skb, int hdrlen)
{
struct ieee80211_action *ia = (struct ieee80211_action *)(skb->data + hdrlen);
if (skb->len < (hdrlen + sizeof(struct ieee80211_action)))
return 0;
if ((ia->ia_category == IEEE80211_ACTION_CAT_PUBLIC) &&
(ia->ia_action == IEEE80211_ACTION_PUB_TDLS_DISC_RESP))
return 1;
else
return 0;
}
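/*
 * Return non-zero if the data frame payload begins with an LLC/SNAP
 * header carrying the 802.11 management ether type, i.e. an encapsulated
 * TDLS action frame.
 */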
static int
ieee80211_is_tdls_action_frame(struct sk_buff *skb, int hdrlen)
{
static const uint8_t snap_e_header_pref[] = {LLC_SNAP_LSAP, LLC_SNAP_LSAP, LLC_UI, 0x00, 0x00};
uint8_t *data = &skb->data[hdrlen];
uint16_t ether_type = get_unaligned((uint16_t*)&data[6]);
int32_t snap_encap_pref = !memcmp(data, snap_e_header_pref, sizeof(snap_e_header_pref));
return (snap_encap_pref && (ether_type == htons(ETHERTYPE_80211MGT)));
}
static __inline int
ieee80211_tdls_frame_should_accept(struct sk_buff *skb, int type, int hdrlen)
{
return (type == IEEE80211_FC0_TYPE_DATA && ieee80211_is_tdls_action_frame(skb, hdrlen)) ||
(type == IEEE80211_FC0_TYPE_MGT && ieee80211_is_tdls_disc_resp(skb, hdrlen));
}
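/*
 * Decide whether a frame whose BSSID does not match our BSS should be
 * dropped. 4-address frames, background-scan beacons and probe responses,
 * monitor-mode traffic and TDLS frames are still accepted; a PS-POLL or
 * data from an unknown sender is answered with a deauth before the frame
 * is discarded.
 * Returns 1 to drop the frame, 0 to continue processing it.
 */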
static int ieee80211_input_should_drop(struct ieee80211_node *ni, uint8_t *bssid,
struct ieee80211_frame *wh, uint8_t type,
uint8_t subtype, struct sk_buff *skb)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = vap->iv_ic;
uint8_t dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
if (dir == IEEE80211_FC1_DIR_DSTODS)
return 0;
#ifdef QTN_BG_SCAN
if ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN) &&
(type == IEEE80211_FC0_TYPE_MGT) &&
(subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
return 0;
}
#endif
if (ic->ic_flags_qtn & IEEE80211_QTN_MONITOR)
return 0;
if ((type != IEEE80211_FC0_TYPE_CTL) && vap->tdls_over_qhop_en
&& ieee80211_tdls_frame_should_accept(skb, type, ieee80211_hdrspace(ic, wh)))
return 0;
/* PS-POLL frame in State 1 */
if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_myaddr) &&
(subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
bssid, NULL, "%s", "ps-poll in unauth state");
vap->iv_stats.is_rx_ps_unauth++;
ieee80211_send_error(ni, wh->i_addr2,
IEEE80211_FC0_SUBTYPE_DEAUTH,
IEEE80211_REASON_NOT_AUTHED);
return 1;
}
/* Packet from unknown source - send deauth. */
if (ni == vap->iv_bss && !ieee80211_is_bcst(wh->i_addr1)) {
if (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_DEAUTH) {
/*
 * Corner case:
 * the AP may have changed mode to STA while we are still unaware of it.
 * If deauthentication frames from the AP are dropped here, we have no
 * chance to disconnect from the AP.
*/
if (IEEE80211_ADDR_EQ(wh->i_addr2, ni->ni_bssid))
return 0;
}
/*
 * Frame intended for the repeater AP but delivered to the STA
 * interface; silently discard it.
*/
if (vap->iv_opmode != IEEE80211_M_STA ||
type != IEEE80211_FC0_TYPE_MGT ||
(subtype != IEEE80211_FC0_SUBTYPE_PROBE_REQ &&
#if defined(PLATFORM_QFDR)
subtype != IEEE80211_FC0_SUBTYPE_AUTH &&
subtype != IEEE80211_FC0_SUBTYPE_DEAUTH &&
#endif
subtype != IEEE80211_FC0_SUBTYPE_ASSOC_REQ)) {
ieee80211_send_error(ni, wh->i_addr2,
IEEE80211_FC0_SUBTYPE_DEAUTH,
IEEE80211_REASON_NOT_AUTHED);
}
}
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
bssid, NULL, "not from bss %pM", ni->ni_bssid);
vap->iv_stats.is_rx_wrongbss++;
return 1;
}
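/*
 * Derive the node's current wifi mode (11a/na/ac on 5 GHz, 11b/g/ng on
 * 2.4 GHz) from its HT/VHT capabilities and, for legacy 2.4 GHz nodes,
 * the highest supported rate.
 */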
void ieee80211_update_current_mode(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_vap->iv_ic;
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
if (IEEE80211_NODE_IS_VHT(ni)) {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_AC;
} else if (IEEE80211_NODE_IS_HT(ni)) {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_NA;
} else {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_A;
}
} else {
if (IEEE80211_NODE_IS_HT(ni)) {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_NG;
} else {
/* Check the last rate since the list was sorted */
if ((ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates - 1]
& IEEE80211_RATE_VAL) > IEEE80211_RATE_11MBPS) {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_G;
} else {
ni->ni_wifi_mode = IEEE80211_WIFI_MODE_B;
}
}
}
}
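/*
 * 802.11w (PMF) filtering of received management frames. Unprotected
 * robust management frames from an MFP peer are dropped; an unprotected
 * deauth/disassoc (on a STA) or auth from an authorized peer triggers an
 * SA Query exchange instead of being acted upon.
 * Returns 1 if the frame must be dropped, 0 otherwise.
 */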
static int ieee80211_input_pmf_should_drop(struct ieee80211vap *vap,
struct ieee80211_node *ni, struct ieee80211_frame *wh,
struct sk_buff *skb, u_int8_t subtype)
{
if (!ni->ni_associd || !RSN_IS_MFP(ni->ni_rsn.rsn_caps))
return 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
return 0;
}
if ((vap->iv_opmode == IEEE80211_M_STA)) {
if (!ni->ni_sa_query_timeout &&
(subtype == IEEE80211_FC0_SUBTYPE_DEAUTH ||
subtype == IEEE80211_FC0_SUBTYPE_DISASSOC)) {
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
forward_mgmt_to_app(vap, subtype, skb, wh);
return 1;
} else {
ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_REQ,
++ni->ni_sa_query_tid);
return 1;
}
}
}
if ((subtype == IEEE80211_FC0_SUBTYPE_AUTH) &&
ieee80211_node_is_authorized(ni)) {
ieee80211_send_sa_query(ni, IEEE80211_ACTION_W_SA_QUERY_REQ,
++ni->ni_sa_query_tid);
return 1;
}
if (ieee80211_mgmt_is_robust(wh))
return 1;
return 0;
}
/*
* Process a received frame. The node associated with the sender
* should be supplied. If nothing was found in the node table then
* the caller is assumed to supply a reference to ic_bss instead.
* The RSSI and a timestamp are also supplied. The RSSI data is used
 * during AP scanning to select an AP to associate with; it can have
* any units so long as values have consistent units and higher values
* mean ``better signal''. The receive timestamp is currently not used
* by the 802.11 layer.
*
* Context: softIRQ (tasklet)
*/
int
ieee80211_input(struct ieee80211_node *ni,
struct sk_buff *skb, int rssi, u_int32_t rstamp)
{
#define HAS_SEQ(type) ((type & 0x4) == 0)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = vap->iv_ic;
struct net_device *dev = vap->iv_dev;
struct ieee80211_frame *wh;
struct ieee80211_key *key;
struct ether_header *eh;
struct llc *llc;
int hdrspace;
u_int8_t dir, type = -1, subtype;
u_int8_t *bssid;
u_int16_t rxseq;
/* Tracks whether a node reference was taken here and must be released */
int node_reference_held = 0;
struct qtn_wds_ext_event_data extender_event_data;
KASSERT(ni != NULL, ("null node"));
KASSERT(skb->len >= sizeof(struct ieee80211_frame_min),
("frame length too short: %u", skb->len));
/* XXX adjust device in sk_buff? */
type = -1; /* undefined */
/*
* In monitor mode, send everything directly to bpf.
* Also do not process frames w/o i_addr2 any further.
* XXX may want to include the CRC
*/
if (vap->iv_opmode == IEEE80211_M_MONITOR)
goto out;
if (skb->len < sizeof(struct ieee80211_frame_min)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
ni->ni_macaddr, NULL,
"too short (1): len %u", skb->len);
vap->iv_stats.is_rx_tooshort++;
goto out;
}
if ((vap->iv_opmode != IEEE80211_M_STA) || IEEE80211_NODE_IS_TDLS_ACTIVE(ni))
ni->ni_inact = ni->ni_inact_reload;
/*
* Bit of a cheat here, we use a pointer for a 3-address
 * frame format but don't reference fields beyond
 * ieee80211_frame_min without first validating that the data
 * is present.
*/
wh = (struct ieee80211_frame *)skb->data;
if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
IEEE80211_FC0_VERSION_0) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
ni->ni_macaddr, NULL, "wrong version %x", wh->i_fc[0]);
vap->iv_stats.is_rx_badversion++;
goto err;
}
dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
if (dir == IEEE80211_FC1_DIR_NODS)
bssid = wh->i_addr3;
else
bssid = wh->i_addr2;
if (!IEEE80211_ADDR_EQ(bssid, ni->ni_bssid)) {
if (ieee80211_input_should_drop(ni, bssid, wh, type, subtype, skb)) {
goto out;
}
}
iwspy_event(vap, ni, rssi);
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
if (dir != IEEE80211_FC1_DIR_NODS)
bssid = wh->i_addr1;
else if (type == IEEE80211_FC0_TYPE_CTL)
bssid = wh->i_addr1;
else {
if (skb->len < sizeof(struct ieee80211_frame)) {
IEEE80211_DISCARD_MAC(vap,
IEEE80211_MSG_INPUT, ni->ni_macaddr,
NULL, "too short (2): len %u",
skb->len);
vap->iv_stats.is_rx_tooshort++;
goto out;
}
bssid = wh->i_addr3;
}
/* Do not try to find a node reference if the packet really did come from the BSS */
if (type == IEEE80211_FC0_TYPE_DATA && ni == vap->iv_bss &&
!IEEE80211_ADDR_EQ(vap->iv_bss->ni_macaddr, wh->i_addr2)) {
/* Try to find sender in local node table. */
ni = ieee80211_find_node(vap->iv_bss->ni_table, wh->i_addr2);
if (ni == NULL) {
/*
* Fake up a node for this newly discovered
 * member of the IBSS. This should probably be
* done after an ACL check.
*/
ni = ieee80211_fakeup_adhoc_node(vap,
wh->i_addr2);
if (ni == NULL) {
/* NB: stat kept for alloc failure */
goto err;
}
}
node_reference_held = 1;
}
iwspy_event(vap, ni, rssi);
break;
case IEEE80211_M_HOSTAP:
if (dir != IEEE80211_FC1_DIR_NODS)
bssid = wh->i_addr1;
else if (type == IEEE80211_FC0_TYPE_CTL)
bssid = wh->i_addr1;
else {
if (skb->len < sizeof(struct ieee80211_frame)) {
IEEE80211_DISCARD_MAC(vap,
IEEE80211_MSG_INPUT, ni->ni_macaddr,
NULL, "too short (2): len %u",
skb->len);
vap->iv_stats.is_rx_tooshort++;
goto out;
}
bssid = wh->i_addr3;
}
/*
* Validate the bssid.
*/
if (!IEEE80211_ADDR_EQ(bssid, vap->iv_bss->ni_bssid) &&
!IEEE80211_ADDR_EQ(bssid, dev->broadcast)) {
/* It can be a beacon from another network. Required for certification. */
vap->iv_stats.is_rx_wrongbss++;
if (!((type == IEEE80211_FC0_TYPE_MGT) && ((subtype == IEEE80211_FC0_SUBTYPE_BEACON)
#ifdef QTN_BG_SCAN
|| ((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
&& (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP))
#endif /* QTN_BG_SCAN */
))) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
bssid, NULL, "%s %02X %02X", "not to bss", type, subtype);
goto out;
}
}
break;
case IEEE80211_M_WDS:
if (skb->len < sizeof(struct ieee80211_frame_addr4)) {
IEEE80211_DISCARD_MAC(vap,
IEEE80211_MSG_INPUT, ni->ni_macaddr,
NULL, "too short (3): len %u",
skb->len);
vap->iv_stats.is_rx_tooshort++;
goto out;
}
bssid = wh->i_addr1;
if (!IEEE80211_ADDR_EQ(bssid, vap->iv_myaddr) &&
!IEEE80211_ADDR_EQ(bssid, dev->broadcast)) {
/* not interested in */
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
bssid, NULL, "%s", "not to bss");
vap->iv_stats.is_rx_wrongbss++;
goto out;
}
if (!IEEE80211_ADDR_EQ(wh->i_addr2, vap->wds_mac)) {
/* not interested in */
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
wh->i_addr2, NULL, "%s", "not from DS");
vap->iv_stats.is_rx_wrongbss++;
goto out;
}
break;
default:
/* XXX catch bad values */
goto out;
}
ni->ni_rstamp = rstamp;
ni->ni_last_rx = jiffies;
if (HAS_SEQ(type)) {
u_int8_t tid;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
tid = ((struct ieee80211_qosframe *)wh)->
i_qos[0] & IEEE80211_QOS_TID;
if (TID_TO_WME_AC(tid) >= WME_AC_VI)
ic->ic_wme.wme_hipri_traffic++;
tid++;
} else
tid = 0;
rxseq = le16toh(*(__le16 *)wh->i_seq);
if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
IEEE80211_SEQ_EQ(rxseq, ni->ni_rxseqs[tid]) &&
!((type == IEEE80211_FC0_TYPE_MGT) &&
(subtype == IEEE80211_FC0_SUBTYPE_AUTH))
#ifdef QTN_BG_SCAN
&& !((ic->ic_flags_qtn & IEEE80211_QTN_BGSCAN)
&& (type == IEEE80211_FC0_TYPE_MGT)
&& (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP ||
subtype == IEEE80211_FC0_SUBTYPE_BEACON))
#endif /* QTN_BG_SCAN */
) {
/* duplicate, discard */
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
bssid, "duplicate",
"seqno <%u,%u> fragno <%u,%u> tid %u",
rxseq >> IEEE80211_SEQ_SEQ_SHIFT,
ni->ni_rxseqs[tid] >>
IEEE80211_SEQ_SEQ_SHIFT,
rxseq & IEEE80211_SEQ_FRAG_MASK,
ni->ni_rxseqs[tid] &
IEEE80211_SEQ_FRAG_MASK,
tid);
vap->iv_stats.is_rx_dup++;
IEEE80211_NODE_STAT(ni, rx_dup);
goto out;
}
ni->ni_rxseqs[tid] = rxseq;
}
if (node_reference_held) {
ieee80211_free_node(ni);
}
}
switch (type) {
case IEEE80211_FC0_TYPE_DATA:
hdrspace = ieee80211_hdrspace(ic, wh);
if (skb->len < hdrspace) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "too short: len %u, expecting %u",
skb->len, hdrspace);
vap->iv_stats.is_rx_tooshort++;
goto out; /* XXX */
}
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
if ((dir != IEEE80211_FC1_DIR_FROMDS) &&
(dir != IEEE80211_FC1_DIR_NODS) &&
(!((vap->iv_flags_ext & IEEE80211_FEXT_WDS) &&
(dir == IEEE80211_FC1_DIR_DSTODS)))) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "invalid dir 0x%x", dir);
vap->iv_stats.is_rx_wrongdir++;
goto out;
}
if ((dev->flags & IFF_MULTICAST) &&
IEEE80211_IS_MULTICAST(wh->i_addr1)) {
if (IEEE80211_ADDR_EQ(wh->i_addr3, vap->iv_myaddr)) {
/*
 * In an IEEE 802.11 network, a multicast packet
 * sent by us is re-broadcast by the AP.
 * It should be silently discarded for a
 * SIMPLEX interface.
*
* NB: Linux has no IFF_ flag to indicate
* if an interface is SIMPLEX or not;
* so we always assume it to be true.
*/
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, NULL, "%s", "multicast echo");
vap->iv_stats.is_rx_mcastecho++;
goto out;
}
/*
* if it is broadcast by me on behalf of
* a station behind me, drop it.
*/
if (vap->iv_flags_ext & IEEE80211_FEXT_WDS) {
struct ieee80211_node_table *nt;
struct ieee80211_node *ni_wds;
nt = &ic->ic_sta;
ni_wds = ieee80211_find_wds_node(nt, wh->i_addr3);
if (ni_wds) {
ieee80211_free_node(ni_wds); /* Decr ref count */
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, NULL, "%s",
"multicast echo originated from node behind me");
vap->iv_stats.is_rx_mcastecho++;
goto out;
}
}
}
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
if (dir != IEEE80211_FC1_DIR_NODS) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "invalid dir 0x%x", dir);
vap->iv_stats.is_rx_wrongdir++;
goto out;
}
/* XXX no power-save support */
break;
case IEEE80211_M_HOSTAP:
/*
* FIXME - QOS Null check added because Quantenna image
* currently doesn't set the to/from DS bits.
*/
if ((dir != IEEE80211_FC1_DIR_TODS) &&
(dir != IEEE80211_FC1_DIR_DSTODS) &&
(subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "invalid dir 0x%x", dir);
vap->iv_stats.is_rx_wrongdir++;
goto out;
}
/* check if source STA is associated */
if (ni == vap->iv_bss) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "%s", "unknown src");
/* NB: caller deals with reference */
if (vap->iv_state == IEEE80211_S_RUN) {
if ((dir == IEEE80211_FC1_DIR_DSTODS) &&
(IEEE80211_IS_MULTICAST(wh->i_addr1))) {
/*
 * Some third-party WDS APs send WDS packets with a
 * broadcast/multicast receiver address; our AP receives
 * them and would respond with a flood of deauth frames
 * that the sender simply ignores. To avoid excessive
 * deauth traffic we can safely ignore such packets.
*/
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "%s", "mcast wds pkt");
} else {
ieee80211_send_error(ni, wh->i_addr2,
IEEE80211_FC0_SUBTYPE_DEAUTH,
IEEE80211_REASON_NOT_AUTHED);
}
}
vap->iv_stats.is_rx_notassoc++;
goto err;
}
if (ni->ni_associd == 0) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "%s", "unassoc src");
IEEE80211_SEND_MGMT(ni,
IEEE80211_FC0_SUBTYPE_DISASSOC,
IEEE80211_REASON_NOT_ASSOCED);
vap->iv_stats.is_rx_notassoc++;
goto err;
}
/*
 * If this is a 4-address packet, make sure we have an entry in
* the node table for the packet source address (addr4).
* If not, add one.
*/
if (dir == IEEE80211_FC1_DIR_DSTODS) {
struct ieee80211_node_table *nt;
struct ieee80211_frame_addr4 *wh4;
struct ieee80211_node *ni_wds;
if (!(vap->iv_flags_ext & IEEE80211_FEXT_WDS)) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "%s", "4 addr not allowed");
goto err;
}
wh4 = (struct ieee80211_frame_addr4 *)skb->data;
nt = &ic->ic_sta;
ni_wds = ieee80211_find_wds_node(nt, wh4->i_addr4);
/* Last call increments ref count if !NULL */
if ((ni_wds != NULL) && (ni_wds != ni)) {
/*
* node with source address (addr4) moved
* to another WDS capable station.
*/
(void) ieee80211_remove_wds_addr(nt, wh4->i_addr4);
ieee80211_add_wds_addr(nt, ni, wh4->i_addr4, 0);
}
if (ni_wds == NULL)
ieee80211_add_wds_addr(nt, ni, wh4->i_addr4, 0);
else
ieee80211_free_node(ni_wds);
}
/*
* Check for power save state change.
*/
if (!(ni->ni_flags & IEEE80211_NODE_UAPSD)) {
if ((wh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^
(ni->ni_flags & IEEE80211_NODE_PWR_MGT))
ieee80211_node_pwrsave(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT);
} else if (ni->ni_flags & IEEE80211_NODE_PS_CHANGED) {
int pwr_save_changed = 0;
IEEE80211_LOCK_IRQ(ic);
if ((*(__le16 *)(&wh->i_seq[0])) == ni->ni_pschangeseq) {
ni->ni_flags &= ~IEEE80211_NODE_PS_CHANGED;
pwr_save_changed = 1;
}
IEEE80211_UNLOCK_IRQ(ic);
if (pwr_save_changed)
ieee80211_node_pwrsave(ni, wh->i_fc[1] & IEEE80211_FC1_PWR_MGT);
}
break;
case IEEE80211_M_WDS:
if (dir != IEEE80211_FC1_DIR_DSTODS) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "data", "invalid dir 0x%x", dir);
vap->iv_stats.is_rx_wrongdir++;
goto out;
}
break;
default:
/* XXX here to keep compiler happy */
goto out;
}
/*
* Handle privacy requirements. Note that we
* must not be preempted from here until after
* we (potentially) call ieee80211_crypto_demic;
* otherwise we may violate assumptions in the
* crypto cipher modules used to do delayed update
* of replay sequence numbers.
*/
if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
/*
* Discard encrypted frames when privacy is off.
*/
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "WEP", "%s", "PRIVACY off");
vap->iv_stats.is_rx_noprivacy++;
IEEE80211_NODE_STAT(ni, rx_noprivacy);
goto out;
}
key = ieee80211_crypto_decap(ni, skb, hdrspace);
if (key == NULL) {
/* NB: stats+msgs handled in crypto_decap */
IEEE80211_NODE_STAT(ni, rx_wepfail);
//FIXME: This MUST be re-enabled - it could present a security hole.
//Needs more thought.
//
//RK-2009-11-24: this was commented out to allow WPA2 AES fragments
//to pass through the slow driver path.
//goto out;
}
wh = (struct ieee80211_frame *)skb->data;
wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
} else
key = NULL;
/*
* Next up, any fragmentation.
*/
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
skb = ieee80211_defrag(ni, skb, hdrspace);
if (skb == NULL) {
/* Fragment dropped or frame not complete yet */
goto out;
}
}
/*
* Next strip any MSDU crypto bits.
*/
if (key != NULL &&
!ieee80211_crypto_demic(vap, key, skb, hdrspace)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
ni->ni_macaddr, "data", "%s", "demic error");
IEEE80211_NODE_STAT(ni, rx_demicfail);
goto out;
}
/* TDLS data encapsulated management frame */
llc = (struct llc *) (skb->data + hdrspace);
ieee80211_tdls_mailbox_path_check(ni, skb, llc, rssi,
hdrspace + LLC_SNAPFRAMELEN);
/*
* Finally, strip the 802.11 header.
*/
wh = NULL; /* no longer valid, catch any uses */
skb = ieee80211_decap(vap, skb, hdrspace);
if (skb == NULL) {
/* don't count Null data frames as errors */
if (subtype == IEEE80211_FC0_SUBTYPE_NODATA)
goto out;
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
ni->ni_macaddr, "data", "%s", "decap error");
vap->iv_stats.is_rx_decap++;
IEEE80211_NODE_STAT(ni, rx_decap);
goto err;
}
eh = (struct ether_header *) skb->data;
if (! accept_data_frame(vap, ni, key, skb, eh))
goto out;
vap->iv_devstats.rx_packets++;
vap->iv_devstats.rx_bytes += skb->len;
IEEE80211_NODE_STAT(ni, rx_data);
IEEE80211_NODE_STAT_ADD(ni, rx_bytes, skb->len);
ic->ic_lastdata = jiffies;
/* if the subtype is Null Data or QoS Null Data, don't send to the Linux protocol stack */
if ((subtype == IEEE80211_FC0_SUBTYPE_NODATA) || (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
"%s: NULL or QOS NULL DATA: don't deliver to linux protocol stack\n", __func__);
goto out;
}
ieee80211_deliver_data(ni, skb);
return IEEE80211_FC0_TYPE_DATA;
case IEEE80211_FC0_TYPE_MGT:
/* Only accept action frames and peer beacons for WDS */
if (vap->iv_opmode == IEEE80211_M_WDS &&
subtype != IEEE80211_FC0_SUBTYPE_ACTION_NOACK &&
subtype != IEEE80211_FC0_SUBTYPE_ACTION &&
subtype != IEEE80211_FC0_SUBTYPE_BEACON) {
struct ieee80211vap *pri_vap = TAILQ_FIRST(&ic->ic_vaps);
if ((ic->ic_extender_role == IEEE80211_EXTENDER_ROLE_MBS) &&
ieee80211_extender_find_peer_wds_info(ic, wh->i_addr2)) {
IEEE80211_EXTENDER_DPRINTF(vap, IEEE80211_EXTENDER_MSG_WARN,
"QHop: unexpected frame 0x%x from peer %pM\n",
subtype, wh->i_addr2);
extender_event_data_prepare(ic, NULL,
&extender_event_data,
WDS_EXT_LINK_STATUS_UPDATE,
wh->i_addr2);
ieee80211_extender_send_event(pri_vap, &extender_event_data, NULL);
ieee80211_extender_remove_peer_wds_info(ic, wh->i_addr2);
}
vap->iv_stats.is_rx_mgtdiscard++;
goto out;
}
IEEE80211_NODE_STAT(ni, rx_mgmt);
if (dir != IEEE80211_FC1_DIR_NODS) {
vap->iv_stats.is_rx_wrongdir++;
goto err;
}
if (skb->len < sizeof(struct ieee80211_frame)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
ni->ni_macaddr, "mgt", "too short: len %u",
skb->len);
vap->iv_stats.is_rx_tooshort++;
goto out;
}
#ifdef IEEE80211_DEBUG
if ((ieee80211_msg_debug(vap) && doprint(vap, subtype)) ||
ieee80211_msg_dumppkts(vap)) {
ieee80211_note(vap, "received %s from %s rssi %d\n",
ieee80211_mgt_subtype_name[subtype >>
IEEE80211_FC0_SUBTYPE_SHIFT],
ether_sprintf(wh->i_addr2), rssi);
}
#endif
if (vap->iv_pmf) {
if (ieee80211_input_pmf_should_drop(vap, ni, wh, skb, subtype))
goto out;
}
if (wh->i_fc[1] & IEEE80211_FC1_PROT) {
if (subtype != IEEE80211_FC0_SUBTYPE_AUTH) {
/*
* Only shared key auth frames with a challenge
* should be encrypted, discard all others.
*/
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, ieee80211_mgt_subtype_name[subtype >>
IEEE80211_FC0_SUBTYPE_SHIFT],
"%s", "WEP set but not permitted");
vap->iv_stats.is_rx_mgtdiscard++; /* XXX */
goto out;
}
if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
/*
* Discard encrypted frames when privacy is off.
*/
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, "mgt", "%s", "WEP set but PRIVACY off");
vap->iv_stats.is_rx_noprivacy++;
goto out;
}
hdrspace = ieee80211_hdrspace(ic, wh);
key = ieee80211_crypto_decap(ni, skb, hdrspace);
if (key == NULL) {
/* NB: stats+msgs handled in crypto_decap */
goto out;
}
wh = (struct ieee80211_frame *)skb->data;
wh->i_fc[1] &= ~IEEE80211_FC1_PROT;
}
ic->ic_recv_mgmt(ni, skb, subtype, rssi, rstamp);
goto out;
case IEEE80211_FC0_TYPE_CTL: {
u_int8_t reason;
IEEE80211_NODE_STAT(ni, rx_ctrl);
vap->iv_stats.is_rx_ctl++;
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
if (subtype == IEEE80211_FC0_SUBTYPE_PS_POLL)
ieee80211_recv_pspoll(ni, skb);
/* if a STA receives a PS-POLL before it is fully associated, send a deauth */
if (vap->iv_opmode == IEEE80211_M_STA &&
subtype == IEEE80211_FC0_SUBTYPE_PS_POLL &&
vap->iv_state < IEEE80211_S_RUN) {
if (vap->iv_state <= IEEE80211_S_AUTH) {
reason = IEEE80211_REASON_NOT_AUTHED;
} else {
reason = IEEE80211_REASON_NOT_ASSOCED;
}
IEEE80211_DISCARD(vap,
IEEE80211_MSG_POWER | IEEE80211_MSG_DEBUG,
wh, "receive ps-poll", "state-%d, send deauth",
((reason == IEEE80211_REASON_NOT_AUTHED) ? 1:2));
vap->iv_stats.is_ps_unassoc++;
ieee80211_send_error(ni, wh->i_addr2,
IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
}
goto out;
}
default:
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
wh, NULL, "bad frame type 0x%x", type);
/* should not come here */
break;
}
err:
vap->iv_devstats.rx_errors++;
out:
if (skb != NULL)
dev_kfree_skb(skb);
return type;
#undef HAS_SEQ
}
EXPORT_SYMBOL(ieee80211_input);
/*
* Determines whether a frame should be accepted, based on information
* about the frame's origin and encryption, and policy for this vap.
*/
static int accept_data_frame(struct ieee80211vap *vap,
struct ieee80211_node *ni, struct ieee80211_key *key,
struct sk_buff *skb, struct ether_header *eh)
{
#define IS_EAPOL(eh) ((eh)->ether_type == __constant_htons(ETH_P_PAE))
#define PAIRWISE_SET(vap) ((vap)->iv_nw_keys[0].wk_cipher != &ieee80211_cipher_none)
if (IS_EAPOL(eh)) {
/* encrypted eapol is always OK */
if (key)
return 1;
/* cleartext eapol is OK if we don't have pairwise keys yet */
if (! PAIRWISE_SET(vap))
return 1;
/* cleartext eapol is OK if configured to allow it */
if (! IEEE80211_VAP_DROPUNENC_EAPOL(vap))
return 1;
/* cleartext eapol is OK if other unencrypted is OK */
if (! (vap->iv_flags & IEEE80211_F_DROPUNENC))
return 1;
/* not OK */
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
eh->ether_shost, "data",
"unauthorized port: ether type 0x%x len %u",
ntohs(eh->ether_type), skb->len);
vap->iv_stats.is_rx_unauth++;
vap->iv_devstats.rx_errors++;
IEEE80211_NODE_STAT(ni, rx_unauth);
return 0;
}
if (!ieee80211_node_is_authorized(ni)) {
/*
* Deny any non-PAE frames received prior to
* authorization. For open/shared-key
 * authentication the port is marked authorized
* after authentication completes. For 802.1x
* the port is not marked authorized by the
* authenticator until the handshake has completed.
*/
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
eh->ether_shost, "data",
"unauthorized port: ether type 0x%x len %u",
ntohs(eh->ether_type), skb->len);
vap->iv_stats.is_rx_unauth++;
vap->iv_devstats.rx_errors++;
IEEE80211_NODE_STAT(ni, rx_unauth);
return 0;
}
return 1;
#undef IS_EAPOL
#undef PAIRWISE_SET
}
/*
* Context: softIRQ (tasklet)
*/
int
ieee80211_input_all(struct ieee80211com *ic,
struct sk_buff *skb, int rssi, u_int32_t rstamp)
{
struct ieee80211vap *vap;
int type = -1;
struct sk_buff *skb1;
struct ieee80211_node *ni;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
if (vap->iv_opmode == IEEE80211_M_WDS) {
/* Discard input from non-peer */
continue;
}
if (TAILQ_NEXT(vap, iv_next) != NULL) {
skb1 = skb_copy(skb, GFP_ATOMIC);
if (skb1 == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
"%s: SKB copy failed\n", __func__);
continue;
}
} else {
skb1 = skb;
skb = NULL;
}
ni = vap->iv_bss;
ieee80211_ref_node(ni);
type = ieee80211_input(ni, skb1, rssi, rstamp);
ieee80211_free_node(ni);
}
/* No more vaps, reclaim skb */
if (skb != NULL)
dev_kfree_skb(skb);
return type;
}
EXPORT_SYMBOL(ieee80211_input_all);
/*
 * This function reassembles fragments using the skb of the 1st fragment,
* if large enough. If not, a new skb is allocated to hold incoming
* fragments.
*
 * Fragments are copied to the end of the previous fragment. A different
* strategy could have been used, where a non-linear skb is allocated and
* fragments attached to that skb.
*/
static struct sk_buff *
ieee80211_defrag(struct ieee80211_node *ni, struct sk_buff *skb, int hdrlen)
{
struct ieee80211_frame *wh = (struct ieee80211_frame *) skb->data;
u_int16_t rxseq, last_rxseq;
u_int8_t fragno, last_fragno;
u_int8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
rxseq = le16_to_cpu(*(__le16 *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
fragno = le16_to_cpu(*(__le16 *)wh->i_seq) & IEEE80211_SEQ_FRAG_MASK;
/* Quick way out, if there's nothing to defragment */
if (!more_frag && fragno == 0 && ni->ni_rxfrag == NULL)
return skb;
ni->ni_stats.ns_rx_fragment_pkts++;
/*
* Remove frag to ensure it doesn't get reaped by timer.
*/
if (ni->ni_table == NULL) {
/*
* Should never happen. If the node is orphaned (not in
* the table) then input packets should not reach here.
* Otherwise, a concurrent request that yanks the table
* should be blocked by other interlocking and/or by first
* shutting the driver down. Regardless, be defensive
* here and just bail
*/
/* XXX need msg+stat */
dev_kfree_skb(skb);
return NULL;
}
/*
* Use this lock to make sure ni->ni_rxfrag is
* not freed by the timer process while we use it.
* XXX bogus
*/
IEEE80211_NODE_LOCK_IRQ(ni->ni_table);
/*
* Update the time stamp. As a side effect, it
* also makes sure that the timer will not change
* ni->ni_rxfrag for at least 1 second, or in
 * other words, for the remainder of this function.
*/
ni->ni_rxfragstamp = jiffies;
IEEE80211_NODE_UNLOCK_IRQ(ni->ni_table);
/*
* Validate that fragment is in order and
* related to the previous ones.
*/
if (ni->ni_rxfrag) {
struct ieee80211_frame *lwh;
lwh = (struct ieee80211_frame *) ni->ni_rxfrag->data;
last_rxseq = le16_to_cpu(*(__le16 *)lwh->i_seq) >>
IEEE80211_SEQ_SEQ_SHIFT;
last_fragno = le16_to_cpu(*(__le16 *)lwh->i_seq) &
IEEE80211_SEQ_FRAG_MASK;
if (rxseq != last_rxseq
|| fragno != last_fragno + 1
|| (!IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1))
|| (!IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2))
|| (ni->ni_rxfrag->end - ni->ni_rxfrag->tail <
skb->len)) {
/*
* Unrelated fragment or no space for it,
* clear current fragments
*/
dev_kfree_skb(ni->ni_rxfrag);
ni->ni_rxfrag = NULL;
}
}
/* If this is the first fragment */
if (ni->ni_rxfrag == NULL && fragno == 0) {
ni->ni_rxfrag = skb;
/* If more frags are coming */
if (more_frag) {
if (skb_is_nonlinear(skb)) {
/*
 * We need a contiguous buffer to
* assemble fragments
*/
ni->ni_rxfrag = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb(skb);
}
/*
* Check that we have enough space to hold
* incoming fragments
* 1. Don't assume MTU is the RX frame size limit.
 * 2. Don't assume the original packet starts at skb->head, in case
 *    the kernel reserved some bytes of headroom.
*/
else if ((skb_end_pointer(skb) - skb->data) <
(IEEE80211_MAX_LEN + hdrlen)) {
ni->ni_rxfrag = skb_copy_expand(skb, 0,
(IEEE80211_MAX_LEN + hdrlen - skb->len),
GFP_ATOMIC);
dev_kfree_skb(skb);
}
}
} else {
if (ni->ni_rxfrag) {
struct ieee80211_frame *lwh = (struct ieee80211_frame *)
ni->ni_rxfrag->data;
/*
* We know we have enough space to copy,
* we've verified that before
*/
/* Copy current fragment at end of previous one */
memcpy(skb_tail_pointer(ni->ni_rxfrag),
skb->data + hdrlen, skb->len - hdrlen);
/* Update tail and length */
skb_put(ni->ni_rxfrag, skb->len - hdrlen);
/* Keep a copy of last sequence and fragno */
*(__le16 *) lwh->i_seq = *(__le16 *) wh->i_seq;
}
/* we're done with the fragment */
dev_kfree_skb(skb);
}
if (more_frag) {
/* More to come */
skb = NULL;
} else {
/* Last fragment received, we're done! */
skb = ni->ni_rxfrag;
ni->ni_rxfrag = NULL;
}
return skb;
}
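/*
 * Deliver a received, decapsulated frame to the network stack. On an AP
 * vap this also performs intra-vap bridging: multicast frames are copied
 * back out over the air and unicast frames destined to an associated,
 * authorized station on the same vap are retransmitted instead of being
 * passed up. Frames may also be diverted to the L2 external filter when
 * it is enabled.
 */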
static void
ieee80211_deliver_data(struct ieee80211_node *ni, struct sk_buff *skb)
{
struct ieee80211vap *vap = ni->ni_vap;
struct net_device *dev = vap->iv_dev;
struct ether_header *eh = (struct ether_header *) skb->data;
if (unlikely(g_l2_ext_filter)) {
if (!skb->ext_l2_filter && vap->iv_opmode == IEEE80211_M_HOSTAP) {
if (!skb->dev)
skb->dev = dev;
#ifdef USE_HEADERLEN_RESV
skb->protocol = ath_eth_type_trans(skb, skb->dev);
#else
skb->protocol = eth_type_trans(skb, skb->dev);
#endif
if (!(skb->protocol == __constant_htons(ETH_P_PAE) &&
IEEE80211_ADDR_EQ(eh->ether_dhost, vap->iv_myaddr))) {
vap->iv_ic->ic_send_to_l2_ext_filter(vap, skb);
return ;
}
}
}
/*
* perform as a bridge within the vap
* - intra-vap bridging only
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
(vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) {
struct sk_buff *skb1 = NULL;
if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
skb1 = skb_copy(skb, GFP_ATOMIC);
} else {
/*
* Check if destination is associated with the
* same vap and authorized to receive traffic.
* Beware of traffic destined for the vap itself;
* sending it will not work; just let it be
* delivered normally.
*/
struct ieee80211_node *ni1 = ieee80211_find_node(
&vap->iv_ic->ic_sta, eh->ether_dhost);
if (ni1 != NULL) {
if (ni1->ni_vap == vap &&
ieee80211_node_is_authorized(ni1) &&
ni1 != vap->iv_bss) {
skb1 = skb;
skb = NULL;
}
/* XXX statistic? */
ieee80211_free_node(ni1);
}
}
if (skb1 != NULL) {
skb1->dev = dev;
skb_reset_mac_header(skb1);
skb_set_network_header(skb1, sizeof(struct ether_header));
skb1->protocol = __constant_htons(ETH_P_802_2);
/* XXX insert vlan tag before queuing it? */
dev_queue_xmit(skb1);
}
}
if (skb != NULL) {
if (!skb->dev)
skb->dev = dev;
#ifdef USE_HEADERLEN_RESV
skb->protocol = ath_eth_type_trans(skb, skb->dev);
#else
skb->protocol = eth_type_trans(skb, skb->dev);
#endif
if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
/* TODO: there is no equivalent function in kernel 4.7. For now let's
 * just pass this skb to the upper layer.
*/
#ifdef QTN_ENABLE_BRIDGE
/* attach vlan tag */
vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
#else
netif_rx(skb);
#endif
} else {
netif_rx(skb);
}
dev->last_rx = jiffies;
}
}
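/*
 * Strip the 802.11 header (and LLC/SNAP encapsulation, if present) and
 * rebuild an Ethernet header from the address fields appropriate for the
 * frame's To/From DS combination, realigning the payload if the result
 * is not 32-bit aligned.
 */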
static struct sk_buff *
ieee80211_decap(struct ieee80211vap *vap, struct sk_buff *skb, int hdrlen)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_qosframe_addr4 wh; /* Max size address frames */
struct ether_header *eh;
struct llc *llc;
__be16 ether_type = 0;
memcpy(&wh, skb->data, hdrlen); /* Only copy hdrlen over */
llc = (struct llc *) skb_pull(skb, hdrlen);
if (skb->len >= LLC_SNAPFRAMELEN &&
llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP &&
llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 &&
llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0) {
ether_type = llc->llc_un.type_snap.ether_type;
skb_pull(skb, LLC_SNAPFRAMELEN);
llc = NULL;
}
eh = (struct ether_header *) skb_push(skb, sizeof(struct ether_header));
switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
case IEEE80211_FC1_DIR_NODS:
IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
/*
 * For TDLS: a TDLS link with a third-party station
 * uses 3-address mode.
*/
ic->ic_bridge_set_dest_addr(skb, (void *)eh);
IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
break;
case IEEE80211_FC1_DIR_TODS:
IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
break;
case IEEE80211_FC1_DIR_FROMDS:
IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
ic->ic_bridge_set_dest_addr(skb, (void *)eh);
IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3);
break;
case IEEE80211_FC1_DIR_DSTODS:
IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
/*
 * For TDLS: when associated with a third-party AP,
 * 3-address mode is used.
*/
if (IEEE80211_ADDR_EQ(wh.i_addr1, wh.i_addr3))
ic->ic_bridge_set_dest_addr(skb, (void *)eh);
IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4);
break;
}
if (!ALIGNED_POINTER(skb->data + sizeof(*eh), u_int32_t)) {
struct sk_buff *n;
/* XXX does this always work? */
n = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb(skb);
if (n == NULL)
return NULL;
skb = n;
eh = (struct ether_header *) skb->data;
}
if (llc != NULL)
eh->ether_type = htons(skb->len - sizeof(*eh));
else
eh->ether_type = ether_type;
return skb;
}
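/*
 * Parse the supported rates and extended supported rates elements into
 * the node's rate set without sanitizing them; ieee80211_setup_rates()
 * below does the same and additionally calls ieee80211_fix_rate().
 */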
int
ieee80211_parse_rates(struct ieee80211_node *ni,
const u_int8_t *rates, const u_int8_t *xrates)
{
struct ieee80211_rateset *rs = &ni->ni_rates;
memset(rs, 0, sizeof(*rs));
rs->rs_nrates = rates[1];
rs->rs_legacy_nrates = rates[1];
memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
if (xrates != NULL) {
u_int8_t nxrates = 0;
/*
* Tack on 11g extended supported rate element.
*/
nxrates = xrates[1];
if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
struct ieee80211vap *vap = ni->ni_vap;
nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
"extended rate set too large;"
" only using %u of %u rates",
nxrates, xrates[1]);
vap->iv_stats.is_rx_rstoobig++;
}
memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates);
rs->rs_nrates += nxrates;
rs->rs_legacy_nrates += nxrates;
}
return 1;
}
/*
* Install received rate set information in the node's state block.
*/
int
ieee80211_setup_rates(struct ieee80211_node *ni,
const u_int8_t *rates, const u_int8_t *xrates, int flags)
{
struct ieee80211_rateset *rs = &ni->ni_rates;
memset(rs, 0, sizeof(*rs));
rs->rs_nrates = rates[1];
rs->rs_legacy_nrates = rates[1];
memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
if (xrates != NULL) {
u_int8_t nxrates = 0;
/*
* Tack on 11g extended supported rate element.
*/
nxrates = xrates[1];
if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
struct ieee80211vap *vap = ni->ni_vap;
nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
"extended rate set too large;"
" only using %u of %u rates",
nxrates, xrates[1]);
vap->iv_stats.is_rx_rstoobig++;
}
memcpy(rs->rs_rates + rs->rs_nrates, xrates+2, nxrates);
rs->rs_nrates += nxrates;
rs->rs_legacy_nrates += nxrates;
}
return ieee80211_fix_rate(ni, flags);
}
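/*
 * Handle an open system authentication frame. As an AP, accept the
 * request and reply with the next sequence number (allocating a node for
 * the station if necessary); as a STA, advance to ASSOC state on success
 * or fall back to scanning on failure.
 */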
static void
ieee80211_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
int rssi, u_int32_t rstamp, u_int16_t seq, u_int16_t status)
{
struct ieee80211vap *vap = ni->ni_vap;
int node_reference_held = 0;
if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "open auth",
"bad sta auth mode %u", ni->ni_authmode);
vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
/*
* To send the frame to the requesting STA we have to create a node
* for the station that we're going to reject.
*/
if (ni == vap->iv_bss) {
ni = ieee80211_tmp_node(vap, wh->i_addr2);
if (ni == NULL) {
return;
}
node_reference_held = 1;
}
IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH,
(seq + 1) | (IEEE80211_STATUS_ALG << 16));
if (node_reference_held) {
ieee80211_free_node(ni);
}
return;
}
}
switch (vap->iv_opmode) {
case IEEE80211_M_IBSS:
if (vap->iv_state != IEEE80211_S_RUN ||
seq != IEEE80211_AUTH_OPEN_REQUEST) {
vap->iv_stats.is_rx_bad_auth++;
return;
}
ieee80211_new_state(vap, IEEE80211_S_AUTH,
wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
break;
case IEEE80211_M_AHDEMO:
case IEEE80211_M_WDS:
/* should not come here */
break;
case IEEE80211_M_HOSTAP:
if (vap->iv_state != IEEE80211_S_RUN ||
seq != IEEE80211_AUTH_OPEN_REQUEST) {
vap->iv_stats.is_rx_bad_auth++;
mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
return;
}
/* always accept open authentication requests */
if (ni == vap->iv_bss) {
ni = ieee80211_dup_bss(vap, wh->i_addr2);
if (ni == NULL) {
mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
return;
}
ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
node_reference_held = 1;
}
IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
ni, "station authenticated (%s)", "open");
if (node_reference_held) {
ieee80211_free_node(ni);
}
mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH, 1);
break;
case IEEE80211_M_STA:
if (vap->iv_state != IEEE80211_S_AUTH ||
seq != IEEE80211_AUTH_OPEN_RESPONSE) {
vap->iv_stats.is_rx_bad_auth++;
return;
}
if (status != 0) {
IEEE80211_NOTE(vap,
IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
"open auth failed (reason %d)", status);
vap->iv_stats.is_rx_auth_fail++;
ieee80211_new_state(vap, IEEE80211_S_SCAN,
IEEE80211_SCAN_FAIL_STATUS);
} else
ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
break;
case IEEE80211_M_MONITOR:
break;
}
}
/*
* Send a management frame error response to the specified
* station. If ni is associated with the station then use
* it; otherwise allocate a temporary node suitable for
* transmitting the frame and then free the reference so
* it will go away as soon as the frame has been transmitted.
*/
static void
ieee80211_send_error(struct ieee80211_node *ni,
const u_int8_t *mac, int subtype, int arg)
{
struct ieee80211vap *vap = ni->ni_vap;
int node_reference_held = 0;
if (ni == vap->iv_bss) {
if (vap->iv_opmode == IEEE80211_M_STA) {
ni = _ieee80211_tmp_node(vap, mac, mac);
} else {
ni = ieee80211_tmp_node(vap, mac);
}
if (ni == NULL) {
return;
}
node_reference_held = 1;
}
IEEE80211_SEND_MGMT(ni, subtype, arg);
if (node_reference_held) {
ieee80211_free_node(ni);
}
}
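/*
 * Allocate the shared key challenge buffer for a node if it does not
 * already exist. Returns non-zero on success.
 */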
static int
alloc_challenge(struct ieee80211_node *ni)
{
if (ni->ni_challenge == NULL)
MALLOC(ni->ni_challenge, u_int32_t*, IEEE80211_CHALLENGE_LEN,
M_DEVBUF, M_NOWAIT);
if (ni->ni_challenge == NULL) {
IEEE80211_NOTE(ni->ni_vap,
IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
"%s", "shared key challenge alloc failed");
/* XXX statistic */
}
return (ni->ni_challenge != NULL);
}
/* XXX TODO: add statistics */
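/*
 * Handle a shared key (WEP) authentication frame for both AP and STA
 * roles, validating the challenge exchange across the four-frame
 * sequence and replying or advancing the state machine as appropriate.
 */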
static void
ieee80211_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
u_int8_t *frm, u_int8_t *efrm, int rssi, u_int32_t rstamp,
u_int16_t seq, u_int16_t status)
{
struct ieee80211vap *vap = ni->ni_vap;
u_int8_t *challenge;
int node_reference_held = 0;
int estatus;
/*
 * NB: this can happen as we allow shared key
* authentication to be enabled w/o wep being turned
* on so that configuration of these can be done
* in any order. It may be better to enforce the
* ordering in which case this check would just be
* for sanity/consistency.
*/
estatus = 0; /* NB: silence compiler */
if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"%s", " PRIVACY is disabled");
estatus = IEEE80211_STATUS_ALG;
goto bad;
}
/*
 * Shared key authentication is evil; accept
* it only if explicitly configured (it is supported
* mainly for compatibility with clients like OS X).
*/
if (ni->ni_authmode != IEEE80211_AUTH_AUTO &&
ni->ni_authmode != IEEE80211_AUTH_SHARED) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"bad sta auth mode %u", ni->ni_authmode);
vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */
estatus = IEEE80211_STATUS_ALG;
goto bad;
}
challenge = NULL;
if (frm + 1 < efrm) {
if ((frm[1] + 2) > (efrm - frm)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"ie %d/%d too long",
frm[0], (frm[1] + 2) - (efrm - frm));
vap->iv_stats.is_rx_bad_auth++;
estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
if (*frm == IEEE80211_ELEMID_CHALLENGE)
challenge = frm;
frm += frm[1] + 2;
}
switch (seq) {
case IEEE80211_AUTH_SHARED_CHALLENGE:
case IEEE80211_AUTH_SHARED_RESPONSE:
if (challenge == NULL) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"%s", "no challenge");
vap->iv_stats.is_rx_bad_auth++;
estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
if (challenge[1] != IEEE80211_CHALLENGE_LEN) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"bad challenge len %d", challenge[1]);
vap->iv_stats.is_rx_bad_auth++;
estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
default:
break;
}
switch (vap->iv_opmode) {
case IEEE80211_M_MONITOR:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_IBSS:
case IEEE80211_M_WDS:
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"bad operating mode %u", vap->iv_opmode);
return;
case IEEE80211_M_HOSTAP:
if (vap->iv_state != IEEE80211_S_RUN) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"bad state %u", vap->iv_state);
estatus = IEEE80211_STATUS_ALG; /* XXX */
goto bad;
}
switch (seq) {
case IEEE80211_AUTH_SHARED_REQUEST:
if (ni == vap->iv_bss) {
ni = ieee80211_dup_bss(vap, wh->i_addr2);
if (ni == NULL) {
return;
}
ni->ni_node_type = IEEE80211_NODE_TYPE_STA;
node_reference_held = 1;
}
ni->ni_rssi = rssi;
ni->ni_rstamp = rstamp;
ni->ni_last_rx = jiffies;
if (!alloc_challenge(ni)) {
/* NB: don't return error so they rexmit */
if (node_reference_held) {
ieee80211_free_node(ni);
}
return;
}
get_random_bytes(ni->ni_challenge,
IEEE80211_CHALLENGE_LEN);
IEEE80211_NOTE(vap,
IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
"shared key %sauth request", node_reference_held ? "" : "re");
break;
case IEEE80211_AUTH_SHARED_RESPONSE:
if (ni == vap->iv_bss) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key response",
"%s", "unknown station");
/* NB: don't send a response */
return;
}
if (ni->ni_challenge == NULL) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key response",
"%s", "no challenge recorded");
vap->iv_stats.is_rx_bad_auth++;
estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
if (memcmp(ni->ni_challenge, &challenge[2],
challenge[1]) != 0) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key response",
"%s", "challenge mismatch");
vap->iv_stats.is_rx_auth_fail++;
estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
IEEE80211_NOTE(vap,
IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH, ni,
"station authenticated (%s)", "shared key");
ieee80211_node_authorize(ni);
break;
default:
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"bad seq %d", seq);
vap->iv_stats.is_rx_bad_auth++;
estatus = IEEE80211_STATUS_SEQUENCE;
goto bad;
}
IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
if (node_reference_held) {
ieee80211_free_node(ni);
}
break;
case IEEE80211_M_STA:
if (vap->iv_state != IEEE80211_S_AUTH)
return;
switch (seq) {
case IEEE80211_AUTH_SHARED_PASS:
if (ni->ni_challenge != NULL) {
FREE(ni->ni_challenge, M_DEVBUF);
ni->ni_challenge = NULL;
}
if (status != 0) {
IEEE80211_NOTE_MAC(vap,
IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
ieee80211_getbssid(vap, wh),
"shared key auth failed (reason %d)",
status);
vap->iv_stats.is_rx_auth_fail++;
/* XXX IEEE80211_SCAN_FAIL_STATUS */
goto bad;
}
ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
break;
case IEEE80211_AUTH_SHARED_CHALLENGE:
if (!alloc_challenge(ni))
goto bad;
/* XXX could optimize by passing recvd challenge */
memcpy(ni->ni_challenge, &challenge[2], challenge[1]);
IEEE80211_SEND_MGMT(ni,
IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
break;
default:
IEEE80211_DISCARD(vap, IEEE80211_MSG_AUTH,
wh, "shared key auth", "bad seq %d", seq);
vap->iv_stats.is_rx_bad_auth++;
goto bad;
}
break;
}
if(vap->iv_opmode == IEEE80211_M_HOSTAP) {
mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH, 1);
}
return;
bad:
/*
* Send an error response; but only when operating as an AP.
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
/* XXX hack to workaround calling convention */
ieee80211_send_error(ni, wh->i_addr2,
IEEE80211_FC0_SUBTYPE_AUTH,
(seq + 1) | (estatus<<16));
mlme_stats_delayed_update(wh->i_addr2, MLME_STAT_AUTH_FAILS, 1);
} else if (vap->iv_opmode == IEEE80211_M_STA) {
/*
* Kick the state machine. This short-circuits
* using the mgt frame timeout to trigger the
* state transition.
*/
if (vap->iv_state == IEEE80211_S_AUTH)
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
}
}
/* Verify the existence and length of __elem or get out. */
#define IEEE80211_VERIFY_ELEMENT(__elem, __maxlen) do { \
if ((__elem) == NULL) { \
IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
wh, ieee80211_mgt_subtype_name[subtype >> \
IEEE80211_FC0_SUBTYPE_SHIFT], \
"%s", "no " #__elem ); \
vap->iv_stats.is_rx_elem_missing++; \
return; \
} \
if ((__elem)[1] > (__maxlen)) { \
IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
wh, ieee80211_mgt_subtype_name[subtype >> \
IEEE80211_FC0_SUBTYPE_SHIFT], \
"bad " #__elem " len %d", (__elem)[1]); \
vap->iv_stats.is_rx_elem_toobig++; \
return; \
} \
} while (0)
#define IEEE80211_VERIFY_LENGTH(_len, _minlen) do { \
if ((_len) < (_minlen)) { \
IEEE80211_DISCARD(vap, IEEE80211_MSG_ELEMID, \
wh, ieee80211_mgt_subtype_name[subtype >> \
IEEE80211_FC0_SUBTYPE_SHIFT], \
"%s", "ie too short"); \
vap->iv_stats.is_rx_elem_toosmall++; \
return; \
} \
} while (0)
#define IEEE80211_VERIFY_TDLS_LENGTH(_len, _minlen) do { \
if ((_len) < (_minlen)) { \
IEEE80211_TDLS_DPRINTF(vap, IEEE80211_MSG_TDLS, \
IEEE80211_TDLS_MSG_DBG, "%s: ie too short", __FUNCTION__); \
vap->iv_stats.is_rx_elem_toosmall++; \
return; \
} \
} while (0)
#ifdef IEEE80211_DEBUG
static void
ieee80211_ssid_mismatch(struct ieee80211vap *vap, const char *tag,
u_int8_t mac[IEEE80211_ADDR_LEN], u_int8_t *ssid)
{
printf("[%s] discard %s frame, ssid mismatch: ",
ether_sprintf(mac), tag);
ieee80211_print_essid(ssid + 2, ssid[1]);
printf("\n");
}
#endif
enum ieee80211_verify_ssid_action {
IEEE80211_VERIFY_SSID_ACTION_NO = 0,
IEEE80211_VERIFY_SSID_ACTION_RETURN = 1,
IEEE80211_VERIFY_SSID_ACTION_NODE_DEL_AND_RETURN = 2
};
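/*
 * Check a received SSID element against the vap's configured SSID and
 * policy.  The return value tells the caller whether to keep processing,
 * drop the frame, or (for assoc/reassoc requests from a known node) drop
 * the frame and delete the node as well.
 */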
static int ieee80211_verify_ssid(struct ieee80211vap *vap,
struct ieee80211_node *ni,
struct ieee80211_frame *wh,
u_int8_t *ssid,
int subtype)
{
if (ssid[1] != 0 &&
(ssid[1] != (vap->iv_bss)->ni_esslen ||
memcmp(ssid + 2, (vap->iv_bss)->ni_essid, ssid[1]) != 0)) {
#ifdef IEEE80211_DEBUG
if (ieee80211_msg_input(vap) &&
subtype != IEEE80211_FC0_SUBTYPE_PROBE_REQ) {
ieee80211_ssid_mismatch(vap,
ieee80211_mgt_subtype_name[subtype >>
IEEE80211_FC0_SUBTYPE_SHIFT],
wh->i_addr2, ssid);
}
#endif
vap->iv_stats.is_rx_ssidmismatch++;
if ((ni != vap->iv_bss) && ((subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
(subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ))) {
return IEEE80211_VERIFY_SSID_ACTION_NODE_DEL_AND_RETURN;
} else {
return IEEE80211_VERIFY_SSID_ACTION_RETURN;
}
} else if ((ssid[1] == 0) && (vap->iv_flags & IEEE80211_F_HIDESSID) &&
(subtype == IEEE80211_FC0_SUBTYPE_PROBE_REQ)) {
return IEEE80211_VERIFY_SSID_ACTION_RETURN;
}
/* Reject empty ssid in association requests */
if ((vap->iv_qtn_options & IEEE80211_QTN_NO_SSID_ASSOC_DISABLED) &&
((subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ) ||
(subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)) &&
(ssid[1] == 0)) {
return IEEE80211_VERIFY_SSID_ACTION_RETURN;
}
return IEEE80211_VERIFY_SSID_ACTION_NO;
}
/* unaligned little endian access */
#define LE_READ_2(p) \
((u_int16_t) \
((((const u_int8_t *)(p))[0] ) | \
(((const u_int8_t *)(p))[1] << 8)))
#define LE_READ_3(p) \
((u_int32_t) \
((((const u_int8_t *)(p))[0] ) | \
(((const u_int8_t *)(p))[1] << 8) | \
(((const u_int8_t *)(p))[2] << 16)))
#define LE_READ_4(p) \
((u_int32_t) \
((((const u_int8_t *)(p))[0] ) | \
(((const u_int8_t *)(p))[1] << 8) | \
(((const u_int8_t *)(p))[2] << 16) | \
(((const u_int8_t *)(p))[3] << 24)))
#define BE_READ_2(p) \
((u_int16_t) \
((((const u_int8_t *)(p))[1] ) | \
(((const u_int8_t *)(p))[0] << 8)))
#define BE_READ_4(p) \
((u_int32_t) \
((((const u_int8_t *)(p))[3] ) | \
(((const u_int8_t *)(p))[2] << 8) | \
(((const u_int8_t *)(p))[1] << 16) | \
(((const u_int8_t *)(p))[0] << 24)))
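/*
 * For illustration: in a WPA vendor IE beginning dd xx 00 50 f2 01 ...,
 * LE_READ_4(frm+2) reads the OUI bytes plus the type byte little endian,
 * giving 0x01f25000; the isXXXoui() helpers below compare such values
 * against their (type << 24) | OUI constants.
 */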
static __inline int
iswpaoui(const u_int8_t *frm)
{
return frm[1] > 3 && LE_READ_4(frm+2) == ((WPA_RSN_OUI_TYPE<<24)|WPA_OUI);
}
static __inline int
iswmeoui(const u_int8_t *frm)
{
return frm[1] > 3 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI);
}
static __inline int
iswmeparam(const u_int8_t *frm)
{
return frm[1] > 5 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI) &&
frm[6] == WME_PARAM_OUI_SUBTYPE;
}
static __inline int
iswmeinfo(const u_int8_t *frm)
{
return frm[1] > 5 && LE_READ_4(frm+2) == ((WME_OUI_TYPE<<24)|WME_OUI) &&
frm[6] == WME_INFO_OUI_SUBTYPE;
}
static __inline int
iswscoui(const u_int8_t *frm)
{
return frm[1] > 3 && LE_READ_4(frm+2) == ((WSC_OUI_TYPE<<24)|WPA_OUI);
}
static __inline int
isatherosoui(const u_int8_t *frm)
{
return frm[1] > 3 && LE_READ_4(frm+2) == ((ATH_OUI_TYPE<<24)|ATH_OUI);
}
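/*
 * The vendor IE helpers below identify Quantenna (and other vendor)
 * elements by masking the little endian read down to the three OUI bytes
 * (the 0x00ffffff mask) and then checking the vendor subtype byte that
 * follows the OUI.
 */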
static __inline int
isqtnie(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_CFG)));
}
static __inline int
isosenie(const u_int8_t *frm)
{
return (frm[1] > 3 &&
(LE_READ_3(frm + 2) == WFA_OUI) &&
((frm[5] == WFA_TYPE_OSEN)));
}
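/*
 * Heuristic used to spot a likely Marvell peer: no Broadcom, Ralink,
 * Realtek or Quantenna vendor IEs, not an Intel node, and a VHT SU
 * beamformee (but not beamformer) advertising 4 beamformee STS without
 * 3-spatial-stream RX MCS support.
 */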
static __inline int
is_peer_mrvl(u_int8_t *rlnk, void *bcmie, void *rtkie, struct ieee80211_ie_qtn *qtnie,
struct ieee80211_ie_vhtcap *vhtcap, struct ieee80211_node *ni)
{
if (unlikely(!bcmie && !qtnie && !rlnk && !rtkie && !ieee80211_node_is_intel(ni) &&
(ni->ni_flags & IEEE80211_NODE_VHT) &&
!IEEE80211_VHTCAP_GET_SU_BEAMFORMER((struct ieee80211_ie_vhtcap *)vhtcap) &&
IEEE80211_VHTCAP_GET_SU_BEAMFORMEE((struct ieee80211_ie_vhtcap *)vhtcap) &&
(IEEE80211_VHTCAP_GET_BFSTSCAP((struct ieee80211_ie_vhtcap *)vhtcap) == IEEE80211_VHTCAP_RX_STS_4)) &&
!IEEE80211_VHT_HAS_3SS(ni->ni_vhtcap.rxmcsmap)) {
return 1;
}
return 0;
}
#ifdef CONFIG_QVSP
static __inline int
isvspie(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_VSP_CTRL)));
}
static __inline int
isqtnwmeie(const uint8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm + 2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_QWME)));
}
#endif
static __inline int
is_qtn_scs_oui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_SCS)));
}
static __inline int
isbroadcomoui(const u_int8_t *frm)
{
return (frm[1] > 3 && (LE_READ_4(frm+2) & 0x00ffffff) == BCM_OUI);
}
static __inline int
isbroadcomoui2(const u_int8_t *frm)
{
return (frm[1] > 3 && (LE_READ_4(frm+2) & 0x00ffffff) == BCM_OUI_2);
}
static __inline int
isqtnpairoui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
(frm[5] == QTN_OUI_PAIRING));
}
static __inline int
is_qtn_oui_tdls_brmacs(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_TDLS_BRMACS)));
}
static __inline int
is_qtn_oui_tdls_sta_info(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
((frm[5] == QTN_OUI_TDLS)));
}
static __inline int
isrlnkoui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == RLNK_OUI));
}
static __inline int
is_qtn_ext_role_oui(const u_int8_t *frm)
{
return ((frm[1] > 3) &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
(frm[5] == QTN_OUI_EXTENDER_ROLE));
}
static __inline int
is_qtn_ext_bssid_oui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
(frm[5] == QTN_OUI_EXTENDER_BSSID));
}
static __inline int
is_qtn_ext_state_oui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == QTN_OUI) &&
(frm[5] == QTN_OUI_EXTENDER_STATE));
}
static __inline int
isrealtekoui(const u_int8_t *frm)
{
return (frm[1] > 3 &&
((LE_READ_4(frm+2) & 0x00ffffff) == RTK_OUI));
}
static __inline int
isqtnmrespoui(const u_int8_t *frm)
{
return (frm[0] == IEEE80211_ELEMID_VENDOR) &&
(frm[1] >= 5) && (LE_READ_3(&frm[2]) == QTN_OUI) &&
(frm[6] == QTN_OUI_RM_SPCIAL || frm[6] == QTN_OUI_RM_ALL);
}
static __inline int
isbrcmvhtoui(const u_int8_t *frm)
{
return (frm[0] == IEEE80211_ELEMID_VENDOR) &&
(frm[1] >= 5) && (LE_READ_3(&frm[2]) == BCM_OUI) &&
(LE_READ_2(&frm[5]) == BCM_OUI_VHT_TYPE);
}
static __inline int
is_qtn_ocac_state_ie(const u_int8_t *frm)
{
return (frm[0] == IEEE80211_ELEMID_VENDOR) &&
(frm[1] == OCAC_STATE_IE_LEN) &&
(LE_READ_3(&frm[2]) == QTN_OUI) &&
(frm[5] == QTN_OUI_OCAC_STATE);
}
/*
* Convert a WPA cipher selector OUI to an internal
* cipher algorithm. Where appropriate we also
* record any key length.
*/
static int
wpa_cipher(u_int8_t *sel, u_int8_t *keylen)
{
#define WPA_SEL(x) (((x) << 24) | WPA_OUI)
u_int32_t w = LE_READ_4(sel);
switch (w) {
case WPA_SEL(WPA_CSE_NULL):
return IEEE80211_CIPHER_NONE;
case WPA_SEL(WPA_CSE_WEP40):
if (keylen)
*keylen = 40 / NBBY;
return IEEE80211_CIPHER_WEP;
case WPA_SEL(WPA_CSE_WEP104):
if (keylen)
*keylen = 104 / NBBY;
return IEEE80211_CIPHER_WEP;
case WPA_SEL(WPA_CSE_TKIP):
return IEEE80211_CIPHER_TKIP;
case WPA_SEL(WPA_CSE_CCMP):
return IEEE80211_CIPHER_AES_CCM;
}
return 32; /* NB: so 1<< is discarded */
#undef WPA_SEL
}
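/*
 * For illustration: the pairwise selector bytes 00 50 f2 04 (CCMP) read
 * back as WPA_SEL(WPA_CSE_CCMP) and map to IEEE80211_CIPHER_AES_CCM;
 * unknown selectors return 32 so that, as noted above, the caller's
 * 1<< result falls outside the recognized cipher set.
 */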
/*
* Convert a WPA key management/authentication algorithm
* to an internal code.
*/
static int
wpa_keymgmt(u_int8_t *sel)
{
#define WPA_SEL(x) (((x)<<24)|WPA_OUI)
u_int32_t w = LE_READ_4(sel);
switch (w) {
case WPA_SEL(WPA_ASE_8021X_UNSPEC):
return WPA_ASE_8021X_UNSPEC;
case WPA_SEL(WPA_ASE_8021X_PSK):
return WPA_ASE_8021X_PSK;
case WPA_SEL(WPA_ASE_NONE):
return WPA_ASE_NONE;
}
return 0; /* NB: so it is discarded when OR'd into the set */
#undef WPA_SEL
}
/*
* Parse a WPA information element to collect parameters
* and validate the parameters against what has been
* configured for the system.
*/
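/*
 * For reference, the layout walked below (offsets from the element id):
 *   [0] id 0xdd   [1] length   [2..4] OUI 00:50:f2   [5] type 0x01
 *   [6..7] version (LE)        [8..11] group cipher selector
 *   [12..13] pairwise count (LE), then count * 4 selector bytes,
 *   followed by the AKM count/list and optional capabilities.
 */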
static int
ieee80211_parse_wpa(struct ieee80211vap *vap, u_int8_t *frm,
struct ieee80211_rsnparms *rsn_parm, const struct ieee80211_frame *wh)
{
u_int8_t len = frm[1];
u_int32_t w;
int n;
struct ieee80211com *ic = vap->iv_ic;
/*
* Check the length once for fixed parts: OUI, type,
* version, mcast cipher, and 2 selector counts.
* Other, variable-length data, must be checked separately.
*/
if (!(vap->iv_flags & IEEE80211_F_WPA1)) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "vap not WPA, flags 0x%x", vap->iv_flags);
return IEEE80211_REASON_IE_INVALID;
}
if (len < 14) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "too short, len %u", len);
return IEEE80211_REASON_IE_INVALID;
}
frm += 6, len -= 4; /* NB: len is payload only */
/* NB: iswpaoui already validated the OUI and type */
w = LE_READ_2(frm);
if (w != WPA_VERSION) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "bad version %u", w);
return IEEE80211_REASON_IE_INVALID;
}
frm += 2;
len -= 2;
/* multicast/group cipher */
w = wpa_cipher(frm, &rsn_parm->rsn_mcastkeylen);
if (w != rsn_parm->rsn_mcastcipher) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "mcast cipher mismatch; got %u, expected %u",
w, rsn_parm->rsn_mcastcipher);
return IEEE80211_REASON_IE_INVALID;
}
if (!IEEE80211_IS_TKIP_ALLOWED(ic)) {
if (w == IEEE80211_CIPHER_TKIP)
return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
}
frm += 4;
len -= 4;
/* unicast ciphers */
n = LE_READ_2(frm);
frm += 2;
len -= 2;
if (len < n * 4 + 2) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "ucast cipher data too short; len %u, n %u",
len, n);
return IEEE80211_REASON_IE_INVALID;
}
w = 0;
for (; n > 0; n--) {
w |= 1 << wpa_cipher(frm, &rsn_parm->rsn_ucastkeylen);
frm += 4;
len -= 4;
}
w &= rsn_parm->rsn_ucastcipherset;
if (w == 0) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "%s", "ucast cipher set empty");
return IEEE80211_REASON_IE_INVALID;
}
if (w & (1 << IEEE80211_CIPHER_TKIP)) {
if (!IEEE80211_IS_TKIP_ALLOWED(ic))
return IEEE80211_REASON_STA_CIPHER_NOT_SUPP;
else
rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_TKIP;
} else {
rsn_parm->rsn_ucastcipher = IEEE80211_CIPHER_AES_CCM;
}
/* key management algorithms */
n = LE_READ_2(frm);
frm += 2;
len -= 2;
if (len < n * 4) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "key mgmt alg data too short; len %u, n %u",
len, n);
return IEEE80211_REASON_IE_INVALID;
}
w = 0;
for (; n > 0; n--) {
w |= wpa_keymgmt(frm);
frm += 4;
len -= 4;
}
w &= rsn_parm->rsn_keymgmtset;
if (w == 0) {
IEEE80211_DISCARD_IE(vap,
IEEE80211_MSG_ELEMID | IEEE80211_MSG_WPA,
wh, "WPA", "%s", "no acceptable key mgmt alg");
return IEEE80211_REASON_IE_INVALID;
}
if (w & WPA_ASE_8021X_UNSPEC)
rsn_parm->rsn_keymgmt = WPA_ASE_8021X_UNSPEC;
else
rsn_parm->rsn_keymgmt = WPA_ASE_8021X_PSK;
if (len > 2) /* optional capabilities */
rsn_parm->rsn_caps = LE_READ_2(frm);
return 0;
}
/*
* Convert an RSN cipher selector OUI to an internal
* cipher algorithm. Where appropriate we also
* record any key length.
*/
static int
rsn_cipher(u_int8_t *sel, u_int8_t *keylen)
{
#define RSN_SEL(x) (((x) << 24) | RSN_OUI)
u_int32_t w = LE_READ_4(sel);
switch (w) {
case RSN_SEL(RSN_CSE_NULL):
return IEEE80211_CIPHER_NONE;
case RSN_SEL(RSN_CSE_WEP40):
if (keylen)
*keylen = 40 / NBBY;
return IEEE80211_CIPHER_WEP;
case RSN_SEL(RSN_CSE_WEP104):
if (keylen)
*keylen = 104 / NBBY;
return IEEE80211_CIPHER_WEP;
case RSN_SEL(RSN_CSE_TKIP):
return IEEE80211_CIPHER_TKIP;
case RSN_SEL(RSN_CSE_CCMP):
return IEEE80211_CIPHER_AES_CCM;
case RSN_SEL(RSN_CSE_WRAP):
return IEEE80211_CIPHER_AES_OCB;
}
return 32; /* NB: so 1<< is discarded */
#undef RSN_SEL
}
/*
* Convert an RSN key management/authentication algorithm
* to an internal code.
*/
static int
rsn_keymgmt(u_int8_t *sel)
{
#define RSN_SEL(x) (((x) << 24) | RSN_OUI)
u_int32_t w = LE_READ_4(sel);
switch (w) {
case RSN_SEL(RSN_ASE_8021X_UNSPEC):
return RSN_ASE_8021X_UNSPEC;
case RSN_SEL(RSN_ASE_8021X_PSK):
return RSN_ASE_8021X_PSK;
case RSN_SEL(RSN_ASE_FT_PSK):
return RSN_ASE_FT_PSK;
case RSN_SEL(RSN_ASE_FT_8021X):
return RSN_ASE_FT_8021X;
case RSN_SEL(RSN_ASE_8021X_SHA256):
return RSN_ASE_8021X_SHA256;
case RSN_SEL(RSN_ASE_8021X_PSK_SHA256):
return RSN_ASE_8021X_PSK_SHA256;
case RSN_SEL(RSN_ASE_NONE):
return RSN_ASE_NONE;
}
return 0; /* NB: so it is discarded when OR'd into the set */
#undef RSN_SEL
}
/*
* Parse a WPA/RSN information element to collect parameters
* and populate the RSN parameters structure.
*/
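/*
 * RSN element layout walked below: element id 0x30, length, version (LE),
 * group cipher selector, pairwise count/list, AKM count/list, followed by
 * the optional RSN capabilities field; selectors use the 00:0f:ac OUI
 * matched by RSN_SEL() in rsn_cipher()/rsn_keymgmt() above.
 */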
int
ieee80211_get_rsn_from_ie(struct ieee80211vap *vap, u_int8_t *frm,
struct ieee80211_rsnparms *rsn_parm)
{
u_int8_t len = frm[1];
u_int32_t w;
int n;
/*
* Check the length once for fixed parts:
* version, mcast cipher, and 2 selector counts.
* Other, variable-length data, must be checked separately.
*/
if (!(vap->iv_flags & IEEE80211_F_WPA2)) {
printk("vap not RSN, flags 0x%x\n", vap->iv_flags);
return IEEE80211_REASON_IE_INVALID;
}
if (len < 10) {
printk("too short, len %u\n", len);
return IEEE80211_REASON_IE_INVALID;
}
frm += 2;
w = LE_READ_2(frm);
if (w != RSN_VERSION) {
printk("bad version %u\n", w);
return IEEE80211_REASON_IE_INVALID;
}
frm += 2;
len -= 2;
/* multicast/group cipher */
w = rsn_cipher(frm, &rsn_parm->rsn_mcastkeylen);
rsn_parm->rsn_mcastcipher = w;
frm += 4;
len -= 4;
/* unicast ciphers */
n = LE_READ_2(frm);
frm += 2;
len -= 2;
if (len < n * 4 + 2) {
printk("ucast cipher data too short; len %u, n %u\n",
len, n);
return IEEE80211_REASON_IE_INVALID;
}
w = 0;