2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
26 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_scan_ev_arg *arg);
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_mgmt_rx_ev_arg *arg);
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_ch_info_ev_arg *arg);
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_vdev_start_ev_arg *arg);
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_peer_kick_ev_arg *arg);
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_swba_ev_arg *arg);
40 int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_phyerr_ev_arg *arg);
42 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_svc_rdy_ev_arg *arg);
44 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_rdy_ev_arg *arg);
46 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
47 struct ath10k_fw_stats *stats);
48 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
49 struct wmi_roam_ev_arg *arg);
50 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_wow_ev_arg *arg);
53 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
54 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
55 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
56 u16 rd5g, u16 ctl2g, u16 ctl5g,
57 enum wmi_dfs_region dfs_reg);
58 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
60 struct sk_buff *(*gen_init)(struct ath10k *ar);
61 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
62 const struct wmi_start_scan_arg *arg);
63 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
64 const struct wmi_stop_scan_arg *arg);
65 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
66 enum wmi_vdev_type type,
67 enum wmi_vdev_subtype subtype,
68 const u8 macaddr[ETH_ALEN]);
69 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
70 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
71 const struct wmi_vdev_start_request_arg *arg,
73 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
74 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
76 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
77 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
78 u32 param_id, u32 param_value);
79 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
80 const struct wmi_vdev_install_key_arg *arg);
81 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
82 const struct wmi_vdev_spectral_conf_arg *arg);
83 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
84 u32 trigger, u32 enable);
85 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
86 const struct wmi_wmm_params_all_arg *arg);
87 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
88 const u8 peer_addr[ETH_ALEN],
89 enum wmi_peer_type peer_type);
90 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
91 const u8 peer_addr[ETH_ALEN]);
92 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
93 const u8 peer_addr[ETH_ALEN],
95 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
97 enum wmi_peer_param param_id,
99 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
100 const struct wmi_peer_assoc_complete_arg *arg);
101 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
102 enum wmi_sta_ps_mode psmode);
103 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
104 enum wmi_sta_powersave_param param_id,
106 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
108 enum wmi_ap_ps_peer_param param_id,
110 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
111 const struct wmi_scan_chan_list_arg *arg);
112 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
113 const void *bcn, size_t bcn_len,
114 u32 bcn_paddr, bool dtim_zero,
116 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
117 const struct wmi_wmm_params_all_arg *arg);
118 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
119 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
120 enum wmi_force_fw_hang_type type,
122 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
123 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
125 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
126 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
127 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
128 u32 period, u32 duration,
131 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
132 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
134 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
135 const u8 *mac, u32 tid, u32 buf_size);
136 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
137 const u8 *mac, u32 tid,
139 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
140 const u8 *mac, u32 tid, u32 initiator,
142 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
143 u32 tim_ie_offset, struct sk_buff *bcn,
144 u32 prb_caps, u32 prb_erp,
145 void *prb_ies, size_t prb_ies_len);
146 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
147 struct sk_buff *bcn);
148 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
150 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
151 const u8 peer_addr[ETH_ALEN],
152 const struct wmi_sta_uapsd_auto_trig_arg *args,
154 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
155 const struct wmi_sta_keepalive_arg *arg);
156 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
157 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
158 enum wmi_wow_wakeup_event event,
160 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
161 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
167 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
169 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
171 enum wmi_tdls_state state);
172 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
173 const struct wmi_tdls_peer_update_cmd_arg *arg,
174 const struct wmi_tdls_peer_capab_arg *cap,
175 const struct wmi_channel_arg *chan);
176 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
179 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
182 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
184 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
187 ar->wmi.ops->rx(ar, skb);
192 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
195 if (!ar->wmi.ops->map_svc)
198 ar->wmi.ops->map_svc(in, out, len);
203 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
204 struct wmi_scan_ev_arg *arg)
206 if (!ar->wmi.ops->pull_scan)
209 return ar->wmi.ops->pull_scan(ar, skb, arg);
213 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
214 struct wmi_mgmt_rx_ev_arg *arg)
216 if (!ar->wmi.ops->pull_mgmt_rx)
219 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
223 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
224 struct wmi_ch_info_ev_arg *arg)
226 if (!ar->wmi.ops->pull_ch_info)
229 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
233 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
234 struct wmi_vdev_start_ev_arg *arg)
236 if (!ar->wmi.ops->pull_vdev_start)
239 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
243 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
244 struct wmi_peer_kick_ev_arg *arg)
246 if (!ar->wmi.ops->pull_peer_kick)
249 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
253 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
254 struct wmi_swba_ev_arg *arg)
256 if (!ar->wmi.ops->pull_swba)
259 return ar->wmi.ops->pull_swba(ar, skb, arg);
263 ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
264 struct wmi_phyerr_ev_arg *arg)
266 if (!ar->wmi.ops->pull_phyerr)
269 return ar->wmi.ops->pull_phyerr(ar, skb, arg);
273 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
274 struct wmi_svc_rdy_ev_arg *arg)
276 if (!ar->wmi.ops->pull_svc_rdy)
279 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
283 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
284 struct wmi_rdy_ev_arg *arg)
286 if (!ar->wmi.ops->pull_rdy)
289 return ar->wmi.ops->pull_rdy(ar, skb, arg);
293 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
294 struct ath10k_fw_stats *stats)
296 if (!ar->wmi.ops->pull_fw_stats)
299 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
303 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
304 struct wmi_roam_ev_arg *arg)
306 if (!ar->wmi.ops->pull_roam_ev)
309 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
313 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
314 struct wmi_wow_ev_arg *arg)
316 if (!ar->wmi.ops->pull_wow_event)
319 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
323 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
325 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
329 if (!ar->wmi.ops->gen_mgmt_tx)
332 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
336 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
340 /* FIXME There's no ACK event for Management Tx. This probably
341 * shouldn't be called here either. */
342 info->flags |= IEEE80211_TX_STAT_ACK;
343 ieee80211_tx_status_irqsafe(ar->hw, msdu);
349 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
350 u16 ctl2g, u16 ctl5g,
351 enum wmi_dfs_region dfs_reg)
355 if (!ar->wmi.ops->gen_pdev_set_rd)
358 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
363 return ath10k_wmi_cmd_send(ar, skb,
364 ar->wmi.cmd->pdev_set_regdomain_cmdid);
368 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
372 if (!ar->wmi.ops->gen_pdev_suspend)
375 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
379 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
383 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
387 if (!ar->wmi.ops->gen_pdev_resume)
390 skb = ar->wmi.ops->gen_pdev_resume(ar);
394 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
398 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
402 if (!ar->wmi.ops->gen_pdev_set_param)
405 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
409 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
413 ath10k_wmi_cmd_init(struct ath10k *ar)
417 if (!ar->wmi.ops->gen_init)
420 skb = ar->wmi.ops->gen_init(ar);
424 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
428 ath10k_wmi_start_scan(struct ath10k *ar,
429 const struct wmi_start_scan_arg *arg)
433 if (!ar->wmi.ops->gen_start_scan)
436 skb = ar->wmi.ops->gen_start_scan(ar, arg);
440 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
444 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
448 if (!ar->wmi.ops->gen_stop_scan)
451 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
455 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
459 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
460 enum wmi_vdev_type type,
461 enum wmi_vdev_subtype subtype,
462 const u8 macaddr[ETH_ALEN])
466 if (!ar->wmi.ops->gen_vdev_create)
469 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
473 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
477 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
481 if (!ar->wmi.ops->gen_vdev_delete)
484 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
488 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
492 ath10k_wmi_vdev_start(struct ath10k *ar,
493 const struct wmi_vdev_start_request_arg *arg)
497 if (!ar->wmi.ops->gen_vdev_start)
500 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
504 return ath10k_wmi_cmd_send(ar, skb,
505 ar->wmi.cmd->vdev_start_request_cmdid);
509 ath10k_wmi_vdev_restart(struct ath10k *ar,
510 const struct wmi_vdev_start_request_arg *arg)
514 if (!ar->wmi.ops->gen_vdev_start)
517 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
521 return ath10k_wmi_cmd_send(ar, skb,
522 ar->wmi.cmd->vdev_restart_request_cmdid);
526 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
530 if (!ar->wmi.ops->gen_vdev_stop)
533 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
537 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
541 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
545 if (!ar->wmi.ops->gen_vdev_up)
548 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
552 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
556 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
560 if (!ar->wmi.ops->gen_vdev_down)
563 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
567 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
571 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
576 if (!ar->wmi.ops->gen_vdev_set_param)
579 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
584 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
588 ath10k_wmi_vdev_install_key(struct ath10k *ar,
589 const struct wmi_vdev_install_key_arg *arg)
593 if (!ar->wmi.ops->gen_vdev_install_key)
596 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
600 return ath10k_wmi_cmd_send(ar, skb,
601 ar->wmi.cmd->vdev_install_key_cmdid);
605 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
606 const struct wmi_vdev_spectral_conf_arg *arg)
611 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
615 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
616 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
620 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
626 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
631 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
632 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
636 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
637 const u8 peer_addr[ETH_ALEN],
638 const struct wmi_sta_uapsd_auto_trig_arg *args,
644 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
647 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
652 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
653 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
657 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
658 const struct wmi_wmm_params_all_arg *arg)
663 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
667 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
668 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
672 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
673 const u8 peer_addr[ETH_ALEN],
674 enum wmi_peer_type peer_type)
678 if (!ar->wmi.ops->gen_peer_create)
681 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
685 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
689 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
690 const u8 peer_addr[ETH_ALEN])
694 if (!ar->wmi.ops->gen_peer_delete)
697 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
701 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
705 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
706 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
710 if (!ar->wmi.ops->gen_peer_flush)
713 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
717 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
721 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
722 enum wmi_peer_param param_id, u32 param_value)
726 if (!ar->wmi.ops->gen_peer_set_param)
729 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
734 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
738 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
739 enum wmi_sta_ps_mode psmode)
743 if (!ar->wmi.ops->gen_set_psmode)
746 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
750 return ath10k_wmi_cmd_send(ar, skb,
751 ar->wmi.cmd->sta_powersave_mode_cmdid);
755 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
756 enum wmi_sta_powersave_param param_id, u32 value)
760 if (!ar->wmi.ops->gen_set_sta_ps)
763 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
767 return ath10k_wmi_cmd_send(ar, skb,
768 ar->wmi.cmd->sta_powersave_param_cmdid);
772 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
773 enum wmi_ap_ps_peer_param param_id, u32 value)
777 if (!ar->wmi.ops->gen_set_ap_ps)
780 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
784 return ath10k_wmi_cmd_send(ar, skb,
785 ar->wmi.cmd->ap_ps_peer_param_cmdid);
789 ath10k_wmi_scan_chan_list(struct ath10k *ar,
790 const struct wmi_scan_chan_list_arg *arg)
794 if (!ar->wmi.ops->gen_scan_chan_list)
797 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
801 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
805 ath10k_wmi_peer_assoc(struct ath10k *ar,
806 const struct wmi_peer_assoc_complete_arg *arg)
810 if (!ar->wmi.ops->gen_peer_assoc)
813 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
817 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
821 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
822 const void *bcn, size_t bcn_len,
823 u32 bcn_paddr, bool dtim_zero,
829 if (!ar->wmi.ops->gen_beacon_dma)
832 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
833 dtim_zero, deliver_cab);
837 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
838 ar->wmi.cmd->pdev_send_bcn_cmdid);
848 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
849 const struct wmi_wmm_params_all_arg *arg)
853 if (!ar->wmi.ops->gen_pdev_set_wmm)
856 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
860 return ath10k_wmi_cmd_send(ar, skb,
861 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
865 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
869 if (!ar->wmi.ops->gen_request_stats)
872 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
876 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
880 ath10k_wmi_force_fw_hang(struct ath10k *ar,
881 enum wmi_force_fw_hang_type type, u32 delay_ms)
885 if (!ar->wmi.ops->gen_force_fw_hang)
888 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
892 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
896 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
900 if (!ar->wmi.ops->gen_dbglog_cfg)
903 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
907 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
911 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
915 if (!ar->wmi.ops->gen_pktlog_enable)
918 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
922 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
926 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
930 if (!ar->wmi.ops->gen_pktlog_disable)
933 skb = ar->wmi.ops->gen_pktlog_disable(ar);
937 return ath10k_wmi_cmd_send(ar, skb,
938 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
942 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
943 u32 next_offset, u32 enabled)
947 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
950 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
951 next_offset, enabled);
955 return ath10k_wmi_cmd_send(ar, skb,
956 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
960 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
964 if (!ar->wmi.ops->gen_pdev_get_temperature)
967 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
971 return ath10k_wmi_cmd_send(ar, skb,
972 ar->wmi.cmd->pdev_get_temperature_cmdid);
976 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
980 if (!ar->wmi.ops->gen_addba_clear_resp)
983 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
987 return ath10k_wmi_cmd_send(ar, skb,
988 ar->wmi.cmd->addba_clear_resp_cmdid);
992 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
993 u32 tid, u32 buf_size)
997 if (!ar->wmi.ops->gen_addba_send)
1000 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1002 return PTR_ERR(skb);
1004 return ath10k_wmi_cmd_send(ar, skb,
1005 ar->wmi.cmd->addba_send_cmdid);
1009 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1010 u32 tid, u32 status)
1012 struct sk_buff *skb;
1014 if (!ar->wmi.ops->gen_addba_set_resp)
1017 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1019 return PTR_ERR(skb);
1021 return ath10k_wmi_cmd_send(ar, skb,
1022 ar->wmi.cmd->addba_set_resp_cmdid);
1026 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1027 u32 tid, u32 initiator, u32 reason)
1029 struct sk_buff *skb;
1031 if (!ar->wmi.ops->gen_delba_send)
1034 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1037 return PTR_ERR(skb);
1039 return ath10k_wmi_cmd_send(ar, skb,
1040 ar->wmi.cmd->delba_send_cmdid);
1044 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1045 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1046 void *prb_ies, size_t prb_ies_len)
1048 struct sk_buff *skb;
1050 if (!ar->wmi.ops->gen_bcn_tmpl)
1053 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1054 prb_caps, prb_erp, prb_ies,
1057 return PTR_ERR(skb);
1059 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1063 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1065 struct sk_buff *skb;
1067 if (!ar->wmi.ops->gen_prb_tmpl)
1070 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1072 return PTR_ERR(skb);
1074 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1078 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1080 struct sk_buff *skb;
1082 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1085 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1087 return PTR_ERR(skb);
1089 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1093 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1094 const struct wmi_sta_keepalive_arg *arg)
1096 struct sk_buff *skb;
1099 if (!ar->wmi.ops->gen_sta_keepalive)
1102 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1104 return PTR_ERR(skb);
1106 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1107 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1111 ath10k_wmi_wow_enable(struct ath10k *ar)
1113 struct sk_buff *skb;
1116 if (!ar->wmi.ops->gen_wow_enable)
1119 skb = ar->wmi.ops->gen_wow_enable(ar);
1121 return PTR_ERR(skb);
1123 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1124 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1128 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1129 enum wmi_wow_wakeup_event event,
1132 struct sk_buff *skb;
1135 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1138 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1140 return PTR_ERR(skb);
1142 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1143 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1147 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1149 struct sk_buff *skb;
1152 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1155 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1157 return PTR_ERR(skb);
1159 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1160 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1164 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1165 const u8 *pattern, const u8 *mask,
1166 int pattern_len, int pattern_offset)
1168 struct sk_buff *skb;
1171 if (!ar->wmi.ops->gen_wow_add_pattern)
1174 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1175 pattern, mask, pattern_len,
1178 return PTR_ERR(skb);
1180 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1181 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1185 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1187 struct sk_buff *skb;
1190 if (!ar->wmi.ops->gen_wow_del_pattern)
1193 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1195 return PTR_ERR(skb);
1197 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1198 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1202 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1203 enum wmi_tdls_state state)
1205 struct sk_buff *skb;
1207 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1210 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1212 return PTR_ERR(skb);
1214 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1218 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1219 const struct wmi_tdls_peer_update_cmd_arg *arg,
1220 const struct wmi_tdls_peer_capab_arg *cap,
1221 const struct wmi_channel_arg *chan)
1223 struct sk_buff *skb;
1225 if (!ar->wmi.ops->gen_tdls_peer_update)
1228 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1230 return PTR_ERR(skb);
1232 return ath10k_wmi_cmd_send(ar, skb,
1233 ar->wmi.cmd->tdls_peer_update_cmdid);
1237 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1239 struct sk_buff *skb;
1241 if (!ar->wmi.ops->gen_adaptive_qcs)
1244 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1246 return PTR_ERR(skb);
1248 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);