path: root/kernel/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
blob: 75cbd1509b52043863de0088ad18b307c412b85f (plain)
/******************************************************************************
 *
 * Copyright(c) 2009-2014  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "../wifi.h"
#include "phy_common.h"
#include "../rtl8723ae/reg.h"
#include <linux/module.h>

/* These routines are common to RTL8723AE and RTL8723BE */

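/* Read a baseband register and return the field selected by @bitmask,
 * shifted down to bit 0.
 */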
u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
			     u32 regaddr, u32 bitmask)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 returnvalue, originalvalue, bitshift;

	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
	originalvalue = rtl_read_dword(rtlpriv, regaddr);
	bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
	returnvalue = (originalvalue & bitmask) >> bitshift;

	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
		 regaddr, originalvalue);
	return returnvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);

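/* Read-modify-write the field selected by @bitmask in a baseband
 * register; a MASKDWORD bitmask writes the whole dword directly.
 */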
void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
			      u32 bitmask, u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 originalvalue, bitshift;

	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
		 data);

	if (bitmask != MASKDWORD) {
		originalvalue = rtl_read_dword(rtlpriv, regaddr);
		bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
		data = ((originalvalue & (~bitmask)) | (data << bitshift));
	}

	rtl_write_dword(rtlpriv, regaddr, data);

	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
		 regaddr, bitmask, data);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);

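/* Return the position of the least-significant set bit in @bitmask,
 * i.e. the shift needed to align the masked field with bit 0.  A zero
 * mask yields 32.
 */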
u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
{
	u32 i;

	for (i = 0; i <= 31; i++) {
		if (((bitmask >> i) & 0x1) == 1)
			break;
	}
	return i;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);

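/* Read an RF register over the LSSI (3-wire serial) interface: load
 * the read address into HSSI parameter 2, toggle the read-edge bit to
 * strobe the transfer, then fetch the result from the PI- or SI-mode
 * readback register, depending on which interface is enabled.
 */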
u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
			       enum radio_path rfpath, u32 offset)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
	u32 newoffset;
	u32 tmplong, tmplong2;
	u8 rfpi_enable = 0;
	u32 retvalue;

	offset &= 0xff;
	newoffset = offset;
	if (RT_CANNOT_IO(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
		return 0xFFFFFFFF;
	}
	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
	if (rfpath == RF90_PATH_A)
		tmplong2 = tmplong;
	else
		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
	    (newoffset << 23) | BLSSIREADEDGE;
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong & (~BLSSIREADEDGE));
	mdelay(1);
	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
	mdelay(1);
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong | BLSSIREADEDGE);
	mdelay(1);
	if (rfpath == RF90_PATH_A)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
						 BIT(8));
	else if (rfpath == RF90_PATH_B)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
						 BIT(8));
	if (rfpi_enable)
		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
					 BLSSIREADBACKDATA);
	else
		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
					 BLSSIREADBACKDATA);
	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "RFR-%d Addr[0x%x]=0x%x\n",
		 rfpath, pphyreg->rf_rb, retvalue);
	return retvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);

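/* Write an RF register over the 3-wire interface: the 8-bit offset
 * goes in bits 27:20 and the 20-bit data in bits 19:0 of the
 * rf3wire_offset baseband register.
 */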
void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
				 enum radio_path rfpath,
				 u32 offset, u32 data)
{
	u32 data_and_addr;
	u32 newoffset;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];

	if (RT_CANNOT_IO(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
		return;
	}
	offset &= 0xff;
	newoffset = offset;
	data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
	rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
		 "RFW-%d Addr[0x%x]=0x%x\n",
		 rfpath, pphyreg->rf3wire_offset,
		 data_and_addr);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);

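/* Convert a TX power index (0.5 dB per step) to dBm using a base
 * offset that depends on the wireless mode.
 */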
long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
				  enum wireless_mode wirelessmode,
				  u8 txpwridx)
{
	long offset;
	long pwrout_dbm;

	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		offset = -7;
		break;
	case WIRELESS_MODE_G:
	case WIRELESS_MODE_N_24G:
		offset = -8;
		break;
	default:
		offset = -8;
		break;
	}
	pwrout_dbm = txpwridx / 2 + offset;
	return pwrout_dbm;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm);

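/* Initialize the per-RF-path baseband register map (phyreg_def) that
 * the serial read/write helpers above rely on.
 */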
void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;

	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
	    RFPGA0_XA_LSSIPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
	    RFPGA0_XB_LSSIPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;

	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;

	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;

	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;

	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;

	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;

	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;

	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def);

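/* Store one command in the software channel-switch table, after
 * checking the table pointer and that the index is within bounds.
 */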
bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
				      u32 cmdtableidx,
				      u32 cmdtablesz,
				      enum swchnlcmd_id cmdid,
				      u32 para1, u32 para2,
				      u32 msdelay)
{
	struct swchnlcmd *pcmd;

	if (cmdtable == NULL) {
		RT_ASSERT(false, "cmdtable cannot be NULL.\n");
		return false;
	}

	if (cmdtableidx >= cmdtablesz)
		return false;

	pcmd = cmdtable + cmdtableidx;
	pcmd->cmdid = cmdid;
	pcmd->para1 = para1;
	pcmd->para2 = para2;
	pcmd->msdelay = msdelay;
	return true;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray);

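/* Apply the path A IQ-calibration result to the TX (and, unless
 * @btxonly, the RX) IQ imbalance registers.  The X/Y terms are 10-bit
 * signed values and are sign-extended before being combined with the
 * current register contents.
 */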
void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
					bool iqk_ok,
					long result[][8],
					u8 final_candidate,
					bool btxonly)
{
	u32 oldval_0, x, tx0_a, reg;
	long y, tx0_c;

	if (final_candidate == 0xFF) {
		return;
	} else if (iqk_ok) {
		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][0];
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;
		tx0_a = (x * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
			      ((x * oldval_0 >> 7) & 0x1));
		y = result[final_candidate][1];
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;
		tx0_c = (y * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
			      ((tx0_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
			      (tx0_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
			      ((y * oldval_0 >> 7) & 0x1));
		if (btxonly)
			return;
		reg = result[final_candidate][2];
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][3] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][3] >> 6) & 0xF;
		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
	}
}
EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix);

void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
				 u32 *addabackup, u32 registernum)
{
	u32 i;

	for (i = 0; i < registernum; i++)
		addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
}
EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers);

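/* Back up the MAC registers used during IQK: every entry but the last
 * is byte-wide, the final one is read as a dword.
 */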
void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
				    u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
	macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers);

void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
				       u32 *addareg, u32 *addabackup,
				       u32 registernum)
{
	u32 i;

	for (i = 0; i < registernum; i++)
		rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers);

void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
				      u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i;

	for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
	rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers);

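/* Program the ADDA registers for IQ calibration.  The RTL8723AE picks
 * its value from whether path A is being calibrated and whether the
 * NIC is 2T2R; the RTL8723BE writes one fixed value to every entry.
 */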
void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
			      bool is_patha_on, bool is2t)
{
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 pathon;
	u32 i;

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
		pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
		if (!is2t) {
			pathon = 0x0bdb25a0;
			rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
		} else {
			rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
		}
	} else {
		/* rtl8723be */
		pathon = 0x01c00014;
		rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
	}

	for (i = 1; i < IQK_ADDA_REG_NUM; i++)
		rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on);

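/* Prepare the MAC for calibration: write 0x3F to the first register in
 * the list, clear BIT(3) in the middle entries and BIT(5) in the last
 * one, all based on the previously saved backup values.
 */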
void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
					 u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i = 0;

	rtl_write_byte(rtlpriv, macreg[i], 0x3F);

	for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i],
			       (u8) (macbackup[i] & (~BIT(3))));
	rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
}
EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration);

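/* Briefly disable the IQK block (0xe28), force path A into standby via
 * its LSSI parameter register (0x840), then re-enable the IQK block.
 */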
void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby);

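/* Switch both RF paths between parallel (PI) and serial (SI) register
 * access by rewriting HSSI parameter 1 (0x820 for path A, 0x828 for
 * path B); the PI-enable bit is the one tested in
 * rtl8723_phy_rf_serial_read().
 */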
void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
{
	u32 mode;

	mode = pi_mode ? 0x01000100 : 0x01000000;
	rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
	rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch);