/*
Copyright (c) 2013, The Linux Foundation. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
		* Redistributions of source code must retain the above copyright
			notice, this list of conditions and the following disclaimer.
		* Redistributions in binary form must reproduce the above
			copyright notice, this list of conditions and the following
			disclaimer in the documentation and/or other materials provided
			with the distribution.
		* Neither the name of The Linux Foundation nor the names of its
			contributors may be used to endorse or promote products derived
			from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "ipa_nat_drv.h"
#include "ipa_nat_drvi.h"

#ifdef USE_GLIB
#include <glib.h>
#define strlcpy g_strlcpy
#endif

struct ipa_nat_cache ipv4_nat_cache;
pthread_mutex_t nat_mutex    = PTHREAD_MUTEX_INITIALIZER;

/* ------------------------------------------
		UTILITY FUNCTIONS START
	 --------------------------------------------*/

/**
 * UpdateSwSpecParams() - updates sw specific params
 * @rule: [in/out] nat table rule
 * @param_type: [in] which param to update
 * @value: [in] value of the param
 *
 * Update SW specific params in the passed rule.
 *
 * Returns: None
 */
void UpdateSwSpecParams(struct ipa_nat_rule *rule,
															uint8_t param_type,
															uint32_t value)
{
	uint32_t temp = rule->sw_spec_params;

	if (IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE == param_type) {
		value = (value << INDX_TBL_ENTRY_SIZE_IN_BITS);
		temp &= 0x0000FFFF;
	} else {
		temp &= 0xFFFF0000;
	}

	temp = (temp | value);
	rule->sw_spec_params = temp;
	return;
}
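
/*
 * For reference, a sketch of the packing UpdateSwSpecParams() maintains,
 * inferred from the masks and shift above (assuming
 * INDX_TBL_ENTRY_SIZE_IN_BITS is 16): sw_spec_params holds two 16-bit
 * software-only fields in one 32-bit word,
 *
 *   31 ........... 16 15 ............ 0
 *   [index tbl entry] [  prev index   ]
 */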

/**
 * Read8BitFieldValue() - read an 8-bit field from a packed rule word
 * @param: [in] 32-bit word holding the packed fields
 * @fld_type: [in] which field to read
 *
 * Returns: the requested field value, or 0 if an invalid field
 * type is passed
 */

uint8_t Read8BitFieldValue(uint32_t param,
														ipa_nat_rule_field_type fld_type)
{
	void *temp = (void *)&param;

	switch (fld_type) {

	case PROTOCOL_FIELD:
		return ((time_stamp_proto *)temp)->protocol;

	default:
		IPAERR("Invalid Field type passed\n");
		return 0;
	}
}

uint16_t Read16BitFieldValue(uint32_t param,
														 ipa_nat_rule_field_type fld_type)
{
	void *temp = (void *)&param;

	switch (fld_type) {

	case NEXT_INDEX_FIELD:
		return ((next_index_pub_port *)temp)->next_index;

	case PUBLIC_PORT_FILED:
		return ((next_index_pub_port *)temp)->public_port;

	case ENABLE_FIELD:
		return ((ipcksum_enbl *)temp)->enable;

	case SW_SPEC_PARAM_PREV_INDEX_FIELD:
		return ((sw_spec_params *)temp)->prev_index;

	case SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD:
		return ((sw_spec_params *)temp)->index_table_entry;

	case INDX_TBL_TBL_ENTRY_FIELD:
		return ((tbl_ent_nxt_indx *)temp)->tbl_entry;

	case INDX_TBL_NEXT_INDEX_FILED:
		return ((tbl_ent_nxt_indx *)temp)->next_index;

#ifdef NAT_DUMP
	case IP_CHKSUM_FIELD:
		return ((ipcksum_enbl *)temp)->ip_chksum;
#endif

	default:
		IPAERR("Invalid Field type passed\n");
		return 0;
	}
}

uint32_t Read32BitFieldValue(uint32_t param,
														 ipa_nat_rule_field_type fld_type)
{

	void *temp = (void *)&param;

	switch (fld_type) {

	case TIME_STAMP_FIELD:
		return ((time_stamp_proto *)temp)->time_stamp;

	default:
		IPAERR("Invalid Field type passed\n");
		return 0;
	}
}


/**
 * CreateNatDevice() - Create the nat device
 * @mem: [in] name of the device to create
 *
 * Create the nat device, register for file-create
 * notification in the given directory, and wait until
 * the notification is received.
 *
 * Returns: 0 on success, negative on failure
 */
int CreateNatDevice(struct ipa_ioc_nat_alloc_mem *mem)
{
	int ret;

	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_ALLOC_NAT_MEM, mem);
	if (ret != 0) {
		perror("CreateNatDevice(): ioctl error value");
		IPAERR("unable to post nat mem init. Error: %d\n", ret);
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		return -EINVAL;
	}
	IPADBG("posted IPA_IOC_ALLOC_NAT_MEM to kernel successfully\n");
	return 0;
}

/**
 * GetNearest2Power() - Compute the nearest power of 2
 * @num: [in] given number
 * @ret: [out] power of 2 greater than or equal to the given number
 *
 * Rounds the given number up to the nearest power of 2
 * (minimum 2).
 *
 * Returns: 0 on success, negative on failure
 */
int GetNearest2Power(uint16_t num, uint16_t *ret)
{
	uint16_t number = num;
	uint16_t tmp = 1;
	*ret = 0;

	if (0 == num) {
		return -EINVAL;
	}

	if (1 == num) {
		*ret = 2;
		return 0;
	}

	for (;;) {
		if (1 == num) {
			if (number != tmp) {
				tmp *= 2;
			}

			*ret = tmp;
			return 0;
		}

		num >>= 1;
		tmp *= 2;
	}

	return -EINVAL;
}
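
/*
 * Worked examples of the rounding above: GetNearest2Power(4, &r) sets
 * r = 4, GetNearest2Power(5, &r) sets r = 8, GetNearest2Power(1, &r)
 * sets r = 2, and GetNearest2Power(0, &r) returns -EINVAL.
 */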

/**
 * GetNearestEven() - Compute the nearest even number
 * @num: [in] given number
 * @ret: [out] nearest even number
 *
 * Rounds the given number up to the nearest even number
 * (minimum 2).
 *
 * Returns: None
 */
void GetNearestEven(uint16_t num, uint16_t *ret)
{

	if (num < 2) {
		*ret = 2;
		return;
	}

	while ((num % 2) != 0) {
		num = num + 1;
	}

	*ret = num;
	return;
}
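
/*
 * Worked examples: GetNearestEven(7, &r) sets r = 8, even inputs >= 2
 * are returned unchanged, and inputs 0 and 1 both yield the minimum
 * value 2.
 */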

/**
 * dst_hash() - Find the index into the ipv4 base table
 * @trgt_ip: [in] Target IP address
 * @trgt_port: [in] Target port
 * @public_port: [in] Public port
 * @proto: [in] Protocol (e.g. TCP/UDP)
 * @size: [in] size of the ipv4 base table
 *
 * This hash method finds the hash index of a new nat entry
 * in the ipv4 base table. If the hash computes to zero, the
 * entry is stored at index N-1, where N is the size of the
 * ipv4 base table, since index zero is reserved.
 *
 * Returns: >0 index into the ipv4 base table, negative on failure
 */
static uint16_t dst_hash(uint32_t trgt_ip, uint16_t trgt_port,
				uint16_t public_port, uint8_t proto,
				uint16_t size)
{
	uint16_t hash = ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
		 (trgt_port) ^ (public_port) ^ (proto);

	IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
	IPADBG("public_port: 0x%x\n", public_port);
	IPADBG("proto: 0x%x size: 0x%x\n", proto, size);

	hash = (hash & size);

	/* If the hash computes to zero, set it to the maximum value,
		 as zero is the unused entry in the nat tables */
	if (0 == hash) {
		return size;
	}

	IPADBG("dst_hash returning value: %d\n", hash);
	return hash;
}
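
/*
 * A worked example with hypothetical values: callers pass
 * table_entries - 1 as @size, so for a 64-entry base table size = 0x3F
 * and the mask keeps the low 6 bits of the XOR. An XOR of 0x1234 maps
 * to 0x1234 & 0x3F = 0x34, and an XOR that masks to 0 maps to 0x3F
 * instead. The same masking applies to src_hash() below.
 */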

/**
 * src_hash() - Find the index into the ipv4 index base table
 * @priv_ip: [in] Private IP address
 * @priv_port: [in] Private port
 * @trgt_ip: [in] Target IP address
 * @trgt_port: [in] Target Port
 * @proto: [in] Protocol (e.g. TCP/UDP)
 * @size: [in] size of the ipv4 index base table
 *
 * This hash method finds the hash index of a new nat entry
 * in the ipv4 index base table. If the hash computes to zero,
 * the entry is stored at index N-1, where N is the size of the
 * ipv4 index base table, since index zero is reserved.
 *
 * Returns: >0 index into the ipv4 index base table, negative on failure
 */
static uint16_t src_hash(uint32_t priv_ip, uint16_t priv_port,
				uint32_t trgt_ip, uint16_t trgt_port,
				uint8_t proto, uint16_t size)
{
	uint16_t hash =  ((uint16_t)(priv_ip)) ^ ((uint16_t)(priv_ip >> 16)) ^
		 (priv_port) ^
		 ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
		 (trgt_port) ^ (proto);

	IPADBG("priv_ip: 0x%x priv_port: 0x%x\n", priv_ip, priv_port);
	IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
	IPADBG("proto: 0x%x size: 0x%x\n", proto, size);

	hash = (hash & size);

	/* If the hash computes to zero, set it to the maximum value,
		 as zero is the unused entry in the nat tables */
	if (0 == hash) {
		return size;
	}

	IPADBG("src_hash returning value: %d\n", hash);
	return hash;
}

/**
 * ipa_nati_calc_ip_cksum() - Calculate the source nat
 *														 IP checksum diff
 * @pub_ip_addr: [in] public ip address
 * @priv_ip_addr: [in] private ip address
 *
 * The source nat ip checksum difference is calculated as
 * public_ip_addr - private_ip_addr.
 * 1's complement is used here to represent the negative number,
 * so the 1's complement of the private ip addr is added
 * to the public ip addr.
 *
 * Returns: >0 ip checksum diff
 */
static uint16_t ipa_nati_calc_ip_cksum(uint32_t pub_ip_addr,
										uint32_t priv_ip_addr)
{
	uint16_t ret;
	uint32_t cksum = 0;

	/* Add LSB(2 bytes) of public ip address to cksum */
	cksum += (pub_ip_addr & 0xFFFF);

	/* Add MSB(2 bytes) of public ip address to cksum
		and check for carry forward(CF), if any add it
	*/
	cksum += (pub_ip_addr>>16);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Calculate the 1's complement of private ip address */
	priv_ip_addr = (~priv_ip_addr);

	/* Add LSB(2 bytes) of private ip address to cksum
		 and check for carry forward(CF), if any add it
	*/
	cksum += (priv_ip_addr & 0xFFFF);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Add MSB(2 bytes) of private ip address to cksum
		 and check for carry forward(CF), if any add it
	*/
	cksum += (priv_ip_addr>>16);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Return the LSB(2 bytes) of checksum	*/
	ret = (uint16_t)cksum;
	return ret;
}
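
/*
 * A worked example with hypothetical addresses, pub = 0xC0A80101 and
 * priv = 0x0A000001, following the folding steps above:
 *
 *   0x0101 + 0xC0A8               = 0xC1A9
 *   ~priv                         = 0xF5FFFFFE
 *   0xC1A9 + 0xFFFE, fold carry   = 0xC1A8
 *   0xC1A8 + 0xF5FF, fold carry   = 0xB7A8
 *
 * so the returned diff is 0xB7A8.
 */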

/**
 * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat
 *																TCP/UDP checksum diff
 * @pub_ip_addr: [in] public ip address
 * @pub_port: [in] public tcp/udp port
 * @priv_ip_addr: [in] private ip address
 * @priv_port: [in] private tcp/udp port
 *
 * The source nat tcp/udp checksum difference is calculated as
 * (pub_ip_addr + pub_port) - (priv_ip_addr + priv_port).
 * 1's complement is used here to represent the negative number,
 * so the 1's complements of the private ip addr and private port
 * are added to the public ip addr and public port.
 *
 * Returns: >0 tcp/udp checksum diff
 */
static uint16_t ipa_nati_calc_tcp_udp_cksum(uint32_t pub_ip_addr,
										uint16_t pub_port,
										uint32_t priv_ip_addr,
										uint16_t priv_port)
{
	uint16_t ret = 0;
	uint32_t cksum = 0;

	/* Add LSB(2 bytes) of public ip address to cksum */
	cksum += (pub_ip_addr & 0xFFFF);

	/* Add MSB(2 bytes) of public ip address to cksum
		and check for carry forward(CF), if any add it
	*/
	cksum += (pub_ip_addr>>16);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Add public port to cksum and
		 check for carry forward(CF), if any add it */
	cksum += pub_port;
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Calculate the 1's complement of private ip address */
	priv_ip_addr = (~priv_ip_addr);

	/* Add LSB(2 bytes) of private ip address to cksum
		 and check for carry forward(CF), if any add it
	*/
	cksum += (priv_ip_addr & 0xFFFF);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Add MSB(2 bytes) of private ip address to cksum
		 and check for carry forward(CF), if any add it
	*/
	cksum += (priv_ip_addr>>16);
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* Calculate the 1's complement of private port */
	priv_port = (~priv_port);

	/* Add private port to cksum and
	 check for carry forward(CF), if any add it */
	cksum += priv_port;
	if (cksum >> 16) {
		cksum = (cksum & 0x0000FFFF);
		cksum += 1;
	}

	/* return the LSB(2 bytes) of checksum */
	ret = (uint16_t)cksum;
	return ret;
}
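
/*
 * Usage sketch (hypothetical values): the same folding as the IP-only
 * diff above, with the two port words folded in as well.
 *
 *   uint16_t diff = ipa_nati_calc_tcp_udp_cksum(0xC0A80101, 40000,
 *                                               0x0A000001, 5060);
 */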

/**
 * ipa_nati_make_rule_hdl() - makes nat rule handle
 * @tbl_hdl: [in] nat table handle
 * @tbl_entry: [in] nat table entry
 *
 * Calculate the nat rule handle from the nat entry;
 * the handle is returned to the client of the
 * nat driver.
 *
 * Returns: >0 nat rule handle
 */
uint16_t ipa_nati_make_rule_hdl(uint16_t tbl_hdl,
				uint16_t tbl_entry)
{
	struct ipa_nat_ip4_table_cache *tbl_ptr;
	uint16_t rule_hdl = 0;
	uint16_t cnt = 0;

	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];

	if (tbl_entry >= tbl_ptr->table_entries) {
		/* Increase the current expansion table count */
		tbl_ptr->cur_expn_tbl_cnt++;

		/* Update the index into table */
		rule_hdl = tbl_entry - tbl_ptr->table_entries;
		rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
		/* Update the table type mask */
		rule_hdl = (rule_hdl | IPA_NAT_RULE_HDL_TBL_TYPE_MASK);
	} else {
		/* Increase the current count */
		tbl_ptr->cur_tbl_cnt++;

		rule_hdl = tbl_entry;
		rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
	}

	for (; cnt < (tbl_ptr->table_entries + tbl_ptr->expn_table_entries); cnt++) {
		if (IPA_NAT_INVALID_NAT_ENTRY == tbl_ptr->rule_id_array[cnt]) {
			tbl_ptr->rule_id_array[cnt] = rule_hdl;
			return cnt + 1;
		}
	}

	return 0;
}
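
/*
 * Sketch of the handle bookkeeping above, inferred from the code: the
 * value stored in rule_id_array is
 *
 *   (entry_within_table << IPA_NAT_RULE_HDL_TBL_TYPE_BITS)
 *       | (IPA_NAT_RULE_HDL_TBL_TYPE_MASK, if in the expansion table)
 *
 * while the handle handed back to the client is the 1-based slot index
 * in rule_id_array, so a return value of 0 means no free slot was found.
 */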

/**
 * ipa_nati_parse_ipv4_rule_hdl() - parse rule handle
 * @tbl_index: [in] nat table index
 * @rule_hdl: [in] nat rule handle
 * @expn_tbl: [out] expansion table or not
 * @tbl_entry: [out] index into table
 *
 * Parse the rule handle to retrieve the nat table
 * type and the entry of the nat table.
 *
 * Returns: None
 */
void ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_index,
				uint16_t rule_hdl, uint8_t *expn_tbl,
				uint16_t *tbl_entry)
{
	struct ipa_nat_ip4_table_cache *tbl_ptr;
	uint16_t rule_id;

	*expn_tbl = 0;
	*tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_index];

	if (rule_hdl >= (tbl_ptr->table_entries + tbl_ptr->expn_table_entries)) {
		IPAERR("invalid rule handle\n");
		return;
	}

	rule_id = tbl_ptr->rule_id_array[rule_hdl-1];

	/* Retrieve the table type */
	*expn_tbl = 0;
	if (rule_id & IPA_NAT_RULE_HDL_TBL_TYPE_MASK) {
		*expn_tbl = 1;
	}

	/* Retrieve the table entry */
	*tbl_entry = (rule_id >> IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
	return;
}

uint32_t ipa_nati_get_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
						nat_table_type tbl_type,
						uint16_t	tbl_entry)
{
	struct ipa_nat_rule *tbl_ptr;
	uint32_t ret = 0;

	if (IPA_NAT_EXPN_TBL == tbl_type) {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
	} else {
		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
	}

	ret = (char *)&tbl_ptr[tbl_entry] - (char *)tbl_ptr;
	ret += cache_ptr->tbl_addr_offset;
	return ret;
}

uint32_t ipa_nati_get_index_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
								nat_table_type tbl_type,
								uint16_t indx_tbl_entry)
{
	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
	uint32_t ret = 0;

	if (IPA_NAT_INDEX_EXPN_TBL == tbl_type) {
		indx_tbl_ptr =
			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
	} else {
		indx_tbl_ptr =
			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
	}

	ret = (char *)&indx_tbl_ptr[indx_tbl_entry] - (char *)indx_tbl_ptr;
	ret += cache_ptr->tbl_addr_offset;
	return ret;
}

/* ------------------------------------------
		UTILITY FUNCTIONS END
--------------------------------------------*/

/* ------------------------------------------
	 Main Functions
--------------------------------------------**/
void ipa_nati_reset_tbl(uint8_t tbl_indx)
{
	uint16_t table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
	uint16_t expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].expn_table_entries;

	/* Base table */
	IPADBG("memset() base table to 0, %p\n",
				 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr);

	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr,
				 0,
				 IPA_NAT_TABLE_ENTRY_SIZE * table_entries);

	/* Base expansion table */
	IPADBG("memset() expn base table to 0, %p\n",
				 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr);

	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr,
				 0,
				 IPA_NAT_TABLE_ENTRY_SIZE * expn_table_entries);

	/* Index table */
	IPADBG("memset() index table to 0, %p\n",
				 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr);

	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr,
				 0,
				 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * table_entries);

	/* Index expansion table */
	IPADBG("memset() index expn table to 0, %p\n",
				 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr);

	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr,
				 0,
				 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * expn_table_entries);

	IPADBG("returning from ipa_nati_reset_tbl()\n");
	return;
}

int ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,
				uint16_t number_of_entries,
				uint32_t *tbl_hdl)
{
	struct ipa_ioc_nat_alloc_mem mem;
	uint8_t tbl_indx = ipv4_nat_cache.table_cnt;
	uint16_t table_entries, expn_table_entries;
	int ret;

	*tbl_hdl = 0;
	/* Allocate table */
	memset(&mem, 0, sizeof(mem));
	ret = ipa_nati_alloc_table(number_of_entries,
														 &mem,
														 &table_entries,
														 &expn_table_entries);
	if (0 != ret) {
		IPAERR("unable to allocate nat table\n");
		return -ENOMEM;
	}

	/* Update the cache.
		 (IPA_NAT_UNUSED_BASE_ENTRIES/2) accounts for the unused zero
		 entry in both the base and expansion tables.
	*/
	ret = ipa_nati_update_cache(&mem,
															public_ip_addr,
															table_entries,
															expn_table_entries);
	if (0 != ret) {
		IPAERR("unable to update cache Error: %d\n", ret);
		return -EINVAL;
	}

	/* Reset the nat table before posting init cmd */
	ipa_nati_reset_tbl(tbl_indx);

	/* Initialize the ipa hw with nat table dimensions */
	ret = ipa_nati_post_ipv4_init_cmd(tbl_indx);
	if (0 != ret) {
		IPAERR("unable to post nat_init command Error %d\n", ret);
		return -EINVAL;
	}

	/* Return table handle */
	ipv4_nat_cache.table_cnt++;
	*tbl_hdl = ipv4_nat_cache.table_cnt;

#ifdef NAT_DUMP
	ipa_nat_dump_ipv4_table(*tbl_hdl);
#endif
	return 0;
}
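
/*
 * Minimal usage sketch (hypothetical caller values, error handling
 * elided):
 *
 *   uint32_t tbl_hdl;
 *   if (!ipa_nati_add_ipv4_tbl(0xC0A80101, 100, &tbl_hdl)) {
 *       ... add/delete rules against tbl_hdl ...
 *       ipa_nati_del_ipv4_table(tbl_hdl);
 *   }
 */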

int ipa_nati_alloc_table(uint16_t number_of_entries,
				struct ipa_ioc_nat_alloc_mem *mem,
				uint16_t *table_entries,
				uint16_t *expn_table_entries)
{
	int fd = 0, ret;
	uint16_t total_entries;

	/* Copy the table name */
	strlcpy(mem->dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);

	/* Calculate the size for base table and expansion table */
	*table_entries = (uint16_t)(number_of_entries * IPA_NAT_BASE_TABLE_PERCENTAGE);
	if (*table_entries == 0) {
		*table_entries = 1;
	}
	if (GetNearest2Power(*table_entries, table_entries)) {
		IPAERR("unable to calculate power of 2\n");
		return -EINVAL;
	}

	*expn_table_entries = (uint16_t)(number_of_entries * IPA_NAT_EXPANSION_TABLE_PERCENTAGE);
	GetNearestEven(*expn_table_entries, expn_table_entries);

	total_entries = (*table_entries)+(*expn_table_entries);

	/* Calculate the memory size for both table and index table entries */
	mem->size = (IPA_NAT_TABLE_ENTRY_SIZE * total_entries);
	IPADBG("Nat Table size: %zu\n", mem->size);
	mem->size += (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * total_entries);
	IPADBG("Nat Base and Index Table size: %zu\n", mem->size);

	if (!ipv4_nat_cache.ipa_fd) {
		fd = open(IPA_DEV_NAME, O_RDONLY);
		if (fd < 0) {
			perror("ipa_nati_alloc_table(): open error value:");
			IPAERR("unable to open ipa device\n");
			return -EIO;
		}
		ipv4_nat_cache.ipa_fd = fd;
	}

	ret = CreateNatDevice(mem);
	return ret;
}


int ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem *mem,
				uint32_t public_addr,
				uint16_t tbl_entries,
				uint16_t expn_tbl_entries)
{
	uint32_t index = ipv4_nat_cache.table_cnt;
	char *ipv4_rules_addr = NULL;

	int fd = 0;
	int flags = MAP_SHARED;
	int prot = PROT_READ | PROT_WRITE;
	off_t offset = 0;
#ifdef IPA_ON_R3PC
	int ret = 0;
	uint32_t nat_mem_offset = 0;
#endif

	ipv4_nat_cache.ip4_tbl[index].valid = IPA_NAT_TABLE_VALID;
	ipv4_nat_cache.ip4_tbl[index].public_addr = public_addr;
	ipv4_nat_cache.ip4_tbl[index].size = mem->size;
	ipv4_nat_cache.ip4_tbl[index].tbl_addr_offset = mem->offset;

	ipv4_nat_cache.ip4_tbl[index].table_entries = tbl_entries;
	ipv4_nat_cache.ip4_tbl[index].expn_table_entries = expn_tbl_entries;

	IPADBG("num of ipv4 rules:%d\n", tbl_entries);
	IPADBG("num of ipv4 expn rules:%d\n", expn_tbl_entries);

	/* allocate memory for nat index expansion table */
	if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
		ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta =
			 malloc(sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);

		if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
			IPAERR("Fail to allocate ipv4 index expansion table meta\n");
			return 0;
		}

		memset(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta,
					 0,
					 sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
	}

	/* Allocate memory for rule_id_array */
	if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
		ipv4_nat_cache.ip4_tbl[index].rule_id_array =
			 malloc(sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));

		if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
			IPAERR("Fail to allocate rule id array\n");
			return 0;
		}

		memset(ipv4_nat_cache.ip4_tbl[index].rule_id_array,
					 0,
					 sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
	}


	/* open the nat table */
	strlcpy(mem->dev_name, NAT_DEV_FULL_NAME, IPA_RESOURCE_NAME_MAX);
	fd = open(mem->dev_name, O_RDWR);
	if (fd < 0) {
		perror("ipa_nati_update_cache(): open error value:");
		IPAERR("unable to open nat device. Error:%d\n", fd);
		return -EIO;
	}

	/* copy the nat table name */
	strlcpy(ipv4_nat_cache.ip4_tbl[index].table_name,
					mem->dev_name,
					IPA_RESOURCE_NAME_MAX);
	ipv4_nat_cache.ip4_tbl[index].nat_fd = fd;

	/* open the nat device Table */
#ifndef IPA_ON_R3PC
	ipv4_rules_addr = (void *)mmap(NULL, mem->size,
																 prot, flags,
																 fd, offset);
#else
	IPADBG("user space r3pc\n");
	ipv4_rules_addr = (void *)mmap((caddr_t)0, NAT_MMAP_MEM_SIZE,
																 prot, flags,
																 fd, offset);
#endif
	if (MAP_FAILED  == ipv4_rules_addr) {
		perror("unable to mmap the memory\n");
		return -EINVAL;
	}

#ifdef IPA_ON_R3PC
	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_NAT_OFFSET, &nat_mem_offset);
	if (ret != 0) {
		perror("ipa_nati_update_cache(): ioctl error value");
		IPAERR("unable to post nat offset cmd Error: %d\n", ret);
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		return -EIO;
	}
	ipv4_rules_addr += nat_mem_offset;
	ipv4_nat_cache.ip4_tbl[index].mmap_offset = nat_mem_offset;
#endif

	IPADBG("mmap return value 0x%lx\n", (long unsigned int)ipv4_rules_addr);

	ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr = ipv4_rules_addr;

	ipv4_nat_cache.ip4_tbl[index].ipv4_expn_rules_addr =
	ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * tbl_entries);

	ipv4_nat_cache.ip4_tbl[index].index_table_addr =
	ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries));

	ipv4_nat_cache.ip4_tbl[index].index_table_expn_addr =
	ipv4_rules_addr +
	(IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries))+
	(IPA_NAT_INDEX_TABLE_ENTRY_SIZE * tbl_entries);

	return 0;
}
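
/*
 * Resulting layout of the single mmap()ed region, as wired up above:
 *
 *   +------------------+ <- ipv4_rules_addr
 *   | base table       |    tbl_entries * IPA_NAT_TABLE_ENTRY_SIZE
 *   +------------------+ <- ipv4_expn_rules_addr
 *   | expansion table  |    expn_tbl_entries * IPA_NAT_TABLE_ENTRY_SIZE
 *   +------------------+ <- index_table_addr
 *   | index table      |    tbl_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE
 *   +------------------+ <- index_table_expn_addr
 *   | index expn table |    expn_tbl_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE
 *   +------------------+
 */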

/* comment: check the implementation once;
	 offset should be in terms of bytes */
int ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index)
{
	struct ipa_ioc_v4_nat_init cmd;
	uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_index].tbl_addr_offset;
	int ret;

	cmd.tbl_index = tbl_index;

	cmd.ipv4_rules_offset = offset;
	cmd.expn_rules_offset = cmd.ipv4_rules_offset +
	(ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_TABLE_ENTRY_SIZE);

	cmd.index_offset = cmd.expn_rules_offset +
	(ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries * IPA_NAT_TABLE_ENTRY_SIZE);

	cmd.index_expn_offset = cmd.index_offset +
	(ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE);

	cmd.table_entries  = ipv4_nat_cache.ip4_tbl[tbl_index].table_entries - 1;
	cmd.expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries;

	cmd.ip_addr = ipv4_nat_cache.ip4_tbl[tbl_index].public_addr;

	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_INIT_NAT, &cmd);
	if (ret != 0) {
		perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
		IPAERR("unable to post init cmd Error: %d\n", ret);
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		return -EINVAL;
	}
	IPADBG("Posted IPA_IOC_V4_INIT_NAT to kernel successfully\n");

	return 0;
}

int ipa_nati_del_ipv4_table(uint32_t tbl_hdl)
{
	uint8_t index = (uint8_t)(tbl_hdl - 1);
	void *addr = (void *)ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr;
	struct ipa_ioc_v4_nat_del del_cmd;
	int ret;

	if (!ipv4_nat_cache.ip4_tbl[index].valid) {
		IPAERR("invalid table handle passed\n");
		ret = -EINVAL;
		goto fail;
	}

	if (pthread_mutex_lock(&nat_mutex) != 0) {
		ret = -1;
		goto lock_mutex_fail;
	}

	/* unmap the device memory from user space */
#ifndef IPA_ON_R3PC
	munmap(addr, ipv4_nat_cache.ip4_tbl[index].size);
#else
	addr = (char *)addr - ipv4_nat_cache.ip4_tbl[index].mmap_offset;
	munmap(addr, NAT_MMAP_MEM_SIZE);
#endif

	/* close the file descriptor of nat device */
	if (close(ipv4_nat_cache.ip4_tbl[index].nat_fd)) {
		IPAERR("unable to close the file descriptor\n");
		ret = -EINVAL;
		if (pthread_mutex_unlock(&nat_mutex) != 0)
			goto unlock_mutex_fail;
		goto fail;
	}

	del_cmd.table_index = index;
	del_cmd.public_ip_addr = ipv4_nat_cache.ip4_tbl[index].public_addr;
	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_DEL_NAT, &del_cmd);
	if (ret != 0) {
		perror("ipa_nati_del_ipv4_table(): ioctl error value");
		IPAERR("unable to post nat del command init Error: %d\n", ret);
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		ret = -EINVAL;
		if (pthread_mutex_unlock(&nat_mutex) != 0)
			goto unlock_mutex_fail;
		goto fail;
	}
	IPADBG("posted IPA_IOC_V4_DEL_NAT to kernel successfully\n");

	free(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta);
	free(ipv4_nat_cache.ip4_tbl[index].rule_id_array);

	memset(&ipv4_nat_cache.ip4_tbl[index],
				 0,
				 sizeof(ipv4_nat_cache.ip4_tbl[index]));

	/* Decrease the table count by 1 */
	ipv4_nat_cache.table_cnt--;

	if (pthread_mutex_unlock(&nat_mutex) != 0) {
		ret = -1;
		goto unlock_mutex_fail;
	}

	return 0;

lock_mutex_fail:
	IPAERR("unable to lock the nat mutex\n");
	return ret;

unlock_mutex_fail:
	IPAERR("unable to unlock the nat mutex\n");

fail:
	return ret;
}

int ipa_nati_query_timestamp(uint32_t  tbl_hdl,
				uint32_t  rule_hdl,
				uint32_t  *time_stamp)
{
	uint8_t tbl_index = (uint8_t)(tbl_hdl - 1);
	uint8_t expn_tbl = 0;
	uint16_t tbl_entry = 0;
	struct ipa_nat_rule *tbl_ptr = NULL;

	if (!ipv4_nat_cache.ip4_tbl[tbl_index].valid) {
		IPAERR("invalid table handle\n");
		return -EINVAL;
	}

	if (pthread_mutex_lock(&nat_mutex) != 0) {
		IPAERR("unable to lock the nat mutex\n");
		return -1;
	}

	ipa_nati_parse_ipv4_rule_hdl(tbl_index, (uint16_t)rule_hdl,
															 &expn_tbl, &tbl_entry);

	tbl_ptr =
	(struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_rules_addr;
	if (expn_tbl) {
		tbl_ptr =
			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_expn_rules_addr;
	}

	if (tbl_ptr)
		*time_stamp = Read32BitFieldValue(tbl_ptr[tbl_entry].ts_proto,
					TIME_STAMP_FIELD);

	if (pthread_mutex_unlock(&nat_mutex) != 0) {
		IPAERR("unable to unlock the nat mutex\n");
		return -1;
	}

	return 0;
}

int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,
				const ipa_nat_ipv4_rule *clnt_rule,
				uint32_t *rule_hdl)
{
	struct ipa_nat_ip4_table_cache *tbl_ptr;
	struct ipa_nat_sw_rule sw_rule;
	struct ipa_nat_indx_tbl_sw_rule index_sw_rule;
	uint16_t new_entry, new_index_tbl_entry;

	memset(&sw_rule, 0, sizeof(sw_rule));
	memset(&index_sw_rule, 0, sizeof(index_sw_rule));

	/* Generate rule from client input */
	if (ipa_nati_generate_rule(tbl_hdl, clnt_rule,
					&sw_rule, &index_sw_rule,
					&new_entry, &new_index_tbl_entry)) {
		IPAERR("unable to generate rule\n");
		return -EINVAL;
	}

	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
	ipa_nati_copy_ipv4_rule_to_hw(tbl_ptr, &sw_rule, new_entry, (uint8_t)(tbl_hdl-1));
	ipa_nati_copy_ipv4_index_rule_to_hw(tbl_ptr,
																			&index_sw_rule,
																			new_index_tbl_entry,
																			(uint8_t)(tbl_hdl-1));

	IPADBG("new entry:%d, new index entry: %d\n", new_entry, new_index_tbl_entry);
	if (ipa_nati_post_ipv4_dma_cmd((uint8_t)(tbl_hdl - 1), new_entry)) {
		IPAERR("unable to post dma command\n");
		return -EIO;
	}

	/* Generate rule handle */
	*rule_hdl  = ipa_nati_make_rule_hdl((uint16_t)tbl_hdl, new_entry);
	if (!(*rule_hdl)) {
		IPAERR("unable to generate rule handle\n");
		return -EINVAL;
	}

#ifdef NAT_DUMP
	ipa_nat_dump_ipv4_table(tbl_hdl);
#endif

	return 0;
}
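
/*
 * Usage sketch (hypothetical field values; ipa_nat_ipv4_rule fields as
 * used by ipa_nati_generate_tbl_rule() below):
 *
 *   ipa_nat_ipv4_rule rule = {0};
 *   uint32_t rule_hdl;
 *
 *   rule.private_ip = 0x0A000001; rule.private_port = 5060;
 *   rule.target_ip  = 0x08080808; rule.target_port  = 80;
 *   rule.public_port = 40000;     rule.protocol = IPPROTO_TCP;
 *   if (!ipa_nati_add_ipv4_rule(tbl_hdl, &rule, &rule_hdl))
 *       ... later: ipa_nati_del_ipv4_rule(tbl_hdl, rule_hdl);
 */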

int ipa_nati_generate_rule(uint32_t tbl_hdl,
				const ipa_nat_ipv4_rule *clnt_rule,
				struct ipa_nat_sw_rule *rule,
				struct ipa_nat_indx_tbl_sw_rule *index_sw_rule,
				uint16_t *tbl_entry,
				uint16_t *indx_tbl_entry)
{
	struct ipa_nat_ip4_table_cache *tbl_ptr;
	uint16_t tmp;

	if (NULL == clnt_rule || NULL == index_sw_rule ||
			NULL == rule || NULL == tbl_entry  ||
			NULL == indx_tbl_entry) {
		IPAERR("invalid parameters\n");
		return -EINVAL;
	}

	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];

	*tbl_entry = ipa_nati_generate_tbl_rule(clnt_rule,
																					rule,
																					tbl_ptr);
	if (IPA_NAT_INVALID_NAT_ENTRY == *tbl_entry) {
		IPAERR("unable to generate table entry\n");
		return -EINVAL;
	}

	index_sw_rule->tbl_entry = *tbl_entry;
	*indx_tbl_entry = ipa_nati_generate_index_rule(clnt_rule,
																								 index_sw_rule,
																								 tbl_ptr);
	if (IPA_NAT_INVALID_NAT_ENTRY == *indx_tbl_entry) {
		IPAERR("unable to generate index table entry\n");
		return -EINVAL;
	}

	rule->indx_tbl_entry = *indx_tbl_entry;
	if (*indx_tbl_entry >= tbl_ptr->table_entries) {
		tmp = *indx_tbl_entry - tbl_ptr->table_entries;
		tbl_ptr->index_expn_table_meta[tmp].prev_index = index_sw_rule->prev_index;
	}

	return 0;
}

uint16_t ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule *clnt_rule,
						struct ipa_nat_sw_rule *sw_rule,
						struct ipa_nat_ip4_table_cache *tbl_ptr)
{
	uint32_t pub_ip_addr;
	uint16_t prev = 0, nxt_indx = 0, new_entry;
	struct ipa_nat_rule *tbl = NULL, *expn_tbl = NULL;

	pub_ip_addr = tbl_ptr->public_addr;

	tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_rules_addr;
	expn_tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_expn_rules_addr;

	/* copy the values from client rule to sw rule */
	sw_rule->private_ip = clnt_rule->private_ip;
	sw_rule->private_port = clnt_rule->private_port;
	sw_rule->protocol = clnt_rule->protocol;
	sw_rule->public_port = clnt_rule->public_port;
	sw_rule->target_ip = clnt_rule->target_ip;
	sw_rule->target_port = clnt_rule->target_port;

	/* consider only public and private ip fields */
	sw_rule->ip_chksum = ipa_nati_calc_ip_cksum(pub_ip_addr,
																							clnt_rule->private_ip);

	if (IPPROTO_TCP == sw_rule->protocol ||
			IPPROTO_UDP == sw_rule->protocol) {
		/* consider public and private ip & port fields */
		sw_rule->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum(
			 pub_ip_addr,
			 clnt_rule->public_port,
			 clnt_rule->private_ip,
			 clnt_rule->private_port);
	}

	sw_rule->rsvd1 = 0;
	sw_rule->enable = IPA_NAT_FLAG_DISABLE_BIT;
	sw_rule->next_index = 0;

	/*
		SW sets this timer to 0.
		The assumption is that 0 is an invalid clock value and no clock
		wraparounds are expected
	*/
	sw_rule->time_stamp = 0;
	sw_rule->rsvd2 = 0;
	sw_rule->prev_index = 0;
	sw_rule->indx_tbl_entry = 0;

	new_entry = dst_hash(clnt_rule->target_ip,
											 clnt_rule->target_port,
											 clnt_rule->public_port,
											 clnt_rule->protocol,
											 tbl_ptr->table_entries-1);

	/* check whether there is any collision;
		 if there is no collision, return */
	if (!Read16BitFieldValue(tbl[new_entry].ip_cksm_enbl,
													 ENABLE_FIELD)) {
		sw_rule->prev_index = 0;
		IPADBG("Destination Nat New Entry Index %d\n", new_entry);
		return new_entry;
	}

	/* First collision */
	if (Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
													NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
		sw_rule->prev_index = new_entry;
	} else { /* check for more than one collision */
		/* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
		nxt_indx = Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
																	 NEXT_INDEX_FIELD);

		while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
			prev = nxt_indx;

			nxt_indx -= tbl_ptr->table_entries;
			nxt_indx = Read16BitFieldValue(expn_tbl[nxt_indx].nxt_indx_pub_port,
																		 NEXT_INDEX_FIELD);

			/* Handling error case */
			if (prev == nxt_indx) {
				IPAERR("Error: Prev index:%d and next:%d index should not be same\n", prev, nxt_indx);
				return IPA_NAT_INVALID_NAT_ENTRY;
			}
		}

		sw_rule->prev_index = prev;
	}

	/* On collision check for a free entry in the expansion table */
	new_entry = ipa_nati_expn_tbl_free_entry(expn_tbl,
					tbl_ptr->expn_table_entries);

	if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
		/* Expansion table is full; return */
		IPAERR("Expansion table is full\n");
		IPAERR("Current Table: %d & Expn Entries: %d\n",
			   tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
		return IPA_NAT_INVALID_NAT_ENTRY;
	}
	new_entry += tbl_ptr->table_entries;

	IPADBG("new entry index %d\n", new_entry);
	return new_entry;
}
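
/*
 * Collision handling above, in brief: the hashed bucket in the base
 * table is tried first; on collision the chain of next_index links is
 * walked to its last element, and the new rule is placed in a free
 * expansion-table slot whose global index is (free slot + table_entries).
 * prev_index records where the next_index patch must later be DMA'd.
 */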

/* returns expn table entry index */
uint16_t ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule *expn_tbl,
						uint16_t size)
{
	int cnt;

	for (cnt = 1; cnt < size; cnt++) {
		if (!Read16BitFieldValue(expn_tbl[cnt].ip_cksm_enbl,
														 ENABLE_FIELD)) {
			IPADBG("new expansion table entry index %d\n", cnt);
			return cnt;
		}
	}

	IPAERR("nat expansion table is full\n");
	return 0;
}

uint16_t ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule *clnt_rule,
						struct ipa_nat_indx_tbl_sw_rule *sw_rule,
						struct ipa_nat_ip4_table_cache *tbl_ptr)
{
	struct ipa_nat_indx_tbl_rule *indx_tbl, *indx_expn_tbl;
	uint16_t prev = 0, nxt_indx = 0, new_entry;

	indx_tbl =
	(struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_addr;
	indx_expn_tbl =
	(struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_expn_addr;

	new_entry = src_hash(clnt_rule->private_ip,
											 clnt_rule->private_port,
											 clnt_rule->target_ip,
											 clnt_rule->target_port,
											 clnt_rule->protocol,
											 tbl_ptr->table_entries-1);

	/* check whether there is any collision;
		 if there is no collision, return */
	if (!Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
													 INDX_TBL_TBL_ENTRY_FIELD)) {
		sw_rule->prev_index = 0;
		IPADBG("Source Nat Index Table Entry %d\n", new_entry);
		return new_entry;
	}

	/* check for more than one collision */
	if (Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
													INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
		sw_rule->prev_index = new_entry;
		IPADBG("First collision. Entry %d\n", new_entry);
	} else {
		/* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
		nxt_indx = Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
																	 INDX_TBL_NEXT_INDEX_FILED);

		while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
			prev = nxt_indx;

			nxt_indx -= tbl_ptr->table_entries;
			nxt_indx = Read16BitFieldValue(indx_expn_tbl[nxt_indx].tbl_entry_nxt_indx,
																		 INDX_TBL_NEXT_INDEX_FILED);

			/* Handling error case */
			if (prev == nxt_indx) {
				IPAERR("Error: Prev:%d and next:%d index should not be same\n", prev, nxt_indx);
				return IPA_NAT_INVALID_NAT_ENTRY;
			}
		}

		sw_rule->prev_index = prev;
	}

	/* On collision check for a free entry in the expansion table */
	new_entry = ipa_nati_index_expn_get_free_entry(indx_expn_tbl,
					tbl_ptr->expn_table_entries);

	if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
		/* Expansion table is full; return */
		IPAERR("Index expansion table is full\n");
		IPAERR("Current Table: %d & Expn Entries: %d\n",
			   tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
		return IPA_NAT_INVALID_NAT_ENTRY;
	}
	new_entry += tbl_ptr->table_entries;


	if (sw_rule->prev_index == new_entry) {
		IPAERR("Error: prev_entry:%d ", sw_rule->prev_index);
		IPAERR("and new_entry:%d should not be same ", new_entry);
		IPAERR("infinite loop detected\n");
		return IPA_NAT_INVALID_NAT_ENTRY;
	}

	IPADBG("index table entry %d\n", new_entry);
	return new_entry;
}

/* returns index expn table entry index */
uint16_t ipa_nati_index_expn_get_free_entry(
						struct ipa_nat_indx_tbl_rule *indx_tbl,
						uint16_t size)
{
	int cnt;
	for (cnt = 1; cnt < size; cnt++) {
		if (!Read16BitFieldValue(indx_tbl[cnt].tbl_entry_nxt_indx,
														 INDX_TBL_TBL_ENTRY_FIELD)) {
			return cnt;
		}
	}

	IPAERR("nat index expansion table is full\n");
	return 0;
}

void ipa_nati_write_next_index(uint8_t tbl_indx,
				nat_table_type tbl_type,
				uint16_t value,
				uint32_t offset)
{
	struct ipa_ioc_nat_dma_cmd *cmd;

	IPADBG("Updating next index field of table %d on collision using dma\n", tbl_type);
	IPADBG("table index: %d, value: %d offset: %d\n", tbl_indx, value, offset);

	cmd = (struct ipa_ioc_nat_dma_cmd *)
	malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
				 sizeof(struct ipa_ioc_nat_dma_one));
	if (NULL == cmd) {
		IPAERR("unable to allocate memory\n");
		return;
	}

	cmd->dma[0].table_index = tbl_indx;
	cmd->dma[0].base_addr = tbl_type;
	cmd->dma[0].data = value;
	cmd->dma[0].offset = offset;

	cmd->entries = 1;
	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
		perror("ipa_nati_write_next_index(): ioctl error value");
		IPAERR("unable to call dma ioctl to update next index\n");
		IPAERR("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		goto fail;
	}

fail:
	free(cmd);

	return;
}

void ipa_nati_copy_ipv4_rule_to_hw(
				struct ipa_nat_ip4_table_cache *ipv4_cache,
				struct ipa_nat_sw_rule *rule,
				uint16_t entry, uint8_t tbl_index)
{
	struct ipa_nat_rule *tbl_ptr;
	uint16_t prev_entry = rule->prev_index;
	nat_table_type tbl_type;
	uint32_t offset = 0;

	if (entry < ipv4_cache->table_entries) {
		tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;

		memcpy(&tbl_ptr[entry],
					 rule,
					 sizeof(struct ipa_nat_rule));
	} else {
		tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_expn_rules_addr;
		memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
					 rule,
					 sizeof(struct ipa_nat_rule));
	}

	/* Update the previous entry's next_index */
	if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {

		if (prev_entry < ipv4_cache->table_entries) {
			tbl_type = IPA_NAT_BASE_TBL;
			tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
		} else {
			tbl_type = IPA_NAT_EXPN_TBL;
			/* tbl_ptr is already pointing to the expansion table,
				 no need to initialize it */
			prev_entry = prev_entry - ipv4_cache->table_entries;
		}

		offset = ipa_nati_get_entry_offset(ipv4_cache, tbl_type, prev_entry);
		offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;

		ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
	}

	return;
}

void ipa_nati_copy_ipv4_index_rule_to_hw(
				struct ipa_nat_ip4_table_cache *ipv4_cache,
				struct ipa_nat_indx_tbl_sw_rule *indx_sw_rule,
				uint16_t entry,
				uint8_t tbl_index)
{
	struct ipa_nat_indx_tbl_rule *tbl_ptr;
	struct ipa_nat_sw_indx_tbl_rule sw_rule;
	uint16_t prev_entry = indx_sw_rule->prev_index;
	nat_table_type tbl_type;
	/* 32 bits to match the return type of ipa_nati_get_index_entry_offset() */
	uint32_t offset = 0;

	sw_rule.next_index = indx_sw_rule->next_index;
	sw_rule.tbl_entry = indx_sw_rule->tbl_entry;

	if (entry < ipv4_cache->table_entries) {
		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;

		memcpy(&tbl_ptr[entry],
					 &sw_rule,
					 sizeof(struct ipa_nat_indx_tbl_rule));
	} else {
		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_expn_addr;

		memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
					 &sw_rule,
					 sizeof(struct ipa_nat_indx_tbl_rule));
	}

	/* Update the next field of the previous entry on collision */
	if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
		if (prev_entry < ipv4_cache->table_entries) {
			tbl_type = IPA_NAT_INDX_TBL;
			tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
		} else {
			tbl_type = IPA_NAT_INDEX_EXPN_TBL;
			/* tbl_ptr is already pointing to the expansion table,
			 no need to initialize it */
			prev_entry = prev_entry - ipv4_cache->table_entries;
		}

		offset = ipa_nati_get_index_entry_offset(ipv4_cache, tbl_type, prev_entry);
		offset += IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;

		IPADBG("Updating next index field of index table on collision using dma()\n");
		ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
	}

	return;
}

int ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,
				uint16_t entry)
{
	struct ipa_ioc_nat_dma_cmd *cmd;
	struct ipa_nat_rule *tbl_ptr;
	uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_indx].tbl_addr_offset;
	int ret = 0;

	cmd = (struct ipa_ioc_nat_dma_cmd *)
	malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
				 sizeof(struct ipa_ioc_nat_dma_one));
	if (NULL == cmd) {
		IPAERR("unable to allocate memory\n");
		return -ENOMEM;
	}

	if (entry < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries) {
		tbl_ptr =
			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;

		cmd->dma[0].table_index = tbl_indx;
		cmd->dma[0].base_addr = IPA_NAT_BASE_TBL;
		cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;

		cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
		cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
	} else {
		tbl_ptr =
			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr;
		entry = entry - ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;

		cmd->dma[0].table_index = tbl_indx;
		cmd->dma[0].base_addr = IPA_NAT_EXPN_TBL;
		cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;

		cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
		cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
		cmd->dma[0].offset += offset;
	}

	cmd->entries = 1;
	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
		perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
		IPAERR("unable to call dma ioctl\n");
		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
		ret = -EIO;
		goto fail;
	}
	IPADBG("posted IPA_IOC_NAT_DMA to kernel successfully during add operation\n");


fail:
	free(cmd);

	return ret;
}


int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,
				uint32_t rule_hdl)
{
	uint8_t expn_tbl;
	uint16_t tbl_entry;
	struct ipa_nat_ip4_table_cache *tbl_ptr;
	del_type rule_pos;
	uint8_t tbl_indx = (uint8_t)(tbl_hdl - 1);
	int ret;

	/* Parse the rule handle */
	ipa_nati_parse_ipv4_rule_hdl(tbl_indx, (uint16_t)rule_hdl,
															 &expn_tbl, &tbl_entry);
	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_entry) {
		IPAERR("Invalid Rule Entry\n");
		ret = -EINVAL;
		goto fail;
	}

	if (pthread_mutex_lock(&nat_mutex) != 0) {
		ret = -1;
		goto mutex_lock_error;
	}

	IPADBG("Delete below rule\n");
	IPADBG("tbl_entry:%d expn_tbl:%d\n", tbl_entry, expn_tbl);

	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
	if (!tbl_ptr->valid) {
		IPAERR("invalid table handle\n");
		ret = -EINVAL;
		if (pthread_mutex_unlock(&nat_mutex) != 0)
			goto mutex_unlock_error;
		goto fail;
	}

	ipa_nati_find_rule_pos(tbl_ptr, expn_tbl,
												 tbl_entry, &rule_pos);
	IPADBG("rule_pos:%d\n", rule_pos);

	if (ipa_nati_post_del_dma_cmd(tbl_indx, tbl_entry,
					expn_tbl, rule_pos)) {
		ret = -EINVAL;
		if (pthread_mutex_unlock(&nat_mutex) != 0)
			goto mutex_unlock_error;
		goto fail;
	}

	ipa_nati_del_dead_ipv4_head_nodes(tbl_indx);

	/* Reset rule_id_array entry */
	ipv4_nat_cache.ip4_tbl[tbl_indx].rule_id_array[rule_hdl-1] =
	IPA_NAT_INVALID_NAT_ENTRY;

#ifdef NAT_DUMP
	IPADBG("Dumping Table after deleting rule\n");
	ipa_nat_dump_ipv4_table(tbl_hdl);
#endif

	if (pthread_mutex_unlock(&nat_mutex) != 0) {
		ret = -1;
		goto mutex_unlock_error;
	}

	return 0;

mutex_lock_error:
	IPAERR("unable to lock the nat mutex\n");
	return ret;

mutex_unlock_error:
	IPAERR("unable to unlock the nat mutex\n");

fail:
	return ret;
}

void ReorderCmds(struct ipa_ioc_nat_dma_cmd *cmd, int size)
{
	int indx_tbl_start = 0, cnt, cnt1;
	struct ipa_ioc_nat_dma_cmd *tmp;

	IPADBG("called ReorderCmds() with entries: %d\n", cmd->entries);

	for (cnt = 0; cnt < cmd->entries; cnt++) {
		if (cmd->dma[cnt].base_addr == IPA_NAT_INDX_TBL ||
				cmd->dma[cnt].base_addr == IPA_NAT_INDEX_EXPN_TBL) {
			indx_tbl_start = cnt;
			break;
		}
	}

	if (indx_tbl_start == 0) {
		IPADBG("Reorder not needed\n");
		return;
	}

	tmp = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
	if (tmp == NULL) {
		IPAERR("unable to allocate memory\n");
		return;
	}

	cnt1 = 0;
	tmp->entries = cmd->entries;
	for (cnt = indx_tbl_start; cnt < cmd->entries; cnt++) {
		tmp->dma[cnt1] = cmd->dma[cnt];
		cnt1++;
	}

	for (cnt = 0; cnt < indx_tbl_start; cnt++) {
		tmp->dma[cnt1] = cmd->dma[cnt];
		cnt1++;
	}

	memset(cmd, 0, size);
	memcpy(cmd, tmp, size);
	free(tmp);

	return;
}
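
/*
 * Effect of the rotation above: if cmd->dma[] held
 * [BASE, BASE, INDX, INDX_EXPN], it becomes
 * [INDX, INDX_EXPN, BASE, BASE], so the index-table patches are issued
 * before the base-table ones.
 */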
1643 
ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,uint16_t cur_tbl_entry,uint8_t expn_tbl,del_type rule_pos)1644 int ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,
1645 				uint16_t cur_tbl_entry,
1646 				uint8_t expn_tbl,
1647 				del_type rule_pos)
1648 {
1649 
1650 #define MAX_DMA_ENTRIES_FOR_DEL 3
1651 
1652 	struct ipa_nat_ip4_table_cache *cache_ptr;
1653 	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
1654 	struct ipa_nat_rule *tbl_ptr;
1655 	int ret = 0, size = 0;
1656 
1657 	uint16_t indx_tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
1658 	del_type indx_rule_pos;
1659 
1660 	struct ipa_ioc_nat_dma_cmd *cmd;
1661 	uint8_t no_of_cmds = 0;
1662 
1663 	uint16_t prev_entry = IPA_NAT_INVALID_NAT_ENTRY;
1664 	uint16_t next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1665 	uint16_t indx_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1666 	uint16_t indx_next_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1667 	uint16_t table_entry;
1668 
1669 	size = sizeof(struct ipa_ioc_nat_dma_cmd)+
1670 	(MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
1671 
1672 	cmd = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
1673 	if (NULL == cmd) {
1674 		IPAERR("unable to allocate memory\n");
1675 		return -ENOMEM;
1676 	}
1677 
1678 	cache_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
1679 	if (!expn_tbl) {
1680 		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
1681 	} else {
1682 		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
1683 	}
1684 
1685 
1686 	if (!Read16BitFieldValue(tbl_ptr[cur_tbl_entry].ip_cksm_enbl,
1687 													 ENABLE_FIELD)) {
1688 		IPAERR("Deleting invalid(not enabled) rule\n");
1689 		ret = -EINVAL;
1690 		goto fail;
1691 	}
1692 
1693 	indx_tbl_entry =
1694 		Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1695 		SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD);
1696 
1697 	/* ================================================
1698 	 Base Table rule Deletion
1699 	 ================================================*/
1700 	/* Just delete the current rule by disabling the flag field */
1701 	if (IPA_NAT_DEL_TYPE_ONLY_ONE == rule_pos) {
1702 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1703 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1704 		cmd->dma[no_of_cmds].data = IPA_NAT_FLAG_DISABLE_BIT_MASK;
1705 
1706 		cmd->dma[no_of_cmds].offset =
1707 			 ipa_nati_get_entry_offset(cache_ptr,
1708 					cmd->dma[no_of_cmds].base_addr,
1709 					cur_tbl_entry);
1710 		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
1711 	}
1712 
1713 	/* Just update the protocol field to invalid */
1714 	else if (IPA_NAT_DEL_TYPE_HEAD == rule_pos) {
1715 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1716 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1717 		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_PROTO_FIELD_VALUE;
1718 
1719 		cmd->dma[no_of_cmds].offset =
1720 			 ipa_nati_get_entry_offset(cache_ptr,
1721 					cmd->dma[no_of_cmds].base_addr,
1722 					cur_tbl_entry);
1723 		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_PROTO_FIELD_OFFSET;
1724 
1725 		IPADBG("writing invalid proto: 0x%x\n", cmd->dma[no_of_cmds].data);
1726 	}
1727 
1728 	/*
1729 		 Update the previous entry's next_index field with the
1730 		 current entry's next_index value
1731 	*/
1732 	else if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
1733 		prev_entry =
1734 			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1735 				SW_SPEC_PARAM_PREV_INDEX_FIELD);
1736 
1737 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1738 		cmd->dma[no_of_cmds].data =
1739 			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
1740 					NEXT_INDEX_FIELD);
1741 
1742 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1743 		if (prev_entry >= cache_ptr->table_entries) {
1744 			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
1745 			prev_entry -= cache_ptr->table_entries;
1746 		}
1747 
1748 		cmd->dma[no_of_cmds].offset =
1749 			ipa_nati_get_entry_offset(cache_ptr,
1750 				cmd->dma[no_of_cmds].base_addr, prev_entry);
1751 
1752 		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
1753 	}
1754 
1755 	/*
1756 		 Reset the previous entry's next_index field to 0
1757 	*/
1758 	else if (IPA_NAT_DEL_TYPE_LAST == rule_pos) {
1759 		prev_entry =
1760 			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1761 				SW_SPEC_PARAM_PREV_INDEX_FIELD);
1762 
1763 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1764 		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1765 
1766 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1767 		if (prev_entry >= cache_ptr->table_entries) {
1768 			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
1769 			prev_entry -= cache_ptr->table_entries;
1770 		}
1771 
1772 		cmd->dma[no_of_cmds].offset =
1773 			ipa_nati_get_entry_offset(cache_ptr,
1774 				cmd->dma[no_of_cmds].base_addr, prev_entry);
1775 
1776 		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
1777 	}
1778 
1779 	/* ================================================
1780 	 Base Table rule Deletion End
1781 	 ================================================*/
1782 
1783 	/* ================================================
1784 	 Index Table rule Deletion
1785 	 ================================================*/
1786 	ipa_nati_find_index_rule_pos(cache_ptr,
1787 															 indx_tbl_entry,
1788 															 &indx_rule_pos);
1789 	IPADBG("Index table entry: 0x%x\n", indx_tbl_entry);
1790 	IPADBG("and position: %d\n", indx_rule_pos);
1791 	if (indx_tbl_entry >= cache_ptr->table_entries) {
1792 		indx_tbl_entry -= cache_ptr->table_entries;
1793 		indx_tbl_ptr =
1794 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
1795 	} else {
1796 		indx_tbl_ptr =
1797 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
1798 	}
1799 
1800 	/* Just delete the current rule by resetting nat_table_index field to 0 */
1801 	if (IPA_NAT_DEL_TYPE_ONLY_ONE == indx_rule_pos) {
1802 		no_of_cmds++;
1803 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1804 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1805 		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1806 
1807 		cmd->dma[no_of_cmds].offset =
1808 			ipa_nati_get_index_entry_offset(cache_ptr,
1809 			cmd->dma[no_of_cmds].base_addr,
1810 			indx_tbl_entry);
1811 
1812 		cmd->dma[no_of_cmds].offset +=
1813 			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
1814 	}
1815 
1816 	/* Copy the next entry's values into the current entry */
1817 	else if (IPA_NAT_DEL_TYPE_HEAD == indx_rule_pos) {
1818 		next_entry =
1819 			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
1820 				INDX_TBL_NEXT_INDEX_FILED);
1821 
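		/* the successor of a head entry is always allocated from the
			 index expansion table, hence the unconditional offset adjustment */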
1822 		next_entry -= cache_ptr->table_entries;
1823 
1824 		no_of_cmds++;
1825 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1826 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1827 
1828 		/* Copy the nat_table_index field value of next entry */
1829 		indx_tbl_ptr =
1830 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
1831 		cmd->dma[no_of_cmds].data =
1832 			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
1833 				INDX_TBL_TBL_ENTRY_FIELD);
1834 
1835 		cmd->dma[no_of_cmds].offset =
1836 			ipa_nati_get_index_entry_offset(cache_ptr,
1837 					cmd->dma[no_of_cmds].base_addr,
1838 					indx_tbl_entry);
1839 
1840 		cmd->dma[no_of_cmds].offset +=
1841 			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
1842 
1843 		/* Copy the next_index field value of next entry */
1844 		no_of_cmds++;
1845 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1846 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1847 		cmd->dma[no_of_cmds].data =
1848 			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
1849 				INDX_TBL_NEXT_INDEX_FILED);
1850 
1851 		cmd->dma[no_of_cmds].offset =
1852 			ipa_nati_get_index_entry_offset(cache_ptr,
1853 				cmd->dma[no_of_cmds].base_addr, indx_tbl_entry);
1854 
1855 		cmd->dma[no_of_cmds].offset +=
1856 			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1857 		indx_next_entry = next_entry;
1858 	}
1859 
1860 	/*
1861 		 Update the previous entry's next_index field with the
1862 		 current entry's next_index value
1863 	*/
1864 	else if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
1865 		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
1866 
1867 		no_of_cmds++;
1868 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1869 		cmd->dma[no_of_cmds].data =
1870 			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
1871 				INDX_TBL_NEXT_INDEX_FILED);
1872 
1873 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1874 		if (prev_entry >= cache_ptr->table_entries) {
1875 			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
1876 			prev_entry -= cache_ptr->table_entries;
1877 		}
1878 
1879 		IPADBG("prev_entry: %d update with cur next_index: %d\n",
1880 				prev_entry, cmd->dma[no_of_cmds].data);
1881 		IPADBG("prev_entry: %d exists in table_type: %d\n",
1882 				prev_entry, cmd->dma[no_of_cmds].base_addr);
1883 
1884 		cmd->dma[no_of_cmds].offset =
1885 			ipa_nati_get_index_entry_offset(cache_ptr,
1886 				cmd->dma[no_of_cmds].base_addr, prev_entry);
1887 
1888 		cmd->dma[no_of_cmds].offset +=
1889 			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1890 	}
1891 
1892 	/* Reset the previous entry's next_index field to 0 */
1893 	else if (IPA_NAT_DEL_TYPE_LAST == indx_rule_pos) {
1894 		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
1895 
1896 		no_of_cmds++;
1897 		cmd->dma[no_of_cmds].table_index = tbl_indx;
1898 		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1899 
1900 		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1901 		if (prev_entry >= cache_ptr->table_entries) {
1902 			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
1903 			prev_entry -= cache_ptr->table_entries;
1904 		}
1905 
1906 		IPADBG("Reseting prev_entry: %d next_index\n", prev_entry);
1907 		IPADBG("prev_entry: %d exist in table_type:%d\n",
1908 			prev_entry, cmd->dma[no_of_cmds].base_addr);
1909 
1910 		cmd->dma[no_of_cmds].offset =
1911 			 ipa_nati_get_index_entry_offset(cache_ptr,
1912 					cmd->dma[no_of_cmds].base_addr, prev_entry);
1913 
1914 		cmd->dma[no_of_cmds].offset +=
1915 			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1916 	}
1917 
1918 	/* ================================================
1919 	 Index Table rule Deletion End
1920 	 ================================================*/
1921 	cmd->entries = no_of_cmds + 1;
1922 
1923 	if (cmd->entries > 1) {
1924 		ReorderCmds(cmd, size);
1925 	}
1926 	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
1927 		perror("ipa_nati_post_del_dma_cmd(): ioctl error value");
1928 		IPAERR("unable to post cmd\n");
1929 		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
1930 		ret = -EIO;
1931 		goto fail;
1932 	}
1933 
1934 	/* If the entry was deleted from the middle of the list,
1935 		 propagate its prev_index value to the next entry's
1936 		 sw specific parameters */
1937 	if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
1938 		/* Retrieve the current entry prev_entry value */
1939 		prev_entry =
1940 			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1941 				SW_SPEC_PARAM_PREV_INDEX_FIELD);
1942 
1943 		/* Retrieve the next entry */
1944 		next_entry =
1945 			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
1946 				NEXT_INDEX_FIELD);
1947 
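		/* the successor of a middle entry is always allocated from
			 the expansion table, hence the unconditional offset adjustment */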
1948 		next_entry -= cache_ptr->table_entries;
1949 		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
1950 
1951 		/* Copy the current entry's prev_index value to the next entry */
1952 		UpdateSwSpecParams(&tbl_ptr[next_entry],
1953 											 IPA_NAT_SW_PARAM_PREV_INDX_BYTE,
1954 											 prev_entry);
1955 	}
1956 
1957 	/* Reset the remaining fields of the deleted entry;
1958 		 in the IPA_NAT_DEL_TYPE_HEAD case the entry is kept, so don't reset */
1959 	if (IPA_NAT_DEL_TYPE_HEAD != rule_pos) {
1960 		memset(&tbl_ptr[cur_tbl_entry], 0, sizeof(struct ipa_nat_rule));
1961 	}
1962 
1963 	if (indx_rule_pos == IPA_NAT_DEL_TYPE_HEAD) {
1964 
1965 		/* Update the next-next entry's previous index to the
1966 			 current entry, since the next entry's values were moved
1967 			 into the current entry */
1968 		indx_next_next_entry =
1969 			Read16BitFieldValue(indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx,
1970 				INDX_TBL_NEXT_INDEX_FILED);
1971 
1972 		if (indx_next_next_entry != 0 &&
1973 			indx_next_next_entry >= cache_ptr->table_entries) {
1974 
1975 			IPADBG("Next Next entry: %d\n", indx_next_next_entry);
1976 			indx_next_next_entry -= cache_ptr->table_entries;
1977 
1978 			IPADBG("Updating entry: %d prev index to: %d\n",
1979 				indx_next_next_entry, indx_tbl_entry);
1980 			cache_ptr->index_expn_table_meta[indx_next_next_entry].prev_index =
1981 				 indx_tbl_entry;
1982 		}
1983 
1984 		/* Now reset the next entry, since its contents were
1985 			 copied into the current entry */
1986 		IPADBG("Resetting index table entry (absolute index): %d\n",
1987 			(cache_ptr->table_entries + indx_next_entry));
1988 
1989 		/* This resets both the table entry and next index fields */
1990 		indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx = 0;
1991 
1992 		/*
1993 			 In the IPA_NAT_DEL_TYPE_HEAD case, update the base table
1994 			 entry's sw specific parameters (index table entry)
1995 		*/
1996 		indx_tbl_ptr =
1997 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
1998 		table_entry =
1999 				Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
2000 						INDX_TBL_TBL_ENTRY_FIELD);
2001 
2002 		if (table_entry >= cache_ptr->table_entries) {
2003 			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
2004 			table_entry -= cache_ptr->table_entries;
2005 		} else {
2006 			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
2007 		}
2008 
2009 		UpdateSwSpecParams(&tbl_ptr[table_entry],
2010 				IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE,
2011 				indx_tbl_entry);
2012 	} else {
2013 		/* Update the next entry's prev_index value (in
2014 			 index_expn_table_meta) with the current entry's
2015 			 prev_index value */
2016 		if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
2017 			next_entry =
2018 				Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
2019 					INDX_TBL_NEXT_INDEX_FILED);
2020 
2021 			if (next_entry >= cache_ptr->table_entries) {
2022 				next_entry -= cache_ptr->table_entries;
2023 			}
2024 
2025 			cache_ptr->index_expn_table_meta[next_entry].prev_index =
2026 				 cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
2027 
2028 			cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index =
2029 				 IPA_NAT_INVALID_NAT_ENTRY;
2030 		}
2031 
2032 		IPADBG("At, indx_tbl_entry value: %d\n", indx_tbl_entry);
2033 		IPADBG("At, indx_tbl_entry member address: %p\n",
2034 					 &indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx);
2035 
2036 		indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx = 0;
2037 
2038 	}
2039 
2040 fail:
2041 	free(cmd);
2042 
2043 	return ret;
2044 }
2045 
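/**
 * ipa_nati_find_index_rule_pos() - finds the position of an index
 * table entry within its list
 * @cache_ptr: [in] nat table cache pointer
 * @tbl_entry: [in] index table entry
 * @rule_pos: [out] position of the entry within its list
 *
 * Returns: None
 */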
2046 void ipa_nati_find_index_rule_pos(
2047 				struct ipa_nat_ip4_table_cache *cache_ptr,
2048 				uint16_t tbl_entry,
2049 				del_type *rule_pos)
2050 {
2051 	struct ipa_nat_indx_tbl_rule *tbl_ptr;
2052 
2053 	if (tbl_entry >= cache_ptr->table_entries) {
2054 		tbl_ptr =
2055 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
2056 
2057 		tbl_entry -= cache_ptr->table_entries;
2058 		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
2059 					INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
2060 			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
2061 		} else {
2062 			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
2063 		}
2064 	} else {
2065 		tbl_ptr =
2066 			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
2067 
2068 		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
2069 					INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
2070 			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
2071 		} else {
2072 			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
2073 		}
2074 	}
2075 }
2076 
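/**
 * ipa_nati_find_rule_pos() - finds the position of a base/expansion
 * table entry within its list
 * @cache_ptr: [in] nat table cache pointer
 * @expn_tbl: [in] true if the entry resides in the expansion table
 * @tbl_entry: [in] table entry
 * @rule_pos: [out] position of the entry within its list
 *
 * Returns: None
 */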
2077 void ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache *cache_ptr,
2078 														uint8_t expn_tbl,
2079 														uint16_t tbl_entry,
2080 														del_type *rule_pos)
2081 {
2082 	struct ipa_nat_rule *tbl_ptr;
2083 
2084 	if (expn_tbl) {
2085 		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
2086 		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
2087 														NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2088 			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
2089 		} else {
2090 			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
2091 		}
2092 	} else {
2093 		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
2094 		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
2095 					NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2096 			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
2097 		} else {
2098 			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
2099 		}
2100 	}
2101 }
2102 
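/**
 * ipa_nati_del_dead_ipv4_head_nodes() - clears dead head entries
 * @tbl_indx: [in] nat table index
 *
 * A head entry whose protocol field was invalidated on delete is
 * kept until its list drains; once its next_index is also empty,
 * the entry can be zeroed out.
 *
 * Returns: None
 */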
2103 void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx)
2104 {
2105 	struct ipa_nat_rule *tbl_ptr;
2106 	uint16_t cnt;
2107 
2108 	tbl_ptr =
2109 	(struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;
2110 
2111 	for (cnt = 0;
2112 			 cnt < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
2113 			 cnt++) {
2114 
2115 		if (Read8BitFieldValue(tbl_ptr[cnt].ts_proto,
2116 					PROTOCOL_FIELD) == IPA_NAT_INVALID_PROTO_FIELD_CMP
2117 				&&
2118 				Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
2119 					NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2120 			/* Delete the IPA_NAT_DEL_TYPE_HEAD node */
2121 			IPADBG("deleting the dead node 0x%x\n", cnt);
2122 			memset(&tbl_ptr[cnt], 0, sizeof(struct ipa_nat_rule));
2123 		}
2124 	} /* end of for loop */
2125 
2126 	return;
2127 }
2128 
2130 /* ========================================================
2131 						Debug functions
2132 	 ========================================================*/
2133 #ifdef NAT_DUMP
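/**
 * ipa_nat_dump_ipv4_table() - dumps all active rules of the given table
 * @tbl_hdl: [in] nat table handle
 *
 * Walks the base, expansion, index and index expansion tables and
 * prints every enabled rule.
 *
 * Returns: None
 */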
2134 void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl)
2135 {
2136 	struct ipa_nat_rule *tbl_ptr;
2137 	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
2138 	int cnt;
2139 	uint8_t atl_one = 0;
2140 
2141 	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
2142 			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
2143 		IPAERR("invalid table handle passed\n");
2144 		return;
2145 	}
2146 
2147 	/* Print ipv4 rules */
2148 	IPADBG("Dumping ipv4 active rules:\n");
2149 	tbl_ptr = (struct ipa_nat_rule *)
2150 	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
2151 	for (cnt = 0;
2152 			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2153 			 cnt++) {
2154 		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2155 					ENABLE_FIELD)) {
2156 			atl_one = 1;
2157 			ipa_nati_print_rule(&tbl_ptr[cnt], cnt);
2158 		}
2159 	}
2160 	if (!atl_one) {
2161 		IPADBG("No active base rules, total: %d\n",
2162 					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
2163 	}
2164 	atl_one = 0;
2165 
2166 	/* Print ipv4 expansion rules */
2167 	IPADBG("Dumping ipv4 active expansion rules:\n");
2168 	tbl_ptr = (struct ipa_nat_rule *)
2169 	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
2170 	for (cnt = 0;
2171 			 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2172 			 cnt++) {
2173 		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2174 					ENABLE_FIELD)) {
2175 			atl_one = 1;
2176 			ipa_nati_print_rule(&tbl_ptr[cnt],
2177 				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries));
2178 		}
2179 	}
2180 	if (!atl_one) {
2181 		IPADBG("No active base expansion rules, total: %d\n",
2182 					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
2183 	}
2184 	atl_one = 0;
2185 
2186 	/* Print ipv4 index rules */
2187 	IPADBG("Dumping ipv4 index active rules:\n");
2188 	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2189 	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
2190 	for (cnt = 0;
2191 			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2192 			 cnt++) {
2193 		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2194 					INDX_TBL_TBL_ENTRY_FIELD)) {
2195 			atl_one = 1;
2196 			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt], cnt, 0);
2197 		}
2198 	}
2199 	if (!atl_one) {
2200 		IPADBG("No active index table rules, total:%d\n",
2201 					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
2202 	}
2203 	atl_one = 0;
2204 
2206 	/* Print ipv4 index expansion rules */
2207 	IPADBG("Dumping ipv4 index expansion active rules:\n");
2208 	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2209 	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
2210 	for (cnt = 0;
2211 			 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2212 			 cnt++) {
2213 		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2214 					INDX_TBL_TBL_ENTRY_FIELD)) {
2215 			atl_one = 1;
2216 			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt],
2217 				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries),
2218 				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_expn_table_meta[cnt].prev_index);
2219 		}
2220 	}
2221 	if (!atl_one) {
2222 		IPADBG("No active index expansion rules, total:%d\n",
2223 					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
2224 	}
2225 	atl_one = 0;
2226 
2227 }
2228 
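/**
 * ipa_nati_print_rule() - prints a single base/expansion table rule
 * @param: [in] rule to print
 * @rule_id: [in] rule index used in the dump
 *
 * Returns: None
 */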
2229 void ipa_nati_print_rule(
2230 		struct ipa_nat_rule *param,
2231 		uint32_t rule_id)
2232 {
2233 	struct ipa_nat_sw_rule sw_rule;
2234 	uint32_t ip_addr;
2235 	memcpy(&sw_rule, param, sizeof(sw_rule));
2236 
2237 	IPADUMP("rule-id:%d  ", rule_id);
2238 	ip_addr = sw_rule.target_ip;
2239 	IPADUMP("Trgt-IP:%d.%d.%d.%d	",
2240 				((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
2241 			((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
2242 
2243 	IPADUMP("Trgt-Port:%d  Priv-Port:%d  ", sw_rule.target_port, sw_rule.private_port);
2244 
2245 	ip_addr = sw_rule.private_ip;
2246 	IPADUMP("Priv-IP:%d.%d.%d.%d ",
2247 							((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
2248 							((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
2249 
2250 	IPADUMP("Pub-Port:%d	Nxt-indx:%d  ", sw_rule.public_port, sw_rule.next_index);
2251 	IPADUMP("IP-cksm-delta:0x%x  En-bit:0x%x	", sw_rule.ip_chksum, sw_rule.enable);
2252 	IPADUMP("TS:0x%x	Proto:0x%x	", sw_rule.time_stamp, sw_rule.protocol);
2253 	IPADUMP("Prv-indx:%d	indx_tbl_entry:%d	", sw_rule.prev_index, sw_rule.indx_tbl_entry);
2254 	IPADUMP("Tcp-udp-cksum-delta:0x%x", sw_rule.tcp_udp_chksum);
2255 	IPADUMP("\n");
2256 	return;
2257 }
2258 
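/**
 * ipa_nati_print_index_rule() - prints a single index table rule
 * @param: [in] index rule to print
 * @rule_id: [in] rule index used in the dump
 * @prev_indx: [in] previous index kept in the expansion table meta data
 *
 * Returns: None
 */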
2259 void ipa_nati_print_index_rule(
2260 		struct ipa_nat_indx_tbl_rule *param,
2261 		uint32_t rule_id, uint16_t prev_indx)
2262 {
2263 	struct ipa_nat_sw_indx_tbl_rule sw_rule;
2264 	memcpy(&sw_rule, param, sizeof(sw_rule));
2265 
2266 	IPADUMP("rule-id:%d  Table_entry:%d  Next_index:%d, prev_indx:%d",
2267 					  rule_id, sw_rule.tbl_entry, sw_rule.next_index, prev_indx);
2268 	IPADUMP("\n");
2269 	return;
2270 }
2271 
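/**
 * ipa_nati_query_nat_rules() - counts the active rules of the given
 * table type
 * @tbl_hdl: [in] nat table handle
 * @tbl_type: [in] which table to count (base, expansion, index or
 * index expansion)
 *
 * Returns: number of active rules; 0 on invalid table handle
 */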
2272 int ipa_nati_query_nat_rules(
2273 		uint32_t tbl_hdl,
2274 		nat_table_type tbl_type)
2275 {
2276 	struct ipa_nat_rule *tbl_ptr;
2277 	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
2278 	int cnt = 0, ret = 0;
2279 
2280 	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
2281 			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
2282 		IPAERR("invalid table handle passed\n");
2283 		return ret;
2284 	}
2285 
2286 	/* Print ipv4 rules */
2287 	if (tbl_type == IPA_NAT_BASE_TBL) {
2288 		IPADBG("Counting ipv4 active rules:\n");
2289 		tbl_ptr = (struct ipa_nat_rule *)
2290 			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_rules_addr;
2291 		for (cnt = 0;
2292 				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2293 				 cnt++) {
2294 			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2295 						ENABLE_FIELD)) {
2296 				ret++;
2297 			}
2298 		}
2299 		if (!ret) {
2300 			IPADBG("No active base rules\n");
2301 		}
2302 
2303 		IPADBG("Number of active base rules: %d\n", ret);
2304 	}
2305 
2306 	/* Print ipv4 expansion rules */
2307 	if (tbl_type == IPA_NAT_EXPN_TBL) {
2308 		IPADBG("Counting ipv4 active expansion rules:\n");
2309 		tbl_ptr = (struct ipa_nat_rule *)
2310 			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_expn_rules_addr;
2311 		for (cnt = 0;
2312 				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2313 				 cnt++) {
2314 			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2315 						ENABLE_FIELD)) {
2316 				ret++;
2317 			}
2318 		}
2319 		if (!ret) {
2320 			IPADBG("No active base expansion rules\n");
2321 		}
2322 
2323 		IPADBG("Number of active base expansion rules: %d\n", ret);
2324 	}
2325 
2326 	/* Print ipv4 index rules */
2327 	if (tbl_type == IPA_NAT_INDX_TBL) {
2328 		IPADBG("Counting ipv4 index active rules:\n");
2329 		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2330 			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_addr;
2331 		for (cnt = 0;
2332 				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2333 				 cnt++) {
2334 			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2335 						INDX_TBL_TBL_ENTRY_FIELD)) {
2336 				ret++;
2337 			}
2338 		}
2339 		if (!ret) {
2340 			IPADBG("No active index table rules\n");
2341 		}
2342 
2343 		IPADBG("Number of active index table rules: %d\n", ret);
2344 	}
2345 
2346 	/* Print ipv4 index expansion rules */
2347 	if (tbl_type == IPA_NAT_INDEX_EXPN_TBL) {
2348 		IPADBG("Counting ipv4 index expansion active rules:\n");
2349 		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2350 			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_expn_addr;
2351 		for (cnt = 0;
2352 				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2353 				 cnt++) {
2354 			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2355 						INDX_TBL_TBL_ENTRY_FIELD)) {
2356 				ret++;
2357 			}
2358 		}
2359 
2360 		if (!ret)
2361 			IPADBG("No active index expansion rules\n");
2362 
2363 		IPADBG("Number of active index expansion rules: %d\n", ret);
2364 	}
2365 
2366 	return ret;
2367 }
2368 #endif
2369