1 /*
2 Copyright (c) 2013 - 2019, The Linux Foundation. All rights reserved.
3
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are
6 met:
7 * Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10 copyright notice, this list of conditions and the following
11 disclaimer in the documentation and/or other materials provided
12 with the distribution.
13 * Neither the name of The Linux Foundation nor the names of its
14 contributors may be used to endorse or promote products derived
15 from this software without specific prior written permission.
16
17 THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18 WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21 BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24 BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26 OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27 IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include "ipa_nat_drv.h"
31 #include "ipa_nat_drvi.h"
32 #include <linux/msm_ipa.h>
33
34 #ifdef USE_GLIB
35 #include <glib.h>
36 #define strlcpy g_strlcpy
37 #else
38 #ifndef FEATURE_IPA_ANDROID
strlcpy(char * dst,const char * src,size_t size)39 static size_t strlcpy(char * dst, const char * src, size_t size)
40 {
41 size_t i;
42
43 if (size < 1)
44 return 0;
45 for (i = 0; i < (size - 1) && src[i] != '\0'; i++)
46 dst[i] = src[i];
47 for (; i < size; i++)
48 dst[i] = '\0';
49 return strlen(dst);
50 }
51 #endif
52 #endif
53
54 struct ipa_nat_cache ipv4_nat_cache;
55 pthread_mutex_t nat_mutex = PTHREAD_MUTEX_INITIALIZER;
56
57 static ipa_nat_pdn_entry pdns[IPA_MAX_PDN_NUM];
58
59 /* ------------------------------------------
60 UTILITY FUNCTIONS START
61 --------------------------------------------*/
62
63 /**
64 * UpdateSwSpecParams() - updates sw specific params
65 * @rule: [in/out] nat table rule
66 * @param_type: [in] which param need to update
67 * @value: [in] value of param
68 *
69 * Update SW specific params in the passed rule.
70 *
71 * Returns: None
72 */
UpdateSwSpecParams(struct ipa_nat_rule * rule,uint8_t param_type,uint32_t value)73 void UpdateSwSpecParams(struct ipa_nat_rule *rule,
74 uint8_t param_type,
75 uint32_t value)
76 {
77 uint32_t temp = rule->sw_spec_params;
78
79 if (IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE == param_type) {
80 value = (value << INDX_TBL_ENTRY_SIZE_IN_BITS);
81 temp &= 0x0000FFFF;
82 } else {
83 temp &= 0xFFFF0000;
84 }
85
86 temp = (temp | value);
87 rule->sw_spec_params = temp;
88 return;
89 }
90
91 /**
92 * Read8BitFieldValue()
93 * @rule: [in/out]
94 * @param_type: [in]
95 * @value: [in]
96 *
97 *
98 *
99 * Returns: None
100 */
101
Read8BitFieldValue(uint32_t param,ipa_nat_rule_field_type fld_type)102 uint8_t Read8BitFieldValue(uint32_t param,
103 ipa_nat_rule_field_type fld_type)
104 {
105 void *temp = (void *)¶m;
106
107 switch (fld_type) {
108
109 case PROTOCOL_FIELD:
110 return ((time_stamp_proto *)temp)->protocol;
111
112 default:
113 IPAERR("Invalid Field type passed\n");
114 return 0;
115 }
116 }
117
Read16BitFieldValue(uint32_t param,ipa_nat_rule_field_type fld_type)118 uint16_t Read16BitFieldValue(uint32_t param,
119 ipa_nat_rule_field_type fld_type)
120 {
121 void *temp = (void *)¶m;
122
123 switch (fld_type) {
124
125 case NEXT_INDEX_FIELD:
126 return ((next_index_pub_port *)temp)->next_index;
127
128 case PUBLIC_PORT_FILED:
129 return ((next_index_pub_port *)temp)->public_port;
130
131 case ENABLE_FIELD:
132 return ((ipcksum_enbl *)temp)->enable;
133
134 case SW_SPEC_PARAM_PREV_INDEX_FIELD:
135 return ((sw_spec_params *)temp)->prev_index;
136
137 case SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD:
138 return ((sw_spec_params *)temp)->index_table_entry;
139
140 case INDX_TBL_TBL_ENTRY_FIELD:
141 return ((tbl_ent_nxt_indx *)temp)->tbl_entry;
142
143 case INDX_TBL_NEXT_INDEX_FILED:
144 return ((tbl_ent_nxt_indx *)temp)->next_index;
145
146 #ifdef NAT_DUMP
147 case IP_CHKSUM_FIELD:
148 return ((ipcksum_enbl *)temp)->ip_chksum;
149 #endif
150
151 default:
152 IPAERR("Invalid Field type passed\n");
153 return 0;
154 }
155 }
156
Read32BitFieldValue(uint32_t param,ipa_nat_rule_field_type fld_type)157 uint32_t Read32BitFieldValue(uint32_t param,
158 ipa_nat_rule_field_type fld_type)
159 {
160
161 void *temp = (void *)¶m;
162
163 switch (fld_type) {
164
165 case TIME_STAMP_FIELD:
166 return ((time_stamp_proto *)temp)->time_stamp;
167
168 default:
169 IPAERR("Invalid Field type passed\n");
170 return 0;
171 }
172 }
173
174 /**
175 * GetIPAVer(void) - store IPA HW ver in cache
176 *
177 *
178 * Returns: 0 on success, negative on failure
179 */
GetIPAVer(void)180 int GetIPAVer(void)
181 {
182 int ret;
183
184 ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_HW_VERSION, &ipv4_nat_cache.ver);
185 if (ret != 0) {
186 perror("GetIPAVer(): ioctl error value");
187 IPAERR("unable to get IPA version. Error ;%d\n", ret);
188 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
189 return -EINVAL;
190 }
191 IPADBG("IPA version is %d\n", ipv4_nat_cache.ver);
192 return 0;
193 }
194
195 /**
196 * CreateNatDevice() - Create nat devices
197 * @mem: [in] name of device that need to create
198 *
199 * Create Nat device and Register for file create
200 * notification in given directory and wait till
201 * receive notification
202 *
203 * Returns: 0 on success, negative on failure
204 */
CreateNatDevice(struct ipa_ioc_nat_alloc_mem * mem)205 int CreateNatDevice(struct ipa_ioc_nat_alloc_mem *mem)
206 {
207 int ret;
208
209 ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_ALLOC_NAT_MEM, mem);
210 if (ret != 0) {
211 perror("CreateNatDevice(): ioctl error value");
212 IPAERR("unable to post nat mem init. Error ;%d\n", ret);
213 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
214 return -EINVAL;
215 }
216 IPADBG("posted IPA_IOC_ALLOC_NAT_MEM to kernel successfully\n");
217 return 0;
218 }
219
220 /**
221 * GetNearest2Power() - Returns the nearest power of 2
222 * @num: [in] given number
223 * @ret: [out] nearest power of 2
224 *
225 * Returns the nearest power of 2 for a
226 * given number
227 *
228 * Returns: 0 on success, negative on failure
229 */
GetNearest2Power(uint16_t num,uint16_t * ret)230 int GetNearest2Power(uint16_t num, uint16_t *ret)
231 {
232 uint16_t number = num;
233 uint16_t tmp = 1;
234 *ret = 0;
235
236 if (0 == num) {
237 return -EINVAL;
238 }
239
240 if (1 == num) {
241 *ret = 2;
242 return 0;
243 }
244
245 for (;;) {
246 if (1 == num) {
247 if (number != tmp) {
248 tmp *= 2;
249 }
250
251 *ret = tmp;
252 return 0;
253 }
254
255 num >>= 1;
256 tmp *= 2;
257 }
258
259 return -EINVAL;
260 }
261
262 /**
263 * GetNearestEven() - Returns the nearest even number
264 * @num: [in] given number
265 * @ret: [out] nearest even number
266 *
267 * Returns the nearest even number for a given number
268 *
269 * Returns: 0 on success, negative on failure
270 */
GetNearestEven(uint16_t num,uint16_t * ret)271 void GetNearestEven(uint16_t num, uint16_t *ret)
272 {
273
274 if (num < 2) {
275 *ret = 2;
276 return;
277 }
278
279 while ((num % 2) != 0) {
280 num = num + 1;
281 }
282
283 *ret = num;
284 return;
285 }
286
287 /**
288 * dst_hash() - Find the index into ipv4 base table
289 * @public_ip: [in] public_ip
290 * @trgt_ip: [in] Target IP address
291 * @trgt_port: [in] Target port
292 * @public_port: [in] Public port
293 * @proto: [in] Protocol (TCP/IP)
294 * @size: [in] size of the ipv4 base Table
295 *
296 * This hash method is used to find the hash index of new nat
297 * entry into ipv4 base table. In case of zero index, the
298 * new entry will be stored into N-1 index where N is size of
299 * ipv4 base table
300 *
301 * Returns: >0 index into ipv4 base table, negative on failure
302 */
dst_hash(uint32_t public_ip,uint32_t trgt_ip,uint16_t trgt_port,uint16_t public_port,uint8_t proto,uint16_t size)303 static uint16_t dst_hash(uint32_t public_ip, uint32_t trgt_ip,
304 uint16_t trgt_port, uint16_t public_port,
305 uint8_t proto, uint16_t size)
306 {
307 uint16_t hash = ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
308 (trgt_port) ^ (public_port) ^ (proto);
309
310 if (ipv4_nat_cache.ver >= IPA_HW_v4_0)
311 hash ^= ((uint16_t)(public_ip)) ^
312 ((uint16_t)(public_ip >> 16));
313
314 IPADBG("public ip 0x%X\n", public_ip);
315 IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
316 IPADBG("public_port: 0x%x\n", public_port);
317 IPADBG("proto: 0x%x size: 0x%x\n", proto, size);
318
319 hash = (hash & size);
320
321 /* If the hash resulted to zero then set it to maximum value
322 as zero is unused entry in nat tables */
323 if (0 == hash) {
324 return size;
325 }
326
327 IPADBG("dst_hash returning value: %d\n", hash);
328 return hash;
329 }
330
331 /**
332 * src_hash() - Find the index into ipv4 index base table
333 * @priv_ip: [in] Private IP address
334 * @priv_port: [in] Private port
335 * @trgt_ip: [in] Target IP address
336 * @trgt_port: [in] Target Port
337 * @proto: [in] Protocol (TCP/IP)
338 * @size: [in] size of the ipv4 index base Table
339 *
340 * This hash method is used to find the hash index of new nat
341 * entry into ipv4 index base table. In case of zero index, the
342 * new entry will be stored into N-1 index where N is size of
343 * ipv4 index base table
344 *
345 * Returns: >0 index into ipv4 index base table, negative on failure
346 */
src_hash(uint32_t priv_ip,uint16_t priv_port,uint32_t trgt_ip,uint16_t trgt_port,uint8_t proto,uint16_t size)347 static uint16_t src_hash(uint32_t priv_ip, uint16_t priv_port,
348 uint32_t trgt_ip, uint16_t trgt_port,
349 uint8_t proto, uint16_t size)
350 {
351 uint16_t hash = ((uint16_t)(priv_ip)) ^ ((uint16_t)(priv_ip >> 16)) ^
352 (priv_port) ^
353 ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
354 (trgt_port) ^ (proto);
355
356 IPADBG("priv_ip: 0x%x priv_port: 0x%x\n", priv_ip, priv_port);
357 IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
358 IPADBG("proto: 0x%x size: 0x%x\n", proto, size);
359
360 hash = (hash & size);
361
362 /* If the hash resulted to zero then set it to maximum value
363 as zero is unused entry in nat tables */
364 if (0 == hash) {
365 return size;
366 }
367
368 IPADBG("src_hash returning value: %d\n", hash);
369 return hash;
370 }
371
372 /**
373 * ipa_nati_calc_ip_cksum() - Calculate the source nat
374 * IP checksum diff
375 * @pub_ip_addr: [in] public ip address
376 * @priv_ip_addr: [in] Private ip address
377 *
378 * source nat ip checksum different is calculated as
379 * public_ip_addr - private_ip_addr
380 * Here we are using 1's complement to represent -ve number.
381 * So take 1's complement of private ip addr and add it
382 * to public ip addr.
383 *
384 * Returns: >0 ip checksum diff
385 */
ipa_nati_calc_ip_cksum(uint32_t pub_ip_addr,uint32_t priv_ip_addr)386 static uint16_t ipa_nati_calc_ip_cksum(uint32_t pub_ip_addr,
387 uint32_t priv_ip_addr)
388 {
389 uint16_t ret;
390 uint32_t cksum = 0;
391
392 /* Add LSB(2 bytes) of public ip address to cksum */
393 cksum += (pub_ip_addr & 0xFFFF);
394
395 /* Add MSB(2 bytes) of public ip address to cksum
396 and check for carry forward(CF), if any add it
397 */
398 cksum += (pub_ip_addr>>16);
399 if (cksum >> 16) {
400 cksum = (cksum & 0x0000FFFF);
401 cksum += 1;
402 }
403
404 /* Calculate the 1's complement of private ip address */
405 priv_ip_addr = (~priv_ip_addr);
406
407 /* Add LSB(2 bytes) of private ip address to cksum
408 and check for carry forward(CF), if any add it
409 */
410 cksum += (priv_ip_addr & 0xFFFF);
411 if (cksum >> 16) {
412 cksum = (cksum & 0x0000FFFF);
413 cksum += 1;
414 }
415
416 /* Add MSB(2 bytes) of private ip address to cksum
417 and check for carry forward(CF), if any add it
418 */
419 cksum += (priv_ip_addr>>16);
420 if (cksum >> 16) {
421 cksum = (cksum & 0x0000FFFF);
422 cksum += 1;
423 }
424
425 /* Return the LSB(2 bytes) of checksum */
426 ret = (uint16_t)cksum;
427 return ret;
428 }
429
430 /**
431 * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat
432 * TCP/UDP checksum diff
433 * @pub_ip_addr: [in] public ip address
434 * @pub_port: [in] public tcp/udp port
435 * @priv_ip_addr: [in] Private ip address
436 * @priv_port: [in] Private tcp/udp prot
437 *
438 * source nat tcp/udp checksum is calculated as
439 * (pub_ip_addr + pub_port) - (priv_ip_addr + priv_port)
440 * Here we are using 1's complement to represent -ve number.
441 * So take 1's complement of prviate ip addr &private port
442 * and add it public ip addr & public port.
443 *
444 * Returns: >0 tcp/udp checksum diff
445 */
ipa_nati_calc_tcp_udp_cksum(uint32_t pub_ip_addr,uint16_t pub_port,uint32_t priv_ip_addr,uint16_t priv_port)446 static uint16_t ipa_nati_calc_tcp_udp_cksum(uint32_t pub_ip_addr,
447 uint16_t pub_port,
448 uint32_t priv_ip_addr,
449 uint16_t priv_port)
450 {
451 uint16_t ret = 0;
452 uint32_t cksum = 0;
453
454 /* Add LSB(2 bytes) of public ip address to cksum */
455 cksum += (pub_ip_addr & 0xFFFF);
456
457 /* Add MSB(2 bytes) of public ip address to cksum
458 and check for carry forward(CF), if any add it
459 */
460 cksum += (pub_ip_addr>>16);
461 if (cksum >> 16) {
462 cksum = (cksum & 0x0000FFFF);
463 cksum += 1;
464 }
465
466 /* Add public port to cksum and
467 check for carry forward(CF), if any add it */
468 cksum += pub_port;
469 if (cksum >> 16) {
470 cksum = (cksum & 0x0000FFFF);
471 cksum += 1;
472 }
473
474 /* Calculate the 1's complement of private ip address */
475 priv_ip_addr = (~priv_ip_addr);
476
477 /* Add LSB(2 bytes) of private ip address to cksum
478 and check for carry forward(CF), if any add it
479 */
480 cksum += (priv_ip_addr & 0xFFFF);
481 if (cksum >> 16) {
482 cksum = (cksum & 0x0000FFFF);
483 cksum += 1;
484 }
485
486 /* Add MSB(2 bytes) of private ip address to cksum
487 and check for carry forward(CF), if any add
488 */
489 cksum += (priv_ip_addr>>16);
490 if (cksum >> 16) {
491 cksum = (cksum & 0x0000FFFF);
492 cksum += 1;
493 }
494
495 /* Calculate the 1's complement of private port */
496 priv_port = (~priv_port);
497
498 /* Add public port to cksum and
499 check for carry forward(CF), if any add it */
500 cksum += priv_port;
501 if (cksum >> 16) {
502 cksum = (cksum & 0x0000FFFF);
503 cksum += 1;
504 }
505
506 /* return the LSB(2 bytes) of checksum */
507 ret = (uint16_t)cksum;
508 return ret;
509 }
510
511 /**
512 * ipa_nati_make_rule_hdl() - makes nat rule handle
513 * @tbl_hdl: [in] nat table handle
514 * @tbl_entry: [in] nat table entry
515 *
516 * Calculate the nat rule handle which from
517 * nat entry which will be returned to client of
518 * nat driver
519 *
520 * Returns: >0 nat rule handle
521 */
ipa_nati_make_rule_hdl(uint16_t tbl_hdl,uint16_t tbl_entry)522 uint16_t ipa_nati_make_rule_hdl(uint16_t tbl_hdl,
523 uint16_t tbl_entry)
524 {
525 struct ipa_nat_ip4_table_cache *tbl_ptr;
526 uint16_t rule_hdl = 0;
527 uint16_t cnt = 0;
528
529 tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
530
531 if (tbl_entry >= tbl_ptr->table_entries) {
532 /* Increase the current expansion table count */
533 tbl_ptr->cur_expn_tbl_cnt++;
534
535 /* Update the index into table */
536 rule_hdl = tbl_entry - tbl_ptr->table_entries;
537 rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
538 /* Update the table type mask */
539 rule_hdl = (rule_hdl | IPA_NAT_RULE_HDL_TBL_TYPE_MASK);
540 } else {
541 /* Increase the current count */
542 tbl_ptr->cur_tbl_cnt++;
543
544 rule_hdl = tbl_entry;
545 rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
546 }
547
548 for (; cnt < (tbl_ptr->table_entries + tbl_ptr->expn_table_entries); cnt++) {
549 if (IPA_NAT_INVALID_NAT_ENTRY == tbl_ptr->rule_id_array[cnt]) {
550 tbl_ptr->rule_id_array[cnt] = rule_hdl;
551 return cnt + 1;
552 }
553 }
554
555 return 0;
556 }
557
558 /**
559 * ipa_nati_parse_ipv4_rule_hdl() - prase rule handle
560 * @tbl_hdl: [in] nat table rule
561 * @rule_hdl: [in] nat rule handle
562 * @expn_tbl: [out] expansion table or not
563 * @tbl_entry: [out] index into table
564 *
565 * Parse the rule handle to retrieve the nat table
566 * type and entry of nat table
567 *
568 * Returns: None
569 */
ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_index,uint16_t rule_hdl,uint8_t * expn_tbl,uint16_t * tbl_entry)570 void ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_index,
571 uint16_t rule_hdl, uint8_t *expn_tbl,
572 uint16_t *tbl_entry)
573 {
574 struct ipa_nat_ip4_table_cache *tbl_ptr;
575 uint16_t rule_id;
576
577 *expn_tbl = 0;
578 *tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
579 tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_index];
580
581 if (rule_hdl >= (tbl_ptr->table_entries + tbl_ptr->expn_table_entries)) {
582 IPAERR("invalid rule handle\n");
583 return;
584 }
585
586 rule_id = tbl_ptr->rule_id_array[rule_hdl-1];
587
588 /* Retrieve the table type */
589 *expn_tbl = 0;
590 if (rule_id & IPA_NAT_RULE_HDL_TBL_TYPE_MASK) {
591 *expn_tbl = 1;
592 }
593
594 /* Retrieve the table entry */
595 *tbl_entry = (rule_id >> IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
596 return;
597 }
598
ipa_nati_get_entry_offset(struct ipa_nat_ip4_table_cache * cache_ptr,nat_table_type tbl_type,uint16_t tbl_entry)599 uint32_t ipa_nati_get_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
600 nat_table_type tbl_type,
601 uint16_t tbl_entry)
602 {
603 struct ipa_nat_rule *tbl_ptr;
604 uint32_t ret = 0;
605
606 if (IPA_NAT_EXPN_TBL == tbl_type) {
607 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
608 } else {
609 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
610 }
611
612 ret = (char *)&tbl_ptr[tbl_entry] - (char *)tbl_ptr;
613 ret += cache_ptr->tbl_addr_offset;
614 return ret;
615 }
616
ipa_nati_get_index_entry_offset(struct ipa_nat_ip4_table_cache * cache_ptr,nat_table_type tbl_type,uint16_t indx_tbl_entry)617 uint32_t ipa_nati_get_index_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
618 nat_table_type tbl_type,
619 uint16_t indx_tbl_entry)
620 {
621 struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
622 uint32_t ret = 0;
623
624 if (IPA_NAT_INDEX_EXPN_TBL == tbl_type) {
625 indx_tbl_ptr =
626 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
627 } else {
628 indx_tbl_ptr =
629 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
630 }
631
632 ret = (char *)&indx_tbl_ptr[indx_tbl_entry] - (char *)indx_tbl_ptr;
633 ret += cache_ptr->tbl_addr_offset;
634 return ret;
635 }
636
637 /* ------------------------------------------
638 UTILITY FUNCTIONS END
639 --------------------------------------------*/
640
641 /* ------------------------------------------
642 Main Functions
643 --------------------------------------------**/
ipa_nati_reset_tbl(uint8_t tbl_indx)644 void ipa_nati_reset_tbl(uint8_t tbl_indx)
645 {
646 uint16_t table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
647 uint16_t expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].expn_table_entries;
648
649 /* Base table */
650 IPADBG("memset() base table to 0, %p\n",
651 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr);
652
653 memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr,
654 0,
655 IPA_NAT_TABLE_ENTRY_SIZE * table_entries);
656
657 /* Base expansino table */
658 IPADBG("memset() expn base table to 0, %p\n",
659 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr);
660
661 memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr,
662 0,
663 IPA_NAT_TABLE_ENTRY_SIZE * expn_table_entries);
664
665 /* Index table */
666 IPADBG("memset() index table to 0, %p\n",
667 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr);
668
669 memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr,
670 0,
671 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * table_entries);
672
673 /* Index expansion table */
674 IPADBG("memset() index expn table to 0, %p\n",
675 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr);
676
677 memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr,
678 0,
679 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * expn_table_entries);
680
681 IPADBG("returning from ipa_nati_reset_tbl()\n");
682 return;
683 }
684
ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,uint16_t number_of_entries,uint32_t * tbl_hdl)685 int ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,
686 uint16_t number_of_entries,
687 uint32_t *tbl_hdl)
688 {
689 struct ipa_ioc_nat_alloc_mem mem;
690 uint8_t tbl_indx = ipv4_nat_cache.table_cnt;
691 uint16_t table_entries, expn_table_entries;
692 int ret;
693
694 *tbl_hdl = 0;
695 /* Allocate table */
696 memset(&mem, 0, sizeof(mem));
697 ret = ipa_nati_alloc_table(number_of_entries,
698 &mem,
699 &table_entries,
700 &expn_table_entries);
701 if (0 != ret) {
702 IPAERR("unable to allocate nat table\n");
703 return -ENOMEM;
704 }
705
706 /* Update the cache
707 The (IPA_NAT_UNUSED_BASE_ENTRIES/2) indicates zero entry entries
708 for both base and expansion table
709 */
710 ret = ipa_nati_update_cache(&mem,
711 public_ip_addr,
712 table_entries,
713 expn_table_entries);
714 if (0 != ret) {
715 IPAERR("unable to update cache Error: %d\n", ret);
716 return -EINVAL;
717 }
718
719 /* Reset the nat table before posting init cmd */
720 ipa_nati_reset_tbl(tbl_indx);
721
722 /* Initialize the ipa hw with nat table dimensions */
723 ret = ipa_nati_post_ipv4_init_cmd(tbl_indx);
724 if (0 != ret) {
725 IPAERR("unable to post nat_init command Error %d\n", ret);
726 return -EINVAL;
727 }
728
729 /* store the initial public ip address in the cached pdn table
730 this is backward compatible for pre IPAv4 versions, we will always
731 use this ip as the single PDN address
732 */
733 pdns[0].public_ip = public_ip_addr;
734
735 /* Return table handle */
736 ipv4_nat_cache.table_cnt++;
737 *tbl_hdl = ipv4_nat_cache.table_cnt;
738
739 #ifdef NAT_DUMP
740 ipa_nat_dump_ipv4_table(*tbl_hdl);
741 #endif
742 return 0;
743 }
744
ipa_nati_alloc_table(uint16_t number_of_entries,struct ipa_ioc_nat_alloc_mem * mem,uint16_t * table_entries,uint16_t * expn_table_entries)745 int ipa_nati_alloc_table(uint16_t number_of_entries,
746 struct ipa_ioc_nat_alloc_mem *mem,
747 uint16_t *table_entries,
748 uint16_t *expn_table_entries)
749 {
750 int fd = 0, ret;
751 uint16_t total_entries;
752
753 /* Copy the table name */
754 strlcpy(mem->dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);
755
756 /* Calculate the size for base table and expansion table */
757 *table_entries = (uint16_t)(number_of_entries * IPA_NAT_BASE_TABLE_PERCENTAGE);
758 if (*table_entries == 0) {
759 *table_entries = 1;
760 }
761 if (GetNearest2Power(*table_entries, table_entries)) {
762 IPAERR("unable to calculate power of 2\n");
763 return -EINVAL;
764 }
765
766 *expn_table_entries = (uint16_t)(number_of_entries * IPA_NAT_EXPANSION_TABLE_PERCENTAGE);
767 GetNearestEven(*expn_table_entries, expn_table_entries);
768
769 total_entries = (*table_entries)+(*expn_table_entries);
770
771 /* Calclate the memory size for both table and index table entries */
772 mem->size = (IPA_NAT_TABLE_ENTRY_SIZE * total_entries);
773 IPADBG("Nat Table size: %zu\n", mem->size);
774 mem->size += (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * total_entries);
775 IPADBG("Nat Base and Index Table size: %zu\n", mem->size);
776
777 if (!ipv4_nat_cache.ipa_fd) {
778 fd = open(IPA_DEV_NAME, O_RDONLY);
779 if (fd < 0) {
780 perror("ipa_nati_alloc_table(): open error value:");
781 IPAERR("unable to open ipa device\n");
782 return -EIO;
783 }
784 ipv4_nat_cache.ipa_fd = fd;
785 }
786
787 if (GetIPAVer()) {
788 IPAERR("unable to get ipa ver\n");
789 return -EIO;
790 }
791
792 ret = CreateNatDevice(mem);
793 return ret;
794 }
795
796
ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem * mem,uint32_t public_addr,uint16_t tbl_entries,uint16_t expn_tbl_entries)797 int ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem *mem,
798 uint32_t public_addr,
799 uint16_t tbl_entries,
800 uint16_t expn_tbl_entries)
801 {
802 uint32_t index = ipv4_nat_cache.table_cnt;
803 char *ipv4_rules_addr = NULL;
804
805 int fd = 0;
806 int flags = MAP_SHARED;
807 int prot = PROT_READ | PROT_WRITE;
808 off_t offset = 0;
809 #ifdef IPA_ON_R3PC
810 int ret = 0;
811 uint32_t nat_mem_offset = 0;
812 #endif
813
814 ipv4_nat_cache.ip4_tbl[index].valid = IPA_NAT_TABLE_VALID;
815 ipv4_nat_cache.ip4_tbl[index].public_addr = public_addr;
816 ipv4_nat_cache.ip4_tbl[index].size = mem->size;
817 ipv4_nat_cache.ip4_tbl[index].tbl_addr_offset = mem->offset;
818
819 ipv4_nat_cache.ip4_tbl[index].table_entries = tbl_entries;
820 ipv4_nat_cache.ip4_tbl[index].expn_table_entries = expn_tbl_entries;
821
822 IPADBG("num of ipv4 rules:%d\n", tbl_entries);
823 IPADBG("num of ipv4 expn rules:%d\n", expn_tbl_entries);
824
825 /* allocate memory for nat index expansion table */
826 if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
827 ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta =
828 malloc(sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
829
830 if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
831 IPAERR("Fail to allocate ipv4 index expansion table meta\n");
832 return 0;
833 }
834
835 memset(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta,
836 0,
837 sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
838 }
839
840 /* Allocate memory for rule_id_array */
841 if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
842 ipv4_nat_cache.ip4_tbl[index].rule_id_array =
843 malloc(sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
844
845 if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
846 IPAERR("Fail to allocate rule id array\n");
847 return 0;
848 }
849
850 memset(ipv4_nat_cache.ip4_tbl[index].rule_id_array,
851 0,
852 sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
853 }
854
855
856 /* open the nat table */
857 strlcpy(mem->dev_name, NAT_DEV_FULL_NAME, IPA_RESOURCE_NAME_MAX);
858 fd = open(mem->dev_name, O_RDWR);
859 if (fd < 0) {
860 perror("ipa_nati_update_cache(): open error value:");
861 IPAERR("unable to open nat device. Error:%d\n", fd);
862 return -EIO;
863 }
864
865 /* copy the nat table name */
866 strlcpy(ipv4_nat_cache.ip4_tbl[index].table_name,
867 mem->dev_name,
868 IPA_RESOURCE_NAME_MAX);
869 ipv4_nat_cache.ip4_tbl[index].nat_fd = fd;
870
871 /* open the nat device Table */
872 #ifndef IPA_ON_R3PC
873 ipv4_rules_addr = (void *)mmap(NULL, mem->size,
874 prot, flags,
875 fd, offset);
876 #else
877 IPADBG("user space r3pc\n");
878 ipv4_rules_addr = (void *)mmap((caddr_t)0, NAT_MMAP_MEM_SIZE,
879 prot, flags,
880 fd, offset);
881 #endif
882 if (MAP_FAILED == ipv4_rules_addr) {
883 perror("unable to mmap the memory\n");
884 return -EINVAL;
885 }
886
887 #ifdef IPA_ON_R3PC
888 ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_NAT_OFFSET, &nat_mem_offset);
889 if (ret != 0) {
890 perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
891 IPAERR("unable to post ant offset cmd Error: %d\n", ret);
892 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
893 return -EIO;
894 }
895 ipv4_rules_addr += nat_mem_offset;
896 ipv4_nat_cache.ip4_tbl[index].mmap_offset = nat_mem_offset;
897 #endif
898
899 IPADBG("mmap return value 0x%lx\n", (long unsigned int)ipv4_rules_addr);
900
901 ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr = ipv4_rules_addr;
902
903 ipv4_nat_cache.ip4_tbl[index].ipv4_expn_rules_addr =
904 ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * tbl_entries);
905
906 ipv4_nat_cache.ip4_tbl[index].index_table_addr =
907 ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries));
908
909 ipv4_nat_cache.ip4_tbl[index].index_table_expn_addr =
910 ipv4_rules_addr +
911 (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries))+
912 (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * tbl_entries);
913
914 return 0;
915 }
916
917 /* comment: check the implementation once
918 offset should be in terms of byes */
ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index)919 int ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index)
920 {
921 struct ipa_ioc_v4_nat_init cmd;
922 uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_index].tbl_addr_offset;
923 int ret;
924
925 cmd.tbl_index = tbl_index;
926
927 cmd.ipv4_rules_offset = offset;
928 cmd.expn_rules_offset = cmd.ipv4_rules_offset +
929 (ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_TABLE_ENTRY_SIZE);
930
931 cmd.index_offset = cmd.expn_rules_offset +
932 (ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries * IPA_NAT_TABLE_ENTRY_SIZE);
933
934 cmd.index_expn_offset = cmd.index_offset +
935 (ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE);
936
937 cmd.table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].table_entries - 1;
938 cmd.expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries;
939
940 cmd.ip_addr = ipv4_nat_cache.ip4_tbl[tbl_index].public_addr;
941
942 ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_INIT_NAT, &cmd);
943 if (ret != 0) {
944 perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
945 IPAERR("unable to post init cmd Error: %d\n", ret);
946 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
947 return -EINVAL;
948 }
949 IPADBG("Posted IPA_IOC_V4_INIT_NAT to kernel successfully\n");
950
951 return 0;
952 }
953
ipa_nati_del_ipv4_table(uint32_t tbl_hdl)954 int ipa_nati_del_ipv4_table(uint32_t tbl_hdl)
955 {
956 uint8_t index = (uint8_t)(tbl_hdl - 1);
957 void *addr = (void *)ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr;
958 struct ipa_ioc_v4_nat_del del_cmd;
959 int ret;
960
961 if (!ipv4_nat_cache.ip4_tbl[index].valid) {
962 IPAERR("invalid table handle passed\n");
963 ret = -EINVAL;
964 goto fail;
965 }
966
967 if (pthread_mutex_lock(&nat_mutex) != 0) {
968 ret = -1;
969 goto lock_mutex_fail;
970 }
971
972 /* unmap the device memory from user space */
973 #ifndef IPA_ON_R3PC
974 munmap(addr, ipv4_nat_cache.ip4_tbl[index].size);
975 #else
976 addr = (char *)addr - ipv4_nat_cache.ip4_tbl[index].mmap_offset;
977 munmap(addr, NAT_MMAP_MEM_SIZE);
978 #endif
979
980 /* close the file descriptor of nat device */
981 if (close(ipv4_nat_cache.ip4_tbl[index].nat_fd)) {
982 IPAERR("unable to close the file descriptor\n");
983 ret = -EINVAL;
984 if (pthread_mutex_unlock(&nat_mutex) != 0)
985 goto unlock_mutex_fail;
986 goto fail;
987 }
988
989 del_cmd.table_index = index;
990 del_cmd.public_ip_addr = ipv4_nat_cache.ip4_tbl[index].public_addr;
991 ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_DEL_NAT, &del_cmd);
992 if (ret != 0) {
993 perror("ipa_nati_del_ipv4_table(): ioctl error value");
994 IPAERR("unable to post nat del command init Error: %d\n", ret);
995 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
996 ret = -EINVAL;
997 if (pthread_mutex_unlock(&nat_mutex) != 0)
998 goto unlock_mutex_fail;
999 goto fail;
1000 }
1001 IPAERR("posted IPA_IOC_V4_DEL_NAT to kernel successfully\n");
1002
1003 free(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta);
1004 free(ipv4_nat_cache.ip4_tbl[index].rule_id_array);
1005
1006 memset(&ipv4_nat_cache.ip4_tbl[index],
1007 0,
1008 sizeof(ipv4_nat_cache.ip4_tbl[index]));
1009
1010 /* Decrease the table count by 1*/
1011 ipv4_nat_cache.table_cnt--;
1012
1013 if (pthread_mutex_unlock(&nat_mutex) != 0) {
1014 ret = -1;
1015 goto unlock_mutex_fail;
1016 }
1017
1018 return 0;
1019
1020 lock_mutex_fail:
1021 IPAERR("unable to lock the nat mutex\n");
1022 return ret;
1023
1024 unlock_mutex_fail:
1025 IPAERR("unable to unlock the nat mutex\n");
1026
1027 fail:
1028 return ret;
1029 }
1030
ipa_nati_query_timestamp(uint32_t tbl_hdl,uint32_t rule_hdl,uint32_t * time_stamp)1031 int ipa_nati_query_timestamp(uint32_t tbl_hdl,
1032 uint32_t rule_hdl,
1033 uint32_t *time_stamp)
1034 {
1035 uint8_t tbl_index = (uint8_t)(tbl_hdl - 1);
1036 uint8_t expn_tbl = 0;
1037 uint16_t tbl_entry = 0;
1038 struct ipa_nat_rule *tbl_ptr = NULL;
1039
1040 if (!ipv4_nat_cache.ip4_tbl[tbl_index].valid) {
1041 IPAERR("invalid table handle\n");
1042 return -EINVAL;
1043 }
1044
1045 if (pthread_mutex_lock(&nat_mutex) != 0) {
1046 IPAERR("unable to lock the nat mutex\n");
1047 return -1;
1048 }
1049
1050 ipa_nati_parse_ipv4_rule_hdl(tbl_index, (uint16_t)rule_hdl,
1051 &expn_tbl, &tbl_entry);
1052
1053 tbl_ptr =
1054 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_rules_addr;
1055 if (expn_tbl) {
1056 tbl_ptr =
1057 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_expn_rules_addr;
1058 }
1059
1060 if (tbl_ptr)
1061 *time_stamp = Read32BitFieldValue(tbl_ptr[tbl_entry].ts_proto,
1062 TIME_STAMP_FIELD);
1063
1064 if (pthread_mutex_unlock(&nat_mutex) != 0) {
1065 IPAERR("unable to unlock the nat mutex\n");
1066 return -1;
1067 }
1068
1069 return 0;
1070 }
1071
ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry * entry)1072 int ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry *entry)
1073 {
1074 if (entry->public_ip == 0)
1075 IPADBG("PDN %d public ip will be set to 0\n", entry->pdn_index);
1076
1077 if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_MODIFY_PDN, entry)) {
1078 perror("ipa_nati_modify_pdn(): ioctl error value");
1079 IPAERR("unable to call modify pdn icotl\n");
1080 IPAERR("index %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X\n",
1081 entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);
1082 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
1083 return -EIO;
1084 }
1085
1086 pdns[entry->pdn_index].public_ip = entry->public_ip;
1087 pdns[entry->pdn_index].dst_metadata = entry->dst_metadata;
1088 pdns[entry->pdn_index].src_metadata = entry->src_metadata;
1089
1090 IPADBG("posted IPA_IOC_NAT_MODIFY_PDN to kernel successfully and stored in cache\n index %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X\n",
1091 entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);
1092
1093 return 0;
1094 }
1095
ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,const ipa_nat_ipv4_rule * clnt_rule,uint32_t * rule_hdl)1096 int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,
1097 const ipa_nat_ipv4_rule *clnt_rule,
1098 uint32_t *rule_hdl)
1099 {
1100 struct ipa_nat_ip4_table_cache *tbl_ptr;
1101 struct ipa_nat_sw_rule sw_rule;
1102 struct ipa_nat_indx_tbl_sw_rule index_sw_rule;
1103 uint16_t new_entry, new_index_tbl_entry;
1104
1105 /* verify that the rule's PDN is valid */
1106 if (clnt_rule->pdn_index >= IPA_MAX_PDN_NUM ||
1107 pdns[clnt_rule->pdn_index].public_ip == 0) {
1108 IPAERR("invalid parameters, pdn index %d, public ip = 0x%X\n",
1109 clnt_rule->pdn_index, pdns[clnt_rule->pdn_index].public_ip);
1110 return -EINVAL;
1111 }
1112
1113 memset(&sw_rule, 0, sizeof(sw_rule));
1114 memset(&index_sw_rule, 0, sizeof(index_sw_rule));
1115
1116 /* Generate rule from client input */
1117 if (ipa_nati_generate_rule(tbl_hdl, clnt_rule,
1118 &sw_rule, &index_sw_rule,
1119 &new_entry, &new_index_tbl_entry)) {
1120 IPAERR("unable to generate rule\n");
1121 return -EINVAL;
1122 }
1123
1124 tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
1125 ipa_nati_copy_ipv4_rule_to_hw(tbl_ptr, &sw_rule, new_entry, (uint8_t)(tbl_hdl-1));
1126 ipa_nati_copy_ipv4_index_rule_to_hw(tbl_ptr,
1127 &index_sw_rule,
1128 new_index_tbl_entry,
1129 (uint8_t)(tbl_hdl-1));
1130
1131 IPADBG("new entry:%d, new index entry: %d\n", new_entry, new_index_tbl_entry);
1132 if (ipa_nati_post_ipv4_dma_cmd((uint8_t)(tbl_hdl - 1), new_entry)) {
1133 IPAERR("unable to post dma command\n");
1134 return -EIO;
1135 }
1136
1137 /* Generate rule handle */
1138 *rule_hdl = ipa_nati_make_rule_hdl((uint16_t)tbl_hdl, new_entry);
1139 if (!(*rule_hdl)) {
1140 IPAERR("unable to generate rule handle\n");
1141 return -EINVAL;
1142 }
1143
1144 #ifdef NAT_DUMP
1145 ipa_nat_dump_ipv4_table(tbl_hdl);
1146 #endif
1147
1148 return 0;
1149 }
1150
ipa_nati_generate_rule(uint32_t tbl_hdl,const ipa_nat_ipv4_rule * clnt_rule,struct ipa_nat_sw_rule * rule,struct ipa_nat_indx_tbl_sw_rule * index_sw_rule,uint16_t * tbl_entry,uint16_t * indx_tbl_entry)1151 int ipa_nati_generate_rule(uint32_t tbl_hdl,
1152 const ipa_nat_ipv4_rule *clnt_rule,
1153 struct ipa_nat_sw_rule *rule,
1154 struct ipa_nat_indx_tbl_sw_rule *index_sw_rule,
1155 uint16_t *tbl_entry,
1156 uint16_t *indx_tbl_entry)
1157 {
1158 struct ipa_nat_ip4_table_cache *tbl_ptr;
1159 uint16_t tmp;
1160
1161 if (NULL == clnt_rule || NULL == index_sw_rule ||
1162 NULL == rule || NULL == tbl_entry ||
1163 NULL == indx_tbl_entry) {
1164 IPAERR("invalid parameters\n");
1165 return -EINVAL;
1166 }
1167
1168 tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
1169
1170 *tbl_entry = ipa_nati_generate_tbl_rule(clnt_rule,
1171 rule,
1172 tbl_ptr);
1173 if (IPA_NAT_INVALID_NAT_ENTRY == *tbl_entry) {
1174 IPAERR("unable to generate table entry\n");
1175 return -EINVAL;
1176 }
1177
1178 index_sw_rule->tbl_entry = *tbl_entry;
1179 *indx_tbl_entry = ipa_nati_generate_index_rule(clnt_rule,
1180 index_sw_rule,
1181 tbl_ptr);
1182 if (IPA_NAT_INVALID_NAT_ENTRY == *indx_tbl_entry) {
1183 IPAERR("unable to generate index table entry\n");
1184 return -EINVAL;
1185 }
1186
1187 rule->indx_tbl_entry = *indx_tbl_entry;
1188 if (*indx_tbl_entry >= tbl_ptr->table_entries) {
1189 tmp = *indx_tbl_entry - tbl_ptr->table_entries;
1190 tbl_ptr->index_expn_table_meta[tmp].prev_index = index_sw_rule->prev_index;
1191 }
1192
1193 return 0;
1194 }
1195
ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule * clnt_rule,struct ipa_nat_sw_rule * sw_rule,struct ipa_nat_ip4_table_cache * tbl_ptr)1196 uint16_t ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule *clnt_rule,
1197 struct ipa_nat_sw_rule *sw_rule,
1198 struct ipa_nat_ip4_table_cache *tbl_ptr)
1199 {
1200 uint32_t pub_ip_addr;
1201 uint16_t prev = 0, nxt_indx = 0, new_entry;
1202 struct ipa_nat_rule *tbl = NULL, *expn_tbl = NULL;
1203
1204 pub_ip_addr = pdns[clnt_rule->pdn_index].public_ip;
1205
1206 tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_rules_addr;
1207 expn_tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_expn_rules_addr;
1208
1209 /* copy the values from client rule to sw rule */
1210 sw_rule->private_ip = clnt_rule->private_ip;
1211 sw_rule->private_port = clnt_rule->private_port;
1212 sw_rule->protocol = clnt_rule->protocol;
1213 sw_rule->public_port = clnt_rule->public_port;
1214 sw_rule->target_ip = clnt_rule->target_ip;
1215 sw_rule->target_port = clnt_rule->target_port;
1216 sw_rule->pdn_index = clnt_rule->pdn_index;
1217
1218 /* consider only public and private ip fields */
1219 sw_rule->ip_chksum = ipa_nati_calc_ip_cksum(pub_ip_addr,
1220 clnt_rule->private_ip);
1221
1222 if (IPPROTO_TCP == sw_rule->protocol ||
1223 IPPROTO_UDP == sw_rule->protocol) {
1224 /* consider public and private ip & port fields */
1225 sw_rule->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum(
1226 pub_ip_addr,
1227 clnt_rule->public_port,
1228 clnt_rule->private_ip,
1229 clnt_rule->private_port);
1230 }
1231
1232 sw_rule->rsvd1 = 0;
1233 sw_rule->enable = IPA_NAT_FLAG_DISABLE_BIT;
1234 sw_rule->next_index = 0;
1235
1236 /*
1237 SW sets this timer to 0.
1238 The assumption is that 0 is an invalid clock value and no clock
1239 wraparounds are expected
1240 */
1241 sw_rule->time_stamp = 0;
1242 sw_rule->rsvd2 = 0;
1243 sw_rule->rsvd3 = 0;
1244 sw_rule->prev_index = 0;
1245 sw_rule->indx_tbl_entry = 0;
1246
1247 new_entry = dst_hash(pub_ip_addr, clnt_rule->target_ip,
1248 clnt_rule->target_port,
1249 clnt_rule->public_port,
1250 clnt_rule->protocol,
1251 tbl_ptr->table_entries-1);
1252
1253 /* check whether there is any collision
1254 if no collision return */
1255 if (!Read16BitFieldValue(tbl[new_entry].ip_cksm_enbl,
1256 ENABLE_FIELD)) {
1257 sw_rule->prev_index = 0;
1258 IPADBG("Destination Nat New Entry Index %d\n", new_entry);
1259 return new_entry;
1260 }
1261
1262 /* First collision */
1263 if (Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
1264 NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
1265 sw_rule->prev_index = new_entry;
1266 } else { /* check for more than one collision */
1267 /* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
1268 nxt_indx = Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
1269 NEXT_INDEX_FIELD);
1270
1271 while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
1272 prev = nxt_indx;
1273
1274 nxt_indx -= tbl_ptr->table_entries;
1275 nxt_indx = Read16BitFieldValue(expn_tbl[nxt_indx].nxt_indx_pub_port,
1276 NEXT_INDEX_FIELD);
1277
1278 /* Handling error case */
1279 if (prev == nxt_indx) {
1280 IPAERR("Error: Prev index:%d and next:%d index should not be same\n", prev, nxt_indx);
1281 return IPA_NAT_INVALID_NAT_ENTRY;
1282 }
1283 }
1284
1285 sw_rule->prev_index = prev;
1286 }
1287
1288 /* On collision check for the free entry in expansion table */
1289 new_entry = ipa_nati_expn_tbl_free_entry(expn_tbl,
1290 tbl_ptr->expn_table_entries);
1291
1292 if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
1293 /* Expansion table is full return*/
1294 IPAERR("Expansion table is full\n");
1295 IPAERR("Current Table: %d & Expn Entries: %d\n",
1296 tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
1297 return IPA_NAT_INVALID_NAT_ENTRY;
1298 }
1299 new_entry += tbl_ptr->table_entries;
1300
1301 IPADBG("new entry index %d\n", new_entry);
1302 return new_entry;
1303 }
1304
1305 /* returns expn table entry index */
ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule * expn_tbl,uint16_t size)1306 uint16_t ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule *expn_tbl,
1307 uint16_t size)
1308 {
1309 int cnt;
1310
1311 for (cnt = 1; cnt < size; cnt++) {
1312 if (!Read16BitFieldValue(expn_tbl[cnt].ip_cksm_enbl,
1313 ENABLE_FIELD)) {
1314 IPADBG("new expansion table entry index %d\n", cnt);
1315 return cnt;
1316 }
1317 }
1318
1319 IPAERR("nat expansion table is full\n");
1320 return 0;
1321 }
1322
ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule * clnt_rule,struct ipa_nat_indx_tbl_sw_rule * sw_rule,struct ipa_nat_ip4_table_cache * tbl_ptr)1323 uint16_t ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule *clnt_rule,
1324 struct ipa_nat_indx_tbl_sw_rule *sw_rule,
1325 struct ipa_nat_ip4_table_cache *tbl_ptr)
1326 {
1327 struct ipa_nat_indx_tbl_rule *indx_tbl, *indx_expn_tbl;
1328 uint16_t prev = 0, nxt_indx = 0, new_entry;
1329
1330 indx_tbl =
1331 (struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_addr;
1332 indx_expn_tbl =
1333 (struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_expn_addr;
1334
1335 new_entry = src_hash(clnt_rule->private_ip,
1336 clnt_rule->private_port,
1337 clnt_rule->target_ip,
1338 clnt_rule->target_port,
1339 clnt_rule->protocol,
1340 tbl_ptr->table_entries-1);
1341
1342 /* check whether there is any collision
1343 if no collision return */
1344 if (!Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
1345 INDX_TBL_TBL_ENTRY_FIELD)) {
1346 sw_rule->prev_index = 0;
1347 IPADBG("Source Nat Index Table Entry %d\n", new_entry);
1348 return new_entry;
1349 }
1350
1351 /* check for more than one collision */
1352 if (Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
1353 INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
1354 sw_rule->prev_index = new_entry;
1355 IPADBG("First collosion. Entry %d\n", new_entry);
1356 } else {
1357 /* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
1358 nxt_indx = Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
1359 INDX_TBL_NEXT_INDEX_FILED);
1360
1361 while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
1362 prev = nxt_indx;
1363
1364 nxt_indx -= tbl_ptr->table_entries;
1365 nxt_indx = Read16BitFieldValue(indx_expn_tbl[nxt_indx].tbl_entry_nxt_indx,
1366 INDX_TBL_NEXT_INDEX_FILED);
1367
1368 /* Handling error case */
1369 if (prev == nxt_indx) {
1370 IPAERR("Error: Prev:%d and next:%d index should not be same\n", prev, nxt_indx);
1371 return IPA_NAT_INVALID_NAT_ENTRY;
1372 }
1373 }
1374
1375 sw_rule->prev_index = prev;
1376 }
1377
1378 /* On collision check for the free entry in expansion table */
1379 new_entry = ipa_nati_index_expn_get_free_entry(indx_expn_tbl,
1380 tbl_ptr->expn_table_entries);
1381
1382 if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
1383 /* Expansion table is full return*/
1384 IPAERR("Index expansion table is full\n");
1385 IPAERR("Current Table: %d & Expn Entries: %d\n",
1386 tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
1387 return IPA_NAT_INVALID_NAT_ENTRY;
1388 }
1389 new_entry += tbl_ptr->table_entries;
1390
1391
1392 if (sw_rule->prev_index == new_entry) {
1393 IPAERR("Error: prev_entry:%d ", sw_rule->prev_index);
1394 IPAERR("and new_entry:%d should not be same ", new_entry);
1395 IPAERR("infinite loop detected\n");
1396 return IPA_NAT_INVALID_NAT_ENTRY;
1397 }
1398
1399 IPADBG("index table entry %d\n", new_entry);
1400 return new_entry;
1401 }
1402
1403 /* returns index expn table entry index */
ipa_nati_index_expn_get_free_entry(struct ipa_nat_indx_tbl_rule * indx_tbl,uint16_t size)1404 uint16_t ipa_nati_index_expn_get_free_entry(
1405 struct ipa_nat_indx_tbl_rule *indx_tbl,
1406 uint16_t size)
1407 {
1408 int cnt;
1409 for (cnt = 1; cnt < size; cnt++) {
1410 if (!Read16BitFieldValue(indx_tbl[cnt].tbl_entry_nxt_indx,
1411 INDX_TBL_TBL_ENTRY_FIELD)) {
1412 return cnt;
1413 }
1414 }
1415
1416 IPAERR("nat index expansion table is full\n");
1417 return 0;
1418 }
1419
ipa_nati_write_next_index(uint8_t tbl_indx,nat_table_type tbl_type,uint16_t value,uint32_t offset)1420 void ipa_nati_write_next_index(uint8_t tbl_indx,
1421 nat_table_type tbl_type,
1422 uint16_t value,
1423 uint32_t offset)
1424 {
1425 struct ipa_ioc_nat_dma_cmd *cmd;
1426
1427 IPADBG("Updating next index field of table %d on collosion using dma\n", tbl_type);
1428 IPADBG("table index: %d, value: %d offset;%d\n", tbl_indx, value, offset);
1429
1430 cmd = (struct ipa_ioc_nat_dma_cmd *)
1431 malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
1432 sizeof(struct ipa_ioc_nat_dma_one));
1433 if (NULL == cmd) {
1434 IPAERR("unable to allocate memory\n");
1435 return;
1436 }
1437
1438 cmd->dma[0].table_index = tbl_indx;
1439 cmd->dma[0].base_addr = tbl_type;
1440 cmd->dma[0].data = value;
1441 cmd->dma[0].offset = offset;
1442
1443 cmd->entries = 1;
1444 if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
1445 perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
1446 IPAERR("unable to call dma icotl to update next index\n");
1447 IPAERR("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
1448 goto fail;
1449 }
1450
1451 fail:
1452 free(cmd);
1453
1454 return;
1455 }
1456
ipa_nati_copy_ipv4_rule_to_hw(struct ipa_nat_ip4_table_cache * ipv4_cache,struct ipa_nat_sw_rule * rule,uint16_t entry,uint8_t tbl_index)1457 void ipa_nati_copy_ipv4_rule_to_hw(
1458 struct ipa_nat_ip4_table_cache *ipv4_cache,
1459 struct ipa_nat_sw_rule *rule,
1460 uint16_t entry, uint8_t tbl_index)
1461 {
1462 struct ipa_nat_rule *tbl_ptr;
1463 uint16_t prev_entry = rule->prev_index;
1464 nat_table_type tbl_type;
1465 uint32_t offset = 0;
1466
1467 if (entry < ipv4_cache->table_entries) {
1468 tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
1469
1470 memcpy(&tbl_ptr[entry],
1471 rule,
1472 sizeof(struct ipa_nat_rule));
1473 } else {
1474 tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_expn_rules_addr;
1475 memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
1476 rule,
1477 sizeof(struct ipa_nat_rule));
1478 }
1479
1480 /* Update the previos entry next_index */
1481 if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
1482
1483 if (prev_entry < ipv4_cache->table_entries) {
1484 tbl_type = IPA_NAT_BASE_TBL;
1485 tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
1486 } else {
1487 tbl_type = IPA_NAT_EXPN_TBL;
1488 /* tbp_ptr is already pointing to expansion table
1489 no need to initialize it */
1490 prev_entry = prev_entry - ipv4_cache->table_entries;
1491 }
1492
1493 offset = ipa_nati_get_entry_offset(ipv4_cache, tbl_type, prev_entry);
1494 offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
1495
1496 ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
1497 }
1498
1499 return;
1500 }
1501
ipa_nati_copy_ipv4_index_rule_to_hw(struct ipa_nat_ip4_table_cache * ipv4_cache,struct ipa_nat_indx_tbl_sw_rule * indx_sw_rule,uint16_t entry,uint8_t tbl_index)1502 void ipa_nati_copy_ipv4_index_rule_to_hw(
1503 struct ipa_nat_ip4_table_cache *ipv4_cache,
1504 struct ipa_nat_indx_tbl_sw_rule *indx_sw_rule,
1505 uint16_t entry,
1506 uint8_t tbl_index)
1507 {
1508 struct ipa_nat_indx_tbl_rule *tbl_ptr;
1509 struct ipa_nat_sw_indx_tbl_rule sw_rule;
1510 uint16_t prev_entry = indx_sw_rule->prev_index;
1511 nat_table_type tbl_type;
1512 uint16_t offset = 0;
1513
1514 sw_rule.next_index = indx_sw_rule->next_index;
1515 sw_rule.tbl_entry = indx_sw_rule->tbl_entry;
1516
1517 if (entry < ipv4_cache->table_entries) {
1518 tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
1519
1520 memcpy(&tbl_ptr[entry],
1521 &sw_rule,
1522 sizeof(struct ipa_nat_indx_tbl_rule));
1523 } else {
1524 tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_expn_addr;
1525
1526 memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
1527 &sw_rule,
1528 sizeof(struct ipa_nat_indx_tbl_rule));
1529 }
1530
1531 /* Update the next field of previous entry on collosion */
1532 if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
1533 if (prev_entry < ipv4_cache->table_entries) {
1534 tbl_type = IPA_NAT_INDX_TBL;
1535 tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
1536 } else {
1537 tbl_type = IPA_NAT_INDEX_EXPN_TBL;
1538 /* tbp_ptr is already pointing to expansion table
1539 no need to initialize it */
1540 prev_entry = prev_entry - ipv4_cache->table_entries;
1541 }
1542
1543 offset = ipa_nati_get_index_entry_offset(ipv4_cache, tbl_type, prev_entry);
1544 offset += IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1545
1546 IPADBG("Updating next index field of index table on collosion using dma()\n");
1547 ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
1548 }
1549
1550 return;
1551 }
1552
ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,uint16_t entry)1553 int ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,
1554 uint16_t entry)
1555 {
1556 struct ipa_ioc_nat_dma_cmd *cmd;
1557 struct ipa_nat_rule *tbl_ptr;
1558 uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_indx].tbl_addr_offset;
1559 int ret = 0;
1560
1561 cmd = (struct ipa_ioc_nat_dma_cmd *)
1562 malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
1563 sizeof(struct ipa_ioc_nat_dma_one));
1564 if (NULL == cmd) {
1565 IPAERR("unable to allocate memory\n");
1566 return -ENOMEM;
1567 }
1568
1569 if (entry < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries) {
1570 tbl_ptr =
1571 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;
1572
1573 cmd->dma[0].table_index = tbl_indx;
1574 cmd->dma[0].base_addr = IPA_NAT_BASE_TBL;
1575 cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;
1576
1577 cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
1578 cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
1579 } else {
1580 tbl_ptr =
1581 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr;
1582 entry = entry - ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
1583
1584 cmd->dma[0].table_index = tbl_indx;
1585 cmd->dma[0].base_addr = IPA_NAT_EXPN_TBL;
1586 cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;
1587
1588 cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
1589 cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
1590 cmd->dma[0].offset += offset;
1591 }
1592
1593 cmd->entries = 1;
1594 if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
1595 perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
1596 IPAERR("unable to call dma icotl\n");
1597 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
1598 ret = -EIO;
1599 goto fail;
1600 }
1601 IPADBG("posted IPA_IOC_NAT_DMA to kernel successfully during add operation\n");
1602
1603
1604 fail:
1605 free(cmd);
1606
1607 return ret;
1608 }
1609
1610
ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,uint32_t rule_hdl)1611 int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,
1612 uint32_t rule_hdl)
1613 {
1614 uint8_t expn_tbl;
1615 uint16_t tbl_entry;
1616 struct ipa_nat_ip4_table_cache *tbl_ptr;
1617 del_type rule_pos;
1618 uint8_t tbl_indx = (uint8_t)(tbl_hdl - 1);
1619 int ret;
1620
1621 /* Parse the rule handle */
1622 ipa_nati_parse_ipv4_rule_hdl(tbl_indx, (uint16_t)rule_hdl,
1623 &expn_tbl, &tbl_entry);
1624 if (IPA_NAT_INVALID_NAT_ENTRY == tbl_entry) {
1625 IPAERR("Invalid Rule Entry\n");
1626 ret = -EINVAL;
1627 goto fail;
1628 }
1629
1630 if (pthread_mutex_lock(&nat_mutex) != 0) {
1631 ret = -1;
1632 goto mutex_lock_error;
1633 }
1634
1635 IPADBG("Delete below rule\n");
1636 IPADBG("tbl_entry:%d expn_tbl:%d\n", tbl_entry, expn_tbl);
1637
1638 tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
1639 if (!tbl_ptr->valid) {
1640 IPAERR("invalid table handle\n");
1641 ret = -EINVAL;
1642 if (pthread_mutex_unlock(&nat_mutex) != 0)
1643 goto mutex_unlock_error;
1644 goto fail;
1645 }
1646
1647 ipa_nati_find_rule_pos(tbl_ptr, expn_tbl,
1648 tbl_entry, &rule_pos);
1649 IPADBG("rule_pos:%d\n", rule_pos);
1650
1651 if (ipa_nati_post_del_dma_cmd(tbl_indx, tbl_entry,
1652 expn_tbl, rule_pos)) {
1653 ret = -EINVAL;
1654 if (pthread_mutex_unlock(&nat_mutex) != 0)
1655 goto mutex_unlock_error;
1656 goto fail;
1657 }
1658
1659 ipa_nati_del_dead_ipv4_head_nodes(tbl_indx);
1660
1661 /* Reset rule_id_array entry */
1662 ipv4_nat_cache.ip4_tbl[tbl_indx].rule_id_array[rule_hdl-1] =
1663 IPA_NAT_INVALID_NAT_ENTRY;
1664
1665 #ifdef NAT_DUMP
1666 IPADBG("Dumping Table after deleting rule\n");
1667 ipa_nat_dump_ipv4_table(tbl_hdl);
1668 #endif
1669
1670 if (pthread_mutex_unlock(&nat_mutex) != 0) {
1671 ret = -1;
1672 goto mutex_unlock_error;
1673 }
1674
1675 return 0;
1676
1677 mutex_lock_error:
1678 IPAERR("unable to lock the nat mutex\n");
1679 return ret;
1680
1681 mutex_unlock_error:
1682 IPAERR("unable to unlock the nat mutex\n");
1683
1684 fail:
1685 return ret;
1686 }
1687
ReorderCmds(struct ipa_ioc_nat_dma_cmd * cmd,int size)1688 void ReorderCmds(struct ipa_ioc_nat_dma_cmd *cmd, int size)
1689 {
1690 int indx_tbl_start = 0, cnt, cnt1;
1691 struct ipa_ioc_nat_dma_cmd *tmp;
1692
1693 IPADBG("called ReorderCmds() with entries :%d\n", cmd->entries);
1694
1695 for (cnt = 0; cnt < cmd->entries; cnt++) {
1696 if (cmd->dma[cnt].base_addr == IPA_NAT_INDX_TBL ||
1697 cmd->dma[cnt].base_addr == IPA_NAT_INDEX_EXPN_TBL) {
1698 indx_tbl_start = cnt;
1699 break;
1700 }
1701 }
1702
1703 if (indx_tbl_start == 0) {
1704 IPADBG("Reorder not needed\n");
1705 return;
1706 }
1707
1708 tmp = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
1709 if (tmp == NULL) {
1710 IPAERR("unable to allocate memory\n");
1711 return;
1712 }
1713
1714 cnt1 = 0;
1715 tmp->entries = cmd->entries;
1716 for (cnt = indx_tbl_start; cnt < cmd->entries; cnt++) {
1717 tmp->dma[cnt1] = cmd->dma[cnt];
1718 cnt1++;
1719 }
1720
1721 for (cnt = 0; cnt < indx_tbl_start; cnt++) {
1722 tmp->dma[cnt1] = cmd->dma[cnt];
1723 cnt1++;
1724 }
1725
1726 memset(cmd, 0, size);
1727 memcpy(cmd, tmp, size);
1728 free(tmp);
1729
1730 return;
1731 }
1732
ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,uint16_t cur_tbl_entry,uint8_t expn_tbl,del_type rule_pos)1733 int ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,
1734 uint16_t cur_tbl_entry,
1735 uint8_t expn_tbl,
1736 del_type rule_pos)
1737 {
1738
1739 #define MAX_DMA_ENTRIES_FOR_DEL 3
1740
1741 struct ipa_nat_ip4_table_cache *cache_ptr;
1742 struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
1743 struct ipa_nat_rule *tbl_ptr;
1744 int ret = 0, size = 0;
1745
1746 uint16_t indx_tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
1747 del_type indx_rule_pos;
1748
1749 struct ipa_ioc_nat_dma_cmd *cmd;
1750 uint8_t no_of_cmds = 0;
1751
1752 uint16_t prev_entry = IPA_NAT_INVALID_NAT_ENTRY;
1753 uint16_t next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1754 uint16_t indx_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1755 uint16_t indx_next_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
1756 uint16_t table_entry;
1757
1758 size = sizeof(struct ipa_ioc_nat_dma_cmd)+
1759 (MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
1760
1761 cmd = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
1762 if (NULL == cmd) {
1763 IPAERR("unable to allocate memory\n");
1764 return -ENOMEM;
1765 }
1766
1767 cache_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
1768 if (!expn_tbl) {
1769 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
1770 } else {
1771 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
1772 }
1773
1774
1775 if (!Read16BitFieldValue(tbl_ptr[cur_tbl_entry].ip_cksm_enbl,
1776 ENABLE_FIELD)) {
1777 IPAERR("Deleting invalid(not enabled) rule\n");
1778 ret = -EINVAL;
1779 goto fail;
1780 }
1781
1782 indx_tbl_entry =
1783 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1784 SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD);
1785
1786 /* ================================================
1787 Base Table rule Deletion
1788 ================================================*/
1789 /* Just delete the current rule by disabling the flag field */
1790 if (IPA_NAT_DEL_TYPE_ONLY_ONE == rule_pos) {
1791 cmd->dma[no_of_cmds].table_index = tbl_indx;
1792 cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1793 cmd->dma[no_of_cmds].data = IPA_NAT_FLAG_DISABLE_BIT_MASK;
1794
1795 cmd->dma[no_of_cmds].offset =
1796 ipa_nati_get_entry_offset(cache_ptr,
1797 cmd->dma[no_of_cmds].base_addr,
1798 cur_tbl_entry);
1799 cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
1800 }
1801
1802 /* Just update the protocol field to invalid */
1803 else if (IPA_NAT_DEL_TYPE_HEAD == rule_pos) {
1804 cmd->dma[no_of_cmds].table_index = tbl_indx;
1805 cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1806 cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_PROTO_FIELD_VALUE;
1807
1808 cmd->dma[no_of_cmds].offset =
1809 ipa_nati_get_entry_offset(cache_ptr,
1810 cmd->dma[no_of_cmds].base_addr,
1811 cur_tbl_entry);
1812 cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_PROTO_FIELD_OFFSET;
1813
1814 IPADBG("writing invalid proto: 0x%x\n", cmd->dma[no_of_cmds].data);
1815 }
1816
1817 /*
1818 Update the previous entry of next_index field value
1819 with current entry next_index field value
1820 */
1821 else if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
1822 prev_entry =
1823 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1824 SW_SPEC_PARAM_PREV_INDEX_FIELD);
1825
1826 cmd->dma[no_of_cmds].table_index = tbl_indx;
1827 cmd->dma[no_of_cmds].data =
1828 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
1829 NEXT_INDEX_FIELD);
1830
1831 cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1832 if (prev_entry >= cache_ptr->table_entries) {
1833 cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
1834 prev_entry -= cache_ptr->table_entries;
1835 }
1836
1837 cmd->dma[no_of_cmds].offset =
1838 ipa_nati_get_entry_offset(cache_ptr,
1839 cmd->dma[no_of_cmds].base_addr, prev_entry);
1840
1841 cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
1842 }
1843
1844 /*
1845 Reset the previous entry of next_index field with 0
1846 */
1847 else if (IPA_NAT_DEL_TYPE_LAST == rule_pos) {
1848 prev_entry =
1849 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
1850 SW_SPEC_PARAM_PREV_INDEX_FIELD);
1851
1852 cmd->dma[no_of_cmds].table_index = tbl_indx;
1853 cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1854
1855 cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
1856 if (prev_entry >= cache_ptr->table_entries) {
1857 cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
1858 prev_entry -= cache_ptr->table_entries;
1859 }
1860
1861 cmd->dma[no_of_cmds].offset =
1862 ipa_nati_get_entry_offset(cache_ptr,
1863 cmd->dma[no_of_cmds].base_addr, prev_entry);
1864
1865 cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
1866 }
1867
1868 /* ================================================
1869 Base Table rule Deletion End
1870 ================================================*/
1871
1872 /* ================================================
1873 Index Table rule Deletion
1874 ================================================*/
1875 ipa_nati_find_index_rule_pos(cache_ptr,
1876 indx_tbl_entry,
1877 &indx_rule_pos);
1878 IPADBG("Index table entry: 0x%x\n", indx_tbl_entry);
1879 IPADBG("and position: %d\n", indx_rule_pos);
1880 if (indx_tbl_entry >= cache_ptr->table_entries) {
1881 indx_tbl_entry -= cache_ptr->table_entries;
1882 indx_tbl_ptr =
1883 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
1884 } else {
1885 indx_tbl_ptr =
1886 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
1887 }
1888
1889 /* Just delete the current rule by resetting nat_table_index field to 0 */
1890 if (IPA_NAT_DEL_TYPE_ONLY_ONE == indx_rule_pos) {
1891 no_of_cmds++;
1892 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1893 cmd->dma[no_of_cmds].table_index = tbl_indx;
1894 cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1895
1896 cmd->dma[no_of_cmds].offset =
1897 ipa_nati_get_index_entry_offset(cache_ptr,
1898 cmd->dma[no_of_cmds].base_addr,
1899 indx_tbl_entry);
1900
1901 cmd->dma[no_of_cmds].offset +=
1902 IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
1903 }
1904
1905 /* copy the next entry values to current entry */
1906 else if (IPA_NAT_DEL_TYPE_HEAD == indx_rule_pos) {
1907 next_entry =
1908 Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
1909 INDX_TBL_NEXT_INDEX_FILED);
1910
1911 next_entry -= cache_ptr->table_entries;
1912
1913 no_of_cmds++;
1914 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1915 cmd->dma[no_of_cmds].table_index = tbl_indx;
1916
1917 /* Copy the nat_table_index field value of next entry */
1918 indx_tbl_ptr =
1919 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
1920 cmd->dma[no_of_cmds].data =
1921 Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
1922 INDX_TBL_TBL_ENTRY_FIELD);
1923
1924 cmd->dma[no_of_cmds].offset =
1925 ipa_nati_get_index_entry_offset(cache_ptr,
1926 cmd->dma[no_of_cmds].base_addr,
1927 indx_tbl_entry);
1928
1929 cmd->dma[no_of_cmds].offset +=
1930 IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
1931
1932 /* Copy the next_index field value of next entry */
1933 no_of_cmds++;
1934 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1935 cmd->dma[no_of_cmds].table_index = tbl_indx;
1936 cmd->dma[no_of_cmds].data =
1937 Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
1938 INDX_TBL_NEXT_INDEX_FILED);
1939
1940 cmd->dma[no_of_cmds].offset =
1941 ipa_nati_get_index_entry_offset(cache_ptr,
1942 cmd->dma[no_of_cmds].base_addr, indx_tbl_entry);
1943
1944 cmd->dma[no_of_cmds].offset +=
1945 IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1946 indx_next_entry = next_entry;
1947 }
1948
1949 /*
1950 Update the previous entry of next_index field value
1951 with current entry next_index field value
1952 */
1953 else if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
1954 prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
1955
1956 no_of_cmds++;
1957 cmd->dma[no_of_cmds].table_index = tbl_indx;
1958 cmd->dma[no_of_cmds].data =
1959 Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
1960 INDX_TBL_NEXT_INDEX_FILED);
1961
1962 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1963 if (prev_entry >= cache_ptr->table_entries) {
1964 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
1965 prev_entry -= cache_ptr->table_entries;
1966 }
1967
1968 IPADBG("prev_entry: %d update with cur next_index: %d\n",
1969 prev_entry, cmd->dma[no_of_cmds].data);
1970 IPADBG("prev_entry: %d exist in table_type:%d\n",
1971 prev_entry, cmd->dma[no_of_cmds].base_addr);
1972
1973 cmd->dma[no_of_cmds].offset =
1974 ipa_nati_get_index_entry_offset(cache_ptr,
1975 cmd->dma[no_of_cmds].base_addr, prev_entry);
1976
1977 cmd->dma[no_of_cmds].offset +=
1978 IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
1979 }
1980
1981 /* Reset the previous entry next_index field with 0 */
1982 else if (IPA_NAT_DEL_TYPE_LAST == indx_rule_pos) {
1983 prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
1984
1985 no_of_cmds++;
1986 cmd->dma[no_of_cmds].table_index = tbl_indx;
1987 cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
1988
1989 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
1990 if (prev_entry >= cache_ptr->table_entries) {
1991 cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
1992 prev_entry -= cache_ptr->table_entries;
1993 }
1994
1995 IPADBG("Reseting prev_entry: %d next_index\n", prev_entry);
1996 IPADBG("prev_entry: %d exist in table_type:%d\n",
1997 prev_entry, cmd->dma[no_of_cmds].base_addr);
1998
1999 cmd->dma[no_of_cmds].offset =
2000 ipa_nati_get_index_entry_offset(cache_ptr,
2001 cmd->dma[no_of_cmds].base_addr, prev_entry);
2002
2003 cmd->dma[no_of_cmds].offset +=
2004 IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
2005 }
2006
2007 /* ================================================
2008 Index Table rule Deletion End
2009 ================================================*/
2010 cmd->entries = no_of_cmds + 1;
2011
2012 if (cmd->entries > 1) {
2013 ReorderCmds(cmd, size);
2014 }
2015 if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
2016 perror("ipa_nati_post_del_dma_cmd(): ioctl error value");
2017 IPAERR("unable to post cmd\n");
2018 IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
2019 ret = -EIO;
2020 goto fail;
2021 }
2022
2023 /* if entry exist in IPA_NAT_DEL_TYPE_MIDDLE of list
2024 Update the previous entry in sw specific parameters
2025 */
2026 if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
2027 /* Retrieve the current entry prev_entry value */
2028 prev_entry =
2029 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
2030 SW_SPEC_PARAM_PREV_INDEX_FIELD);
2031
2032 /* Retrieve the next entry */
2033 next_entry =
2034 Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
2035 NEXT_INDEX_FIELD);
2036
2037 next_entry -= cache_ptr->table_entries;
2038 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
2039
2040 /* copy the current entry prev_entry value to next entry*/
2041 UpdateSwSpecParams(&tbl_ptr[next_entry],
2042 IPA_NAT_SW_PARAM_PREV_INDX_BYTE,
2043 prev_entry);
2044 }
2045
2046 /* Reset the other field values of current delete entry
2047 In case of IPA_NAT_DEL_TYPE_HEAD, don't reset */
2048 if (IPA_NAT_DEL_TYPE_HEAD != rule_pos) {
2049 memset(&tbl_ptr[cur_tbl_entry], 0, sizeof(struct ipa_nat_rule));
2050 }
2051
2052 if (indx_rule_pos == IPA_NAT_DEL_TYPE_HEAD) {
2053
2054 /* Update next next entry previous value to current
2055 entry as we moved the next entry values
2056 to current entry */
2057 indx_next_next_entry =
2058 Read16BitFieldValue(indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx,
2059 INDX_TBL_NEXT_INDEX_FILED);
2060
2061 if (indx_next_next_entry != 0 &&
2062 indx_next_next_entry >= cache_ptr->table_entries) {
2063
2064 IPADBG("Next Next entry: %d\n", indx_next_next_entry);
2065 indx_next_next_entry -= cache_ptr->table_entries;
2066
2067 IPADBG("Updating entry: %d prev index to: %d\n",
2068 indx_next_next_entry, indx_tbl_entry);
2069 cache_ptr->index_expn_table_meta[indx_next_next_entry].prev_index =
2070 indx_tbl_entry;
2071 }
2072
2073 /* Now reset the next entry as we copied
2074 the next entry to current entry */
2075 IPADBG("Resetting, index table entry(Proper): %d\n",
2076 (cache_ptr->table_entries + indx_next_entry));
2077
2078 /* This resets both table entry and next index values */
2079 indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx = 0;
2080
2081 /*
2082 In case of IPA_NAT_DEL_TYPE_HEAD, update the sw specific parameters
2083 (index table entry) of base table entry
2084 */
2085 indx_tbl_ptr =
2086 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
2087 table_entry =
2088 Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
2089 INDX_TBL_TBL_ENTRY_FIELD);
2090
2091 if (table_entry >= cache_ptr->table_entries) {
2092 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
2093 table_entry -= cache_ptr->table_entries;
2094 } else {
2095 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
2096 }
2097
2098 UpdateSwSpecParams(&tbl_ptr[table_entry],
2099 IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE,
2100 indx_tbl_entry);
2101 } else {
2102 /* Update the prev_entry value (in index_expn_table_meta)
2103 for the next_entry in list with current entry prev_entry value
2104 */
2105 if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
2106 next_entry =
2107 Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
2108 INDX_TBL_NEXT_INDEX_FILED);
2109
2110 if (next_entry >= cache_ptr->table_entries) {
2111 next_entry -= cache_ptr->table_entries;
2112 }
2113
2114 cache_ptr->index_expn_table_meta[next_entry].prev_index =
2115 cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
2116
2117 cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index =
2118 IPA_NAT_INVALID_NAT_ENTRY;
2119 }
2120
2121 IPADBG("At, indx_tbl_entry value: %d\n", indx_tbl_entry);
2122 IPADBG("At, indx_tbl_entry member address: %p\n",
2123 &indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx);
2124
2125 indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx = 0;
2126
2127 }
2128
2129 fail:
2130 free(cmd);
2131
2132 return ret;
2133 }
2134
ipa_nati_find_index_rule_pos(struct ipa_nat_ip4_table_cache * cache_ptr,uint16_t tbl_entry,del_type * rule_pos)2135 void ipa_nati_find_index_rule_pos(
2136 struct ipa_nat_ip4_table_cache *cache_ptr,
2137 uint16_t tbl_entry,
2138 del_type *rule_pos)
2139 {
2140 struct ipa_nat_indx_tbl_rule *tbl_ptr;
2141
2142 if (tbl_entry >= cache_ptr->table_entries) {
2143 tbl_ptr =
2144 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
2145
2146 tbl_entry -= cache_ptr->table_entries;
2147 if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
2148 INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
2149 *rule_pos = IPA_NAT_DEL_TYPE_LAST;
2150 } else {
2151 *rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
2152 }
2153 } else {
2154 tbl_ptr =
2155 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
2156
2157 if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
2158 INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
2159 *rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
2160 } else {
2161 *rule_pos = IPA_NAT_DEL_TYPE_HEAD;
2162 }
2163 }
2164 }
2165
ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache * cache_ptr,uint8_t expn_tbl,uint16_t tbl_entry,del_type * rule_pos)2166 void ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache *cache_ptr,
2167 uint8_t expn_tbl,
2168 uint16_t tbl_entry,
2169 del_type *rule_pos)
2170 {
2171 struct ipa_nat_rule *tbl_ptr;
2172
2173 if (expn_tbl) {
2174 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
2175 if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
2176 NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2177 *rule_pos = IPA_NAT_DEL_TYPE_LAST;
2178 } else {
2179 *rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
2180 }
2181 } else {
2182 tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
2183 if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
2184 NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2185 *rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
2186 } else {
2187 *rule_pos = IPA_NAT_DEL_TYPE_HEAD;
2188 }
2189 }
2190 }
2191
ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx)2192 void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx)
2193 {
2194 struct ipa_nat_rule *tbl_ptr;
2195 uint16_t cnt;
2196
2197 tbl_ptr =
2198 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;
2199
2200 for (cnt = 0;
2201 cnt < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
2202 cnt++) {
2203
2204 if (Read8BitFieldValue(tbl_ptr[cnt].ts_proto,
2205 PROTOCOL_FIELD) == IPAHAL_NAT_INVALID_PROTOCOL
2206 &&
2207 Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
2208 NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
2209 /* Delete the IPA_NAT_DEL_TYPE_HEAD node */
2210 IPADBG("deleting the dead node 0x%x\n", cnt);
2211 memset(&tbl_ptr[cnt], 0, sizeof(struct ipa_nat_rule));
2212 }
2213 } /* end of for loop */
2214
2215 return;
2216 }
2217
2218
2219 /* ========================================================
2220 Debug functions
2221 ========================================================*/
2222 #ifdef NAT_DUMP
ipa_nat_dump_ipv4_table(uint32_t tbl_hdl)2223 void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl)
2224 {
2225 struct ipa_nat_rule *tbl_ptr;
2226 struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
2227 int cnt;
2228 uint8_t atl_one = 0;
2229
2230 if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
2231 tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
2232 IPAERR("invalid table handle passed\n");
2233 return;
2234 }
2235
2236 /* Print ipv4 rules */
2237 IPADBG("Dumping ipv4 active rules:\n");
2238 tbl_ptr = (struct ipa_nat_rule *)
2239 ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
2240 for (cnt = 0;
2241 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2242 cnt++) {
2243 if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2244 ENABLE_FIELD)) {
2245 atl_one = 1;
2246 ipa_nati_print_rule(&tbl_ptr[cnt], cnt);
2247 }
2248 }
2249 if (!atl_one) {
2250 IPADBG("No active base rules, total: %d\n",
2251 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
2252 }
2253 atl_one = 0;
2254
2255 /* Print ipv4 expansion rules */
2256 IPADBG("Dumping ipv4 active expansion rules:\n");
2257 tbl_ptr = (struct ipa_nat_rule *)
2258 ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
2259 for (cnt = 0;
2260 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2261 cnt++) {
2262 if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2263 ENABLE_FIELD)) {
2264 atl_one = 1;
2265 ipa_nati_print_rule(&tbl_ptr[cnt],
2266 (cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries));
2267 }
2268 }
2269 if (!atl_one) {
2270 IPADBG("No active base expansion rules, total: %d\n",
2271 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
2272 }
2273 atl_one = 0;
2274
2275 /* Print ipv4 index rules */
2276 IPADBG("Dumping ipv4 index active rules:\n");
2277 indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2278 ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
2279 for (cnt = 0;
2280 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2281 cnt++) {
2282 if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2283 INDX_TBL_TBL_ENTRY_FIELD)) {
2284 atl_one = 1;
2285 ipa_nati_print_index_rule(&indx_tbl_ptr[cnt], cnt, 0);
2286 }
2287 }
2288 if (!atl_one) {
2289 IPADBG("No active index table rules, total:%d\n",
2290 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
2291 }
2292 atl_one = 0;
2293
2294
2295 /* Print ipv4 index expansion rules */
2296 IPADBG("Dumping ipv4 index expansion active rules:\n");
2297 indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2298 ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
2299 for (cnt = 0;
2300 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2301 cnt++) {
2302 if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2303 INDX_TBL_TBL_ENTRY_FIELD)) {
2304 atl_one = 1;
2305 ipa_nati_print_index_rule(&indx_tbl_ptr[cnt],
2306 (cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries),
2307 ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_expn_table_meta[cnt].prev_index);
2308 }
2309 }
2310 if (!atl_one) {
2311 IPADBG("No active index expansion rules, total:%d\n",
2312 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
2313 }
2314 atl_one = 0;
2315
2316 }
2317
ipa_nati_print_rule(struct ipa_nat_rule * param,uint32_t rule_id)2318 void ipa_nati_print_rule(
2319 struct ipa_nat_rule *param,
2320 uint32_t rule_id)
2321 {
2322 struct ipa_nat_sw_rule sw_rule;
2323 memcpy(&sw_rule, param, sizeof(sw_rule));
2324 uint32_t ip_addr;
2325
2326 IPADUMP("rule-id:%d ", rule_id);
2327 ip_addr = sw_rule.target_ip;
2328 IPADUMP("Trgt-IP:%d.%d.%d.%d ",
2329 ((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
2330 ((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
2331
2332 IPADUMP("Trgt-Port:%d Priv-Port:%d ", sw_rule.target_port, sw_rule.private_port);
2333
2334 ip_addr = sw_rule.private_ip;
2335 IPADUMP("Priv-IP:%d.%d.%d.%d ",
2336 ((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
2337 ((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
2338
2339 IPADUMP("Pub-Port:%d Nxt-indx:%d ", sw_rule.public_port, sw_rule.next_index);
2340 IPADUMP("IP-cksm-delta:0x%x En-bit:0x%x ", sw_rule.ip_chksum, sw_rule.enable);
2341 IPADUMP("TS:0x%x Proto:0x%x ", sw_rule.time_stamp, sw_rule.protocol);
2342 IPADUMP("Prv-indx:%d indx_tbl_entry:%d ", sw_rule.prev_index, sw_rule.indx_tbl_entry);
2343 IPADUMP("Tcp-udp-cksum-delta:0x%x", sw_rule.tcp_udp_chksum);
2344 IPADUMP("\n");
2345 return;
2346 }
2347
ipa_nati_print_index_rule(struct ipa_nat_indx_tbl_rule * param,uint32_t rule_id,uint16_t prev_indx)2348 void ipa_nati_print_index_rule(
2349 struct ipa_nat_indx_tbl_rule *param,
2350 uint32_t rule_id, uint16_t prev_indx)
2351 {
2352 struct ipa_nat_sw_indx_tbl_rule sw_rule;
2353 memcpy(&sw_rule, param, sizeof(sw_rule));
2354
2355 IPADUMP("rule-id:%d Table_entry:%d Next_index:%d, prev_indx:%d",
2356 rule_id, sw_rule.tbl_entry, sw_rule.next_index, prev_indx);
2357 IPADUMP("\n");
2358 return;
2359 }
2360
ipa_nati_query_nat_rules(uint32_t tbl_hdl,nat_table_type tbl_type)2361 int ipa_nati_query_nat_rules(
2362 uint32_t tbl_hdl,
2363 nat_table_type tbl_type)
2364 {
2365 struct ipa_nat_rule *tbl_ptr;
2366 struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
2367 int cnt = 0, ret = 0;
2368
2369 if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
2370 tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
2371 IPAERR("invalid table handle passed\n");
2372 return ret;
2373 }
2374
2375 /* Print ipv4 rules */
2376 if (tbl_type == IPA_NAT_BASE_TBL) {
2377 IPADBG("Counting ipv4 active rules:\n");
2378 tbl_ptr = (struct ipa_nat_rule *)
2379 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_rules_addr;
2380 for (cnt = 0;
2381 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2382 cnt++) {
2383 if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2384 ENABLE_FIELD)) {
2385 ret++;
2386 }
2387 }
2388 if (!ret) {
2389 IPADBG("No active base rules\n");
2390 }
2391
2392 IPADBG("Number of active base rules: %d\n", ret);
2393 }
2394
2395 /* Print ipv4 expansion rules */
2396 if (tbl_type == IPA_NAT_EXPN_TBL) {
2397 IPADBG("Counting ipv4 active expansion rules:\n");
2398 tbl_ptr = (struct ipa_nat_rule *)
2399 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_expn_rules_addr;
2400 for (cnt = 0;
2401 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2402 cnt++) {
2403 if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
2404 ENABLE_FIELD)) {
2405 ret++;
2406 }
2407 }
2408 if (!ret) {
2409 IPADBG("No active base expansion rules\n");
2410 }
2411
2412 IPADBG("Number of active base expansion rules: %d\n", ret);
2413 }
2414
2415 /* Print ipv4 index rules */
2416 if (tbl_type == IPA_NAT_INDX_TBL) {
2417 IPADBG("Counting ipv4 index active rules:\n");
2418 indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2419 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_addr;
2420 for (cnt = 0;
2421 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
2422 cnt++) {
2423 if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2424 INDX_TBL_TBL_ENTRY_FIELD)) {
2425 ret++;
2426 }
2427 }
2428 if (!ret) {
2429 IPADBG("No active index table rules\n");
2430 }
2431
2432 IPADBG("Number of active index table rules: %d\n", ret);
2433 }
2434
2435 /* Print ipv4 index expansion rules */
2436 if (tbl_type == IPA_NAT_INDEX_EXPN_TBL) {
2437 IPADBG("Counting ipv4 index expansion active rules:\n");
2438 indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
2439 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_expn_addr;
2440 for (cnt = 0;
2441 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
2442 cnt++) {
2443 if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
2444 INDX_TBL_TBL_ENTRY_FIELD)) {
2445 ret++;
2446 }
2447 }
2448
2449 if (!ret)
2450 IPADBG("No active index expansion rules\n");
2451
2452 IPADBG("Number of active index expansion rules: %d\n", ret);
2453 }
2454
2455 return ret;
2456 }
2457 #endif
2458