Lines Matching refs:pb (uses of the private struct pcap_bpf pointer pb in libpcap's pcap-bpf.c)

240 struct pcap_bpf *pb = p->priv; in pcap_getnonblock_bpf() local
242 if (pb->zerocopy) in pcap_getnonblock_bpf()
243 return (pb->nonblock); in pcap_getnonblock_bpf()
252 struct pcap_bpf *pb = p->priv; in pcap_setnonblock_bpf() local
254 if (pb->zerocopy) { in pcap_setnonblock_bpf()
255 pb->nonblock = nonblock; in pcap_setnonblock_bpf()
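
The two groups above show the non-blocking handling on the zero-copy path: pcap_getnonblock_bpf() and pcap_setnonblock_bpf() just read and write a pb->nonblock flag instead of touching the descriptor, since zero-copy reads are paced by select() rather than read(). A minimal sketch of that pattern, using a simplified stand-in for the pcap_bpf fields (zc_state, zc_getnonblock, and zc_setnonblock are illustrative names; the real functions fall back to the generic fcntl()-based helpers when zerocopy is off):

/* Simplified stand-in for the zero-copy fields of struct pcap_bpf. */
struct zc_state {
    int zerocopy;   /* non-zero when the zero-copy BPF buffer mode is in use */
    int nonblock;   /* cached non-blocking flag for the zero-copy path */
};

/* Mirrors pcap_getnonblock_bpf(): in zero-copy mode the flag lives here. */
static int
zc_getnonblock(const struct zc_state *pb)
{
    if (pb->zerocopy)
        return (pb->nonblock);
    return (-1);    /* non-zero-copy case would defer to the fcntl() helper */
}

/* Mirrors pcap_setnonblock_bpf(): store the flag instead of toggling O_NONBLOCK. */
static void
zc_setnonblock(struct zc_state *pb, int nonblock)
{
    if (pb->zerocopy)
        pb->nonblock = nonblock;
    /* otherwise the generic fcntl(O_NONBLOCK) path would apply */
}
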
275 struct pcap_bpf *pb = p->priv; in pcap_next_zbuf_shm() local
278 if (pb->zbuffer == pb->zbuf2 || pb->zbuffer == NULL) { in pcap_next_zbuf_shm()
279 bzh = (struct bpf_zbuf_header *)pb->zbuf1; in pcap_next_zbuf_shm()
282 pb->bzh = bzh; in pcap_next_zbuf_shm()
283 pb->zbuffer = (u_char *)pb->zbuf1; in pcap_next_zbuf_shm()
284 p->buffer = pb->zbuffer + sizeof(*bzh); in pcap_next_zbuf_shm()
288 } else if (pb->zbuffer == pb->zbuf1) { in pcap_next_zbuf_shm()
289 bzh = (struct bpf_zbuf_header *)pb->zbuf2; in pcap_next_zbuf_shm()
292 pb->bzh = bzh; in pcap_next_zbuf_shm()
293 pb->zbuffer = (u_char *)pb->zbuf2; in pcap_next_zbuf_shm()
294 p->buffer = pb->zbuffer + sizeof(*bzh); in pcap_next_zbuf_shm()
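
The pcap_next_zbuf_shm() matches above are the double-buffer rotation: whichever of the two mmap()ed buffers is not currently being consumed is checked, and if its header shows the kernel has handed it over, pb->bzh, pb->zbuffer, and p->buffer are pointed at it. A sketch of that rotation under simplified types (zbuf_header, zc_bufs, and zc_next_shm are stand-ins; the real header is struct bpf_zbuf_header from <net/bpf.h>, and the readiness test on the generation counters is an assumption here since those lines are not in the match list):

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for FreeBSD's struct bpf_zbuf_header. */
struct zbuf_header {
    volatile uint32_t kernel_gen;   /* bumped by the kernel when it hands the buffer over */
    volatile uint32_t kernel_len;   /* bytes of packet data in the buffer */
    volatile uint32_t user_gen;     /* bumped by userland to acknowledge */
};

struct zc_bufs {
    unsigned char *zbuf1, *zbuf2;   /* the two mmap()ed zero-copy buffers */
    unsigned char *zbuffer;         /* buffer currently being consumed, or NULL */
    struct zbuf_header *bzh;        /* header of the buffer being consumed */
};

/* Sketch of the rotation in pcap_next_zbuf_shm(): alternate between the two
 * shared buffers; a buffer is "ready" when its kernel generation counter no
 * longer matches the user generation counter. Returns a pointer to the packet
 * data just past the header, or NULL if neither buffer is ready. */
static unsigned char *
zc_next_shm(struct zc_bufs *zc)
{
    struct zbuf_header *bzh;

    if (zc->zbuffer == zc->zbuf2 || zc->zbuffer == NULL) {
        bzh = (struct zbuf_header *)zc->zbuf1;
        if (bzh->kernel_gen != bzh->user_gen) {
            zc->bzh = bzh;
            zc->zbuffer = zc->zbuf1;
            return (zc->zbuffer + sizeof(*bzh));
        }
    } else if (zc->zbuffer == zc->zbuf1) {
        bzh = (struct zbuf_header *)zc->zbuf2;
        if (bzh->kernel_gen != bzh->user_gen) {
            zc->bzh = bzh;
            zc->zbuffer = zc->zbuf2;
            return (zc->zbuffer + sizeof(*bzh));
        }
    }
    return (NULL);
}
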
313 struct pcap_bpf *pb = p->priv; in pcap_next_zbuf() local
340 if (pb->interrupted && p->opt.timeout) { in pcap_next_zbuf()
341 expire = TSTOMILLI(&pb->firstsel) + p->opt.timeout; in pcap_next_zbuf()
345 pb->interrupted = 0; in pcap_next_zbuf()
362 if (!pb->nonblock) { in pcap_next_zbuf()
372 if (!pb->interrupted && p->opt.timeout) { in pcap_next_zbuf()
373 pb->interrupted = 1; in pcap_next_zbuf()
374 pb->firstsel = cur; in pcap_next_zbuf()
383 pb->interrupted = 0; in pcap_next_zbuf()
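
The pcap_next_zbuf() matches show the timeout bookkeeping around an interrupted select(): the time of the first select() is saved in pb->firstsel, and later retries only wait for whatever remains of p->opt.timeout, giving up once the full timeout has elapsed. A rough sketch of that remaining-time computation (ms_of(), wait_state, and remaining_timeout_ms() are illustrative stand-ins; TSTOMILLI() is the macro the real code uses to convert a timespec to milliseconds):

#include <time.h>

/* Hypothetical stand-in for TSTOMILLI(): timespec -> milliseconds. */
static long
ms_of(const struct timespec *ts)
{
    return (ts->tv_sec * 1000L + ts->tv_nsec / 1000000L);
}

struct wait_state {
    int interrupted;            /* a previous select() was interrupted */
    struct timespec firstsel;   /* time of that first select() */
};

/* How long the next select() should wait, given the configured timeout and
 * the time the first (interrupted) select() started. Returns 0 when the full
 * timeout has already elapsed, in which case the interrupted flag is cleared. */
static long
remaining_timeout_ms(struct wait_state *ws, long timeout_ms,
    const struct timespec *now)
{
    long expire, remain;

    if (ws->interrupted && timeout_ms != 0) {
        expire = ms_of(&ws->firstsel) + timeout_ms;
        remain = expire - ms_of(now);
        if (remain <= 0) {
            ws->interrupted = 0;    /* whole timeout used up; report no packets */
            return (0);
        }
        return (remain);
    }
    return (timeout_ms);
}
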
411 struct pcap_bpf *pb = p->priv; in pcap_ack_zbuf() local
413 atomic_store_rel_int(&pb->bzh->bzh_user_gen, in pcap_ack_zbuf()
414 pb->bzh->bzh_kernel_gen); in pcap_ack_zbuf()
415 pb->bzh = NULL; in pcap_ack_zbuf()
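
pcap_ack_zbuf() releases the buffer back to the kernel by copying the kernel generation counter into the user generation counter with a release store (FreeBSD's atomic_store_rel_int()) and then clearing pb->bzh. A portable sketch of the same acknowledgement using C11 atomics (zbuf_header_c11 and zc_ack are illustrative; the real code operates on struct bpf_zbuf_header):

#include <stdatomic.h>
#include <stdint.h>

struct zbuf_header_c11 {
    _Atomic uint32_t kernel_gen;
    _Atomic uint32_t user_gen;
};

/* Acknowledge the buffer: once user_gen catches up with kernel_gen, the
 * kernel is free to reuse the buffer. The release store orders all prior
 * reads of the packet data before the acknowledgement becomes visible. */
static void
zc_ack(struct zbuf_header_c11 *bzh)
{
    uint32_t kgen = atomic_load_explicit(&bzh->kernel_gen,
        memory_order_acquire);
    atomic_store_explicit(&bzh->user_gen, kgen, memory_order_release);
}
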
822 struct pcap_bpf *pb = p->priv; in pcap_read_bpf() local
858 if (pb->zerocopy) { in pcap_read_bpf()
1006 if (pb->filtering_in_kernel || in pcap_read_bpf()
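
In pcap_read_bpf(), pb->zerocopy selects the zero-copy fill path instead of read(), and pb->filtering_in_kernel decides whether the per-packet userland filter pass can be skipped. A small sketch of that per-packet decision (packet_passes() and its parameters are illustrative; the rest of the condition at line 1006 is not shown in the match list):

/* Accept the packet outright if the kernel already ran the filter;
 * otherwise run the filter program in userland. */
static int
packet_passes(int filtering_in_kernel, int (*userland_filter)(void))
{
    if (filtering_in_kernel)
        return (1);                 /* the kernel already filtered this packet */
    return (userland_filter());     /* fall back to filtering in userland */
}
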
1273 struct pcap_bpf *pb = p->priv; in pcap_cleanup_bpf() local
1280 if (pb->must_do_on_close != 0) { in pcap_cleanup_bpf()
1286 if (pb->must_do_on_close & MUST_CLEAR_RFMON) { in pcap_cleanup_bpf()
1303 strncpy(req.ifm_name, pb->device, in pcap_cleanup_bpf()
1318 pb->device, in pcap_cleanup_bpf()
1341 pb->must_do_on_close = 0; in pcap_cleanup_bpf()
1345 if (pb->zerocopy) { in pcap_cleanup_bpf()
1353 if (pb->zbuf1 != MAP_FAILED && pb->zbuf1 != NULL) in pcap_cleanup_bpf()
1354 (void) munmap(pb->zbuf1, pb->zbufsize); in pcap_cleanup_bpf()
1355 if (pb->zbuf2 != MAP_FAILED && pb->zbuf2 != NULL) in pcap_cleanup_bpf()
1356 (void) munmap(pb->zbuf2, pb->zbufsize); in pcap_cleanup_bpf()
1360 if (pb->device != NULL) { in pcap_cleanup_bpf()
1361 free(pb->device); in pcap_cleanup_bpf()
1362 pb->device = NULL; in pcap_cleanup_bpf()
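
The pcap_cleanup_bpf() matches show the teardown order: if pb->must_do_on_close has MUST_CLEAR_RFMON set, the interface's media settings are restored using the saved pb->device name; then, on the zero-copy path, both buffers are unmapped only if they were actually mapped (a failed mmap() leaves MAP_FAILED, not NULL); finally the saved device name is freed. A sketch of the buffer/name teardown under a simplified struct (zc_cleanup is an illustrative stand-in for the relevant pcap_bpf fields):

#include <sys/mman.h>
#include <stdlib.h>

struct zc_cleanup {
    void *zbuf1, *zbuf2;    /* the two zero-copy buffers, if mapped */
    size_t zbufsize;        /* size of each buffer */
    char *device;           /* strdup()ed device name saved at activation */
    int zerocopy;           /* non-zero if the zero-copy path was used */
};

static void
zc_cleanup(struct zc_cleanup *pb)
{
    if (pb->zerocopy) {
        /* Guard against MAP_FAILED as well as NULL, matching lines 1353-1356. */
        if (pb->zbuf1 != MAP_FAILED && pb->zbuf1 != NULL)
            (void) munmap(pb->zbuf1, pb->zbufsize);
        if (pb->zbuf2 != MAP_FAILED && pb->zbuf2 != NULL)
            (void) munmap(pb->zbuf2, pb->zbufsize);
    }
    if (pb->device != NULL) {
        free(pb->device);
        pb->device = NULL;
    }
}
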
1477 struct pcap_bpf *pb = p->priv; local
1583 pb->device = strdup(p->opt.source);
1584 if (pb->device == NULL) {
1692 pb->zerocopy = 1;
1723 pb->zbufsize = roundup(v, getpagesize());
1724 if (pb->zbufsize > zbufmax)
1725 pb->zbufsize = zbufmax;
1726 pb->zbuf1 = mmap(NULL, pb->zbufsize, PROT_READ | PROT_WRITE,
1728 pb->zbuf2 = mmap(NULL, pb->zbufsize, PROT_READ | PROT_WRITE,
1730 if (pb->zbuf1 == MAP_FAILED || pb->zbuf2 == MAP_FAILED) {
1737 bz.bz_bufa = pb->zbuf1;
1738 bz.bz_bufb = pb->zbuf2;
1739 bz.bz_buflen = pb->zbufsize;
1753 v = pb->zbufsize - sizeof(struct bpf_zbuf_header);
2094 if (p->opt.timeout && !pb->zerocopy) {
2204 if (!pb->zerocopy) {
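
The activation-time matches (lines 1477-2204) cover the zero-copy setup: the device name is strdup()ed into pb->device, the buffer size is rounded up to a page multiple with roundup(v, getpagesize()) and clamped to the kernel's zbufmax, two buffers are mmap()ed, their addresses and length are loaded into a struct bpf_zbuf (bz_bufa, bz_bufb, bz_buflen), and the usable capture size becomes zbufsize minus the bpf_zbuf_header. A sketch of the sizing and mapping step (zc_map_buffers() is an illustrative name, and the MAP_ANON | MAP_SHARED flags are an assumption since the flag argument is not in the matched lines; the real code follows this with the BIOCSETZBUF ioctl):

#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

/* Round the requested size up to a page multiple, clamp it to the kernel
 * maximum, and map two anonymous shared buffers for the kernel to fill.
 * Returns 0 on success, -1 if either mapping failed. */
static int
zc_map_buffers(size_t want, size_t zbufmax, void **buf1, void **buf2,
    size_t *zbufsize)
{
    size_t pg = (size_t)getpagesize();

    *zbufsize = ((want + pg - 1) / pg) * pg;    /* roundup(v, getpagesize()) */
    if (*zbufsize > zbufmax)
        *zbufsize = zbufmax;

    *buf1 = mmap(NULL, *zbufsize, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_SHARED, -1, 0);
    *buf2 = mmap(NULL, *zbufsize, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_SHARED, -1, 0);
    if (*buf1 == MAP_FAILED || *buf2 == MAP_FAILED)
        return (-1);
    return (0);
}
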
2319 struct pcap_bpf *pb = p->priv; local
2457 pb->must_do_on_close |= MUST_CLEAR_RFMON;
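
These two matches are where the MUST_CLEAR_RFMON bit is recorded after monitor (rfmon) mode is switched on, so that pcap_cleanup_bpf() (line 1286 above) knows to restore the interface on close. A tiny sketch of that must_do_on_close bookkeeping (the flag value and the close_actions struct are illustrative):

#define MUST_CLEAR_RFMON    0x00000001  /* illustrative value */

struct close_actions {
    unsigned int must_do_on_close;
};

/* Remember that rfmon was turned on so the cleanup path can undo it. */
static void
remember_rfmon(struct close_actions *pb)
{
    pb->must_do_on_close |= MUST_CLEAR_RFMON;
}
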
2634 struct pcap_bpf *pb = p->priv; local
2648 pb->filtering_in_kernel = 1; /* filtering in the kernel */
2688 pb->filtering_in_kernel = 0; /* filtering in userland */
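
The pcap_setfilter_bpf() matches record where filtering will actually happen: pb->filtering_in_kernel is set when the kernel accepts the filter program and cleared when libpcap falls back to filtering in userland, which is what the read-path test at line 1006 consults. A sketch of that decision (filter_state and install_in_kernel() are illustrative stand-ins; the real code installs the program with the BIOCSETF ioctl):

struct filter_state {
    int filtering_in_kernel;    /* where the next pcap_read_bpf() should filter */
};

/* Try to install the filter in the kernel and remember the outcome. */
static void
set_filter(struct filter_state *pb, int (*install_in_kernel)(void))
{
    if (install_in_kernel() == 0)
        pb->filtering_in_kernel = 1;    /* filtering in the kernel */
    else
        pb->filtering_in_kernel = 0;    /* filtering in userland */
}
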