/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/

D | neon-lut64-p2-x20.c | in xnn_f32_raddstoreexpminusmax_ukernel__neon_lut64_p2_x20():
    114  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    129  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    130  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-lut64-p2-x20.c | in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_lut64_p2_x20():
    113  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    128  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    129  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-lut64-p2-x20-acc5.c | in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_lut64_p2_x20_acc5():
    117  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    132  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    133  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neon-lut64-p2-x20-acc2.c | in xnn_f32_raddstoreexpminusmax_ukernel__neon_lut64_p2_x20_acc2():
    115  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    130  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    131  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neon-lut64-p2-x20-acc5.c | in xnn_f32_raddstoreexpminusmax_ukernel__neon_lut64_p2_x20_acc5():
    118  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    133  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    134  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-lut64-p2-x20-acc2.c | in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_lut64_p2_x20_acc2():
    114  float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);  (local)
    129  vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    130  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

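All of the hits above are the same two-lane table gather: vidxIJ packs two 32-bit indices into xnn_table_exp2_k_over_64 into one 64-bit scalar, vld1_dup_f32 loads the low-index entry into both lanes of a float32x2_t, vld1_lane_f32 overwrites lane 1 with the high-index entry, and vcombine_f32 joins the pair to the previously gathered vlGH half. The stand-alone sketch below illustrates only that pattern; the 4-entry table and the packed indices are made up for the demo and stand in for the real exp2 table and the indices the kernels compute upstream.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for xnn_table_exp2_k_over_64: values are not the real table entries. */
static const float table[4] = { 1.0f, 2.0f, 4.0f, 8.0f };

int main(void) {
  /* Two table indices packed into one 64-bit value, as the kernels do with vidxIJ:
     the low 32 bits index lane I, the high 32 bits index lane J (here 1 and 3). */
  const uint64_t vidxIJ = ((uint64_t) 3 << 32) | 1;

  /* Gather entry I into both lanes, then overwrite lane 1 with entry J. */
  float32x2_t vlIJ = vld1_dup_f32(&table[(uint32_t) vidxIJ]);
  vlIJ = vld1_lane_f32(&table[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);

  /* In the kernels vlGH is gathered the same way; here it is just entry 0 duplicated. */
  const float32x2_t vlGH = vld1_dup_f32(&table[0]);
  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

  float out[4];
  vst1q_f32(out, vlGHIJ);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* prints: 1 1 2 8 */
  return 0;
}
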
/external/XNNPACK/src/f32-sigmoid/gen/

D | neonfma-rr1-lut64-p2-div-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_div_x20():
    87   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    102  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    103  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-div-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_div_x20():
    86   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    101  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    102  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-nr2recps-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x20():
    86   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    101  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    102  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-nr2fma-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x20():
    87   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    102  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    103  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-nr2recps-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x20():
    87   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    102  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    103  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-div-x24.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_div_x24():
    92   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    111  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    112  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neon-rr2-lut64-p2-nr2recps-x20.c | in xnn_f32_sigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x20():
    88   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    103  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    104  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-div-x24.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_div_x24():
    91   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    110  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    111  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-nr1recps1fma-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_nr1recps1fma_x20():
    87   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    102  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    103  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neon-rr2-lut2048-p1-nr2recps-x20.c | in xnn_f32_sigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x20():
    87   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    102  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    103  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-nr1recps1fma-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x20():
    86   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    101  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    102  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-nr2fma-x20.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x20():
    86   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    101  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    102  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut2048-p1-nr2recps-x24.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x24():
    91   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);  (local)
    110  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    111  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-nr2recps-x24.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_nr2recps_x24():
    92   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    111  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    112  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

D | neonfma-rr1-lut64-p2-nr2fma-x24.c | in xnn_f32_sigmoid_ukernel__neonfma_rr1_lut64_p2_nr2fma_x24():
    92   float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);  (local)
    111  vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    112  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

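The sigmoid hits are the same gather, against either xnn_table_exp2minus_k_over_64 or xnn_table_exp2minus_k_over_2048. The 64-bit scalars vidxGH and vidxIJ are not shown being produced in these hits; in the surrounding generated code they are typically extracted from a 128-bit index vector with vgetq_lane_u64 before the scalar-indexed loads. The sketch below shows that extraction followed by the gather; the 8-entry table, the packed index values, and the variable names for the demo vector are assumptions, not the kernels' real data.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for xnn_table_exp2minus_k_over_64 (the real table has 64 entries). */
static const float table[8] = {
  1.0f, 0.5f, 0.25f, 0.125f, 0.0625f, 0.03125f, 0.015625f, 0.0078125f
};

int main(void) {
  /* Hypothetical packed indices for lanes G,H,I,J: each 64-bit lane holds two 32-bit indices. */
  const uint64x2_t vidxGHIJ = vcombine_u64(
      vcreate_u64(((uint64_t) 2 << 32) | 0),   /* lanes G (low) and H (high) */
      vcreate_u64(((uint64_t) 7 << 32) | 5));  /* lanes I (low) and J (high) */

  /* Pull each 64-bit lane out as a scalar, as the generated kernels do. */
  const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
  const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);

  /* Two-lane gathers, mirroring the lines in the listing above. */
  float32x2_t vlGH = vld1_dup_f32(&table[(uint32_t) vidxGH]);
  float32x2_t vlIJ = vld1_dup_f32(&table[(uint32_t) vidxIJ]);
  vlGH = vld1_lane_f32(&table[(uint32_t) (vidxGH >> 32)], vlGH, 1);
  vlIJ = vld1_lane_f32(&table[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
  const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

  float out[4];
  vst1q_f32(out, vlGHIJ);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* prints: 1 0.25 0.03125 0.0078125 */
  return 0;
}
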
/external/XNNPACK/src/f32-velu/gen/

D | velu-neonfma-rr1-lut16-p3-x20.c | in xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20():
    105  …int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint3…  (local)
    107  …vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vid…
    108  const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

D | velu-neon-rr2-lut16-p3-x20.c | in xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20():
    106  …int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint3…  (local)
    108  …vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vid…
    109  const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

D | velu-neon-rr2-lut16-p3-x24.c | in xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24():
    111  …int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint3…  (local)
    113  …vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vid…
    114  const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

D | velu-neonfma-rr1-lut16-p3-x24.c | in xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24():
    110  …int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint3…  (local)
    112  …vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vid…
    113  const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

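The velu hits use the integer form of the gather: the packed quantities in vidxIJ are byte offsets added to the base address of xnn_table_exp2minus_k_over_16, the entries are loaded as raw 32-bit words with vld1_dup_s32/vld1_lane_s32, and the halves are joined with vcombine_s32. The listed source lines are truncated in this report; the sketch below only illustrates the byte-offset variant, with a placeholder table and made-up offsets standing in for the values the kernels compute upstream.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for xnn_table_exp2minus_k_over_16: float entries addressed by byte offset. */
static const float table[4] = { 1.0f, 0.5f, 0.25f, 0.125f };

int main(void) {
  /* Two byte offsets packed into one 64-bit value (multiples of sizeof(float));
     low 32 bits select lane I, high 32 bits select lane J. Values are made up. */
  const uint64_t vidxIJ = ((uint64_t) 12 << 32) | 4;

  /* Load the table entries as 32-bit integer bit patterns at the given byte offsets. */
  int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) table + (uint32_t) vidxIJ));
  vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) table + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);

  /* In the kernels vlGH is gathered the same way; here it is just entry 0 duplicated. */
  const int32x2_t vlGH = vld1_dup_s32((const int32_t*) table);
  const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);

  /* Reinterpret back to float to inspect the gathered entries. */
  float out[4];
  vst1q_f32(out, vreinterpretq_f32_s32(vlGHIJ));
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* prints: 1 1 0.5 0.125 */
  return 0;
}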