• Home
  • History
  • Annotate
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10  
11  #include "./vp8_rtcd.h"
12  #include "vpx_ports/asmdefs_mmi.h"
13  
14  #define COPY_MEM_16X2 \
15    "gsldlc1    %[ftmp0],   0x07(%[src])                    \n\t" \
16    "gsldrc1    %[ftmp0],   0x00(%[src])                    \n\t" \
17    "ldl        %[tmp0],    0x0f(%[src])                    \n\t" \
18    "ldr        %[tmp0],    0x08(%[src])                    \n\t" \
19    MMI_ADDU(%[src],     %[src],         %[src_stride])           \
20    "gssdlc1    %[ftmp0],   0x07(%[dst])                    \n\t" \
21    "gssdrc1    %[ftmp0],   0x00(%[dst])                    \n\t" \
22    "sdl        %[tmp0],    0x0f(%[dst])                    \n\t" \
23    "sdr        %[tmp0],    0x08(%[dst])                    \n\t" \
24    MMI_ADDU(%[dst],      %[dst],        %[dst_stride])           \
25    "gsldlc1    %[ftmp1],   0x07(%[src])                    \n\t" \
26    "gsldrc1    %[ftmp1],   0x00(%[src])                    \n\t" \
27    "ldl        %[tmp1],    0x0f(%[src])                    \n\t" \
28    "ldr        %[tmp1],    0x08(%[src])                    \n\t" \
29    MMI_ADDU(%[src],     %[src],         %[src_stride])           \
30    "gssdlc1    %[ftmp1],   0x07(%[dst])                    \n\t" \
31    "gssdrc1    %[ftmp1],   0x00(%[dst])                    \n\t" \
32    "sdl        %[tmp1],    0x0f(%[dst])                    \n\t" \
33    "sdr        %[tmp1],    0x08(%[dst])                    \n\t" \
34    MMI_ADDU(%[dst],     %[dst],         %[dst_stride])
35  
36  #define COPY_MEM_8X2 \
37    "gsldlc1    %[ftmp0],   0x07(%[src])                    \n\t" \
38    "gsldrc1    %[ftmp0],   0x00(%[src])                    \n\t" \
39    MMI_ADDU(%[src],     %[src],         %[src_stride])           \
40    "ldl        %[tmp0],    0x07(%[src])                    \n\t" \
41    "ldr        %[tmp0],    0x00(%[src])                    \n\t" \
42    MMI_ADDU(%[src],     %[src],         %[src_stride])           \
43                                                                  \
44    "gssdlc1    %[ftmp0],   0x07(%[dst])                    \n\t" \
45    "gssdrc1    %[ftmp0],   0x00(%[dst])                    \n\t" \
46    MMI_ADDU(%[dst],      %[dst],        %[dst_stride])           \
47    "sdl        %[tmp0],    0x07(%[dst])                    \n\t" \
48    "sdr        %[tmp0],    0x00(%[dst])                    \n\t" \
49    MMI_ADDU(%[dst],     %[dst],         %[dst_stride])
50  
vp8_copy_mem16x16_mmi(unsigned char * src,int src_stride,unsigned char * dst,int dst_stride)51  void vp8_copy_mem16x16_mmi(unsigned char *src, int src_stride,
52                             unsigned char *dst, int dst_stride) {
53    double ftmp[2];
54    uint64_t tmp[2];
55    uint8_t loop_count = 4;
56  
57    /* clang-format off */
58    __asm__ volatile (
59      "1:                                                     \n\t"
60      COPY_MEM_16X2
61      COPY_MEM_16X2
62      MMI_ADDIU(%[loop_count], %[loop_count], -0x01)
63      "bnez       %[loop_count],    1b                        \n\t"
64      : [ftmp0]"=&f"(ftmp[0]),            [ftmp1]"=&f"(ftmp[1]),
65        [tmp0]"=&r"(tmp[0]),              [tmp1]"=&r"(tmp[1]),
66        [loop_count]"+&r"(loop_count),
67        [dst]"+&r"(dst),                  [src]"+&r"(src)
68      : [src_stride]"r"((mips_reg)src_stride),
69        [dst_stride]"r"((mips_reg)dst_stride)
70      : "memory"
71    );
72    /* clang-format on */
73  }
74  
vp8_copy_mem8x8_mmi(unsigned char * src,int src_stride,unsigned char * dst,int dst_stride)75  void vp8_copy_mem8x8_mmi(unsigned char *src, int src_stride, unsigned char *dst,
76                           int dst_stride) {
77    double ftmp[2];
78    uint64_t tmp[1];
79    uint8_t loop_count = 4;
80  
81    /* clang-format off */
82    __asm__ volatile (
83      "1:                                                     \n\t"
84      COPY_MEM_8X2
85      MMI_ADDIU(%[loop_count], %[loop_count], -0x01)
86      "bnez       %[loop_count],    1b                        \n\t"
87      : [ftmp0]"=&f"(ftmp[0]),            [ftmp1]"=&f"(ftmp[1]),
88        [tmp0]"=&r"(tmp[0]),              [loop_count]"+&r"(loop_count),
89        [dst]"+&r"(dst),                  [src]"+&r"(src)
90      : [src_stride]"r"((mips_reg)src_stride),
91        [dst_stride]"r"((mips_reg)dst_stride)
92      : "memory"
93    );
94    /* clang-format on */
95  }
96  
vp8_copy_mem8x4_mmi(unsigned char * src,int src_stride,unsigned char * dst,int dst_stride)97  void vp8_copy_mem8x4_mmi(unsigned char *src, int src_stride, unsigned char *dst,
98                           int dst_stride) {
99    double ftmp[2];
100    uint64_t tmp[1];
101  
102    /* clang-format off */
103    __asm__ volatile (
104      COPY_MEM_8X2
105      COPY_MEM_8X2
106      : [ftmp0]"=&f"(ftmp[0]),            [ftmp1]"=&f"(ftmp[1]),
107        [tmp0]"=&r"(tmp[0]),
108        [dst]"+&r"(dst),                  [src]"+&r"(src)
109      : [src_stride]"r"((mips_reg)src_stride),
110        [dst_stride]"r"((mips_reg)dst_stride)
111      : "memory"
112    );
113    /* clang-format on */
114  }
115