Searched refs:scalars (Results 1 – 25 of 64) sorted by relevance
337 static struct reg scalars[512+1]; variable
353 for (i = 0, tmp = scalar_names ; i < Elements(scalars) ; i++) { in init_regs()
355 scalars[i].idx = i; in init_regs()
356 scalars[i].closest = tmp; in init_regs()
357 scalars[i].flags = ISFLOAT; in init_regs()
368 scalars[Elements(scalars)-1].idx = -1; in init_regs()
517 for (i = 0 ; i < Elements(scalars) ; i++) in dump_state()
518 print_reg( &scalars[i] ); in dump_state()
566 int sz = header.scalars.count; in radeon_emit_scalars()
568 int start = header.scalars.offset; in radeon_emit_scalars()
[all …]
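The register-dump entry above sizes the scalars table as 512 slots plus one spare, fills idx/closest/flags for each register, and marks the extra final slot with idx = -1 as an end-of-table sentinel. A minimal Python sketch of that sentinel-terminated setup (field names follow the snippet; the ISFLOAT value and the register names in the example are placeholders, since neither appears in these results):

    ISFLOAT = 0x1  # placeholder; the driver's real flag value is not shown here

    def init_scalar_regs(count, names):
        # One entry per register, plus a sentinel slot (idx == -1) at the end,
        # mirroring "static struct reg scalars[512+1]".
        regs = [{"idx": i, "closest": names[i % len(names)], "flags": ISFLOAT}
                for i in range(count)]
        regs.append({"idx": -1, "closest": None, "flags": 0})
        return regs

    regs = init_scalar_regs(4, ["SE_SCALAR_0", "SE_SCALAR_1"])  # made-up names
    assert regs[-1]["idx"] == -1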
181 h.scalars.cmd_type = RADEON_CMD_SCALARS; in cmdscl()
182 h.scalars.offset = offset; in cmdscl()
183 h.scalars.stride = stride; in cmdscl()
184 h.scalars.count = count; in cmdscl()
255 OUT_BATCH((h.scalars.offset) | (h.scalars.stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); \
256 OUT_BATCH(CP_PACKET0_ONE(R200_SE_TCL_SCALAR_DATA_REG, h.scalars.count - 1)); \
257 OUT_BATCH_TABLE((data), h.scalars.count); \
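cmdscl() above records the command type, offset, stride, and count in a header; the OUT_BATCH lines then pack offset and stride into a single index dword and size the following packet by count. A rough Python sketch of that dword packing (STRIDE_SHIFT is a stand-in; the real RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT value does not appear in these results):

    STRIDE_SHIFT = 28  # assumed placeholder, not the verified hardware value

    def pack_scalars_index(offset, stride, count):
        # One dword carrying offset | stride, plus the number of scalar data
        # dwords that follow, as in the OUT_BATCH sequence above.
        index_dword = offset | (stride << STRIDE_SHIFT)
        return index_dword, count

    print(pack_scalars_index(0x10, 1, 4))  # illustrative values only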
10 the parser does not require scalars
14 "the parser does not require scalars
18 bar: 'quoted scalars
4 scalars:
615 static struct reg scalars[512+1]; variable
631 for (i = 0, tmp = scalar_names ; i < Elements(scalars) ; i++) { in init_regs()
633 scalars[i].idx = i; in init_regs()
634 scalars[i].closest = tmp; in init_regs()
635 scalars[i].flags = ISFLOAT; in init_regs()
646 scalars[Elements(scalars)-1].idx = -1; in init_regs()
795 for (i = 0 ; i < Elements(scalars) ; i++) in dump_state()
796 print_reg( &scalars[i] ); in dump_state()
844 int sz = header.scalars.count; in radeon_emit_scalars()
846 int start = header.scalars.offset; in radeon_emit_scalars()
[all …]
199 h.scalars.cmd_type = RADEON_CMD_SCALARS; in cmdscl()
200 h.scalars.offset = offset; in cmdscl()
201 h.scalars.stride = stride; in cmdscl()
202 h.scalars.count = count; in cmdscl()
210 h.scalars.cmd_type = RADEON_CMD_SCALARS2; in cmdscl2()
211 h.scalars.offset = offset - 0x100; in cmdscl2()
212 h.scalars.stride = stride; in cmdscl2()
213 h.scalars.count = count; in cmdscl2()
311 OUT_BATCH((h.scalars.offset) | (h.scalars.stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); \
312 OUT_BATCH(CP_PACKET0_ONE(R200_SE_TCL_SCALAR_DATA_REG, h.scalars.count - 1)); \
[all …]
574 def __init__ (self, numCols, numRows, scalars): argument
575 assert len(scalars) == numRows*numCols
578 self.scalars = scalars
582 scalars = []
585 scalars.append(1.0 if col == row else 0.0)
586 return Mat(numCols, numRows, scalars)
591 return self.scalars[colNdx*self.numRows + rowNdx]
596 self.scalars[colNdx*self.numRows + rowNdx] = scalar
622 return "%s(%s)" % (self.typeString(), ", ".join([str(s) for s in self.scalars]))
629 return (self.scalars == other.scalars)
[all …]
60 scalars = reduce(operator.add, [x[ndx].toFloat().getScalars() for x in comps])
62 res.append(Vec.fromScalarList(scalars))
80 scalars = reduce(operator.add, [x[ndx].toFloat().getScalars() for x in comps])
81 res.append(Mat(numCols, numRows, scalars))
31 ; store constants 1,2,3,4 as scalars
37 ; load stored scalars
80 ; store constants as scalars
88 ; load stored scalars
132 ; load stored scalars
719 def __init__ (self, numCols, numRows, scalars): argument
720 assert len(scalars) == numRows*numCols
723 self.scalars = scalars
727 scalars = []
730 scalars.append(scalar if col == row else 0.0)
731 return Mat(numCols, numRows, scalars)
740 return self.scalars[colNdx*self.numRows + rowNdx]
745 self.scalars[colNdx*self.numRows + rowNdx] = scalar
771 return "%s(%s)" % (self.typeString(), ", ".join(["%s" % s for s in self.scalars]))
778 return (self.scalars == other.scalars)
[all …]
717 def __init__ (self, numCols, numRows, scalars): argument
718 assert len(scalars) == numRows*numCols
721 self.scalars = scalars
725 scalars = []
728 scalars.append(scalar if col == row else 0.0)
729 return Mat(numCols, numRows, scalars)
738 return self.scalars[colNdx*self.numRows + rowNdx]
743 self.scalars[colNdx*self.numRows + rowNdx] = scalar
769 return "%s(%s)" % (self.typeString(), ", ".join(["%s" % s for s in self.scalars]))
776 return (self.scalars == other.scalars)
[all …]
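The three Mat snippets above (from closely related scripts) all keep matrix elements in one flat list, stored column by column, and address an element as colNdx*numRows + rowNdx. A small self-contained Python sketch of that column-major layout, assuming only what the snippets show (the get/diagonal method names here are illustrative):

    class Mat:
        def __init__(self, numCols, numRows, scalars):
            assert len(scalars) == numRows * numCols
            self.numCols = numCols
            self.numRows = numRows
            self.scalars = scalars  # flat list, column-major

        def get(self, colNdx, rowNdx):
            return self.scalars[colNdx * self.numRows + rowNdx]

        @staticmethod
        def diagonal(numCols, numRows, scalar):
            # Put `scalar` on the diagonal and 0.0 elsewhere, as in the loop above.
            return Mat(numCols, numRows,
                       [scalar if col == row else 0.0
                        for col in range(numCols) for row in range(numRows)])

    m = Mat.diagonal(3, 3, 2.0)
    assert m.get(1, 1) == 2.0 and m.get(0, 1) == 0.0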
60 scalars = reduce(operator.add, [x[ndx].toFloat().getScalars() for x in comps])
62 res.append(Vec.fromScalarList(scalars))
83 scalars = reduce(operator.add, [x[ndx].toFloat().getScalars() for x in comps])
84 res.append(Mat(numCols, numRows, scalars))
853 const BIGNUM *scalars[1]; in EC_POINT_mul() local
856 scalars[0] = p_scalar; in EC_POINT_mul()
859 points, scalars, ctx); in EC_POINT_mul()
863 size_t num, const EC_POINT *points[], const BIGNUM *scalars[], in EC_POINTs_mul() argument
867 return ec_wNAF_mul(group, r, scalar, num, points, scalars, ctx); in EC_POINTs_mul()
870 return group->meth->mul(group, r, scalar, num, points, scalars, ctx); in EC_POINTs_mul()
1506 const felem_bytearray scalars[], in batch_mul() argument
1569 bits = get_bit(scalars[num], i + 4) << 5; in batch_mul()
1570 bits |= get_bit(scalars[num], i + 3) << 4; in batch_mul()
1571 bits |= get_bit(scalars[num], i + 2) << 3; in batch_mul()
1572 bits |= get_bit(scalars[num], i + 1) << 2; in batch_mul()
1573 bits |= get_bit(scalars[num], i) << 1; in batch_mul()
1574 bits |= get_bit(scalars[num], i - 1); in batch_mul()
1719 const BIGNUM *scalars[], BN_CTX *ctx) { in ec_GFp_nistp256_points_mul() argument
1813 p_scalar = scalars[i]; in ec_GFp_nistp256_points_mul()
164 size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
234 size_t num, const EC_POINT *points[], const BIGNUM *scalars[],
291 size_t num, const EC_POINT *points[], const BIGNUM *scalars[], in ec_wNAF_mul() argument
404 bits = i < num ? BN_num_bits(scalars[i]) : BN_num_bits(scalar); in ec_wNAF_mul()
409 compute_wNAF((i < num ? scalars[i] : scalar), wsize[i], &wNAF_len[i]); in ec_wNAF_mul()
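The elliptic-curve entries above take arrays of points with matching scalars; batch_mul walks each scalar and gathers six consecutive bits (i-1 through i+4) into a window used to index precomputed multiples. A rough Python illustration of just that bit gathering (get_bit here operates on Python integers, not on the felem_bytearray type used in the C code):

    def get_bit(scalar, i):
        # Bit i of a non-negative integer; out-of-range bits read as zero.
        return (scalar >> i) & 1 if i >= 0 else 0

    def window_bits(scalar, i):
        # Collect bits i-1 .. i+4 into a 6-bit value, as in the batch_mul loop above.
        bits = get_bit(scalar, i + 4) << 5
        bits |= get_bit(scalar, i + 3) << 4
        bits |= get_bit(scalar, i + 2) << 3
        bits |= get_bit(scalar, i + 1) << 2
        bits |= get_bit(scalar, i) << 1
        bits |= get_bit(scalar, i - 1)
        return bits

    print(window_bits(0b101101, 1))  # prints 45, i.e. bits 5..0 of the scalar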
89 \item The number of scalars read into the vector \varname{[coefficients]}
95 scalars in \varname{[coefficients]} is to read a total of twelve
96 scalars as four vectors of three scalars each. This is not an error
114 length \varname{[n]} vector with all-zero scalars. Otherwise, begin by
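The specification excerpt above describes interpreting a run of scalars in [coefficients] as a sequence of fixed-width vectors, for example twelve scalars read as four vectors of three. A short Python sketch of that grouping (function and variable names are invented for illustration, not taken from the spec):

    def group_scalars(coefficients, vec_size):
        # Split a flat scalar list into consecutive vectors of vec_size each.
        assert len(coefficients) % vec_size == 0
        return [coefficients[i:i + vec_size]
                for i in range(0, len(coefficients), vec_size)]

    # Twelve scalars become four vectors of three scalars each.
    print(group_scalars(list(range(12)), 3))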
8 ; <4 x double> is legalized to scalars).
3 ; Ensure we don't simplify away additions of vectors of +0.0's (same as scalars).
... .util.Map)
java.util.Map scalars
Runtime runtime
protected void collectGlobalPerformanceInfo (java.util. ...
13 4) bitcasts from vectors to scalars: PR2804
3 ; Make sure that vectors get the same benefits as scalars when using unsafe-fp-math.
15 // all scalars, but widths do not match.
26 // scalar condition and mixed-width vectors and scalars
906 std::vector<float>& scalars = attribValues[attribValueNdx++]; in execute() local
907 scalars.resize(numVerticesPerDraw * scalarSize); in execute()
912 scalars[repNdx*scalarSize + ndx] = val.elements[arrayNdx*scalarSize + ndx].float32; in execute()
923 scalars[repNdx*scalarSize + ndx] = v; in execute()
950 …loat(attribLoc + i, numRows, numVerticesPerDraw, scalarSize*sizeof(float), &scalars[i * numRows])); in execute()
955 vertexArrays.push_back(va::Float(attribLoc, scalarSize, numVerticesPerDraw, 0, &scalars[0])); in execute()
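The execute() fragment above resizes a flat float buffer to numVerticesPerDraw * scalarSize and writes the same attribute value once per vertex before handing the buffer to the vertex-array bindings. A simplified Python sketch of that per-vertex replication (shapes and names are illustrative only):

    def replicate_per_vertex(elements, scalar_size, num_vertices):
        # Repeat one attribute value (scalar_size floats) for every vertex.
        scalars = [0.0] * (num_vertices * scalar_size)
        for rep in range(num_vertices):
            for ndx in range(scalar_size):
                scalars[rep * scalar_size + ndx] = elements[ndx]
        return scalars

    print(replicate_per_vertex([1.0, 2.0, 3.0], 3, 4))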