size 91 apps/HelloAndroid/src/com/example/hellohalide/CameraPreview.java int size = y_size + c_size * 2; size 92 apps/HelloAndroid/src/com/example/hellohalide/CameraPreview.java mPreviewData = new byte[size]; size 342 apps/HelloAndroidCamera2/src/com/example/helloandroidcamera2/Camera2BasicFragment.java for (Size size : sizes) { size 343 apps/HelloAndroidCamera2/src/com/example/helloandroidcamera2/Camera2BasicFragment.java Log.d(TAG, size.toString()); size 87 apps/camera_pipe/fcam/Demosaic_ARM.cpp if (src.shot().colorMatrix().size() == 12) { size 10 apps/cuda_mat_mul/mat_mul_generator.cpp GeneratorParam<int> size {"size", 1024}; size 18 apps/cuda_mat_mul/mat_mul_generator.cpp RDom r(0, size); size 23 apps/cuda_mat_mul/mat_mul_generator.cpp out.bound(x, 0, size) size 24 apps/cuda_mat_mul/mat_mul_generator.cpp .bound(y, 0, size) size 46 apps/cuda_mat_mul/mat_mul_generator.cpp .dim(0).set_bounds(0, size) size 47 apps/cuda_mat_mul/mat_mul_generator.cpp .dim(1).set_stride(size); size 10 apps/cuda_mat_mul/runner.cpp int size = 1024; size 12 apps/cuda_mat_mul/runner.cpp size = atoi(argv[1]); size 16 apps/cuda_mat_mul/runner.cpp Buffer<float> A(size, size), B(size, size), C(size, size); size 26 apps/cuda_mat_mul/runner.cpp cudaMalloc((void **)&A, size*size*4); size 27 apps/cuda_mat_mul/runner.cpp cudaMalloc((void **)&B, size*size*4); size 28 apps/cuda_mat_mul/runner.cpp cudaMalloc((void **)&C, size*size*4); size 34 apps/cuda_mat_mul/runner.cpp size, size, size, &alpha, A, size, B, size, &beta, C, size); size 59 apps/fft/fft.cpp for (size_t i = 0; i < R.size(); i++) { size 312 apps/fft/fft.cpp for (size_t i = 0; i < NR.size(); i++) { size 408 apps/fft/fft.cpp for (size_t i = 0; i + 1 < stages.size(); i++) { size 69 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp void run(std::string benchmark, int size) { size 71 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp bench_copy(size); size 73 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp bench_scal(size); size 75 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp bench_axpy(size); size 77 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp bench_dot(size); size 79 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp bench_asum(size); size 81 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemv_notrans(size); size 83 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemv_trans(size); size 85 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_ger(size); size 87 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemm_notrans(size); size 89 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemm_transA(size); size 91 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemm_transB(size); size 93 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp this->bench_gemm_transAB(size); size 201 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp int size = std::stoi(argv[2]); size 205 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp BenchmarksFloat (BLAS_NAME).run(subroutine, size); size 207 apps/linear_algebra/benchmarks/cblas_benchmarks.cpp BenchmarksDouble(BLAS_NAME).run(subroutine, size); size 55 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp void run(std::string benchmark, int size) { size 57 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_copy(size); size 59 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_scal(size); size 61 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_axpy(size); size 63 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_dot(size); size 65 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_asum(size); size 67 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemv_notrans(size); size 69 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemv_trans(size); size 71 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_ger(size); size 73 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemm_notrans(size); size 75 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemm_transA(size); size 77 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemm_transB(size); size 79 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp bench_gemm_transAB(size); size 112 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp int size = std::stoi(argv[2]); size 116 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp Benchmarks<float> ("Eigen").run(subroutine, size); size 118 apps/linear_algebra/benchmarks/eigen_benchmarks.cpp Benchmarks<double>("Eigen").run(subroutine, size); size 59 apps/linear_algebra/benchmarks/halide_benchmarks.cpp void run(std::string benchmark, int size) { size 61 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_copy(size); size 63 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_scal(size); size 65 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_axpy(size); size 67 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_dot(size); size 69 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_asum(size); size 71 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemv_notrans(size); size 73 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemv_trans(size); size 75 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_ger(size); size 77 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemm_notrans(size); size 79 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemm_transA(size); size 81 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemm_transB(size); size 83 apps/linear_algebra/benchmarks/halide_benchmarks.cpp bench_gemm_transAB(size); size 179 apps/linear_algebra/benchmarks/halide_benchmarks.cpp int size = std::stoi(argv[2]); size 183 apps/linear_algebra/benchmarks/halide_benchmarks.cpp BenchmarksFloat ("Halide").run(subroutine, size); size 185 apps/linear_algebra/benchmarks/halide_benchmarks.cpp BenchmarksDouble("Halide").run(subroutine, size); size 61 apps/linear_algebra/src/blas_l1_generators.cpp Expr size = x_.width(); size 62 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_vecs = (size / vec_size) * vec_size; size 63 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_tail = size - size_vecs; size 123 apps/linear_algebra/src/blas_l1_generators.cpp Expr size = x_.width(); size 124 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_vecs = size / vec_size; size 125 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_tail = size - size_vecs * vec_size; size 144 apps/linear_algebra/src/blas_l1_generators.cpp RDom k(0, size); size 150 apps/linear_algebra/src/blas_l1_generators.cpp y_.set_bounds(0, 0, size); size 190 apps/linear_algebra/src/blas_l1_generators.cpp Expr size = x_.width(); size 191 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_vecs = size / vec_size; size 192 apps/linear_algebra/src/blas_l1_generators.cpp Expr size_tail = size - size_vecs * vec_size; size 49 apps/linear_algebra/src/blas_l2_generators.cpp const Expr size = A_.height(); size 78 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= unroll_size).vectorize(i, unroll_size) size 79 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= block_size_) size 83 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= unroll_size).vectorize(i, unroll_size) size 84 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= block_size_) size 104 apps/linear_algebra/src/blas_l2_generators.cpp sum_lanes.specialize(size >= vec_size).vectorize(i, vec_size);//.unroll(i); size 105 apps/linear_algebra/src/blas_l2_generators.cpp sum_lanes.update().specialize(size >= vec_size).vectorize(i, vec_size);//.unroll(i); size 107 apps/linear_algebra/src/blas_l2_generators.cpp sum_tail.specialize(size >= vec_size).vectorize(i, vec_size);//.unroll(i); size 108 apps/linear_algebra/src/blas_l2_generators.cpp sum_tail.update().specialize(size >= vec_size).vectorize(i, vec_size);//.unroll(i); size 116 apps/linear_algebra/src/blas_l2_generators.cpp const Expr size = A_.width(); size 132 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= vec_size).vectorize(i, vec_size) size 133 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= unroll_size * vec_size).unroll(i, unroll_size) size 134 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= block_size_) size 137 apps/linear_algebra/src/blas_l2_generators.cpp result.specialize(size >= vec_size).vectorize(i, vec_size) size 138 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= unroll_size * vec_size).unroll(i, unroll_size) size 139 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= block_size_) size 143 apps/linear_algebra/src/blas_l2_generators.cpp block.specialize(size >= vec_size).vectorize(i, vec_size); size 144 apps/linear_algebra/src/blas_l2_generators.cpp block.update().specialize(size >= vec_size && sum_size >= unroll_size) size 149 apps/linear_algebra/src/blas_l2_generators.cpp block.update().specialize(size >= vec_size).vectorize(i, vec_size); size 151 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= vec_size).vectorize(i, vec_size) size 164 apps/linear_algebra/src/blas_l2_generators.cpp const Expr size = x_.width(); size 166 apps/linear_algebra/src/blas_l2_generators.cpp output.specialize(size >= vec_size).vectorize(i, vec_size) size 167 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= unroll_size * vec_size).unroll(i, unroll_size) size 168 apps/linear_algebra/src/blas_l2_generators.cpp .specialize(size >= block_size_) size 301 apps/linear_algebra/tests/test_halide_blas.cpp int size = std::stoi(argv[i]); size 302 apps/linear_algebra/tests/test_halide_blas.cpp std::cout << "Testing halide_blas with N = " << size << ":\n"; size 303 apps/linear_algebra/tests/test_halide_blas.cpp s.run_tests(size); size 304 apps/linear_algebra/tests/test_halide_blas.cpp d.run_tests(size); size 307 apps/linear_algebra/tests/test_halide_blas.cpp int size = 64 * 7; size 308 apps/linear_algebra/tests/test_halide_blas.cpp std::cout << "Testing halide_blas with N = " << size << ":\n"; size 309 apps/linear_algebra/tests/test_halide_blas.cpp s.run_tests(size); size 310 apps/linear_algebra/tests/test_halide_blas.cpp d.run_tests(size); size 49 apps/resize/resize.cpp float size; size 122 apps/resize/resize.cpp float kernelSize = kernelInfo[interpolationType].size / kernelScaling; size 7 apps/simd_op_check/driver.cpp extern "C" void *memalign(size_t alignment, size_t size); size 85 python_bindings/numpy/ndarray.cpp if (shape_.size() != strides_.size()) { size 89 python_bindings/numpy/ndarray.cpp for (std::size_t i = 0; i < shape_.size(); ++i) { size 102 python_bindings/numpy/ndarray.cpp if (shape.size() != strides.size()) { size 114 python_bindings/numpy/ndarray.cpp shape.size(), size 29 python_bindings/python/BoundaryConditions.cpp std::vector<std::pair<h::Expr, h::Expr>> result(intermediate.size()); size 19 python_bindings/python/Expr.cpp if (t.size() == 1) { size 25 python_bindings/python/Func.cpp if (r.size() == 1) { size 29 python_bindings/python/Func.cpp for (size_t i = 0; i < r.size(); i++) { size 174 python_bindings/python/Func_Ref.cpp .def("size", &FuncRef::size, size 176 python_bindings/python/Func_Ref.cpp .def("__len__", &FuncRef::size, size 88 python_bindings/python/Image.cpp if (int_args.size() != args_len) { size 98 python_bindings/python/Image.cpp switch (int_args.size()) { size 108 python_bindings/python/Image.cpp printf("buffer_to_setitem_operator4 receive a tuple with %zu integers\n", int_args.size()); size 76 python_bindings/python/RDom.cpp assert((exprs.size() % 2) == 0); size 78 python_bindings/python/RDom.cpp for (size_t i = 0; i < exprs.size(); i += 2) { size 37 src/AddImageChecks.cpp r.dimensions = (int)op->args.size(); size 43 src/AddImageChecks.cpp r.dimensions = (int)op->args.size(); size 78 src/AddImageChecks.cpp for (size_t i = 0; i < f.values().size(); i++) { size 83 src/AddImageChecks.cpp if (f.values().size() > 1) { size 152 src/AddImageChecks.cpp for (size_t i = 0; i < f.output_buffers().size(); i++) { size 167 src/AddImageChecks.cpp internal_assert(touched.empty() || (int)(touched.size()) == dimensions); size 175 src/AddImageChecks.cpp for (size_t i = 0; i < order.size(); i++) { size 179 src/AddImageChecks.cpp for (size_t j = 0; j < args.size(); j++) { size 191 src/AddImageChecks.cpp for (size_t i = 0; i < extern_users.size(); i++) { size 479 src/AddImageChecks.cpp for (size_t i = 0; i < constraints.size(); i++) { size 513 src/AddImageChecks.cpp for (size_t i = asserts_host_alignment.size(); i > 0; i--) { size 519 src/AddImageChecks.cpp for (size_t i = dims_no_overflow_asserts.size(); i > 0; i--) { size 524 src/AddImageChecks.cpp for (size_t i = lets_overflow.size(); i > 0; i--) { size 541 src/AddImageChecks.cpp for (size_t i = asserts_constrained.size(); i > 0; i--) { size 546 src/AddImageChecks.cpp for (size_t i = asserts_required.size(); i > 0; i--) { size 551 src/AddImageChecks.cpp for (size_t i = asserts_elem_size.size(); i > 0; i--) { size 561 src/AddImageChecks.cpp for (size_t i = buffer_rewrites.size(); i > 0; i--) { size 568 src/AddImageChecks.cpp for (size_t i = asserts_proposed.size(); i > 0; i--) { size 574 src/AddImageChecks.cpp for (size_t i = lets_proposed.size(); i > 0; i--) { size 579 src/AddImageChecks.cpp for (size_t i = lets_constrained.size(); i > 0; i--) { size 584 src/AddImageChecks.cpp for (size_t i = lets_required.size(); i > 0; i--) { size 89 src/AddParameterChecks.cpp for (size_t i = 0; i < lets.size(); i++) { size 98 src/AddParameterChecks.cpp for (size_t i = 0; i < asserts.size(); i++) { size 39 src/AllocationBoundsInference.cpp Box required(op->bounds.size()); size 40 src/AllocationBoundsInference.cpp for (size_t i = 0; i < required.size(); i++) { size 53 src/AllocationBoundsInference.cpp internal_assert(b.size() == op->bounds.size()); size 55 src/AllocationBoundsInference.cpp for (size_t i = 0; i < b.size(); i++) { size 58 src/AllocationBoundsInference.cpp for (size_t j = 0; j < f.schedule().bounds().size(); j++) { size 134 src/AllocationBoundsInference.cpp for (size_t i = 0; i < f.extern_arguments().size(); i++) { size 326 src/AssociativeOpsTable.cpp for (size_t i = 1; i < exprs.size() - 1; ++i) { size 331 src/AssociativeOpsTable.cpp if (exprs.size() > 2) { size 367 src/AssociativeOpsTable.cpp const vector<AssociativePattern> &table = get_ops_table_helper(exprs[0].type(), root, exprs.size()); size 368 src/AssociativeOpsTable.cpp debug(7) << "Table size: " << table.size() << "\n"; size 40 src/AssociativeOpsTable.h AssociativePattern(size_t size) : ops(size), identities(size), is_commutative(false) {} size 47 src/AssociativeOpsTable.h if ((is_commutative != other.is_commutative) || (ops.size() != other.ops.size())) { size 50 src/AssociativeOpsTable.h for (size_t i = 0; i < size(); ++i) { size 58 src/AssociativeOpsTable.h size_t size() const { return ops.size(); } size 32 src/Associativity.cpp internal_assert(index < (int)v.size()); size 61 src/Associativity.cpp internal_assert(args.size() == op->args.size()) size 63 src/Associativity.cpp for (size_t i = 0; i < op->args.size(); i++) { size 72 src/Associativity.cpp internal_assert(op->value_index < (int)op_x_names.size()); size 107 src/Associativity.cpp for (size_t i = 0; i < x_names.size(); ++i) { size 118 src/Associativity.cpp for (size_t i = 0; i < y_names.size(); ++i) { size 128 src/Associativity.cpp for (size_t i = 0; i < x_names.size(); ++i) { size 161 src/Associativity.cpp internal_assert(op_x_names.size() == op_y_names.size()); size 162 src/Associativity.cpp internal_assert(op_x_names.size() == x_parts.size()); size 163 src/Associativity.cpp internal_assert(op_x_names.size() == exprs.size()); size 164 src/Associativity.cpp internal_assert(op_x_names.size() == assoc_op.size()); size 172 src/Associativity.cpp internal_assert(pattern.size() == op_x_names.size()); size 177 src/Associativity.cpp for (size_t i = 0; i < pattern.size(); ++i) { size 189 src/Associativity.cpp for (size_t index = 0; index < op_y_names.size(); ++index) { size 208 src/Associativity.cpp for (size_t index = 0; index < exprs.size(); ++index) { size 232 src/Associativity.cpp if ((exprs.size() == 1) && (!x_parts[0].defined())) { size 253 src/Associativity.cpp for (size_t i = 0; i < dependencies.size(); ++i) { size 254 src/Associativity.cpp for (size_t j = 0; j < dependencies.size(); ++j) { size 275 src/Associativity.cpp vector<set<int>> subgraphs(dependencies.size()); size 276 src/Associativity.cpp for (size_t i = 0; i < dependencies.size(); ++i) { size 283 src/Associativity.cpp for (size_t j = 0; j < dependencies.size(); ++j) { size 285 src/Associativity.cpp if ((i == j) || (current.size() > other.size()) || (j < i && subgraphs[i].empty())) { size 309 src/Associativity.cpp AssociativeOp assoc_op(exprs.size()); size 317 src/Associativity.cpp vector<string> op_x_names(exprs.size()), op_y_names(exprs.size()); size 318 src/Associativity.cpp for (size_t idx = 0; idx < exprs.size(); ++idx) { size 323 src/Associativity.cpp vector<set<int>> dependencies(exprs.size()); size 324 src/Associativity.cpp vector<Expr> x_parts(exprs.size()); size 329 src/Associativity.cpp for (int idx = exprs.size()-1; idx >= 0; --idx) { size 356 src/Associativity.cpp internal_assert((exprs.size() != 1) || all_independent) << "1D tuple should be all independent\n"; size 371 src/Associativity.cpp internal_assert(subgraphs.size() == exprs.size()); size 373 src/Associativity.cpp for (size_t i = 0; i < subgraphs.size(); ++i) { size 378 src/Associativity.cpp if (subgraphs[i].size() > 2) { size 380 src/Associativity.cpp debug(5) << "Subgraph " << i << " size is " << subgraphs[i].size() << " which is bigger than 2\n"; size 388 src/Associativity.cpp AssociativeOp sub_assoc_op(sub_exprs.size()); size 449 src/Associativity.cpp for (size_t i = 0; i < args.size(); ++i) { size 451 src/Associativity.cpp if (i != args.size() - 1) { size 457 src/Associativity.cpp if (exprs.size() == 1) { size 459 src/Associativity.cpp } else if (exprs.size() > 1) { size 461 src/Associativity.cpp for (size_t i = 0; i < exprs.size(); ++i) { size 463 src/Associativity.cpp if (i != exprs.size() - 1) { size 481 src/Associativity.cpp for (size_t i = 0; i < assoc_op.size(); ++i) { size 504 src/Associativity.cpp for (size_t i = 0; i < assoc_op.size(); ++i) { size 606 src/Associativity.cpp for (size_t i = 0; i < xs.size(); ++i) { size 76 src/Associativity.h AssociativeOp(size_t size) : pattern(size), xs(size), ys(size), is_associative(false) {} size 83 src/Associativity.h size_t size() const { return pattern.size(); } size 10 src/BoundaryConditions.cpp user_assert(args.size() >= bounds.size()) << size 11 src/BoundaryConditions.cpp "repeat_edge called with more bounds (" << bounds.size() << size 12 src/BoundaryConditions.cpp ") than dimensions (" << args.size() << ") Func " << size 16 src/BoundaryConditions.cpp for (size_t i = 0; i < bounds.size(); i++) { size 32 src/BoundaryConditions.cpp actuals.insert(actuals.end(), args.begin() + actuals.size(), args.end()); size 44 src/BoundaryConditions.cpp user_assert(args.size() >= bounds.size()) << size 45 src/BoundaryConditions.cpp "constant_exterior called with more bounds (" << bounds.size() << size 46 src/BoundaryConditions.cpp ") than dimensions (" << source_args.size() << ") Func " << size 50 src/BoundaryConditions.cpp for (size_t i = 0; i < bounds.size(); i++) { size 66 src/BoundaryConditions.cpp if (value.as_vector().size() > 1) { size 68 src/BoundaryConditions.cpp for (size_t i = 0; i < value.as_vector().size(); i++) { size 88 src/BoundaryConditions.cpp user_assert(args.size() >= bounds.size()) << size 89 src/BoundaryConditions.cpp "repeat_image called with more bounds (" << bounds.size() << size 90 src/BoundaryConditions.cpp ") than dimensions (" << args.size() << ") Func " << size 94 src/BoundaryConditions.cpp for (size_t i = 0; i < bounds.size(); i++) { size 118 src/BoundaryConditions.cpp actuals.insert(actuals.end(), args.begin() + actuals.size(), args.end()); size 129 src/BoundaryConditions.cpp user_assert(args.size() >= bounds.size()) << size 130 src/BoundaryConditions.cpp "mirror_image called with more bounds (" << bounds.size() << size 131 src/BoundaryConditions.cpp ") than dimensions (" << args.size() << ") Func " << size 135 src/BoundaryConditions.cpp for (size_t i = 0; i < bounds.size(); i++) { size 159 src/BoundaryConditions.cpp actuals.insert(actuals.end(), args.begin() + actuals.size(), args.end()); size 170 src/BoundaryConditions.cpp user_assert(args.size() >= bounds.size()) << size 171 src/BoundaryConditions.cpp "mirror_interior called with more bounds (" << bounds.size() << size 172 src/BoundaryConditions.cpp ") than dimensions (" << args.size() << ") Func " << size 176 src/BoundaryConditions.cpp for (size_t i = 0; i < bounds.size(); i++) { size 205 src/BoundaryConditions.cpp actuals.insert(actuals.end(), args.begin() + actuals.size(), args.end()); size 583 src/Bounds.cpp std::vector<Expr> new_args(op->args.size()); size 585 src/Bounds.cpp for (size_t i = 0; i < op->args.size() && const_args; i++) { size 626 src/Bounds.cpp assert(op->args.size() == 1); size 629 src/Bounds.cpp assert(op->args.size() == 2); size 632 src/Bounds.cpp assert(op->args.size() == 3); size 646 src/Bounds.cpp } else if (op->args.size() == 1 && interval.is_bounded() && size 673 src/Bounds.cpp internal_assert(op->args.size() >= 1); size 802 src/Bounds.cpp internal_assert(a.size() == b.size()) << "Mismatched dimensionality in region union\n"; size 804 src/Bounds.cpp for (size_t i = 0; i < a.size(); i++) { size 826 src/Bounds.cpp internal_assert(a.size() == b.size()); size 834 src/Bounds.cpp for (size_t i = 0; i < a.size(); i++) { size 905 src/Bounds.cpp internal_assert(a.size() == b.size()); size 906 src/Bounds.cpp result.resize(a.size()); size 908 src/Bounds.cpp for (size_t i = 0; i < a.size(); i++) { size 928 src/Bounds.cpp if (a.size() != b.size() && (a.size() == 0 || b.size() == 0)) { size 932 src/Bounds.cpp internal_assert(a.size() == b.size()); size 941 src/Bounds.cpp for (size_t i = 0; i < a.size(); i++) { size 957 src/Bounds.cpp if (inner.size() > outer.size()) { size 961 src/Bounds.cpp for (size_t i = 0; i < inner.size(); i++) { size 1002 src/Bounds.cpp assert(op->args.size() == 3); size 1020 src/Bounds.cpp Box b(op->args.size()); size 1022 src/Bounds.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1079 src/Bounds.cpp for (size_t i = 0; i < box.size(); i++) { size 1293 src/Bounds.cpp Box b(op->args.size()); size 1294 src/Bounds.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1302 src/Bounds.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1305 src/Bounds.cpp for (size_t i = 0; i < op->values.size(); i++) { size 1351 src/Bounds.cpp internal_assert(boxes.size() <= 1); size 1422 src/Bounds.cpp for (size_t i = 0; i < order.size(); i++) { size 1434 src/Bounds.cpp for (size_t k = 0; k < f.args().size(); k++) { size 54 src/Bounds.h size_t size() const {return bounds.size();} size 153 src/BoundsInference.cpp for (size_t i = 0; i < result.size(); ++i) { size 160 src/BoundsInference.cpp for (size_t i = 1; i < predicates.size(); ++i) { size 174 src/BoundsInference.cpp for (size_t i = specializations.size(); i > 0; i--) { size 192 src/BoundsInference.cpp for (size_t i = 0; i < result.size(); i++) { size 202 src/BoundsInference.cpp if (vec.size() > 1) { size 205 src/BoundsInference.cpp for (size_t i = 1; i < vec.size(); ++i) { size 212 src/BoundsInference.cpp debug(4) << "compute_exprs: all values (size: " << vec.size() << ") " size 240 src/BoundsInference.cpp internal_assert(result.size() == 2); size 285 src/BoundsInference.cpp internal_assert(b.empty() || b.size() == func_args.size()); size 293 src/BoundsInference.cpp vector<bool> always_pure_dims(func_args.size(), true); size 295 src/BoundsInference.cpp for (size_t j = 0; j < always_pure_dims.size(); j++) { size 303 src/BoundsInference.cpp if (stage < func.updates().size()) { size 304 src/BoundsInference.cpp size_t stages = func.updates().size(); size 306 src/BoundsInference.cpp for (size_t i = 0; i < always_pure_dims.size(); i++) { size 406 src/BoundsInference.cpp for (size_t i = 0; i < func.schedule().bounds().size(); i++) { size 451 src/BoundsInference.cpp for (size_t d = 0; d < b.size(); d++) { size 490 src/BoundsInference.cpp for (size_t j = 0; j < args.size(); j++) { size 600 src/BoundsInference.cpp for (size_t i = 0; i < lets.size(); i++) { size 646 src/BoundsInference.cpp vector<bool> inlined(f.size()); size 647 src/BoundsInference.cpp for (size_t i = 0; i < inlined.size(); i++) { size 648 src/BoundsInference.cpp if (i < f.size() - 1 && size 660 src/BoundsInference.cpp for (size_t i = 0; i < f.size(); i++) { size 672 src/BoundsInference.cpp for (size_t j = 0; j < f[i].updates().size(); j++) { size 682 src/BoundsInference.cpp for (size_t i = f.size(); i > 0; i--) { size 685 src/BoundsInference.cpp for (size_t j = 0; j < stages.size(); j++) { size 687 src/BoundsInference.cpp for (size_t k = 0; k < s.exprs.size(); k++) { size 698 src/BoundsInference.cpp for (size_t i = 0; i < stages.size(); i++) { size 715 src/BoundsInference.cpp for (size_t i = 0; i < stages.size(); i++) { size 733 src/BoundsInference.cpp for (size_t j = 0; j < args.size(); j++) { size 736 src/BoundsInference.cpp string stage_name = f.name() + ".s" + std::to_string(f.updates().size()); size 772 src/BoundsInference.cpp for (size_t k = 0; k < b.size(); k++) { size 832 src/BoundsInference.cpp for (size_t i = 0; i < stages.size(); i++) { size 868 src/BoundsInference.cpp for (size_t i = 0; i < stages.size(); i++) { size 884 src/BoundsInference.cpp internal_assert((int)box.size() == f.dimensions()); size 896 src/BoundsInference.cpp vector<bool> bounds_needed(stages.size(), false); size 897 src/BoundsInference.cpp for (size_t i = 0; i < stages.size(); i++) { size 907 src/BoundsInference.cpp for (size_t j = 0; j < stages[i].consumers.size(); j++) { size 918 src/BoundsInference.cpp for (size_t i = 0; i < box.size(); i++) { size 972 src/BoundsInference.cpp for (size_t i = lets.size(); i > 0; i--) { size 999 src/BoundsInference.cpp vector<Function> funcs(order.size()); size 1000 src/BoundsInference.cpp for (size_t i = 0; i < order.size(); i++) { size 26 src/Buffer.cpp int missing_dimensions = buf.dimensions() - (int)args.size() + 1; size 51 src/CPlusPlusMangle.cpp if (table.size() >= 10) { size 57 src/CPlusPlusMangle.cpp auto insert_result = table.insert({ name, table.size() }); size 70 src/CPlusPlusMangle.cpp if (mangled.full_name.size() < 2) { size 212 src/CPlusPlusMangle.cpp for (size_t i = type.handle_type->enclosing_types.size(); i > 0; i--) { size 217 src/CPlusPlusMangle.cpp for (size_t i = type.handle_type->namespaces.size(); i > 0; i--) { size 282 src/CPlusPlusMangle.cpp for (size_t i = namespaces.size(); i > 0; i--) { size 291 src/CPlusPlusMangle.cpp if (args.size() == 0) { size 315 src/CPlusPlusMangle.cpp oss << id.size() << id; size 372 src/CPlusPlusMangle.cpp auto place = prev_seen.insert({ prefix, prev_seen.size() }); size 466 src/CPlusPlusMangle.cpp bool is_directly_in_std = enclosing_types.size() == 0 && (namespaces.size() == 1 && namespaces[0] == "std"); size 583 src/CPlusPlusMangle.cpp if (args.size() == 0) { size 140 src/CSE.cpp number = (int)entries.size(); size 283 src/CSE.cpp vector<Expr> new_version(gvn.entries.size()); size 285 src/CSE.cpp for (size_t i = 0; i < gvn.entries.size(); i++) { size 304 src/CSE.cpp for (size_t i = lets.size(); i > 0; i--) { size 371 src/CSE.cpp for (size_t i = exprs.size() - 1; i > 0; i--) { size 80 src/CanonicalizeGPUVars.cpp for (size_t i = 0; i < v.size(); ++i) { size 82 src/CanonicalizeGPUVars.cpp if (i != v.size() - 1) { size 91 src/CanonicalizeGPUVars.cpp internal_assert(v.size() == 2); size 121 src/CanonicalizeGPUVars.cpp internal_assert(v.size() > 2); size 52 src/Closure.cpp ref.size = image.size_in_bytes(); size 78 src/Closure.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 54 src/Closure.h size_t size; size 56 src/Closure.h Buffer() : dimensions(0), read(false), write(false), size(0) { } size 257 src/CodeGen_ARM.cpp for (size_t i = 0; i < casts.size() ; i++) { size 269 src/CodeGen_ARM.cpp for (size_t i = 0; i < matches.size(); i++) { size 386 src/CodeGen_ARM.cpp for (size_t i = 0; i < left_shifts.size(); i++) { size 420 src/CodeGen_ARM.cpp for (size_t i = 0; i < averagings.size(); i++) { size 441 src/CodeGen_ARM.cpp for (size_t i = 0; i < negations.size(); i++) { size 687 src/CodeGen_ARM.cpp 2 <= shuffle->vectors.size() && shuffle->vectors.size() <= 4) { size 689 src/CodeGen_ARM.cpp const int num_vecs = shuffle->vectors.size(); size 698 src/CodeGen_ARM.cpp for (size_t i = 0; i < lets.size(); i++) { size 772 src/CodeGen_ARM.cpp for (size_t i = 0; i < lets.size(); i++) { size 944 src/CodeGen_ARM.cpp internal_assert(op->args.size() == 1); size 256 src/CodeGen_C.cpp for (size_t i = 0; i < type.handle_type->namespaces.size(); i++) { size 259 src/CodeGen_C.cpp for (size_t i = 0; i < type.handle_type->enclosing_types.size(); i++) { size 341 src/CodeGen_C.cpp for (size_t i = 0; i < name.size(); i++) { size 395 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 492 src/CodeGen_C.cpp for (size_t i = 0; i < args.size(); i++) { size 497 src/CodeGen_C.cpp for (size_t ns = 0; ns < handle_type->namespaces.size(); ns++ ) { size 509 src/CodeGen_C.cpp for (size_t ns = 0; ns < handle_type->namespaces.size(); ns++ ) { size 516 src/CodeGen_C.cpp for (size_t i = 0; i < args.size(); i++) { size 569 src/CodeGen_C.cpp for (size_t i = 0; i < args.size(); i++) { size 579 src/CodeGen_C.cpp if (i < args.size()-1) stream << ", "; size 609 src/CodeGen_C.cpp for (size_t i = 0; i < namespaces.size(); i++) { size 884 src/CodeGen_C.cpp internal_assert(op->args.size() == 3); size 897 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 902 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 907 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 912 src/CodeGen_C.cpp internal_assert(op->args.size() == 1); size 915 src/CodeGen_C.cpp internal_assert(op->args.size() == 1); size 918 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 923 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 928 src/CodeGen_C.cpp internal_assert(op->args.size() == 3); size 932 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 938 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 943 src/CodeGen_C.cpp internal_assert(op->args.size() == 3); size 970 src/CodeGen_C.cpp internal_assert(op->args.size() == 1); size 974 src/CodeGen_C.cpp internal_assert(op->args.size() >= 1); size 978 src/CodeGen_C.cpp internal_assert(op->args.size() == 1); size 989 src/CodeGen_C.cpp string size = print_expr(simplify((op->args[0] + 7)/8)); size 992 src/CodeGen_C.cpp stream << "uint64_t " << array_name << "[" << size << "];"; size 1004 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1010 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1016 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1031 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1059 src/CodeGen_C.cpp for (size_t i = 0; i < printf_args.size(); i++) { size 1066 src/CodeGen_C.cpp internal_assert(op->args.size() == 2); size 1095 src/CodeGen_C.cpp user_assert((op->args.size() == 4) && is_one(op->args[2])) size 1112 src/CodeGen_C.cpp vector<string> args(op->args.size()); size 1113 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1122 src/CodeGen_C.cpp for (size_t i = 0; i < op->args.size(); i++) { size 1326 src/CodeGen_C.cpp internal_assert(op->extents.size() > 0); size 1330 src/CodeGen_C.cpp for (size_t i = 1; i < op->extents.size(); i++) { size 1503 src/CodeGen_C.cpp halide_error(__user_context_, "32-bit signed overflow computing size of allocation tmp.heap\n"); size 1546 src/CodeGen_C.cpp while (diff_end < (int)src.size() && src[diff_end] != '\n') diff_end++; size 88 src/CodeGen_GPU_Host.cpp internal_assert(allocate->type == UInt(8) && allocate->extents.size() == 1); size 200 src/CodeGen_GPU_Host.cpp Value *kernel_size = ConstantInt::get(i32_t, kernel_src.size()); size 241 src/CodeGen_GPU_Host.cpp for (size_t i = 0; i < kernel_name.size(); i++) { size 294 src/CodeGen_GPU_Host.cpp for (size_t i = 0; i < closure_args.size(); i++) { size 307 src/CodeGen_GPU_Host.cpp for (size_t i = 0; i < closure_args.size(); i++) { size 309 src/CodeGen_GPU_Host.cpp closure_args[i].size = allocations.get(closure_args[i].name).constant_bytes; size 328 src/CodeGen_GPU_Host.cpp int num_args = (int)closure_args.size(); size 79 src/CodeGen_Hexagon.cpp cl::ParseCommandLineOptions(options.size(), options.data()); size 586 src/CodeGen_Hexagon.cpp llvm_arg_types.reserve(arg_types.size()); size 606 src/CodeGen_Hexagon.cpp if (args.size() + 1 == intrin_ty->getNumParams()) { size 622 src/CodeGen_Hexagon.cpp internal_assert(args.size() == intrin_ty->getNumParams()); size 623 src/CodeGen_Hexagon.cpp for (size_t i = 0; i < args.size(); i++) { size 687 src/CodeGen_Hexagon.cpp internal_assert(FType->getNumParams() == Ops.size()); size 708 src/CodeGen_Hexagon.cpp int result_elements = v_ty->getVectorNumElements()*v.size(); size 709 src/CodeGen_Hexagon.cpp if (v.size() == 2) { size 742 src/CodeGen_Hexagon.cpp } else if (v.size() == 3) { size 752 src/CodeGen_Hexagon.cpp for (size_t j = 0; j < v.size(); j++) { size 768 src/CodeGen_Hexagon.cpp int size = static_cast<int>(indices.size()); size 773 src/CodeGen_Hexagon.cpp for (int i = 0; i < size; i++) { size 798 src/CodeGen_Hexagon.cpp for (int i = 0; i < size; i++) { size 809 src/CodeGen_Hexagon.cpp while (begin < indices.size() && indices[begin] == -1) { size 812 src/CodeGen_Hexagon.cpp size_t end = indices.size(); size 846 src/CodeGen_Hexagon.cpp int result_elements = static_cast<int>(indices.size()); size 852 src/CodeGen_Hexagon.cpp for (size_t i = 1; i < indices.size(); i++) { size 1081 src/CodeGen_Hexagon.cpp for (int j = 0; j < static_cast<int>(lut_slices.size()); j++) { size 1120 src/CodeGen_Hexagon.cpp llvm_indices.reserve(indices.size()); size 1136 src/CodeGen_Hexagon.cpp llvm::Type *i8x_t = VectorType::get(i8_t, indices.size()); size 1137 src/CodeGen_Hexagon.cpp llvm::Type *i16x_t = VectorType::get(i16_t, indices.size()); size 1156 src/CodeGen_Hexagon.cpp llvm_indices.reserve(indices.size()); size 1164 src/CodeGen_Hexagon.cpp Value *minus_one = codegen(make_const(UInt(16, indices.size()), -1)); size 1190 src/CodeGen_Hexagon.cpp for (size_t i = 1; i < ranges.size(); i++) { size 1228 src/CodeGen_Hexagon.cpp for (size_t i = 1; i < ops.size(); i++) { size 1465 src/CodeGen_Hexagon.cpp internal_assert(op->args.size() == 2); size 1473 src/CodeGen_Hexagon.cpp internal_assert(op->args.size() == 4); size 1482 src/CodeGen_Hexagon.cpp internal_assert(op->args.size() == 3); size 1498 src/CodeGen_Hexagon.cpp internal_assert(op->args.size() == 1); size 1511 src/CodeGen_Hexagon.cpp internal_assert(op->args.size() == 2); size 1519 src/CodeGen_Hexagon.cpp internal_assert((op->args.size() == 4) || (op->args.size() == 6)) size 1529 src/CodeGen_Hexagon.cpp if (op->args.size() == 4) { // 1D prefetch: {base, offset, extent0, stride0} size 212 src/CodeGen_Internal.cpp bool can_allocation_fit_on_stack(int64_t size) { size 213 src/CodeGen_Internal.cpp user_assert(size > 0) << "Allocation size should be a positive number\n"; size 214 src/CodeGen_Internal.cpp return (size <= 1024 * 16); size 56 src/CodeGen_Internal.h bool can_allocation_fit_on_stack(int64_t size); size 341 src/CodeGen_LLVM.cpp cl::ParseCommandLineOptions((int)(c_arg_vec.size()), &c_arg_vec[0], "Halide compiler\n"); size 567 src/CodeGen_LLVM.cpp vector<llvm::Type *> arg_types(args.size()); size 568 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 604 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 645 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 881 src/CodeGen_LLVM.cpp llvm::Value *ptr = builder->CreateConstGEP1_32(arg_array, wrapper_args.size()); size 906 src/CodeGen_LLVM.cpp const int num_args = (int) args.size(); size 1805 src/CodeGen_LLVM.cpp internal_assert(vecs.size() >= 1); size 1806 src/CodeGen_LLVM.cpp for (size_t i = 1; i < vecs.size(); i++) { size 1811 src/CodeGen_LLVM.cpp if (vecs.size() == 1) { size 1813 src/CodeGen_LLVM.cpp } else if (vecs.size() == 2) { size 1825 src/CodeGen_LLVM.cpp for (size_t i = 0; i < vecs.size(); i++) { size 1835 src/CodeGen_LLVM.cpp if (even_vecs.size() > odd_vecs.size()) { size 1839 src/CodeGen_LLVM.cpp internal_assert(even_vecs.size() == odd_vecs.size()); size 1846 src/CodeGen_LLVM.cpp int result_elements = vec_elements*vecs.size(); size 1851 src/CodeGen_LLVM.cpp if (i%vecs.size() < vecs.size() - 1) { size 1852 src/CodeGen_LLVM.cpp indices[i] = idx%2 == 0 ? idx/2 : idx/2 + vec_elements*even_vecs.size(); size 1861 src/CodeGen_LLVM.cpp if (i%vecs.size() < vecs.size() - 1) { size 1864 src/CodeGen_LLVM.cpp indices[i] = i/vecs.size() + result_elements; size 2083 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 3); size 2102 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2105 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2108 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2111 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2114 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2153 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2156 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2164 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2182 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2215 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2226 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2237 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 3); size 2240 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2248 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2260 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2272 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 3); size 2307 src/CodeGen_LLVM.cpp vector<llvm::Value *> args(op->args.size()); size 2308 src/CodeGen_LLVM.cpp vector<llvm::Type *> types(op->args.size()); size 2309 src/CodeGen_LLVM.cpp for (size_t i = 0; i < op->args.size(); i++) { size 2318 src/CodeGen_LLVM.cpp if (args.size() == 1) { size 2323 src/CodeGen_LLVM.cpp (llvm::Type *)ArrayType::get(types[0], types.size()) : size 2327 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 2343 src/CodeGen_LLVM.cpp for (size_t i = 0; i < op->args.size(); i++) { size 2346 src/CodeGen_LLVM.cpp buf_size += op->args[i].as<StringImm>()->value.size(); size 2389 src/CodeGen_LLVM.cpp for (size_t i = 0; i < op->args.size(); i++) { size 2434 src/CodeGen_LLVM.cpp internal_assert(op->args.size() > 0); size 2439 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2459 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2511 src/CodeGen_LLVM.cpp internal_assert(op->args.size() >= 4); size 2512 src/CodeGen_LLVM.cpp internal_assert(!(op->args.size() & 1)); size 2521 src/CodeGen_LLVM.cpp for (size_t i = 0; i < op->args.size(); i += 2) { size 2586 src/CodeGen_LLVM.cpp for (int i = sub_fns.size() - 1; i >= 0; i--) { size 2615 src/CodeGen_LLVM.cpp user_assert((op->args.size() == 4) && is_one(op->args[2])) size 2644 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 2); size 2650 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2654 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2659 src/CodeGen_LLVM.cpp internal_assert(op->args.size() == 1); size 2683 src/CodeGen_LLVM.cpp vector<Value *> args(op->args.size()); size 2684 src/CodeGen_LLVM.cpp for (size_t i = 0; i < op->args.size(); i++) { size 2702 src/CodeGen_LLVM.cpp vector<llvm::Type *> arg_types(args.size()); size 2703 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 2735 src/CodeGen_LLVM.cpp i < std::min(args.size(), (size_t)(func_t->getNumParams())); size 2784 src/CodeGen_LLVM.cpp vector<Value *> arg_lane(args.size()); size 2785 src/CodeGen_LLVM.cpp for (size_t j = 0; j < args.size(); j++) { size 2846 src/CodeGen_LLVM.cpp data.reserve(s.size()+1); size 2859 src/CodeGen_LLVM.cpp llvm::Type *type = ArrayType::get(i8_t, data.size()); size 2863 src/CodeGen_LLVM.cpp ArrayRef<unsigned char> data_array((const unsigned char *)&data[0], data.size()); size 3240 src/CodeGen_LLVM.cpp value = slice_vector(value, op->indices[0], op->indices.size()); size 3259 src/CodeGen_LLVM.cpp Value *size = ConstantInt::get(i32_t, n); size 3260 src/CodeGen_LLVM.cpp AllocaInst *ptr = builder->CreateAlloca(t, size, name); size 3287 src/CodeGen_LLVM.cpp vector<Value *> arg_values(args.size()); size 3288 src/CodeGen_LLVM.cpp for (size_t i = 0; i < args.size(); i++) { size 3309 src/CodeGen_LLVM.cpp for (size_t i = 0; i < arg_values.size(); i++) { size 3333 src/CodeGen_LLVM.cpp vector<llvm::Type *> arg_types(arg_values.size()); size 3334 src/CodeGen_LLVM.cpp for (size_t i = 0; i < arg_values.size(); i++) { size 3355 src/CodeGen_LLVM.cpp Value *CodeGen_LLVM::slice_vector(Value *vec, int start, int size) { size 3358 src/CodeGen_LLVM.cpp if (start == 0 && size == vec_lanes) { size 3362 src/CodeGen_LLVM.cpp vector<int> indices(size); size 3363 src/CodeGen_LLVM.cpp for (int i = 0; i < size; i++) { size 3375 src/CodeGen_LLVM.cpp if (v.size() == 1) return v[0]; size 3388 src/CodeGen_LLVM.cpp while (vecs.size() > 1) { size 3391 src/CodeGen_LLVM.cpp for (size_t i = 0; i < vecs.size()-1; i += 2) { size 3423 src/CodeGen_LLVM.cpp if (vecs.size() & 1) { size 3436 src/CodeGen_LLVM.cpp vector<Constant *> llvm_indices(indices.size()); size 3437 src/CodeGen_LLVM.cpp for (size_t i = 0; i < llvm_indices.size(); i++) { size 3478 src/CodeGen_LLVM.cpp for (size_t i = 0; i < sizes_to_try.size(); i++) { size 369 src/CodeGen_Metal_Dev.cpp int32_t size = op->constant_allocation_size(); size 370 src/CodeGen_Metal_Dev.cpp user_assert(size > 0) size 377 src/CodeGen_Metal_Dev.cpp << print_name(op->name) << "[" << size << "];\n"; size 423 src/CodeGen_Metal_Dev.cpp size_t size; size 425 src/CodeGen_Metal_Dev.cpp BufferSize() : size(0) {} size 426 src/CodeGen_Metal_Dev.cpp BufferSize(string name, size_t size) : name(name), size(size) {} size 429 src/CodeGen_Metal_Dev.cpp return size < r.size; size 450 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 453 src/CodeGen_Metal_Dev.cpp args[i].size > 0) { size 454 src/CodeGen_Metal_Dev.cpp constants.push_back(BufferSize(args[i].name, args[i].size)); size 465 src/CodeGen_Metal_Dev.cpp for (size_t i = 1; i < constants.size(); i++) { size 466 src/CodeGen_Metal_Dev.cpp constants[i].size += constants[i - 1].size; size 471 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 480 src/CodeGen_Metal_Dev.cpp stream << "#if " << constant->size << " < MAX_CONSTANT_BUFFER_SIZE && " size 494 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 520 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 539 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 552 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 560 src/CodeGen_Metal_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 193 src/CodeGen_OpenCL_Dev.cpp internal_assert(op->args.size() == 3); size 404 src/CodeGen_OpenCL_Dev.cpp int32_t size = op->constant_allocation_size(); size 405 src/CodeGen_OpenCL_Dev.cpp user_assert(size > 0) size 412 src/CodeGen_OpenCL_Dev.cpp << print_name(op->name) << "[" << size << "];\n"; size 451 src/CodeGen_OpenCL_Dev.cpp if (op->vectors.size() == 1) { size 455 src/CodeGen_OpenCL_Dev.cpp } else if (op->vectors.size() == 2) { size 474 src/CodeGen_OpenCL_Dev.cpp int num_vectors = op->vectors.size(); size 514 src/CodeGen_OpenCL_Dev.cpp size_t size; size 516 src/CodeGen_OpenCL_Dev.cpp BufferSize() : size(0) {} size 517 src/CodeGen_OpenCL_Dev.cpp BufferSize(string name, size_t size) : name(name), size(size) {} size 520 src/CodeGen_OpenCL_Dev.cpp return size < r.size; size 545 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 548 src/CodeGen_OpenCL_Dev.cpp args[i].size > 0) { size 549 src/CodeGen_OpenCL_Dev.cpp constants.push_back(BufferSize(args[i].name, args[i].size)); size 560 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 1; i < constants.size(); i++) { size 561 src/CodeGen_OpenCL_Dev.cpp constants[i].size += constants[i - 1].size; size 566 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 575 src/CodeGen_OpenCL_Dev.cpp stream << "#if " << constant->size << " <= MAX_CONSTANT_BUFFER_SIZE && " size 589 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 609 src/CodeGen_OpenCL_Dev.cpp if (i < args.size()-1) stream << ",\n"; size 619 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 627 src/CodeGen_OpenCL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 272 src/CodeGen_OpenGLCompute_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 293 src/CodeGen_OpenGLCompute_Dev.cpp internal_assert(op->extents.size() == 1 && is_const(op->extents[0])); size 334 src/CodeGen_OpenGLCompute_Dev.cpp internal_assert(op->extents.size() >= 1); size 75 src/CodeGen_OpenGL_Dev.cpp vector<Expr> new_args(args.size()); size 76 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 199 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < op->args.size(); i++) { size 295 src/CodeGen_OpenGL_Dev.cpp internal_assert(op->vectors.size() == 1); size 588 src/CodeGen_OpenGL_Dev.cpp internal_assert(op->args.size() == 5); size 673 src/CodeGen_OpenGL_Dev.cpp internal_assert(op->args.size() == 6); size 729 src/CodeGen_OpenGL_Dev.cpp internal_assert(op->args.size() == 2); size 765 src/CodeGen_OpenGL_Dev.cpp int32_t size = op->constant_allocation_size(); size 766 src/CodeGen_OpenGL_Dev.cpp user_assert(size) << "Allocations inside GLSL kernels must be constant-sized\n"; size 774 src/CodeGen_OpenGL_Dev.cpp if (size == 1) { size 780 src/CodeGen_OpenGL_Dev.cpp } else if (size <= 4 && all_access_constant.result) { size 782 src/CodeGen_OpenGL_Dev.cpp stream << print_type(op->type.with_lanes(size)) << " " << print_name(op->name) << ";\n"; size 787 src/CodeGen_OpenGL_Dev.cpp stream << print_type(op->type) << " " << print_name(op->name) << "[" << size << "];\n"; size 848 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 902 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 930 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 963 src/CodeGen_OpenGL_Dev.cpp for (size_t i = 0; i < s.size(); ) { size 966 src/CodeGen_OpenGL_Dev.cpp for (i++; i < s.size() && isdigit(s[i]); i++) { size 55 src/CodeGen_PTX_Dev.cpp vector<llvm::Type *> arg_types(args.size()); size 56 src/CodeGen_PTX_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 70 src/CodeGen_PTX_Dev.cpp for (size_t i = 0; i < args.size(); i++) { size 135 src/CodeGen_PTX_Dev.cpp for (size_t i = 0; i < arg_sym_names.size(); i++) { size 201 src/CodeGen_PTX_Dev.cpp int32_t size = alloc->constant_allocation_size(); size 202 src/CodeGen_PTX_Dev.cpp user_assert(size > 0) size 210 src/CodeGen_PTX_Dev.cpp Value *ptr = builder->CreateAlloca(llvm_type_of(alloc->type), ConstantInt::get(i32_t, size)); size 43 src/CodeGen_Posix.cpp for (size_t i = 0; i < extents.size(); i++) { size 110 src/CodeGen_PowerPC.cpp for (size_t i = 0; i < matches.size(); i++) { size 278 src/CodeGen_X86.cpp for (size_t i = 0; i < matches.size(); i++) { size 28 src/DebugToFile.cpp user_assert(op->types.size() == 1) size 39 src/DebugToFile.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 53 src/Definition.cpp for (size_t i = 0; i < values.size(); ++i) { size 56 src/Definition.cpp for (size_t i = 0; i < args.size(); ++i) { size 96 src/Definition.cpp for (size_t i = 0; i < rdom.domain().size(); i++) { size 71 src/Deinterleave.cpp if (stores.size() >= (size_t)max_stores) { size 258 src/Deinterleave.cpp std::vector<Expr> args(op->args.size()); size 259 src/Deinterleave.cpp for (size_t i = 0; i < args.size(); i++) { size 291 src/Deinterleave.cpp if ((int)op->vectors.size() == lane_stride) { size 293 src/Deinterleave.cpp } else if ((int)op->vectors.size() % lane_stride == 0) { size 295 src/Deinterleave.cpp std::vector<Expr> new_vectors(op->vectors.size() / lane_stride); size 296 src/Deinterleave.cpp for (size_t i = 0; i < new_vectors.size(); i++) { size 596 src/Deinterleave.cpp internal_assert(stores.size() <= (size_t)expected_stores); size 599 src/Deinterleave.cpp if (stores.size() != (size_t)expected_stores) goto fail; size 603 src/Deinterleave.cpp std::vector<Expr> args(stores.size()); size 604 src/Deinterleave.cpp std::vector<Expr> predicates(stores.size()); size 607 src/Deinterleave.cpp std::vector<int> offsets(stores.size()); size 612 src/Deinterleave.cpp for (size_t i = 0; i < stores.size(); ++i) { size 658 src/Deinterleave.cpp for (size_t i = 0; i < stores.size(); ++i) { size 661 src/Deinterleave.cpp j /= stores.size(); size 669 src/Deinterleave.cpp if (j < 0 || (size_t)j >= stores.size()) goto fail; size 689 src/Deinterleave.cpp t = t.with_lanes(lanes*stores.size()); size 21 src/DeviceArgument.cpp debug(2) << "buffer: " << b.first << " " << b.second.size; size 26 src/DeviceArgument.cpp DeviceArgument arg(b.first, true, b.second.type, b.second.dimensions, b.second.size); size 50 src/DeviceArgument.h size_t size; size 65 src/DeviceArgument.h size(0), size 79 src/DeviceArgument.h size(_size), size 38 src/DeviceInterface.cpp for (size_t i = 0; i < runtime.size(); i++) { size 257 src/Elf.cpp index = table.size(); size 265 src/Elf.cpp const char *assert_string_valid(const char *name, const char *data, size_t size) { size 266 src/Elf.cpp internal_assert(data <= name && name + strlen(name) + 1 <= data + size); size 277 src/Elf.cpp buf.reserve(buf.size() + std::distance(begin, end)*sizeof(*begin)); size 288 src/Elf.cpp buf.resize((buf.size() + alignment - 1) & ~(alignment - 1)); size 321 src/Elf.cpp std::unique_ptr<Object> parse_object_internal(const char *data, size_t size) { size 335 src/Elf.cpp internal_assert(data <= at && at + sizeof(Shdr<T>) <= data + size) size 347 src/Elf.cpp internal_assert(data <= strings && strings + sh->sh_size <= data + size); size 359 src/Elf.cpp const char *name = assert_string_valid(&strings[sh->sh_name], data, size); size 366 src/Elf.cpp internal_assert(data <= sh_data && sh_data + sh->sh_size <= data + size); size 382 src/Elf.cpp internal_assert(data <= sym_ptr && sym_ptr + sizeof(Sym<T>) <= data + size); size 384 src/Elf.cpp const char *name = assert_string_valid(&strings[sym.st_name], data, size); size 402 src/Elf.cpp const char *name = assert_string_valid(&strings[sh->sh_name], data, size); size 412 src/Elf.cpp internal_assert(data <= rela_ptr && rela_ptr + sizeof(Rela<T>) <= data + size); size 425 src/Elf.cpp std::unique_ptr<Object> Object::parse_object(const char *data, size_t size) { size 426 src/Elf.cpp return parse_object_internal<Types<32>>(data, size); size 485 src/Elf.cpp uint64_t offset = contents.size(); size 566 src/Elf.cpp uint64_t offset = output.size(); size 570 src/Elf.cpp if (contents.size() < s.get_size()) { size 571 src/Elf.cpp append_zeros(output, s.get_size() - contents.size()); size 588 src/Elf.cpp uint16_t shndx = safe_cast<uint16_t>(shdrs.size()); size 705 src/Elf.cpp text_phdr.p_filesz = output.size() - text_phdr.p_offset; size 709 src/Elf.cpp safe_assign(data_phdr.p_offset, output.size()); size 752 src/Elf.cpp safe_assign(symbol_idxs[s], syms.size()); size 756 src/Elf.cpp local_count = syms.size(); size 774 src/Elf.cpp size_t sym_count = syms.size(); size 835 src/Elf.cpp uint64_t offset = output.size(); size 841 src/Elf.cpp uint64_t size = output.size() - offset; size 850 src/Elf.cpp safe_assign(shdr.sh_size, size); size 857 src/Elf.cpp uint16_t shndx = safe_cast<uint16_t>(shdrs.size()); size 911 src/Elf.cpp addr_t pltrelsz = sizeof(Rela<T>)*plt_symbols.size(); size 939 src/Elf.cpp safe_assign(data_phdr.p_filesz, output.size() - data_phdr.p_offset); size 948 src/Elf.cpp ehdr.e_shoff = output.size(); size 949 src/Elf.cpp ehdr.e_shnum = shdrs.size(); size 968 src/Elf.cpp ehdr.e_phnum = phdrs.size(); size 74 src/Elf.h uint32_t size = 0; size 103 src/Elf.h Symbol &define(const Section *section, uint64_t offset, uint32_t size) { size 106 src/Elf.h this->size = size; size 112 src/Elf.h uint32_t get_size() const { return size; } size 221 src/Elf.h uint64_t size = 0; size 259 src/Elf.h Section &set_size(uint64_t size) { size 260 src/Elf.h this->size = size; size 263 src/Elf.h uint64_t get_size() const { return std::max((uint64_t) size, (uint64_t) contents.size()); } size 290 src/Elf.h this->contents.assign((const char *)contents.data(), (const char *)(contents.data() + contents.size())); size 303 src/Elf.h size_t contents_size() const { return contents.size(); } size 322 src/Elf.h size_t relocations_size() const { return relocs.size(); } size 425 src/Elf.h static std::unique_ptr<Object> parse_object(const char *data, size_t size); size 437 src/Elf.h size_t sections_size() const { return secs.size(); } size 453 src/Elf.h size_t symbols_size() const { return syms.size(); } size 64 src/Func.cpp std::vector<Var> args(arg_names.size()); size 65 src/Func.cpp for (size_t i = 0; i < arg_names.size(); i++) { size 109 src/Func.cpp user_assert(func.update(idx).values().size() == 1) size 135 src/Func.cpp std::vector<RVar> rvs(rvars.size()); size 136 src/Func.cpp for (size_t i = 0; i < rvars.size(); i++) { size 153 src/Func.cpp return static_cast<int>(func.updates().size()); size 217 src/Func.cpp while ((int)args.size() < dimensions()) { size 225 src/Func.cpp if (func.has_pure_definition() && args.size() != (size_t)dimensions()) { size 227 src/Func.cpp << args.size() << " arguments, but was defined with " << dimensions() << "\n"; size 247 src/Func.cpp while ((int)args.size() < dimensions()) { size 255 src/Func.cpp if (func.has_pure_definition() && args.size() != (size_t)dimensions()) { size 257 src/Func.cpp << args.size() << " arguments, but was defined with " << dimensions() << "\n"; size 280 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 325 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 345 src/Func.cpp for (size_t i = 0; i < definition.schedule().dims().size(); i++) { size 607 src/Func.cpp internal_assert(prover_result.size() == values.size()); size 617 src/Func.cpp vector<bool> is_rfactored(dims.size(), false); size 648 src/Func.cpp for (int i = dims.size() - 1; i >= 0; --i) { size 703 src/Func.cpp vector<RVar> rvars_kept(preserved.size()); size 705 src/Func.cpp vector<Var> vars_rename(preserved.size()); size 706 src/Func.cpp for (size_t i = 0; i < preserved.size(); ++i) { size 747 src/Func.cpp vector<Expr> init_vals(values.size()); size 748 src/Func.cpp for (size_t i = 0; i < init_vals.size(); ++i) { size 756 src/Func.cpp vector<Expr> update_args(args.size() + vars_rename.size()); size 762 src/Func.cpp for (size_t i = 0; i < intm_rvars.size(); ++i) { size 765 src/Func.cpp for (size_t i = 0; i < vars_rename.size(); i++) { size 766 src/Func.cpp update_args[i + args.size()] = vars_rename[i]; size 773 src/Func.cpp for (size_t i = 0; i < args.size(); i++) { size 793 src/Func.cpp vector<Expr> update_vals(values.size()); size 794 src/Func.cpp for (size_t i = 0; i < update_vals.size(); i++) { size 812 src/Func.cpp internal_assert(intm_storage_dims.size() == storage_dims.size() + vars_rename.size()); size 813 src/Func.cpp for (size_t i = 0; i < storage_dims.size(); ++i) { size 817 src/Func.cpp for (size_t i = 0; i < rvars_kept.size(); ++i) { size 845 src/Func.cpp vector<Expr> f_store_args(dim_vars.size()); size 846 src/Func.cpp for (size_t i = 0; i < f_store_args.size(); ++i) { size 856 src/Func.cpp internal_assert(f_load_args.size() == init_args.size()); size 860 src/Func.cpp vector<Expr> f_values(values.size()); size 861 src/Func.cpp if (values.size() > 1) { size 865 src/Func.cpp for (size_t i = 0; i < f_values.size(); ++i) { size 880 src/Func.cpp for (size_t i = 0; i < f_values.size(); ++i) { size 911 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 928 src/Func.cpp for (size_t i = 0; (!found) && i < dims.size(); i++) { size 1050 src/Func.cpp for (size_t i = 0; (!found_outer) && i < dims.size(); i++) { size 1066 src/Func.cpp for (size_t i = 0; (!found_inner) && i < dims.size(); i++) { size 1128 src/Func.cpp for (size_t i = 0; i < specializations.size(); i++) { size 1169 src/Func.cpp for (size_t i = 0; (!found) && i < dims.size(); i++) { size 1201 src/Func.cpp for (size_t i = 0; (!found) && i < dims.size(); i++) { size 1230 src/Func.cpp for (size_t i = splits.size(); i > 0; i--) { size 1310 src/Func.cpp for (size_t i = 0; (!found) && i < dims.size(); i++) { size 1332 src/Func.cpp for (size_t i = splits.size(); i > 0; i--) { size 1464 src/Func.cpp void reorder_vars(vector<Dim> &dims_old, const VarOrRVar *vars, size_t size, const Stage &stage) { size 1468 src/Func.cpp vector<size_t> idx(size); size 1469 src/Func.cpp for (size_t i = 0; i < size; i++) { size 1471 src/Func.cpp for (size_t j = 0; j < dims.size(); j++) { size 1485 src/Func.cpp for (size_t i = 0; i < idx.size(); i++) { size 1487 src/Func.cpp for (size_t j = i+1; j < idx.size(); j++) { size 1504 src/Func.cpp for (size_t i = 0; i < size; i++) { size 1513 src/Func.cpp reorder_vars(definition.schedule().dims(), &vars[0], vars.size(), *this); size 1795 src/Func.cpp for (size_t i = 1; i < fs.size(); ++i) { size 1948 src/Func.cpp for (size_t i = 0; i < func.args().size(); i++) { size 1983 src/Func.cpp for (size_t i = 0; i < func.args().size(); i++) { size 2196 src/Func.cpp for (size_t i = 0; i < sched.bounds().size(); i++) { size 2236 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 2253 src/Func.cpp for (size_t i = start + 1; i < dims.size(); i++) { size 2256 src/Func.cpp if ((dims.size() - start) > 2) { size 2263 src/Func.cpp user_assert(dims.size() > 1) << size 2273 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 2288 src/Func.cpp for (size_t i = 0; i < dims.size(); i++) { size 2387 src/Func.cpp for (size_t i = 0; i < e.size(); i++) { size 2406 src/Func.cpp Internal::check_call_arg_types(f.name(), &args, args.size()); size 2412 src/Func.cpp args.resize(a.size()); size 2413 src/Func.cpp for (size_t i = 0; i < a.size(); i++) { size 2421 src/Func.cpp for (size_t i = 0; i < a.size(); i++) { size 2425 src/Func.cpp for (size_t i = 0; i < e.size(); i++) { size 2431 src/Func.cpp for (size_t i = 0; i < a.size(); i++) { size 2462 src/Func.cpp for (size_t j = 0; j < a.size(); j++) { size 2484 src/Func.cpp for (size_t i = 0; i < args.size(); ++i) { size 2493 src/Func.cpp vector<string> expanded_args_str(expanded_args.size()); size 2494 src/Func.cpp for (size_t i = 0; i < expanded_args.size(); ++i) { size 2505 src/Func.cpp size_t update_stage = func.updates().size() - 1; size 2514 src/Func.cpp if (e.size() == 1) { size 2527 src/Func.cpp vector<Var> pure_args(a.size()); size 2530 src/Func.cpp for (size_t i = 0; i < a.size(); i++) { size 2550 src/Func.cpp internal_assert(e.size() > 1); size 2552 src/Func.cpp vector<Expr> init_values(e.size()); size 2553 src/Func.cpp for (int i = 0; i < (int)init_values.size(); ++i) { size 2559 src/Func.cpp vector<Expr> values(e.size()); size 2560 src/Func.cpp for (int i = 0; i < (int)values.size(); ++i) { size 2578 src/Func.cpp if (e.size() == 1) { size 2586 src/Func.cpp if (e.size() == 1) { size 2598 src/Func.cpp if (e.size() == 1) { size 2606 src/Func.cpp if (e.size() == 1) { size 2618 src/Func.cpp if (e.size() == 1) { size 2626 src/Func.cpp if (e.size() == 1) { size 2638 src/Func.cpp if (e.size() == 1) { size 2646 src/Func.cpp if (e.size() == 1) { size 2685 src/Func.cpp internal_assert(func_ref.size() > 1) size 2687 src/Func.cpp internal_assert(idx >= 0 && idx < (int)func_ref.size()); size 2691 src/Func.cpp vector<Expr> values(func_ref.size()); size 2692 src/Func.cpp for (int i = 0; i < (int)values.size(); ++i) { size 2760 src/Func.cpp for (size_t i = 0; i < outputs.size(); i++) { size 2779 src/Func.cpp user_assert(func.output_buffers().size() == 1) size 2789 src/Func.cpp vector<OutputImageParam> bufs(func.output_buffers().size()); size 2790 src/Func.cpp for (size_t i = 0; i < bufs.size(); i++) { size 69 src/Func.h internal_assert(definition.args().size() == dim_vars.size()); size 78 src/Func.h std::vector<Var> dim_vars(args.size()); size 79 src/Func.h for (size_t i = 0; i < args.size(); i++) { size 82 src/Func.h internal_assert(definition.args().size() == dim_vars.size()); size 405 src/Func.h EXPORT size_t size() const; size 79 src/Function.cpp for (size_t j = 0; j < init_def.args().size() && j < 4; j++) { size 148 src/Function.cpp for (size_t i = 0; i < op->args.size(); i++) { size 169 src/Function.cpp for (size_t i = 0; i < pure_args.size(); i++) { size 253 src/Function.cpp for (size_t i = 0; i < n.size(); i++) { size 379 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 389 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 395 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 400 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 414 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 420 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 441 src/Function.cpp pure_def_args.resize(args.size()); size 442 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 446 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 459 src/Function.cpp contents->output_types.resize(values.size()); size 460 src/Function.cpp for (size_t i = 0; i < contents->output_types.size(); i++) { size 464 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 466 src/Function.cpp if (values.size() > 1) { size 469 src/Function.cpp Parameter output(values[i].type(), true, args.size(), buffer_name); size 475 src/Function.cpp int update_idx = static_cast<int>(contents->updates.size()); size 486 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 494 src/Function.cpp user_assert((int)_args.size() == dimensions()) size 498 src/Function.cpp user_assert(values.size() == contents->init_def.values().size()) size 504 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 512 src/Function.cpp if (values.size()) { size 523 src/Function.cpp vector<Expr> args(_args.size()); size 524 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 532 src/Function.cpp vector<string> pure_args(args.size()); size 534 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 561 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 564 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 574 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 577 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 589 src/Function.cpp for (size_t i = 0; i < pure_args.size(); i++) { size 595 src/Function.cpp for (size_t i = 0; i < check.reduction_domain.domain().size(); i++) { size 601 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 604 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 617 src/Function.cpp for (size_t i = 0; i < args.size(); i++) { size 620 src/Function.cpp for (size_t i = 0; i < values.size(); i++) { size 633 src/Function.cpp for (size_t i = 0; i < check.reduction_domain.domain().size(); i++) { size 650 src/Function.cpp for (size_t i = 0; i < pure_args.size(); i++) { size 702 src/Function.cpp for (size_t i = 0; i < types.size(); i++) { size 704 src/Function.cpp if (types.size() > 1) { size 740 src/Function.cpp std::vector<std::string> arg_names(pure_def_args.size()); size 741 src/Function.cpp for (size_t i = 0; i < pure_def_args.size(); i++) { size 750 src/Function.cpp return contents->init_def.args().size(); size 774 src/Function.cpp internal_assert(idx < (int)contents->updates.size()) << "Invalid update definition index\n"; size 779 src/Function.cpp internal_assert(idx < (int)contents->updates.size()) << "Invalid update definition index\n"; size 784 src/Function.cpp internal_assert(idx < (int)contents->updates.size()) << "Invalid update definition index\n"; size 146 src/Function.h return (int)output_types().size(); size 236 src/FuseGPUThreadLoops.cpp Expr size; size 243 src/FuseGPUThreadLoops.cpp max_size_bytes = simplify(alloc.type.bytes() * alloc.size); size 249 src/FuseGPUThreadLoops.cpp max_size_bytes = simplify(max(max_size_bytes, simplify(alloc.size * alloc.type.bytes()))); size 294 src/FuseGPUThreadLoops.cpp if (expr_uses_var(s.size, op->name)) { size 295 src/FuseGPUThreadLoops.cpp s.size = bounds_of_expr_in_scope(s.size, scope).max; size 345 src/FuseGPUThreadLoops.cpp alloc.size = 1; size 346 src/FuseGPUThreadLoops.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 347 src/FuseGPUThreadLoops.cpp alloc.size *= op->extents[i]; size 349 src/FuseGPUThreadLoops.cpp alloc.size = simplify(alloc.size); size 402 src/FuseGPUThreadLoops.cpp if (expr_uses_var(s.size, op->name)) { size 403 src/FuseGPUThreadLoops.cpp s.size = simplify(Let::make(op->name, op->value, s.size)); size 421 src/FuseGPUThreadLoops.cpp Expr alloc_size = simplify(alloc.size); size 434 src/FuseGPUThreadLoops.cpp for (int i = free_spaces.size() - 1; i >= 0; --i) { size 435 src/FuseGPUThreadLoops.cpp internal_assert(free_spaces[i] >= 0 && free_spaces[i] < (int)mem_allocs.size()); size 446 src/FuseGPUThreadLoops.cpp for (int i = free_spaces.size() - 1; i >= 0; --i) { size 447 src/FuseGPUThreadLoops.cpp internal_assert(free_spaces[i] >= 0 && free_spaces[i] < (int)mem_allocs.size()); size 451 src/FuseGPUThreadLoops.cpp Expr size = alloc_size * alloc.type.bytes(); size 452 src/FuseGPUThreadLoops.cpp Expr dist = mem_allocs[free_spaces[i]].max_size_bytes - size; size 491 src/FuseGPUThreadLoops.cpp for (int i = start_idx; i < (int)allocations.size(); ++i) { size 504 src/FuseGPUThreadLoops.cpp for (int j = 0; j < (int)mem_allocs.size(); ++j) { // Find the index of the space to free size 510 src/FuseGPUThreadLoops.cpp internal_assert(free_idx >= 0 && free_idx < (int)mem_allocs.size()); size 528 src/FuseGPUThreadLoops.cpp alloc.type, {alloc.size}, const_true(), s); size 549 src/FuseGPUThreadLoops.cpp sentinel.size = 0; size 553 src/FuseGPUThreadLoops.cpp Expr total_size = Variable::make(Int(32), "group_" + std::to_string(mem_allocs.size()-1) + ".shared_offset"); size 559 src/FuseGPUThreadLoops.cpp for (int i = (int)(mem_allocs.size()) - 1; i >= 0; i--) { size 30 src/Generator.cpp for (size_t i = 1; i < n.size(); ++i) { size 190 src/Generator.cpp for (size_t i = 0; i < values.size(); ++i) { size 196 src/Generator.cpp internal_assert(history.size() == values.size()) size 197 src/Generator.cpp << "Expected values of size " << history.size() size 198 src/Generator.cpp << " but saw size " << values.size() size 202 src/Generator.cpp for (size_t i = 0; i < values.size(); ++i) { size 220 src/Generator.cpp if (history[i].size() > max_unique_values) { size 264 src/Generator.cpp internal_assert(namespaces.size() >= 1); size 268 src/Generator.cpp internal_assert(namespaces.size() >= 2); size 536 src/Generator.cpp for (size_t i = 0; i < inputs.size(); ++i) { size 696 src/Generator.cpp for (int i = (int)namespaces.size() - 1; i >= 0 ; --i) { size 720 src/Generator.cpp user_assert(a.size() == b.size()) << "Mismatch in Function vector length.\n"; size 721 src/Generator.cpp for (size_t i = 0; i < a.size(); ++i) { size 801 src/Generator.cpp if (v.size() != 2 || v[0].empty() || v[1].empty()) { size 826 src/Generator.cpp if (generator_names.size() == 0 && runtime_name.empty()) { size 835 src/Generator.cpp if (generator_names.size() > 1) { size 859 src/Generator.cpp const bool stub_only = (emit_flags.size() == 1 && emit_flags[0] == "cpp_stub"); size 875 src/Generator.cpp if (emit_flags.empty() || (emit_flags.size() == 1 && emit_flags[0].empty())) { size 912 src/Generator.cpp if (subst_pair.size() != 2) { size 928 src/Generator.cpp if (targets.size() != 1) { size 960 src/Generator.cpp if (targets.size() > 1 || !emit_options.substitutions.empty()) { size 1057 src/Generator.cpp GeneratorBase::GeneratorBase(size_t size, const void *introspection_helper) size 1058 src/Generator.cpp : size(size) { size 1059 src/Generator.cpp ObjectInstanceRegistry::register_instance(this, size, ObjectInstanceRegistry::Generator, this, introspection_helper); size 1079 src/Generator.cpp GeneratorBase::ParamInfo::ParamInfo(GeneratorBase *generator, const size_t size) { size 1082 src/Generator.cpp generator, size, ObjectInstanceRegistry::FilterParam); size 1111 src/Generator.cpp generator, size, ObjectInstanceRegistry::GeneratorInput); size 1125 src/Generator.cpp generator, size, ObjectInstanceRegistry::GeneratorOutput); size 1138 src/Generator.cpp if (filter_params.size() > 0 && filter_inputs.size() > 0) { size 1142 src/Generator.cpp if (filter_params.size() > 0 && filter_outputs.size() > 0) { size 1146 src/Generator.cpp if (filter_inputs.size() > 0 && filter_outputs.size() == 0) { size 1152 src/Generator.cpp generator, size, ObjectInstanceRegistry::GeneratorParam); size 1174 src/Generator.cpp generator, size, ObjectInstanceRegistry::ScheduleParam); size 1189 src/Generator.cpp param_info_ptr.reset(new ParamInfo(this, size)); size 1206 src/Generator.cpp user_assert(!output->is_array() && output->funcs().size() == 1) << "Output " << n << " must be accessed via get_output_vector()\n"; size 1271 src/Generator.cpp user_assert(pi.filter_params.size() == 0) size 1273 src/Generator.cpp user_assert(inputs.size() == pi.filter_inputs.size()) size 1274 src/Generator.cpp << "Expected exactly " << pi.filter_inputs.size() size 1275 src/Generator.cpp << " inputs but got " << inputs.size() << "\n"; size 1276 src/Generator.cpp for (size_t i = 0; i < pi.filter_inputs.size(); ++i) { size 1336 src/Generator.cpp user_assert(pi.filter_params.size() == 0) << "May not use generate() method with Param<> or ImageParam."; size 1337 src/Generator.cpp user_assert(pi.filter_outputs.size() > 0) << "Must use Output<> with generate() method."; size 1368 src/Generator.cpp user_assert(pi.filter_inputs.size() == 0) << "May not use build() method with Input<>."; size 1369 src/Generator.cpp user_assert(pi.filter_outputs.size() == 0) << "May not use build() method with Output<>."; size 1381 src/Generator.cpp user_assert(pi.filter_outputs.size() > 0) << "Must use get_pipeline<> with Output<>."; size 1392 src/Generator.cpp user_assert((int)f.outputs() == (int)output->types().size()) << "Output \"" << f.name() size 1393 src/Generator.cpp << "\" requires a Tuple of size " << output->types().size() size 1395 src/Generator.cpp for (size_t i = 0; i < f.output_types().size(); ++i) { size 1420 src/Generator.cpp if (param_info().filter_params.size() > 0) { size 1520 src/Generator.cpp internal_assert(types_.size() == 1) << "Expected types_.size() == 1, saw " << types_.size() << " for " << name() << "\n"; size 1534 src/Generator.cpp internal_assert(funcs_.size() == array_size() && exprs_.empty()); size 1539 src/Generator.cpp internal_assert(exprs_.size() == array_size() && funcs_.empty()); size 1557 src/Generator.cpp user_assert(f.output_types().size() == 1) size 1590 src/Generator.cpp user_assert(types().size() == t.size()) << "Type mismatch for " << name() << ": expected " << types().size() << " types but saw " << t.size(); size 1591 src/Generator.cpp for (size_t i = 0; i < t.size(); ++i) { size 1605 src/Generator.cpp void GIOBase::check_matching_array_size(size_t size) { size 1607 src/Generator.cpp user_assert(array_size() == size) << "ArraySize mismatch for " << name() << ": expected " << array_size() << " saw " << size; size 1609 src/Generator.cpp array_size_ = size; size 1650 src/Generator.cpp const size_t expected = (kind() != IOKind::Scalar) ? funcs().size() : exprs().size(); size 1651 src/Generator.cpp user_assert(parameters_.size() == expected) << "Expected parameters_.size() == " size 1652 src/Generator.cpp << expected << ", saw " << parameters_.size() << " for " << name() << "\n"; size 1682 src/Generator.cpp check_matching_array_size(inputs.size()); size 1683 src/Generator.cpp for (size_t i = 0; i < inputs.size(); ++i) { size 1749 src/Generator.cpp void GeneratorOutputBase::resize(size_t size) { size 1753 src/Generator.cpp array_size_ = (int) size; size 1140 src/Generator.h EXPORT void check_matching_array_size(size_t size); size 1242 src/Generator.h size_t size() const { size 1243 src/Generator.h return get_values<ValueType>().size(); size 1293 src/Generator.h internal_assert(this->parameters_.size() == 1); size 1678 src/Generator.h EXPORT void resize(size_t size); size 1688 src/Generator.h internal_assert(funcs_.size() == array_size() && exprs_.empty()); size 1694 src/Generator.h internal_assert(funcs_.size() == array_size() && exprs_.empty()); size 1750 src/Generator.h size_t size() const { size 1751 src/Generator.h return get_values<ValueType>().size(); size 1778 src/Generator.h void resize(size_t size) { size 1779 src/Generator.h GeneratorOutputBase::resize(size); size 1805 src/Generator.h user_assert(t.size() <= 1) << "Output<Buffer<>>(" << name << ") requires at most one Type, but has " << t.size() << "\n"; size 1825 src/Generator.h internal_assert(this->funcs().size() == 1); size 1854 src/Generator.h internal_assert(this->exprs_.empty() && this->funcs_.size() == 1); size 1872 src/Generator.h user_assert(output_types.size() == 1) size 1873 src/Generator.h << "Output should have size=1 but saw size=" << output_types.size() << "\n"; size 1889 src/Generator.h internal_assert(this->exprs_.empty() && this->funcs_.size() == 1); size 1904 src/Generator.h internal_assert(this->exprs_.empty() && this->funcs_.size() > i); size 2295 src/Generator.h user_assert(sizeof...(args) == pi.filter_inputs.size()) size 2296 src/Generator.h << "Expected exactly " << pi.filter_inputs.size() size 2331 src/Generator.h EXPORT GeneratorBase(size_t size, const void *introspection_helper); size 2392 src/Generator.h EXPORT ParamInfo(GeneratorBase *generator, const size_t size); size 2420 src/Generator.h const size_t size; size 2530 src/Generator.h siv.reserve(arg.size()); size 2552 src/Generator.h siv.reserve(arg.size()); size 2581 src/Generator.h siv.reserve(arg.size()); size 640 src/HexagonOffload.cpp Expr buffer_ptr(const uint8_t* buffer, size_t size, const char* name) { size 641 src/HexagonOffload.cpp Buffer<uint8_t> code((int)size, name); size 642 src/HexagonOffload.cpp memcpy(code.data(), buffer, (int)size); size 881 src/HexagonOffload.cpp auto obj = Elf::Object::parse_object(object.data(), object.size()); size 917 src/HexagonOffload.cpp f.write(shared_object.data(), shared_object.size()); size 933 src/HexagonOffload.cpp f.read(shared_object.data(), shared_object.size()); size 939 src/HexagonOffload.cpp Halide::Buffer<uint8_t> result_buf(shared_object.size(), device_code.name()); size 940 src/HexagonOffload.cpp memcpy(result_buf.data(), shared_object.data(), shared_object.size()); size 49 src/HexagonOptimize.cpp if (!c || c->args.size() != 1) return false; size 195 src/HexagonOptimize.cpp for (size_t i = 0; i < matches.size() && is_match; i++) { size 230 src/HexagonOptimize.cpp internal_assert(matches.size() >= 2); size 234 src/HexagonOptimize.cpp internal_assert(matches.size() >= 3); size 393 src/HexagonOptimize.cpp if ((int)mpys.size() >= max_mpy_count) { size 484 src/HexagonOptimize.cpp if (mpy_count > 0 && mpys.size() == 4) { size 525 src/HexagonOptimize.cpp if (mpy_count > 0 && mpys.size() == 4) { size 573 src/HexagonOptimize.cpp if (mpy_count > 0 && mpys.size() == 2) { size 883 src/HexagonOptimize.cpp internal_assert(op->args.size() == 3); size 886 src/HexagonOptimize.cpp internal_assert(op->args.size() == 1); size 364 src/IR.cpp for (size_t i = 0; i < values.size(); i++) { size 367 src/IR.cpp for (size_t i = 0; i < args.size(); i++) { size 381 src/IR.cpp for (size_t i = 0; i < extents.size(); i++) { size 403 src/IR.cpp for (size_t i = 0; i < extents.size(); i++) { size 442 src/IR.cpp for (size_t i = 0; i < bounds.size(); i++) { size 463 src/IR.cpp for (size_t i = 0; i < bounds.size(); i++) { size 502 src/IR.cpp for (size_t i = stmts.size()-1; i > 0; i--) { size 541 src/IR.cpp internal_assert(args.size() % 2 == 0) size 544 src/IR.cpp for (size_t i = 0; i < args.size(); i++) { size 548 src/IR.cpp for (size_t i = 0; i < args.size(); i++) { size 555 src/IR.cpp for (size_t i = 0; i < args.size(); i++) { size 599 src/IR.cpp node->type = element_ty.with_lanes((int)indices.size()); size 608 src/IR.cpp if (vectors.size() == 1) { size 621 src/IR.cpp for (int j = 0; j < (int)vectors.size(); j++) { size 632 src/IR.cpp if (vectors.size() == 1) { size 638 src/IR.cpp for (int i = 0; i < (int)vectors.size(); i++) { size 647 src/IR.cpp Expr Shuffle::make_slice(const Expr &vector, int begin, int stride, int size) { size 648 src/IR.cpp if (begin == 0 && size == vector.type().lanes() && stride == 1) { size 653 src/IR.cpp for (int i = 0; i < size; i++) { size 679 src/IR.cpp if (lanes * vectors.size() != indices.size()) { size 683 src/IR.cpp for (int i = 0; i < (int)vectors.size(); i++) { size 685 src/IR.cpp if (indices[j * (int)vectors.size() + i] != i * lanes + j) { size 699 src/IR.cpp for (size_t i = 0; i + 1 < indices.size(); i++) { size 717 src/IR.cpp return indices.size() == input_lanes && is_ramp(indices); size 728 src/IR.cpp return indices.size() < input_lanes && is_ramp(indices, slice_stride()); size 732 src/IR.cpp return indices.size() == 1; size 689 src/IR.h EXPORT static Expr make_slice(const Expr &vector, int begin, int stride, int size); size 709 src/IR.h int slice_stride() const { return indices.size() >= 2 ? indices[1] - indices[0] : 1; } size 226 src/IREquality.cpp compare_scalar(a.size(), b.size()); size 227 src/IREquality.cpp for (size_t i = 0; (i < a.size()) && result == Equal; i++) { size 403 src/IREquality.cpp compare_scalar(s->types.size(), op->types.size()); size 404 src/IREquality.cpp compare_scalar(s->bounds.size(), op->bounds.size()); size 405 src/IREquality.cpp for (size_t i = 0; (result == Equal) && (i < s->types.size()); i++) { size 408 src/IREquality.cpp for (size_t i = 0; (result == Equal) && (i < s->bounds.size()); i++) { size 448 src/IREquality.cpp compare_scalar(e->indices.size(), op->indices.size()); size 449 src/IREquality.cpp for (size_t i = 0; (i < e->indices.size()) && result == Equal; i++) { size 458 src/IREquality.cpp compare_scalar(s->bounds.size(), op->bounds.size()); size 459 src/IREquality.cpp for (size_t i = 0; (result == Equal) && (i < s->bounds.size()); i++) { size 60 src/IREquality.h for (size_t i = 0; i < entries.size(); i++) { size 35 src/IRMatch.cpp matches.size() == 3 && size 231 src/IRMatch.cpp e->args.size() == op->args.size()) { size 232 src/IRMatch.cpp for (size_t i = 0; result && (i < e->args.size()); i++) { size 44 src/IRMutator.cpp Region new_bounds(bounds.size()); size 47 src/IRMutator.cpp for (size_t i = 0; i < bounds.size(); i++) { size 143 src/IRMutator.cpp vector<Expr > new_args(op->args.size()); size 147 src/IRMutator.cpp for (size_t i = 0; i < op->args.size(); i++) { size 229 src/IRMutator.cpp vector<Expr> new_args(op->args.size()); size 230 src/IRMutator.cpp vector<Expr> new_values(op->values.size()); size 234 src/IRMutator.cpp for (size_t i = 0; i < op->args.size(); i++) { size 241 src/IRMutator.cpp for (size_t i = 0; i < op->values.size(); i++) { size 258 src/IRMutator.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 349 src/IRMutator.cpp vector<Expr > new_vectors(op->vectors.size()); size 352 src/IRMutator.cpp for (size_t i = 0; i < op->vectors.size(); i++) { size 661 src/IROperator.cpp if (i < mins.size()) { size 666 src/IROperator.cpp if (i < extents.size()) { size 671 src/IROperator.cpp if (i < strides.size()) { size 775 src/IROperator.cpp std::vector<Expr> strings(args.size()*2); size 776 src/IROperator.cpp for (size_t i = 0; i < args.size(); i++) { size 778 src/IROperator.cpp if (i < args.size() - 1) { size 151 src/IRPrinter.cpp for (size_t i = 0; i < p.ops.size(); ++i) { size 162 src/IRPrinter.cpp for (size_t i = 0; i < op.xs.size(); ++i) { size 222 src/IRPrinter.cpp for (size_t i = 0; i < function.args.size(); i++) { size 224 src/IRPrinter.cpp if (i + 1 < function.args.size()) { size 263 src/IRPrinter.cpp for (size_t i = 0; i < exprs.size(); i++) { size 265 src/IRPrinter.cpp if (i < exprs.size() - 1) { size 305 src/IRPrinter.cpp for (size_t i = 0; i < op->value.size(); i++) { size 596 src/IRPrinter.cpp if (op->values.size() > 1) { size 600 src/IRPrinter.cpp if (op->values.size() > 1) { size 610 src/IRPrinter.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 638 src/IRPrinter.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 644 src/IRPrinter.cpp if (i < op->bounds.size() - 1) stream << ", "; size 664 src/IRPrinter.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 670 src/IRPrinter.cpp if (i < op->bounds.size() - 1) stream << ", "; size 734 src/IRPrinter.cpp stream << ", " << op->slice_begin() << ", " << op->slice_stride() << ", " << op->indices.size(); size 740 src/IRPrinter.cpp for (size_t i = 0; i < op->indices.size(); i++) { size 742 src/IRPrinter.cpp if (i < op->indices.size() - 1) { size 128 src/IRVisitor.cpp for (size_t i = 0; i < op->args.size(); i++) { size 136 src/IRVisitor.cpp for (size_t i = 0; i < f.extern_arguments().size(); i++) { size 178 src/IRVisitor.cpp for (size_t i = 0; i < op->values.size(); i++) { size 181 src/IRVisitor.cpp for (size_t i = 0; i < op->args.size(); i++) { size 187 src/IRVisitor.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 201 src/IRVisitor.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 210 src/IRVisitor.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 380 src/IRVisitor.cpp for (size_t i = 0; i < op->args.size(); i++) { size 417 src/IRVisitor.cpp for (size_t i = 0; i < op->values.size(); i++) { size 420 src/IRVisitor.cpp for (size_t i = 0; i < op->args.size(); i++) { size 426 src/IRVisitor.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 440 src/IRVisitor.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 449 src/IRVisitor.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 95 src/InjectHostDevBufferCopies.cpp buffers_to_track.erase(op->name.substr(0, op->name.size() - 7)); size 119 src/InjectHostDevBufferCopies.cpp buffers_to_track.insert(op->name.substr(0, op->name.size() - 7)); size 437 src/InjectHostDevBufferCopies.cpp internal_assert(op->args.size() >= 2); size 440 src/InjectHostDevBufferCopies.cpp string buf_name = buffer_var->name.substr(0, buffer_var->name.size() - 7); size 448 src/InjectHostDevBufferCopies.cpp internal_assert(op->args.size() >= 2); size 451 src/InjectHostDevBufferCopies.cpp string buf_name = buffer_var->name.substr(0, buffer_var->name.size() - 7); size 514 src/InjectHostDevBufferCopies.cpp string buf_name = op->name.substr(0, op->name.size() - 7); size 544 src/InjectHostDevBufferCopies.cpp user_assert(op->extents.size() <= 4) size 550 src/InjectHostDevBufferCopies.cpp bool on_single_device = ((buf_info.devices_touched.size() < 2) || size 551 src/InjectHostDevBufferCopies.cpp (buf_info.devices_touched.size() == 2 && size 608 src/InjectHostDevBufferCopies.cpp for (size_t i = body_lets.size(); i > 0; i--) { size 626 src/InjectHostDevBufferCopies.cpp string buf_name = op->name.substr(0, op->name.size() - 7); size 33 src/InjectImageIntrinsics.cpp internal_assert(provide->values.size() == 1) size 35 src/InjectImageIntrinsics.cpp user_assert(provide->args.size() == 3) size 75 src/InjectImageIntrinsics.cpp while (padded_call_args.size() < 3) { size 85 src/InjectImageIntrinsics.cpp for (size_t i = 0; i < padded_call_args.size(); i++) { size 92 src/InjectImageIntrinsics.cpp if (i < call->args.size()) { size 77 src/InjectOpenGLIntrinsics.cpp user_assert(call->args.size() == 6) size 38 src/Inline.cpp for (size_t i = 0; i < s.dims().size(); i++) { size 55 src/Inline.cpp for (size_t i = 0; i < s.splits().size(); i++) { size 76 src/Inline.cpp for (size_t i = 0; i < s.bounds().size(); i++) { size 102 src/Inline.cpp vector<Expr> args(op->args.size()); size 103 src/Inline.cpp for (size_t i = 0; i < args.size(); i++) { size 112 src/Inline.cpp internal_assert(args.size() == func_args.size()); size 114 src/Inline.cpp for (size_t i = 0; i < args.size(); i++) { size 94 src/InlineReductions.cpp for (size_t i = 0; i < free_vars.size(); i++) { size 184 src/InlineReductions.cpp int value_index = (int)initial_tup.size()-1; size 213 src/InlineReductions.cpp int value_index = (int)initial_tup.size()-1; size 29 src/Introspection.cpp void get_program_name(char *name, int32_t size) { size 30 src/Introspection.cpp _NSGetExecutablePath(name, &size); size 35 src/Introspection.cpp void get_program_name(char *name, int32_t size) { size 36 src/Introspection.cpp strncpy(name, program_invocation_name, size); size 147 src/Introspection.cpp uint64_t size; size 157 src/Introspection.cpp TypeInfo() : size(0), def_loc(0), type(Primitive) {} size 169 src/Introspection.cpp last_slash >= binary.size() - 1) { size 174 src/Introspection.cpp std::string file_only = binary.substr(last_slash, binary.size() - last_slash); size 199 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 240 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 244 src/Introspection.cpp for (size_t j = 0; j < f.variables.size(); j++) { size 246 src/Introspection.cpp for (size_t k = 0; k < v.live_ranges.size(); k++) { size 253 src/Introspection.cpp for (size_t i = 0; i < source_lines.size(); i++) { size 257 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 271 src/Introspection.cpp if (query_type[query_type.size()-1] == '?' && size 272 src/Introspection.cpp starts_with(actual_type, query_type.substr(0, query_type.size()-1))) { size 288 src/Introspection.cpp size_t hi = global_variables.size(); size 300 src/Introspection.cpp if (lo >= global_variables.size()) { size 316 src/Introspection.cpp uint64_t size = t->size; size 319 src/Introspection.cpp size *= t->size; size 321 src/Introspection.cpp end_ptr += size; size 343 src/Introspection.cpp for (; (size_t)idx < global_variables.size() && global_variables[idx].addr <= address; idx++) { size 347 src/Introspection.cpp if (v.type && v.type->type == TypeInfo::Array && v.type->size) { size 365 src/Introspection.cpp int64_t array_size_bytes = v.type->size * elem_type->size; size 369 src/Introspection.cpp pos_bytes % elem_type->size == 0) { size 371 src/Introspection.cpp oss << v.name << '[' << (pos_bytes / elem_type->size) << ']'; size 382 src/Introspection.cpp void register_heap_object(const void *obj, size_t size, const void *helper) { size 400 src/Introspection.cpp internal_assert(ptr.type->members.size() == 1); size 406 src/Introspection.cpp internal_assert(size == object_type->size); size 413 src/Introspection.cpp for (size_t i = 0; i < object_type->members.size(); i++) { size 428 src/Introspection.cpp for (size_t i = 0; i < heap_object.members.size(); i++) { size 440 src/Introspection.cpp for (size_t j = 0; j < parent.type->members.size(); j++) { size 470 src/Introspection.cpp for (size_t i = 0; i < heap_object.members.size(); i++) { size 478 src/Introspection.cpp void deregister_heap_object(const void *obj, size_t size) { size 505 src/Introspection.cpp uint64_t object_end = object_start + obj.type->size; size 515 src/Introspection.cpp for (size_t i = 0; i < obj.members.size(); i++) { size 536 src/Introspection.cpp uint64_t array_end_addr = array_start_addr + t->size * elem_type->size; size 542 src/Introspection.cpp uint64_t containing_elem = (addr - array_start_addr) / elem_type->size; size 543 src/Introspection.cpp addr -= containing_elem * elem_type->size; size 553 src/Introspection.cpp uint64_t struct_end_addr = struct_start_addr + t->size; size 656 src/Introspection.cpp for (size_t j = 0; j < func->variables.size(); j++) { size 661 src/Introspection.cpp if (var.live_ranges.size()) { size 663 src/Introspection.cpp for (size_t i = 0; i < var.live_ranges.size(); i++) { size 678 src/Introspection.cpp if (type && type->type == TypeInfo::Array && type->size) { size 695 src/Introspection.cpp int64_t array_size_bytes = type->size * elem_type->size; size 699 src/Introspection.cpp pos_bytes % elem_type->size == 0) { size 701 src/Introspection.cpp oss << var.name << '[' << (pos_bytes / elem_type->size) << ']'; size 717 src/Introspection.cpp if (!source_lines.size()) { size 726 src/Introspection.cpp int trace_size = backtrace(&trace[0], (int)(trace.size())); size 760 src/Introspection.cpp if (f->name.size() > 8 && size 767 src/Introspection.cpp size_t hi = source_lines.size(); size 796 src/Introspection.cpp for (size_t i = 0; i < types.size(); i++) { size 799 src/Introspection.cpp (unsigned long long)(types[i].size), size 801 src/Introspection.cpp for (size_t j = 0; j < types[i].members.size(); j++) { size 816 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 823 src/Introspection.cpp for (size_t j = 0; j < f.variables.size(); j++) { size 835 src/Introspection.cpp for (size_t k = 0; k < v.live_ranges.size(); k++) { size 844 src/Introspection.cpp for (size_t i = 0; i < source_lines.size(); i++) { size 852 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1085 src/Introspection.cpp if (func_stack.size() && size 1091 src/Introspection.cpp if (type_stack.size() && size 1097 src/Introspection.cpp if (namespace_stack.size() && size 1101 src/Introspection.cpp if (live_range_stack.size() && size 1109 src/Introspection.cpp assert(abbrev_code <= entry_formats.size()); size 1125 src/Introspection.cpp if (type_stack.size()) { size 1128 src/Introspection.cpp for (size_t i = 0; i < namespace_stack.size(); i++) { size 1142 src/Introspection.cpp for (size_t i = 0; i < fmt.fields.size(); i++) { size 1361 src/Introspection.cpp type_info.size = val; size 1368 src/Introspection.cpp type_info.size = val; size 1375 src/Introspection.cpp type_info.size = val; size 1397 src/Introspection.cpp type_info.size = address_size; size 1400 src/Introspection.cpp type_info.size = val; size 1411 src/Introspection.cpp type_info.size = val; size 1422 src/Introspection.cpp type_info.size = val; size 1436 src/Introspection.cpp type_info.size = val; size 1476 src/Introspection.cpp if (type_stack.size()) { size 1498 src/Introspection.cpp type_stack.size() && size 1500 src/Introspection.cpp type_stack.back().first.size = val+1; size 1502 src/Introspection.cpp type_stack.size() && size 1504 src/Introspection.cpp type_stack.back().first.size = val; size 1511 src/Introspection.cpp } else if (attr == attr_high_pc && live_ranges.size()) { size 1520 src/Introspection.cpp if (val < debug_ranges.size()) { size 1523 src/Introspection.cpp const void * const * end = (const void * const *)(debug_ranges.data() + debug_ranges.size()); size 1542 src/Introspection.cpp if (func_stack.size() && !gvar.addr) { size 1543 src/Introspection.cpp if (live_range_stack.size()) { size 1551 src/Introspection.cpp type_stack.size()) { size 1578 src/Introspection.cpp type_info.members.size() == 1) { size 1587 src/Introspection.cpp live_ranges.size() && fmt.has_children) { size 1596 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1600 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1613 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1614 src/Introspection.cpp for (size_t j = 0; j < functions[i].variables.size(); j++) { size 1619 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1620 src/Introspection.cpp for (size_t j = 0; j < functions[i].variables.size(); j++) { size 1640 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1650 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1668 src/Introspection.cpp for (size_t i = 0; i < types.size(); i++) { size 1672 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1673 src/Introspection.cpp for (size_t j = 0; j < functions[i].variables.size(); j++) { size 1679 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1684 src/Introspection.cpp for (size_t i = 0; i < types.size(); i++) { size 1685 src/Introspection.cpp for (size_t j = 0; j < types[i].members.size(); j++) { size 1692 src/Introspection.cpp for (size_t i = 0; i < types.size(); i++) { size 1699 src/Introspection.cpp assert(t->members.size() == 1); size 1703 src/Introspection.cpp assert(t->members.size() == 1); size 1707 src/Introspection.cpp assert(t->members.size() == 1); size 1711 src/Introspection.cpp if (t->size != 0) { size 1713 src/Introspection.cpp oss << '[' << t->size << ']'; size 1718 src/Introspection.cpp assert(t->members.size() == 1); size 1725 src/Introspection.cpp if (t && suffix.size()) { size 1727 src/Introspection.cpp while (suffix.size()) { size 1737 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1739 src/Introspection.cpp for (size_t j = 0; j < new_vars.size(); j++) { size 1747 src/Introspection.cpp size_t members = v.type->members.size(); size 1760 src/Introspection.cpp if (new_vars[j+k+1].name.size() && size 1761 src/Introspection.cpp new_vars[j].name.size()) { size 1770 src/Introspection.cpp if (functions[i].variables.size()) { size 1772 src/Introspection.cpp for (size_t j = 0; j < functions[i].variables.size(); j++) { size 1782 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1790 src/Introspection.cpp for (size_t j = 0; j < members.size(); j++) { size 1813 src/Introspection.cpp for (size_t i = 0; i < functions.size(); i++) { size 1823 src/Introspection.cpp for (size_t j = 0; j < f.variables.size(); j++) { size 1841 src/Introspection.cpp for (size_t i = 0; i < global_variables.size(); i++) { size 1910 src/Introspection.cpp int source_files_base = source_files.size(); size 1920 src/Introspection.cpp assert(dir <= include_dirs.size()); size 1995 src/Introspection.cpp assert(dir_index < include_dirs.size()); size 2116 src/Introspection.cpp size_t hi = functions.size(); size 2204 src/Introspection.cpp void register_heap_object(const void *obj, size_t size, const void *helper) { size 2208 src/Introspection.cpp debug_sections->register_heap_object(obj, size, helper); size 2211 src/Introspection.cpp void deregister_heap_object(const void *obj, size_t size) { size 2214 src/Introspection.cpp debug_sections->deregister_heap_object(obj, size); size 2288 src/Introspection.cpp void register_heap_object(const void *obj, size_t size, const void *helper) { size 2291 src/Introspection.cpp void deregister_heap_object(const void *obj, size_t size) { size 30 src/Introspection.h EXPORT void register_heap_object(const void *obj, size_t size, const void *helper); size 33 src/Introspection.h EXPORT void deregister_heap_object(const void *obj, size_t size); size 181 src/JITModule.cpp for (size_t i = 0; i < modules.size(); i++) { size 194 src/JITModule.cpp virtual uint8_t *allocateCodeSection(uintptr_t size, unsigned alignment, unsigned section_id, StringRef section_name) override { size 195 src/JITModule.cpp uint8_t *result = SectionMemoryManager::allocateCodeSection(size, alignment, section_id, section_name); size 196 src/JITModule.cpp code_pages.push_back({result, size}); size 313 src/JITModule.cpp for (size_t i = 0; i < listeners.size(); i++) { size 332 src/JITModule.cpp for (size_t i = 0; i < requested_exports.size(); i++) { size 341 src/JITModule.cpp for (size_t i = 0; i < listeners.size(); i++) { size 452 src/JITModule.cpp void JITModule::memoization_cache_set_size(int64_t size) const { size 456 src/JITModule.cpp return (reinterpret_bits<void (*)(int64_t)>(f->second.address))(size); size 542 src/JITModule.cpp int min, int size, uint8_t *closure) { size 545 src/JITModule.cpp return (*jit_user_context->handlers.custom_do_par_for)(context, f, min, size, closure); size 547 src/JITModule.cpp return (*active_handlers.custom_do_par_for)(context, f, min, size, closure); size 856 src/JITModule.cpp void JITSharedRuntime::memoization_cache_set_size(int64_t size) { size 859 src/JITModule.cpp if (size != default_cache_size) { size 860 src/JITModule.cpp default_cache_size = size; size 861 src/JITModule.cpp shared_runtimes(MainShared).memoization_cache_set_size(size); size 117 src/JITModule.h EXPORT void memoization_cache_set_size(int64_t size) const; size 154 src/JITModule.h EXPORT static void memoization_cache_set_size(int64_t size); size 412 src/LLVM_Runtime_Linker.cpp for (size_t i = 0; i < modules.size(); i++) { size 418 src/LLVM_Runtime_Linker.cpp for (size_t i = 1; i < modules.size(); i++) { size 522 src/LLVM_Runtime_Linker.cpp n.size() > 2 && n[0] == 1 && n[1] == '_') { size 959 src/LLVM_Runtime_Linker.cpp llvm::StringRef sb = llvm::StringRef((const char *)&bitcode[0], bitcode.size()); size 228 src/LoopCarry.cpp for (size_t i = 0; i < v.size(); i++) { size 258 src/LoopCarry.cpp debug(4) << "Found " << find_loads.result.size() << " loads\n"; size 293 src/LoopCarry.cpp for (int i = 0; i < (int)indices.size(); i++) { size 294 src/LoopCarry.cpp for (int j = 0; j < (int)indices.size(); j++) { size 318 src/LoopCarry.cpp for (size_t i = 0; i < chains.size(); i++) { size 320 src/LoopCarry.cpp for (size_t j = 0; j < chains.size(); j++) { size 330 src/LoopCarry.cpp for (size_t i = 0; i < chains.size(); i++) { size 331 src/LoopCarry.cpp while (i < chains.size() && chains[i].empty()) { size 341 src/LoopCarry.cpp [&](const vector<int> &c1, const vector<int> &c2){return c1.size() > c2.size();}); size 356 src/LoopCarry.cpp if (sz + c.size() > (size_t)max_carried_values) { size 364 src/LoopCarry.cpp sz += c.size(); size 389 src/LoopCarry.cpp for (size_t i = 0; i < c.size(); i++) { size 398 src/LoopCarry.cpp if (i == c.size() - 1) { size 433 src/LoopCarry.cpp for (size_t i = 0; i < c.size() - 1; i++) { size 444 src/LoopCarry.cpp for (size_t i = initial_lets.size(); i > 0; i--) { size 450 src/LoopCarry.cpp for (size_t i = containing_lets.size(); i > 0; i--) { size 459 src/LoopCarry.cpp (int)c.size() * loads[c.front()][0]->type.lanes(), size 491 src/LoopCarry.cpp int size; size 529 src/LoopCarry.cpp stmt = Allocate::make(alloc.name, alloc.type, {alloc.size}, const_true(), stmt); size 322 src/Lower.cpp for (size_t i = 0; i < custom_passes.size(); i++) { size 368 src/Lower.cpp for (size_t i = 0; i < args.size(); i++) { size 28 src/Memoization.cpp for (size_t i = 0; i < extern_args.size(); i++) { size 50 src/Memoization.cpp if (call->args.size() == 1) { size 54 src/Memoization.cpp for (size_t i = 1; i < call->args.size(); i++) { size 118 src/Memoization.cpp uint32_t size; size 122 src/Memoization.cpp if (size < rhs.size) { size 124 src/Memoization.cpp } else if (size == rhs.size) { size 131 src/Memoization.cpp : size(size_arg), name(name_arg) { size 221 src/Memoization.cpp StringImm::make(std::to_string(top_level_name.size()) + ":" + top_level_name + size 222 src/Memoization.cpp std::to_string(function_name.size()) + ":" + function_name), size 369 src/Memoization.cpp std::string max_stage_num = std::to_string(f.updates().size()); size 450 src/Memoization.cpp while (i < realization_name.size() && isdigit(realization_name[i])) { size 453 src/Memoization.cpp if (i == realization_name.size()) { size 482 src/Memoization.cpp internal_assert(call->args.size() >= 3) size 508 src/Memoization.cpp for (size_t i = allocations.size(); i > 0; i--) { size 214 src/Module.cpp for (size_t i = 0; i < modules.size(); i++) { size 260 src/Module.cpp Buffer<uint8_t> result(object.size(), name()); size 261 src/Module.cpp memcpy(result.data(), reinterpret_cast<uint8_t*>(&object[0]), object.size()); size 426 src/Module.cpp const bool needs_wrapper = (targets.size() > 1); size 427 src/Module.cpp if (targets.size() == 1) { size 395 src/ModulusRemainder.cpp internal_assert(op->indices.size() == 1) << "modulus_remainder of vector\n"; size 298 src/Monotonic.cpp for (size_t i = 0; i < op->args.size(); i++) { size 323 src/Monotonic.cpp for (size_t i = 0; i < op->vectors.size(); i++) { size 15 src/ObjectInstanceRegistry.cpp void ObjectInstanceRegistry::register_instance(void *this_ptr, size_t size, Kind kind, size 22 src/ObjectInstanceRegistry.cpp registry.instances[key] = InstanceInfo(size, kind, subject_ptr, true); size 23 src/ObjectInstanceRegistry.cpp Introspection::register_heap_object(this_ptr, size, introspection_helper); size 25 src/ObjectInstanceRegistry.cpp registry.instances[key] = InstanceInfo(size, kind, subject_ptr, false); size 37 src/ObjectInstanceRegistry.cpp Introspection::deregister_heap_object(this_ptr, it->second.size); size 43 src/ObjectInstanceRegistry.cpp std::vector<void *> ObjectInstanceRegistry::instances_in_range(void *start, size_t size, size 53 src/ObjectInstanceRegistry.cpp uintptr_t limit_ptr = ((uintptr_t)start) + size; size 59 src/ObjectInstanceRegistry.cpp if (it->first > (uintptr_t)start && it->second.size != 0) { size 61 src/ObjectInstanceRegistry.cpp it = registry.instances.lower_bound(it->first + it->second.size); size 55 src/ObjectInstanceRegistry.h static void register_instance(void *this_ptr, size_t size, Kind kind, void *subject_ptr, size 67 src/ObjectInstanceRegistry.h static std::vector<void *> instances_in_range(void *start, size_t size, Kind kind); size 74 src/ObjectInstanceRegistry.h size_t size; // May be 0 for params size 78 src/ObjectInstanceRegistry.h InstanceInfo() : subject_ptr(nullptr), size(0), kind(Invalid), registered_for_introspection(false) {} size 79 src/ObjectInstanceRegistry.h InstanceInfo(size_t size, Kind kind, void *subject_ptr, bool registered_for_introspection) size 80 src/ObjectInstanceRegistry.h : subject_ptr(subject_ptr), size(size), kind(kind), registered_for_introspection(registered_for_introspection) {} size 35 src/ParallelRVar.cpp for (size_t i = 0; i < loads.size(); i++) { size 36 src/ParallelRVar.cpp for (size_t j = 0; j < loads[i].size(); j++) { size 100 src/ParallelRVar.cpp for (size_t i = 0; i < values.size(); i++) { size 106 src/ParallelRVar.cpp vector<Expr> other_store(args.size()); size 107 src/ParallelRVar.cpp for (size_t i = 0; i < args.size(); i++) { size 120 src/ParallelRVar.cpp for (size_t i = 0; i < args.size(); i++) { size 126 src/ParallelRVar.cpp for (size_t i = 0; i < find.loads.size(); i++) { size 127 src/ParallelRVar.cpp internal_assert(find.loads[i].size() == other_store.size()); size 129 src/ParallelRVar.cpp for (size_t j = 0; j < find.loads[i].size(); j++) { size 353 src/Parameter.cpp user_assert(args->size() == (size_t)dims) size 354 src/Parameter.cpp << args->size() << "-argument call to \"" size 357 src/Parameter.cpp for (size_t i = 0; i < args->size(); i++) { size 85 src/PartitionLoops.cpp internal_assert(op->args.size() == 1); size 750 src/PartitionLoops.cpp while (lifted_lets.size()) { size 797 src/PartitionLoops.cpp internal_assert(a->name == "__shared" && a->extents.size() == 1); size 962 src/PartitionLoops.cpp internal_assert(op->args.size() == 1); size 103 src/Pipeline.cpp for (size_t i = 0; i < custom_lowering_passes.size(); i++) { size 346 src/Pipeline.cpp same_compile = same_compile && (lowering_args.size() + outputs().size()) == old_module.functions().front().args.size(); size 372 src/Pipeline.cpp for (size_t i = 0; i < name.size(); i++) { size 517 src/Pipeline.cpp for (size_t i = 0; i < r.size(); i++) { size 677 src/Pipeline.cpp user_assert(output_buffer_types.size() == dst.size()) size 678 src/Pipeline.cpp << "Realization contains wrong number of Images (" << dst.size() size 679 src/Pipeline.cpp << ") for realizing pipeline with " << output_buffer_types.size() size 683 src/Pipeline.cpp for (size_t i = 0; i < dst.size(); i++) { size 729 src/Pipeline.cpp for (size_t i = 0; i < dst.size(); i++) { size 772 src/Pipeline.cpp for (size_t i = 0; i < pipeline_contents.outputs.size(); i++) { size 793 src/Pipeline.cpp for (size_t i = 0; i < dst.size(); i++) { size 909 src/Pipeline.cpp vector<TrackedBuffer> tracked_buffers(args.size()); size 912 src/Pipeline.cpp for (size_t i = 0; i < contents->inferred_args.size(); i++) { size 989 src/Pipeline.cpp vector<int> size; size 990 src/Pipeline.cpp if (x_size) size.push_back(x_size); size 991 src/Pipeline.cpp if (y_size) size.push_back(y_size); size 992 src/Pipeline.cpp if (z_size) size.push_back(z_size); size 993 src/Pipeline.cpp if (w_size) size.push_back(w_size); size 997 src/Pipeline.cpp bufs.emplace_back(t, size); size 48 src/Prefetch.cpp internal_assert(v.size() > 2); size 55 src/Prefetch.cpp string str = v[1].substr(1, v[1].size() - 1); size 66 src/Prefetch.cpp internal_assert(stage <= (int)current_func->updates().size()); size 76 src/Prefetch.cpp internal_assert((int)b.size() == dims); size 169 src/Prefetch.cpp for (size_t i = 0; i < box.size(); i++) { size 205 src/Prefetch.cpp for (int i = prefetch_list.size() - 1; i >= 0; --i) { size 229 src/Prefetch.cpp Box bounds = get_buffer_bounds(b->first, b->second.size()); size 230 src/Prefetch.cpp internal_assert(prefetch_box.size() == bounds.size()); size 236 src/Prefetch.cpp for (size_t i = 0; i < bounds.size(); ++i) { size 281 src/Prefetch.cpp if (call && call->is_intrinsic(Call::prefetch) && (call->args.size() > max_arg_size)) { size 287 src/Prefetch.cpp for (size_t i = max_arg_size; i < call->args.size(); i += 2) { size 300 src/Prefetch.cpp for (size_t i = 0; i < index_names.size(); ++i) { size 336 src/Prefetch.cpp for (size_t i = 2; i < call->args.size(); i += 2) { size 363 src/Prefetch.cpp for (size_t i = 0; i < index_names.size(); ++i) { size 57 src/PrintLoopNest.cpp for (size_t i = 0; i < s.size(); i++) { size 43 src/Profiling.cpp Expr size; size 53 src/Profiling.cpp internal_assert(v.size() > 0); size 62 src/Profiling.cpp idx = (int)indices.size(); size 92 src/Profiling.cpp internal_assert(extents.size() > 0); size 95 src/Profiling.cpp Expr size = cast<uint64_t>(extents[0]); size 96 src/Profiling.cpp for (size_t i = 1; i < extents.size(); i++) { size 97 src/Profiling.cpp size *= extents[i]; size 99 src/Profiling.cpp size = simplify(Select::make(condition, size * type.bytes(), make_zero(UInt(64)))); size 100 src/Profiling.cpp return size; size 108 src/Profiling.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 115 src/Profiling.cpp Expr size = compute_allocation_size(new_extents, condition, op->type, op->name, on_stack); size 116 src/Profiling.cpp internal_assert(size.type() == UInt(64)); size 117 src/Profiling.cpp func_alloc_sizes.push(op->name, {on_stack, size}); size 122 src/Profiling.cpp if (!is_zero(size) && on_stack) { size 123 src/Profiling.cpp const uint64_t *int_size = as_const_uint(size); size 127 src/Profiling.cpp debug(3) << " Allocation on stack: " << op->name << "(" << size << ") in pipeline " << pipeline_name size 145 src/Profiling.cpp if (!is_zero(size) && !on_stack && profiling_memory) { size 147 src/Profiling.cpp debug(3) << " Allocation on heap: " << op->name << "(" << size << ") in pipeline " << pipeline_name << "\n"; size 149 src/Profiling.cpp {profiler_pipeline_state, idx, size}, Call::Extern); size 158 src/Profiling.cpp internal_assert(alloc.size.type() == UInt(64)); size 163 src/Profiling.cpp if (!is_zero(alloc.size)) { size 168 src/Profiling.cpp debug(3) << " Free on heap: " << op->name << "(" << alloc.size << ") in pipeline " << pipeline_name << "\n"; size 170 src/Profiling.cpp {profiler_pipeline_state, idx, alloc.size}, Call::Extern); size 174 src/Profiling.cpp const uint64_t *int_size = as_const_uint(alloc.size); size 178 src/Profiling.cpp debug(3) << " Free on stack: " << op->name << "(" << alloc.size << ") in pipeline " << pipeline_name size 269 src/Profiling.cpp int num_funcs = (int)(profiling.indices.size()); size 64 src/RDom.cpp if (i < dom_vars.size()) { size 107 src/RDom.cpp for (size_t i = 0; i < ranges.size(); i++) { size 179 src/RDom.cpp return (int)dom.domain().size(); size 67 src/Random.cpp internal_assert(e.size()); size 71 src/Random.cpp for (size_t i = 1; i < e.size(); i++) { size 121 src/Random.cpp for (size_t i = 0; i < free_vars.size(); i++) { size 20 src/Reduction.cpp if (result.size() != expected.size()) { size 23 src/Reduction.cpp for (size_t i = 0; i < expected.size(); ++i) { size 17 src/RemoveDeadAllocations.cpp for (size_t i = 0; i < op->args.size(); i++) { size 156 src/RemoveUndef.cpp vector<Expr> new_args(op->args.size()); size 160 src/RemoveUndef.cpp for (size_t i = 0; i < op->args.size(); i++) { size 301 src/RemoveUndef.cpp vector<Expr> new_args(op->args.size()); size 302 src/RemoveUndef.cpp vector<Expr> new_values(op->values.size()); size 308 src/RemoveUndef.cpp for (size_t i = 0; i < op->args.size(); i++) { size 321 src/RemoveUndef.cpp for (size_t i = 1; i < args_predicates.size(); i++) { size 329 src/RemoveUndef.cpp for (size_t i = 0; i < op->values.size(); i++) { size 348 src/RemoveUndef.cpp for (size_t i = 1; i < values_predicates.size(); i++) { size 368 src/RemoveUndef.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 402 src/RemoveUndef.cpp Region new_bounds(op->bounds.size()); size 406 src/RemoveUndef.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 221 src/Schedule.cpp internal_assert(copy.contents->wrappers.size() == contents->wrappers.size()); size 120 src/ScheduleFunctions.cpp for (int i = (int)s.dims().size() - 1; i >= 0; i--) { size 150 src/ScheduleFunctions.cpp int n_predicates = pred_container.size(); size 156 src/ScheduleFunctions.cpp for (int i = (int)s.dims().size(); i < (int)nest.size() - n_predicates; i++) { size 179 src/ScheduleFunctions.cpp for (int i = (int)nest.size() - n_predicates; i < (int)nest.size(); i++) { size 201 src/ScheduleFunctions.cpp for (int i = (int)nest.size() - 1; i >= 0; i--) { size 220 src/ScheduleFunctions.cpp for (size_t i = splits.size(); i > 0; i--) { size 224 src/ScheduleFunctions.cpp for (size_t j = 0; j < let_stmts.size(); j++) { size 273 src/ScheduleFunctions.cpp vector<Expr> site(def.args().size()); size 274 src/ScheduleFunctions.cpp vector<Expr> values(def.values().size()); size 275 src/ScheduleFunctions.cpp for (size_t i = 0; i < values.size(); i++) { size 283 src/ScheduleFunctions.cpp for (size_t i = 0; i < def.args().size(); i++) { size 296 src/ScheduleFunctions.cpp for (size_t i = specializations.size(); i > 0; i--) { size 306 src/ScheduleFunctions.cpp internal_assert(i == specializations.size()); size 428 src/ScheduleFunctions.cpp internal_assert(f.dimensions() == (int)f.args().size()); size 497 src/ScheduleFunctions.cpp for (size_t i = 0; i < lets.size(); i++) { size 518 src/ScheduleFunctions.cpp for (size_t i = 0; i < f.updates().size(); i++) { size 545 src/ScheduleFunctions.cpp for (size_t stage = 0; stage <= func.updates().size(); stage++) { size 546 src/ScheduleFunctions.cpp for (size_t i = 0; i < s.bounds().size(); i++) { size 745 src/ScheduleFunctions.cpp for (size_t i = lets.size(); i > 0; i--) { size 1000 src/ScheduleFunctions.cpp for (size_t i = 0; i < f.updates().size(); i++) { size 1022 src/ScheduleFunctions.cpp for (size_t i = 0; i < definitions.size(); i++) { size 1078 src/ScheduleFunctions.cpp for (size_t i = 0; i < sites.size(); i++) { size 1108 src/ScheduleFunctions.cpp for (size_t i = 0; i < sites.size(); i++) { size 1156 src/ScheduleFunctions.cpp for (size_t i = order.size(); i > 0; i--) { size 4000 src/Simplify.cpp found_buffer_reference(op->name, op->args.size()); size 4121 src/Simplify.cpp for (size_t i = 0; i < op->args.size(); i++) { size 4158 src/Simplify.cpp if (new_args.size() == 1 && new_args[0].as<StringImm>()) { size 4224 src/Simplify.cpp internal_assert(op->args.size() == 1); size 4255 src/Simplify.cpp internal_assert(op->args.size() % 2 == 0); // Format: {base, offset, extent0, min0, ...} size 4259 src/Simplify.cpp for (size_t i = 0; i < op->args.size(); ++i) { size 4270 src/Simplify.cpp for (size_t i = 2; i < args.size(); i += 2) { size 4273 src/Simplify.cpp for (size_t j = i + 2; j < args.size(); j += 2) { size 4288 src/Simplify.cpp internal_assert(args.size() <= op->args.size()); size 4290 src/Simplify.cpp if (changed || (args.size() != op->args.size())) { size 4344 src/Simplify.cpp if (load_indices.size() == new_vectors.size()) { size 4345 src/Simplify.cpp Type t = load_indices[0].type().with_lanes(op->indices.size()); size 4357 src/Simplify.cpp t = t.with_lanes(op->indices.size()); size 4371 src/Simplify.cpp for (size_t i = 1; i < new_vectors.size() && can_collapse; i++) { size 4380 src/Simplify.cpp if (op->indices.size() == 1) { size 4383 src/Simplify.cpp expr = Broadcast::make(b1->value, op->indices.size()); size 4390 src/Simplify.cpp int terms = (int)new_vectors.size(); size 4396 src/Simplify.cpp for (size_t i = 1; i < new_vectors.size() && can_collapse; i++) { size 4423 src/Simplify.cpp for (size_t i = 0; i < new_vectors.size() && can_collapse; i++) { size 4440 src/Simplify.cpp if (first_shuffle->vectors.size() != i_shuffle->vectors.size()) { size 4445 src/Simplify.cpp for (size_t j = 0; j < first_shuffle->vectors.size() && can_collapse; j++) { size 4464 src/Simplify.cpp for (size_t i = 1; i < new_vectors.size() && can_collapse; i++) { size 4479 src/Simplify.cpp expr = Ramp::make(r->base, r->stride, op->indices.size()); size 4488 src/Simplify.cpp for (size_t i = 1; i < new_vectors.size() && can_collapse; i++) { size 4501 src/Simplify.cpp expr = Ramp::make(new_vectors[0], stride, op->indices.size()); size 4532 src/Simplify.cpp if (slices_a.size() != slices_b.size()) { size 4536 src/Simplify.cpp for (size_t i = 0; i < slices_a.size(); i++) { size 4543 src/Simplify.cpp for (size_t i = 0; i < slices_a.size(); i++) { size 4589 src/Simplify.cpp } else if (shuffle && shuffle->is_concat() && shuffle->vectors.size() == 2) { size 4815 src/Simplify.cpp found_buffer_reference(op->name, op->args.size()); size 4848 src/Simplify.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 4896 src/Simplify.cpp for (size_t i = lets.size(); i > 0; i--) { size 5599 src/Simplify.cpp for (size_t i = 0; i < clamped.size(); ++i) { size 5610 src/Simplify.cpp for (size_t i = 0; i < clamped.size(); ++i) { size 5621 src/Simplify.cpp for (size_t i = 0; i < clamped.size(); ++i) { size 5643 src/Simplify.cpp for (size_t i = 0; i < clamped.size(); ++i) { size 5653 src/Simplify.cpp for (size_t i = 0; i < clamped.size(); ++i) { size 124 src/SimplifySpecializations.cpp for (size_t i = specializations.size(); i > 0; i--) { size 28 src/SkipStages.cpp for (size_t i = 0; i < op->args.size(); i++) { size 252 src/SkipStages.cpp for (size_t i = 0; i < op->args.size(); i++) { size 462 src/SkipStages.cpp for (size_t i = order.size()-1; i > 0; i--) { size 127 src/SlidingWindow.cpp string prefix = func.name() + ".s" + std::to_string(func.updates().size()) + "."; size 250 src/SlidingWindow.cpp for (size_t i = 0; i < func.updates().size(); i++) { size 61 src/SplitTuples.cpp if (op->types.size() > 1) { size 64 src/SplitTuples.cpp for (int i = (int)op->types.size() - 1; i >= 0; i--) { size 86 src/SplitTuples.cpp if (!op->param.defined() && (op->types.size() > 1)) { size 94 src/SplitTuples.cpp internal_assert((*it) < (int)op->types.size()); size 97 src/SplitTuples.cpp internal_assert((*it) < (int)op->types.size()); size 129 src/SplitTuples.cpp if (op->values.size() == 1) { size 170 src/SplitTuples.cpp for (size_t i = 0; i < op->values.size(); i++) { size 113 src/StmtToHtml.cpp for (size_t i = 0; i < args.size(); i++) { size 161 src/StmtToHtml.cpp for (size_t i = 0; i < op->value.size(); i++) { size 405 src/StmtToHtml.cpp if (op->values.size() > 1) { size 424 src/StmtToHtml.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 466 src/StmtToHtml.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 468 src/StmtToHtml.cpp if (i < op->bounds.size() - 1) stream << ", "; size 491 src/StmtToHtml.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 493 src/StmtToHtml.cpp if (i < op->bounds.size() - 1) stream << ", "; size 580 src/StmtToHtml.cpp args.push_back(static_cast<int>(op->indices.size())); size 611 src/StmtToHtml.cpp for (size_t i = 0; i < op.args.size(); i++) { size 43 src/StorageFlattening.cpp vector<Expr> mins(args.size()), strides(args.size()); size 46 src/StorageFlattening.cpp for (size_t i = 0; i < args.size(); i++) { size 66 src/StorageFlattening.cpp for (size_t i = 0; i < args.size(); i++) { size 80 src/StorageFlattening.cpp for (size_t i = 0; i < args.size(); i++) { size 104 src/StorageFlattening.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 119 src/StorageFlattening.cpp for (size_t i = 0; i < storage_dims.size(); i++) { size 120 src/StorageFlattening.cpp for (size_t j = 0; j < args.size(); j++) { size 129 src/StorageFlattening.cpp internal_assert(storage_permutation.size() == i+1); size 133 src/StorageFlattening.cpp internal_assert(storage_permutation.size() == op->bounds.size()); size 136 src/StorageFlattening.cpp internal_assert(op->types.size() == 1); size 139 src/StorageFlattening.cpp int dims = op->bounds.size(); size 170 src/StorageFlattening.cpp for (int i = (int)op->bounds.size()-1; i > 0; i--) { size 184 src/StorageFlattening.cpp for (size_t i = op->bounds.size(); i > 0; i--) { size 191 src/StorageFlattening.cpp internal_assert(op->values.size() == 1); size 227 src/StorageFlattening.cpp internal_assert(op->types.size() == 1) size 230 src/StorageFlattening.cpp vector<Expr> prefetch_min(op->bounds.size()); size 231 src/StorageFlattening.cpp vector<Expr> prefetch_extent(op->bounds.size()); size 232 src/StorageFlattening.cpp vector<Expr> prefetch_stride(op->bounds.size()); size 233 src/StorageFlattening.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 252 src/StorageFlattening.cpp for (size_t i = 0; i < storage_dims.size(); i++) { size 253 src/StorageFlattening.cpp for (size_t j = 0; j < args.size(); j++) { size 258 src/StorageFlattening.cpp internal_assert(storage_permutation.size() == i+1); size 261 src/StorageFlattening.cpp internal_assert(storage_permutation.size() == op->bounds.size()); size 263 src/StorageFlattening.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 264 src/StorageFlattening.cpp internal_assert(storage_permutation[i] < (int)op->bounds.size()); size 269 src/StorageFlattening.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 67 src/StorageFolding.cpp internal_assert(dim < (int)args.size()); size 124 src/StorageFolding.cpp for (size_t i = box.size(); i > 0; i--) { size 331 src/StorageFolding.cpp for (size_t i = 0; i < folder.dims_folded.size(); i++) { size 335 src/StorageFolding.cpp d < (int)bounds.size()); size 317 src/Target.cpp for (size_t i = 0; i < tokens.size(); i++) { size 90 src/Target.h for (size_t i = 0; i < initial_features.size(); i++) { size 36 src/Tracing.cpp parent_id, idx, (int)coordinates.size()}; size 120 src/Tracing.cpp vector<Expr> traces(op->values.size()); size 127 src/Tracing.cpp for (size_t i = 0; i < values.size(); i++) { size 147 src/Tracing.cpp for (size_t i = 0; i < args.size(); i++) { size 177 src/Tracing.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 189 src/TrimNoOps.cpp for (size_t i = containing_loops.size(); i > 0; i--) { size 7 src/Tuple.cpp Tuple::Tuple(const FuncRef &f) : exprs(f.size()) { size 8 src/Tuple.cpp user_assert(f.size() > 1) size 11 src/Tuple.cpp for (size_t i = 0; i < f.size(); i++) { size 24 src/Tuple.h size_t size() const { return exprs.size(); } size 28 src/Tuple.h user_assert(x < exprs.size()) << "Tuple access out of bounds\n"; size 34 src/Tuple.h user_assert(x < exprs.size()) << "Tuple access out of bounds\n"; size 53 src/Tuple.h user_assert(e.size() > 0) << "Tuples must have at least one element\n"; size 74 src/Tuple.h size_t size() const { return images.size(); } size 78 src/Tuple.h user_assert(x < images.size()) << "Realization access out of bounds\n"; size 84 src/Tuple.h user_assert(x < images.size()) << "Realization access out of bounds\n"; size 107 src/Tuple.h user_assert(e.size() > 0) << "Realizations must have at least one element\n"; size 115 src/Tuple.h Tuple result(std::vector<Expr>(condition.size())); size 116 src/Tuple.h for (size_t i = 0; i < result.size(); i++) { size 123 src/Tuple.h Tuple result(std::vector<Expr>(true_value.size())); size 124 src/Tuple.h for (size_t i = 0; i < result.size(); i++) { size 66 src/Util.cpp uint32_t size = sizeof(path); size 68 src/Util.cpp ssize_t len = ::readlink("/proc/self/exe", path, size - 1); size 70 src/Util.cpp ssize_t len = ::_NSGetExecutablePath(path, &size); size 127 src/Util.cpp for (size_t i = 0; i < sanitized.size(); i++) { size 142 src/Util.cpp matches_char_pattern &= prefix.size() > 1; size 162 src/Util.cpp if (str.size() < prefix.size()) return false; size 163 src/Util.cpp for (size_t i = 0; i < prefix.size(); i++) { size 170 src/Util.cpp if (str.size() < suffix.size()) return false; size 171 src/Util.cpp size_t off = str.size() - suffix.size(); size 172 src/Util.cpp for (size_t i = 0; i < suffix.size(); i++) { size 195 src/Util.cpp for (size_t i = 0; i < name.size(); i++) { size 210 src/Util.cpp start = found + delim.size(); size 215 src/Util.cpp if (start <= source.size()) { size 306 src/Util.cpp std::vector<char> buf(templ.size() + 1); size 308 src/Util.cpp int fd = mkstemps(&buf[0], suffix.size()); size 357 src/Util.cpp std::vector<char> buf(templ.size() + 1); size 119 src/Util.h for (size_t i = 1; i < vec.size(); i++) { size 134 src/Util.h for (size_t i = vec.size()-1; i > 0; i--) { size 270 src/Util.h static constexpr size_t size() { return sizeof...(Ints); } size 65 src/VaryingAttributes.cpp internal_assert(loop_vars.size() > 0) << "No GPU loop variables found at texture load\n"; size 76 src/VaryingAttributes.cpp internal_assert(loop_vars.size() > 0) << "No GPU loop variables found at texture store\n"; size 814 src/VaryingAttributes.cpp std::vector<Stmt> new_args(op->args.size()); size 817 src/VaryingAttributes.cpp for (size_t i = 0; i < op->args.size(); i++) { size 824 src/VaryingAttributes.cpp for (size_t i = 0; i < new_args.size(); ++i) { size 857 src/VaryingAttributes.cpp for (size_t i = 0; i < op->args.size(); i++) { size 871 src/VaryingAttributes.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 894 src/VaryingAttributes.cpp for (size_t i = 0; i < op->bounds.size(); i++) { size 1005 src/VaryingAttributes.cpp internal_assert(for_loops.size() <= 1); size 1077 src/VaryingAttributes.cpp stmt = For::make(name, 0, (int)dim.size(), ForType::Serial, DeviceAPI::None, loop_var); size 1188 src/VaryingAttributes.cpp int num_attributes = varyings.size() + 2; size 1192 src/VaryingAttributes.cpp int vertex_buffer_size = num_padded_attributes*coords[0].size()*coords[1].size(); size 1241 src/VaryingAttributes.cpp stmt = LetStmt::make("glsl.num_coords_dim0", dont_simplify((int)(coords[0].size())), size 1242 src/VaryingAttributes.cpp LetStmt::make("glsl.num_coords_dim1", dont_simplify((int)(coords[1].size())), size 35 src/VectorizeLoops.cpp if (op->indices.size() == 1 && size 475 src/VectorizeLoops.cpp vector<Expr> new_args(op->args.size()); size 480 src/VectorizeLoops.cpp for (size_t i = 0; i < op->args.size(); i++) { size 500 src/VectorizeLoops.cpp vector<Expr> call_args(call->args.size()); size 501 src/VectorizeLoops.cpp for (size_t i = 0; i < call_args.size(); i++) { size 514 src/VectorizeLoops.cpp for (size_t i = 0; i < new_args.size(); i++) { size 618 src/VectorizeLoops.cpp vector<Expr> new_args(op->args.size()); size 619 src/VectorizeLoops.cpp vector<Expr> new_values(op->values.size()); size 624 src/VectorizeLoops.cpp for (size_t i = 0; i < op->args.size(); i++) { size 632 src/VectorizeLoops.cpp for (size_t i = 0; i < op->args.size(); i++) { size 644 src/VectorizeLoops.cpp for (size_t i = 0; i < new_args.size(); i++) { size 647 src/VectorizeLoops.cpp for (size_t i = 0; i < new_values.size(); i++) { size 819 src/VectorizeLoops.cpp for (size_t i = 0; i < op->extents.size(); i++) { size 864 src/VectorizeLoops.cpp for (size_t i = containing_lets.size(); i > 0; i--) { size 153 src/WrapExternStages.cpp size_t num_functions = m.functions().size(); size 264 src/runtime/HalideBuffer.h for (size_t i = 0; i < sizes.size(); i++) { size 628 src/runtime/HalideBuffer.h size_t size = type().bytes(); size 630 src/runtime/HalideBuffer.h size *= dim(i).extent(); size 633 src/runtime/HalideBuffer.h size = (size << 1) >> 1; size 635 src/runtime/HalideBuffer.h size /= dim(i).extent(); size 637 src/runtime/HalideBuffer.h assert(size == (size_t)type().bytes() && "Error: Overflow computing total size of buffer."); size 656 src/runtime/HalideBuffer.h size_t size = size_in_bytes(); size 658 src/runtime/HalideBuffer.h size = (size + alignment - 1) & ~(alignment - 1); size 659 src/runtime/HalideBuffer.h alloc = (AllocationHeader *)allocate_fn(size + sizeof(AllocationHeader) + alignment - 1); size 746 src/runtime/HalideBuffer.h buf.dimensions = (int)sizes.size(); size 758 src/runtime/HalideBuffer.h buf.dimensions = (int)sizes.size(); size 814 src/runtime/HalideBuffer.h buf.dimensions = (int)sizes.size(); size 829 src/runtime/HalideBuffer.h buf.dimensions = (int)sizes.size(); size 1057 src/runtime/HalideBuffer.h for (int i = 0; i < rect.size(); i++) { size 1089 src/runtime/HalideBuffer.h for (size_t i = 0; i < delta.size(); i++) { size 125 src/runtime/HalideRuntime.h int min, int size, uint8_t *closure); size 396 src/runtime/HalideRuntime.h uint32_t size; size 559 src/runtime/HalideRuntime.h extern void halide_memoization_cache_set_size(int64_t size); size 578 src/runtime/HalideRuntime.h extern int halide_memoization_cache_lookup(void *user_context, const uint8_t *cache_key, int32_t size, size 596 src/runtime/HalideRuntime.h extern int halide_memoization_cache_store(void *user_context, const uint8_t *cache_key, int32_t size, size 20 src/runtime/HalideRuntimeCuda.h const char *src, int size); size 30 src/runtime/HalideRuntimeHexagonHost.h void *ptr, uint64_t size); size 20 src/runtime/HalideRuntimeMetal.h const char *src, int size); size 20 src/runtime/HalideRuntimeOpenCL.h const char *src, int size); size 20 src/runtime/HalideRuntimeOpenGL.h const char *src, int size); size 24 src/runtime/HalideRuntimeOpenGLCompute.h const char *src, int size); size 18 src/runtime/HalideRuntimeQurt.h extern int halide_qurt_hvx_lock(void *user_context, int size); size 23 src/runtime/android_ioctl.h #define _IOC(dir,type,nr,size) (((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT)) size 318 src/runtime/cache.cpp WEAK void halide_memoization_cache_set_size(int64_t size) { size 319 src/runtime/cache.cpp if (size == 0) { size 320 src/runtime/cache.cpp size = kDefaultCacheSize; size 325 src/runtime/cache.cpp max_cache_size = size; size 329 src/runtime/cache.cpp WEAK int halide_memoization_cache_lookup(void *user_context, const uint8_t *cache_key, int32_t size, size 331 src/runtime/cache.cpp uint32_t h = djb_hash(cache_key, size); size 337 src/runtime/cache.cpp debug_print_key(user_context, "halide_memoization_cache_lookup", cache_key, size); size 351 src/runtime/cache.cpp if (entry->hash == h && entry->key_size == (size_t)size && size 352 src/runtime/cache.cpp keys_equal(entry->key, cache_key, size) && size 420 src/runtime/cache.cpp WEAK int halide_memoization_cache_store(void *user_context, const uint8_t *cache_key, int32_t size, size 432 src/runtime/cache.cpp debug_print_key(user_context, "halide_memoization_cache_store", cache_key, size); size 446 src/runtime/cache.cpp if (entry->hash == h && entry->key_size == (size_t)size && size 447 src/runtime/cache.cpp keys_equal(entry->key, cache_key, size) && size 489 src/runtime/cache.cpp inited = new_entry->init(cache_key, size, h, computed_bounds, tuple_count, tuple_buffers); size 335 src/runtime/cuda.cpp WEAK bool validate_device_pointer(void *user_context, halide_buffer_t* buf, size_t size=0) { size 366 src/runtime/cuda.cpp WEAK int halide_cuda_initialize_kernels(void *user_context, void **state_ptr, const char* ptx_src, int size) { size 370 src/runtime/cuda.cpp << ", size: " << size << "\n"; size 395 src/runtime/cuda.cpp debug(user_context) << " cuModuleLoadData " << (void *)ptx_src << ", " << size << " -> "; size 537 src/runtime/cuda.cpp size_t size = buf->size_in_bytes(); size 538 src/runtime/cuda.cpp halide_assert(user_context, size != 0); size 541 src/runtime/cuda.cpp halide_assert(user_context, validate_device_pointer(user_context, buf, size)); size 557 src/runtime/cuda.cpp debug(user_context) << " cuMemAlloc " << (uint64_t)size << " -> "; size 558 src/runtime/cuda.cpp CUresult err = cuMemAlloc(&p, size); size 302 src/runtime/device_interface.cpp size_t size = buf->size_in_bytes(); size 303 src/runtime/device_interface.cpp buf->host = (uint8_t *)halide_malloc(user_context, size); size 16 src/runtime/fake_thread_pool.cpp int min, int size, uint8_t *closure) { size 17 src/runtime/fake_thread_pool.cpp for (int x = min; x < min + size; x++) { size 76 src/runtime/fake_thread_pool.cpp int min, int size, uint8_t *closure) { size 77 src/runtime/fake_thread_pool.cpp return (*custom_do_par_for)(user_context, f, min, size, closure); size 102 src/runtime/gcd_thread_pool.cpp int min, int size, uint8_t *closure) { size 103 src/runtime/gcd_thread_pool.cpp if (custom_num_threads == 1 || size == 1) { size 106 src/runtime/gcd_thread_pool.cpp for (int x = min; x < min + size; x++) { size 122 src/runtime/gcd_thread_pool.cpp dispatch_apply_f(size, dispatch_get_global_queue(0, 0), &job, &halide_do_gcd_task); size 182 src/runtime/gcd_thread_pool.cpp int min, int size, uint8_t *closure) { size 183 src/runtime/gcd_thread_pool.cpp return (*custom_do_par_for)(user_context, f, min, size, closure); size 12 src/runtime/hexagon_host.cpp size_t size; size 165 src/runtime/hexagon_host.cpp size_t size; size 211 src/runtime/hexagon_host.cpp (*state)->size = 0; size 225 src/runtime/hexagon_host.cpp (*state)->size = code_size; size 262 src/runtime/hexagon_host.cpp mapped_arg.dataLen = ion_handle->size; size 383 src/runtime/hexagon_host.cpp state->size = 0; size 414 src/runtime/hexagon_host.cpp size_t size = buf->size_in_bytes(); size 415 src/runtime/hexagon_host.cpp halide_assert(user_context, size != 0); size 420 src/runtime/hexagon_host.cpp size += 128; size 426 src/runtime/hexagon_host.cpp debug(user_context) << " allocating buffer of " << (uint64_t)size << " bytes\n"; size 433 src/runtime/hexagon_host.cpp if (size >= min_ion_allocation_size) { size 434 src/runtime/hexagon_host.cpp debug(user_context) << " host_malloc len=" << (uint64_t)size << " -> "; size 435 src/runtime/hexagon_host.cpp ion = host_malloc(size); size 442 src/runtime/hexagon_host.cpp debug(user_context) << " halide_malloc size=" << (uint64_t)size << " -> "; size 443 src/runtime/hexagon_host.cpp ion = halide_malloc(user_context, size); size 451 src/runtime/hexagon_host.cpp int err = halide_hexagon_wrap_device_handle(user_context, buf, ion, size); size 453 src/runtime/hexagon_host.cpp if (size >= min_ion_allocation_size) { size 485 src/runtime/hexagon_host.cpp uint64_t size = halide_hexagon_get_device_size(user_context, buf); size 487 src/runtime/hexagon_host.cpp if (size >= min_ion_allocation_size) { size 572 src/runtime/hexagon_host.cpp void *ion_buf, uint64_t size) { size 583 src/runtime/hexagon_host.cpp handle->size = size; size 620 src/runtime/hexagon_host.cpp return handle->size; size 108 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c static __inline void _qaic_memmove(void* dst, void* src, int size) { size 110 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c for(i = 0; i < size; ++i) { size 145 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c #define _ALLOCATE(nErr, pal, size, alignment, pv) _TRY(nErr, _allocator_alloc(pal, __FILE_LINE__, size, alignment, (void**)&pv)) size 147 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c #define _ALLOCATE(nErr, pal, size, alignment, pv) _TRY(nErr, _allocator_alloc(pal, 0, size, alignment, (void**)&pv)) size 175 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c static __inline int _heap_alloc(_heap** ppa, const char* loc, int size, void** ppbuf) { size 177 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c pn = malloc(size + sizeof(_heap) - sizeof(uint64_t)); size 193 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c int size, size 196 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c if(size < 0) { size 198 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c } else if (size == 0) { size 202 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c if((_ALIGN_SIZE((uintptr_t)me->stackEnd, al) + size) < (uintptr_t)me->stack + me->nSize) { size 204 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c me->stackEnd = (uint8_t*)_ALIGN_SIZE((uintptr_t)me->stackEnd, al) + size; size 207 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_skel.c return _heap_alloc(&me->pheap, loc, size, ppbuf); size 108 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c static __inline void _qaic_memmove(void* dst, void* src, int size) { size 110 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c for(i = 0; i < size; ++i) { size 145 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c #define _ALLOCATE(nErr, pal, size, alignment, pv) _TRY(nErr, _allocator_alloc(pal, __FILE_LINE__, size, alignment, (void**)&pv)) size 147 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c #define _ALLOCATE(nErr, pal, size, alignment, pv) _TRY(nErr, _allocator_alloc(pal, 0, size, alignment, (void**)&pv)) size 175 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c static __inline int _heap_alloc(_heap** ppa, const char* loc, int size, void** ppbuf) { size 177 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c pn = malloc(size + sizeof(_heap) - sizeof(uint64_t)); size 193 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c int size, size 196 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c if(size < 0) { size 198 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c } else if (size == 0) { size 202 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c if((_ALIGN_SIZE((uintptr_t)me->stackEnd, al) + size) < (uintptr_t)me->stack + me->nSize) { size 204 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c me->stackEnd = (uint8_t*)_ALIGN_SIZE((uintptr_t)me->stackEnd, al) + size; size 207 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c return _heap_alloc(&me->pheap, loc, size, ppbuf); size 460 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c extern int adsp_pls_add_lookup(uint32_t type, uint32_t key, int size, int (*ctor)(void* ctx, void* data), void* ctx, void (*dtor)(void* ctx), void** ppo); size 462 src/runtime/hexagon_remote/bin/src/halide_hexagon_remote_stub.c extern int HAP_pls_add_lookup(uint32_t type, uint32_t key, int size, int (*ctor)(void* ctx, void* data), void* ctx, void (*dtor)(void* ctx), void** ppo); size 322 src/runtime/hexagon_remote/dlib.cpp bool parse(const char *data, size_t size) { size 323 src/runtime/hexagon_remote/dlib.cpp if (size < sizeof(Ehdr)) { size 348 src/runtime/hexagon_remote/dlib.cpp size_t aligned_size = (size + alignment - 1) & ~(alignment - 1); size 354 src/runtime/hexagon_remote/dlib.cpp program_size = size; size 449 src/runtime/hexagon_remote/dlib.cpp void *mmap_dlopen(const void *code, size_t size) { size 454 src/runtime/hexagon_remote/dlib.cpp if (!dlib->parse((const char *)code, size)) { size 11 src/runtime/hexagon_remote/dlib.h void *mmap_dlopen(const void *code, size_t size); size 115 src/runtime/hexagon_remote/halide_remote.cpp __attribute__((weak)) void* dlopenbuf(const char*filename, const char* data, int size, int perms); size 86 src/runtime/hexagon_remote/host_malloc.cpp size_t size; size 103 src/runtime/hexagon_remote/host_malloc.cpp __attribute__((weak)) void remote_register_buf(void* buf, int size, int fd); size 119 src/runtime/hexagon_remote/host_malloc.cpp void *halide_hexagon_host_malloc(size_t size) { size 130 src/runtime/hexagon_remote/host_malloc.cpp size = (size + alignment - 1) & ~(alignment - 1); size 134 src/runtime/hexagon_remote/host_malloc.cpp if (size >= alignments[i]) { size 140 src/runtime/hexagon_remote/host_malloc.cpp ion_user_handle_t handle = ion_alloc(ion_fd, size, alignment, 1 << heap_id, ion_flags); size 143 src/runtime/hexagon_remote/host_malloc.cpp ion_fd, size, alignment, 1 << heap_id, ion_flags); size 156 src/runtime/hexagon_remote/host_malloc.cpp void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0); size 159 src/runtime/hexagon_remote/host_malloc.cpp size, buf_fd); size 167 src/runtime/hexagon_remote/host_malloc.cpp remote_register_buf(buf, size, buf_fd); size 174 src/runtime/hexagon_remote/host_malloc.cpp munmap(buf, size); size 184 src/runtime/hexagon_remote/host_malloc.cpp rec->size = size; size 221 src/runtime/hexagon_remote/host_malloc.cpp remote_register_buf(rec->buf, rec->size, -1); size 225 src/runtime/hexagon_remote/host_malloc.cpp munmap(rec->buf, rec->size); size 11 src/runtime/hexagon_remote/log.cpp int size; size 19 src/runtime/hexagon_remote/log.cpp Log(int size) : buffer(NULL), size(size), read_cursor(0), write_cursor(0) { size 21 src/runtime/hexagon_remote/log.cpp buffer = (char *)malloc(size); size 34 src/runtime/hexagon_remote/log.cpp buffer[write_cursor & (size - 1)] = in[i]; size 46 src/runtime/hexagon_remote/log.cpp char out_i = buffer[read_cursor++ & (size - 1)]; size 69 src/runtime/hexagon_remote/log.cpp extern "C" int halide_hexagon_remote_poll_log(char *out, int size, int *read_size) { size 72 src/runtime/hexagon_remote/log.cpp *read_size = global_log.read(out, size - 1, '\n'); size 126 src/runtime/hexagon_remote/sim_host.cpp int write_memory(int dest, const void *src, int size) { size 129 src/runtime/hexagon_remote/sim_host.cpp while (size > 0) { size 135 src/runtime/hexagon_remote/sim_host.cpp if (size >= 8) { size 138 src/runtime/hexagon_remote/sim_host.cpp } else if (size >= 4) { size 141 src/runtime/hexagon_remote/sim_host.cpp } else if (size >= 2) { size 154 src/runtime/hexagon_remote/sim_host.cpp size -= chunk_size; size 161 src/runtime/hexagon_remote/sim_host.cpp int read_memory(void *dest, int src, int size) { size 164 src/runtime/hexagon_remote/sim_host.cpp while (size > 0) { size 167 src/runtime/hexagon_remote/sim_host.cpp if (size >= 8) next = 8; size 168 src/runtime/hexagon_remote/sim_host.cpp else if (size >= 4) next = 4; size 169 src/runtime/hexagon_remote/sim_host.cpp else if (size >= 2) next = 2; size 176 src/runtime/hexagon_remote/sim_host.cpp size -= next; size 200 src/runtime/hexagon_remote/sim_host.cpp for (size_t i = 0; i < arguments.size(); i++) { size 132 src/runtime/hexagon_remote/sim_remote.cpp int min, int size, uint8_t *closure) { size 133 src/runtime/hexagon_remote/sim_remote.cpp for (int x = min; x < min + size; x++) { size 264 src/runtime/hexagon_remote/sim_remote.cpp __attribute__ ((weak)) void* dlopenbuf(const char*filename, const char* data, int size, int perms); size 107 src/runtime/hexagon_remote/thread_pool.cpp int min, int size, uint8_t *closure) { size 146 src/runtime/hexagon_remote/thread_pool.cpp int ret = Halide::Runtime::Internal::default_do_par_for(user_context, task, min, size, (uint8_t *)&c); size 332 src/runtime/metal.cpp size_t size = buf->size_in_bytes(); size 333 src/runtime/metal.cpp halide_assert(user_context, size != 0); size 355 src/runtime/metal.cpp mtl_buffer *metal_buf = new_buffer(metal_context.device, size); size 357 src/runtime/metal.cpp error(user_context) << "Metal: Failed to allocate buffer of size " << (int64_t)size << ".\n"; size 128 src/runtime/mini_cl.h size_t size; size 112 src/runtime/mini_opengl.h typedef void (*PFNGLBUFFERDATAPROC)(GLenum target, GLsizeiptr size, const GLvoid *data, GLenum usage); size 150 src/runtime/mini_opengl.h typedef void (*PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid *pointer); size 8 src/runtime/msan.cpp const void *mem, size_t size); size 93 src/runtime/noos.cpp int min, int size, uint8_t *closure) { size 94 src/runtime/noos.cpp return (*custom_do_par_for)(user_context, f, min, size, closure); size 217 src/runtime/opencl.cpp WEAK bool validate_device_pointer(void *user_context, halide_buffer_t* buf, size_t size=0) { size 234 src/runtime/opencl.cpp << ": asked for " << (uint64_t)size size 237 src/runtime/opencl.cpp if (size) { size 238 src/runtime/opencl.cpp halide_assert(user_context, real_size >= size && "Validating pointer with insufficient size"); size 507 src/runtime/opencl.cpp WEAK int halide_opencl_initialize_kernels(void *user_context, void **state_ptr, const char* src, int size) { size 512 src/runtime/opencl.cpp << ", size: " << size << "\n"; size 538 src/runtime/opencl.cpp if (!(*state && (*state)->program) && size > 1) { size 708 src/runtime/opencl.cpp size_t size = buf->size_in_bytes(); size 709 src/runtime/opencl.cpp halide_assert(user_context, size != 0); size 711 src/runtime/opencl.cpp halide_assert(user_context, validate_device_pointer(user_context, buf, size)); size 727 src/runtime/opencl.cpp debug(user_context) << " clCreateBuffer -> " << (int)size << " "; size 728 src/runtime/opencl.cpp cl_mem dev_ptr = clCreateBuffer(ctx.context, CL_MEM_READ_WRITE, size, NULL, &err); size 125 src/runtime/opengl.cpp __attribute__((always_inline)) HalideMalloc(void *user_context, size_t size) size 126 src/runtime/opengl.cpp : user_context(user_context), ptr(halide_malloc(user_context, size)) {} size 452 src/runtime/opengl.cpp WEAK KernelInfo *create_kernel(void *user_context, const char *src, int size) { size 455 src/runtime/opengl.cpp kernel->source = strndup(src, size); size 459 src/runtime/opengl.cpp debug(user_context) << "Compiling GLSL kernel (size = " << size << "):\n"; size 466 src/runtime/opengl.cpp next_line = line + size; size 1910 src/runtime/opengl.cpp const char *src, int size) { size 1999 src/runtime/opengl.cpp if (vertex_src.size() >= PrinterLength) { size 68 src/runtime/openglcompute.cpp __attribute__((always_inline)) HalideMalloc(void *user_context, size_t size) size 69 src/runtime/openglcompute.cpp : user_context(user_context), ptr(halide_malloc(user_context, size)) {} size 244 src/runtime/openglcompute.cpp size_t size = buf->size_in_bytes(); size 245 src/runtime/openglcompute.cpp halide_assert(user_context, size != 0); size 358 src/runtime/openglcompute.cpp size_t size = buf->size_in_bytes(); size 359 src/runtime/openglcompute.cpp halide_assert(user_context, size != 0); size 360 src/runtime/openglcompute.cpp global_state.BufferData(GL_ARRAY_BUFFER, size, buf->host, GL_DYNAMIC_COPY); size 363 src/runtime/openglcompute.cpp debug(user_context) << " copied " << ((unsigned)size) << " bytes from " << buf->host << " to the device.\n"; size 385 src/runtime/openglcompute.cpp size_t size = buf->size_in_bytes(); size 386 src/runtime/openglcompute.cpp halide_assert(user_context, size != 0); size 392 src/runtime/openglcompute.cpp << ", size=" << (unsigned)size << ")\n"; size 399 src/runtime/openglcompute.cpp size, size 402 src/runtime/openglcompute.cpp memcpy(buf->host, device_data, size); size 405 src/runtime/openglcompute.cpp debug(user_context) << " copied " << (unsigned)size << " bytes to the host.\n"; size 543 src/runtime/openglcompute.cpp const char *src, int size) { size 120 src/runtime/printer.h uint64_t size() const { size 337 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 345 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 353 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 360 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 371 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 374 src/runtime/profiler.cpp while (sstr.size() < cursor) sstr << " "; size 18 src/runtime/qurt_hvx.cpp WEAK int halide_qurt_hvx_lock(void *user_context, int size) { size 20 src/runtime/qurt_hvx.cpp switch (size) { size 77 src/runtime/qurt_hvx.cpp WEAK int _halide_prefetch(const void *ptr, int size) { size 78 src/runtime/qurt_hvx.cpp _halide_prefetch_2d(ptr, size, 1, 1); size 36 src/runtime/thread_pool.cpp int min, int size, uint8_t *closure) { size 37 src/runtime/thread_pool.cpp return (*custom_do_par_for)(user_context, f, min, size, closure); size 165 src/runtime/thread_pool_common.h int min, int size, uint8_t *closure) { size 204 src/runtime/thread_pool_common.h job.max = min + size; // Keep going until one less than this index. size 209 src/runtime/thread_pool_common.h if (!work_queue.jobs && size < work_queue.desired_num_threads) { size 214 src/runtime/thread_pool_common.h work_queue.target_a_team_size = size; size 37 src/runtime/tracing.cpp header.size = total_size; size 51 test/common/check_call_graphs.h if (result.size() != expected.size()) { size 52 test/common/check_call_graphs.h printf("Expect %d callers instead of %d\n", (int)expected.size(), (int)result.size()); size 80 test/common/halide_test_dirs.h bool is_absolute = dir.size() >= 1 && dir[0] == '/'; size 84 test/common/halide_test_dirs.h if (dir.size() >= 3 && dir[1] == ':' && (dir[2] == '\\' || dir[2] == '/')) { size 93 test/common/halide_test_dirs.h if (dir[dir.size() - 1] != sep) { size 27 test/correctness/cascaded_filters.cpp for (size_t i = 0; i < stages.size()-1; i++) { size 8 test/correctness/code_explosion.cpp const int size = 100; size 14 test/correctness/code_explosion.cpp for (int i = 2; i < size; i++) { size 18 test/correctness/code_explosion.cpp g(x) = funcs[funcs.size()-1](x); size 24 test/correctness/code_explosion.cpp std::vector<Expr> e(size); size 28 test/correctness/code_explosion.cpp for (size_t i = 2; i < e.size(); i++) { size 32 test/correctness/code_explosion.cpp f(x) = e[e.size()-1]; size 60 test/correctness/custom_lowering_pass.cpp const int size = 10; size 61 test/correctness/custom_lowering_pass.cpp f.realize(size); size 63 test/correctness/custom_lowering_pass.cpp if (multiply_count != size * 2) { size 65 test/correctness/custom_lowering_pass.cpp multiply_count, size); size 27 test/correctness/external_code.cpp bitcode_stream.read(reinterpret_cast<char *>(&bitcode[0]), bitcode.size()); size 9 test/correctness/fuzz_float_stores.cpp const int size = 1000; size 19 test/correctness/fuzz_float_stores.cpp Buffer<float> im_ref = f.realize(size, target); size 20 test/correctness/fuzz_float_stores.cpp Buffer<float> im_fuzzed = f.realize(size, target_fuzzed); size 39 test/correctness/fuzz_float_stores.cpp Buffer<float> im_ref = f.realize(size, target); size 40 test/correctness/fuzz_float_stores.cpp Buffer<float> im_fuzzed = f.realize(size, target_fuzzed); size 55 test/correctness/fuzz_float_stores.cpp if (differences == size) { size 11 test/correctness/implicit_args_tests.cpp for (size_t idx = 0; idx < funcs.size(); idx++) { size 36 test/correctness/infer_arguments.cpp EXPECT(7, args.size()); size 121 test/correctness/infer_arguments.cpp EXPECT(3, args.size()); size 46 test/correctness/interleave.cpp if (values.size() == 1) { size 62 test/correctness/interleave.cpp if (f.size() == 1) { size 12 test/correctness/many_dimensions.cpp for (size_t i = 0; i < vars.size(); i++) { size 27 test/correctness/many_dimensions.cpp std::vector<Expr> site1(vars.size()), site2(vars.size()); size 28 test/correctness/many_dimensions.cpp for (size_t i = 0; i < vars.size(); i++) { size 55 test/correctness/out_of_memory.cpp funcs[funcs.size()-1].set_custom_allocator(&test_malloc, &test_free); size 56 test/correctness/out_of_memory.cpp funcs[funcs.size()-1].set_error_handler(&handler); size 57 test/correctness/out_of_memory.cpp funcs[funcs.size()-1].realize(1); size 117 test/correctness/predicated_store_load.cpp int size = 73; size 124 test/correctness/predicated_store_load.cpp ref(x, y) = select(x < 23, g(size-x, y) * 2 + g(20-x, y), undef<int>()); size 125 test/correctness/predicated_store_load.cpp Buffer<int> im_ref = ref.realize(size, size); size 127 test/correctness/predicated_store_load.cpp f(x, y) = select(x < 23, g(size-x, y) * 2 + g(20-x, y), undef<int>()); size 137 test/correctness/predicated_store_load.cpp Buffer<int> im = f.realize(size, size); size 149 test/correctness/predicated_store_load.cpp int size = 100; size 156 test/correctness/predicated_store_load.cpp RDom r(0, size, 0, size); size 161 test/correctness/predicated_store_load.cpp ref(r.x, r.y) = g(size-r.x, r.y) * 2 + g(67-r.x, r.y); size 162 test/correctness/predicated_store_load.cpp Buffer<int> im_ref = ref.realize(size, size); size 165 test/correctness/predicated_store_load.cpp f(r.x, r.y) = g(size-r.x, r.y) * 2 + g(67-r.x, r.y); size 175 test/correctness/predicated_store_load.cpp Buffer<int> im = f.realize(size, size); size 43 test/correctness/print.cpp assert(messages.size() == 10); size 44 test/correctness/print.cpp for (size_t i = 0; i < messages.size(); i++) { size 76 test/correctness/print.cpp assert(messages.size() == 1); size 118 test/correctness/print.cpp assert(messages.back().size() == 8191); size 156 test/correctness/print.cpp assert(messages.size() == (size_t)N); size 179 test/correctness/print.cpp assert(messages.size() == (size_t)N); size 580 test/correctness/reduction_non_rectangular.cpp assert(res.size() == 2); size 10 test/correctness/reduction_schedule.cpp const int size = 32; size 12 test/correctness/reduction_schedule.cpp Buffer<float> noise(size, size); size 13 test/correctness/reduction_schedule.cpp for (int i = 0; i < size; i++) { size 14 test/correctness/reduction_schedule.cpp for (int j = 0; j < size; j++) { size 33 test/correctness/reduction_schedule.cpp Buffer<float> im_energy = energy.realize(size,size); size 34 test/correctness/reduction_schedule.cpp Buffer<float> ref_energy(size, size); size 35 test/correctness/reduction_schedule.cpp for (int y = 0; y < size; y++) { size 36 test/correctness/reduction_schedule.cpp for (int x = 0; x < size; x++) { size 38 test/correctness/reduction_schedule.cpp int xp = std::min(x + 1, size - 1); size 452 test/correctness/rfactor.cpp int size = 1024; size 462 test/correctness/rfactor.cpp RDom r(0, size); size 260 test/correctness/simd_op_check.cpp for (size_t i = 0; i < name.size(); i++) { size 264 test/correctness/simd_op_check.cpp name += "_" + std::to_string(tasks.size()); size 19 test/correctness/sort_exprs.cpp size_t size = v.size(); size 20 test/correctness/sort_exprs.cpp size_t half_size = size/2; size 32 test/correctness/sort_exprs.cpp assert(a.size() == half_size); size 33 test/correctness/sort_exprs.cpp assert(b.size() == half_size); size 40 test/correctness/sort_exprs.cpp for (size_t i = 0; i < size; i++) { size 56 test/correctness/sort_exprs.cpp while (v.size() & (v.size() - 1)) { size 70 test/correctness/sort_exprs.cpp return v[v.size()/2]; size 87 test/correctness/sort_exprs.cpp std::cout << exprs.size() << "\n"; size 288 test/correctness/specialize.cpp Param<int> start, size; size 289 test/correctness/specialize.cpp RDom r(start, size); size 295 test/correctness/specialize.cpp f.update().specialize(size == 1); size 298 test/correctness/specialize.cpp f.update().specialize(size == 0); size 301 test/correctness/specialize.cpp size.set(1); size 539 test/correctness/specialize.cpp _halide_user_assert(f.function().definition().specializations().size() == 2); size 546 test/correctness/specialize.cpp _halide_user_assert(s.size() == 1); size 585 test/correctness/specialize.cpp _halide_user_assert(f.function().definition().specializations().size() == 5); size 594 test/correctness/specialize.cpp _halide_user_assert(s.size() == 1); size 8 test/correctness/vectorized_load_from_vectorized_allocation.cpp const int size = 80; size 16 test/correctness/vectorized_load_from_vectorized_allocation.cpp RDom r(0, size, 0, size, 0, size); size 22 test/correctness/vectorized_load_from_vectorized_allocation.cpp g.bound_extent(x, size*size); size 24 test/correctness/vectorized_load_from_vectorized_allocation.cpp Buffer<int> im = f.realize(size, size, size); size 15 test/generator/cleanup_on_error_aottest.cpp const int size = 64; size 66 test/generator/cleanup_on_error_aottest.cpp Buffer<int32_t> output(size); size 47 test/generator/image_from_array_aottest.cpp for (size_t i = 0, last = v.size(); i < last; ++i) { size 98 test/generator/image_from_array_aottest.cpp int dims = (int)sizes.size(); size 54 test/generator/matlab_aottest.cpp size_t get_number_of_dimensions() const { return dims.size(); } size 59 test/generator/metadata_tester_generator.cpp assert(output.types().size() == 2); size 72 test/generator/metadata_tester_generator.cpp for (size_t i = 0; i < array_outputs.size(); ++i) { size 54 test/generator/msan_aottest.cpp const void *mem, size_t size) { size 17 test/generator/pyramid_generator.cpp for (size_t i = 1; i < pyramid.size(); i++) { size 7 test/generator/stubtest_generator.cpp template<typename Type, int size = 32, int dim = 3> size 9 test/generator/stubtest_generator.cpp Halide::Buffer<Type> im(size, size, dim); size 10 test/generator/stubtest_generator.cpp for (int x = 0; x < size; x++) { size 11 test/generator/stubtest_generator.cpp for (int y = 0; y < size; y++) { size 64 test/generator/stubtest_generator.cpp array_output.resize(array_input.size()); size 65 test/generator/stubtest_generator.cpp for (size_t i = 0; i < array_input.size(); ++i) { size 9 test/generator/stubuser_generator.cpp template<typename Type, int size = 32> size 11 test/generator/stubuser_generator.cpp Buffer<Type> im(size, size, 3); size 12 test/generator/stubuser_generator.cpp for (int x = 0; x < size; x++) { size 13 test/generator/stubuser_generator.cpp for (int y = 0; y < size; y++) { size 23 test/opengl/testing.h for (size_t i = 0; i < v.size(); i++) { size 64 test/opengl/varying.cpp if ((int)varyings.size() != expected_nvarying) { size 67 test/opengl/varying.cpp label, (int)varyings.size(), expected_nvarying); size 23 test/performance/inner_loop_parallel.cpp memcpy(buf, str.c_str(), str.size()); size 14 test/performance/rfactor.cpp const int size = 1024 * 1024 * N1 * N2; size 18 test/performance/rfactor.cpp RDom r(0, size); size 45 test/performance/rfactor.cpp Buffer<float> vec_A(size); size 50 test/performance/rfactor.cpp for (int ix = 0; ix < size; ix++) { size 63 test/performance/rfactor.cpp float gbits = 32.0 * size / 1e9; // bits per seconds size 130 test/performance/rfactor.cpp const int size = 64; size 136 test/performance/rfactor.cpp RDom r(0, size, 0, size, 0, size, 0, size); size 166 test/performance/rfactor.cpp Buffer<uint8_t> vec(size, size, size, size); size 169 test/performance/rfactor.cpp for (int iw = 0; iw < size; iw++) { size 170 test/performance/rfactor.cpp for (int iz = 0; iz < size; iz++) { size 171 test/performance/rfactor.cpp for (int iy = 0; iy < size; iy++) { size 172 test/performance/rfactor.cpp for (int ix = 0; ix < size; ix++) { size 173 test/performance/rfactor.cpp vec(ix, iy, iz, iw) = (rand() % size); size 202 test/performance/rfactor.cpp const int size = 1024*1024*N1 * N2; size 210 test/performance/rfactor.cpp RDom r(0, size); size 239 test/performance/rfactor.cpp Buffer<int32_t> vec0(size), vec1(size); size 242 test/performance/rfactor.cpp for (int ix = 0; ix < size; ix++) { size 243 test/performance/rfactor.cpp vec0(ix) = (rand() % size); size 244 test/performance/rfactor.cpp vec1(ix) = (rand() % size); size 260 test/performance/rfactor.cpp float gbits = input0.type().bits() * size * 2 / 1e9; // bits per seconds size 271 test/performance/rfactor.cpp const int size = 1024 * 1024 * N1 * N2; size 278 test/performance/rfactor.cpp RDom r(0, size); size 306 test/performance/rfactor.cpp Buffer<float> vec_A(size), vec_B(size); size 311 test/performance/rfactor.cpp for (int ix = 0; ix < size; ix++) { size 328 test/performance/rfactor.cpp float gbits = 32 * size * (2 / 1e9); // bits per seconds size 339 test/performance/rfactor.cpp const int size = 1024 * 1024 * N1 * N2; size 343 test/performance/rfactor.cpp RDom r(0, size); size 387 test/performance/rfactor.cpp Buffer<int32_t> vec_A(size); size 390 test/performance/rfactor.cpp for (int ix = 0; ix < size; ix++) { size 403 test/performance/rfactor.cpp float gbits = 8 * size * (2 / 1e9); // bits per seconds size 11 test/performance/sort.cpp Func bitonic_sort(Func input, int size) { size 16 test/performance/sort.cpp for (int pass_size = 1; pass_size < size; pass_size <<= 1) { size 83 tools/halide_image_info.h int32_t size = 1; size 94 tools/halide_image_info.h size *= extent[d]; size 148 tools/halide_image_info.h const T *img_next = img_data + size; size 149 tools/halide_image_info.h int32_t img_size = size * img_bpp; size 151 tools/halide_image_info.h std::cout << tag << " size = " << size << " (0x" size 152 tools/halide_image_info.h << std::hex << size << ")" << std::dec << std::endl; size 177 tools/halide_image_info.h int32_t size = 1; size 183 tools/halide_image_info.h size *= extent[d]; size 194 tools/halide_image_info.h for (int32_t i = 0; i < size; i++) { size 241 tools/halide_image_info.h int32_t size = 1; size 246 tools/halide_image_info.h size *= extent[d]; size 269 tools/halide_image_info.h for (int32_t i = 0; i < size; i++) { size 90 tutorial/lesson_13_tuples.cpp assert(r.size() == 2); size 98 util/HalideTraceViz.cpp uint32_t payload_size = size - header_size; size 113 util/HalideTraceViz.cpp bool read_stdin(void *d, ssize_t size) { size 115 util/HalideTraceViz.cpp if (!size) return true; size 117 util/HalideTraceViz.cpp ssize_t s = read(0, dst, size); size 125 util/HalideTraceViz.cpp } else if (s == size) { size 128 util/HalideTraceViz.cpp size -= s; size 756 util/HalideTraceViz.cpp for (size_t i = 0; i < fi.config.labels.size(); i++) { size 876 util/HalideTraceViz.cpp fprintf(stderr, "Total number of Funcs: %d\n", (int)func_info.size());