6#ifndef HALIDE_RUNTIME_BUFFER_H
7#define HALIDE_RUNTIME_BUFFER_H
21#include <AvailabilityVersions.h>
22#include <TargetConditionals.h>
25#if defined(__has_feature)
26#if __has_feature(memory_sanitizer)
27#include <sanitizer/msan_interface.h>
35#define HALIDE_ALLOCA _alloca
37#define HALIDE_ALLOCA __builtin_alloca
41#if __GNUC__ == 5 && __GNUC_MINOR__ == 1
42#pragma GCC diagnostic ignored "-Warray-bounds"
45#ifndef HALIDE_RUNTIME_BUFFER_CHECK_INDICES
46#define HALIDE_RUNTIME_BUFFER_CHECK_INDICES 0
49#ifndef HALIDE_RUNTIME_BUFFER_ALLOCATION_ALIGNMENT
53#define HALIDE_RUNTIME_BUFFER_ALLOCATION_ALIGNMENT 128
57 "HALIDE_RUNTIME_BUFFER_ALLOCATION_ALIGNMENT must be a power of 2.");
65#ifndef HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC
72 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 0
74#elif defined(__ANDROID_API__) && __ANDROID_API__ < 28
77 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 0
79#elif defined(__APPLE__)
81 #if TARGET_OS_OSX && (__MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_15)
84 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 0
86 #elif TARGET_OS_IPHONE && (__IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_14_0)
89 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 0
94 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 1
100 #if defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)
103 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 0
108 #define HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC 1
121template<
typename T,
int Dims,
int InClassDimStorage>
126template<
typename...
Args>
132template<
typename T,
typename...
Args>
134 static const bool value = std::is_convertible<T, int>::value &&
AllInts<
Args...>::value;
140template<
typename...
Args>
143template<
typename...
Args>
148template<
typename Container>
159 static inline void *(*default_allocate_fn)(
size_t) =
nullptr;
219template<
typename T = void,
238 static const bool T_is_void = std::is_same<typename std::remove_const<T>::type,
void>::value;
241 template<
typename T2>
242 using add_const_if_T_is_const =
typename std::conditional<std::is_const<T>::value,
const T2, T2>::type;
246 using not_void_T =
typename std::conditional<T_is_void,
247 add_const_if_T_is_const<uint8_t>,
251 using not_const_T =
typename std::remove_const<T>::type;
257 using storage_T =
typename std::conditional<std::is_pointer<T>::value,
uint64_t, not_void_T>::type;
261 static constexpr bool has_static_halide_type = !T_is_void;
271 return alloc !=
nullptr;
282 static_assert(!has_static_dimensions || static_dimensions() >= 0);
286 void incref()
const {
287 if (owns_host_memory()) {
291 if (!dev_ref_count) {
297 dev_ref_count =
new DeviceRefCount;
299 dev_ref_count->
count++;
305 struct DevRefCountCropped : DeviceRefCount {
312 Buffer<T, AnyDims> cropped_from;
313 explicit DevRefCountCropped(
const Buffer<T, AnyDims> &cropped_from)
314 : cropped_from(cropped_from) {
315 ownership = BufferDeviceOwnership::Cropped;
320 void crop_from(
const Buffer<T, AnyDims> &cropped_from) {
321 assert(dev_ref_count ==
nullptr);
322 dev_ref_count =
new DevRefCountCropped(cropped_from);
332 alloc->~AllocationHeader();
337 set_host_dirty(
false);
345 assert(!(alloc && device_dirty()) &&
346 "Implicitly freeing a dirty device allocation while a host allocation still lives. "
347 "Call device_free explicitly if you want to drop dirty device-side data. "
348 "Call copy_to_host explicitly if you want the data copied to the host allocation "
349 "before the device allocation is freed.");
366 delete (DevRefCountCropped *)dev_ref_count;
368 delete dev_ref_count;
372 dev_ref_count =
nullptr;
377 void free_shape_storage() {
378 if (buf.
dim != shape) {
384 template<
int DimsSpecified>
385 void make_static_shape_storage() {
387 "Number of arguments to Buffer() does not match static dimensionality");
402 void make_shape_storage(
const int dimensions) {
404 assert(
false &&
"Number of arguments to Buffer() does not match static dimensionality");
414 make_shape_storage(
other.dimensions);
418 template<
typename T2,
int D2,
int S2>
421 copy_shape_from(
other.buf);
424 other.buf.dim =
nullptr;
435 dev_ref_count =
new DeviceRefCount;
441 void initialize_shape(
const int *
sizes) {
454 void initialize_shape(
const std::vector<int> &
sizes) {
456 initialize_shape(
sizes.data());
460 template<
typename Array,
size_t N>
461 void initialize_shape_from_array_shape(
int next,
Array (&
vals)[
N]) {
467 initialize_shape_from_array_shape(next - 1,
vals[0]);
473 template<
typename T2>
474 void initialize_shape_from_array_shape(
int,
const T2 &) {
478 template<
typename Array,
size_t N>
479 static int dimensionality_of_array(
Array (&
vals)[
N]) {
480 return dimensionality_of_array(
vals[0]) + 1;
483 template<
typename T2>
484 static int dimensionality_of_array(
const T2 &) {
489 template<
typename Array,
size_t N>
491 return scalar_type_of_array(
vals[0]);
494 template<
typename T2>
500 void crop_host(
int d,
int min,
int extent) {
502 assert(dim(d).
max() >= min + extent - 1);
504 if (buf.
host !=
nullptr) {
505 buf.
host += (
shift * dim(d).stride()) * type().bytes();
512 void crop_host(
const std::vector<std::pair<int, int>> &
rect) {
537 void slice_host(
int d,
int pos) {
540 assert(d >= 0 && d < dimensions());
544 if (buf.
host !=
nullptr) {
596 return min() + extent() - 1;
607 return val != other.
val;
622 return {min() + extent()};
642 return dim(
i).extent();
645 return dim(
i).stride();
652 return buf.number_of_elements();
657 if constexpr (has_static_dimensions) {
673 return (T *)buf.begin();
679 return (T *)buf.end();
684 return buf.size_in_bytes();
696 buf.
type = static_halide_type();
706 assert(T_is_void || buf.
type == static_halide_type());
707 initialize_from_buffer(buf, ownership);
711 template<
typename T2,
int D2,
int S2>
715 template<
typename T2,
int D2,
int S2>
716 static void static_assert_can_convert_from() {
717 static_assert((!std::is_const<T2>::value || std::is_const<T>::value),
718 "Can't convert from a Buffer<const T> to a Buffer<T>");
719 static_assert(std::is_same<typename std::remove_const<T>::type,
720 typename std::remove_const<T2>::type>::value ||
722 "type mismatch constructing Buffer");
724 "Can't convert from a Buffer with static dimensionality to a Buffer with different static dimensionality");
738 template<
typename T2,
int D2,
int S2>
742 if (
other.type() != static_halide_type()) {
756 template<
typename T2,
int D2,
int S2>
770 dev_ref_count =
other.dev_ref_count;
771 copy_shape_from(
other.buf);
780 template<
typename T2,
int D2,
int S2>
784 assert_can_convert_from(
other);
786 dev_ref_count =
other.dev_ref_count;
787 copy_shape_from(
other.buf);
794 dev_ref_count(
other.dev_ref_count) {
795 other.dev_ref_count =
nullptr;
796 other.alloc =
nullptr;
803 template<
typename T2,
int D2,
int S2>
807 dev_ref_count(
other.dev_ref_count) {
808 assert_can_convert_from(
other);
809 other.dev_ref_count =
nullptr;
810 other.alloc =
nullptr;
817 template<
typename T2,
int D2,
int S2>
819 if ((
const void *)
this == (
const void *)&
other) {
822 assert_can_convert_from(
other);
825 dev_ref_count =
other.dev_ref_count;
827 free_shape_storage();
829 copy_shape_from(
other.buf);
836 if ((
const void *)
this == (
const void *)&
other) {
841 dev_ref_count =
other.dev_ref_count;
843 free_shape_storage();
845 copy_shape_from(
other.buf);
852 template<
typename T2,
int D2,
int S2>
854 assert_can_convert_from(
other);
857 other.alloc =
nullptr;
858 dev_ref_count =
other.dev_ref_count;
859 other.dev_ref_count =
nullptr;
860 free_shape_storage();
870 other.alloc =
nullptr;
871 dev_ref_count =
other.dev_ref_count;
872 other.dev_ref_count =
nullptr;
873 free_shape_storage();
881 size_t size = type().bytes();
882 for (
int i = 0;
i < dimensions();
i++) {
883 size *= dim(
i).extent();
887 for (
int i = 0;
i < dimensions();
i++) {
888 size /= dim(
i).extent();
890 assert(size == (
size_t)type().bytes() &&
"Error: Overflow computing total size of buffer.");
896 void (*deallocate_fn)(
void *) =
nullptr) {
905 const auto align_up = [=](
size_t value) ->
size_t {
906 return (value + alignment - 1) & ~(alignment - 1);
909 size_t size = size_in_bytes();
911#if HALIDE_RUNTIME_BUFFER_USE_ALIGNED_ALLOC
933 if (!deallocate_fn) {
935 if (!deallocate_fn) {
936 deallocate_fn =
free;
948 (
int)
sizeof(std::max_align_t)));
974 template<
typename...
Args,
975 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
978 assert(static_halide_type() == t);
980 int extents[] = {first, (
int)rest...};
984 initialize_shape(extents);
998 static_assert(!T_is_void,
999 "To construct an Buffer<void>, pass a halide_type_t as the first argument to the constructor");
1000 int extents[] = {first};
1001 buf.
type = static_halide_type();
1004 initialize_shape(extents);
1011 template<
typename...
Args,
1012 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
1014 static_assert(!T_is_void,
1015 "To construct an Buffer<void>, pass a halide_type_t as the first argument to the constructor");
1016 int extents[] = {first,
second, (
int)rest...};
1017 buf.
type = static_halide_type();
1020 initialize_shape(extents);
1031 assert(static_halide_type() == t);
1035 make_shape_storage((
int)
sizes.size());
1036 initialize_shape(
sizes);
1050 static std::vector<int> make_ordered_sizes(
const std::vector<int> &
sizes,
const std::vector<int> &order) {
1053 for (
size_t i = 0;
i <
sizes.size(); ++
i) {
1075 template<
typename Array,
size_t N>
1078 buf.
type = scalar_type_of_array(
vals);
1088 template<
typename...
Args,
1089 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
1092 assert(static_halide_type() == t);
1094 int extents[] = {first, (
int)rest...};
1099 initialize_shape(extents);
1105 template<
typename...
Args,
1106 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
1108 int extents[] = {first, (
int)rest...};
1109 buf.
type = static_halide_type();
1113 initialize_shape(extents);
1121 buf.
type = static_halide_type();
1123 make_shape_storage((
int)
sizes.size());
1124 initialize_shape(
sizes);
1133 assert(static_halide_type() == t);
1137 make_shape_storage((
int)
sizes.size());
1138 initialize_shape(
sizes);
1146 assert(static_halide_type() == t);
1150 make_shape_storage(d);
1151 for (
int i = 0;
i < d;
i++) {
1152 buf.
dim[
i] = shape[
i];
1160 const std::vector<halide_dimension_t> &shape)
1161 :
Buffer(t, data, (
int)shape.size(), shape.data()) {
1168 buf.
type = static_halide_type();
1170 make_shape_storage(d);
1171 for (
int i = 0;
i < d;
i++) {
1172 buf.
dim[
i] = shape[
i];
1179 explicit Buffer(T *data,
const std::vector<halide_dimension_t> &shape)
1180 :
Buffer(data, (
int)shape.size(), shape.data()) {
1188 free_shape_storage();
1215 template<
typename T2,
int D2 = Dims>
1228 template<
typename T2,
int D2 = Dims>
1241 template<
typename T2,
int D2 = Dims>
1277 template<
typename TVoid,
1279 typename =
typename std::enable_if<std::is_same<TVoid, void>::value &&
1280 !std::is_void<T2>::value &&
1281 !std::is_const<T2>::value>::type>
1288 template<
typename TVoid,
1290 typename =
typename std::enable_if<std::is_same<TVoid, void>::value &&
1291 !std::is_void<T2>::value &&
1292 std::is_const<T2>::value>::type>
1300 return (dimensions() > 0) ? dim(0).extent() : 1;
1303 return (dimensions() > 1) ? dim(1).extent() : 1;
1306 return (dimensions() > 2) ? dim(2).extent() : 1;
1313 return dim(0).min();
1317 return dim(0).max();
1321 return dim(1).min();
1325 return dim(1).max();
1345 void (*deallocate_fn)(
void *) =
nullptr)
const {
1356 void (*deallocate_fn)(
void *) =
nullptr)
const {
1358 assert(dimensions() == 3);
1370 void (*deallocate_fn)(
void *) =
nullptr)
const {
1371 std::vector<int> mins, extents;
1372 const int dims = dimensions();
1374 extents.reserve(dims);
1375 for (
int d = 0; d < dims; ++d) {
1376 mins.push_back(dim(d).
min());
1377 extents.push_back(dim(d).extent());
1408 template<
typename T2,
int D2,
int S2>
1410 static_assert(!std::is_const<T>::value,
"Cannot call copy_from() on a Buffer<const T>");
1411 assert(!device_dirty() &&
"Cannot call Halide::Runtime::Buffer::copy_from on a device dirty destination.");
1412 assert(!src.
device_dirty() &&
"Cannot call Halide::Runtime::Buffer::copy_from on a device dirty source.");
1420 const int d = dimensions();
1421 for (
int i = 0;
i < d;
i++) {
1436 if (T_is_void ? (type().bytes() == 1) : (
sizeof(not_void_T) == 1)) {
1441 }
else if (T_is_void ? (type().bytes() == 2) : (
sizeof(not_void_T) == 2)) {
1446 }
else if (T_is_void ? (type().bytes() == 4) : (
sizeof(not_void_T) == 4)) {
1451 }
else if (T_is_void ? (type().bytes() == 8) : (
sizeof(not_void_T) == 8)) {
1457 assert(
false &&
"type().bytes() must be 1, 2, 4, or 8");
1474 im.device_deallocate();
1476 im.crop_host(d, min, extent);
1478 complete_device_crop(
im);
1486 void crop(
int d,
int min,
int extent) {
1492 *
this = cropped(d, min, extent);
1494 crop_host(d, min, extent);
1510 im.device_deallocate();
1514 complete_device_crop(
im);
1523 void crop(
const std::vector<std::pair<int, int>> &
rect) {
1529 *
this = cropped(
rect);
1541 im.translate(d,
dx);
1549 device_deallocate();
1564 device_deallocate();
1576 assert(mins.size() <=
static_cast<decltype(mins.size())
>(dimensions()));
1577 device_deallocate();
1578 for (
size_t i = 0;
i < mins.size();
i++) {
1583 template<
typename...
Args>
1585 set_min(std::vector<int>{args...});
1593 for (
size_t i = 0;
i <
coords.size();
i++) {
1601 template<
typename...
Args>
1603 return contains(std::vector<int>{args...});
1635 assert((
int)order.size() == dimensions());
1636 if (dimensions() < 2) {
1645 transpose(
j,
j - 1);
1654 im.transpose(order);
1662 static_assert(
Dims ==
AnyDims ||
Dims > 0,
"Cannot slice a 0-dimensional buffer");
1663 assert(dimensions() > 0);
1670 im.device_deallocate();
1672 im.slice_host(d,
pos);
1674 complete_device_slice(
im, d,
pos);
1683 static_assert(
Dims ==
AnyDims ||
Dims > 0,
"Cannot slice a 0-dimensional buffer");
1684 assert(dimensions() > 0);
1686 return sliced(d, dim(d).
min());
1695 static_assert(
Dims ==
AnyDims,
"Cannot call slice() on a Buffer with static dimensionality.");
1696 assert(dimensions() > 0);
1703 *
this = sliced(d,
pos);
1711 slice(d, dim(d).
min());
1734 static_assert(
Dims ==
AnyDims,
"Cannot call embed() on a Buffer with static dimensionality.");
1735 assert(d >= 0 && d <= dimensions());
1737 translate(dimensions() - 1,
pos);
1738 for (
int i = dimensions() - 1;
i > d;
i--) {
1739 transpose(
i,
i - 1);
1748 static_assert(
Dims ==
AnyDims,
"Cannot call add_dimension() on a Buffer with static dimensionality.");
1751 if (buf.
dim != shape) {
1754 for (
int i = 0;
i < dims;
i++) {
1762 for (
int i = 0;
i < dims;
i++) {
1763 buf.
dim[
i] = shape[
i];
1768 buf.
dim[dims] = {0, 1, 0};
1790 assert((!v || !device_dirty()) &&
"Cannot set host dirty when device is already dirty. Call copy_to_host() before accessing the buffer from host.");
1791 buf.set_host_dirty(v);
1799 return buf.device_dirty();
1803 return buf.host_dirty();
1807 assert((!v || !host_dirty()) &&
"Cannot set device dirty when host is already dirty.");
1808 buf.set_device_dirty(v);
1812 if (device_dirty()) {
1830 if (dev_ref_count) {
1832 "Can't call device_free on an unmanaged or wrapped native device handle. "
1833 "Free the source allocation or call device_detach_native instead.");
1836 "Multiple Halide::Runtime::Buffer objects share this device "
1837 "allocation. Freeing it would create dangling references. "
1838 "Don't call device_free on Halide buffers that you have copied or "
1839 "passed by value.");
1845 if (dev_ref_count) {
1846 delete dev_ref_count;
1847 dev_ref_count =
nullptr;
1854 assert(device_interface);
1857 return device_interface->
wrap_native(
ctx, &buf, handle, device_interface);
1863 "Only call device_detach_native on buffers wrapping a native "
1864 "device handle via device_wrap_native. This buffer was allocated "
1865 "using device_malloc, or is unmanaged. "
1866 "Call device_free or free the original allocation instead.");
1869 "Multiple Halide::Runtime::Buffer objects share this device "
1870 "allocation. Freeing it could create dangling references. "
1871 "Don't call device_detach_native on Halide buffers that you "
1872 "have copied or passed by value.");
1877 delete dev_ref_count;
1878 dev_ref_count =
nullptr;
1887 if (dev_ref_count) {
1889 "Can't call device_and_host_free on a device handle not allocated with device_and_host_malloc. "
1890 "Free the source allocation or call device_detach_native instead.");
1893 "Multiple Halide::Runtime::Buffer objects share this device "
1894 "allocation. Freeing it would create dangling references. "
1895 "Don't call device_and_host_free on Halide buffers that you have copied or "
1896 "passed by value.");
1902 if (dev_ref_count) {
1903 delete dev_ref_count;
1904 dev_ref_count =
nullptr;
1910 return buf.device_sync(
ctx);
1919 if (dev_ref_count ==
nullptr) {
1933 static_assert(
Dims ==
AnyDims ||
Dims == 3,
"make_interleaved() must be called on a Buffer that can represent 3 dimensions.");
1949 return make_interleaved(static_halide_type(), width, height, channels);
1955 static_assert(
Dims ==
AnyDims ||
Dims == 3,
"make_interleaved() must be called on a Buffer that can represent 3 dimensions.");
1964 return make_interleaved(static_halide_type(), data, width, height, channels);
1969 static_assert(
Dims ==
AnyDims ||
Dims == 0,
"make_scalar() must be called on a Buffer that can represent 0 dimensions.");
1977 static_assert(
Dims ==
AnyDims ||
Dims == 0,
"make_scalar() must be called on a Buffer that can represent 0 dimensions.");
1985 static_assert(
Dims ==
AnyDims ||
Dims == 0,
"make_scalar() must be called on a Buffer that can represent 0 dimensions.");
1993 template<
typename T2,
int D2,
int S2>
1996 void (*deallocate_fn)(
void *) =
nullptr) {
2008 void (*deallocate_fn)(
void *)) {
2010 std::vector<int>
swaps;
2011 for (
int i = dimensions - 1;
i > 0;
i--) {
2012 for (
int j =
i;
j > 0;
j--) {
2013 if (shape[
j - 1].stride > shape[
j].stride) {
2014 std::swap(shape[
j - 1], shape[
j]);
2022 for (
int i = 0;
i < dimensions;
i++) {
2031 while (!
swaps.empty()) {
2033 std::swap(shape[
j - 1], shape[
j]);
2045 template<
typename...
Args>
2048 offset_of(
int d,
int first,
Args... rest)
const {
2049#if HALIDE_RUNTIME_BUFFER_CHECK_INDICES
2061 template<
typename...
Args>
2064 address_of(
Args... args)
const {
2066 return (storage_T *)(this->buf.
host) + offset_of(0, args...) * type().bytes();
2068 return (storage_T *)(this->buf.
host) + offset_of(0, args...);
2075 for (
int i = this->dimensions() - 1;
i >= 0;
i--) {
2076#if HALIDE_RUNTIME_BUFFER_CHECK_INDICES
2086 storage_T *address_of(
const int *
pos)
const {
2088 return (storage_T *)this->buf.
host + offset_of(
pos) * type().bytes();
2090 return (storage_T *)this->buf.
host + offset_of(
pos);
2097 return (T *)(this->buf.
host);
2107 template<
typename...
Args,
2108 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
2110 static_assert(!T_is_void,
2111 "Cannot use operator() on Buffer<void> types");
2113 static_assert(
Dims ==
AnyDims ||
Dims ==
expected_dims,
"Buffer with static dimensions was accessed with the wrong number of coordinates in operator()");
2115 return *((
const not_void_T *)(address_of(first, rest...)));
2121 static_assert(!T_is_void,
2122 "Cannot use operator() on Buffer<void> types");
2124 static_assert(
Dims ==
AnyDims ||
Dims ==
expected_dims,
"Buffer with static dimensions was accessed with the wrong number of coordinates in operator()");
2126 return *((
const not_void_T *)(data()));
2132 static_assert(!T_is_void,
2133 "Cannot use operator() on Buffer<void> types");
2135 return *((
const not_void_T *)(address_of(
pos)));
2138 template<
typename...
Args,
2139 typename =
typename std::enable_if<
AllInts<
Args...>::value>::type>
2143 static_assert(!T_is_void,
2144 "Cannot use operator() on Buffer<void> types");
2146 static_assert(
Dims ==
AnyDims ||
Dims ==
expected_dims,
"Buffer with static dimensions was accessed with the wrong number of coordinates in operator()");
2148 return *((not_void_T *)(address_of(first, rest...)));
2154 static_assert(!T_is_void,
2155 "Cannot use operator() on Buffer<void> types");
2157 static_assert(
Dims ==
AnyDims ||
Dims ==
expected_dims,
"Buffer with static dimensions was accessed with the wrong number of coordinates in operator()");
2159 return *((not_void_T *)(data()));
2165 static_assert(!T_is_void,
2166 "Cannot use operator() on Buffer<void> types");
2168 return *((not_void_T *)(address_of(
pos)));
2174 bool all_equal =
true;
2175 for_each_element([&](
const int *
pos) { all_equal &= (*this)(
pos) == val; });
2181 for_each_value([=](T &v) { v = val; });
2189 struct for_each_value_task_dim {
2190 std::ptrdiff_t extent;
2191 std::ptrdiff_t stride[
N];
2197 template<
typename Ptr,
typename...
Ptrs>
2200 advance_ptrs(stride + 1,
ptrs...);
2204 static void advance_ptrs(
const std::ptrdiff_t *) {
2207 template<
typename Fn,
typename Ptr,
typename...
Ptrs>
2209 const for_each_value_task_dim<
sizeof...(
Ptrs) + 1> *t,
Ptr ptr,
Ptrs...
ptrs) {
2212 Ptr end = ptr + t[0].extent;
2213 while (ptr != end) {
2214 f(*ptr++, (*
ptrs++)...);
2217 for (std::ptrdiff_t
i = t[0].extent;
i != 0;
i--) {
2218 f(*ptr, (*ptrs)...);
2219 advance_ptrs(t[0].stride, ptr,
ptrs...);
2223 for (std::ptrdiff_t
i = t[d].extent;
i != 0;
i--) {
2225 advance_ptrs(t[d].stride, ptr,
ptrs...);
2234 const int dimensions = buffers[0]->
dimensions;
2238 for (
int i = 0;
i <
N;
i++) {
2239 if (buffers[
i]->device) {
2241 "Buffer passed to for_each_value has device allocation but no host allocation. Call allocate() and copy_to_host() first");
2242 assert(!buffers[
i]->device_dirty() &&
2243 "Buffer passed to for_each_value is dirty on device. Call copy_to_host() first");
2246 "Buffer passed to for_each_value has no host or device allocation");
2251 for (
int i = 0;
i < dimensions;
i++) {
2252 for (
int j = 0;
j <
N;
j++) {
2253 assert(buffers[
j]->dimensions == dimensions);
2254 assert(buffers[
j]->dim[
i].extent == buffers[0]->dim[
i].extent &&
2255 buffers[
j]->dim[
i].min == buffers[0]->dim[
i].min);
2264 for (
int j =
i;
j > 0 && t[
j].stride[
N - 1] < t[
j - 1].stride[
N - 1];
j--) {
2265 std::swap(t[
j], t[
j - 1]);
2272 for (
int i = 1;
i < d;
i++) {
2274 for (
int j = 0;
j <
N;
j++) {
2275 flat =
flat && t[
i - 1].stride[
j] * t[
i - 1].extent == t[
i].stride[
j];
2278 t[
i - 1].extent *= t[
i].extent;
2279 for (
int j =
i;
j < d - 1;
j++) {
2291 for (
int i = 0;
i <
N;
i++) {
2298 template<
typename Fn,
typename...
Args,
int N =
sizeof...(Args) + 1>
2300 if (dimensions() > 0) {
2339 template<
typename Fn,
typename...
Args,
int N =
sizeof...(Args) + 1>
2341 for_each_value_impl(f, std::forward<Args>(
other_buffers)...);
2345 template<
typename Fn,
typename...
Args,
int N =
sizeof...(Args) + 1>
2349 for_each_value_impl(f, std::forward<Args>(
other_buffers)...);
2356 struct for_each_element_task_dim {
2363 template<
typename Fn,
2365 typename =
decltype(std::declval<Fn>()(std::declval<Args>()...))>
2366 HALIDE_ALWAYS_INLINE static void for_each_element_variadic(
int,
int,
const for_each_element_task_dim *,
Fn &&f,
Args... args) {
2372 template<
typename Fn,
2374 HALIDE_ALWAYS_INLINE static void for_each_element_variadic(
double,
int d,
const for_each_element_task_dim *t,
Fn &&f,
Args... args) {
2375 for (
int i = t[d].min;
i <= t[d].
max;
i++) {
2376 for_each_element_variadic(0, d - 1, t, std::forward<Fn>(f),
i, args...);
2382 template<
typename Fn,
2384 typename =
decltype(std::declval<Fn>()(std::declval<Args>()...))>
2386 return (
int)(
sizeof...(Args));
2392 template<
typename Fn,
2395 static_assert(
sizeof...(args) <= 256,
2396 "Callable passed to for_each_element must accept either a const int *,"
2397 " or up to 256 ints. No such operator found. Expect infinite template recursion.");
2398 return num_args(0, std::forward<Fn>(f), 0, args...);
2408 typename =
typename std::enable_if<(d >= 0)>::type>
2409 HALIDE_ALWAYS_INLINE static void for_each_element_array_helper(
int,
const for_each_element_task_dim *t,
Fn &&f,
int *
pos) {
2410 for (
pos[d] = t[d].min;
pos[d] <= t[d].
max;
pos[d]++) {
2411 for_each_element_array_helper<d - 1>(0, t, std::forward<Fn>(f),
pos);
2418 typename =
typename std::enable_if<(d < 0)>::type>
2419 HALIDE_ALWAYS_INLINE static void for_each_element_array_helper(
double,
const for_each_element_task_dim *t,
Fn &&f,
int *
pos) {
2428 template<
typename Fn>
2429 static void for_each_element_array(
int d,
const for_each_element_task_dim *t,
Fn &&f,
int *
pos) {
2432 }
else if (d == 0) {
2437 }
else if (d == 1) {
2439 }
else if (d == 2) {
2441 }
else if (d == 3) {
2444 for (
pos[d] = t[d].min;
pos[d] <= t[d].
max;
pos[d]++) {
2445 for_each_element_array(d - 1, t, std::forward<Fn>(f),
pos);
2453 template<
typename Fn,
2454 typename =
decltype(std::declval<Fn>()((
const int *)
nullptr))>
2455 static void for_each_element(
int,
int dims,
const for_each_element_task_dim *t,
Fn &&f,
int check = 0) {
2456 const int size = dims *
sizeof(
int);
2461 for_each_element_array(dims - 1, t, std::forward<Fn>(f),
pos);
2466 template<
typename Fn>
2467 HALIDE_ALWAYS_INLINE static void for_each_element(
double,
int dims,
const for_each_element_task_dim *t,
Fn &&f) {
2468 int args = num_args(0, std::forward<Fn>(f));
2470 for_each_element_variadic(0, args - 1, t, std::forward<Fn>(f));
2473 template<
typename Fn>
2474 void for_each_element_impl(
Fn &&f)
const {
2475 for_each_element_task_dim *t =
2476 (for_each_element_task_dim *)
HALIDE_ALLOCA(dimensions() *
sizeof(for_each_element_task_dim));
2477 for (
int i = 0;
i < dimensions();
i++) {
2478 t[
i].min = dim(
i).min();
2479 t[
i].max = dim(
i).max();
2481 for_each_element(0, dimensions(), t, std::forward<Fn>(f));
2542 template<
typename Fn>
2544 for_each_element_impl(f);
2548 template<
typename Fn>
2552 for_each_element_impl(f);
2558 template<
typename Fn>
2563 template<
typename...
Args,
2564 typename =
decltype(std::declval<Fn>()(std::declval<Args>()...))>
2565 void operator()(
Args... args) {
2566 (*buf)(args...) = f(args...);
2579 template<
typename Fn,
2580 typename =
typename std::enable_if<!std::is_arithmetic<typename std::decay<Fn>::type>::value>::type>
2583 FillHelper<Fn>
wrapper(std::forward<Fn>(f),
this);
2584 return for_each_element(
wrapper);
2592 return buf.is_bounds_query();
2601#if defined(__has_feature)
2602#if __has_feature(memory_sanitizer)
#define HALIDE_RUNTIME_BUFFER_ALLOCATION_ALIGNMENT
This file declares the routines used by Halide internally in its runtime.
#define HALIDE_NEVER_INLINE
@ halide_error_code_success
There was no error.
#define HALIDE_ALWAYS_INLINE
struct halide_buffer_t halide_buffer_t
The raw representation of an image passed around by generated Halide code.
Read-only access to the shape.
HALIDE_ALWAYS_INLINE int min() const
The lowest coordinate in this dimension.
Dimension(const halide_dimension_t &dim)
HALIDE_ALWAYS_INLINE int max() const
The highest coordinate in this dimension.
HALIDE_ALWAYS_INLINE iterator end() const
An iterator that points to one past the max coordinate.
HALIDE_ALWAYS_INLINE int stride() const
The number of elements in memory you have to step over to increment this coordinate by one.
HALIDE_ALWAYS_INLINE iterator begin() const
An iterator that points to the min coordinate.
HALIDE_ALWAYS_INLINE int extent() const
The extent of the image along this dimension.
A templated Buffer class that wraps halide_buffer_t and adds functionality.
Buffer< T, Dims, InClassDimStorage > & operator=(const Buffer< T2, D2, S2 > &other)
Assign from another Buffer of possibly-different dimensionality and type.
Buffer< not_const_T, Dims, InClassDimStorage > copy_to_planar(void *(*allocate_fn)(size_t)=nullptr, void(*deallocate_fn)(void *)=nullptr) const
Like copy(), but the copy is created in planar memory layout (vs.
Buffer< T, Dims, InClassDimStorage > transposed(const std::vector< int > &order) const
Make a buffer which refers to the same data in the same layout using a different ordering of the dime...
void translate(int d, int delta)
Translate an image in-place along one dimension by changing how it is indexed.
Buffer(const halide_buffer_t &buf, BufferDeviceOwnership ownership=BufferDeviceOwnership::Unmanaged)
Make a Buffer from a halide_buffer_t.
void allocate(void *(*allocate_fn)(size_t)=nullptr, void(*deallocate_fn)(void *)=nullptr)
Allocate memory for this Buffer.
Buffer< not_const_T, Dims, InClassDimStorage > copy(void *(*allocate_fn)(size_t)=nullptr, void(*deallocate_fn)(void *)=nullptr) const
Make a new image which is a deep copy of this image.
Buffer< T,(Dims==AnyDims ? AnyDims :Dims+1)> embedded(int d, int pos=0) const
Make a new buffer that views this buffer as a single slice in a higher-dimensional space.
void add_dimension()
Add a new dimension with a min of zero and an extent of one.
void slice(int d)
Slice a buffer in-place at the dimension's minimum.
static void set_default_allocate_fn(void *(*allocate_fn)(size_t))
bool owns_host_memory() const
Does this Buffer own the host memory it refers to?
int width() const
Conventional names for the first three dimensions.
void transpose(const std::vector< int > &order)
A generalized transpose: instead of swapping two dimensions, pass a vector that lists each dimension ...
void set_min(const std::vector< int > &mins)
Set the min coordinate of an image in the first N dimensions.
HALIDE_ALWAYS_INLINE Buffer< T, Dims, InClassDimStorage > & for_each_element(Fn &&f)
Buffer(halide_type_t t, add_const_if_T_is_const< void > *data, const std::vector< int > &sizes)
Initialize an Buffer of runtime type from a pointer and a vector of sizes.
HALIDE_ALWAYS_INLINE Buffer< T2, D2, InClassDimStorage > as() &&
Return an rval reference to this Buffer.
int copy_to_host(void *ctx=nullptr)
Buffer(halide_type_t t, const std::vector< int > &sizes)
Allocate a new image of unknown type using a vector of ints as the size.
int device_malloc(const struct halide_device_interface_t *device_interface, void *ctx=nullptr)
int device_free(void *ctx=nullptr)
bool contains(Args... args) const
void crop(const std::vector< std::pair< int, int > > &rect)
Crop an image in-place along the first N dimensions.
HALIDE_ALWAYS_INLINE const Buffer< typename std::add_const< T >::type, Dims, InClassDimStorage > & as_const() const &
void set_device_dirty(bool v=true)
HALIDE_ALWAYS_INLINE const not_void_T & operator()(const int *pos) const
Buffer(T *data, int d, const halide_dimension_t *shape)
Initialize an Buffer from a pointer to the min coordinate and an array describing the shape.
Buffer(Buffer< T2, D2, S2 > &&other)
Move-construct a Buffer from a Buffer of different dimensionality and type.
void slice(int d, int pos)
Rewrite the buffer to refer to a single lower-dimensional slice of itself along the given dimension a...
HALIDE_ALWAYS_INLINE const not_void_T & operator()(int first, Args... rest) const
Access elements.
HALIDE_ALWAYS_INLINE void set_host_dirty(bool v=true)
Methods for managing any GPU allocation.
void msan_check_mem_is_initialized(bool entire=false) const
Convenient check to verify that all of the interesting bytes in the Buffer are initialized under MSAN...
HALIDE_ALWAYS_INLINE Buffer< typename std::add_const< T >::type, Dims, InClassDimStorage > as_const() &&
Buffer< T, Dims, InClassDimStorage > & operator=(Buffer< T, Dims, InClassDimStorage > &&other) noexcept
Standard move-assignment operator.
int device_detach_native(void *ctx=nullptr)
int device_wrap_native(const struct halide_device_interface_t *device_interface, uint64_t handle, void *ctx=nullptr)
Buffer< T, Dims, InClassDimStorage > translated(const std::vector< int > &delta) const
Make an image which refers to the same data translated along the first N dimensions.
HALIDE_ALWAYS_INLINE Dimension dim(int i) const
Access the shape of the buffer.
Buffer(int first, int second, Args... rest)
HALIDE_ALWAYS_INLINE Buffer< typename std::add_const< T >::type, Dims, InClassDimStorage > & as_const() &
as_const() is syntactic sugar for .as<const T>(), to avoid the need to recapitulate the type argument...
Buffer< T, Dims, InClassDimStorage > transposed(int d1, int d2) const
Make a buffer which refers to the same data in the same layout using a swapped indexing order for the...
HALIDE_ALWAYS_INLINE Buffer< T, Dims, InClassDimStorage > & for_each_value(Fn &&f, Args &&...other_buffers)
HALIDE_ALWAYS_INLINE not_void_T & operator()()
BufferDeviceOwnership device_ownership() const
Return the method by which the device field is managed.
void check_overflow()
Check the product of the extents fits in memory.
static bool can_convert_from(const Buffer< T2, D2, S2 > &other)
Determine if a Buffer<T, Dims, InClassDimStorage> can be constructed from some other Buffer type.
Buffer< not_const_T, Dims, InClassDimStorage > copy_to_interleaved(void *(*allocate_fn)(size_t)=nullptr, void(*deallocate_fn)(void *)=nullptr) const
Like copy(), but the copy is created in interleaved memory layout (vs.
int device_and_host_malloc(const struct halide_device_interface_t *device_interface, void *ctx=nullptr)
int device_sync(void *ctx=nullptr)
static Buffer< void, Dims, InClassDimStorage > make_interleaved(halide_type_t t, int width, int height, int channels)
If you use the (x, y, c) indexing convention, then Halide Buffers are stored planar by default.
Buffer(const std::vector< int > &sizes)
Allocate a new image of known type using a vector of ints as the size.
void embed(int d, int pos=0)
Embed a buffer in-place, increasing the dimensionality.
static constexpr halide_type_t static_halide_type()
Get the Halide type of T.
Buffer(T *data, int first, Args &&...rest)
Initialize a Buffer from a pointer and some sizes.
int copy_to_device(const struct halide_device_interface_t *device_interface, void *ctx=nullptr)
Buffer(Array(&vals)[N])
Make a Buffer that refers to a statically sized array.
const halide_buffer_t * raw_buffer() const
HALIDE_ALWAYS_INLINE not_void_T & operator()(int first, Args... rest)
static Buffer< T, Dims, InClassDimStorage > make_interleaved(int width, int height, int channels)
If you use the (x, y, c) indexing convention, then Halide Buffers are stored planar by default.
halide_type_t type() const
Get the type of the elements.
int device_and_host_free(const struct halide_device_interface_t *device_interface, void *ctx=nullptr)
Buffer(int first)
Allocate a new image of the given size.
halide_buffer_t * raw_buffer()
Get a pointer to the raw halide_buffer_t this wraps.
T * end() const
A pointer to one beyond the element with the highest address.
HALIDE_ALWAYS_INLINE bool device_dirty() const
Buffer< T, Dims, InClassDimStorage > cropped(const std::vector< std::pair< int, int > > &rect) const
Make an image that refers to a sub-rectangle of this image along the first N dimensions.
static constexpr int static_dimensions()
Callers should not use the result if has_static_dimensions is false.
void transpose(int d1, int d2)
Transpose a buffer in-place by changing how it is indexed.
void deallocate()
Drop reference to any owned host or device memory, possibly freeing it, if this buffer held the last ...
size_t size_in_bytes() const
The total number of bytes spanned by the data in memory.
bool has_device_allocation() const
void reset()
Reset the Buffer to be equivalent to a default-constructed Buffer of the same static type (if any); B...
Buffer(halide_type_t t, int first, Args... rest)
Allocate a new image of the given size with a runtime type.
int dimensions() const
Get the dimensionality of the buffer.
Buffer(halide_type_t t, add_const_if_T_is_const< void > *data, int d, const halide_dimension_t *shape)
Initialize a Buffer from a pointer to the min coordinate and an array describing the shape.
int min(int i) const
Access to the mins, strides, extents.
HALIDE_ALWAYS_INLINE const Buffer< T, Dims, InClassDimStorage > & for_each_element(Fn &&f) const
Call a function at each site in a buffer.
void device_deallocate()
Drop reference to any owned device memory, possibly freeing it if this buffer held the last reference...
HALIDE_ALWAYS_INLINE const not_void_T & operator()() const
static Buffer< T, Dims, InClassDimStorage > make_scalar()
Make a zero-dimensional Buffer.
void add_dimension_with_stride(int s)
Add a new dimension with a min of zero, an extent of one, and the specified stride.
Buffer(Buffer< T, Dims, InClassDimStorage > &&other) noexcept
Move constructor.
Buffer< T, Dims, InClassDimStorage > cropped(int d, int min, int extent) const
Make an image that refers to a sub-range of this image along the given dimension.
void crop(int d, int min, int extent)
Crop an image in-place along the given dimension.
Buffer< T, Dims, InClassDimStorage > & fill(Fn &&f)
Fill a buffer by evaluating a callable at every site.
static Buffer< T, Dims, InClassDimStorage > make_scalar(T *data)
Make a zero-dimensional Buffer that points to non-owned, existing data.
Buffer< T, Dims, InClassDimStorage > alias() const
Make a copy of the Buffer which shares the underlying host and/or device allocations as the existing ...
void set_min(Args... args)
size_t number_of_elements() const
The total number of elements this buffer represents.
static void assert_can_convert_from(const Buffer< T2, D2, S2 > &other)
Fail an assertion at runtime or compile-time if a Buffer<T, Dims, InClassDimStorage> cannot be const...
void translate(const std::vector< int > &delta)
Translate an image along the first N dimensions by changing how it is indexed.
Buffer(const Buffer< T, Dims, InClassDimStorage > &other)
Copy constructor.
HALIDE_ALWAYS_INLINE not_void_T & operator()(const int *pos)
T * data() const
Get a pointer to the address of the min coordinate.
Buffer< T, Dims, InClassDimStorage > & fill(not_void_T val)
Buffer(const std::vector< int > &sizes, const std::vector< int > &storage_order)
Buffer< T, Dims, InClassDimStorage > & operator=(Buffer< T2, D2, S2 > &&other)
Move from another Buffer of possibly-different dimensionality and type.
Buffer(halide_type_t t, const std::vector< int > &sizes, const std::vector< int > &storage_order)
Allocate a new image of unknown type using a vector of ints as the size and a vector of indices indic...
Buffer(halide_type_t t, add_const_if_T_is_const< void > *data, const std::vector< halide_dimension_t > &shape)
Initialize a Buffer from a pointer to the min coordinate and a vector describing the shape.
Buffer< T,(Dims==AnyDims ? AnyDims :Dims - 1)> sliced(int d, int pos) const
Make a lower-dimensional buffer that refers to one slice of this buffer.
static Buffer< add_const_if_T_is_const< void >, Dims, InClassDimStorage > make_interleaved(halide_type_t t, T *data, int width, int height, int channels)
Wrap an existing interleaved image.
HALIDE_ALWAYS_INLINE const Buffer< T, Dims, InClassDimStorage > & for_each_value(Fn &&f, Args &&...other_buffers) const
Call a function on every value in the buffer, and the corresponding values in some number of other bu...
bool is_bounds_query() const
Check if an input buffer passed to an extern stage is a bounds query.
Buffer< T,(Dims==AnyDims ? AnyDims :Dims - 1)> sliced(int d) const
Make a lower-dimensional buffer that refers to one slice of this buffer at the dimension's minimum.
int left() const
Conventional names for the min and max value of each dimension.
void copy_from(Buffer< T2, D2, S2 > src)
Fill a Buffer with the values at the same coordinates in another Buffer.
Buffer< T, Dims, InClassDimStorage > translated(int d, int dx) const
Make an image which refers to the same data with using translated coordinates in the given dimension.
static Buffer< T, Dims, InClassDimStorage > make_interleaved(T *data, int width, int height, int channels)
Wrap an existing interleaved image.
static void set_default_deallocate_fn(void(*deallocate_fn)(void *))
static Buffer< T, Dims, InClassDimStorage > make_with_shape_of(Buffer< T2, D2, S2 > src, void *(*allocate_fn)(size_t)=nullptr, void(*deallocate_fn)(void *)=nullptr)
Make a buffer with the same shape and memory nesting order as another buffer.
Buffer(const Buffer< T2, D2, S2 > &other)
Construct a Buffer from a Buffer of different dimensionality and type.
bool contains(const std::vector< int > &coords) const
Test if a given coordinate is within the bounds of an image.
Buffer(T *data, const std::vector< halide_dimension_t > &shape)
Initialize a Buffer from a pointer to the min coordinate and a vector describing the shape.
Buffer(T *data, const std::vector< int > &sizes)
Initialize a Buffer from a pointer and a vector of sizes.
Buffer< T, Dims, InClassDimStorage > & operator=(const Buffer< T, Dims, InClassDimStorage > &other)
Standard assignment operator.
T * begin() const
A pointer to the element with the lowest address.
bool all_equal(not_void_T val) const
Tests that all values in this buffer are equal to val.
Buffer(halide_type_t t, add_const_if_T_is_const< void > *data, int first, Args &&...rest)
Initialize a Buffer of runtime type from a pointer and some sizes.
HALIDE_ALWAYS_INLINE Buffer< T2, D2, InClassDimStorage > & as() &
Return a typed reference to this Buffer.
HALIDE_ALWAYS_INLINE const Buffer< T2, D2, InClassDimStorage > & as() const &
Return a const typed reference to this Buffer.
static Buffer< add_const_if_T_is_const< void >, Dims, InClassDimStorage > make_scalar(halide_type_t t)
Make a zero-dimensional Buffer.
auto end(reverse_adaptor< T > i)
bool any_zero(const Container &c)
BufferDeviceOwnership
This indicates how to deallocate the device for a Halide::Runtime::Buffer.
@ AllocatedDeviceAndHost
Call device_and_host_free when device ref count goes to zero.
@ WrappedNative
halide_device_detach_native will be called when device ref count goes to zero
@ Unmanaged
No free routine will be called when device ref count goes to zero
@ Cropped
Call halide_device_release_crop when device ref count goes to zero.
This file defines the class FunctionDAG, which is our representation of a Halide pipeline,...
@ Internal
Not visible externally, similar to 'static' linkage in C.
Expr min(const FuncRef &a, const FuncRef &b)
Explicit overloads of min and max for FuncRef.
Internal::ConstantInterval cast(Type t, const Internal::ConstantInterval &a)
Cast operators for ConstantIntervals.
Expr max(const FuncRef &a, const FuncRef &b)
unsigned __INT64_TYPE__ uint64_t
__UINTPTR_TYPE__ uintptr_t
ALWAYS_INLINE T align_up(T p, size_t alignment)
unsigned __INT8_TYPE__ uint8_t
__PTRDIFF_TYPE__ ptrdiff_t
unsigned __INT16_TYPE__ uint16_t
void * memcpy(void *s1, const void *s2, size_t n)
void * memset(void *s, int val, size_t n)
unsigned __INT32_TYPE__ uint32_t
int64_t min
The lower and upper bound of the interval.
An iterator class, so that you can iterate over coordinates in a dimensions using a range-based for l...
bool operator!=(const iterator &other) const
A similar struct for managing device allocations.
BufferDeviceOwnership ownership
static void *(* default_allocate_fn)(size_t)
static void(* default_deallocate_fn)(void *)
The raw representation of an image passed around by generated Halide code.
int32_t dimensions
The dimensionality of the buffer.
halide_dimension_t * dim
The shape of the buffer.
uint64_t device
A device-handle for e.g.
uint8_t * host
A pointer to the start of the data in main memory.
struct halide_type_t type
The type of each buffer element.
const struct halide_device_interface_t * device_interface
The interface used to interpret the above handle.
Each GPU API provides a halide_device_interface_t struct pointing to the code that manages device all...
int(* device_slice)(void *user_context, const struct halide_buffer_t *src, int slice_dim, int slice_pos, struct halide_buffer_t *dst)
int(* device_and_host_malloc)(void *user_context, struct halide_buffer_t *buf, const struct halide_device_interface_t *device_interface)
int(* wrap_native)(void *user_context, struct halide_buffer_t *buf, uint64_t handle, const struct halide_device_interface_t *device_interface)
int(* device_release_crop)(void *user_context, struct halide_buffer_t *buf)
int(* device_crop)(void *user_context, const struct halide_buffer_t *src, struct halide_buffer_t *dst)
int(* copy_to_host)(void *user_context, struct halide_buffer_t *buf)
int(* copy_to_device)(void *user_context, struct halide_buffer_t *buf, const struct halide_device_interface_t *device_interface)
int(* device_free)(void *user_context, struct halide_buffer_t *buf)
int(* detach_native)(void *user_context, struct halide_buffer_t *buf)
int(* device_and_host_free)(void *user_context, struct halide_buffer_t *buf)
int(* device_malloc)(void *user_context, struct halide_buffer_t *buf, const struct halide_device_interface_t *device_interface)
A runtime tag for a type in the halide type system.