#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL

// main.h supplies the Eigen test harness (VERIFY_IS_EQUAL, CALL_SUBTEST,
// EIGEN_DECLARE_TEST) and brings the Eigen namespace into scope.
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
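
// Tensor::inflate(strides) is the inverse of striding: the coefficient at
// input index i along a dimension is placed at output index i * stride, with
// zeros in between. In one dimension, inflating [a, b, c] with stride 3
// yields [a, 0, 0, b, 0, 0, c]; an input of size d becomes
// (d - 1) * stride + 1 coefficients.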

// Inflate a 2x3x5x7 tensor on the SYCL device, first with unit strides
// (which leaves it unchanged) and then with strides {2, 4, 2, 3}.
template <typename DataType, int DataLayout, typename IndexType>
void test_simple_inflation_sycl(const Eigen::SyclDevice& sycl_device) {
  IndexType sizeDim1 = 2;
  IndexType sizeDim2 = 3;
  IndexType sizeDim3 = 5;
  IndexType sizeDim4 = 7;
  array<IndexType, 4> tensorRange = {{sizeDim1, sizeDim2, sizeDim3, sizeDim4}};
  Tensor<DataType, 4, DataLayout, IndexType> tensor(tensorRange);
  Tensor<DataType, 4, DataLayout, IndexType> no_stride(tensorRange);
  tensor.setRandom();

  // Start with unit strides in every dimension.
  array<IndexType, 4> strides;
  strides[0] = 1;
  strides[1] = 1;
  strides[2] = 1;
  strides[3] = 1;
  const size_t tensorBuffSize = tensor.size() * sizeof(DataType);
  DataType* gpu_data_tensor = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));
  DataType* gpu_data_no_stride = static_cast<DataType*>(sycl_device.allocate(tensorBuffSize));

  // Map the raw device buffers as tensors with the same shape as the host tensors.
  TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_tensor(gpu_data_tensor, tensorRange);
  TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_no_stride(gpu_data_no_stride, tensorRange);

  // Upload the input, run inflate() on the device, and copy the result back.
  sycl_device.memcpyHostToDevice(gpu_data_tensor, tensor.data(), tensorBuffSize);
  gpu_no_stride.device(sycl_device) = gpu_tensor.inflate(strides);
  sycl_device.memcpyDeviceToHost(no_stride.data(), gpu_data_no_stride, tensorBuffSize);
 
  // Unit strides make inflation a no-op: every coefficient stays in place.
  for (IndexType i = 0; i < 2; ++i) {
    for (IndexType j = 0; j < 3; ++j) {
      for (IndexType k = 0; k < 5; ++k) {
        for (IndexType l = 0; l < 7; ++l) {
          VERIFY_IS_EQUAL(tensor(i, j, k, l), no_stride(i, j, k, l));
        }
      }
    }
  }
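
// The second phase uses non-unit strides. A dimension of size d inflated with
// stride s has (d - 1) * s + 1 coefficients, so the 2x3x5x7 input inflated
// with strides {2, 4, 2, 3} has sizes {(2-1)*2+1, (3-1)*4+1, (5-1)*2+1,
// (7-1)*3+1} = {3, 9, 9, 19}, matching the expected dimensions below.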
 
  // Inflate the same input again, now with non-unit strides {2, 4, 2, 3}.
  strides[0] = 2;
  strides[1] = 4;
  strides[2] = 2;
  strides[3] = 3;

  IndexType inflatedSizeDim1 = 3;
  IndexType inflatedSizeDim2 = 9;
  IndexType inflatedSizeDim3 = 9;
  IndexType inflatedSizeDim4 = 19;
  array<IndexType, 4> inflatedTensorRange = {{inflatedSizeDim1, inflatedSizeDim2, inflatedSizeDim3, inflatedSizeDim4}};
  Tensor<DataType, 4, DataLayout, IndexType> inflated(inflatedTensorRange);

  const size_t inflatedTensorBuffSize = inflated.size() * sizeof(DataType);
  DataType* gpu_data_inflated = static_cast<DataType*>(sycl_device.allocate(inflatedTensorBuffSize));
  TensorMap<Tensor<DataType, 4, DataLayout, IndexType>> gpu_inflated(gpu_data_inflated, inflatedTensorRange);

  gpu_inflated.device(sycl_device) = gpu_tensor.inflate(strides);
  sycl_device.memcpyDeviceToHost(inflated.data(), gpu_data_inflated, inflatedTensorBuffSize);
 
  // Input coefficients land at indices that are multiples of the strides;
  // every other position of the inflated tensor must be zero.
  for (IndexType i = 0; i < inflatedSizeDim1; ++i) {
    for (IndexType j = 0; j < inflatedSizeDim2; ++j) {
      for (IndexType k = 0; k < inflatedSizeDim3; ++k) {
        for (IndexType l = 0; l < inflatedSizeDim4; ++l) {
          if (i % strides[0] == 0 && j % strides[1] == 0 &&
              k % strides[2] == 0 && l % strides[3] == 0) {
            VERIFY_IS_EQUAL(inflated(i, j, k, l),
                            tensor(i / strides[0], j / strides[1],
                                   k / strides[2], l / strides[3]));
          } else {
            VERIFY_IS_EQUAL(inflated(i, j, k, l), static_cast<DataType>(0));
          }
        }
      }
    }
  }
 
  // Release the device buffers.
  sycl_device.deallocate(gpu_data_tensor);
  sycl_device.deallocate(gpu_data_no_stride);
  sycl_device.deallocate(gpu_data_inflated);
}
 
// Run the inflation test on one SYCL device, for both storage orders.
template <typename DataType, typename DevSelector>
void sycl_inflation_test_per_device(DevSelector s) {
  QueueInterface queueInterface(s);
  auto sycl_device = Eigen::SyclDevice(&queueInterface);
  test_simple_inflation_sycl<DataType, RowMajor, int64_t>(sycl_device);
  test_simple_inflation_sycl<DataType, ColMajor, int64_t>(sycl_device);
}
EIGEN_DECLARE_TEST(cxx11_tensor_inflation_sycl) {
  for (const auto& device : Eigen::get_sycl_supported_devices()) {
    CALL_SUBTEST(sycl_inflation_test_per_device<float>(device));
  }
}