math/test/test_triangular_cdf_float.cu

// Copyright John Maddock 2016.
// Copyright Matt Borland 2024.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_MATH_OVERFLOW_ERROR_POLICY ignore_error
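// The macro above must be defined before any Boost.Math header is included; with
// ignore_error, overflow conditions are ignored instead of throwing an exception.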
#include <iostream>
#include <iomanip>
#include <vector>
#include <boost/math/distributions/triangular.hpp>
#include <boost/math/special_functions/relative_difference.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_real_distribution.hpp>
#include "cuda_managed_ptr.hpp"
#include "stopwatch.hpp"
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
typedef float float_type;
/**
 * CUDA kernel: evaluates the triangular distribution CDF for each input element.
 */
__global__ void cuda_test(const float_type *in1, float_type *out, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        // Triangular distribution on [0, 1] with mode 0.5, so its support matches the uniform [0, 1) test inputs
        out[i] = cdf(boost::math::triangular_distribution<float_type>(0, 0.5f, 1), in1[i]);
    }
}
/**
 * Host main routine
 */
int main(void)
{
    try
    {
        // Error code to check return values for CUDA calls
        cudaError_t err = cudaSuccess;

        // Print the vector length to be used
        int numElements = 50000;
        std::cout << "[Vector operation on " << numElements << " elements]" << std::endl;

        // Allocate the managed input vector
        cuda_managed_ptr<float_type> input_vector1(numElements);

        // Allocate the managed output vector
        cuda_managed_ptr<float_type> output_vector(numElements);
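        // The cuda_managed_ptr buffers are visible to both the host and the device,
        // so they can be filled here on the host and read directly by the kernel.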
        boost::random::mt19937 gen;
        boost::random::uniform_real_distribution<float_type> dist;

        // Initialize the input vector with random values in [0, 1)
        for (int i = 0; i < numElements; ++i)
        {
            input_vector1[i] = dist(gen);
        }
        // Configure and launch the CUDA kernel
        int threadsPerBlock = 256;
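        // Round up so that every element gets a thread even when numElements is not
        // an exact multiple of threadsPerBlock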
        int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
        std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;

        watch w;
        cuda_test<<<blocksPerGrid, threadsPerBlock>>>(input_vector1.get(), output_vector.get(), numElements);
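        // Kernel launches are asynchronous, so synchronize before stopping the timer
        // and before reading the managed output on the host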
        cudaDeviceSynchronize();
        std::cout << "CUDA kernel done in " << w.elapsed() << "s" << std::endl;
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            std::cerr << "Failed to launch cuda_test kernel (error code " << cudaGetErrorString(err) << ")!" << std::endl;
            return EXIT_FAILURE;
        }
        // Compute reference values on the host using the same distribution parameters as the kernel
        std::vector<float_type> results;
        results.reserve(numElements);
        w.reset();

        for (int i = 0; i < numElements; ++i)
        {
            results.push_back(cdf(boost::math::triangular_distribution<float_type>(0, 0.5f, 1), input_vector1[i]));
        }

        double t = w.elapsed();
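        // The device results may differ from the host results by a few ulps because of
        // differences in device floating-point arithmetic, so allow up to 100 eps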
        // check the results
        for (int i = 0; i < numElements; ++i)
        {
            if (std::isfinite(results[i]))
            {
                if (boost::math::epsilon_difference(output_vector[i], results[i]) > 100.0)
                {
                    std::cerr << "Result verification failed at element " << i << "!" << std::endl;
                    std::cerr << "Error rate was: " << boost::math::epsilon_difference(output_vector[i], results[i]) << "eps" << std::endl;
                    return EXIT_FAILURE;
                }
            }
        }

        std::cout << "Test PASSED with calculation time: " << t << "s" << std::endl;
        std::cout << "Done\n";
    }
    catch (const std::exception& e)
    {
        std::cerr << "Stopped with exception: " << e.what() << std::endl;
    }

    return 0;
}