include/mppi/dynamics/autorally/ar_nn_model.cu: 19 additions & 5 deletions
@@ -7,17 +7,13 @@ NeuralNetModel<S_DIM, C_DIM, K_DIM>::NeuralNetModel(std::array<float2, C_DIM> co
{
std::vector<int> args{ 6, 32, 32, 4 };
helper_ = new FNNHelper<>(args, stream);
-this->SHARED_MEM_REQUEST_GRD_BYTES = helper_->getGrdSharedSizeBytes();
-this->SHARED_MEM_REQUEST_BLK_BYTES = helper_->getBlkSharedSizeBytes();
}

template <int S_DIM, int C_DIM, int K_DIM>
NeuralNetModel<S_DIM, C_DIM, K_DIM>::NeuralNetModel(cudaStream_t stream) : PARENT_CLASS(stream)
{
std::vector<int> args{ 6, 32, 32, 4 };
helper_ = new FNNHelper<>(args, stream);
-this->SHARED_MEM_REQUEST_GRD_BYTES = helper_->getGrdSharedSizeBytes();
-this->SHARED_MEM_REQUEST_BLK_BYTES = helper_->getBlkSharedSizeBytes();
}

template <int S_DIM, int C_DIM, int K_DIM>
@@ -173,7 +169,13 @@ template <int S_DIM, int C_DIM, int K_DIM>
__device__ void NeuralNetModel<S_DIM, C_DIM, K_DIM>::initializeDynamics(float* state, float* control, float* output,
float* theta_s, float t_0, float dt)
{
-PARENT_CLASS::initializeDynamics(state, control, output, theta_s, t_0, dt);
+if (output)
+{
+  for (int i = 0; i < this->OUTPUT_DIM && i < this->STATE_DIM; i++)
+  {
+    output[i] = state[i];
+  }
+}
helper_->initialize(theta_s);
}

@@ -191,3 +193,15 @@ NeuralNetModel<S_DIM, C_DIM, K_DIM>::stateFromMap(const std::map<std::string, fl
s(S_INDEX(YAW_RATE)) = map.at("OMEGA_Z");
return s;
}
+
+template <int S_DIM, int C_DIM, int K_DIM>
+__host__ __device__ int NeuralNetModel<S_DIM, C_DIM, K_DIM>::getGrdSharedSizeBytes() const
+{
+  return helper_->getGrdSharedSizeBytes();
+}
+
+template <int S_DIM, int C_DIM, int K_DIM>
+__host__ __device__ int NeuralNetModel<S_DIM, C_DIM, K_DIM>::getBlkSharedSizeBytes() const
+{
+  return helper_->getBlkSharedSizeBytes();
+}
include/mppi/dynamics/autorally/ar_nn_model.cuh: 7 additions & 7 deletions
@@ -18,7 +18,6 @@
* DYNAMICS_DIM = 4
*/

-
struct NNDynamicsParams : public DynamicsParams
{
enum class StateIndex : int
@@ -69,14 +68,12 @@ struct NNDynamicsParams
using namespace MPPI_internal;

template <int S_DIM, int C_DIM, int K_DIM>
-class NeuralNetModel : public Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>,
-                                       NNDynamicsParams>
+class NeuralNetModel : public Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>, NNDynamicsParams>
{
public:
// TODO remove duplication of calculation of values, pull from the structure
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-using PARENT_CLASS = Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>,
-                              NNDynamicsParams>;
+using PARENT_CLASS = Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>, NNDynamicsParams>;

// Define Eigen fixed size matrices
using state_array = typename PARENT_CLASS::state_array;
@@ -85,7 +82,7 @@ public:
using dfdx = typename PARENT_CLASS::dfdx;
using dfdu = typename PARENT_CLASS::dfdu;

-static const int DYNAMICS_DIM = S_DIM - K_DIM;  ///< number of inputs from state
+static const int DYNAMICS_DIM = S_DIM - K_DIM;  ///< number of inputs from state

NeuralNetModel(cudaStream_t stream = 0);
NeuralNetModel(std::array<float2, C_DIM> control_rngs, cudaStream_t stream = 0);
@@ -110,7 +107,8 @@ public:
return helper_->getThetaPtr();
}

-FNNHelper<>* getHelperPtr() {
+FNNHelper<>* getHelperPtr()
+{
return helper_;
}

@@ -147,6 +145,8 @@ public:
__device__ void computeKinematics(float* state, float* state_der);

state_array stateFromMap(const std::map<std::string, float>& map) override;
+__host__ __device__ int getGrdSharedSizeBytes() const;
+__host__ __device__ int getBlkSharedSizeBytes() const;

private:
FNNHelper<>* helper_ = nullptr;
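
The header change pairs with the .cu change: the accessors are declared on the derived class, and because Dynamics is a CRTP base (the derived type is its first template argument, Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>, NNDynamicsParams>), statically dispatched callers resolve these calls without virtual functions, which matters for objects used on the device. Below is a standalone sketch of that shadowing pattern, with illustrative names rather than the library's real base class.

#include <cstdio>

template <class DERIVED>
struct DynamicsSketchBase
{
  int getGrdSharedSizeBytes() const { return 0; }  // default: no extra shared memory
  int sharedBytesForLaunch() const
  {
    // CRTP cast: picks up DERIVED's shadowing accessor at compile time, no vtable.
    return static_cast<const DERIVED*>(this)->getGrdSharedSizeBytes();
  }
};

struct NNDynamicsSketch : DynamicsSketchBase<NNDynamicsSketch>
{
  int getGrdSharedSizeBytes() const { return 256; }  // e.g. forwarded from an FNN helper
};

int main()
{
  NNDynamicsSketch model;
  std::printf("%d bytes\n", model.sharedBytesForLaunch());  // prints "256 bytes"
  return 0;
}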
tests/dynamics/ar_dynamics_nn_test.cu: 17 additions & 0 deletions
@@ -222,6 +222,14 @@ TEST(ARNeuralNetDynamics, UpdateModelTest)
TEST(ARNeuralNetDynamics, LoadModelTest)
{
NeuralNetModel<7, 2, 3> model;
+EXPECT_EQ(model.getGrdSharedSizeBytes(), model.getHelperPtr()->getGrdSharedSizeBytes()) << "Shared mem request "
+                                                                                           "doesn't match between "
+                                                                                           "Dynamics and Neural "
+                                                                                           "Network";
+EXPECT_EQ(model.getBlkSharedSizeBytes(), model.getHelperPtr()->getBlkSharedSizeBytes()) << "Shared mem request "
+                                                                                           "doesn't match between "
+                                                                                           "Dynamics and Neural "
+                                                                                           "Network";
model.GPUSetup();

// TODO procedurally generate a NN in python and save and run like costs
@@ -238,6 +246,15 @@ TEST(ARNeuralNetDynamics, LoadModelTest)
std::array<int, 6> stride_result = {};
std::array<int, 4> net_structure_result = {};

+EXPECT_EQ(model.getGrdSharedSizeBytes(), model.getHelperPtr()->getGrdSharedSizeBytes()) << "Shared mem request "
+                                                                                           "doesn't match between "
+                                                                                           "Dynamics and Neural "
+                                                                                           "Network";
+EXPECT_EQ(model.getBlkSharedSizeBytes(), model.getHelperPtr()->getBlkSharedSizeBytes()) << "Shared mem request "
+                                                                                           "doesn't match between "
+                                                                                           "Dynamics and Neural "
+                                                                                           "Network";
+
// launch kernel
launchParameterCheckTestKernel<NeuralNetModel<7, 2, 3>, 1412, 6, 4>(model, theta_result, stride_result,
net_structure_result);
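
A note on the fixed template arguments in launchParameterCheckTestKernel<NeuralNetModel<7, 2, 3>, 1412, 6, 4>: 6 and 4 match the sizes of stride_result and net_structure_result, and 1412 is most plausibly the parameter count of the 6-32-32-4 network the constructors build (weights plus biases per layer: 6*32+32 + 32*32+32 + 32*4+4 = 224 + 1056 + 132 = 1412). A small illustrative check, not part of the test file:

// Illustrative only: verifies the 1412 figure against the {6, 32, 32, 4} layer sizes.
constexpr int layerParams(int in, int out)
{
  return in * out + out;  // weights + biases for one fully connected layer
}
static_assert(layerParams(6, 32) + layerParams(32, 32) + layerParams(32, 4) == 1412,
              "a 6-32-32-4 network has 224 + 1056 + 132 = 1412 parameters");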