Skip to content

Commit

Permalink
- added support for NAM loader (not finished)
Browse files Browse the repository at this point in the history
  • Loading branch information
christoph-hart committed Jan 23, 2025
1 parent b6da95a commit e4e5659
Show file tree
Hide file tree
Showing 30 changed files with 2,388 additions and 10,334 deletions.
12 changes: 12 additions & 0 deletions hi_scripting/scripting/api/ScriptingApiObjects.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5057,6 +5057,7 @@ struct ScriptingObjects::ScriptNeuralNetwork::Wrapper
API_METHOD_WRAPPER_0(ScriptNeuralNetwork, getModelJSON);
API_VOID_METHOD_WRAPPER_1(ScriptNeuralNetwork, loadTensorFlowModel);
API_VOID_METHOD_WRAPPER_1(ScriptNeuralNetwork, loadPytorchModel);
API_VOID_METHOD_WRAPPER_1(ScriptNeuralNetwork, loadNAMModel);
API_METHOD_WRAPPER_1(ScriptNeuralNetwork, createModelJSONFromTextFile);
API_METHOD_WRAPPER_2(ScriptNeuralNetwork, loadOnnxModel);
API_METHOD_WRAPPER_3(ScriptNeuralNetwork, processFFTSpectrum);
Expand All @@ -5073,6 +5074,7 @@ ScriptingObjects::ScriptNeuralNetwork::ScriptNeuralNetwork(ProcessorWithScriptin
ADD_API_METHOD_1(createModelJSONFromTextFile);
ADD_API_METHOD_1(loadTensorFlowModel);
ADD_API_METHOD_1(loadPytorchModel);
ADD_API_METHOD_1(loadNAMModel);
ADD_API_METHOD_0(getModelJSON);
ADD_API_METHOD_2(loadOnnxModel);
ADD_API_METHOD_3(processFFTSpectrum);
Expand Down Expand Up @@ -5296,6 +5298,16 @@ void ScriptingObjects::ScriptNeuralNetwork::loadPytorchModel(const var& modelJSO
#endif
}

// Loads the model layout and weights from a NAM file's JSON data into the wrapped
// neural network (NAM presumably = Neural Amp Modeler — TODO confirm against nn->loadNAMModel).
// Only available when compiled with HISE_INCLUDE_RT_NEURAL; otherwise a script error is raised.
void ScriptingObjects::ScriptNeuralNetwork::loadNAMModel(const var& modelJSON)
{
#if HISE_INCLUDE_RT_NEURAL
// Forward the JSON to the underlying network implementation...
nn->loadNAMModel(modelJSON);
// ...then rebuild the script-facing state, mirroring loadTensorFlowModel/loadPytorchModel.
postBuild();
#else
reportScriptError("You must enable HISE_INCLUDE_RT_NEURAL");
#endif
}

bool ScriptingObjects::ScriptNeuralNetwork::loadOnnxModel(const var& base64Data, int numOutputs)
{
if(onnx == nullptr)
Expand Down
3 changes: 3 additions & 0 deletions hi_scripting/scripting/api/ScriptingApiObjects.h
Original file line number Diff line number Diff line change
Expand Up @@ -1606,6 +1606,9 @@ namespace ScriptingObjects
/** Loads the model layout and weights from a Pytorch model JSON. */
void loadPytorchModel(const var& modelJSON);

/** Loads the model from a NAM file. */
void loadNAMModel(const var& modelJSON);

/** Loads the ONNX runtime model for spectral analysis. */
bool loadOnnxModel(const var& base64Data, int numOutputValues);

Expand Down
23 changes: 9 additions & 14 deletions hi_scripting/scripting/scriptnode/ui/NodeComponent.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,7 @@ void NodeComponent::handlePopupMenuResult(int result)

if (wType == 2)
{
auto id = node->getId();
auto name = snex::cppgen::Helpers::getValidCppVariableName(node->getName());

struct ConnectionState
{
Expand Down Expand Up @@ -724,23 +724,23 @@ void NodeComponent::handlePopupMenuResult(int result)
c.removeOldConnection(node.get());
}

if (id == node->getPath().getIdentifier().toString())
if (name == node->getPath().getIdentifier().toString())
{
id = PresetHandler::getCustomName(id, "Enter a customized name for the node");
name = PresetHandler::getCustomName(name, "Enter a customized name for the node");
}

String newId = id + "_";
String newId = name + "_";

node->setValueTreeProperty(PropertyIds::ID, newId);

PopupHelpers::wrapIntoChain(node.get(), MenuActions::WrapIntoChain, id);
PopupHelpers::wrapIntoChain(node.get(), MenuActions::WrapIntoChain, name);

auto pn = node->getParentNode();
pn->getValueTree().setProperty(PropertyIds::ShowParameters, true, node->getUndoManager());

if (auto modNode = dynamic_cast<ModulationSourceNode*>(node.get()))
{
String pmodId = id + "_pm";
String pmodId = name + "_pm";
var pmodvar = node->getRootNetwork()->create("routing.public_mod", pmodId);

auto pmod = dynamic_cast<NodeBase*>(pmodvar.getObject());
Expand Down Expand Up @@ -1035,7 +1035,9 @@ void NodeComponent::PopupHelpers::wrapIntoNetwork(NodeBase* node, bool makeCompi
for (int i = 0; i < rootTree.getNumProperties(); i++)
nData.setProperty(rootTree.getPropertyName(i), rootTree.getProperty(rootTree.getPropertyName(i)), nullptr);

nData.setProperty(PropertyIds::ID, node->getId(), nullptr);
auto name = snex::cppgen::Helpers::getValidCppVariableName(node->getName());

nData.setProperty(PropertyIds::ID, name, nullptr);
nData.addChild(node->getValueTree().createCopy(), -1, nullptr);

auto ndir = BackendDllManager::getSubFolder(node->getScriptProcessor()->getMainController_(), BackendDllManager::FolderSubType::Networks);
Expand Down Expand Up @@ -1121,15 +1123,8 @@ void NodeComponent::PopupHelpers::wrapIntoChain(NodeBase* node, MenuActions resu
auto parent = selection.getFirst()->getValueTree().getParent();
auto nIndex = parent.indexOf(selection.getFirst()->getValueTree());



for (auto n : selection)
{
n->setParent(newContainer, -1);

//n->getValueTree().getParent().removeChild(n->getValueTree(), um);
//containerTree.getChildWithName(PropertyIds::Nodes).addChild(n->getValueTree(), -1, um);
}

parent.addChild(containerTree, nIndex, um);
}
Expand Down
210 changes: 210 additions & 0 deletions hi_tools/hi_neural/RTNeural/RTNeural/conv1d/strided_conv1d.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,210 @@
#pragma once

#include "conv1d.h"

namespace RTNEURAL_NAMESPACE
{
/**
* Dynamic implementation of a 1-dimensional convolutional layer
* with strides.
*
* Internally, this is just a wrapper around the Conv1D layer:
* a full convolution is computed only once every `stride` samples;
* in between, the input is pushed into the internal state and the
* last computed output is repeated.
*/
template <typename T>
class StridedConv1D final : public Layer<T>
{
public:
/**
* Constructs a strided convolution layer for the given dimensions.
*
* @param in_size: the input size for the layer
* @param out_size: the output size for the layer
* @param kernel_size: the size of the convolution kernel
* @param dilation: the dilation rate to use for dilated convolution
* @param stride: the stride of the convolution
* @param groups: controls connections between inputs and outputs
*/
StridedConv1D(int in_size, int out_size, int kernel_size, int dilation, int stride, int groups = 1)
: Layer<T>(in_size, out_size)
, internal(in_size, out_size, kernel_size, dilation, groups)
, stride(stride)
{
skip_output.resize(out_size, T {});
}

/**
* Constructs the layer from a list of exactly six sizes:
* { in_size, out_size, kernel_size, dilation, stride, groups }.
*/
StridedConv1D(std::initializer_list<int> sizes)
: StridedConv1D<T>(*sizes.begin(), *(sizes.begin() + 1), *(sizes.begin() + 2),
*(sizes.begin() + 3), *(sizes.begin() + 4), *(sizes.begin() + 5))
{
}

// NOTE(review): because of the `const int stride` member below, the defaulted
// copy-assignment operator is implicitly deleted; copy construction still works.
StridedConv1D(const StridedConv1D& other) = default;
StridedConv1D& operator=(const StridedConv1D& other) = default;

/** Resets the layer state. */
RTNEURAL_REALTIME void reset() override
{
strides_counter = 0;
std::fill(std::begin(skip_output), std::end(skip_output), T {});
internal.reset();
}

/** Returns the name of this layer. */
std::string getName() const noexcept override { return "strided_conv1d"; }

/** Performs a stride step for this layer (updates state without producing output). */
RTNEURAL_REALTIME inline void skip(const T* input)
{
internal.skip(input);
}

/** Performs forward propagation for this layer. */
RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override
{
if(strides_counter == 0)
{
// Stride boundary: run the full convolution and cache the result.
internal.forward(input, h);
std::copy(h, h + Layer<T>::out_size, std::begin(skip_output));
}
else
{
// Between strides: only advance the internal state and re-emit
// the most recently computed output.
internal.skip(input);
std::copy(std::begin(skip_output), std::end(skip_output), h);
}

// Wraps back to 0 after `stride` calls, triggering the next full computation.
strides_counter = (strides_counter == stride - 1) ? 0 : strides_counter + 1;
}

/**
* Sets the layer weights.
*
* The weights vector must have size weights[out_size][in_size][kernel_size * dilation]
*/
RTNEURAL_REALTIME void setWeights(const std::vector<std::vector<std::vector<T>>>& weights)
{
internal.setWeights(weights);
}

/**
* Sets the layer biases.
*
* The bias vector must have size bias[out_size]
*/
RTNEURAL_REALTIME void setBias(const std::vector<T>& biasVals)
{
internal.setBias(biasVals);
}

/** Returns the size of the convolution kernel. */
RTNEURAL_REALTIME int getKernelSize() const noexcept { return internal.getKernelSize(); }

/** Returns the convolution dilation rate. */
RTNEURAL_REALTIME int getDilationRate() const noexcept { return internal.getDilationRate(); }

/** Returns the number of "groups" in the convolution. */
int getGroups() const noexcept { return internal.getGroups(); }

private:
// The wrapped (non-strided) convolution doing the actual work.
Conv1D<T> internal;

const int stride;
// Position inside the current stride window; 0 means "compute a new output".
int strides_counter = 0;
// Holds the last computed output, re-emitted between stride boundaries.
std::vector<T> skip_output {};
};

//====================================================
/**
* Static implementation of a 1-dimensional convolution layer
* with strides.
*
* Internally, this is just a wrapper around the Conv1DT layer:
* a full convolution is computed only once every `stride` samples,
* while the in-between calls only update the internal state.
*
* @param in_sizet: the input size for the layer
* @param out_sizet: the output size for the layer
* @param kernel_size: the size of the convolution kernel
* @param dilation_rate: the dilation rate to use for dilated convolution
* @param stride: the stride of the convolution
* @param groups: controls connections between inputs and outputs
* @param dynamic_state: use dynamically allocated layer state
*/
template <typename T, int in_sizet, int out_sizet, int kernel_size, int dilation_rate, int stride, int groups = 1, bool dynamic_state = false>
class StridedConv1DT
{
// The wrapped (non-strided) compile-time-sized convolution.
Conv1DT<T, in_sizet, out_sizet, kernel_size, dilation_rate, groups, dynamic_state> internal;

// Position inside the current stride window; 0 means "compute a new output".
int strides_counter = 0;

public:
static constexpr auto in_size = in_sizet;
static constexpr auto out_size = out_sizet;
static constexpr auto filters_per_group = in_size / groups;
static constexpr auto channels_per_group = out_size / groups;

StridedConv1DT()
// Expose the wrapped layer's output buffer directly through `outs`.
: outs(internal.outs)
{
}

/** Returns the name of this layer. */
std::string getName() const noexcept { return "strided_conv1d"; }

/** Returns false since convolution is not an activation layer. */
constexpr bool isActivation() const noexcept { return false; }

/** Resets the layer state. */
RTNEURAL_REALTIME void reset()
{
internal.reset();
}

/** Performs a stride step for this layer (updates state without producing output). */
template <typename Inputs>
RTNEURAL_REALTIME inline void skip(const Inputs& ins) noexcept
{
internal.skip(ins);
}

/** Performs forward propagation for this layer. */
template <typename Inputs>
RTNEURAL_REALTIME inline void forward(const Inputs& ins) noexcept
{
// Only compute a full convolution on stride boundaries; otherwise just
// push the input into the internal state (outs keeps its previous value).
if(strides_counter == 0)
internal.forward(ins);
else
internal.skip(ins);

// Wraps back to 0 after `stride` calls, triggering the next full computation.
strides_counter = (strides_counter == stride - 1) ? 0 : strides_counter + 1;
}

/**
* Sets the layer weights.
*
* The weights vector must have size weights[out_size][group_count][kernel_size * dilation]
*/
RTNEURAL_REALTIME void setWeights(const std::vector<std::vector<std::vector<T>>>& weights)
{
internal.setWeights(weights);
}

/**
* Sets the layer biases.
*
* The bias vector must have size bias[out_size]
*/
RTNEURAL_REALTIME void setBias(const std::vector<T>& biasVals)
{
internal.setBias(biasVals);
}

/** Returns the size of the convolution kernel. */
RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; }

/** Returns the convolution dilation rate. */
RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; }

/** Returns the number of "groups" in the convolution. */
int getGroups() const noexcept { return groups; }

/** Reference to the internal layer weights. */
decltype(internal.outs)& outs;
};
}
Loading

0 comments on commit e4e5659

Please sign in to comment.