Commit

Merge pull request #53 from Sergio0694/dev
Dev
Sergio0694 authored Dec 31, 2017
2 parents 2d56d11 + 8f636c0 commit 369b1c8
Showing 65 changed files with 1,766 additions and 1,777 deletions.
84 changes: 0 additions & 84 deletions NeuralNetwork.NET.Cuda/APIs/CuDnnNetworkLayers.cs

This file was deleted.

39 changes: 0 additions & 39 deletions NeuralNetwork.NET.Cuda/APIs/CuDnnNetworkLayersDeserializer.cs

This file was deleted.

4 changes: 0 additions & 4 deletions NeuralNetwork.NET.Cuda/AssemblyInfo.cs

This file was deleted.

21 changes: 0 additions & 21 deletions NeuralNetwork.NET.Cuda/NeuralNetwork.NET.Cuda.csproj

This file was deleted.

136 changes: 136 additions & 0 deletions NeuralNetwork.NET/APIs/CuDnnNetworkLayers.cs
@@ -0,0 +1,136 @@
using System;
using System.Linq;
using JetBrains.Annotations;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Extensions;
using NeuralNetworkNET.Networks.Activations;
using NeuralNetworkNET.Networks.Layers.Cuda;

namespace NeuralNetworkNET.APIs
{
/// <summary>
/// A static class that exposes the available cuDNN network layer types
/// </summary>
public static class CuDnnNetworkLayers
{
/// <summary>
/// Gets whether or not CUDA acceleration is supported on the current system
/// </summary>
public static bool IsCudaSupportAvailable
{
[Pure]
get
{
try
{
// Calling this directly could cause a crash in the <Module> loader due to missing .dll files
return CuDnnSupportHelper.IsGpuAccelerationSupported();
}
catch (TypeInitializationException)
{
// Missing .dll file
return false;
}
}
}

/// <summary>
/// Creates a new fully connected layer with the specified number of output neurons and the given activation function
/// </summary>
/// <param name="neurons">The number of output neurons</param>
/// <param name="activation">The desired activation function to use in the network layer</param>
/// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
/// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
[PublicAPI]
[Pure, NotNull]
public static LayerFactory FullyConnected(
int neurons, ActivationFunctionType activation,
WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
=> input => new CuDnnFullyConnectedLayer(input, neurons, activation, weightsMode, biasMode);

/// <summary>
/// Creates a fully connected softmax output layer (used for classification problems with mutually-exclusive classes)
/// </summary>
/// <param name="outputs">The number of output neurons</param>
/// <param name="weightsMode">The desired initialization mode for the weights in the network layer</param>
/// <param name="biasMode">The desired initialization mode to use for the layer bias values</param>
[PublicAPI]
[Pure, NotNull]
public static LayerFactory Softmax(
int outputs,
WeightsInitializationMode weightsMode = WeightsInitializationMode.GlorotUniform, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
=> input => new CuDnnSoftmaxLayer(input, outputs, weightsMode, biasMode);

/// <summary>
/// Creates a convolutional layer with the desired number of kernels
/// </summary>
/// <param name="info">The info on the convolution operation to perform</param>
/// <param name="kernel">The volume information of the kernels used in the layer</param>
/// <param name="kernels">The number of convolution kernels to apply to the input volume</param>
/// <param name="activation">The desired activation function to use in the network layer</param>
/// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
[PublicAPI]
[Pure, NotNull]
public static LayerFactory Convolutional(
ConvolutionInfo info, (int X, int Y) kernel, int kernels, ActivationFunctionType activation,
BiasInitializationMode biasMode = BiasInitializationMode.Zero)
=> input => new CuDnnConvolutionalLayer(input, info, kernel, kernels, activation, biasMode);

/// <summary>
/// Creates a pooling layer with the specified pooling settings
/// </summary>
/// <param name="info">The info on the pooling operation to perform</param>
/// <param name="activation">The desired activation function to use in the network layer</param>
[PublicAPI]
[Pure, NotNull]
public static LayerFactory Pooling(PoolingInfo info, ActivationFunctionType activation) => input => new CuDnnPoolingLayer(input, info, activation);

/// <summary>
/// Creates a new inception layer with the given features
/// </summary>
/// <param name="info">The info on the operations to execute inside the layer</param>
/// <param name="biasMode">Indicates the desired initialization mode to use for the layer bias values</param>
[PublicAPI]
[Pure, NotNull]
public static LayerFactory Inception(InceptionInfo info, BiasInitializationMode biasMode = BiasInitializationMode.Zero)
=> input => new CuDnnInceptionLayer(input, info, biasMode);

#region Feature helper

/// <summary>
/// A private class that wraps the actual test method in a standalone type (the decoupling is needed to avoid &lt;Module&gt; loading crashes)
/// </summary>
private static class CuDnnSupportHelper
{
/// <summary>
/// Checks whether or not the Cuda features are currently supported
/// </summary>
public static bool IsGpuAccelerationSupported()
{
try
{
// CUDA test
Alea.Gpu gpu = Alea.Gpu.Default;
if (gpu == null) return false;
if (!Alea.cuDNN.Dnn.IsAvailable) return false; // cuDNN
using (Alea.DeviceMemory<float> sample_gpu = gpu.AllocateDevice<float>(1024))
{
Alea.deviceptr<float> ptr = sample_gpu.Ptr;
void Kernel(int i) => ptr[i] = i;
Alea.Parallel.GpuExtension.For(gpu, 0, 1024, Kernel); // JIT test
float[] sample = Alea.Gpu.CopyToHost(sample_gpu);
return Enumerable.Range(0, 1024).Select<int, float>(i => i).ToArray().ContentEquals(sample);
}
}
catch
{
// Missing .dll or other errors
return false;
}
}
}

#endregion
}
}
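
A minimal usage sketch for the factories above (not part of this diff): each method returns a LayerFactory delegate that a network builder consumes. NetworkManager.NewSequential and TensorInfo.Linear are assumptions about the library's public API and may differ in this exact version; the CuDnnNetworkLayers calls match the code shown above.

using System;
using NeuralNetworkNET.APIs;
using NeuralNetworkNET.APIs.Interfaces;
using NeuralNetworkNET.APIs.Structs;
using NeuralNetworkNET.Networks.Activations;

public static class CuDnnUsageSample
{
    // Sketch: build a small GPU-accelerated classifier out of the cuDNN layer factories.
    // NetworkManager.NewSequential and TensorInfo.Linear are assumptions about the library API.
    public static INeuralNetwork BuildMnistClassifier()
    {
        if (!CuDnnNetworkLayers.IsCudaSupportAvailable)
            throw new NotSupportedException("CUDA/cuDNN acceleration is not available on this system");
        return NetworkManager.NewSequential(
            TensorInfo.Linear(784),                                                  // flattened 28x28 inputs
            CuDnnNetworkLayers.FullyConnected(100, ActivationFunctionType.Sigmoid),  // hidden layer
            CuDnnNetworkLayers.Softmax(10));                                         // 10 mutually-exclusive classes
    }
}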
15 changes: 0 additions & 15 deletions NeuralNetwork.NET/APIs/Delegates/LayerDeserializer.cs

This file was deleted.

11 changes: 11 additions & 0 deletions NeuralNetwork.NET/APIs/Enums/LayersLoadingPreference.cs
@@ -0,0 +1,11 @@
namespace NeuralNetworkNET.APIs.Enums
{
/// <summary>
/// Indicates the preferred type of network layers to deserialize, whenever possible
/// </summary>
public enum LayersLoadingPreference
{
Cpu,
Cuda
}
}
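
A hedged sketch of how this preference might be consumed when restoring a saved model: the NetworkLoader.TryLoad overload taking a LayersLoadingPreference is an assumption and not part of this diff; the enum only tells the deserializer which layer implementations to prefer.

using System.IO;
using NeuralNetworkNET.APIs;
using NeuralNetworkNET.APIs.Enums;
using NeuralNetworkNET.APIs.Interfaces;

public static class LoadingPreferenceSample
{
    // Hypothetical loader call: prefer cuDNN-backed layers when a GPU is available,
    // otherwise fall back to the CPU implementations.
    public static INeuralNetwork LoadModel(string path)
    {
        LayersLoadingPreference preference = CuDnnNetworkLayers.IsCudaSupportAvailable
            ? LayersLoadingPreference.Cuda
            : LayersLoadingPreference.Cpu;
        return NetworkLoader.TryLoad(new FileInfo(path), preference);
    }
}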
5 changes: 5 additions & 0 deletions NeuralNetwork.NET/APIs/Interfaces/INeuralNetwork.cs
@@ -29,6 +29,11 @@ public interface INeuralNetwork : IEquatable<INeuralNetwork>, IClonable<INeuralN
[NotNull, ItemNotNull]
IReadOnlyList<INetworkLayer> Layers { get; }

/// <summary>
/// Gets the total number of parameters in the current network
/// </summary>
int Parameters { get; }

#endregion

#region Methods
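
The new Parameters property exposes the total number of trainable weights and biases in the network; a minimal usage sketch (the helper below is illustrative only):

using System;
using NeuralNetworkNET.APIs.Interfaces;

public static class NetworkInfoSample
{
    // Report the size of a model through the Layers and Parameters members shown above.
    public static void PrintModelSize(INeuralNetwork network)
        => Console.WriteLine($"Layers: {network.Layers.Count}, trainable parameters: {network.Parameters}");
}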
