pomocy przy konwersji C++ na Delphi

0

Mam taki problem.
Mam wygenerowany kod sieci neuronowej w C++, a potrzebuję przerobić go na Delphi. Na C++ się nie znam — może ktoś ma pomysł, jak to zrobić?
[email protected]

0

Daj kod — może ktoś (albo nawet ja :P) pomoże.

0

kod wygląda tak:

/* standard includes. math.h needed for exp() function. */

#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>

#ifndef FALSE
#define FALSE 0
#define TRUE 1
#endif

#define MENUCODE -999


static double MLP_8_ukrytych12Thresholds[] =
{

/* layer 1 */
0.48719668555754214, -0.53899224319196948, 0.36400560840783303, 0.85156717089316092,
0.41940705466573258, -0.39421948165960635, -0.20768931977363411, -0.2418955323288467,

/* layer 2 */
-0.79680499107436509, -1.0243508914805921, -0.66793058959285023

};

static double MLP_8_ukrytych12Weights[] =
{

/* layer 1 */
0.30428010633427677, 0.042271367532646244, 0.25831993004294579,
0.97243048404029375, 0.10665541098185974, 0.0093496563933714791,
-0.13700710206394631, 0.23196574300643785, 0.7230564920915743,
0.053763397427626848, 0.51484617831982271, -0.06381003314695799,
-0.078334222038227902, 0.55834678767952339, -0.93153487979282223,
-0.059851176420416513, 0.44492920533406427, -0.25193109187232798,
0.26589542163001961, -0.013437622904855721, -0.3207386074679896,
0.41947319396580718, 1.1237485518025898, 0.0080421278665221418,

/* layer 2 */
0.084794421725160482, 0.27363156004159334, 0.78885761645034103, 0.39142910103413431,
-0.25819639708098824, -0.33233873383599, -0.19430822157705929, -0.41968214642971935,
1.0761548024171641, -0.18459494632194534, 0.44288492970828119, 0.18306580464091446,
-0.14808093474445116, -0.17688503487218735, -0.76020019760231305, 0.021742160478823859,
0.42016996073190388, 0.41932945390228121, 0.18720306397680989, 0.15596319323486774,
0.77812380286063665, -0.10067803880638822, 0.074197999979061346, 0.15594333465450305

};

static double MLP_8_ukrytych12Acts[28];

/* ---------------------------------------------------------- */
/*
  MLP_8_ukrytych12Run - run neural network MLP_8_ukrytych12

  Input and Output variables.
  Variable names are listed below in order, together with each
  variable's offset in the data set at the time code was
  generated (if the variable is then available).
  For nominal variables, the numeric code - class name
  conversion is shown indented below the variable name.
  To provide nominal inputs, use the corresponding numeric code.
  Input variables (Offset):
  T1 (0)
  T2 (1)
  N (2)

  Wyjście:
  P (3)
  Q (4)
  Tt (5)

*/
/* ---------------------------------------------------------- */

void MLP_8_ukrytych12Run( double inputs[], double outputs[], int outputType )
{
  int i, j, k, u;
  double *w = MLP_8_ukrytych12Weights, *t = MLP_8_ukrytych12Thresholds;

  /* Process inputs - apply pre-processing to each input in turn,
   * storing results in the neuron activations array.
   */

  /* Input 0: standard numeric pre-processing: linear shift and scale. */
  if ( inputs[0] == -9999 )
    MLP_8_ukrytych12Acts[0] = 0.46666666666666679;
  else
    MLP_8_ukrytych12Acts[0] = inputs[0] * 0.016666666666666666 + -3;

  /* Input 1: standard numeric pre-processing: linear shift and scale. */
  if ( inputs[1] == -9999 )
    MLP_8_ukrytych12Acts[1] = 0.43333333333333313;
  else
    MLP_8_ukrytych12Acts[1] = inputs[1] * 0.016666666666666666 + -3;

  /* Input 2: standard numeric pre-processing: linear shift and scale. */
  if ( inputs[2] == -9999 )
    MLP_8_ukrytych12Acts[2] = 0.586915887850467;
  else
    MLP_8_ukrytych12Acts[2] = inputs[2] * 0.023364485981308403 + -1.859813084112149;

  /*
   * Process layer 1.
   */

  /* For each unit in turn */
  for ( u=0; u < 8; ++u )
  {
    /*
     * First, calculate post-synaptic potentials, storing
     * these in the MLP_8_ukrytych12Acts array.
     */

    /* Initialise hidden unit activation to zero */
    MLP_8_ukrytych12Acts[3+u] = 0.0;

    /* Accumulate weighted sum from inputs */
    for ( i=0; i < 3; ++i )
      MLP_8_ukrytych12Acts[3+u] += *w++ * MLP_8_ukrytych12Acts[0+i];

    /* Subtract threshold */
    MLP_8_ukrytych12Acts[3+u] -= *t++;

    /* Now apply the hyperbolic activation function, ( e^x - e^-x ) / ( e^x + e^-x ).
     * Deal with overflow and underflow
     */
    if ( MLP_8_ukrytych12Acts[3+u] > 100.0 )
       MLP_8_ukrytych12Acts[3+u] = 1.0;
    else if ( MLP_8_ukrytych12Acts[3+u] < -100.0 )
      MLP_8_ukrytych12Acts[3+u] = -1.0;
    else
    {
      double e1 = exp( MLP_8_ukrytych12Acts[3+u] ), e2 = exp( -MLP_8_ukrytych12Acts[3+u] );
      MLP_8_ukrytych12Acts[3+u] = ( e1 - e2 ) / ( e1 + e2 );
    }
  }

  /*
   * Process layer 2.
   */

  /* For each unit in turn */
  for ( u=0; u < 3; ++u )
  {
    /*
     * First, calculate post-synaptic potentials, storing
     * these in the MLP_8_ukrytych12Acts array.
     */

    /* Initialise hidden unit activation to zero */
    MLP_8_ukrytych12Acts[11+u] = 0.0;

    /* Accumulate weighted sum from inputs */
    for ( i=0; i < 8; ++i )
      MLP_8_ukrytych12Acts[11+u] += *w++ * MLP_8_ukrytych12Acts[3+i];

    /* Subtract threshold */
    MLP_8_ukrytych12Acts[11+u] -= *t++;

  }

  /* Type of output required - selected by outputType parameter */
  switch ( outputType )
  {
    /* The usual type is to generate the output variables */
    case 0:


      /* Post-process output 0, numeric linear scaling */
      outputs[0] = ( MLP_8_ukrytych12Acts[11] - -1.0689655172413792 ) / 0.00017241379310344826;

      /* Post-process output 1, numeric linear scaling */
      outputs[1] = ( MLP_8_ukrytych12Acts[12] - -1.8139534883720938 ) / 232.55813953488376;

      /* Post-process output 2, numeric linear scaling */
      outputs[2] = ( MLP_8_ukrytych12Acts[13] - -4.7043010752688188 ) / 0.022401433691756279;
      break;

    /* type 1 is activation of output neurons */
    case 1:
      for ( i=0; i < 3; ++i )
        outputs[i] = MLP_8_ukrytych12Acts[11+i];
      break;

    /* type 2 is codebook vector of winning node (lowest actn) 1st hidden layer */
    case 2:
      {
        int winner=0;
        for ( i=1; i < 8; ++i )
          if ( MLP_8_ukrytych12Acts[3+i] < MLP_8_ukrytych12Acts[3+winner] )
            winner=i;

        for ( i=0; i < 3; ++i )
          outputs[i] = MLP_8_ukrytych12Weights[3*winner+i];
      }
      break;

    /* type 3 indicates winning node (lowest actn) in 1st hidden layer */
    case 3:
      {
        int winner=0;
        for ( i=1; i < 8; ++i )
          if ( MLP_8_ukrytych12Acts[3+i] < MLP_8_ukrytych12Acts[3+winner] )
            winner=i;

        outputs[0] = winner;
      }
      break;
  }
}

/* ---------------------------------------------------------- */
/*
  MLP_8_ukrytych12RunPadded - network MLP_8_ukrytych12

  inputs - the input variables, in the same number and order
  as in the data set at the time the code was generated.
  This alternative routine is useful if you want a consistent
  interface for your generated routines, so that the number
  and order of variables is the same for all of them.
  Variables (ones used as inputs marked thus *):
  0)	T1 *
  1)	T2 *
  2)	N *

  Output variables (Offset):
  P (3)
  Q (4)
  Tt (5)

*/
/* ---------------------------------------------------------- */

void MLP_8_ukrytych12RunPadded( double inputs[], double outputs[], int outputType )
{
  double padded[3];
  int i;

  /* All three data-set variables are network inputs here, so the
   * "padded" vector is a straight copy of the caller's array. */
  for ( i = 0; i < 3; ++i )
    padded[i] = inputs[i];

  /* Delegate to the real network routine */
  MLP_8_ukrytych12Run( padded, outputs, outputType );
}
0

Uhhh, sporo tego :/ Nie sądzę, aby ktoś zrobił to za ciebie, ale jeśli znasz Delphi, nie powinieneś mieć problemów.

1 użytkowników online, w tym zalogowanych: 0, gości: 1