[PD-cvs] externals/grh/pix_linNN LinNeuralNet.cpp, NONE, 1.1 LinNeuralNet.h, NONE, 1.1 gpl.txt, NONE, 1.1 help-pix_linNN.pd, NONE, 1.1 pix_linNN.cpp, NONE, 1.1 pix_linNN.h, NONE, 1.1 readme.txt, NONE, 1.1

Georg Holzmann grholzi at users.sourceforge.net
Tue Jul 12 16:38:48 CEST 2005


Update of /cvsroot/pure-data/externals/grh/pix_linNN
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv12810/pix_linNN

Added Files:
	LinNeuralNet.cpp LinNeuralNet.h gpl.txt help-pix_linNN.pd 
	pix_linNN.cpp pix_linNN.h readme.txt 
Log Message:
initial commit of pix_linNN


--- NEW FILE: LinNeuralNet.cpp ---
/////////////////////////////////////////////////////////////////////////////
//
// class LinNeuralNet
//
//   source file
//
//   Copyright (c) 2004 Georg Holzmann <grh at gmx.at>
//
//   For information on usage and redistribution, and for a DISCLAIMER OF ALL
//   WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
//
/////////////////////////////////////////////////////////////////////////////

#include "LinNeuralNet.h"

//--------------------------------------------------
/* Constructor
 */
LinNeuralNet::LinNeuralNet(int netsize) : learn_rate_(0), range_(1), IW_(NULL), b1_(0)
{
  // set random seed:
  srand( (unsigned)time(NULL) );

  netsize_ = (netsize<1) ? 1 : netsize;
}

//--------------------------------------------------
/* Destructor
 */
LinNeuralNet::~LinNeuralNet()
{
  if(IW_)
    delete[] IW_;
}

//--------------------------------------------------
/* creates a new IW-matrix (size: netsize_) and 
 * b1-vector
 * ATTENTION: if they exist they'll be deleted
 */
bool LinNeuralNet::createNeurons()
{
  // delete if they exist
  if(IW_)
    delete[] IW_;

  IW_ = new float[netsize_];
  if(!IW_)
    return false;

  return true;
}

//--------------------------------------------------
/* inits the weight matrix and the bias vector of
 * the network with random values between [min|max]
 */
bool LinNeuralNet::initNetworkRand(const int &min, const int &max)
{
  if(!IW_)
    return false;

  // make a random value between 0 and 1
  // then map it to the bounds
  b1_ = ((float)rand()/(float)RAND_MAX)*(max-min) + min;

  for(int i=0; i<netsize_; i++)
    {
      IW_[i] = ((float)rand()/(float)RAND_MAX)*(max-min) + min;
    }

  return true;
}

//--------------------------------------------------
/* inits the net with a given weight matrix and bias
 * (makes a deep copy)
 * ATTENTION: the array behind the IW pointer must have
 *            the same size as the netsize !!!
 * returns false if there's a failure
 */
bool LinNeuralNet::initNetwork(const float *IW, float b1)
{
  if(!IW_)
    return false;

  b1_ = b1;

  for(int i=0; i<netsize_; i++)
      IW_[i] = IW[i];

  return true;
}

//--------------------------------------------------
/* calculates the output with the current IW, b1 values
 * ATTENTION: the array input_data must have the same
 *            size as netsize_
 */
float LinNeuralNet::calculateNet(float *input_data)
{
  if(!IW_)
    return 0;

  float output = 0;

  // multiply the inputs with the weight matrix IW
  // and add the bias vector b1
  for(int i=0; i<netsize_; i++)
      output += input_data[i] * IW_[i];
  
  // scale the weighted sum by the input range
  output /= range_;
  
  return (output+b1_);
}

//--------------------------------------------------
/* this method trains the network:
 * input_data is, as above, the input data; output_data is the
 * output the current net produced for input_data (it is not
 * recalculated in this method!); target_output is the desired
 * output
 * (this is the LMS-algorithm to train linear neural networks)
 * ATTENTION: the array input_data must have the same
 *            size as netsize_
 */
bool LinNeuralNet::trainNet(float *input_data, const float &output_data, 
			    const float &target_output)
{
  if(!IW_)
    return false;

  // this is the LMS-algorithm to train linear
  // neural networks
  
  // calculate the error signal:
  float error = (target_output - output_data);

  // now change the weights and the bias
  for(int i=0; i<netsize_; i++)
    IW_[i] += 2 * learn_rate_ * error * (input_data[i]/range_);

  b1_ += 2 * learn_rate_ * error; 

  return true;
}
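For reference (added here for readability, not part of LinNeuralNet.cpp): with eta = learn_rate_, r = range_, x = input_data and t = target_output, calculateNet() and trainNet() above compute

\[ y = b_1 + \frac{1}{r}\sum_{i=1}^{n} IW_i\, x_i \]
\[ e = t - y, \qquad IW_i \leftarrow IW_i + 2\eta\, e\, \frac{x_i}{r}, \qquad b_1 \leftarrow b_1 + 2\eta\, e \]

where n = netsize_.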

--- NEW FILE: gpl.txt ---
		    GNU GENERAL PUBLIC LICENSE
		       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

			    Preamble
 
  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.


		    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)


These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.


  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

  7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all.  For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.


  8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded.  In such case, this License incorporates
the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation.  If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

  10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission.  For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this.  Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

			    NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

		     END OF TERMS AND CONDITIONS


	    How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) 19yy  <name of author>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA


Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) 19yy name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Library General
Public License instead of this License.


--- NEW FILE: pix_linNN.cpp ---
/////////////////////////////////////////////////////////////////////////////
//
//   GEM - Graphics Environment for Multimedia
//
//   pix_linNN
//
//   Implementation file
//
//   Copyright (c) 2004 Georg Holzmann <grh at gmx.at>
//   (and of course lots of other developers for PD and GEM)
//
//   For information on usage and redistribution, and for a DISCLAIMER OF ALL
//   WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
//
/////////////////////////////////////////////////////////////////////////////

#include "pix_linNN.h"

CPPEXTERN_NEW_WITH_TWO_ARGS(pix_linNN, t_floatarg, A_DEFFLOAT, t_floatarg, A_DEFFLOAT)

//----------------------------------------------------------
/* Constructor
 */
pix_linNN::pix_linNN(t_floatarg arg0=64, t_floatarg arg1=1) :
    m_data_(NULL), m_xsize_(0), m_ysize_(0), m_csize_(0), 
    train_on_(false), net_(NULL)
{
  // init args ?????????????????????????????????
  neuron_nr_=2048;          //static_cast<int>((arg0<0)?2:arg0);
  precision_=2;          //static_cast<int>((arg1<1)?1:arg1);
  //post("arg0: %d, arg1: %d",arg0,arg1);

  // generate the in- and outlet:
  out0_ = outlet_new(this->x_obj, &s_signal);
  inlet_new(this->x_obj, &this->x_obj->ob_pd, &s_signal, &s_signal);

  // set random seed:
  srand( (unsigned)time(NULL) );

  // creates the nets
  net_ = new LinNeuralNet[neuron_nr_](3);
  if(!net_)
    {
      post("pix_linNN~: no memory for neural nets!");
      return;
    }

  for(int i=0; i<neuron_nr_; i++)
    {
      if( !net_[i].createNeurons() )
	{
	  post("pix_linNN~: error in creating the net!");
	  return;
	}
      if( !net_[i].initNetworkRand(-1,1) )
	{
	  post("pix_linNN~: error in initializing the net!");
	  return;
	}	

      net_[i].setRange(255);
      net_[i].setLearningRate(0.01);
    }
}

//----------------------------------------------------------
/* Destructor
 */
pix_linNN::~pix_linNN()
{
  outlet_free(out0_);
  m_data_ = NULL;
  m_xsize_ = 0;
  m_ysize_ = 0;

  // delete weight matrix and bias vector
  delete[] net_;
}

//----------------------------------------------------------
/* processImage
 */
void pix_linNN::processImage(imageStruct &image)
{
  m_data_ = image.data;
  m_xsize_ = image.xsize;
  m_ysize_ = image.ysize;
  m_csize_ = image.csize;
  m_format_ = image.format;
}

//----------------------------------------------------------
/* DSP perform
 */
t_int* pix_linNN::perform(t_int* w)
{
  pix_linNN *x = GetMyClass((void*)w[1]);
  t_float* in_signal = (t_float*)(w[2]);
  t_float* out_signal = (t_float*)(w[3]);
  int blocksize = (t_int)(w[4]);

  if(blocksize != x->neuron_nr_)
    {
      post("pix_linNN~: neurons and buffersize are different! You MUST have the same neuron nr as the buffersize !!!");
      post("neurons: %d, buffersize: %d", x->neuron_nr_, blocksize);
      return (w+5);
    }

  
  // some needed data
  long int pix_size = x->m_xsize_ * x->m_ysize_;
  int pix_blocksize  = (blocksize<pix_size)?blocksize:pix_size;

  // splits the frame into slices, so that the average
  // of one slice can be used for the network input
  // there are as many slices as the buffersize

  float nr = sqrt(blocksize); // the number of slices at the
                              // x- and y-axis

  float x_slice = x->m_xsize_ / nr; // x size of a slice in pixels
  float y_slice = x->m_ysize_ / nr; // y size of a slice in pixels
  int x_slice_int = static_cast<int>( x_slice );
  int y_slice_int = static_cast<int>( y_slice );

  // the number of slices on one axis (is the float nr
  // from above rounded up)
  int slice_nr = static_cast<int>(nr) + 1;
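  // worked example (illustrative numbers only, not taken from a real run):
  // with a 320x240 frame and a blocksize of 2048, nr = sqrt(2048) ~ 45.25,
  // so x_slice ~ 320/45.25 ~ 7.07 and y_slice ~ 240/45.25 ~ 5.30 pixels,
  // x_slice_int = 7, y_slice_int = 5 and slice_nr = 46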

  if (x->m_data_)
  {
    switch(x->m_format_)
    {
    case GL_RGBA:
      for(int n=0; n<pix_blocksize; n++)
      {
	//post("Block %d:",n);

	// calculate the pixel in the upper-left corner of every slice
	int lu_pix_x = static_cast<int>( (n % slice_nr) * x_slice );
	int lu_pix_y = static_cast<int>( static_cast<int>(n / slice_nr) * y_slice );

	//post("lu_pix: %d, %d", lu_pix_x, lu_pix_y);

	// now sum up all the pixels of one slice and then divide by the
	// number of pixels
	unsigned long int temp_data[3] = { 0, 0, 0 };  // the storage to sum the pixels
	t_float average_pix[3] = { 0, 0, 0 };  // the average of the pixels
	
	// only for optimization:
	int helper1 = x->m_xsize_ * x->m_csize_;
	int add_count = 0;

	for(int i=0; i<x_slice_int; i+=x->precision_)
	  {
	    for(int j=0; j<y_slice_int; j+=x->precision_)
	      {
		// the way to access the pixels: (C=chRed, chBlue, ...)
		//data[Y * xsize * csize + X * csize + C]
		
		//post("current pixel: %d %d", 
		//     ((lu_pix_x+i)%x->m_xsize), ((lu_pix_y+j)%x->m_ysize) );

		temp_data[0] += x->m_data_[ 
			     (lu_pix_y+j) * helper1
			     + (lu_pix_x+i) * x->m_csize_ + chRed ];
		
		temp_data[1] += x->m_data_[ 
			     ((lu_pix_y+j)) * helper1
			     + ((lu_pix_x+i)) * x->m_csize_ + chGreen ];
		
		temp_data[2] += x->m_data_[ 
			     ((lu_pix_y+j)%x->m_ysize_) * helper1
			     + ((lu_pix_x+i)%x->m_xsize_) * x->m_csize_ + chBlue ];
		
		add_count++;
	      }
	  }
	average_pix[0] = temp_data[0] / add_count;
	average_pix[1] = temp_data[1] / add_count;
	average_pix[2] = temp_data[2] / add_count;

	// the calculation of the network:
	*out_signal = x->net_[n].calculateNet(average_pix);

	//post("%d: RGBav: %f %f %f, out_signal: %f",
	//n,average_pix[0],average_pix[1],average_pix[2],*out_signal);
	
	// learning:
	if(x->train_on_)
	  x->net_[n].trainNet(average_pix, *out_signal, *in_signal);

	out_signal++;
	in_signal++;
      }
      break;
    default:
      post("RGB only for now");
    }
  } 
  else 
    {
      pix_blocksize=blocksize;
      while (pix_blocksize--) *out_signal++=0;
    }

  x->train_on_=false;
  return (w+5);
}

//----------------------------------------------------------
/* DSP-Message
 */
void pix_linNN::dspMess(void *data, t_signal** sp)
{
  dsp_add(perform, 4, data, sp[0]->s_vec, sp[1]->s_vec, sp[0]->s_n);
}

//----------------------------------------------------------
/* saves the contents of the current net to file
 * (it saves the neuron_nr_, learning rate,
 * IW-matrix and b1-vector of the net)
 */
void pix_linNN::saveNet(string filename)
{
  // open and check outfile
  ofstream outfile;
  outfile.open(filename.c_str());
  if(!outfile)
    {
      post("pix_linNN~: failed to open output-file!");
      return;
    }
  
  // write XML-header
  outfile << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\" ?>" << endl;

  // start-tag
  outfile << "<linNN>" << endl;

  // neuron_nr_(=size) and learning rate
  outfile << "\t<neurons> " << neuron_nr_ << " </neurons>" << endl;
  outfile << "\t<learnrate> " << net_[0].getLearningRate() 
	  << " </learnrate>" << endl;

  // now the IW-matrix of the neural net
  outfile << "\t<IW>" << endl;
  for(int i=0; i<neuron_nr_; i++)
    {
      outfile << "\t\t" << net_[i].getIW()[0] << " "
	      << net_[i].getIW()[1] << " "
	      << net_[i].getIW()[2] << endl;
    }
  outfile << "\t</IW>" << endl;

  // and the b1-vector
  outfile << "\t<b1>" << endl << "\t\t";
  for(int i=0; i<neuron_nr_; i++)
    {
      outfile << net_[i].getb1() << " ";
    }
  outfile << endl << "\t</b1>" << endl;

  // end-tag
  outfile << "</linNN>" << endl;


  outfile.close();
  post("pix_linNN~: saved to output-file %s", filename.c_str());
  return;
}
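/* For illustration only (values invented, not part of this commit):
 * with two neurons the file written by saveNet() above would look roughly
 * like this, and loadNet() below parses the same layout:
 *
 *   <?xml version="1.0" encoding="ISO-8859-1" ?>
 *   <linNN>
 *       <neurons> 2 </neurons>
 *       <learnrate> 0.01 </learnrate>
 *       <IW>
 *           0.12 -0.50 0.33
 *           -0.07 0.90 0.41
 *       </IW>
 *       <b1>
 *           0.25 -0.10
 *       </b1>
 *   </linNN>
 */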

//----------------------------------------------------------
/* loads the parameters of the net from file
 * (it loads the neuron_nr_, learning rate,
 * IW-matrix and b1-vector of the net)
 */
void pix_linNN::loadNet(string filename)
{
  // temp variables
  float IW[3];
  float b1, learnrate;

  ifstream infile;
  infile.open(filename.c_str());

  if(!infile)
    {
      post("pix_linNN~: cannot open input-file!");
      return;
    }

  post("pix_linNN~: loading input-file %s",filename.c_str());

  int state = 0, IWcount = 0, b1count = 0;
  bool tag=false;
  string line, temp;

  while (getline(infile, line))
    {
      istringstream instream(line);
      instream >> temp;

      // specify the tags
      //post("input: %s",temp.c_str());
      if( temp == "<neurons>" ) 
	{state=1; }
      if( temp == "<learnrate>" ) 
	{state=2; }
      if( temp == "<IW>" ) 
	{state=3; }
      if( temp == "<b1>" ) 
	{state=4; }
      if( !strncmp(temp.c_str(),"</",2) ) 
	{state=0;}

      if( !strncmp(temp.c_str(),"<",1) ) 
	{tag=true; }
      else 
	{tag=false; }

      // make string stream again
      instream.str(line);
      if(tag)
	instream >> temp; // if there's a tag, stream it


      bool go_on=false;
      while(!go_on)
	{
	  // end of a line
	  if(instream.eof() || !state)
	    {
	      go_on=true;
	      break;
	    }


	  // <neurons>
	  if(state == 1)
	    {
	      instream >> neuron_nr_;
	      if(!net_)
		{
		  // creates new nets
		  net_ = new LinNeuralNet[neuron_nr_](3);
		  if(!net_)
		    {
		      post("pix_linNN~: no memory for neural nets!");
		      break;
		    }
		}
	      for(int i=0; i<neuron_nr_; i++)
		{
		  if( !net_[i].createNeurons() )
		    {
		  post("pix_linNN~: error in creating the net!");
		  break;
		    }
		}

	      go_on=false;
	      break;
	    }
	  
	  // <learnrate>
	  if(state == 2)
	    {
	      instream >> learnrate;

	      for(int i=0; i<neuron_nr_; i++)
		net_[i].setLearningRate(learnrate);

	      go_on=false;
	      break;
	    }
	  
	  // <IW>
	  if(state == 3)
	    {
	      instream >> IW[0];
	      instream >> IW[1];
	      instream >> IW[2];
	      
	      if(IWcount<neuron_nr_)
		net_[IWcount++].setIW(IW);
	      else
		{
		  go_on = false;
		  break;
		}
	    }
	  
	  // <b1>
	  if(state == 4)
	    {
	      for(int i=0; i<neuron_nr_; i++)
		{	      
		  instream >> b1;
		  net_[b1count++].setb1(b1);
		}

	      go_on = false;
	      break;
	    }

	  //else:
	  go_on=false;
	  break;
	}
    }

  infile.close();
  return;
}

//----------------------------------------------------------
/* setup callback
 */
void pix_linNN::obj_setupCallback(t_class *classPtr)
{
  class_addcreator((t_newmethod)_classpix_linNN, gensym("pix_linNN~"), A_NULL);

  class_addmethod(classPtr, (t_method)pix_linNN::setNeurons,
		  gensym("neurons"), A_FLOAT, A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::getNeurons,
		  gensym("getneurons"), A_NULL); 
  class_addmethod(classPtr, (t_method)pix_linNN::setPrecision,
		  gensym("precision"), A_FLOAT, A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::getPrecision,
		  gensym("getprecision"), A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::setTrainOn,
		  gensym("train"), A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::setLearnrate,
		  gensym("learnrate"), A_FLOAT, A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::getLearnrate,
		  gensym("getlearnrate"), A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::saveToFile,
		  gensym("save"), A_SYMBOL, A_NULL);
  class_addmethod(classPtr, (t_method)pix_linNN::loadFromFile,
		  gensym("load"), A_SYMBOL, A_NULL);

  class_addmethod(classPtr, (t_method)pix_linNN::dspMessCallback, 
		  gensym("dsp"), A_NULL);
  class_addmethod(classPtr, nullfn, gensym("signal"), A_NULL);
}

//----------------------------------------------------------
/* DSP callback
 */
void pix_linNN::dspMessCallback(void *data, t_signal** sp)
{
  GetMyClass(data)->dspMess(data, sp);
}

//----------------------------------------------------------
/* sets the precision
 */
void pix_linNN::setPrecision(void *data, t_floatarg precision)
{
  GetMyClass(data)->precision_ = 
    (precision<1) ? 1 : static_cast<int>(precision);
}
void pix_linNN::getPrecision(void *data)
{
  post("pix_linNN~: precision: %d",GetMyClass(data)->precision_);
}

//----------------------------------------------------------
/* method to train the network
 */
void pix_linNN::setTrainOn(void *data)
{
  GetMyClass(data)->train_on_ = true; 
}

//----------------------------------------------------------
/* changes the number of neurons
 * (which should be the same as the audio buffer size)
 * ATTENTION: a new IW-matrix and b1-vector will be initialized
 */
void pix_linNN::setNeurons(void *data, t_floatarg neurons)
{
  GetMyClass(data)->neuron_nr_ =
    (neurons<1) ? 1 : static_cast<int>(neurons);

  if(GetMyClass(data)->net_)
    delete[] GetMyClass(data)->net_;

  // creates the nets
  GetMyClass(data)->net_ = new LinNeuralNet[GetMyClass(data)->neuron_nr_](3);
  if(!GetMyClass(data)->net_)
    {
      post("pix_linNN~: no memory for neural nets!");
      return;
    }

  for(int i=0; i<GetMyClass(data)->neuron_nr_; i++)
    {
      if( !GetMyClass(data)->net_[i].createNeurons() )
	{
	  post("pix_linNN~: error in creating the net!");
	  return;
	}
      if( !GetMyClass(data)->net_[i].initNetworkRand(-1,1) )
	{
	  post("pix_linNN~: error in initializing the net!");
	  return;
	}
    }
}
void pix_linNN::getNeurons(void *data)
{
  post("pix_linNN~: nr of neurons: %d (MUST be the same as buffersize!)",
       GetMyClass(data)->neuron_nr_);
}

//----------------------------------------------------------
/* sets the learnrate of the net
 */
void pix_linNN::setLearnrate(void *data, t_floatarg learn_rate)
{
  for(int i=0; i<GetMyClass(data)->neuron_nr_; i++)
    GetMyClass(data)->net_[i].setLearningRate(learn_rate);
}
void pix_linNN::getLearnrate(void *data)
{
  post("pix_linNN~: learning rate: %f",GetMyClass(data)->net_[0].getLearningRate());
}

//----------------------------------------------------------
/* FileIO-stuff
 */
void pix_linNN::saveToFile(void *data, t_symbol *filename)
{
  GetMyClass(data)->saveNet(filename->s_name);
}
void pix_linNN::loadFromFile(void *data, t_symbol *filename)
{
  GetMyClass(data)->loadNet(filename->s_name);
}

--- NEW FILE: help-pix_linNN.pd ---
#N canvas 871 74 498 738 10;
#X obj 28 237 gemwin;
#X msg 28 211 create \, 1;
#N canvas 463 0 765 790 pix2sig_stuff~ 0;
#X obj 120 35 gemhead;
#X obj 120 132 pix_texture;
#X obj 119 274 outlet~;
#X obj 139 185 square 4;
#X obj 139 163 separator;
#X obj 61 165 separator;
#X obj 120 101 pix_video;
#X msg 186 64 dimen 640 480;
#X obj 26 36 block~ 2048;
#X msg 186 38 dimen 320 240;
#X msg 76 535 getprecision;
#X msg 93 696 getlearnrate;
#X msg 65 671 learnrate 0.2;
#X msg 424 459 getneurons;
#X msg 404 206 train;
#X obj 31 227 inlet~;
#X msg 65 647 learnrate 0.05;
#X text 296 49 <- input dimension;
#X msg 76 498 precision \$1;
#X floatatom 76 481 5 0 0 0 - - -;
#X text 42 335 precision:;
#X text 53 358 1: means every pixel is used in calculation;
#X text 53 372 2: only every second pixel;
#X text 53 386 ...;
#X obj 62 411 loadbang;
#X msg 407 401 neurons 2048;
#X msg 407 422 neurons 64;
#X text 403 336 neurons:;
#X text 416 357 nr. of neurons used in the calculation;
#X text 415 370 (_MUST_ be the same as the buffersize !!!);
#X text 43 615 learnrate:;
#X msg 62 456 precision 1;
#X msg 62 436 precision 4;
#X text 397 126 train:;
#X text 417 152 trains the neural net;
#X text 418 166 (the current video frame to;
#X text 425 178 the current audio block);
#X obj 61 252 pix_linNN;
#X text 346 592 save/load;
#X text 359 614 saves/loads the currently trained net to/from a file;
#X msg 440 684 load net.dat;
#X msg 440 664 save net.dat;
#X obj 78 226 r \$0-linNN;
#X obj 404 233 s \$0-linNN;
#X obj 62 564 s \$0-linNN;
#X obj 407 492 s \$0-linNN;
#X obj 65 725 s \$0-linNN;
#X obj 440 723 s \$0-linNN;
#X connect 0 0 6 0;
#X connect 1 0 4 0;
#X connect 1 0 5 0;
#X connect 4 0 3 0;
#X connect 5 0 37 0;
#X connect 6 0 1 0;
#X connect 7 0 6 0;
#X connect 9 0 6 0;
#X connect 10 0 44 0;
#X connect 11 0 46 0;
#X connect 12 0 46 0;
#X connect 13 0 45 0;
#X connect 14 0 43 0;
#X connect 15 0 37 0;
#X connect 16 0 46 0;
#X connect 18 0 44 0;
#X connect 19 0 18 0;
#X connect 24 0 32 0;
#X connect 25 0 45 0;
#X connect 26 0 45 0;
#X connect 31 0 44 0;
#X connect 32 0 44 0;
#X connect 37 1 2 0;
#X connect 40 0 47 0;
#X connect 41 0 47 0;
#X connect 42 0 37 0;
#X restore 87 492 pd pix2sig_stuff~;
#X msg 102 212 0 \, destroy;
#X obj 114 537 unsig~;
#X obj 204 382 osc~ 440;
#X obj 203 406 *~;
#X obj 235 406 tgl 15 0 empty empty empty 0 -6 0 8 -262144 -1 -1 0
1;
#X obj 205 446 sig~ 0;
#X floatatom 115 558 8 0 0 0 - - -;
#X text 199 230 <- create gemwin;
#X obj 39 392 readsf~;
#X obj 39 351 openpanel;
#X msg 39 371 open \$1;
#X obj 39 330 bng 15 250 50 0 empty empty empty 0 -6 0 8 -262144 -1
-1;
#X text 65 329 <- load sample for training;
#X obj 120 367 tgl 25 0 empty empty empty 0 -6 0 8 -195568 -1 -1 0
1;
#X floatatom 204 364 5 0 0 0 - - -;
#X text 270 381 <- simple osc for training;
#X text 260 447 <- to train silence;
#X obj 83 413 bng 15 250 50 0 empty empty empty 0 -6 0 8 -262144 -1
-1;
#X text 214 491 <- audio/video work;
#X obj 88 634 dac~;
#X obj 88 609 *~;
#X obj 116 609 dbtorms;
#X floatatom 116 591 5 0 0 0 - - -;
#X text 166 588 <- outvol in dB;
#X text 110 703 Georg Holzmann <grh at mur.at> \, 2004;
#X text 24 23 pix_linNN:;
#X text 22 58 (see also pix_recNN !!!);
#X text 24 90 pix_linNN~ calculates an audio signal out of a video
frame with a linear neural network \, which can be trained.;
#X text 24 124 The network has one neuron per audio sample: this neuron
has three inputs (an RGB signal) \, a weight vector for each of the
inputs \, a bias value and a linear output function.;
#X connect 1 0 0 0;
#X connect 2 0 4 0;
#X connect 2 0 23 0;
#X connect 3 0 0 0;
#X connect 4 0 9 0;
#X connect 5 0 6 0;
#X connect 6 0 2 0;
#X connect 7 0 6 1;
#X connect 8 0 2 0;
#X connect 11 0 2 0;
#X connect 11 1 20 0;
#X connect 12 0 13 0;
#X connect 13 0 11 0;
#X connect 14 0 12 0;
#X connect 16 0 11 0;
#X connect 17 0 5 0;
#X connect 23 0 22 0;
#X connect 23 0 22 1;
#X connect 24 0 23 1;
#X connect 25 0 24 0;

--- NEW FILE: pix_linNN.h ---
/////////////////////////////////////////////////////////////////////////////
//
//   GEM - Graphics Environment for Multimedia
//
//   pix_linNN~
//   Calculates an audio signal out of a video frame
//   with a linear neural network, which can be trained
//
//   the network has one neuron per audio sample: this neuron has
//   three inputs (an RGB signal), a weight vector for each of the inputs,
//   a bias value and a linear output function
//   (see LinNeuralNet.h for more info)
//
//   header file
//
//   Copyright (c) 2004 Georg Holzmann <grh at gmx.at>
//   (and of course lots of other developers for PD and GEM)
//
//   For information on usage and redistribution, and for a DISCLAIMER OF ALL
//   WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
//
/////////////////////////////////////////////////////////////////////////////


#ifndef _INCLUDE_PIX_LINNN_H__
#define _INCLUDE_PIX_LINNN_H__

#include <string>
#include <sstream>
#include <fstream>
#include "Base/GemPixObj.h"
#include "LinNeuralNet.h"


using std::string;
using std::endl;
using std::ifstream;
using std::ofstream;
using std::istringstream;


/*-----------------------------------------------------------------
 *  CLASS
 * pix_linNN~
 *   
 * calculates an audio signal out of a video frame with
 * a linear neural network
 *   
 * KEYWORDS
 * pix audio
 *   
 * DESCRIPTION
 * 1 signal-outlet
 */
class GEM_EXTERN pix_linNN : public GemPixObj
{
  CPPEXTERN_HEADER(pix_linNN, GemPixObj)

 public:

  /* Constructor
   */
  pix_linNN(t_floatarg arg0, t_floatarg arg1);
    	
 protected:
    	
  /* Destructor
   */  
  virtual ~pix_linNN();


  //-----------------------------------
  /* Image STUFF:
   */
    
  /* The pixBlock with the current image
   *  pixBlock    	m_pixBlock;
   */
  unsigned char *m_data_;
  int            m_xsize_;
  int            m_ysize_;
  int            m_csize_;
  int            m_format_;

  /* precision of the image:
   * 1 means every pixel is taken for the calculation,
   * 2 every second pixel, 3 every third, ...
   */
  int precision_;

  /* processImage
   */
  virtual void processImage(imageStruct &image);


  //-----------------------------------
  /* Neural Network STUFF:
   */

  /* the linear neural nets
   * (size: buffsize)
   */
  LinNeuralNet *net_;

  /* training mode on
   * (will only be on for one audio buffer)
   */
  bool train_on_;

  /* the number of neurons
   * (= size of the array net_), which should be
   * THE SAME as the audio buffer size
   */
  int neuron_nr_;


  //-----------------------------------
  /* Audio STUFF:
   */

  /* the outlet
   */
  t_outlet *out0_;

  /* DSP perform
   */
  static t_int* perform(t_int* w);

  /* DSP-Message
   */
  virtual void dspMess(void *data, t_signal** sp);


  //-----------------------------------
  /* File IO:
   */

  /* saves the contents of the current net to file
   * (it saves the neuron_nr_, learning rate,
   * IW-matrix and b1-vector of the net)
   */
  virtual void saveNet(string filename);

  /* loads the parameters of the net from file
   * (it loads the neuron_nr_, learning rate,
   * IW-matrix and b1-vector of the net)
   */
  virtual void loadNet(string filename);

 private:

  //-----------------------------------
  /* static members
   * (interface to the PD world)
   */

  /* set/get the precision of the image calculation
   */
  static void setPrecision(void *data, t_floatarg precision);
  static void getPrecision(void *data);

  /* method to train the network
   */
  static void setTrainOn(void *data);

  /* changes the number of neurons
   * (which should be the same as the audio buffer size)
   * ATTENTION: a new IW-matrix and b1-vector will be initialized
   */
  static void setNeurons(void *data, t_floatarg neurons);
  static void getNeurons(void *data);

  /* sets the learnrate of the net
   */
  static void setLearnrate(void *data, t_floatarg learn_rate);
  static void getLearnrate(void *data);

  /* DSP callback
   */
  static void dspMessCallback(void* data, t_signal** sp);

  /* File IO:
   */
  static void saveToFile(void *data, t_symbol *filename);
  static void loadFromFile(void *data, t_symbol *filename); 
};

#endif	// for header file

--- NEW FILE: LinNeuralNet.h ---
/////////////////////////////////////////////////////////////////////////////
//
// class LinNeuralNet
//
//   this is an implementation of a simple linear neural net with one neuron,
//   so this net has a weight matrix IW and a bias vector b1;
//   this net can have n input values, but only one output value
//   (see the NeuralNet documentation for more information)
//
//   header file
//
//   Copyright (c) 2004 Georg Holzmann <grh at gmx.at>
//
//   For information on usage and redistribution, and for a DISCLAIMER OF ALL
//   WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
//
/////////////////////////////////////////////////////////////////////////////


#ifndef _INCLUDE_LIN_NEURAL_NET__
#define _INCLUDE_LIN_NEURAL_NET__

#include <stdlib.h>
#include <ctime>
//#include "m_pd.h"   // for debug

class LinNeuralNet
{
 protected:

  /* this is the number of input values, which is
   * automatically the netsize and the size of IW
   */
  int netsize_;

  /* the input weight matrix IW
   * (size: netsize )
   */
  float *IW_;

  /* the bias vector b1
   */
  float b1_;

  /* the learning rate of the net
   */
  float learn_rate_;

  /* the range of the input values should be from 0
   * to range_
   * output values are from -1 to 1
   */
  float range_;


 public:

  /* Constructor
   */
  LinNeuralNet(int netsize);

  /* Destructor
   */
  virtual ~LinNeuralNet();


  //-----------------------------------------------------

  /* Set/Get learning rate
   */
  virtual void setLearningRate(float learn_rate)
  {  learn_rate_=learn_rate; }
  virtual float getLearningRate() const
  {  return learn_rate_; }

  /* Set/Get range
   */
  virtual void setRange(float range)
  {  range_=range; }
  virtual float getRange() const
  {  return range_; }

  /* some more get/set methods
   */
  virtual int getNetsize() const
  {  return netsize_; }
  virtual float *getIW() const
  {  return IW_; }
  virtual void setIW(const float *IW)
  {  for(int i=0; i<netsize_; i++) IW_[i] = IW[i]; }
  virtual float getb1() const
  {  return b1_; }
  virtual void setb1(float b1)
  {  b1_ = b1; }


  //-----------------------------------------------------

  /* creates a new IW-matrix (size: netsize_) and 
   * b1-vector
   * returns false if there's a failure
   * ATTENTION: if they exist they'll be deleted
   */
  virtual bool createNeurons();

  /* inits the weight matrix and the bias vector of
   * the network with random values between [min|max]
   * returns false if there's a failure
   */
  virtual bool initNetworkRand(const int &min, const int &max);

  /* inits the net with a given weight matrix and bias
   * (makes a deep copy)
   * ATTENTION: the array behind the IW pointer must have
   *            the same size as the netsize !!!
   * returns false if there's a failure
   */
  virtual bool initNetwork(const float *IW, float b1);

  /* calculates the output with the current IW, b1 values
   * ATTENTION: the array input_data must have the same
   *            size as netsize_
   */
  virtual float calculateNet(float *input_data);

  /* this method trains the network:
   * input_data is, as above, the input data; output_data is the
   * output the current net produced for input_data (it is not
   * recalculated in this method!); target_output is the desired
   * output
   * (this is the LMS-algorithm to train linear neural networks)
   * returns false if there's a failure
   * ATTENTION: the array input_data must have the same
   *            size as netsize_
   */
  virtual bool trainNet(float *input_data, const float &output_data, 
			const float &target_output);

 private:
  /* Copy Construction is not allowed
   */
  LinNeuralNet(const LinNeuralNet &src)
    { }

  /* assignment operator is not allowed
   */
  const LinNeuralNet& operator= (const LinNeuralNet& src)
    { return *this; }
};




#endif //_INCLUDE_LIN_NEURAL_NET__
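Not part of the commit: a minimal standalone sketch (file name and all numeric values are made up for illustration) of how the LinNeuralNet class declared above can be driven on its own, compiled together with LinNeuralNet.cpp:

#include <cstdio>
#include "LinNeuralNet.h"

int main()
{
  LinNeuralNet net(3);                  // one neuron with 3 inputs (R, G, B)

  // allocate the weights and start from random values in [-1|1]
  if( !net.createNeurons() || !net.initNetworkRand(-1, 1) )
    return 1;

  net.setRange(255);                    // pixel values come in as 0..255
  net.setLearningRate(0.01f);

  float rgb[3] = { 200, 100, 50 };      // one averaged slice of a frame
  float target = 0.5f;                  // the sample value we want for it

  // repeated LMS steps pull the output towards the target
  for(int i=0; i<100; i++)
    {
      float out = net.calculateNet(rgb);
      net.trainNet(rgb, out, target);
    }

  std::printf("output after training: %f\n", net.calculateNet(rgb));
  return 0;
}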

--- NEW FILE: readme.txt ---
pix_linNN - by Georg Holzmann <grh at mur.at>, 2004
look at: http://grh.mur.at/software/thebrain.html

--------------------------------license---------------------------------------

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
 
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.

In the official pix_linNN distribution, the GNU General Public License is
in the file gpl.txt


-------------------------------information-----------------------------------

see the PD help patch



