The gene expression process in nature involves several
representation transformations of the genome.
Translation is one among them; it constructs the amino
acid sequence in proteins from the nucleic acid-based
mRNA sequence. Translation is defined by a code book,
known as the universal genetic code. This paper
explores the role of the genetic code and similar
representation transformations in enhancing the
performance of inductive machine learning algorithms.
It considers an abstract model of genetic code-like
transformations (GCTs) introduced elsewhere [21] and
develops the notion of randomized GCTs. It shows that
randomized GCTs can construct a representation of the
learning problem in which the mean-square-error surface
is approximately a convex quadratic and therefore easier
to minimize. It uses the functionally complete Fourier
representation of Boolean functions to analyze this
effect of representation transformations, and it offers
experimental results to substantiate the claim. It shows
that a linear classifier such as the Perceptron [38] can
learn the non-linear XOR and DNF functions using a
gradient-descent algorithm in a representation
constructed by randomized GCTs. The paper also
discusses the immediate challenges that must be solved
before the proposed technique can be used as a viable
approach for representation construction in machine
learning.
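The abstract's central claim, that a randomized code-like re-representation can make XOR learnable by a linear unit, can be illustrated with a small sketch. The Python fragment below is not the paper's construction: it assumes, for illustration only, a code book defined over the whole 2-bit input word rather than the paper's symbol-wise codons, and a textbook perceptron update in place of the specific gradient-descent learner analyzed there; the names make_code_book and perceptron_train are hypothetical.

    import random

    def make_code_book(code_len=8, rng=random):
        # Assign every possible 2-bit input word a random binary code string.
        words = [(a, b) for a in (0, 1) for b in (0, 1)]
        return {w: [rng.randint(0, 1) for _ in range(code_len)] for w in words}

    def perceptron_train(samples, epochs=200, lr=0.1):
        # Plain perceptron updates on (feature vector, target) pairs.
        dim = len(samples[0][0])
        w, b = [0.0] * dim, 0.0
        for _ in range(epochs):
            errors = 0
            for x, t in samples:
                y = 1 if sum(wi * xi for wi, xi in zip(w, x)) + b > 0 else 0
                if y != t:
                    errors += 1
                    for i in range(dim):
                        w[i] += lr * (t - y) * x[i]
                    b += lr * (t - y)
            if errors == 0:
                return w, b, True
        return w, b, False

    # XOR is not linearly separable in its native 2-bit representation.
    xor = {(0, 0): 0, (0, 1): 1, (1, 0): 1, (1, 1): 0}
    for attempt in range(1, 21):
        book = make_code_book()                        # randomized "code book"
        data = [(book[x], t) for x, t in xor.items()]  # transformed representation
        w, b, ok = perceptron_train(data)
        if ok:
            print("perceptron fits XOR in the transformed space, attempt", attempt)
            break

Because the code book is drawn at random, a particular draw may fail to separate the classes; re-drawing it, in the spirit of the randomized GCTs described above, usually succeeds within a few attempts.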
@article{Kargupta+ghosh:2002:GPEM,
abstract = {The gene expression process in nature involves several
representation transformations of the genome.
Translation is one among them; it constructs the amino
acid sequence in proteins from the nucleic acid-based
mRNA sequence. Translation is defined by a code book,
known as the universal genetic code. This paper
explores the role of genetic code and similar
representation transformations for enhancing the
performance of inductive machine learning algorithms.
It considers an abstract model of genetic code-like
transformations (GCTs) introduced elsewhere [21] and
develops the notion of randomized GCTs. It shows that
randomized GCTs can construct a representation of the
learning problem where the mean-square-error surface is
almost convex quadratic and therefore easier to
minimize. It considers the functionally complete
Fourier representation of Boolean functions to analyze
this effect of such representation transformations. It
offers experimental results to substantiate this claim.
It shows that a linear classifier like the Perceptron
[38] can learn non-linear XOR and DNF functions using a
gradient-descent algorithm in a representation
constructed by randomized GCTs. The paper also
discusses the immediate challenges that must be solved
before the proposed technique can be used as a viable
approach for representation construction in machine
learning.},
author = {Kargupta, Hillol and Ghosh, Samiran},
biburl = {https://www.bibsonomy.org/bibtex/23e7f54b7c817c5c4280e909328bd5701/brazovayeye},
doi = {10.1023/A:1020130108341},
issn = {1389-2576},
journal = {Genetic Programming and Evolvable Machines},
keywords = {algorithms, code, construction, expression, gene, genetic, learning, machine, representation},
month = {September},
notes = {Article ID: 5091790},
number = 3,
pages = {231--258},
title = {Toward Machine Learning Through Genetic Code-like
Transformations},
volume = 3,
year = 2002
}