This commit is contained in:
root
2024-07-12 02:25:57 +08:00
parent 3194678a15
commit 5fc63fdaf1
28 changed files with 351 additions and 142 deletions

View File

@@ -75,7 +75,7 @@ from pipeline import Pipeline
from algorithm.neat import *
from problem.rl_env import BraxEnv
from tensorneat.utils import Act
from tensorneat.utils import ACT
if __name__ == '__main__':
pipeline = Pipeline(
@@ -87,8 +87,8 @@ if __name__ == '__main__':
max_nodes=50,
max_conns=100,
node_gene=DefaultNodeGene(
activation_options=(Act.tanh,),
activation_default=Act.tanh,
activation_options=(ACT.tanh,),
activation_default=ACT.tanh,
)
),
pop_size=1000,

View File

@@ -3,7 +3,7 @@ from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.problem.rl import BraxEnv
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
import jax
@@ -26,10 +26,10 @@ if __name__ == "__main__":
num_outputs=6,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=Act.tanh,
aggregation_options=Agg.sum,
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=Act.standard_tanh,
output_transform=ACT.standard_tanh,
),
),
problem=BraxEnv(

View File

@@ -3,7 +3,7 @@ from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.problem.rl import BraxEnv
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
import jax, jax.numpy as jnp
@@ -26,10 +26,10 @@ if __name__ == "__main__":
num_outputs=6,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=Act.tanh,
aggregation_options=Agg.sum,
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=Act.standard_tanh,
output_transform=ACT.standard_tanh,
),
),
problem=BraxEnv(

View File

@@ -4,7 +4,7 @@ from tensorneat.pipeline import Pipeline
from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, DefaultNode, DefaultMutation, BiasNode
from tensorneat.problem.func_fit import CustomFuncFit
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
def pagie_polynomial(inputs):
@@ -35,10 +35,10 @@ if __name__ == "__main__":
num_outputs=1,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=[Act.identity, Act.inv, Act.square],
aggregation_options=[Agg.sum, Agg.product],
activation_options=[ACT.identity, ACT.inv, ACT.square],
aggregation_options=[AGG.sum, AGG.product],
),
output_transform=Act.identity,
output_transform=ACT.identity,
),
),
problem=custom_problem,

View File

@@ -2,7 +2,7 @@ from tensorneat.pipeline import Pipeline
from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome
from tensorneat.problem.func_fit import XOR3d
from tensorneat.common import Act
from tensorneat.common import ACT
if __name__ == "__main__":
pipeline = Pipeline(
@@ -14,7 +14,7 @@ if __name__ == "__main__":
num_inputs=3,
num_outputs=1,
init_hidden_layers=(),
output_transform=Act.standard_sigmoid,
output_transform=ACT.standard_sigmoid,
),
),
problem=XOR3d(),

View File

@@ -2,7 +2,7 @@ from tensorneat.pipeline import Pipeline
from tensorneat.algorithm.neat import NEAT
from tensorneat.algorithm.hyperneat import HyperNEAT, FullSubstrate
from tensorneat.genome import DefaultGenome
from tensorneat.common import Act
from tensorneat.common import ACT
from tensorneat.problem.func_fit import XOR3d
@@ -22,12 +22,12 @@ if __name__ == "__main__":
num_inputs=4, # size of query coors
num_outputs=1,
init_hidden_layers=(),
output_transform=Act.standard_tanh,
output_transform=ACT.standard_tanh,
),
),
activation=Act.tanh,
activation=ACT.tanh,
activate_time=10,
output_transform=Act.standard_sigmoid,
output_transform=ACT.standard_sigmoid,
),
problem=XOR3d(),
generation_limit=300,

View File

@@ -2,7 +2,7 @@ from tensorneat.pipeline import Pipeline
from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import RecurrentGenome
from tensorneat.problem.func_fit import XOR3d
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
if __name__ == "__main__":
pipeline = Pipeline(
@@ -14,7 +14,7 @@ if __name__ == "__main__":
num_inputs=3,
num_outputs=1,
init_hidden_layers=(),
output_transform=Act.standard_sigmoid,
output_transform=ACT.standard_sigmoid,
activate_time=10,
),
),

View File

@@ -5,7 +5,7 @@ from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.problem.rl import GymNaxEnv
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
@@ -24,8 +24,8 @@ if __name__ == "__main__":
num_outputs=3,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=Act.tanh,
aggregation_options=Agg.sum,
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=jnp.argmax,
),

View File

@@ -5,7 +5,7 @@ from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.problem.rl import GymNaxEnv
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
@@ -24,8 +24,8 @@ if __name__ == "__main__":
num_outputs=2,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=Act.tanh,
aggregation_options=Agg.sum,
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=jnp.argmax,
),

View File

@@ -4,7 +4,7 @@ from tensorneat.pipeline import Pipeline
from tensorneat.algorithm.neat import NEAT
from tensorneat.algorithm.hyperneat import HyperNEAT, FullSubstrate
from tensorneat.genome import DefaultGenome
from tensorneat.common import Act
from tensorneat.common import ACT
from tensorneat.problem import GymNaxEnv
@@ -27,10 +27,10 @@ if __name__ == "__main__":
num_inputs=4, # size of query coors
num_outputs=1,
init_hidden_layers=(),
output_transform=Act.standard_tanh,
output_transform=ACT.standard_tanh,
),
),
activation=Act.tanh,
activation=ACT.tanh,
activate_time=10,
output_transform=jnp.argmax,
),

View File

@@ -5,7 +5,7 @@ from tensorneat.algorithm.neat import NEAT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.problem.rl import GymNaxEnv
from tensorneat.common import Act, Agg
from tensorneat.common import ACT, AGG
@@ -21,10 +21,10 @@ if __name__ == "__main__":
num_outputs=1,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=Act.tanh,
aggregation_options=Agg.sum,
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=Act.standard_tanh,
output_transform=ACT.standard_tanh,
),
),
problem=GymNaxEnv(

View File

@@ -11,7 +11,7 @@
"from algorithm.neat.genome.advance import AdvanceInitialize\n",
"from algorithm.neat.gene.node.default_without_response import NodeGeneWithoutResponse\n",
"from utils.graph import topological_sort_python\n",
"from tensorneat.utils import Act, Agg\n",
"from tensorneat.utils import ACT, AGG\n",
"\n",
"import numpy as np"
],
@@ -36,11 +36,11 @@
" max_nodes=30,\n",
" max_conns=50,\n",
" node_gene=NodeGeneWithoutResponse(\n",
" activation_default= Act.identity,\n",
" aggregation_default=Agg.sum,\n",
" # activation_options=(Act.tanh, Act.sigmoid, Act.identity, Act.clamped),\n",
" activation_options=( Act.identity, ),\n",
" aggregation_options=(Agg.sum,),\n",
" activation_default= ACT.identity,\n",
" aggregation_default=AGG.sum,\n",
" # activation_options=(ACT.tanh, ACT.sigmoid, ACT.identity, ACT.clamped),\n",
" activation_options=( ACT.identity, ),\n",
" aggregation_options=(AGG.sum,),\n",
" ),\n",
" # output_transform=jnp.tanh,\n",
")\n",

File diff suppressed because one or more lines are too long

View File

@@ -7,7 +7,7 @@ from tensorneat.examples.with_evox.evox_algorithm_adaptor import EvoXAlgorithmAd
from tensorneat.examples.with_evox.tensorneat_monitor import TensorNEATMonitor
from tensorneat.algorithm import NEAT
from tensorneat.algorithm.neat import DefaultSpecies, DefaultGenome, DefaultNodeGene
from tensorneat.common import Act
from tensorneat.common import ACT
neat_algorithm = NEAT(
species=DefaultSpecies(
@@ -17,10 +17,10 @@ neat_algorithm = NEAT(
max_nodes=200,
max_conns=500,
node_gene=DefaultNodeGene(
activation_options=(Act.standard_tanh,),
activation_default=Act.standard_tanh,
activation_options=(ACT.standard_tanh,),
activation_default=ACT.standard_tanh,
),
output_transform=Act.tanh,
output_transform=ACT.tanh,
),
pop_size=10000,
species_size=10,

View File

@@ -0,0 +1,180 @@
Metadata-Version: 2.1
Name: tensorneat
Version: 0.1.0
Summary: tensorneat
Author-email: Lishuang Wang <wanglishuang22@gmail.com>
License: BSD 3-Clause License
Copyright (c) 2024, EMI-Group
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Project-URL: Homepage, https://github.com/EMI-Group/tensorneat
Project-URL: Bug Tracker, https://github.com/EMI-Group/tensorneat/issues
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: BSD License
Classifier: Intended Audience :: Science/Research
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: brax>=0.10.3
Requires-Dist: jax>=0.4.28
Requires-Dist: gymnax>=0.0.8
Requires-Dist: jaxopt>=0.8.3
Requires-Dist: optax>=0.2.2
Requires-Dist: flax>=0.8.4
Requires-Dist: mujoco>=3.1.4
Requires-Dist: mujoco-mjx>=3.1.4
<h1 align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="./imgs/evox_logo_dark.png">
<source media="(prefers-color-scheme: light)" srcset="./imgs/evox_logo_light.png">
<a href="https://github.com/EMI-Group/evox">
<img alt="EvoX Logo" height="50" src="./imgs/evox_logo_light.png">
</a>
</picture>
<br>
</h1>
<p align="center">
🌟 TensorNEAT: Tensorized NEAT Implementation in JAX 🌟
</p>
<p align="center">
<a href="https://arxiv.org/abs/2404.01817">
<img src="https://img.shields.io/badge/paper-arxiv-red?style=for-the-badge" alt="TensorNEAT Paper on arXiv">
</a>
</p>
## Introduction
TensorNEAT is a JAX-based library for NeuroEvolution of Augmenting Topologies (NEAT) algorithms, focused on harnessing GPU acceleration to enhance the efficiency of evolving neural network structures for complex tasks. Its core mechanism involves the tensorization of network topologies, enabling parallel processing and significantly boosting computational speed and scalability by leveraging modern hardware accelerators. TensorNEAT is compatible with the [EvoX](https://github.com/EMI-Group/evox/) framework.
## Requirements
Due to the rapid iteration of JAX versions, configuring the runtime environment for TensorNEAT can be challenging. We recommend the following versions for the relevant libraries:
- jax (0.4.28)
- jaxlib (0.4.28+cuda12.cudnn89)
- brax (0.10.3)
- gymnax (0.0.8)
We provide detailed JAX-related environment references in [recommend_environment](recommend_environment.txt). If you encounter any issues while configuring the environment yourself, you can use this as a reference.
## Example
Simple Example for XOR problem:
```python
from pipeline import Pipeline
from algorithm.neat import *
from problem.func_fit import XOR3d
if __name__ == '__main__':
pipeline = Pipeline(
algorithm=NEAT(
species=DefaultSpecies(
genome=DefaultGenome(
num_inputs=3,
num_outputs=1,
max_nodes=50,
max_conns=100,
),
pop_size=10000,
species_size=10,
compatibility_threshold=3.5,
),
),
problem=XOR3d(),
generation_limit=10000,
fitness_target=-1e-8
)
# initialize state
state = pipeline.setup()
# print(state)
# run until terminate
state, best = pipeline.auto_run(state)
# show result
pipeline.show(state, best)
```
Simple Example for RL envs in Brax (Ant):
```python
from pipeline import Pipeline
from algorithm.neat import *
from problem.rl_env import BraxEnv
from tensorneat.utils import ACT
if __name__ == '__main__':
pipeline = Pipeline(
algorithm=NEAT(
species=DefaultSpecies(
genome=DefaultGenome(
num_inputs=27,
num_outputs=8,
max_nodes=50,
max_conns=100,
node_gene=DefaultNodeGene(
activation_options=(ACT.tanh,),
activation_default=ACT.tanh,
)
),
pop_size=1000,
species_size=10,
),
),
problem=BraxEnv(
env_name='ant',
),
generation_limit=10000,
fitness_target=5000
)
# initialize state
state = pipeline.setup()
# print(state)
# run until terminate
state, best = pipeline.auto_run(state)
```
more examples are in `tensorneat/examples`.
## Community & Support
- Engage in discussions and share your experiences on [GitHub Discussion Board](https://github.com/EMI-Group/evox/discussions).
- Join our QQ group (ID: 297969717).
## Citing TensorNEAT
If you use TensorNEAT in your research and want to cite it in your work, please use:
```
@inproceedings{tensorneat,
  title = {{Tensorized} {NeuroEvolution} of {Augmenting} {Topologies} for {GPU} {Acceleration}},
  author = {Wang, Lishuang and Zhao, Mengfei and Liu, Enyu and Sun, Kebin and Cheng, Ran},
  booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference (GECCO)},
  year = {2024}
}

View File

@@ -0,0 +1,69 @@
LICENSE
README.md
pyproject.toml
src/tensorneat/pipeline.py
src/tensorneat.egg-info/PKG-INFO
src/tensorneat.egg-info/SOURCES.txt
src/tensorneat.egg-info/dependency_links.txt
src/tensorneat.egg-info/requires.txt
src/tensorneat.egg-info/top_level.txt
src/tensorneat/algorithm/__init__.py
src/tensorneat/algorithm/base.py
src/tensorneat/algorithm/hyperneat/__init__.py
src/tensorneat/algorithm/hyperneat/hyperneat.py
src/tensorneat/algorithm/hyperneat/substrate/__init__.py
src/tensorneat/algorithm/hyperneat/substrate/base.py
src/tensorneat/algorithm/hyperneat/substrate/default.py
src/tensorneat/algorithm/hyperneat/substrate/full.py
src/tensorneat/algorithm/neat/__init__.py
src/tensorneat/algorithm/neat/neat.py
src/tensorneat/algorithm/neat/species.py
src/tensorneat/common/__init__.py
src/tensorneat/common/graph.py
src/tensorneat/common/state.py
src/tensorneat/common/stateful_class.py
src/tensorneat/common/tools.py
src/tensorneat/common/functions/__init__.py
src/tensorneat/common/functions/act_jnp.py
src/tensorneat/common/functions/act_sympy.py
src/tensorneat/common/functions/agg_jnp.py
src/tensorneat/common/functions/agg_sympy.py
src/tensorneat/common/functions/manager.py
src/tensorneat/genome/__init__.py
src/tensorneat/genome/base.py
src/tensorneat/genome/default.py
src/tensorneat/genome/recurrent.py
src/tensorneat/genome/utils.py
src/tensorneat/genome/gene/__init__.py
src/tensorneat/genome/gene/base.py
src/tensorneat/genome/gene/conn/__init__.py
src/tensorneat/genome/gene/conn/base.py
src/tensorneat/genome/gene/conn/default.py
src/tensorneat/genome/gene/node/__init__.py
src/tensorneat/genome/gene/node/base.py
src/tensorneat/genome/gene/node/bias.py
src/tensorneat/genome/gene/node/default.py
src/tensorneat/genome/operations/__init__.py
src/tensorneat/genome/operations/crossover/__init__.py
src/tensorneat/genome/operations/crossover/base.py
src/tensorneat/genome/operations/crossover/default.py
src/tensorneat/genome/operations/distance/__init__.py
src/tensorneat/genome/operations/distance/base.py
src/tensorneat/genome/operations/distance/default.py
src/tensorneat/genome/operations/mutation/__init__.py
src/tensorneat/genome/operations/mutation/base.py
src/tensorneat/genome/operations/mutation/default.py
src/tensorneat/problem/__init__.py
src/tensorneat/problem/base.py
src/tensorneat/problem/func_fit/__init__.py
src/tensorneat/problem/func_fit/custom.py
src/tensorneat/problem/func_fit/func_fit.py
src/tensorneat/problem/func_fit/xor.py
src/tensorneat/problem/func_fit/xor3d.py
src/tensorneat/problem/rl/__init__.py
src/tensorneat/problem/rl/brax.py
src/tensorneat/problem/rl/gymnax.py
src/tensorneat/problem/rl/rl_jit.py
test/test_genome.py
test/test_nan_fitness.py
test/test_record_episode.py

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,8 @@
brax>=0.10.3
jax>=0.4.28
gymnax>=0.0.8
jaxopt>=0.8.3
optax>=0.2.2
flax>=0.8.4
mujoco>=3.1.4
mujoco-mjx>=3.1.4

View File

@@ -0,0 +1 @@
tensorneat

View File

@@ -4,7 +4,7 @@ import jax
from jax import vmap, numpy as jnp
from .substrate import *
from tensorneat.common import State, Act, Agg
from tensorneat.common import State, ACT, AGG
from tensorneat.algorithm import BaseAlgorithm, NEAT
from tensorneat.genome import BaseNode, BaseConn, RecurrentGenome
@@ -16,10 +16,10 @@ class HyperNEAT(BaseAlgorithm):
neat: NEAT,
weight_threshold: float = 0.3,
max_weight: float = 5.0,
aggregation: Callable = Agg.sum,
activation: Callable = Act.sigmoid,
aggregation: Callable = AGG.sum,
activation: Callable = ACT.sigmoid,
activate_time: int = 10,
output_transform: Callable = Act.standard_sigmoid,
output_transform: Callable = ACT.standard_sigmoid,
):
assert (
substrate.query_coors.shape[1] == neat.num_inputs
@@ -102,8 +102,8 @@ class HyperNEAT(BaseAlgorithm):
class HyperNEATNode(BaseNode):
def __init__(
self,
aggregation=Agg.sum,
activation=Act.sigmoid,
aggregation=AGG.sum,
activation=ACT.sigmoid,
):
super().__init__()
self.aggregation = aggregation

View File

@@ -1,56 +1,6 @@
from tensorneat.common.aggregation.agg_jnp import Agg, agg_func, AGG_ALL
from .tools import *
from .graph import *
from .state import State
from .stateful_class import StatefulBaseClass
from .aggregation.agg_jnp import Agg, AGG_ALL, agg_func
from .activation.act_jnp import Act, ACT_ALL, act_func
from .aggregation.agg_sympy import *
from .activation.act_sympy import *
from typing import Callable, Union
name2sympy = {
"sigmoid": SympySigmoid,
"standard_sigmoid": SympyStandardSigmoid,
"tanh": SympyTanh,
"standard_tanh": SympyStandardTanh,
"sin": SympySin,
"relu": SympyRelu,
"lelu": SympyLelu,
"identity": SympyIdentity,
"inv": SympyInv,
"log": SympyLog,
"exp": SympyExp,
"abs": SympyAbs,
"sum": SympySum,
"product": SympyProduct,
"max": SympyMax,
"min": SympyMin,
"maxabs": SympyMaxabs,
"mean": SympyMean,
"clip": SympyClip,
"square": SympySquare,
}
def convert_to_sympy(func: Union[str, Callable]):
if isinstance(func, str):
name = func
else:
name = func.__name__
if name in name2sympy:
return name2sympy[name]
else:
raise ValueError(
f"Can not convert to sympy! Function {name} not found in name2sympy"
)
SYMPY_FUNCS_MODULE_NP = {}
SYMPY_FUNCS_MODULE_JNP = {}
for cls in name2sympy.values():
if hasattr(cls, "numerical_eval"):
SYMPY_FUNCS_MODULE_NP[cls.__name__] = cls.numerical_eval
SYMPY_FUNCS_MODULE_JNP[cls.__name__] = partial(cls.numerical_eval, backend=jnp)
from .functions import ACT, AGG, apply_activation, apply_aggregation

View File

@@ -4,10 +4,10 @@ import numpy as np
import jax, jax.numpy as jnp
import sympy as sp
from tensorneat.common import (
Act,
Agg,
act_func,
agg_func,
ACT,
AGG,
apply_activation,
apply_aggregation,
mutate_int,
mutate_float,
convert_to_sympy,
@@ -34,10 +34,10 @@ class BiasNode(BaseNode):
bias_lower_bound: float = -5,
bias_upper_bound: float = 5,
aggregation_default: Optional[Callable] = None,
aggregation_options: Union[Callable, Sequence[Callable]] = Agg.sum,
aggregation_options: Union[Callable, Sequence[Callable]] = AGG.sum,
aggregation_replace_rate: float = 0.1,
activation_default: Optional[Callable] = None,
activation_options: Union[Callable, Sequence[Callable]] = Act.sigmoid,
activation_options: Union[Callable, Sequence[Callable]] = ACT.sigmoid,
activation_replace_rate: float = 0.1,
):
super().__init__()
@@ -73,7 +73,7 @@ class BiasNode(BaseNode):
def new_identity_attrs(self, state):
return jnp.array(
[0, self.aggregation_default, -1]
) # activation=-1 means Act.identity
) # activation=-1 means ACT.identity
def new_random_attrs(self, state, randkey):
k1, k2, k3 = jax.random.split(randkey, num=3)
@@ -115,12 +115,12 @@ class BiasNode(BaseNode):
def forward(self, state, attrs, inputs, is_output_node=False):
bias, agg, act = attrs
z = agg_func(agg, inputs, self.aggregation_options)
z = apply_aggregation(agg, inputs, self.aggregation_options)
z = bias + z
# the last output node should not be activated
z = jax.lax.cond(
is_output_node, lambda: z, lambda: act_func(act, z, self.activation_options)
is_output_node, lambda: z, lambda: apply_activation(act, z, self.activation_options)
)
return z
@@ -134,7 +134,7 @@ class BiasNode(BaseNode):
act = int(act)
if act == -1:
act_func = Act.identity
act_func = ACT.identity
else:
act_func = self.activation_options[act]
return "{}(idx={:<{idx_width}}, bias={:<{float_width}}, aggregation={:<{func_width}}, activation={:<{func_width}})".format(
@@ -158,7 +158,7 @@ class BiasNode(BaseNode):
act = int(act)
if act == -1:
act_func = Act.identity
act_func = ACT.identity
else:
act_func = self.activation_options[act]

View File

@@ -5,10 +5,10 @@ import jax, jax.numpy as jnp
import sympy as sp
from tensorneat.common import (
Act,
Agg,
act_func,
agg_func,
ACT,
AGG,
apply_activation,
apply_aggregation,
mutate_int,
mutate_float,
convert_to_sympy,
@@ -39,10 +39,10 @@ class DefaultNode(BaseNode):
response_lower_bound: float = -5,
response_upper_bound: float = 5,
aggregation_default: Optional[Callable] = None,
aggregation_options: Union[Callable, Sequence[Callable]] = Agg.sum,
aggregation_options: Union[Callable, Sequence[Callable]] = AGG.sum,
aggregation_replace_rate: float = 0.1,
activation_default: Optional[Callable] = None,
activation_options: Union[Callable, Sequence[Callable]] = Act.sigmoid,
activation_options: Union[Callable, Sequence[Callable]] = ACT.sigmoid,
activation_replace_rate: float = 0.1,
):
super().__init__()
@@ -89,7 +89,7 @@ class DefaultNode(BaseNode):
agg = self.aggregation_default
act = self.activation_default
return jnp.array([bias, res, agg, act]) # activation=-1 means Act.identity
return jnp.array([bias, res, agg, act]) # activation=-1 means ACT.identity
def new_random_attrs(self, state, randkey):
k1, k2, k3, k4 = jax.random.split(randkey, num=4)
@@ -148,12 +148,12 @@ class DefaultNode(BaseNode):
def forward(self, state, attrs, inputs, is_output_node=False):
bias, res, agg, act = attrs
z = agg_func(agg, inputs, self.aggregation_options)
z = apply_aggregation(agg, inputs, self.aggregation_options)
z = bias + res * z
# the last output node should not be activated
z = jax.lax.cond(
is_output_node, lambda: z, lambda: act_func(act, z, self.activation_options)
is_output_node, lambda: z, lambda: apply_activation(act, z, self.activation_options)
)
return z
@@ -168,7 +168,7 @@ class DefaultNode(BaseNode):
act = int(act)
if act == -1:
act_func = Act.identity
act_func = ACT.identity
else:
act_func = self.activation_options[act]
return "{}(idx={:<{idx_width}}, bias={:<{float_width}}, response={:<{float_width}}, aggregation={:<{func_width}}, activation={:<{func_width}})".format(
@@ -193,7 +193,7 @@ class DefaultNode(BaseNode):
act = int(act)
if act == -1:
act_func = Act.identity
act_func = ACT.identity
else:
act_func = self.activation_options[act]
return {

View File

@@ -1,5 +1,5 @@
import jax, jax.numpy as jnp
from tensorneat.common import Act
from tensorneat.common import ACT
from algorithm.neat import *
import numpy as np

View File

@@ -1,5 +1,5 @@
import jax, jax.numpy as jnp
from tensorneat.common import Act
from tensorneat.common import ACT
from algorithm.neat import *
import numpy as np
@@ -17,8 +17,8 @@ def main():
max_nodes=20,
max_conns=20,
node_gene=DefaultNodeGene(
activation_options=(Act.tanh,),
activation_default=Act.tanh,
activation_options=(ACT.tanh,),
activation_default=ACT.tanh,
),
)

View File

@@ -27,7 +27,7 @@
"from algorithm.neat.gene.node.kan_node import KANNode\n",
"from algorithm.neat.gene.conn.bspline import BSplineConn\n",
"from problem.func_fit import XOR3d\n",
"from tensorneat.utils import Act\n",
"from tensorneat.utils import ACT\n",
"\n",
"import jax, jax.numpy as jnp\n",
"\n",
@@ -38,7 +38,7 @@
" max_conns=10,\n",
" node_gene=KANNode(),\n",
" conn_gene=BSplineConn(),\n",
" output_transform=Act.sigmoid, # the activation function for output node\n",
" output_transform=ACT.sigmoid, # the activation function for output node\n",
" mutation=DefaultMutation(\n",
" node_add=0.1,\n",
" conn_add=0.1,\n",

View File

@@ -1,5 +1,5 @@
import jax, jax.numpy as jnp
from tensorneat.common import Act
from tensorneat.common import ACT
from algorithm.neat import *
import numpy as np
@@ -17,8 +17,8 @@ def main():
max_nodes=20,
max_conns=20,
node_gene=DefaultNodeGene(
activation_options=(Act.tanh,),
activation_default=Act.tanh,
activation_options=(ACT.tanh,),
activation_default=ACT.tanh,
),
)

View File

@@ -145,10 +145,10 @@
"source": [
"from algorithm.neat.gene.node.normalized import NormalizedNode\n",
"from algorithm.neat.gene.conn import DefaultConnGene\n",
"from tensorneat.utils import Act\n",
"from tensorneat.utils import ACT\n",
"\n",
"genome = DefaultGenome(num_inputs=3, num_outputs=2, max_nodes=10, max_conns=10,\n",
" node_gene=NormalizedNode(activation_default=Act.identity, activation_options=(Act.identity,)),\n",
" node_gene=NormalizedNode(activation_default=ACT.identity, activation_options=(ACT.identity,)),\n",
" conn_gene=DefaultConnGene(weight_init_mean=1))\n",
"state = genome.setup()\n",
"randkey = jax.random.key(0)\n",