Update functions; add visualization, interpretability, and EvoX integration

This commit is contained in:
root
2024-07-12 04:35:22 +08:00
parent 5fc63fdaf1
commit 0d6e7477bf
32 changed files with 207 additions and 427 deletions

View File

@@ -20,16 +20,16 @@ if __name__ == "__main__":
survival_threshold=0.1,
compatibility_threshold=1.0,
genome=DefaultGenome(
max_nodes=100,
max_nodes=50,
max_conns=200,
num_inputs=17,
num_outputs=6,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=ACT.tanh,
activation_options=ACT.scaled_tanh,
aggregation_options=AGG.sum,
),
output_transform=ACT.standard_tanh,
output_transform=ACT.tanh,
),
),
problem=BraxEnv(

View File

@@ -20,7 +20,7 @@ if __name__ == "__main__":
survival_threshold=0.1,
compatibility_threshold=1.0,
genome=DefaultGenome(
max_nodes=100,
max_nodes=50,
max_conns=200,
num_inputs=17,
num_outputs=6,
@@ -29,7 +29,7 @@ if __name__ == "__main__":
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=ACT.standard_tanh,
output_transform=ACT.tanh,
),
),
problem=BraxEnv(

View File

@@ -6,7 +6,7 @@ from tensorneat.genome import DefaultGenome, DefaultNode, DefaultMutation, BiasN
from tensorneat.problem.func_fit import CustomFuncFit
from tensorneat.common import ACT, AGG
# define a custom function fit problem
def pagie_polynomial(inputs):
x, y = inputs
res = 1 / (1 + jnp.pow(x, -4)) + 1 / (1 + jnp.pow(y, -4))
@@ -14,9 +14,12 @@ def pagie_polynomial(inputs):
# important! returns an array, NOT a scalar
return jnp.array([res])
# define custom activate function and register it
def square(x):
    """Custom activation function: element-wise square of ``x``."""
    return x * x
ACT.add_func("square", square)
if __name__ == "__main__":
custom_problem = CustomFuncFit(
func=pagie_polynomial,
low_bounds=[-1, -1],

View File

@@ -14,7 +14,7 @@ if __name__ == "__main__":
num_inputs=3,
num_outputs=1,
init_hidden_layers=(),
output_transform=ACT.standard_sigmoid,
output_transform=ACT.sigmoid,
),
),
problem=XOR3d(),

View File

@@ -22,12 +22,12 @@ if __name__ == "__main__":
num_inputs=4, # size of query coors
num_outputs=1,
init_hidden_layers=(),
output_transform=ACT.standard_tanh,
output_transform=ACT.tanh,
),
),
activation=ACT.tanh,
activate_time=10,
output_transform=ACT.standard_sigmoid,
output_transform=ACT.sigmoid,
),
problem=XOR3d(),
generation_limit=300,

View File

@@ -14,7 +14,7 @@ if __name__ == "__main__":
num_inputs=3,
num_outputs=1,
init_hidden_layers=(),
output_transform=ACT.standard_sigmoid,
output_transform=ACT.sigmoid,
activate_time=10,
),
),

View File

@@ -27,7 +27,7 @@ if __name__ == "__main__":
num_inputs=4, # size of query coors
num_outputs=1,
init_hidden_layers=(),
output_transform=ACT.standard_tanh,
output_transform=ACT.tanh,
),
),
activation=ACT.tanh,

View File

@@ -24,7 +24,7 @@ if __name__ == "__main__":
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
output_transform=ACT.standard_tanh,
output_transform=ACT.tanh,
),
),
problem=GymNaxEnv(

View File

@@ -1,16 +1,16 @@
import jax, jax.numpy as jnp
from algorithm.neat import *
from algorithm.neat.genome.dense import DenseInitialize
from utils.graph import topological_sort_python
from tensorneat.genome import DefaultGenome
from tensorneat.common import *
from tensorneat.common.functions import SympySigmoid
if __name__ == "__main__":
genome = DenseInitialize(
genome = DefaultGenome(
num_inputs=3,
num_outputs=1,
max_nodes=50,
max_conns=500,
output_transform=ACT.sigmoid,
)
state = genome.setup()
@@ -22,7 +22,7 @@ if __name__ == "__main__":
input_idx, output_idx = genome.get_input_idx(), genome.get_output_idx()
res = genome.sympy_func(state, network, sympy_input_transform=lambda x: 999999999*x, sympy_output_transform=SympyStandardSigmoid)
res = genome.sympy_func(state, network, sympy_input_transform=lambda x: 999*x, sympy_output_transform=SympySigmoid)
(symbols,
args_symbols,
input_symbols,
@@ -35,3 +35,11 @@ if __name__ == "__main__":
inputs = jnp.zeros(3)
print(forward_func(inputs))
print(genome.forward(state, genome.transform(state, nodes, conns), inputs))
print(AGG.sympy_module("jax"))
print(AGG.sympy_module("numpy"))
print(ACT.sympy_module("jax"))
print(ACT.sympy_module("numpy"))

View File

@@ -1,34 +0,0 @@
import jax.numpy as jnp
from evox import Algorithm as EvoXAlgorithm, State as EvoXState, jit_class
from tensorneat.algorithm import BaseAlgorithm as TensorNEATAlgorithm
from tensorneat.common import State as TensorNEATState
@jit_class
class EvoXAlgorithmAdaptor(EvoXAlgorithm):
    """Adaptor that lets a TensorNEAT algorithm run inside an EvoX workflow.

    EvoX drives the ask/tell loop through its own ``State`` objects; this
    wrapper forwards those calls to the underlying TensorNEAT algorithm,
    keeping the NEAT state nested inside the EvoX state.
    """

    def __init__(self, algorithm: TensorNEATAlgorithm):
        self.algorithm = algorithm
        self.fixed_state = None  # cached NEAT state, populated by setup()

    def setup(self, key):
        """Initialize the wrapped algorithm from a PRNG key and cache its state."""
        alg_state = self.algorithm.setup(TensorNEATState(randkey=key))
        self.fixed_state = alg_state
        return EvoXState(alg_state=alg_state)

    def ask(self, state: EvoXState):
        """Return the candidate population for the current generation."""
        return self.algorithm.ask(state.alg_state), state

    def tell(self, state: EvoXState, fitness):
        """Report fitnesses back to the algorithm; NaNs are mapped to -inf."""
        cleaned = jnp.where(jnp.isnan(fitness), -jnp.inf, fitness)
        return state.replace(alg_state=self.algorithm.tell(state.alg_state, cleaned))

    def transform(self, individual):
        """Decode an individual genome using the cached setup-time state."""
        return self.algorithm.transform(self.fixed_state, individual)

    def forward(self, transformed, inputs):
        """Evaluate a decoded network on ``inputs``."""
        return self.algorithm.forward(self.fixed_state, transformed, inputs)

View File

@@ -1,133 +0,0 @@
import datetime
import os
import time
import warnings

import jax
import numpy as np
from jax.experimental import io_callback

from evox import Monitor
from evox import State as EvoXState

from tensorneat.algorithm import BaseAlgorithm as TensorNEATAlgorithm
from tensorneat.common import State as TensorNEATState
class TensorNEATMonitor(Monitor):
    """EvoX monitor that tracks and reports TensorNEAT training progress.

    Registers a ``pre_tell`` hook to capture the wrapped NEAT algorithm's
    state and the transformed fitnesses each generation. ``show()`` prints
    per-generation statistics (node/conn counts, species sizes, fitness
    summary) and, when ``is_save`` is set, persists the best genome and a
    CSV-style log under ``save_dir``.
    """

    def __init__(
        self,
        neat_algorithm: TensorNEATAlgorithm,
        save_dir: str = None,
        is_save: bool = False,
    ):
        """Create the monitor.

        Args:
            neat_algorithm: the TensorNEAT algorithm being monitored.
            save_dir: directory for genomes/logs; auto-generated from the
                class name and a timestamp when None.
            is_save: enable saving best genomes and the text log to disk.
        """
        super().__init__()
        self.neat_algorithm = neat_algorithm
        self.generation_timestamp = time.time()
        self.alg_state: TensorNEATState = None  # filled in by store_info()
        self.fitness = None                     # filled in by store_info()
        self.best_fitness = -np.inf
        self.best_genome = None
        self.is_save = is_save
        if is_save:
            if save_dir is None:
                # Bug fix: `datetime` was used here without being imported,
                # raising NameError whenever no save_dir was supplied.
                now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
                self.save_dir = f"./{self.__class__.__name__} {now}"
            else:
                self.save_dir = save_dir
            print(f"save to {self.save_dir}")
            # exist_ok avoids a race between the exists() check and creation
            os.makedirs(self.save_dir, exist_ok=True)
            self.genome_dir = os.path.join(self.save_dir, "genomes")
            os.makedirs(self.genome_dir, exist_ok=True)

    def hooks(self):
        """Tell EvoX which workflow stages this monitor hooks into."""
        return ["pre_tell"]

    def pre_tell(
        self, state: EvoXState, cand_sol, transformed_cand_sol, fitness, transformed_fitness
    ):
        """Snapshot state and fitness on the host before each tell()."""
        # io_callback moves the device-side values out of the jitted
        # workflow so they can be stored on the Python side.
        io_callback(
            self.store_info,
            None,
            state,
            transformed_fitness,
        )

    def store_info(self, state: EvoXState, fitness):
        """Host-side callback: stash the NEAT state and fitness array."""
        self.alg_state: TensorNEATState = state.query_state("algorithm").alg_state
        self.fitness = jax.device_get(fitness)

    def show(self):
        """Print this generation's statistics; optionally save the best genome."""
        pop = self.neat_algorithm.ask(self.alg_state)

        # -inf entries mark invalid individuals (e.g. NaN fitness); exclude
        # them from the summary statistics.
        valid_fitnesses = self.fitness[~np.isinf(self.fitness)]
        max_f, min_f, mean_f, std_f = (
            max(valid_fitnesses),
            min(valid_fitnesses),
            np.mean(valid_fitnesses),
            np.std(valid_fitnesses),
        )

        new_timestamp = time.time()
        cost_time = new_timestamp - self.generation_timestamp
        self.generation_timestamp = new_timestamp

        max_idx = np.argmax(self.fitness)
        if self.fitness[max_idx] > self.best_fitness:
            self.best_fitness = self.fitness[max_idx]
            self.best_genome = pop[0][max_idx], pop[1][max_idx]

            # save best genome if saving is enabled
            if self.is_save:
                best_genome = jax.device_get((pop[0][max_idx], pop[1][max_idx]))
                with open(
                    os.path.join(
                        self.genome_dir,
                        f"{int(self.neat_algorithm.generation(self.alg_state))}.npz",
                    ),
                    "wb",
                ) as f:
                    np.savez(
                        f,
                        nodes=best_genome[0],
                        conns=best_genome[1],
                        fitness=self.best_fitness,
                    )

        member_count = jax.device_get(self.neat_algorithm.member_count(self.alg_state))
        species_sizes = [int(i) for i in member_count if i > 0]

        pop = jax.device_get(pop)
        pop_nodes, pop_conns = pop  # (P, N, NL), (P, C, CL)
        # NaN in column 0 marks an unused slot; count the occupied ones.
        nodes_cnt = (~np.isnan(pop_nodes[:, :, 0])).sum(axis=1)  # (P,)
        conns_cnt = (~np.isnan(pop_conns[:, :, 0])).sum(axis=1)  # (P,)

        max_node_cnt, min_node_cnt, mean_node_cnt = (
            max(nodes_cnt),
            min(nodes_cnt),
            np.mean(nodes_cnt),
        )
        max_conn_cnt, min_conn_cnt, mean_conn_cnt = (
            max(conns_cnt),
            min(conns_cnt),
            np.mean(conns_cnt),
        )

        print(
            f"Generation: {self.neat_algorithm.generation(self.alg_state)}, Cost time: {cost_time * 1000:.2f}ms\n",
            f"\tnode counts: max: {max_node_cnt}, min: {min_node_cnt}, mean: {mean_node_cnt:.2f}\n",
            f"\tconn counts: max: {max_conn_cnt}, min: {min_conn_cnt}, mean: {mean_conn_cnt:.2f}\n",
            f"\tspecies: {len(species_sizes)}, {species_sizes}\n",
            f"\tfitness: valid cnt: {len(valid_fitnesses)}, max: {max_f:.4f}, min: {min_f:.4f}, mean: {mean_f:.4f}, std: {std_f:.4f}\n",
        )

        # append one CSV line per generation to the log
        if self.is_save:
            with open(os.path.join(self.save_dir, "log.txt"), "a") as f:
                f.write(
                    f"{self.neat_algorithm.generation(self.alg_state)},{max_f},{min_f},{mean_f},{std_f},{cost_time}\n"
                )

View File

@@ -1,29 +1,29 @@
import jax
import jax.numpy as jnp
from evox import workflows, algorithms, problems
from evox import workflows, problems
from tensorneat.examples.with_evox.evox_algorithm_adaptor import EvoXAlgorithmAdaptor
from tensorneat.examples.with_evox.tensorneat_monitor import TensorNEATMonitor
from tensorneat.common.evox_adaptors import EvoXAlgorithmAdaptor, TensorNEATMonitor
from tensorneat.algorithm import NEAT
from tensorneat.algorithm.neat import DefaultSpecies, DefaultGenome, DefaultNodeGene
from tensorneat.common import ACT
from tensorneat.genome import DefaultGenome, BiasNode
from tensorneat.common import ACT, AGG
neat_algorithm = NEAT(
species=DefaultSpecies(
genome=DefaultGenome(
num_inputs=17,
num_outputs=6,
max_nodes=200,
max_conns=500,
node_gene=DefaultNodeGene(
activation_options=(ACT.standard_tanh,),
activation_default=ACT.standard_tanh,
),
output_transform=ACT.tanh,
pop_size=1000,
species_size=20,
survival_threshold=0.1,
compatibility_threshold=1.0,
genome=DefaultGenome(
max_nodes=50,
max_conns=200,
num_inputs=17,
num_outputs=6,
init_hidden_layers=(),
node_gene=BiasNode(
activation_options=ACT.tanh,
aggregation_options=AGG.sum,
),
pop_size=10000,
species_size=10,
output_transform=ACT.tanh,
),
)
evox_algorithm = EvoXAlgorithmAdaptor(neat_algorithm)
@@ -37,12 +37,13 @@ problem = problems.neuroevolution.Brax(
policy=evox_algorithm.forward,
max_episode_length=1000,
num_episodes=1,
backend="mjx"
)
def nan2inf(x):
return jnp.where(jnp.isnan(x), -jnp.inf, x)
# create a workflow
workflow = workflows.StdWorkflow(
algorithm=evox_algorithm,
@@ -55,11 +56,11 @@ workflow = workflows.StdWorkflow(
# init the workflow
state = workflow.init(workflow_key)
# state = workflow.enable_multi_devices(state)
# run the workflow for 100 steps
import time
# enable multi devices
state = workflow.enable_multi_devices(state)
# run the workflow for 100 steps
for i in range(100):
tic = time.time()
train_info, state = workflow.step(state)
monitor.show()
monitor.show()