又搞到3点,还是没有找到问题在哪,不过已经排除了是forward的问题
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
from .genome import create_initialize_function, expand, expand_single, analysis
|
||||
from .genome import create_initialize_function, expand, expand_single, analysis, pop_analysis
|
||||
from .distance import distance
|
||||
from .mutate import create_mutate_function
|
||||
from .forward import create_forward_function
|
||||
|
||||
@@ -69,7 +69,6 @@ agg_name2key = {
|
||||
|
||||
def agg(idx, z):
|
||||
idx = np.asarray(idx, dtype=np.int32)
|
||||
|
||||
if np.all(z == 0.):
|
||||
return 0
|
||||
else:
|
||||
|
||||
@@ -76,7 +76,6 @@ def forward_single(inputs: NDArray, N: int, input_idx: NDArray, output_idx: NDAr
|
||||
# for some nodes (inputs nodes), the output z will be nan, thus we do not update the vals
|
||||
ini_vals[i] = z
|
||||
|
||||
|
||||
return ini_vals[output_idx]
|
||||
|
||||
|
||||
|
||||
@@ -198,8 +198,13 @@ def analysis(nodes: NDArray, connections: NDArray, input_keys, output_keys) -> \
|
||||
|
||||
def pop_analysis(pop_nodes, pop_connections, input_keys, output_keys):
    """
    Run `analysis` on every genome in the population.

    :param pop_nodes: iterable of per-genome node arrays
    :param pop_connections: iterable of per-genome connection arrays
    :param input_keys: keys of the input nodes
    :param output_keys: keys of the output nodes
    :return: list of (nodes, connections) analysis results, one per genome
    """
    res = []
    total_nodes, total_connections = 0, 0
    for nodes, connections in zip(pop_nodes, pop_connections):
        # analyse each genome exactly once (the previous version called
        # `analysis` twice per genome and appended the result twice)
        a_nodes, a_connections = analysis(nodes, connections, input_keys, output_keys)
        res.append((a_nodes, a_connections))
        total_nodes += len(a_nodes)
        total_connections += len(a_connections)
    # debug output kept for behavior compatibility; the "- 200" looks like an
    # ad-hoc baseline for a fixed population size -- TODO(review): remove
    print(total_nodes - 200, total_connections)
    return res
|
||||
|
||||
|
||||
|
||||
@@ -9,6 +9,8 @@ from .utils import fetch_random, fetch_first, I_INT
|
||||
from .genome import add_node, add_connection_by_idx, delete_node_by_idx, delete_connection_by_idx
|
||||
from .graph import check_cycles
|
||||
|
||||
add_node_cnt, delete_node_cnt, add_connection_cnt, delete_connection_cnt = 0, 0, 0, 0
|
||||
|
||||
|
||||
def create_mutate_function(config, input_keys, output_keys, batch: bool):
|
||||
"""
|
||||
@@ -79,11 +81,15 @@ def create_mutate_function(config, input_keys, output_keys, batch: bool):
|
||||
return mutate_func
|
||||
else:
|
||||
def batch_mutate_func(pop_nodes, pop_connections, new_node_keys):
|
||||
global add_node_cnt, delete_node_cnt, add_connection_cnt, delete_connection_cnt
|
||||
add_node_cnt, delete_node_cnt, add_connection_cnt, delete_connection_cnt = 0, 0, 0, 0
|
||||
res_nodes, res_connections = [], []
|
||||
for nodes, connections, new_node_key in zip(pop_nodes, pop_connections, new_node_keys):
|
||||
nodes, connections = mutate_func(nodes, connections, new_node_key)
|
||||
res_nodes.append(nodes)
|
||||
res_connections.append(connections)
|
||||
# print(f"add_node_cnt: {add_node_cnt}, delete_node_cnt: {delete_node_cnt}, "
|
||||
# f"add_connection_cnt: {add_connection_cnt}, delete_connection_cnt: {delete_connection_cnt}")
|
||||
return np.stack(res_nodes, axis=0), np.stack(res_connections, axis=0)
|
||||
|
||||
return batch_mutate_func
|
||||
@@ -161,6 +167,8 @@ def mutate(nodes: NDArray,
|
||||
:return:
|
||||
"""
|
||||
|
||||
global add_node_cnt, delete_node_cnt, add_connection_cnt, delete_connection_cnt
|
||||
|
||||
# mutate_structure
|
||||
def nothing(n, c):
|
||||
return n, c
|
||||
@@ -200,18 +208,22 @@ def mutate(nodes: NDArray,
|
||||
# mutate add node
|
||||
if rand() < add_node_rate:
|
||||
nodes, connections = m_add_node(nodes, connections)
|
||||
add_node_cnt += 1
|
||||
|
||||
# mutate delete node
|
||||
if rand() < delete_node_rate:
|
||||
nodes, connections = m_delete_node(nodes, connections)
|
||||
delete_node_cnt += 1
|
||||
|
||||
# mutate add connection
|
||||
if rand() < add_connection_rate:
|
||||
nodes, connections = m_add_connection(nodes, connections)
|
||||
add_connection_cnt += 1
|
||||
|
||||
# mutate delete connection
|
||||
if rand() < delete_connection_rate:
|
||||
nodes, connections = m_delete_connection(nodes, connections)
|
||||
delete_connection_cnt += 1
|
||||
|
||||
nodes, connections = mutate_values(nodes, connections, bias_mean, bias_std, bias_mutate_strength,
|
||||
bias_mutate_rate, bias_replace_rate, response_mean, response_std,
|
||||
@@ -220,6 +232,8 @@ def mutate(nodes: NDArray,
|
||||
weight_mutate_rate, weight_replace_rate, act_range, act_replace_rate, agg_range,
|
||||
agg_replace_rate, enabled_reverse_rate)
|
||||
|
||||
# print(add_node_cnt, delete_node_cnt, add_connection_cnt, delete_connection_cnt)
|
||||
|
||||
return nodes, connections
|
||||
|
||||
|
||||
@@ -321,9 +335,9 @@ def mutate_float_values(old_vals: NDArray, mean: float, std: float,
|
||||
replace = np.random.normal(size=old_vals.shape) * std + mean
|
||||
r = rand(*old_vals.shape)
|
||||
new_vals = old_vals
|
||||
new_vals = np.where(r < mutate_rate, new_vals + noise, new_vals)
|
||||
new_vals = np.where(r <= mutate_rate, new_vals + noise, new_vals)
|
||||
new_vals = np.where(
|
||||
np.logical_and(mutate_rate < r, r < mutate_rate + replace_rate),
|
||||
(mutate_rate < r) & (r <= mutate_rate + replace_rate),
|
||||
replace,
|
||||
new_vals
|
||||
)
|
||||
@@ -413,7 +427,7 @@ def mutate_delete_node(nodes: NDArray, connections: NDArray,
|
||||
node_key, node_idx = choice_node_key(nodes, input_keys, output_keys,
|
||||
allow_input_keys=False, allow_output_keys=False)
|
||||
|
||||
if np.isnan(node_key):
|
||||
if node_idx == I_INT:
|
||||
return nodes, connections
|
||||
|
||||
# delete the node
|
||||
|
||||
82
algorithms/neat/genome/origin_neat/__init__.py
Normal file
82
algorithms/neat/genome/origin_neat/__init__.py
Normal file
@@ -0,0 +1,82 @@
|
||||
import numpy as np
|
||||
from .genome import Genome
|
||||
from .gene import NodeGene, ConnectionGene
|
||||
from .feedforward import FeedForwardNetwork
|
||||
|
||||
def object2array(genome, N):
    """
    Convert an object-style genome into fixed-size numpy arrays.

    :param genome: object genome exposing ``input_keys``, ``nodes`` and ``connections``
    :param N: the size of the array
    :return: Tuple(Array, Array), represents the nodes and connections array
        nodes: shape(N, 5), dtype=float
        connections: shape(2, N, N), dtype=float
        con[:, i, j] != nan, means there is a connection from i to j
    """
    node_arr = np.full((N, 5), np.nan)
    conn_arr = np.full((2, N, N), np.nan)

    # keep at least one free row so a later mutation can add an extra node
    assert len(genome.nodes) + len(genome.input_keys) + 1 <= N

    key2row = {}
    row = 0

    # input nodes come first: only their key is stored
    for in_key in genome.input_keys:
        node_arr[row, 0] = in_key
        key2row[in_key] = row
        row += 1

    # then the genome's own nodes: key, bias, response, act(0), agg(0)
    for node_key, gene in genome.nodes.items():
        node_arr[row, 0] = node_key
        node_arr[row, 1] = gene.bias
        node_arr[row, 2] = gene.response
        node_arr[row, 3] = 0
        node_arr[row, 4] = 0
        key2row[node_key] = row
        row += 1

    # connections are indexed by (source row, destination row)
    for (src, dst), gene in genome.connections.items():
        src_row, dst_row = key2row[src], key2row[dst]
        conn_arr[0, src_row, dst_row] = gene.weight
        conn_arr[1, src_row, dst_row] = gene.enabled

    return node_arr, conn_arr
|
||||
|
||||
|
||||
def array2object(config, nodes, connections):
    """
    Convert an array-encoded genome back into an object genome.

    :param config: neat config object
    :param nodes: shape (N, 5) node array; rows with a nan key are empty slots
    :param connections: shape (2, N, N) connection array
    :return: the reconstructed ``Genome``
    """
    genome = Genome(0, config, None, init_val=False)
    # NOTE(review): keys are hard-coded for a 2-input / 1-output (XOR-style)
    # network -- confirm against the caller before reusing elsewhere
    genome.input_keys = [0, 1]
    genome.output_keys = [2]

    row2key = {}
    for row in range(nodes.shape[0]):
        raw_key = nodes[row, 0]
        if np.isnan(raw_key):
            continue
        node_key = int(raw_key)
        row2key[row] = node_key
        # input nodes carry no gene attributes
        if node_key in genome.input_keys:
            continue
        gene = NodeGene(node_key, config, init_val=False)
        gene.bias = nodes[row, 1]
        gene.response = nodes[row, 2]
        gene.act = 'sigmoid'
        gene.agg = 'sum'
        genome.nodes[node_key] = gene

    for src in range(connections.shape[1]):
        for dst in range(connections.shape[2]):
            # a nan weight marks "no connection from src to dst"
            if np.isnan(connections[0, src, dst]):
                continue
            conn_key = (row2key[src], row2key[dst])
            conn = ConnectionGene(conn_key, config, init_val=False)
            conn.weight = connections[0, src, dst]
            conn.enabled = connections[1, src, dst] == 1
            genome.connections[conn_key] = conn

    return genome
|
||||
17
algorithms/neat/genome/origin_neat/activations.py
Normal file
17
algorithms/neat/genome/origin_neat/activations.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""
|
||||
Has the built-in activation functions,
|
||||
code for using them,
|
||||
and code for adding new user-defined ones
|
||||
"""
|
||||
import math
|
||||
|
||||
def sigmoid_activation(z):
    """Steepened sigmoid: clamps 5*z into [-60, 60], then squashes to (0, 1)."""
    scaled = 5.0 * z
    if scaled > 60.0:
        scaled = 60.0
    elif scaled < -60.0:
        scaled = -60.0
    return 1.0 / (1.0 + math.exp(-scaled))


# registry of available activation functions, keyed by name
activation_dict = {
    "sigmoid": sigmoid_activation,
}

full_activation_list = list(activation_dict.keys())
|
||||
14
algorithms/neat/genome/origin_neat/aggregations.py
Normal file
14
algorithms/neat/genome/origin_neat/aggregations.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""
|
||||
Has the built-in aggregation functions, code for using them,
|
||||
and code for adding new user-defined ones.
|
||||
"""
|
||||
|
||||
def sum_aggregation(x):
    """Aggregate incoming node values by summation."""
    total = 0
    for value in x:
        total += value
    return total


# registry of available aggregation functions, keyed by name
aggregation_dict = {
    'sum': sum_aggregation,
}

full_aggregation_list = list(aggregation_dict.keys())
|
||||
54
algorithms/neat/genome/origin_neat/feedforward.py
Normal file
54
algorithms/neat/genome/origin_neat/feedforward.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from .graphs import node_calculate_sequence
|
||||
from .activations import activation_dict
|
||||
from .aggregations import aggregation_dict
|
||||
|
||||
|
||||
class FeedForwardNetwork(object):
    """Phenotype network evaluated node-by-node in topological order."""

    def __init__(self, inputs, outputs, node_evals):
        self.input_nodes = inputs
        self.output_nodes = outputs
        self.node_evals = node_evals
        # every input/output starts at 0.0, so outputs are defined even when unreached
        self.values = {key: 0.0 for key in inputs + outputs}

    def activate(self, inputs):
        """Feed one input vector through the network; return the output values."""
        if len(inputs) != len(self.input_nodes):
            raise RuntimeError("Expected {0:n} inputs, got {1:n}".format(len(self.input_nodes), len(inputs)))

        for key, value in zip(self.input_nodes, inputs):
            self.values[key] = value

        for node, act_func, agg_func, bias, response, links in self.node_evals:
            incoming = [self.values[src] * weight for src, weight in links]
            aggregated = agg_func(incoming) if incoming else 0.0
            self.values[node] = act_func(bias + response * aggregated)

        return [self.values[out] for out in self.output_nodes]

    @staticmethod
    def create(genome):
        """ Receives a genome and returns its phenotype (a FeedForwardNetwork). """
        # only enabled connections take part in the phenotype
        enabled = [cg.key for cg in genome.connections.values() if cg.enabled]

        seqs, useful_connections = node_calculate_sequence(genome.input_keys, genome.output_keys, enabled)

        node_evals = []
        for node in seqs:
            links = [(src, genome.connections[(src, dst)].weight)
                     for (src, dst) in useful_connections if dst == node]
            ng = genome.nodes[node]
            node_evals.append((node, activation_dict[ng.act], aggregation_dict[ng.agg],
                               ng.bias, ng.response, links))

        return FeedForwardNetwork(genome.input_keys, genome.output_keys, node_evals)
|
||||
152
algorithms/neat/genome/origin_neat/gene.py
Normal file
152
algorithms/neat/genome/origin_neat/gene.py
Normal file
@@ -0,0 +1,152 @@
|
||||
from typing import Tuple
|
||||
from random import gauss, choice, random
|
||||
|
||||
|
||||
def clip(x, min_val, max_val):
    """Clamp x into [min_val, max_val] (lower bound applied first)."""
    lower_bounded = max(x, min_val)
    return min(lower_bounded, max_val)
|
||||
|
||||
|
||||
class NodeGene:
    """A single node gene: bias, response, activation and aggregation choices."""

    def __init__(self, key: int, config, init_val=True):
        self.key = key
        self.config = config

        if init_val:
            self.init_value()
        else:
            # neutral defaults; the caller is expected to fill in real values
            self.bias = 0
            self.response = 1
            self.act = 0
            self.agg = 0

    def __repr__(self):
        return (f'node({self.key}, bias: {self.bias:.3f}, '
                f'response: {self.response:.3f}, act: {self.act}, agg: {self.agg})')

    def __eq__(self, other):
        if not isinstance(other, NodeGene):
            return False
        return (self.key == other.key
                and self.bias == other.bias
                and self.response == other.response
                and self.act == other.act
                and self.agg == other.agg)

    def copy(self):
        """Return a new gene sharing the same config and attribute values."""
        clone = self.__class__(self.key, config=self.config, init_val=False)
        clone.bias = self.bias  # numpy array is mutable, so we need to copy it
        clone.response = self.response
        clone.act = self.act
        clone.agg = self.agg
        return clone

    def init_value(self):
        """Draw fresh attribute values from the configured distributions."""
        c = self.config.gene
        # NOTE: keep this draw order (bias, response, act, agg) -- it fixes
        # how the shared RNG stream is consumed
        self.bias = gauss(c.bias.init_mean, c.bias.init_stdev)
        self.response = gauss(c.response.init_mean, c.response.init_stdev)
        self.act = choice(c.activation.options)
        self.agg = choice(c.aggregation.options)

        self.bias = clip(self.bias, c.bias.min_value, c.bias.max_value)
        self.response = clip(self.response, c.response.min_value, c.response.max_value)

    def distance(self, other):
        """Compatibility distance: |dbias| + |dresponse| + 1 per differing string attr."""
        d = abs(self.bias - other.bias) + abs(self.response - other.response)
        if self.act != other.act:
            d += 1
        if self.agg != other.agg:
            d += 1
        return d

    def mutate(self):
        """Mutate every attribute according to its own config section."""
        gene_cfg = self.config.gene
        self.bias = mutate_float(self.bias, gene_cfg.bias)
        self.response = mutate_float(self.response, gene_cfg.response)
        self.act = mutate_string(self.act, gene_cfg.activation)
        self.agg = mutate_string(self.agg, gene_cfg.aggregation)

    @classmethod
    def crossover(cls, g1, g2):
        """Uniform crossover of two homologous node genes (same key)."""
        assert g1.key == g2.key
        child = cls(g1.key, g1.config, init_val=False)
        child.bias = g1.bias if random() > 0.5 else g2.bias
        child.response = g1.response if random() > 0.5 else g2.response
        child.act = g1.act if random() > 0.5 else g2.act
        child.agg = g1.agg if random() > 0.5 else g2.agg
        return child
|
||||
|
||||
|
||||
class ConnectionGene:
    """A single connection gene: (in_key, out_key) with a weight and an enabled flag."""

    def __init__(self, key: Tuple[int, int], config, init_val=True):
        self.key = key
        self.config = config
        self.enabled = True
        if init_val:
            self.init_value()
        else:
            # neutral weight; the caller fills in the real value
            self.weight = 1

    def __repr__(self):
        return f'connection({self.key}, {self.weight:.3f}, {self.enabled})'

    def __eq__(self, other):
        if not isinstance(other, ConnectionGene):
            return False
        return (self.key == other.key
                and self.weight == other.weight
                and self.enabled == other.enabled)

    def copy(self):
        """Return a new gene with the same key, weight and enabled state."""
        clone = self.__class__(self.key, self.config, init_val=False)
        clone.weight = self.weight
        clone.enabled = self.enabled
        return clone

    def init_value(self):
        """Draw a fresh weight from the configured normal distribution, then clip."""
        weight_cfg = self.config.gene.weight
        self.weight = gauss(weight_cfg.init_mean, weight_cfg.init_stdev)
        self.weight = clip(self.weight, weight_cfg.min_value, weight_cfg.max_value)

    def distance(self, other):
        """Compatibility distance: |dweight| plus 1 if the enabled flags differ."""
        d = abs(self.weight - other.weight)
        if self.enabled != other.enabled:
            d += 1
        return d

    def mutate(self):
        """Mutate the weight; flip `enabled` with the configured probability."""
        self.weight = mutate_float(self.weight, self.config.gene.weight)
        if random() < self.config.gene.enabled.mutate_rate:
            self.enabled = not self.enabled

    @classmethod
    def crossover(cls, g1, g2):
        """Uniform crossover of two homologous connection genes (same key)."""
        assert g1.key == g2.key
        child = cls(g1.key, g1.config, init_val=False)
        child.weight = g1.weight if random() > 0.5 else g2.weight
        child.enabled = g1.enabled if random() > 0.5 else g2.enabled
        return child
|
||||
|
||||
|
||||
def mutate_float(v, vc):
    """
    Mutate a float value according to its value-config `vc`.

    With probability `vc.mutate_rate` the value is perturbed by gaussian noise;
    otherwise, with probability `vc.replace_rate`, it is re-drawn from the init
    distribution. Either way the result is clipped to [min_value, max_value].
    The two branches are mutually exclusive -- using a second independent `if`
    here was a historical bug ("HAHA, exactly the bug is here!!"), fixed by
    making the replace branch an `elif`.
    """
    r = random()
    if r < vc.mutate_rate:
        perturbed = v + gauss(0, vc.mutate_power)
        return clip(perturbed, vc.min_value, vc.max_value)
    elif r < vc.mutate_rate + vc.replace_rate:
        replacement = gauss(vc.init_mean, vc.init_stdev)
        return clip(replacement, vc.min_value, vc.max_value)
    return v
|
||||
|
||||
|
||||
def mutate_string(v, vc):
    """With probability `vc.mutate_rate`, replace v by a random pick from `vc.options`."""
    if random() < vc.mutate_rate:
        return choice(vc.options)
    return v
|
||||
246
algorithms/neat/genome/origin_neat/genome.py
Normal file
246
algorithms/neat/genome/origin_neat/genome.py
Normal file
@@ -0,0 +1,246 @@
|
||||
from random import random, choice
|
||||
|
||||
from .gene import NodeGene, ConnectionGene
|
||||
from .graphs import creates_cycle
|
||||
|
||||
|
||||
class Genome:
    """
    Object-style NEAT genome: a dict of node genes and a dict of connection
    genes, plus bookkeeping (key, config, fitness, global innovation counter).
    """

    def __init__(self, key, config, global_idx, init_val=True):
        # Unique identifier for a genome instance.
        self.key = key
        self.config = config
        # global innovation counter; must provide next_node() -- see mutate_add_node
        self.global_idx = global_idx

        # (gene_key, gene) pairs for gene sets.
        self.connections = {}
        self.nodes = {}

        # Fitness results.
        self.fitness = None

        # inputs get negative keys (-1, -2, ...); outputs get 0..num_outputs-1
        self.input_keys = [-i - 1 for i in range(config.basic.num_inputs)]
        self.output_keys = [i for i in range(config.basic.num_outputs)]

        if init_val:
            self.initialize()

    def __repr__(self):
        nodes_info = ',\n\t\t'.join(map(str, self.nodes.values()))
        connections_info = ',\n\t\t'.join(map(str, self.connections.values()))

        return f'Genome(\n\t' \
               f'key: {self.key}, \n' \
               f'\tinput_keys: {self.input_keys}, \n' \
               f'\toutput_keys: {self.output_keys}, \n' \
               f'\tnodes: \n\t\t' \
               f'{nodes_info} \n' \
               f'\tconnections: \n\t\t' \
               f'{connections_info} \n)'

    def __eq__(self, other):
        # genomes are equal iff the key matches and every gene matches pairwise
        if not isinstance(other, Genome):
            return False
        if self.key != other.key:
            return False
        if len(self.nodes) != len(other.nodes) or len(self.connections) != len(other.connections):
            return False
        for k, v in self.nodes.items():
            o_v = other.nodes.get(k)
            if o_v is None or v != o_v:
                return False
        for k, v in self.connections.items():
            o_v = other.connections.get(k)
            if o_v is None or v != o_v:
                return False
        return True

    def initialize(self):
        """Configure a new genome based on the given configuration."""
        # Create node genes for the output pins.
        for node_key in self.output_keys:
            self.nodes[node_key] = NodeGene(node_key, self.config, init_val=True)

        # Add connections based on initial connectivity type.
        # ONLY ALLOW FULL HERE AND NO HIDDEN!!!
        for i in self.input_keys:
            for j in self.output_keys:
                key = (i, j)
                self.connections[key] = ConnectionGene(key, self.config, init_val=True)

    def distance(self, other):
        """
        Calculate the compatibility distance between two genomes:
        node distance plus connection distance, each being
        (weight_coeff * homologous distance + disjoint_coeff * disjoint count)
        normalised by the larger genome's gene count.
        """
        wc = self.config.genome.compatibility_weight_coefficient
        dc = self.config.genome.compatibility_disjoint_coefficient

        node_distance = 0.0
        if self.nodes or other.nodes:  # otherwise, both are empty
            disjoint_nodes = sum(1 for k2 in other.nodes if k2 not in self.nodes)

            for k1, n1 in self.nodes.items():
                n2 = other.nodes.get(k1)
                if n2 is None:
                    disjoint_nodes += 1
                else:
                    # Homologous genes compute their own distance value.
                    node_distance += n1.distance(n2)

            max_nodes = max(len(self.nodes), len(other.nodes))
            node_distance = (wc * node_distance + dc * disjoint_nodes) / max_nodes

        connection_distance = 0.0
        if self.connections or other.connections:
            disjoint_connections = sum(1 for k2 in other.connections if k2 not in self.connections)

            for k1, c1 in self.connections.items():
                c2 = other.connections.get(k1)
                if c2 is None:
                    disjoint_connections += 1
                else:
                    # Homologous genes compute their own distance value.
                    connection_distance += c1.distance(c2)

            max_conn = max(len(self.connections), len(other.connections))
            connection_distance = (wc * connection_distance + dc * disjoint_connections) / max_conn

        return node_distance + connection_distance

    @classmethod
    def crossover(cls, new_key, g1, g2):
        """Create a child genome; the fitter parent (p1) contributes its topology."""
        if g1.fitness > g2.fitness:
            p1, p2 = g1, g2
        else:
            p1, p2 = g2, g1

        child = cls(new_key, p1.config, p1.global_idx, init_val=False)

        # genes present only in the fitter parent are copied;
        # homologous genes do a uniform crossover
        for k, cg1 in p1.connections.items():
            cg2 = p2.connections.get(k)
            child.connections[k] = cg1.copy() if cg2 is None else ConnectionGene.crossover(cg1, cg2)

        for k, ng1 in p1.nodes.items():
            ng2 = p2.nodes.get(k)
            child.nodes[k] = ng1.copy() if ng2 is None else NodeGene.crossover(ng1, ng2)

        return child

    def mutate(self):
        """Apply structural mutations (add/delete node/connection), then value mutations."""
        c = self.config.genome

        if c.single_structural_mutation:
            # at most one structural mutation, chosen proportionally to the rates
            div = max(1, c.conn_add_prob + c.conn_delete_prob + c.node_add_prob + c.node_delete_prob)
            r = random()
            if r < c.node_add_prob / div:
                self.mutate_add_node()
            elif r < (c.node_add_prob + c.node_delete_prob) / div:
                self.mutate_delete_node()
            elif r < (c.node_add_prob + c.node_delete_prob + c.conn_add_prob) / div:
                self.mutate_add_connection()
            elif r < (c.node_add_prob + c.node_delete_prob + c.conn_add_prob + c.conn_delete_prob) / div:
                self.mutate_delete_connection()
        else:
            # each structural mutation fires independently
            if random() < c.node_add_prob:
                self.mutate_add_node()
            if random() < c.node_delete_prob:
                self.mutate_delete_node()
            if random() < c.conn_add_prob:
                self.mutate_add_connection()
            if random() < c.conn_delete_prob:
                self.mutate_delete_connection()

        for cg in self.connections.values():
            cg.mutate()

        for ng in self.nodes.values():
            ng.mutate()

    def mutate_add_node(self):
        """Split a random connection with a new node. Returns -1 if impossible, else 1."""
        if not self.connections:
            return -1

        # Choose a random connection to split
        conn_to_split = choice(list(self.connections.values()))
        new_node_id = self.global_idx.next_node()
        self.nodes[new_node_id] = NodeGene(new_node_id, self.config, init_val=False)

        # Disable the split connection and bridge it with two new ones.
        conn_to_split.enabled = False
        i, o = conn_to_split.key
        con1 = ConnectionGene((i, new_node_id), self.config, init_val=False)
        con2 = ConnectionGene((new_node_id, o), self.config, init_val=False)

        # The new node+connections have roughly the same behavior as
        # the original connection (depending on the activation function of the new node).
        con2.weight = conn_to_split.weight
        self.connections[con1.key] = con1
        self.connections[con2.key] = con2

        return 1

    def mutate_delete_node(self):
        """Delete a random hidden node and its connections. Returns -1 if none exist."""
        # Do nothing if there are no non-output nodes.
        available_nodes = [k for k in self.nodes if k not in self.output_keys]
        if not available_nodes:
            return -1

        del_key = choice(available_nodes)
        # every connection touching the deleted node goes with it
        # (keys are (in, out) tuples, so membership covers both directions)
        connections_to_delete = {conn_key for conn_key in self.connections if del_key in conn_key}
        for key in connections_to_delete:
            del self.connections[key]

        del self.nodes[del_key]

        return del_key

    def mutate_add_connection(self):
        """
        Attempt to add a new connection, the only restriction being that the output
        node cannot be one of the network input pins. Returns the new key, or -1.
        """
        possible_outputs = list(self.nodes)
        out_node = choice(possible_outputs)

        possible_inputs = possible_outputs + self.input_keys
        in_node = choice(possible_inputs)

        # in recurrent networks, the input node can be the same as the output node
        key = (in_node, out_node)
        if key in self.connections:
            # the connection already exists: just re-enable it
            self.connections[key].enabled = True
            return -1

        # if feedforward network, check if the connection creates a cycle
        if self.config.genome.feedforward and creates_cycle(self.connections.keys(), key):
            return -1

        self.connections[key] = ConnectionGene(key, self.config, init_val=True)
        return key

    def mutate_delete_connection(self):
        """Delete one random connection, if any exist."""
        if self.connections:
            key = choice(list(self.connections.keys()))
            del self.connections[key]

    def complexity(self):
        """Cheap size metric: 2 per connection + 4 per node."""
        return len(self.connections) * 2 + len(self.nodes) * 4
|
||||
130
algorithms/neat/genome/origin_neat/graphs.py
Normal file
130
algorithms/neat/genome/origin_neat/graphs.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""Directed graph algorithm implementations."""
|
||||
|
||||
|
||||
def creates_cycle(connections, test):
    """
    Returns True if adding the 'test' connection would create a cycle,
    assuming that no cycle already exists in the graph represented by
    'connections'.
    """
    src, dst = test
    if src == dst:
        return True

    # a cycle appears iff `src` is already reachable from `dst`;
    # expand the reachable set until it stops growing
    reachable = {dst}
    grew = True
    while grew:
        grew = False
        for a, b in connections:
            if a in reachable and b not in reachable:
                if b == src:
                    return True
                reachable.add(b)
                grew = True
    return False
|
||||
|
||||
|
||||
def required_for_output(inputs, outputs, connections):
    """
    Collect the nodes whose state is required to compute the final network output(s).

    :param inputs: list of the input identifiers
    :param outputs: list of the output node identifiers
    :param connections: list of (input, output) connections in the network.
    NOTE: It is assumed that the input identifier set and the node identifier
    set are disjoint. By convention, the output node ids are always the same
    as the output index.

    Returns a set of identifiers of required nodes.
    """
    assert not set(inputs).intersection(outputs)

    required = set(outputs)
    seen = set(outputs)
    while True:
        # nodes not yet seen whose output is consumed by a node already known to matter
        producers = {a for (a, b) in connections if b in seen and a not in seen}
        if not producers:
            break

        # inputs are given, not computed, so they never become "required"
        new_nodes = {x for x in producers if x not in inputs}
        if not new_nodes:
            break

        required |= new_nodes
        seen |= producers

    return required


def feed_forward_layers(inputs, outputs, connections):
    """
    Collect the layers whose members can be evaluated in parallel in a
    feed-forward network.

    :param inputs: list of the network input nodes
    :param outputs: list of the output node identifiers
    :param connections: list of (input, output) connections in the network.

    Returns a list of layers, with each layer consisting of a set of node
    identifiers. Nodes whose output is never used to compute the final
    network output are omitted.
    """
    required = required_for_output(inputs, outputs, connections)

    layers = []
    evaluated = set(inputs)
    while True:
        # candidates: successors of evaluated nodes that are not evaluated yet
        candidates = {b for (a, b) in connections if a in evaluated and b not in evaluated}
        # keep only required nodes whose entire input set is already evaluated
        ready = {
            n for n in candidates
            if n in required and all(a in evaluated for (a, b) in connections if b == n)
        }
        if not ready:
            break
        layers.append(ready)
        evaluated |= ready

    return layers


def node_calculate_sequence(inputs, outputs, connections):
    """
    Compute the order in which nodes must be evaluated to produce the
    final network output(s).

    :param inputs: list of the network input node identifiers
    :param outputs: list of the output node identifiers
    :param connections: list of (input, output) connections in the network
    :return: (topological_order, useful_connections) where topological_order
        excludes the input nodes and useful_connections is the subset of
        `connections` joining only useful nodes.
    :raises ValueError: if the useful sub-graph contains a cycle (a valid
        feed-forward network never does; previously this case looped forever).
    """
    required_nodes = required_for_output(inputs, outputs, connections)
    useful_nodes = required_nodes.copy()
    useful_nodes.update(inputs)
    useful_connections = [c for c in connections if c[0] in useful_nodes and c[1] in useful_nodes]

    # Kahn-style topological sort over the useful sub-graph.
    in_degrees = {n: 0 for n in useful_nodes}
    for a, b in useful_connections:
        in_degrees[b] += 1

    topological_order = []
    while len(topological_order) < len(useful_nodes):
        ready = [n for n, d in in_degrees.items() if d == 0]
        if not ready:
            raise ValueError("cycle detected in feed-forward connections")
        for n in ready:
            topological_order.append(n)
            in_degrees[n] -= 1  # mark as processed (-1 means done)
            for a, b in useful_connections:
                if a == n:
                    in_degrees[b] -= 1

    # inputs are provided, not calculated -- drop them from the order
    for n in inputs:
        topological_order.remove(n)
    return topological_order, useful_connections
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # quick manual check of the evaluation-order computation
    demo_inputs = [-1, -2]
    demo_outputs = [0]
    demo_connections = [(-2, 2), (-2, 3), (4, 0), (3, 0), (2, 0), (2, 3), (2, 4)]
    result = node_calculate_sequence(demo_inputs, demo_outputs, demo_connections)
    print(result)
|
||||
@@ -6,7 +6,12 @@ import numpy as np
|
||||
from .species import SpeciesController
|
||||
from .genome.numpy import create_initialize_function, create_mutate_function, create_forward_function
|
||||
from .genome.numpy import batch_crossover
|
||||
from .genome.numpy import expand, expand_single
|
||||
from .genome.numpy import expand, expand_single, pop_analysis
|
||||
|
||||
from .genome.origin_neat import *
|
||||
|
||||
xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
|
||||
xor_outputs = np.array([[0], [1], [1], [0]])
|
||||
|
||||
|
||||
class Pipeline:
|
||||
@@ -14,8 +19,7 @@ class Pipeline:
|
||||
Neat algorithm pipeline.
|
||||
"""
|
||||
|
||||
def __init__(self, config, seed=42):
|
||||
np.random.seed(seed)
|
||||
def __init__(self, config):
|
||||
|
||||
self.config = config
|
||||
self.N = config.basic.init_maximum_nodes
|
||||
@@ -48,6 +52,15 @@ class Pipeline:
|
||||
return func
|
||||
|
||||
def tell(self, fitnesses):
|
||||
# idx = np.argmax(fitnesses)
|
||||
# print(f"argmax: {idx}, max: {np.max(fitnesses)}, a_max: {fitnesses[idx]}")
|
||||
# n, c = self.pop_nodes[idx], self.pop_connections[idx]
|
||||
# func = create_forward_function(n, c, self.N, self.input_idx, self.output_idx, batch=True)
|
||||
# out = func(xor_inputs)
|
||||
# print(f"max fitness: {fitnesses[idx]}")
|
||||
# print(f"real fitness: {4 - np.sum(np.abs(out - xor_outputs), axis=0)}")
|
||||
# print(f"Out:\n{func(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]))}")
|
||||
|
||||
self.generation += 1
|
||||
|
||||
self.species_controller.update_species_fitnesses(fitnesses)
|
||||
@@ -56,12 +69,31 @@ class Pipeline:
|
||||
|
||||
self.update_next_generation(crossover_pair)
|
||||
|
||||
# print(pop_analysis(self.pop_nodes, self.pop_connections, self.input_idx, self.output_idx))
|
||||
analysis = pop_analysis(self.pop_nodes, self.pop_connections, self.input_idx, self.output_idx)
|
||||
|
||||
try:
|
||||
for nodes, connections in zip(self.pop_nodes, self.pop_connections):
|
||||
g = array2object(self.config, nodes, connections)
|
||||
print(g)
|
||||
net = FeedForwardNetwork.create(g)
|
||||
real_out = [net.activate(x) for x in xor_inputs]
|
||||
func = create_forward_function(nodes, connections, self.N, self.input_idx, self.output_idx, batch=True)
|
||||
out = func(xor_inputs)
|
||||
real_out = np.array(real_out)
|
||||
out = np.array(out)
|
||||
print(real_out, out)
|
||||
assert np.allclose(real_out, out)
|
||||
except AssertionError:
|
||||
np.save("err_nodes.npy", self.pop_nodes)
|
||||
np.save("err_connections.npy", self.pop_connections)
|
||||
|
||||
# print(g)
|
||||
|
||||
self.species_controller.speciate(self.pop_nodes, self.pop_connections, self.generation)
|
||||
|
||||
self.expand()
|
||||
|
||||
|
||||
def auto_run(self, fitness_func, analysis: Union[Callable, str] = "default"):
|
||||
for _ in range(self.config.neat.population.generation_limit):
|
||||
forward_func = self.ask(batch=True)
|
||||
@@ -77,6 +109,7 @@ class Pipeline:
|
||||
self.tell(fitnesses)
|
||||
print("Generation limit reached!")
|
||||
|
||||
|
||||
def update_next_generation(self, crossover_pair: List[Union[int, Tuple[int, int]]]) -> None:
|
||||
"""
|
||||
create the next generation
|
||||
@@ -105,6 +138,7 @@ class Pipeline:
|
||||
|
||||
# mutate
|
||||
new_node_keys = np.array(self.fetch_new_node_keys())
|
||||
|
||||
m_npn, m_npc = self.mutate_func(npn, npc, new_node_keys) # mutate_new_pop_nodes
|
||||
|
||||
# elitism don't mutate
|
||||
@@ -122,6 +156,7 @@ class Pipeline:
|
||||
unused.append(key)
|
||||
self.new_node_keys_pool = unused + self.new_node_keys_pool
|
||||
|
||||
|
||||
def expand(self):
|
||||
"""
|
||||
Expand the population if needed.
|
||||
@@ -133,14 +168,15 @@ class Pipeline:
|
||||
pop_node_sizes = np.sum(~np.isnan(pop_node_keys), axis=1)
|
||||
max_node_size = np.max(pop_node_sizes)
|
||||
if max_node_size >= self.N:
|
||||
print(f"expand to {self.N}!")
|
||||
self.N = int(self.N * self.expand_coe)
|
||||
print(f"expand to {self.N}!")
|
||||
self.pop_nodes, self.pop_connections = expand(self.pop_nodes, self.pop_connections, self.N)
|
||||
|
||||
# don't forget to expand representation genome in species
|
||||
for s in self.species_controller.species.values():
|
||||
s.representative = expand_single(*s.representative, self.N)
|
||||
|
||||
|
||||
def fetch_new_node_keys(self):
|
||||
# if remain unused keys are not enough, create new keys
|
||||
if len(self.new_node_keys_pool) < self.pop_size:
|
||||
@@ -153,6 +189,7 @@ class Pipeline:
|
||||
self.new_node_keys_pool = self.new_node_keys_pool[self.pop_size:]
|
||||
return res
|
||||
|
||||
|
||||
def default_analysis(self, fitnesses):
|
||||
max_f, min_f, mean_f, std_f = max(fitnesses), min(fitnesses), np.mean(fitnesses), np.std(fitnesses)
|
||||
species_sizes = [len(s.members) for s in self.species_controller.species.values()]
|
||||
@@ -162,4 +199,4 @@ class Pipeline:
|
||||
self.generation_timestamp = new_timestamp
|
||||
|
||||
print(f"Generation: {self.generation}",
|
||||
f"fitness: {max_f}, {min_f}, {mean_f}, {std_f}, Species sizes: {species_sizes}, Cost time: {cost_time}")
|
||||
f"fitness: {max_f}, {min_f}, {mean_f}, {std_f}, Species sizes: {species_sizes}, Cost time: {cost_time}")
|
||||
|
||||
@@ -138,7 +138,8 @@ class SpeciesController:
|
||||
for sid, s in self.species.items():
|
||||
# TODO: here use mean to measure the fitness of a species, but it may be other functions
|
||||
s.member_fitnesses = s.get_fitnesses(fitnesses)
|
||||
s.fitness = np.mean(s.member_fitnesses)
|
||||
# s.fitness = np.mean(s.member_fitnesses)
|
||||
s.fitness = np.max(s.member_fitnesses)
|
||||
s.fitness_history.append(s.fitness)
|
||||
s.adjusted_fitness = None
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ def evaluate(forward_func: Callable) -> List[float]:
|
||||
:return:
|
||||
"""
|
||||
outs = forward_func(xor_inputs)
|
||||
fitnesses = np.mean((outs - xor_outputs) ** 2, axis=(1, 2))
|
||||
fitnesses = 4 - np.sum(np.abs(outs - xor_outputs), axis=(1, 2))
|
||||
# print(fitnesses)
|
||||
return fitnesses.tolist() # returns a list
|
||||
|
||||
@@ -38,4 +38,5 @@ def main():
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
np.random.seed(63124326)
|
||||
main()
|
||||
|
||||
@@ -2,14 +2,14 @@
|
||||
"basic": {
|
||||
"num_inputs": 2,
|
||||
"num_outputs": 1,
|
||||
"init_maximum_nodes": 10,
|
||||
"expands_coe": 2
|
||||
"init_maximum_nodes": 5,
|
||||
"expands_coe": 1.5
|
||||
},
|
||||
"neat": {
|
||||
"population": {
|
||||
"fitness_criterion": "max",
|
||||
"fitness_threshold": 3,
|
||||
"generation_limit": 100,
|
||||
"fitness_threshold": 76,
|
||||
"generation_limit": 1000,
|
||||
"pop_size": 100,
|
||||
"reset_on_extinction": "False"
|
||||
},
|
||||
@@ -17,8 +17,6 @@
|
||||
"bias": {
|
||||
"init_mean": 0.0,
|
||||
"init_stdev": 1.0,
|
||||
"max_value": 30.0,
|
||||
"min_value": -30.0,
|
||||
"mutate_power": 0.5,
|
||||
"mutate_rate": 0.7,
|
||||
"replace_rate": 0.1
|
||||
@@ -26,34 +24,23 @@
|
||||
"response": {
|
||||
"init_mean": 1.0,
|
||||
"init_stdev": 0.0,
|
||||
"max_value": 30.0,
|
||||
"min_value": -30.0,
|
||||
"mutate_power": 0.0,
|
||||
"mutate_rate": 0.0,
|
||||
"replace_rate": 0.0
|
||||
},
|
||||
"activation": {
|
||||
"default": "sigmoid",
|
||||
"options": "sigmoid",
|
||||
"options": ["sigmoid"],
|
||||
"mutate_rate": 0.01
|
||||
},
|
||||
"aggregation": {
|
||||
"default": "sum",
|
||||
"options": [
|
||||
"product",
|
||||
"sum",
|
||||
"max",
|
||||
"min",
|
||||
"median",
|
||||
"mean"
|
||||
],
|
||||
"options": ["sum"],
|
||||
"mutate_rate": 0.01
|
||||
},
|
||||
"weight": {
|
||||
"init_mean": 0.0,
|
||||
"init_stdev": 1.0,
|
||||
"max_value": 30.0,
|
||||
"min_value": -30.0,
|
||||
"mutate_power": 0.5,
|
||||
"mutate_rate": 0.8,
|
||||
"replace_rate": 0.1
|
||||
@@ -65,7 +52,6 @@
|
||||
"genome": {
|
||||
"compatibility_disjoint_coefficient": 1.0,
|
||||
"compatibility_weight_coefficient": 0.5,
|
||||
"feedforward": "True",
|
||||
"single_structural_mutation": "False",
|
||||
"conn_add_prob": 0.5,
|
||||
"conn_delete_prob": 0.5,
|
||||
@@ -81,28 +67,5 @@
|
||||
"survival_threshold": 0.2,
|
||||
"min_species_size": 1
|
||||
}
|
||||
},
|
||||
"hyperneat": {
|
||||
"substrate": {
|
||||
"type": "feedforward",
|
||||
"layers": [
|
||||
3,
|
||||
10,
|
||||
10,
|
||||
1
|
||||
],
|
||||
"x_lim": [
|
||||
-5,
|
||||
5
|
||||
],
|
||||
"y_lim": [
|
||||
-5,
|
||||
5
|
||||
],
|
||||
"threshold": 0.2,
|
||||
"max_weight": 5.0
|
||||
}
|
||||
},
|
||||
"es-hyperneat": {
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user