add License and pyproject.toml
src/tensorneat/genome/gene/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from .base import BaseGene
from .conn import *
from .node import *

src/tensorneat/genome/gene/base.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import jax, jax.numpy as jnp

from tensorneat.common import State, StatefulBaseClass, hash_array


class BaseGene(StatefulBaseClass):
    "Base class for node genes or connection genes."

    fixed_attrs = []
    custom_attrs = []

    def __init__(self):
        pass

    def new_identity_attrs(self, state):
        # attrs that implement an identity transformation; used when a mutation adds a node
        raise NotImplementedError

    def new_random_attrs(self, state, randkey):
        # random attrs for the gene; used during initialization
        raise NotImplementedError

    def mutate(self, state, randkey, attrs):
        raise NotImplementedError

    def crossover(self, state, randkey, attrs1, attrs2):
        # each attribute is inherited from attrs1 or attrs2 with equal probability
        return jnp.where(
            jax.random.normal(randkey, attrs1.shape) > 0,
            attrs1,
            attrs2,
        )

    def distance(self, state, attrs1, attrs2):
        raise NotImplementedError

    def forward(self, state, attrs, inputs):
        raise NotImplementedError

    @property
    def length(self):
        return len(self.fixed_attrs) + len(self.custom_attrs)

    def repr(self, state, gene, precision=2):
        raise NotImplementedError

    def hash(self, gene):
        return hash_array(gene)

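The default crossover above picks each attribute from one parent or the other with equal probability, since a standard normal sample is positive half the time. A minimal standalone sketch of that selection logic (plain JAX only; the attribute vectors are made up for illustration, not taken from the diff):

import jax, jax.numpy as jnp

attrs1 = jnp.array([0.5, 1.0, -1.0])  # hypothetical gene attrs from parent 1
attrs2 = jnp.array([0.7, 2.0, 3.0])   # hypothetical gene attrs from parent 2

key = jax.random.PRNGKey(42)
# each mask entry is True with probability 0.5, so every attribute is
# inherited uniformly at random from one of the two parents
mask = jax.random.normal(key, attrs1.shape) > 0
child = jnp.where(mask, attrs1, attrs2)
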
src/tensorneat/genome/gene/conn/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .base import BaseConn
from .default import DefaultConn

src/tensorneat/genome/gene/conn/base.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from ..base import BaseGene


class BaseConn(BaseGene):
    "Base class for connection genes."

    fixed_attrs = ["input_index", "output_index"]

    def __init__(self):
        super().__init__()

    def new_zero_attrs(self, state):
        # attrs with the least influence on the network; used when a mutation adds a connection
        raise NotImplementedError

    def forward(self, state, attrs, inputs):
        raise NotImplementedError

    def repr(self, state, conn, precision=2, idx_width=3, func_width=8):
        in_idx, out_idx = conn[:2]
        in_idx = int(in_idx)
        out_idx = int(out_idx)

        return "{}(in: {:<{idx_width}}, out: {:<{idx_width}})".format(
            self.__class__.__name__, in_idx, out_idx, idx_width=idx_width
        )

    def to_dict(self, state, conn):
        in_idx, out_idx = conn[:2]
        return {
            "in": int(in_idx),
            "out": int(out_idx),
        }

    def sympy_func(self, state, conn_dict, inputs):
        raise NotImplementedError

src/tensorneat/genome/gene/conn/default.py (new file, 96 lines)
@@ -0,0 +1,96 @@
import jax.numpy as jnp
import jax.random
import sympy as sp

from tensorneat.common import mutate_float
from .base import BaseConn


class DefaultConn(BaseConn):
    "Default connection gene, with the same behavior as in NEAT-python."

    custom_attrs = ["weight"]

    def __init__(
        self,
        weight_init_mean: float = 0.0,
        weight_init_std: float = 1.0,
        weight_mutate_power: float = 0.15,
        weight_mutate_rate: float = 0.2,
        weight_replace_rate: float = 0.015,
        weight_lower_bound: float = -5.0,
        weight_upper_bound: float = 5.0,
    ):
        super().__init__()
        self.weight_init_mean = weight_init_mean
        self.weight_init_std = weight_init_std
        self.weight_mutate_power = weight_mutate_power
        self.weight_mutate_rate = weight_mutate_rate
        self.weight_replace_rate = weight_replace_rate
        self.weight_lower_bound = weight_lower_bound
        self.weight_upper_bound = weight_upper_bound

    def new_zero_attrs(self, state):
        return jnp.array([0.0])  # weight = 0

    def new_identity_attrs(self, state):
        return jnp.array([1.0])  # weight = 1

    def new_random_attrs(self, state, randkey):
        weight = (
            jax.random.normal(randkey, ()) * self.weight_init_std
            + self.weight_init_mean
        )
        weight = jnp.clip(weight, self.weight_lower_bound, self.weight_upper_bound)
        return jnp.array([weight])

    def mutate(self, state, randkey, attrs):
        weight = attrs[0]
        weight = mutate_float(
            randkey,
            weight,
            self.weight_init_mean,
            self.weight_init_std,
            self.weight_mutate_power,
            self.weight_mutate_rate,
            self.weight_replace_rate,
        )
        weight = jnp.clip(weight, self.weight_lower_bound, self.weight_upper_bound)
        return jnp.array([weight])

    def distance(self, state, attrs1, attrs2):
        weight1 = attrs1[0]
        weight2 = attrs2[0]
        return jnp.abs(weight1 - weight2)

    def forward(self, state, attrs, inputs):
        weight = attrs[0]
        return inputs * weight

    def repr(self, state, conn, precision=2, idx_width=3, func_width=8):
        in_idx, out_idx, weight = conn

        in_idx = int(in_idx)
        out_idx = int(out_idx)
        weight = round(float(weight), precision)

        return "{}(in: {:<{idx_width}}, out: {:<{idx_width}}, weight: {:<{float_width}})".format(
            self.__class__.__name__,
            in_idx,
            out_idx,
            weight,
            idx_width=idx_width,
            float_width=precision + 3,
        )

    def to_dict(self, state, conn):
        return {
            "in": int(conn[0]),
            "out": int(conn[1]),
            "weight": jnp.float32(conn[2]),
        }

    def sympy_func(self, state, conn_dict, inputs, precision=None):
        weight = sp.symbols(f"c_{conn_dict['in']}_{conn_dict['out']}_w")

        return inputs * weight, {weight: conn_dict["weight"]}

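A minimal usage sketch for DefaultConn (not part of the diff; it assumes the tensorneat package is importable, and it passes state=None, which works here only because none of the methods above actually read state):

import jax, jax.numpy as jnp
from tensorneat.genome.gene.conn import DefaultConn

gene = DefaultConn(weight_mutate_power=0.2)
k_init, k_mut = jax.random.split(jax.random.PRNGKey(0))

attrs = gene.new_random_attrs(None, k_init)         # length-1 array: [weight]
attrs = gene.mutate(None, k_mut, attrs)             # perturb or replace, then clip to bounds
out = gene.forward(None, attrs, jnp.asarray(1.5))   # inputs * weight
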
src/tensorneat/genome/gene/node/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from .base import BaseNode
from .default import DefaultNode
from .bias import BiasNode

src/tensorneat/genome/gene/node/base.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import jax, jax.numpy as jnp

from .. import BaseGene


class BaseNode(BaseGene):
    "Base class for node genes."

    fixed_attrs = ["index"]

    def __init__(self):
        super().__init__()

    def forward(self, state, attrs, inputs, is_output_node=False):
        raise NotImplementedError

    def repr(self, state, node, precision=2, idx_width=3, func_width=8):
        idx = int(node[0])
        return "{}(idx={:<{idx_width}})".format(
            self.__class__.__name__, idx, idx_width=idx_width
        )

    def to_dict(self, state, node):
        idx = node[0]
        return {
            "idx": int(idx),
        }

    def sympy_func(self, state, node_dict, inputs, is_output_node=False):
        raise NotImplementedError

src/tensorneat/genome/gene/node/bias.py (new file, 185 lines)
@@ -0,0 +1,185 @@
from typing import Union, Sequence, Callable, Optional

import numpy as np
import jax, jax.numpy as jnp
import sympy as sp

from tensorneat.common import (
    Act,
    Agg,
    act_func,
    agg_func,
    mutate_int,
    mutate_float,
    convert_to_sympy,
)

from . import BaseNode


class BiasNode(BaseNode):
    """
    Default node gene, with the same behavior as in NEAT-python.
    The attribute response is removed.
    """

    custom_attrs = ["bias", "aggregation", "activation"]

    def __init__(
        self,
        bias_init_mean: float = 0.0,
        bias_init_std: float = 1.0,
        bias_mutate_power: float = 0.15,
        bias_mutate_rate: float = 0.2,
        bias_replace_rate: float = 0.015,
        bias_lower_bound: float = -5,
        bias_upper_bound: float = 5,
        aggregation_default: Optional[Callable] = None,
        aggregation_options: Union[Callable, Sequence[Callable]] = Agg.sum,
        aggregation_replace_rate: float = 0.1,
        activation_default: Optional[Callable] = None,
        activation_options: Union[Callable, Sequence[Callable]] = Act.sigmoid,
        activation_replace_rate: float = 0.1,
    ):
        super().__init__()

        if isinstance(aggregation_options, Callable):
            aggregation_options = [aggregation_options]
        if isinstance(activation_options, Callable):
            activation_options = [activation_options]

        if aggregation_default is None:
            aggregation_default = aggregation_options[0]
        if activation_default is None:
            activation_default = activation_options[0]

        self.bias_init_mean = bias_init_mean
        self.bias_init_std = bias_init_std
        self.bias_mutate_power = bias_mutate_power
        self.bias_mutate_rate = bias_mutate_rate
        self.bias_replace_rate = bias_replace_rate
        self.bias_lower_bound = bias_lower_bound
        self.bias_upper_bound = bias_upper_bound

        self.aggregation_default = aggregation_options.index(aggregation_default)
        self.aggregation_options = aggregation_options
        self.aggregation_indices = np.arange(len(aggregation_options))
        self.aggregation_replace_rate = aggregation_replace_rate

        self.activation_default = activation_options.index(activation_default)
        self.activation_options = activation_options
        self.activation_indices = np.arange(len(activation_options))
        self.activation_replace_rate = activation_replace_rate

    def new_identity_attrs(self, state):
        return jnp.array(
            [0, self.aggregation_default, -1]
        )  # activation=-1 means Act.identity

    def new_random_attrs(self, state, randkey):
        k1, k2, k3 = jax.random.split(randkey, num=3)
        bias = jax.random.normal(k1, ()) * self.bias_init_std + self.bias_init_mean
        bias = jnp.clip(bias, self.bias_lower_bound, self.bias_upper_bound)
        agg = jax.random.choice(k2, self.aggregation_indices)
        act = jax.random.choice(k3, self.activation_indices)

        return jnp.array([bias, agg, act])

    def mutate(self, state, randkey, attrs):
        k1, k2, k3 = jax.random.split(randkey, num=3)
        bias, agg, act = attrs

        bias = mutate_float(
            k1,
            bias,
            self.bias_init_mean,
            self.bias_init_std,
            self.bias_mutate_power,
            self.bias_mutate_rate,
            self.bias_replace_rate,
        )
        bias = jnp.clip(bias, self.bias_lower_bound, self.bias_upper_bound)
        agg = mutate_int(
            k2, agg, self.aggregation_indices, self.aggregation_replace_rate
        )

        act = mutate_int(k3, act, self.activation_indices, self.activation_replace_rate)

        return jnp.array([bias, agg, act])

    def distance(self, state, attrs1, attrs2):
        bias1, agg1, act1 = attrs1
        bias2, agg2, act2 = attrs2

        return jnp.abs(bias1 - bias2) + (agg1 != agg2) + (act1 != act2)

    def forward(self, state, attrs, inputs, is_output_node=False):
        bias, agg, act = attrs

        z = agg_func(agg, inputs, self.aggregation_options)
        z = bias + z

        # output nodes skip the activation function
        z = jax.lax.cond(
            is_output_node, lambda: z, lambda: act_func(act, z, self.activation_options)
        )

        return z

    def repr(self, state, node, precision=2, idx_width=3, func_width=8):
        idx, bias, agg, act = node

        idx = int(idx)
        bias = round(float(bias), precision)
        agg = int(agg)
        act = int(act)

        if act == -1:
            act_func = Act.identity
        else:
            act_func = self.activation_options[act]
        return "{}(idx={:<{idx_width}}, bias={:<{float_width}}, aggregation={:<{func_width}}, activation={:<{func_width}})".format(
            self.__class__.__name__,
            idx,
            bias,
            self.aggregation_options[agg].__name__,
            act_func.__name__,
            idx_width=idx_width,
            float_width=precision + 3,
            func_width=func_width,
        )

    def to_dict(self, state, node):
        idx, bias, agg, act = node

        idx = int(idx)
        bias = jnp.float32(bias)
        agg = int(agg)
        act = int(act)

        if act == -1:
            act_func = Act.identity
        else:
            act_func = self.activation_options[act]

        return {
            "idx": idx,
            "bias": bias,
            "agg": self.aggregation_options[int(agg)].__name__,
            "act": act_func.__name__,
        }

    def sympy_func(self, state, node_dict, inputs, is_output_node=False):
        nd = node_dict

        bias = sp.symbols(f"n_{nd['idx']}_b")

        z = convert_to_sympy(nd["agg"])(inputs)
        z = bias + z
        if not is_output_node:
            z = convert_to_sympy(nd["act"])(z)

        return z, {bias: nd["bias"]}

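The forward pass above reduces to "aggregate, add bias, then activate unless this is an output node". A standalone sketch of that flow, with Agg.sum and Act.sigmoid hard-coded as assumptions instead of going through the option tables:

import jax, jax.numpy as jnp

def bias_node_forward(bias, inputs, is_output_node=False):
    z = jnp.sum(inputs)  # stand-in for agg_func with Agg.sum
    z = bias + z
    # mirror the jax.lax.cond above: output nodes skip the activation
    return jax.lax.cond(
        is_output_node, lambda: z, lambda: jax.nn.sigmoid(z)
    )

bias_node_forward(0.1, jnp.array([0.2, 0.3]))  # sigmoid(0.6)
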
src/tensorneat/genome/gene/node/default.py (new file, 220 lines)
@@ -0,0 +1,220 @@
from typing import Optional, Union, Sequence, Callable

import numpy as np
import jax, jax.numpy as jnp
import sympy as sp

from tensorneat.common import (
    Act,
    Agg,
    act_func,
    agg_func,
    mutate_int,
    mutate_float,
    convert_to_sympy,
)

from .base import BaseNode


class DefaultNode(BaseNode):
    "Default node gene, with the same behavior as in NEAT-python."

    custom_attrs = ["bias", "response", "aggregation", "activation"]

    def __init__(
        self,
        bias_init_mean: float = 0.0,
        bias_init_std: float = 1.0,
        bias_mutate_power: float = 0.15,
        bias_mutate_rate: float = 0.2,
        bias_replace_rate: float = 0.015,
        bias_lower_bound: float = -5,
        bias_upper_bound: float = 5,
        response_init_mean: float = 1.0,
        response_init_std: float = 0.0,
        response_mutate_power: float = 0.15,
        response_mutate_rate: float = 0.2,
        response_replace_rate: float = 0.015,
        response_lower_bound: float = -5,
        response_upper_bound: float = 5,
        aggregation_default: Optional[Callable] = None,
        aggregation_options: Union[Callable, Sequence[Callable]] = Agg.sum,
        aggregation_replace_rate: float = 0.1,
        activation_default: Optional[Callable] = None,
        activation_options: Union[Callable, Sequence[Callable]] = Act.sigmoid,
        activation_replace_rate: float = 0.1,
    ):
        super().__init__()

        if isinstance(aggregation_options, Callable):
            aggregation_options = [aggregation_options]
        if isinstance(activation_options, Callable):
            activation_options = [activation_options]

        if aggregation_default is None:
            aggregation_default = aggregation_options[0]
        if activation_default is None:
            activation_default = activation_options[0]

        self.bias_init_mean = bias_init_mean
        self.bias_init_std = bias_init_std
        self.bias_mutate_power = bias_mutate_power
        self.bias_mutate_rate = bias_mutate_rate
        self.bias_replace_rate = bias_replace_rate
        self.bias_lower_bound = bias_lower_bound
        self.bias_upper_bound = bias_upper_bound

        self.response_init_mean = response_init_mean
        self.response_init_std = response_init_std
        self.response_mutate_power = response_mutate_power
        self.response_mutate_rate = response_mutate_rate
        self.response_replace_rate = response_replace_rate
        self.response_lower_bound = response_lower_bound
        self.response_upper_bound = response_upper_bound

        self.aggregation_default = aggregation_options.index(aggregation_default)
        self.aggregation_options = aggregation_options
        self.aggregation_indices = np.arange(len(aggregation_options))
        self.aggregation_replace_rate = aggregation_replace_rate

        self.activation_default = activation_options.index(activation_default)
        self.activation_options = activation_options
        self.activation_indices = np.arange(len(activation_options))
        self.activation_replace_rate = activation_replace_rate

    def new_identity_attrs(self, state):
        bias = 0
        res = 1
        agg = self.aggregation_default
        act = self.activation_default

        # bias=0 and response=1 leave the aggregated input unchanged before activation
        return jnp.array([bias, res, agg, act])

    def new_random_attrs(self, state, randkey):
        k1, k2, k3, k4 = jax.random.split(randkey, num=4)
        bias = jax.random.normal(k1, ()) * self.bias_init_std + self.bias_init_mean
        bias = jnp.clip(bias, self.bias_lower_bound, self.bias_upper_bound)
        res = (
            jax.random.normal(k2, ()) * self.response_init_std + self.response_init_mean
        )
        res = jnp.clip(res, self.response_lower_bound, self.response_upper_bound)
        agg = jax.random.choice(k3, self.aggregation_indices)
        act = jax.random.choice(k4, self.activation_indices)

        return jnp.array([bias, res, agg, act])

    def mutate(self, state, randkey, attrs):
        k1, k2, k3, k4 = jax.random.split(randkey, num=4)
        bias, res, agg, act = attrs
        bias = mutate_float(
            k1,
            bias,
            self.bias_init_mean,
            self.bias_init_std,
            self.bias_mutate_power,
            self.bias_mutate_rate,
            self.bias_replace_rate,
        )
        bias = jnp.clip(bias, self.bias_lower_bound, self.bias_upper_bound)
        res = mutate_float(
            k2,
            res,
            self.response_init_mean,
            self.response_init_std,
            self.response_mutate_power,
            self.response_mutate_rate,
            self.response_replace_rate,
        )
        res = jnp.clip(res, self.response_lower_bound, self.response_upper_bound)
        agg = mutate_int(
            k4, agg, self.aggregation_indices, self.aggregation_replace_rate
        )

        act = mutate_int(k3, act, self.activation_indices, self.activation_replace_rate)

        return jnp.array([bias, res, agg, act])

    def distance(self, state, attrs1, attrs2):
        bias1, res1, agg1, act1 = attrs1
        bias2, res2, agg2, act2 = attrs2
        return (
            jnp.abs(bias1 - bias2)  # bias
            + jnp.abs(res1 - res2)  # response
            + (agg1 != agg2)  # aggregation
            + (act1 != act2)  # activation
        )

    def forward(self, state, attrs, inputs, is_output_node=False):
        bias, res, agg, act = attrs

        z = agg_func(agg, inputs, self.aggregation_options)
        z = bias + res * z

        # output nodes skip the activation function
        z = jax.lax.cond(
            is_output_node, lambda: z, lambda: act_func(act, z, self.activation_options)
        )

        return z

    def repr(self, state, node, precision=2, idx_width=3, func_width=8):
        idx, bias, res, agg, act = node

        idx = int(idx)
        bias = round(float(bias), precision)
        res = round(float(res), precision)
        agg = int(agg)
        act = int(act)

        if act == -1:
            act_func = Act.identity
        else:
            act_func = self.activation_options[act]
        return "{}(idx={:<{idx_width}}, bias={:<{float_width}}, response={:<{float_width}}, aggregation={:<{func_width}}, activation={:<{func_width}})".format(
            self.__class__.__name__,
            idx,
            bias,
            res,
            self.aggregation_options[agg].__name__,
            act_func.__name__,
            idx_width=idx_width,
            float_width=precision + 3,
            func_width=func_width,
        )

    def to_dict(self, state, node):
        idx, bias, res, agg, act = node

        idx = int(idx)
        bias = jnp.float32(bias)
        res = jnp.float32(res)
        agg = int(agg)
        act = int(act)

        if act == -1:
            act_func = Act.identity
        else:
            act_func = self.activation_options[act]
        return {
            "idx": idx,
            "bias": bias,
            "res": res,
            "agg": self.aggregation_options[int(agg)].__name__,
            "act": act_func.__name__,
        }

    def sympy_func(self, state, node_dict, inputs, is_output_node=False):
        nd = node_dict
        bias = sp.symbols(f"n_{nd['idx']}_b")
        res = sp.symbols(f"n_{nd['idx']}_r")

        z = convert_to_sympy(nd["agg"])(inputs)
        z = bias + res * z

        if not is_output_node:
            z = convert_to_sympy(nd["act"])(z)

        return z, {bias: nd["bias"], res: nd["res"]}

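DefaultNode.distance combines continuous terms (|Δbias| + |Δresponse|) with 0/1 penalties when the aggregation or activation choice differs, which is the ingredient NEAT-style compatibility distance needs. A quick numeric check with made-up attribute vectors:

import jax.numpy as jnp

attrs1 = jnp.array([0.5, 1.0, 0.0, 0.0])  # bias, response, agg index, act index
attrs2 = jnp.array([0.7, 1.0, 0.0, 1.0])

bias1, res1, agg1, act1 = attrs1
bias2, res2, agg2, act2 = attrs2
# |0.5-0.7| + |1.0-1.0| + (0 != 0) + (0 != 1)  ->  1.2
d = jnp.abs(bias1 - bias2) + jnp.abs(res1 - res2) + (agg1 != agg2) + (act1 != act2)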