import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import random


# Generate a synthetic binary-classification dataset for a given task
def generate_dataset(task_id):
    X, y = make_classification(n_samples=100, n_features=10, n_informative=5,
                               n_redundant=3, n_repeated=2, random_state=task_id)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=task_id)
    return X_train, X_test, y_train, y_test


# Small feed-forward network; the final layer outputs logits for 2 classes
class Net(keras.Model):
    def __init__(self):
        super().__init__()
        self.fc1 = keras.layers.Dense(20, activation='relu')
        self.fc2 = keras.layers.Dense(10, activation='relu')
        self.fc3 = keras.layers.Dense(2)  # logits, no activation

    def call(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        return self.fc3(x)


# Genetic algorithm over a population of networks
class GeneticAlgorithm:
    def __init__(self, population_size):
        self.population_size = population_size
        self.population = []
        for _ in range(population_size):
            net = Net()
            net(tf.zeros((1, 10)))  # build the layers so their weights exist
            self.population.append(net)

    def selection(self, task_id):
        # Train every network briefly on the task and keep the better half
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        fitness = []
        for net in self.population:
            net.compile(optimizer='adam',
                        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                        metrics=['accuracy'])
            net.fit(X_train, y_train, epochs=10, verbose=0)
            loss, accuracy = net.evaluate(X_test, y_test, verbose=0)
            fitness.append(accuracy)
        best = np.argsort(fitness)[-(self.population_size // 2):]
        self.population = [self.population[i] for i in best]

    def crossover(self):
        # Create children whose weights are the element-wise average of two parents
        offspring = []
        for _ in range(self.population_size // 2):
            parent1, parent2 = random.sample(self.population, 2)
            child = Net()
            child(tf.zeros((1, 10)))  # build the child before setting weights
            for child_layer, p1_layer, p2_layer in (
                    (child.fc1, parent1.fc1, parent2.fc1),
                    (child.fc2, parent1.fc2, parent2.fc2),
                    (child.fc3, parent1.fc3, parent2.fc3)):
                averaged = [(w1 + w2) / 2 for w1, w2 in
                            zip(p1_layer.get_weights(), p2_layer.get_weights())]
                child_layer.set_weights(averaged)
            offspring.append(child)
        self.population += offspring

    def mutation(self):
        # With 10% probability, perturb every kernel and bias with Gaussian noise
        for net in self.population:
            if random.random() < 0.1:
                for layer in (net.fc1, net.fc2, net.fc3):
                    weights = [w + np.random.randn(*w.shape) * 0.1
                               for w in layer.get_weights()]
                    layer.set_weights(weights)


# Streamlit app
st.title("Evolution of Sub-Models")

# Parameters
st.sidebar.header("Parameters")
population_size = st.sidebar.slider("Population size", 10, 100, 50)
num_tasks = st.sidebar.slider("Number of tasks", 1, 10, 5)
num_generations = st.sidebar.slider("Number of generations", 1, 100, 10)

# Run the evolution
if st.button("Run evolution"):
    ga = GeneticAlgorithm(population_size)
    for generation in range(num_generations):
        for task_id in range(num_tasks):
            ga.selection(task_id)
            ga.crossover()
            ga.mutation()
        st.write(f"Generation {generation + 1} complete")

    # Evaluate the final population on every task
    final_accuracy = []
    for task_id in range(num_tasks):
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        accuracy = []
        for net in ga.population:
            net.compile(optimizer='adam',
                        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                        metrics=['accuracy'])
            net.fit(X_train, y_train, epochs=10, verbose=0)
            loss, acc = net.evaluate(X_test, y_test, verbose=0)
            accuracy.append(acc)
        final_accuracy.append(np.mean(accuracy))
    st.write(f"Final accuracy: {np.mean(final_accuracy):.3f}")
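
# Note: a minimal way to try this app locally (the filename below is only an
# example, not part of the original script) is to save it and launch it with
# the Streamlit CLI:
#   streamlit run evolution_app.py
# Then set the population size, number of tasks, and number of generations in
# the sidebar and press "Run evolution".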