import numpy as np


def clip_params(population, clipper_function):
    """Apply *clipper_function* to every individual, modifying *population* in place.

    Parameters
    ----------
    population : np.ndarray of shape (pop_size, num_params)
        Candidate solutions; each row is rewritten with its clipped version.
    clipper_function : callable
        Maps one parameter vector to a feasible parameter vector.
    """
    for i, individual in enumerate(population):
        population[i] = clipper_function(individual)


def evolution_strategies_optimizer(objective_function, clipper_function, init_mean, init_scale):
    """Minimize *objective_function* with a simple (mu, lambda) evolution strategy.

    Parameters
    ----------
    objective_function : callable
        Maps a parameter vector to a scalar fitness (lower is better).
    clipper_function : callable
        Projects a parameter vector back into the feasible region.
    init_mean, init_scale : array-like of shape (num_params,)
        Mean and scale of the Gaussian used to draw the initial population.

    Returns
    -------
    np.ndarray
        The best parameter vector found over all evaluated candidates.
    """
    # Strategy hyper-parameters.
    population_size = 100
    number_of_generations = 30
    mutation_scale = 0.1
    selection_ratio = 0.5
    selected_size = int(population_size * selection_ratio)

    # Problem dimensionality is taken from init_mean, not hard-coded,
    # so the optimizer works for any number of parameters.
    num_params = len(init_mean)

    # Initialize population randomly around init_mean.
    population = np.random.normal(loc=init_mean, scale=init_scale,
                                  size=(population_size, num_params))
    clip_params(population, clipper_function)  # in-place

    # Track the best candidate ever evaluated; fixes the original bug where
    # an argmin index from the OLD fitness array was applied to the NEW
    # (already-replaced) population.
    best_solution = None
    best_fitness = np.inf

    for generation in range(number_of_generations):
        # Evaluate fitness of the current population.
        fitness = np.array([objective_function(individual) for individual in population])

        # Select the best individuals (lowest fitness first).
        selected_indices = np.argsort(fitness)[:selected_size]
        selected = population[selected_indices]

        # Record the generation champion BEFORE the population is overwritten,
        # so index and array stay consistent.
        if fitness[selected_indices[0]] < best_fitness:
            best_fitness = fitness[selected_indices[0]]
            best_solution = population[selected_indices[0]].copy()

        # Reproduce (mutate): generate exactly the number of offspring needed
        # to refill the population, cycling through the selected parents.
        # (With selection_ratio == 0.5 this matches the original one-to-one
        # scheme; unlike the original it also works for other ratios.)
        num_offspring = population_size - selected_size
        parent_indices = np.arange(num_offspring) % selected_size
        offspring = (selected[parent_indices]
                     + np.random.randn(num_offspring, num_params) * mutation_scale)
        clip_params(offspring, clipper_function)  # in-place

        # Replacement: survivors plus their mutated offspring.
        population[:selected_size] = selected
        population[selected_size:] = offspring

        # Logging
        print(f"Generation {generation + 1}: Best Fitness = {best_fitness}",
              f"Best solution so far: {best_solution}")

    # The last generation's offspring were never scored inside the loop;
    # evaluate the final population once so they can compete for best.
    fitness = np.array([objective_function(individual) for individual in population])
    final_best = np.argmin(fitness)
    if fitness[final_best] < best_fitness:
        best_fitness = fitness[final_best]
        best_solution = population[final_best].copy()

    print(f"Best solution found: {best_solution}")
    return best_solution


def toy_objective_function(x):
    """Convex quadratic with its unique minimum (value 0) at x = (3, -2)."""
    return (x[0] - 3)**2 + (x[1] + 2)**2


def toy_clipper_function(x):
    """Identity clipper: the toy problem is unconstrained."""
    return x


def main():
    init_mean = np.array([0.0, 0.0])
    init_scale = np.array([10.0, 10.0])
    best_solution = evolution_strategies_optimizer(toy_objective_function,
                                                   toy_clipper_function,
                                                   init_mean, init_scale)


if __name__ == '__main__':
    main()