library(stats)

eurodistmat <- as.matrix(eurodist)

# A function for drawing routes
drawRoute <- function(sq, distances){
  # Find the 2d coordinates with classical multidimensional scaling (MDS)
  loc <- -cmdscale(distances, add = TRUE)$points
  x <- loc[, 1]; y <- loc[, 2]
  s <- seq_len(nrow(distances))
  tspinit <- loc[c(sq, sq[1]), ] # create a cycle
  # Draw the cities and the route
  plot(x, y, type = "n", asp = 1, xlab = "", ylab = "",
       main = "TSP route", axes = FALSE)
  # Loop through the sequence and draw the arrows with their distances
  for (i in s) {
    j <- i + 1
    if (j > length(s)) { j <- 1 }
    xi <- tspinit[i, 1]; yi <- tspinit[i, 2]
    xj <- tspinit[j, 1]; yj <- tspinit[j, 2]
    arrows(xi, yi, xj, yj, angle = 10, col = "red")
    # Calculate and draw the distance on each arrow
    d <- distances[sq[i], sq[j]]
    text((xi + xj) / 2, (yi + yj) / 2, labels = round(d, 1), cex = 0.8, col = "blue")
  }
  text(x, y, rownames(distances), cex = 0.8)
}

# Draw initial solution
drawRoute(1:21, eurodistmat)

# Basic local search for TSP
# Let's implement a basic local search

# Define your objective function
objective_function <- function(solution) {
  # Evaluate the solution and return its objective value (total tour length)
  indexes <- embed(c(solution, solution[1]), 2)
  sum(eurodistmat[cbind(indexes[, 2], indexes[, 1])])
}

# Test
objective_function(1:21)

# Define the function to generate neighbors
generate_neighbors <- function(current_solution) {
  # Generate and return a list of neighboring solutions (all pairwise swaps)
  neighbors <- list()
  all_swaps <- combn(seq_along(current_solution), 2)
  for (i in 1:ncol(all_swaps)) {
    a <- all_swaps[1, i]
    b <- all_swaps[2, i]
    i_neighbor <- current_solution
    i_neighbor[a] <- current_solution[b]
    i_neighbor[b] <- current_solution[a]
    neighbors[[i]] <- i_neighbor
  }
  neighbors
}

# Test
generate_neighbors(1:21)

# Define the function for the local search algorithm
local_search <- function(initial_solution, max_iterations) {
  current_solution <- initial_solution
  current_objective_value <- objective_function(current_solution)
  cat("Starting solution value: ", current_objective_value, "\n", sep = "")
  for (iteration in 1:max_iterations) {
    # Generate neighbors of the current solution
    neighbors <- generate_neighbors(current_solution)
    # Evaluate the neighbors and find the best one
    best_neighbor <- NULL
    best_neighbor_objective_value <- Inf # Set to -Inf for maximization problems
    for (neighbor in neighbors) {
      neighbor_objective_value <- objective_function(neighbor)
      if (neighbor_objective_value < best_neighbor_objective_value) { # Change to '>' for maximization problems
        best_neighbor <- neighbor
        best_neighbor_objective_value <- neighbor_objective_value
      }
    }
    # If the best neighbor is better than the current solution, update the current solution
    if (best_neighbor_objective_value < current_objective_value) { # Change to '>' for maximization problems
      current_solution <- best_neighbor
      current_objective_value <- best_neighbor_objective_value
      cat("Better solution found at iteration ", iteration, ".\n", sep = "")
      cat("New objective value: ", best_neighbor_objective_value, "\n", sep = "")
    } else {
      # If there is no improvement, stop the search
      break
    }
  }
  return(list(solution = current_solution, objective_value = current_objective_value))
}

res <- local_search(sample(1:21), max_iterations = 1000)
res$solution
res$objective_value
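# Illustration (not part of the original script): plain local search is sensitive to
# the starting tour. A minimal sketch, assuming the functions defined above have been
# sourced; the seed and the number of random starts are arbitrary demo choices.
set.seed(42)
starts <- replicate(5, sample(1:21), simplify = FALSE)
local_optima <- sapply(starts, function(s0) local_search(s0, 1000)$objective_value)
local_optima        # five (possibly different) local optima
range(local_optima) # spread between the best and the worst local optimum found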
# Implementation of Guided Local Search

# Define the penalized objective function
penalized_objective_function <- function(solution, penalty_matrix, lambda) {
  original_value <- objective_function(solution) # f(x)
  used_edges <- identify_edges(solution)         # I(x)
  penalized_value <- original_value + sum(penalty_matrix[used_edges] * lambda)
  return(penalized_value)
}

# The I 'matrix': linear indices of the edges used by a tour
identify_edges <- function(solution){
  n <- length(solution)
  shifted_solution <- c(solution[-1], solution[1])
  indexes <- matrix(1:(n * n), nrow = n, ncol = n)
  identified_edges <- vector()
  for (i in 1:length(solution)) {
    identified_edges <- c(identified_edges, indexes[solution[i], shifted_solution[i]])
  }
  identified_edges
}

# Test
identify_edges(1:21)

# At the start the penalization is 0, but we set the size of the matrix
initialize_penalty_matrix <- function(initial_solution){
  # initial_solution is only needed to figure out the size of the problem
  n <- length(initial_solution)
  # Return an n x n matrix with 0 penalties at the start
  matrix(0, nrow = n, ncol = n)
}

# Util(x, i) function
maximum_utility <- function(identified_edges, penalty_matrix){
  max_util <- -Inf
  for (e in identified_edges) {
    util_e <- eurodistmat[e] / (1 + penalty_matrix[e]) # c / (1 + p)
    if (util_e > max_util) {
      max_util <- util_e
    }
  }
  max_indexes <- vector()
  for (e in identified_edges) {
    util_e <- eurodistmat[e] / (1 + penalty_matrix[e])
    if (util_e == max_util) {
      max_indexes <- c(max_indexes, e)
    }
  }
  max_indexes
}
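# Illustration (not part of the original script): one manual penalization step, using
# only the helpers defined above. With all penalties at zero the utility of an edge is
# just its length, so the longest edge of the tour is the one that gets penalized.
pm    <- initialize_penalty_matrix(1:21)
edges <- identify_edges(1:21)
worst <- maximum_utility(edges, pm) # edge(s) with the largest cost / (1 + penalty)
eurodistmat[worst]                  # its length in km
pm[worst] <- pm[worst] + 1          # the GLS penalization step
# The penalized objective now exceeds the plain objective by lambda * (penalty sum)
penalized_objective_function(1:21, pm, lambda = 1) - objective_function(1:21)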
# Define the GLS algorithm
guided_local_search <- function(initial_solution, max_iterations, lambda) {
  current_solution <- initial_solution
  # Initialize the penalty matrix based on your problem's structure
  penalty_matrix <- initialize_penalty_matrix(current_solution)
  current_objective_value <- penalized_objective_function(current_solution, penalty_matrix, lambda)
  real_best_objective_value <- objective_function(current_solution)
  real_best_solution <- current_solution
  for (iteration in 1:max_iterations) {
    report <- FALSE
    # Generate neighbors of the current solution
    neighbors <- generate_neighbors(current_solution)
    # Evaluate the neighbors and find the best one based on the penalized objective function
    best_neighbor <- NULL
    best_neighbor_objective_value <- Inf # Set to -Inf for maximization problems
    for (neighbor in neighbors) {
      neighbor_objective_value <- penalized_objective_function(neighbor, penalty_matrix, lambda)
      if (neighbor_objective_value < best_neighbor_objective_value) { # Change to '>' for maximization problems
        best_neighbor <- neighbor
        best_neighbor_objective_value <- neighbor_objective_value
      }
      # Check the "real" non-penalized objective function
      real_neighbor_objective_value <- objective_function(neighbor)
      if (real_neighbor_objective_value < real_best_objective_value) {
        real_best_solution <- neighbor
        real_best_objective_value <- real_neighbor_objective_value
        report <- TRUE
      }
    }
    # If the best neighbor is better than the current solution, update the current solution
    if (best_neighbor_objective_value < current_objective_value) { # Change to '>' for maximization problems
      current_solution <- best_neighbor
      current_objective_value <- best_neighbor_objective_value # This is not always the best real solution
    } else {
      # If there is no improvement, penalize the objective function
      identified_edges <- identify_edges(current_solution)
      components_to_penalize <- maximum_utility(identified_edges, penalty_matrix)
      penalty_matrix[components_to_penalize] <- penalty_matrix[components_to_penalize] + 1
      # Update current_objective_value using the penalized function
      current_objective_value <- penalized_objective_function(current_solution, penalty_matrix, lambda)
    }
    if (report) {
      cat("Iteration ", iteration, ".\n", sep = "")
      cat("Best objective value updated to: ", real_best_objective_value, ".\n", sep = "")
    }
  }
  return(list(solution = real_best_solution,
              objective_value = real_best_objective_value,
              final_penalty_matrix = penalty_matrix))
}

res <- guided_local_search(1:21, 1000, 1)
res$solution
res$objective_value
drawRoute(res$solution, eurodistmat)

# Why did we get the same solution as with classical local search?
# With lambda = 1 the penalties are negligible compared to the typical edge length
mean(eurodistmat)

# Better results
res <- guided_local_search(sample(1:21), 10000, 100)
res$solution
res$objective_value
drawRoute(res$solution, eurodistmat)

# Graphical bug?
colnames(eurodistmat)[res$solution]
s2 <- res$solution
s2[2] <- res$solution[3]
s2[3] <- res$solution[2]
drawRoute(s2, eurodistmat)
objective_function(s2)
objective_function(res$solution)

# Define the function for the local search algorithm with restarts
local_search_restarts <- function(initial_solution, max_iterations) {
  current_solution <- initial_solution
  current_objective_value <- objective_function(current_solution)
  best_solution <- current_solution
  best_objective_value <- current_objective_value
  cat("Starting solution value: ", current_objective_value, "\n", sep = "")
  for (iteration in 1:max_iterations) {
    # Generate neighbors of the current solution
    neighbors <- generate_neighbors(current_solution)
    # Evaluate the neighbors and find the best one
    best_neighbor <- NULL
    best_neighbor_objective_value <- Inf # Set to -Inf for maximization problems
    for (neighbor in neighbors) {
      neighbor_objective_value <- objective_function(neighbor)
      if (neighbor_objective_value < best_neighbor_objective_value) { # Change to '>' for maximization problems
        best_neighbor <- neighbor
        best_neighbor_objective_value <- neighbor_objective_value
      }
    }
    # If the best neighbor is better than the current solution, update the current solution
    if (best_neighbor_objective_value < current_objective_value) { # Change to '>' for maximization problems
      current_solution <- best_neighbor
      current_objective_value <- best_neighbor_objective_value
      cat("Better solution found at iteration ", iteration, ".\n", sep = "")
      cat("New objective value: ", best_neighbor_objective_value, "\n", sep = "")
    } else {
      # If there is no improvement, keep the best tour so far and do a random restart
      if (current_objective_value < best_objective_value) {
        best_objective_value <- current_objective_value
        best_solution <- current_solution
      }
      current_solution <- sample(current_solution)
      current_objective_value <- objective_function(current_solution)
    }
  }
  # Check again in case the last search didn't converge
  if (current_objective_value < best_objective_value) {
    best_objective_value <- current_objective_value
    best_solution <- current_solution
  }
  return(list(solution = best_solution, objective_value = best_objective_value))
}

local_search_restarts(1:21, 10000)
# Compare
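# Illustration (not part of the original script): a rough side-by-side comparison of
# the three searches from the same random start. The seed, iteration counts and
# lambda are arbitrary demo choices, so results will vary from run to run.
set.seed(123)
start <- sample(1:21)
c(local_search          = local_search(start, 1000)$objective_value,
  local_search_restarts = local_search_restarts(start, 1000)$objective_value,
  guided_local_search   = guided_local_search(start, 1000, 100)$objective_value)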