MATLAB Neural Network Programming
1. Requesting a MATLAB linear neural network program
The genetic algorithm (GA), proposed by Professor Holland at the University of Michigan, is an effective method for solving complex combinatorial optimization problems. Its ideas come from Darwin's theory of evolution and Mendelian genetics: it simulates the process of biological evolution to filter better solutions out of a huge search space, and it is efficient and highly robust. For this reason, genetic algorithms have been widely applied to the TSP and MTSP problems.
The MATLAB program is as follows:
function [opt_rte,opt_brk,min_dist] = mtspf_ga(xy,dmat,salesmen,min_tour,pop_size,num_iter)
%%
% Example
% n = 20;              % number of cities
% xy = 10*rand(n,2);   % city coordinates, randomly generated here (or set manually)
% salesmen = 5;        % number of salesmen
% min_tour = 3;        % minimum number of cities each salesman must visit
% pop_size = 80;       % population size
% num_iter = 200;      % number of iterations
% a = meshgrid(1:n);
% dmat = reshape(sqrt(sum((xy(a,:)-xy(a',:)).^2,2)),n,n);
% [opt_rte,opt_brk,min_dist] = mtspf_ga(xy,dmat,salesmen,min_tour,...
%     pop_size,num_iter); % call the function
%%
[N,dims] = size(xy);  % size of the city coordinate matrix
[nr,nc] = size(dmat); % size of the city distance matrix
n = N - 1;            % number of cities remaining after removing the starting city
% Initialize the routes and the choice of break points
num_brks = salesmen-1;
dof = n - min_tour*salesmen; % degrees of freedom when placing the break points
addto = ones(1,dof+1);
for k = 2:num_brks
addto = cumsum(addto);
end
cum_prob = cumsum(addto)/sum(addto);
%% Initialize the population
pop_rte = zeros(pop_size,n);        % population of routes
pop_brk = zeros(pop_size,num_brks); % population of break-point sets
for k = 1:pop_size
pop_rte(k,:) = randperm(n)+1;
pop_brk(k,:) = randbreaks();
end
% Colors used when plotting the route curves
clr = [1 0 0; 0 0 1; 0.67 0 1; 0 1 0; 1 0.5 0];
if salesmen > 5
clr = hsv(salesmen);
end
%%
% GA-based MTSP
global_min = Inf;                       % initialize the shortest distance
total_dist = zeros(1,pop_size);
dist_history = zeros(1,num_iter);
tmp_pop_rte = zeros(8,n);               % routes of the current group of 8
tmp_pop_brk = zeros(8,num_brks);        % break points of the current group of 8
new_pop_rte = zeros(pop_size,n);        % updated routes
new_pop_brk = zeros(pop_size,num_brks); % updated break points
for iter = 1:num_iter
% Evaluate the fitness (total route distance) of each individual
for p = 1:pop_size
d = 0;
p_rte = pop_rte(p,:);
p_brk = pop_brk(p,:);
rng = [[1 p_brk+1];[p_brk n]]';
for s = 1:salesmen
d = d + dmat(1,p_rte(rng(s,1))); % distance from the depot to the first city of this tour
for k = rng(s,1):rng(s,2)-1
d = d + dmat(p_rte(k),p_rte(k+1));
end
d = d + dmat(p_rte(rng(s,2)),1); % distance from the last city back to the depot
end
total_dist(p) = d;
end
% Find the best route in the current population
[min_dist,index] = min(total_dist);
dist_history(iter) = min_dist;
if min_dist < global_min
global_min = min_dist;
opt_rte = pop_rte(index,:); % best route found so far
opt_brk = pop_brk(index,:); % corresponding break points
rng = [[1 opt_brk+1];[opt_brk n]]'; % start/end indices of each salesman's tour
figure(1);
for s = 1:salesmen
rte = [1 opt_rte(rng(s,1):rng(s,2)) 1];
plot(xy(rte,1),xy(rte,2),'.-','Color',clr(s,:));
title(sprintf('Cities = %d, Salesmen = %d, Total distance = %1.4f, Iteration = %d',n+1,salesmen,min_dist,iter));
hold on
grid on
end
plot(xy(1,1),xy(1,2),'ko');
hold off
end
% Genetic operators, applied to random groups of 8 individuals
rand_grouping = randperm(pop_size);
for p = 8:8:pop_size
rtes = pop_rte(rand_grouping(p-7:p),:);
brks = pop_brk(rand_grouping(p-7:p),:);
dists =total_dist(rand_grouping(p-7:p));
[ignore,idx] = min(dists);
best_of_8_rte = rtes(idx,:);
best_of_8_brk = brks(idx,:);
rte_ins_pts = sort(ceil(n*rand(1,2)));
I = rte_ins_pts(1);
J = rte_ins_pts(2);
for k = 1:8 % generate 8 new individuals from the best of this group
tmp_pop_rte(k,:) = best_of_8_rte;
tmp_pop_brk(k,:) = best_of_8_brk;
switch k
case 2 % flip (reverse) a route segment
tmp_pop_rte(k,I:J) = fliplr(tmp_pop_rte(k,I:J));
case 3 % swap two cities
tmp_pop_rte(k,[I J]) = tmp_pop_rte(k,[J I]);
case 4 % slide (rotate) a route segment
tmp_pop_rte(k,I:J) = tmp_pop_rte(k,[I+1:J I]);
case 5 % new break points
tmp_pop_brk(k,:) = randbreaks();
case 6 % flip segment and new break points
tmp_pop_rte(k,I:J) = fliplr(tmp_pop_rte(k,I:J));
tmp_pop_brk(k,:) = randbreaks();
case 7 % swap cities and new break points
tmp_pop_rte(k,[I J]) = tmp_pop_rte(k,[J I]);
tmp_pop_brk(k,:) = randbreaks();
case 8 % slide segment and new break points
tmp_pop_rte(k,I:J) = tmp_pop_rte(k,[I+1:J I]);
tmp_pop_brk(k,:) = randbreaks();
otherwise
end
end
new_pop_rte(p-7:p,:) = tmp_pop_rte;
new_pop_brk(p-7:p,:) = tmp_pop_brk;
end
pop_rte = new_pop_rte;
pop_brk = new_pop_brk;
end
figure(2)
plot(dist_history,'b','LineWidth',2);
title('Best distance history');
xlabel('Iteration')
ylabel('Best total distance')
% Randomly generate a set of break points
function breaks = randbreaks()
if min_tour == 1 % with min_tour = 1, break points can be placed anywhere
tmp_brks = randperm(n-1);
breaks = sort(tmp_brks(1:num_brks));
else % force each salesman's tour to contain at least min_tour cities
num_adjust = find(rand < cum_prob,1)-1;
spaces = ceil(num_brks*rand(1,num_adjust));
adjust = zeros(1,num_brks);
for kk = 1:num_brks
adjust(kk) = sum(spaces == kk);
end
breaks = min_tour*(1:num_brks) + cumsum(adjust);
end
end
disp('Optimal route:')
disp(opt_rte);
disp('Break points:')
disp(opt_brk);
end
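The commented-out example in the function header can be turned into a small driver script. The sketch below simply uncomments that example, assuming the function above is saved as mtspf_ga.m on the MATLAB path; the parameter values are purely illustrative, and pop_size is kept a multiple of 8 because the genetic operators work on groups of 8.
% Driver sketch, based on the header example above (values are illustrative)
n = 20;                   % number of cities
xy = 10*rand(n,2);        % random city coordinates
salesmen = 5;             % number of salesmen
min_tour = 3;             % minimum cities per salesman
pop_size = 80;            % population size (kept a multiple of 8)
num_iter = 200;           % number of iterations
a = meshgrid(1:n);
dmat = reshape(sqrt(sum((xy(a,:)-xy(a',:)).^2,2)),n,n); % Euclidean distance matrix
[opt_rte,opt_brk,min_dist] = mtspf_ga(xy,dmat,salesmen,min_tour,pop_size,num_iter);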
2. How to implement a three-layer BP network with the MATLAB Neural Network Toolbox
The Neural Network Toolbox makes it straightforward to build and train a network; example code is shown below:
%% BP algorithm
function Out = bpnet(p,t,p_test)
% p and t are the training samples and must be prepared in advance
global S1 % S1 is the number of hidden-layer neurons
net = newff(minmax(p),[S1,8],{'tansig','purelin'},'trainlm'); % trainlm is usually the most effective training function
% net = newff(P,T,31,{'tansig','purelin'},'trainlm'); % newer newff calling syntax
net.trainParam.epochs = 1000;
net.trainParam.goal = 0.00001;
net.trainParam.lr = 0.01;
net.trainParam.showWindow = false;      % suppress the training GUI window
net.trainParam.showCommandLine = false; % suppress command-line training output
net=train(net,p,t);
Out=sim(net,p_test);
end
The code above is not complete; the full program, including the training sample data, is provided in the attachment.
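Since the attachment is not reproduced here, the following is only a hypothetical toy call showing how bpnet would be invoked; the hidden-layer size S1, the sample sizes, and the random data are all made-up assumptions for illustration.
% Hypothetical usage sketch (all data below is invented)
global S1
S1 = 10;                 % assumed hidden-layer size
p = rand(3,50);          % 3 input features x 50 training samples
t = rand(8,50);          % 8 target outputs per sample, matching the [S1,8] layer sizes above
p_test = rand(3,5);      % 5 unseen samples
Out = bpnet(p,t,p_test); % network outputs for the test samples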