Operations Research Series 68: Solving the TSP with heuristics in Julia

Posted by IE06


1. Basic functions

1.1 Reading data and plotting

Install and load the libraries for reading data, plotting, and so on:

using Gadfly
using LinearAlgebra
using TravelingSalesmanHeuristics
using TSPLIB

tsp = readTSPLIB(:a280)     # the a280 instance bundled with TSPLIB.jl
n = tsp.dimension
plot_instance(tsp) = plot(x = tsp.nodes[:,1], y = tsp.nodes[:,2], Geom.point, Guide.xlabel(nothing), Guide.ylabel(nothing))
function plot_solution(tsp, path, extras = [])
    ptspath = tsp.nodes[path,:]
    plot(x = ptspath[:,1], y = ptspath[:,2], Geom.point, Geom.path, Guide.xlabel(nothing), Guide.ylabel(nothing), extras...)
end
# full Euclidean distance matrix
distmat = [norm(tsp.nodes[i,:] - tsp.nodes[j,:]) for i in 1:n, j in 1:n]
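
A quick sanity check and a first look at the instance (purely illustrative; issymmetric comes from LinearAlgebra, which is already loaded):

plot_instance(tsp)            # scatter plot of the 280 cities of a280
@assert issymmetric(distmat)  # Euclidean distances, so the matrix is symmetric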

There is also a snippet for drawing a tree (used later to draw the minimum spanning tree); it uses PyPlot rather than Gadfly:

using PyPlot

# mst is the list of MST edges returned by minspantree in Section 2.2
for i in 1:length(mst)
    l = tsp.nodes[collect(mst[i]),:]
    PyPlot.plot(l[:,1], l[:,2], color="r", linewidth = 0.5, linestyle="--")
end

1.2 Calling the solvers

The first way to call the solver is solve_tsp with a quality_factor:

@time path, cost = solve_tsp(distmat; quality_factor = 85)
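
quality_factor trades run time against tour quality (higher means more effort; it is roughly documented as a 0-100 scale). A rough comparison, with the exact defaults treated as an assumption:

@time path_fast, cost_fast = solve_tsp(distmat)                       # default effort
@time path_best, cost_best = solve_tsp(distmat; quality_factor = 100) # maximum effort
println(cost_fast, " vs ", cost_best)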

The second is to call a constructive heuristic directly:

path_nn, cost_nn = nearest_neighbor(distmat; firstcity = 1, do2opt = false)
path_nn2opt, cost_nn2opt = nearest_neighbor(distmat; firstcity = 1, do2opt = true)
path_fi, cost_fi = farthest_insertion(distmat; firstcity = 1, do2opt = false)
path_fi2opt, cost_fi2opt = farthest_insertion(distmat; firstcity = 1, do2opt = true) 
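
The four results can be compared directly and drawn with the plot_solution helper from Section 1.1 (illustrative only):

for (name, c) in [("nn", cost_nn), ("nn+2opt", cost_nn2opt), ("fi", cost_fi), ("fi+2opt", cost_fi2opt)]
    println(name, ": ", c)
end
plot_solution(tsp, path_fi2opt)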

The third is to call an improvement heuristic:

path_sa, cost_sa = simulated_annealing(distmat; init_path = path_nn, num_starts = 10)
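
Here init_path seeds each of the num_starts runs with the nearest-neighbor tour, so the result can only improve on it; a quick before/after check:

println("cost before SA: ", cost_nn, ", after SA: ", cost_sa)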

2. Computing lower bounds

The bound functions can be called as follows:

TravelingSalesmanHeuristics.vertwise_bound(distmat)
TravelingSalesmanHeuristics.hkinspired_bound(distmat)
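
These two are internal (non-exported) functions, hence the module prefix. If I remember the package API correctly, the exported entry point is lowerbound, which returns the best bound the package can compute; treat the exact name as an assumption:

lb = lowerbound(distmat)
println("gap of the solve_tsp tour: ", round(100 * (cost - lb) / lb, digits = 2), "%")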

2.1 The vertwise bound

The first bound is very simple: every vertex must be entered once and left once, so the tour cost is at least the sum over all vertices of the cheapest edge leaving each vertex (and likewise for the cheapest entering edge); the bound returns the larger of the two sums.

# the cost of a tour must be >= the sum over all vertices of
# the cost of the cheapest edge leaving that vertex
# likewise for the cheapest edge entering that vertex
# since we must go to and leave each vertex
function vertwise_bound(distmat::AbstractMatrix{T}) where T<:Real
    # the simple code below would tend to pick out the 0 costs on the diagonal
    # so make a doctored copy of the distance matrix with high costs on the diagonal
    m = maximum(distmat)
    distmat_nodiag = distmat + m * I
    leaving = sum(minimum(distmat_nodiag, dims = 2))
    entering = sum(minimum(distmat_nodiag, dims = 1))
    return maximum([leaving, entering])
end
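
A tiny worked example: on the 3-city instance below the only tour costs 3 + 5 + 4 = 12, while the bound sums the cheapest edge at each vertex, 3 + 3 + 4 = 10:

toy = [0.0 3.0 4.0;
       3.0 0.0 5.0;
       4.0 5.0 0.0]
TravelingSalesmanHeuristics.vertwise_bound(toy)   # 10.0 <= 12.0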

2.2 The 1-tree bound

This requires a minimum spanning tree:

# minimum spanning tree (Prim's algorithm)
# returns a (n-1) long Vector of Tuple{Int, Int} where each tuple is an edge in the MST
# and the total weight of the tree
# the matrix passed in must be symmetric or you won't get out the minimum spanning tree
function minspantree(dm::AbstractMatrix{T}) where T<:Real # accepts views
    mst_edges = Vector{Tuple{Int, Int}}()
    mst_cost = zero(T)
    n = size(dm, 1)

    # we keep a running list of the distance from each vertex to the partly formed tree
    # rather than using 0 for vertices already in the tree, we use a large value so that we
    # can find the closest non-tree vertex via call to Julia's `findmin`.
    bigval = maximum(dm) + one(T)
    tree_dists = dm[1,:] # distance to tree
    closest_tree_verts = ones(Int, n)
    tree_dists[1] = bigval # vert 1 is in tree now

    for _ in 1:(n-1) # need to add n - 1 other verts to tree
        cost, newvert = findmin(tree_dists)
        treevert = closest_tree_verts[newvert]
        # add costs and edges
        mst_cost += cost
        if treevert < newvert
            push!(mst_edges, (treevert, newvert))
        else
            push!(mst_edges, (newvert, treevert))
        end
        # update distances to tree
        tree_dists[newvert] = bigval
        for i in 1:n
            c = tree_dists[i]
            if c >= bigval # already in tree
                continue
            end
            # maybe this vertex is closer to the new vertex than the prior iteration's tree
            if c > dm[i, newvert]
                tree_dists[i] = dm[i, newvert]
                closest_tree_verts[i] = newvert
            end
        end
    end

    return mst_edges, mst_cost
end

An example call is as follows:
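
A minimal sketch, reusing distmat from Section 1.1 (mst is the edge list that the PyPlot snippet in Section 1.1 expects):

mst, mstcost = minspantree(distmat)
println("MST cost: ", mstcost)   # already a (weak) lower bound on the tour cost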

The 1-tree procedure is as follows (a code sketch for a single v0 follows the list):

1.  Pick a vertex v0 in V.
2.  Let r be the cost of a minimum spanning tree on (V - {v0}, d).
3.  Let s be the sum of the two cheapest edges incident to v0, i.e. s = min{ d(v0,x) + d(v0,y) : x, y in V - {v0}, x != y }.
4.  Output t := r + s.
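
For a single fixed v0, the four steps translate directly into code. A minimal sketch reusing minspantree (v0 = 1 is an arbitrary choice):

v0 = 1
keep = [1:(v0 - 1); (v0 + 1):n]
_, r = minspantree(view(distmat, keep, keep))   # step 2: MST on V - {v0}
s = sum(partialsort(distmat[v0, keep], 1:2))    # step 3: two cheapest edges at v0
t = r + s                                       # step 4: a valid lower bound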

We can take every leaf of the full MST as a candidate v0 and keep the largest t among them (each t is a valid lower bound, so the maximum is the tightest). The code is as follows:

# s[j] = sum of the two cheapest edges incident to vertex j (step 3 of the 1-tree procedure)
function minimum_two(distmat::AbstractMatrix{T}) where T<:Real
    n = size(distmat, 1)
    s = zeros(n)
    max_num = typemax(Int32)
    for j in 1:n
        arr = view(distmat,j,[1:j-1;j+1:n])
        m1=max_num
        m2=max_num
        for i in 1:n-1
            if arr[i]<m1
                m2=m1
                m1=arr[i]
            elseif arr[i]<m2
                m2=arr[i]
            end
        end
        s[j] = m1+m2
    end
    
    return s
end

using DataStructures   # for counter

# 1-tree bound using only the leaves of the full MST as candidates for v0
function hkinspired_bound_2(distmat::AbstractMatrix{T}) where T<:Real
    mst, c = minspantree(distmat)
    roots = Dict()
    for m in mst
        roots[m[1]] = m[2]
        roots[m[2]] = m[1]
    end
    x = counter(cat([m[1] for m in mst],[m[2] for m in mst],dims=1))
    leaves = []
    for xi in x
        if xi[2]==1
            append!(leaves,xi[1])
        end
    end
    maxcost = 0
    min2 = minimum_two(distmat)
    for i in leaves
        cost = c - distmat[i,roots[i]] + min2[i] #+ 2*minimum(distmat_nodiag[i,:])
        if cost > maxcost
            maxcost = cost
        end
    end
    
    return maxcost
end
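
An illustrative comparison of the bounds computed so far (the exact values depend on the instance):

lb_leaf = hkinspired_bound_2(distmat)
lb_vert = TravelingSalesmanHeuristics.vertwise_bound(distmat)
println("leaf-based 1-tree bound: ", lb_leaf, "   vertwise bound: ", lb_vert)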

The other approach is to try every vertex as v0 and recompute the MST each time; this is noticeably slower. The code is as follows:

# a simplified/looser version of Held-Karp bounds
# any tour is a spanning tree on (n-1) verts plus two edges from the left out vert
# so the maximum over all verts (as the left out vert) of
# MST cost on remaining vertices plus 2 cheapest edges from
# the left out vert is a lower bound
# for extra simplicity, the distance matrix is modified to be symmetric so we can treat
# the underlying graph as undirected. This also doesn't help the bound!
function hkinspired_bound(distmat::AbstractMatrix{T}) where T<:Real
    n = size(distmat, 1)

    # get a view of the distmat with one vertex deleted
    function del_vert(v)
        keep = [1:(v-1) ; (v+1):n]
        return view(distmat, keep, keep)
    end

    # make sure min(distmat[v,:]) doesn't pick diagonal elements
    m = maximum(distmat)
    distmat_nodiag = distmat + m * I

    # lower bound the optimal cost by leaving a single vertex out
    # forming spanning tree on the rest
    # connecting the left-out vertex
    function cost_leave_out(v)
        dmprime = del_vert(v)
        _, c = minspantree(dmprime)
        c += 2*minimum(distmat_nodiag[v,:])
        return c
    end

    return maximum(map(cost_leave_out, 1:n))
end
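
A rough timing comparison of the two variants (the leaf-only version builds a single MST, while the package version builds n of them):

@time hkinspired_bound_2(distmat)
@time TravelingSalesmanHeuristics.hkinspired_bound(distmat)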

2.3 The Held-Karp bound

The Held-Karp bound is also used inside the LKH algorithm. The core idea is to attach a penalty π_i to every vertex i and replace each distance d(i,j) with d(i,j) + π_i + π_j; for any choice of π, the cost of a minimum 1-tree under the penalized distances minus 2·Σπ_i is still a lower bound on the optimal tour, so we look for the π that maximizes it.

Much as when training a deep learning model, we can adjust all the π values step by step (a subgradient update driven by the vertex degrees in the current 1-tree) until convergence.
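
To make this concrete, here is a minimal sketch of the subgradient ascent, reusing minspantree from Section 2.2. Everything in it (the name heldkarp_bound_sketch, the step rule step0/k, the iteration count) is an assumption of this sketch, not code from TravelingSalesmanHeuristics:

function heldkarp_bound_sketch(distmat; iters = 100, step0 = 1.0)
    n = size(distmat, 1)
    pen = zeros(n)              # vertex penalties (the π values)
    best = -Inf
    for k in 1:iters
        # penalized distances d'(i,j) = d(i,j) + π_i + π_j
        dm = distmat .+ pen .+ pen'
        # 1-tree: MST on vertices 2..n plus the two cheapest penalized edges at vertex 1
        mst, treecost = minspantree(view(dm, 2:n, 2:n))
        onetree = treecost + sum(partialsort(dm[1, 2:n], 1:2))
        best = max(best, onetree - 2 * sum(pen))   # w(π) is a valid lower bound
        # vertex degrees in the 1-tree (MST indices are shifted by 1)
        deg = zeros(Int, n)
        deg[1] = 2
        for (u, v) in mst
            deg[u + 1] += 1
            deg[v + 1] += 1
        end
        # subgradient step: raise π where the degree is too high, lower it where too low
        pen .+= (step0 / k) .* (deg .- 2)
    end
    return best
end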

3. Simulated annealing

The improvement heuristic uses simple 2-opt moves (reversing a segment of the tour) and accepts worsening moves with a certain probability. The source code is as follows:

"""
The temperature decays exponentially from `init_temp` to `final_temp`,接受退化解的概率
"""
function simulated_annealing(distmat::Matrix{T} where {T<:Real};
                             steps = 50*length(distmat),
                             num_starts = 1,
                             init_temp = exp(8), final_temp = exp(-6.5),
                             init_path::Union{Vector{Int}, Nothing} = nothing)
    # number of cities
    n = size(distmat, 1)

    # cooling rate: we multiply by a constant mult each step
    cool_rate = (final_temp / init_temp)^(1 / (steps - 1))

    # do SA with a single starting path; each step proposes reversing a segment of the path (a 2-opt move)
    function sahelper!(path)
        temp = init_temp / cool_rate # divide by cool_rate so when we first multiply we get init_temp
        for i in 1:steps
            temp *= cool_rate
            first, last = rand(2:n), rand(2:n)
            if first > last
                first, last = last, first
            end
            cost_delta = pathcost_rev_delta(distmat, path, first, last)
            @fastmath accept = cost_delta < 0 ? true : rand() < exp(-cost_delta / temp)
            # should we accept?
            if accept
                reverse!(path, first, last)
            end
        end
        return path, pathcost(distmat, path)
    end

    # unpack the initial path
    if init_path === nothing
        randstart = true
        path = randpath(n)
    else
        if !legal_circuit(init_path)
            error("The init_path passed to simulated_annealing must be a legal circuit.")
        end
        randstart = false
        path = init_path
    end
    cost = pathcost(distmat, path)
    # main loop: run SA from each start and keep the best tour
    for _ in 1:num_starts
        path_this_start = randstart ? randpath(n) : deepcopy(init_path)
        otherpath, othercost = sahelper!(path_this_start)
        if othercost < cost
            cost = othercost
            path = otherpath
        end
    end

    return path, cost
end


# random closed tour on 1:n; `shuffle` needs `using Random`
function randpath(n)
    path = 1:n |> collect |> shuffle
    push!(path, path[1]) # close the loop back to the start
    return path
end
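
The code above relies on three package internals that are not shown: pathcost, pathcost_rev_delta and legal_circuit. Below are assumed, simplified stand-ins (the real versions also handle asymmetric distance matrices), included only so the snippet can be read on its own:

# total cost of a closed path [c1, c2, ..., c1]
pathcost(distmat, path) = sum(distmat[path[i], path[i+1]] for i in 1:(length(path) - 1))

# cost change from reversing path[first:last]; for a symmetric distmat only the
# two boundary edges change, so the delta is O(1) to compute
function pathcost_rev_delta(distmat, path, first, last)
    a, b = path[first - 1], path[first]
    c, d = path[last], path[last + 1]
    return (distmat[a, c] + distmat[b, d]) - (distmat[a, b] + distmat[c, d])
end

# a legal circuit visits every city exactly once and returns to its start
legal_circuit(path) = path[1] == path[end] && sort(path[1:end-1]) == collect(1:(length(path) - 1))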
