google-research
275 lines · 8.8 KB
1# coding=utf-8
2# Copyright 2024 The Google Research Authors.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15
"""This file contains the warm-start Ford-Fulkerson algorithm implementation, the image sequence experiments

and the subroutines used by the experiment.
"""
20
21import numpy as np
22from queue import Queue
23from collections import defaultdict
24from copy import deepcopy
25import time
26from imagesegmentation import parseArgs, imageSegmentation
27from augmentingPath import augmentingPath
28import os
29"""
30One iteration of feasibility projection.
31Algorithm: given a node u, BFS until find an excess-deficit path in residual graph
32Prioritize paths that do not contain s or t, if no such path exists, allow using s or t.
33Such a path must be found.
34"""
35
36
37def FeasRestoreIter(p, rGraph, excesses, s, t):
38
39def FindPath(chain, node, excesses, s, t, out_dir=True):
40# here into_dir = True denotes the path should carry flow OUT OF v.
41v, path = node, [node]
42path_flow = abs(excesses[v]) if v not in [s, t] else float('inf')
43u = chain[v]
44while u >= 0:
45path.append(u)
46(head, tail) = (u, v) if out_dir else (v, u)
47path_flow = min(rGraph[head][tail], path_flow)
48v = u
49u = chain[v]
50if v not in [s, t]:
51path_flow = min(path_flow, abs(excesses[v]))
52if out_dir:
53path.reverse()
54return path, path_flow
55
56if excesses[p] == 0:
57return [p], 0
58direc = (excesses[p] > 0)
59chain = defaultdict(int)
60q = Queue()
61q.put(p)
62visited = {p}
63chain[p] = -1
64
65while not q.empty():
66u = q.get()
67
68for v in list(rGraph[u].keys()) + [s, t]:
69(head, tail) = (u, v) if direc else (v, u)
70if v not in visited and rGraph[head][tail] > 0:
71q.put(v)
72chain[v] = u
73visited.add(v)
74if excesses[p] * excesses[v] < 0:
75return FindPath(chain, v, excesses, s, t, direc)
76
77if t in visited:
78return FindPath(chain, t, excesses, s, t, direc)
79if s in visited:
80return FindPath(chain, s, excesses, s, t, direc)
81
82return None, None
83
84
"""
Given a flow and the original graph, build the residual graph in the same dictionary format.
"""


def BuildRdGraph(flows, graph):
  """Returns the residual graph of `flows` on `graph` (same nested-dict format).

  Each arc (u, v) keeps capacity - flow in the forward direction and gains
  flow units of capacity in the reverse direction.
  """
  residual = deepcopy(graph)
  for u, arcs in graph.items():
    for v in arcs:
      pushed = flows[u][v]
      assert pushed <= graph[u][v]
      residual[u][v] -= pushed
      residual[v][u] += pushed
  return residual
98
99
"""
Given an infeasible flow, project it to a feasible one satisfying capacity constraints on the new graph
Iteratively calling the FeasRestoreIter() subroutine.
"""


def FeasProj(flows, graph, excesses, V, s, t):
  """Projects a (possibly infeasible) flow onto the feasible region of `graph`.

  Repeatedly routes excess to deficit nodes along residual paths found by
  FeasRestoreIter() until every non-terminal node is balanced.

  Args:
    flows: input flow, nested-dict format flows[u][v]; not modified.
    graph: capacity graph, nested-dict format graph[u][v].
    excesses: length-V per-node imbalance array; rebalanced in place.
    V: number of nodes, including s and t.
    s: source node id.
    t: sink node id.

  Returns:
    (proj_flows, rGraph, projection wall time in seconds, number of
    restoration paths used, average restoration path length — 0 if no path
    was needed).

  Raises:
    Exception: if no excess-deficit path can be found (indicates a bug).
  """
  assert len(excesses) == V
  num_paths = 0
  total_path_len = 0
  rGraph = BuildRdGraph(flows, graph)
  proj_flows = deepcopy(flows)

  begin = time.time()
  for i in range(V):
    if i in [s, t] or excesses[i] == 0:
      continue
    while excesses[i] != 0:
      path, pathFlow = FeasRestoreIter(i, rGraph, excesses, s, t)
      if pathFlow is None:
        raise Exception(
            "Code has a bug. Didn't find a path during feasibility projection.")

      num_paths += 1
      total_path_len += (len(path) - 1)

      # Push pathFlow along the path, updating the residual graph and flow.
      for j in range(len(path) - 1):
        u, v = path[j], path[j + 1]
        rGraph[u][v] -= pathFlow
        rGraph[v][u] += pathFlow
        # Cancel any reverse flow first; only the remainder adds forward flow.
        proj_flows[u][v] += max(0, pathFlow - proj_flows[v][u])
        proj_flows[v][u] = max(0, proj_flows[v][u] - pathFlow)

      # Terminals absorb imbalance for free; only interior endpoints update.
      if path[0] not in [s, t]:
        excesses[path[0]] -= pathFlow
      if path[-1] not in [s, t]:
        excesses[path[-1]] += pathFlow
  end = time.time()

  print('feasiblity projection time:', end - begin)

  if num_paths > 0:
    feas_proj_avg_len = float(total_path_len) / num_paths
  else:
    feas_proj_avg_len = 0
  print('# of paths: %d, average length: %f' % (num_paths, feas_proj_avg_len))
  # Bug fix: the return previously recomputed float(total_path_len)/num_paths,
  # which raised ZeroDivisionError whenever the input flow was already
  # feasible (num_paths == 0). Reuse the guarded average computed above.
  return proj_flows, rGraph, end - begin, num_paths, feas_proj_avg_len
148
149
"""
The main function for warm-starting Ford-Fulkerson with a potentially infeasible flow.
- First perform feasibility projection.
- Then, based on the feasible flow, keep finding augmenting path and increasing the flow until termination.
"""


def WarmStartFlow(inf_flows, graph, excesses, V, s, t):
  """Runs warm-started Ford-Fulkerson: feasibility projection, then augmentation.

  Returns:
    (max_flows, cuts, flow value after projection, projection time, total
    warm-start time, #projection paths, avg projection path length,
    #augmenting paths, avg augmenting path length).
  """
  begin = time.time()
  # Step 1: repair the infeasible flow into a feasible one.
  proj_flows, rGraph, proj_time, num_paths_proj, avg_len_proj = FeasProj(
      inf_flows, graph, excesses, V, s, t)
  # Value recovered by the projection = total flow leaving the source.
  proj_value = sum(proj_flows[s][v] for v in proj_flows[s])
  # Step 2: augment the feasible flow up to a maximum flow.
  max_flows, cuts, num_paths_aug, avg_len_aug = augmentingPath(
      graph, V, s, t, proj_flows, rGraph)
  total_time = time.time() - begin
  print('warmstart total time: ', total_time)
  print(
      'finding maximum flow # of augmenting path: %d, average augmenting path length: %f'
      % (num_paths_aug, avg_len_aug))
  return (max_flows, cuts, proj_value, proj_time, total_time, num_paths_proj,
          avg_len_proj, num_paths_aug, avg_len_aug)
170
171
"""
Given another flow conserving flow and graph, round down the flow on every arc if it exceeds the capacity.
Returns the rounded flow and the excess at every node.
This function is not needed for warm-start itself but mostly for the experiment setting.
"""


def RoundDown(flows, graph, V, s, t):
  """Clamps `flows` (in place) to the capacities of `graph`.

  Removing overflow from an arc (u, v) leaves an excess at u (unless u is the
  source) and a deficit at v (unless v is the sink).

  Returns:
    A numpy int array of length V holding the resulting per-node excesses.
  """
  excesses = np.zeros(V, dtype=int)
  for u in range(V):
    for v in graph[u]:
      overflow = max(0, flows[u][v] - graph[u][v])
      if u != s:
        excesses[u] += overflow
      if v != t:
        excesses[v] -= overflow
      flows[u][v] -= overflow
  return excesses
190
191
"""
This function prints out the excesses/deficits on the graph so that we can see where they are.
"""


def ExcessGraph(excesses, size):
  """Prints node excesses/deficits laid out as a size x size pixel grid."""
  grid = np.zeros((size, size))
  for node in range(size * size):
    row, col = divmod(node, size)
    grid[row][col] = excesses[node]
  print(grid)
  return
204
205
"""
The main function for running the full experiment with an existing set of seeds. The experiment does the following from image to image:
- Construct the graph on the new image.
- Take the old max flow solution from the last image, and round down according to the new capacities, thus generating excesses/deficits.
- Run warm-start Ford-Fulkerson on the resulting infeasible flow.
"""


def Exp(args):
  """Runs the image-sequence warm-start experiment and writes result files.

  For each image after the first, the previous image's max flow is rounded
  down to the new capacities and repaired by warm-start Ford-Fulkerson; the
  warm-start optimum is asserted equal to the cold-start optimum, and timing
  and path statistics are appended to per-size result files.
  """
  folder, group, size, algo, loadseed = args.folder, args.group, args.size, args.algo, args.loadseed
  image_dir = folder + '/' + group + '_cropped'
  image_list = os.listdir(image_dir)
  # One node per pixel plus the two terminals.
  V = size * size + 2
  SOURCE, SINK = V - 2, V - 1

  result_dir = folder + '/' + group + '_results' + '/'
  if not os.path.exists(result_dir):
    os.makedirs(result_dir)
  time_dir = result_dir + str(size) + '_time.txt'
  path_dir = result_dir + str(size) + '_path.txt'

  # Tab-separated result files: one row per image (after the first).
  time_f = open(time_dir, 'w')
  time_f.write('image_name\tff\twarm_start\tfeas_proj\n')
  path_f = open(path_dir, 'w')
  path_f.write(
      'image_name\tflow_value\texcess\trecoverd_flow\tnum_aug_path\tavg_path_len\tnum_proj_path\tavg_proj_len\tnum_warm_start_path\tavg_warm_start_len\n'
  )

  num_images = len(image_list)
  old_flows = None
  for i in range(num_images):
    new_image = image_list[i]
    # Cold-start segmentation on the new image gives the baseline numbers
    # (ff_time, path_count) and the new capacity graph.
    new_flows, min_cut, path_count, average_path_len, new_graph, ff_time = imageSegmentation(
        new_image, folder, group, (size, size), algo, loadseed)
    if old_flows is None:
      # First image: nothing to warm start from yet.
      old_flows = deepcopy(new_flows)
      continue
    # Round the previous solution down to the new capacities; this mutates
    # old_flows in place and yields the excesses the warm start must repair.
    excesses = RoundDown(old_flows, new_graph, V, SOURCE, SINK)
    total_excess = max(
        np.sum(np.maximum(excesses, 0)), np.sum(np.maximum(-excesses, 0)))
    print('total excess/deficit to round down:' + str(total_excess))
    warmstart_flows, warmstart_cuts, proj_value, proj_time, warmstart_time, num_paths_proj, avg_len_proj, num_paths_aug, avg_len_aug = WarmStartFlow(
        old_flows, new_graph, excesses, V, SOURCE, SINK)

    # check that the warm start algo reaches optimality
    opt_flow_value = sum([new_flows[SOURCE][v] for v in new_flows[SOURCE]])
    ws_flow_value = sum(
        [warmstart_flows[SOURCE][v] for v in warmstart_flows[SOURCE]])
    assert opt_flow_value == ws_flow_value
    old_flows = deepcopy(new_flows)

    # write the results
    time_f.write(new_image.split('.')[0] + '\t')
    time_f.write('{}\t{}\t{}\n'.format(ff_time, warmstart_time, proj_time))
    time_f.flush()
    path_f.write(new_image.split('.')[0] + '\t')
    path_f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
        opt_flow_value, total_excess, proj_value, path_count,
        round(average_path_len, 4), num_paths_proj, round(avg_len_proj, 4),
        num_paths_aug, round(avg_len_aug, 4)))
    path_f.flush()

  time_f.close()
  path_f.close()
  return
271
272
if __name__ == '__main__':
  # Entry point: parse command-line arguments and run the full experiment.
  Exp(parseArgs())
276