sync: add separate --jobs options for different steps

The number of jobs one wants to run against the network tends to
factor differently from the number of jobs one wants to run when
checking out local projects.  The former is constrained by your
internet connection & server limits while the latter is constrained
by your local computer's CPU & storage I/O.  People with beefier
computers probably want to keep the network/server jobs bounded a
bit lower than the local/checkout jobs.

Change-Id: Ia27ab682c62c09d244a8a1427b1c65acf0116c1c
Reviewed-on: https://gerrit-review.googlesource.com/c/git-repo/+/302804
Reviewed-by: Raman Tenneti <rtenneti@google.com>
Tested-by: Mike Frysinger <vapier@google.com>
This commit is contained in:
Mike Frysinger 2021-04-09 00:21:02 -04:00
parent a1051d8baa
commit 49de8ef584

View File

@ -174,6 +174,11 @@ later is required to fix a server side protocol bug.
pass
super()._Options(p)
p.add_option('--jobs-network', default=None, type=int, metavar='JOBS',
help='number of network jobs to run in parallel (defaults to --jobs)')
p.add_option('--jobs-checkout', default=None, type=int, metavar='JOBS',
help='number of local checkout jobs to run in parallel (defaults to --jobs)')
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help='obsolete option (to be deleted in the future)')
@ -364,6 +369,7 @@ later is required to fix a server side protocol bug.
def _Fetch(self, projects, opt, err_event):
ret = True
jobs = opt.jobs_network if opt.jobs_network else self.jobs
fetched = set()
pm = Progress('Fetching', len(projects), delay=False)
@ -391,7 +397,7 @@ later is required to fix a server side protocol bug.
return ret
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(projects_list) == 1 or opt.jobs == 1:
if len(projects_list) == 1 or jobs == 1:
if not _ProcessResults(self._FetchProjectList(opt, x) for x in projects_list):
ret = False
else:
@ -409,7 +415,7 @@ later is required to fix a server side protocol bug.
else:
pm.update(inc=0, msg='warming up')
chunksize = 4
with multiprocessing.Pool(opt.jobs) as pool:
with multiprocessing.Pool(jobs) as pool:
results = pool.imap_unordered(
functools.partial(self._FetchProjectList, opt),
projects_list,
@ -463,6 +469,7 @@ later is required to fix a server side protocol bug.
err_results: A list of strings, paths to git repos where checkout failed.
"""
ret = True
jobs = opt.jobs_checkout if opt.jobs_checkout else self.jobs
# Only checkout projects with worktrees.
all_projects = [x for x in all_projects if x.worktree]
@ -483,11 +490,11 @@ later is required to fix a server side protocol bug.
return True
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(all_projects) == 1 or opt.jobs == 1:
if len(all_projects) == 1 or jobs == 1:
if not _ProcessResults(self._CheckoutOne(opt, x) for x in all_projects):
ret = False
else:
with multiprocessing.Pool(opt.jobs) as pool:
with multiprocessing.Pool(jobs) as pool:
results = pool.imap_unordered(
functools.partial(self._CheckoutOne, opt),
all_projects,