This one could definitely benefit from limiting the number of parallel
threads, especially when dealing with the GCC PKGBUILD. There is
definitely a lot of contention, and times varied from twice as fast to
a few seconds slower, depending on whether the cache decided it needed
to flush out some data. Limiting the number of threads to the number
of CPUs would probably go a long way toward resolving some of this
contention for IO bandwidth.

Signed-off-by: Dan McGee <dan@archlinux.org>
---
 scripts/makepkg.sh.in |   23 ++++++++++++++++++-----
 1 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/scripts/makepkg.sh.in b/scripts/makepkg.sh.in
index 92b7597..b9588a1 100644
--- a/scripts/makepkg.sh.in
+++ b/scripts/makepkg.sh.in
@@ -692,6 +692,8 @@ check_checksums() {
 extract_sources() {
 	msg "$(gettext "Extracting Sources...")"
 	local netfile
+	local -a jobs
+	local job
 	for netfile in "${source[@]}"; do
 		local file=$(get_filename "$netfile")
 		if in_array "$file" ${noextract[@]}; then
@@ -732,15 +734,26 @@ extract_sources() {
 				fi
 				;;
 		esac
-		local ret=0
 		msg2 "$(gettext "Extracting %s with %s")" "$file" "$cmd"
 		if [[ $cmd = bsdtar ]]; then
-			$cmd -xf "$file" || ret=$?
+			(
+				$cmd -xf "$file"
+			) &
+			job=$!
 		else
-			rm -f "${file%.*}"
-			$cmd -dcf "$file" > "${file%.*}" || ret=$?
+			(
+				rm -f "${file%.*}"
+				$cmd -dcf "$file" > "${file%.*}"
+			) &
+			job=$!
 		fi
-		if (( ret )); then
+		# push job id onto jobs stack
+		jobs[${#jobs[@]}]=$job
+	done
+
+	for job in ${jobs[@]}; do
+		wait $job
+		if (( $? )); then
 			error "$(gettext "Failed to extract %s")" "$file"
 			plain "$(gettext "Aborting...")"
 			exit 1
-- 
1.7.4.4