[pacman-dev] [PATCH] libalpm: download rates becoming negative
When a download fails on one mirror a new download is started on the next mirror. This new download will have the initial size of whatever has been downloaded so far, as well as the amount downloaded reset to 0.

To account for this, when a download changes mirror, save how much has been downloaded so far and add that to dlcb calls.
---
 lib/libalpm/dload.c | 14 ++++++++++++--
 lib/libalpm/dload.h |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..27a7748a 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -207,8 +207,8 @@ static int dload_progress_cb(void *file, curl_off_t dltotal, curl_off_t dlnow,
 	/* do NOT include initial_size since it wasn't part of the package's
 	 * download_size (nor included in the total download size callback) */
-	cb_data.total = dltotal;
-	cb_data.downloaded = dlnow;
+	cb_data.total = dltotal + payload->resetprogress;
+	cb_data.downloaded = dlnow + payload->resetprogress;
 	payload->handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_PROGRESS, &cb_data);
 
 	payload->prevprogress = current_size;
@@ -440,6 +440,16 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
+
+	/* reset progress for next server */
+	payload->resetprogress += payload->prevprogress - payload->initial_size;
+	payload->unlink_on_fail = 0;
+
+	if(payload->allow_resume) {
+		payload->initial_size = payload->prevprogress;
+	}
+
+
 	/* Set curl with the new URL */
 	curl_easy_setopt(curl, CURLOPT_URL, payload->fileurl);
 
diff --git a/lib/libalpm/dload.h b/lib/libalpm/dload.h
index 8f3d17b4..abfd9466 100644
--- a/lib/libalpm/dload.h
+++ b/lib/libalpm/dload.h
@@ -42,6 +42,7 @@ struct dload_payload {
 	off_t initial_size;
 	off_t max_size;
 	off_t prevprogress;
+	off_t resetprogress; /* progress made before we try a new server */
 	int force;
 	int allow_resume;
 	int errors_ok;
--
2.31.1
When the download estimate is over an hour the format displayed changes from mm:ss to hh:mm:ss. This causes everything to be out of alignment due to the extra characters.

So instead let's just go back to --:-- when the ETA is >= 100 minutes.
---
 src/pacman/callback.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..5e910136 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -813,10 +813,6 @@ static void draw_pacman_progress_bar(struct pacman_progress_bar *bar)
 	 * + 2 spaces + 4 for rate + 1 space + 3 for label + 2 for /s + 1 space +
 	 * 8 for eta, gives us the magic 33 */
 	filenamelen = infolen - 33;
-	/* see printf() code, we omit 'HH:' in these conditions */
-	if(eta_h == 0 || eta_h >= 100) {
-		filenamelen += 3;
-	}
 
 	/* In order to deal with characters from all locales, we have to worry
 	 * about wide characters and their column widths. A lot of stuff is
@@ -860,8 +856,8 @@ static void draw_pacman_progress_bar(struct pacman_progress_bar *bar)
 	}
 	if(eta_h == 0) {
 		printf("%02u:%02u", eta_m, eta_s);
-	} else if(eta_h < 100) {
-		printf("%02u:%02u:%02u", eta_h, eta_m, eta_s);
+	} else if(eta_h == 1 && eta_m < 40) {
+		printf("%02u:%02u", eta_m + 60, eta_s);
 	} else {
 		fputs("--:--", stdout);
 	}
--
2.31.1
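(To make the new behaviour concrete, here is a small self-contained C sketch of just the ETA-printing branch from the patch above; print_eta and main exist only for this illustration, the real logic lives in draw_pacman_progress_bar. Anything from 1h00m up to 1h39m still fits the fixed five-character field as 60-99 minutes, and everything longer collapses to --:--.)

#include <stdio.h>

/* illustration only: same branching as the patched draw_pacman_progress_bar */
static void print_eta(unsigned eta_h, unsigned eta_m, unsigned eta_s)
{
	if(eta_h == 0) {
		printf("%02u:%02u", eta_m, eta_s);
	} else if(eta_h == 1 && eta_m < 40) {
		/* 60 + eta_m is at most 99, so the field stays five characters wide */
		printf("%02u:%02u", eta_m + 60, eta_s);
	} else {
		fputs("--:--", stdout);
	}
}

int main(void)
{
	print_eta(0, 42, 10); putchar('\n'); /* 42:10 */
	print_eta(1, 35, 0);  putchar('\n'); /* 95:00 */
	print_eta(1, 40, 0);  putchar('\n'); /* --:-- */
	print_eta(2, 5, 30);  putchar('\n'); /* --:-- */
	return 0;
}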
On 04/18/21 at 11:31pm, morganamilo wrote:
When a download fails on one mirror a new download is started on the next mirror. This new download will have the initial size of whatever has been downloaded so far, as well as the amount downloaded reset to 0.

To account for this, when a download changes mirror, save how much has been downloaded so far and add that to dlcb calls.
---
 lib/libalpm/dload.c | 14 ++++++++++++--
 lib/libalpm/dload.h |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..27a7748a 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -207,8 +207,8 @@ static int dload_progress_cb(void *file, curl_off_t dltotal, curl_off_t dlnow,
 	/* do NOT include initial_size since it wasn't part of the package's
 	 * download_size (nor included in the total download size callback) */
-	cb_data.total = dltotal;
-	cb_data.downloaded = dlnow;
+	cb_data.total = dltotal + payload->resetprogress;
+	cb_data.downloaded = dlnow + payload->resetprogress;
 	payload->handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_PROGRESS, &cb_data);
 
 	payload->prevprogress = current_size;
@@ -440,6 +440,16 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
+
+	/* reset progress for next server */
+	payload->resetprogress += payload->prevprogress - payload->initial_size;
+	payload->unlink_on_fail = 0;
Without looking at the rest of the patch, how does this line not just straight up break unlink_on_fail functionality for any download that falls back to another server?
On 19/04/2021 05:53, Andrew Gregory wrote:
On 04/18/21 at 11:31pm, morganamilo wrote:
When a download fails on one mirror a new download is started on the next mirror. This new download will have the initial size of whatever has been downloaded so far, as well as the amount downloaded reset to 0.

To account for this, when a download changes mirror, save how much has been downloaded so far and add that to dlcb calls.
---
 lib/libalpm/dload.c | 14 ++++++++++++--
 lib/libalpm/dload.h |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..27a7748a 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -207,8 +207,8 @@ static int dload_progress_cb(void *file, curl_off_t dltotal, curl_off_t dlnow,
 	/* do NOT include initial_size since it wasn't part of the package's
 	 * download_size (nor included in the total download size callback) */
-	cb_data.total = dltotal;
-	cb_data.downloaded = dlnow;
+	cb_data.total = dltotal + payload->resetprogress;
+	cb_data.downloaded = dlnow + payload->resetprogress;
 	payload->handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_PROGRESS, &cb_data);
 
 	payload->prevprogress = current_size;
@@ -440,6 +440,16 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
+
+	/* reset progress for next server */
+	payload->resetprogress += payload->prevprogress - payload->initial_size;
+	payload->unlink_on_fail = 0;
Without looking at the rest of the patch, how does this line not just straight up break unlink_on_fail functionality for any download that falls back to another server?
Honestly don't really know. I was using e83e868a77865d42a33076605f9a90a165f7c93a as a base and it was done there.

I guess the point is that curl_check_finished_download() will set unlink_on_fail = 1 in most situations, so we reset it to 0 on retry knowing it may then be set to 1 again on the next check_finished.

Though I guess this breaks tempfile downloads which should always be unlink_on_fail = 1.

Also looking at the code above:

	if(payload->unlink_on_fail) {
		/* we keep the file for a new retry but remove its data if any */
		fflush(payload->localf);
		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
		}
		fseek(payload->localf, 0, SEEK_SET);
	}

I don't see why we'd want to wipe the file instead of trying to resume. In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
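(For reference, a minimal sketch of what resuming here could look like instead of truncating, using the payload fields above plus libcurl's CURLOPT_RESUME_FROM_LARGE; roughly the approach a later revision in this thread takes, shown only as an illustration:)

	struct stat st;
	fflush(payload->localf);
	if(payload->allow_resume && stat(payload->tempfile_name, &st) == 0) {
		/* continue the transfer from the end of the existing partial file */
		curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)st.st_size);
		payload->initial_size = st.st_size;
	}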
When the download estimate is over an hour the format displayed changes from mm:ss to hh:mm:ss. This causes everything to be out of alignment due to the extra characters.

So instead let's just go back to --:-- when the ETA is >= 100 minutes.
---
 src/pacman/callback.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..3c1d3f14 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -811,12 +811,8 @@ static void draw_pacman_progress_bar(struct pacman_progress_bar *bar)
 	/* 1 space + filenamelen + 1 space + 6 for size + 1 space + 3 for label +
 	 * + 2 spaces + 4 for rate + 1 space + 3 for label + 2 for /s + 1 space +
-	 * 8 for eta, gives us the magic 33 */
-	filenamelen = infolen - 33;
-	/* see printf() code, we omit 'HH:' in these conditions */
-	if(eta_h == 0 || eta_h >= 100) {
-		filenamelen += 3;
-	}
+	 * 5 for eta, gives us the magic 33 */
+	filenamelen = infolen - 30;
 
 	/* In order to deal with characters from all locales, we have to worry
 	 * about wide characters and their column widths. A lot of stuff is
@@ -860,8 +856,8 @@ static void draw_pacman_progress_bar(struct pacman_progress_bar *bar)
 	}
 	if(eta_h == 0) {
 		printf("%02u:%02u", eta_m, eta_s);
-	} else if(eta_h < 100) {
-		printf("%02u:%02u:%02u", eta_h, eta_m, eta_s);
+	} else if(eta_h == 1 && eta_m < 40) {
+		printf("%02u:%02u", eta_m + 60, eta_s);
 	} else {
 		fputs("--:--", stdout);
 	}
--
2.31.1
On 04/19/21 at 10:26am, Morgan Adamiec wrote:
On 19/04/2021 05:53, Andrew Gregory wrote:
On 04/18/21 at 11:31pm, morganamilo wrote:
When a download fails on one mirror a new download is started on the next mirror. This new download will have the initial size of whatever has been downloaded so far, as well as the amount downloaded reset to 0.

To account for this, when a download changes mirror, save how much has been downloaded so far and add that to dlcb calls.
---
 lib/libalpm/dload.c | 14 ++++++++++++--
 lib/libalpm/dload.h |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..27a7748a 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -207,8 +207,8 @@ static int dload_progress_cb(void *file, curl_off_t dltotal, curl_off_t dlnow,
 	/* do NOT include initial_size since it wasn't part of the package's
 	 * download_size (nor included in the total download size callback) */
-	cb_data.total = dltotal;
-	cb_data.downloaded = dlnow;
+	cb_data.total = dltotal + payload->resetprogress;
+	cb_data.downloaded = dlnow + payload->resetprogress;
 	payload->handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_PROGRESS, &cb_data);
 
 	payload->prevprogress = current_size;
@@ -440,6 +440,16 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
+
+	/* reset progress for next server */
+	payload->resetprogress += payload->prevprogress - payload->initial_size;
+	payload->unlink_on_fail = 0;
Without looking at the rest of the patch, how does this line not just straight up break unlink_on_fail functionality for any download that falls back to another server?
Honestly don't really know. I was using e83e868a77865d42a33076605f9a90a165f7c93a as a base and it was done there.
I guess the point is that curl_check_finished_download() will set unlink_on_fail = 1 in most situations, so we reset it to 0 on retry knowing it may then be set to 1 again on the next check_finished.
Though I guess this breaks tempfile downloads which should always be unlink_on_fail = 1.
Also looking at the code above:
	if(payload->unlink_on_fail) {
		/* we keep the file for a new retry but remove its data if any */
		fflush(payload->localf);
		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
		}
		fseek(payload->localf, 0, SEEK_SET);
	}
I don't see why we'd want to wipe the file instead of trying to resume.
Because the file may not be the same after we fall back to a different server.
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
Because the file may not be the same after we fall back to a different server.
That shouldn't be the case though. Do we actually support that use case?
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
I guess, though the above point applies: it may be a different mirror and therefore the file may not be the same. So I feel that if you can resume between pacman calls, you should be able to resume between servers.
On 04/19/21 at 01:32pm, Morgan Adamiec wrote:
Because the file may not be the same after we fall back to a different server.
That shouldn't be the case though. Do we actually support that use case?
Yes. Databases.
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
I guess, though the above point applies: it may be a different mirror and therefore the file may not be the same. So I feel that if you can resume between pacman calls, you should be able to resume between servers.
On 19/04/2021 13:39, Andrew Gregory wrote:
On 04/19/21 at 01:32pm, Morgan Adamiec wrote:
Because the file may not be the same after we fall back to a different server.
That shouldn't be the case though. Do we actually support that use case?
Yes. Databases.
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
I guess, though the above point applies: it may be a different mirror and therefore the file may not be the same. So I feel that if you can resume between pacman calls, you should be able to resume between servers.
Ah yeah I guess databases makes sense. But why not just have allow_resume as false for those downloads and allow package downloads to continue?
On 19/4/21 7:36 pm, morganamilo wrote:
When the download estimate is over an hour the format displayed changes from mm:ss to hh:mm:ss. This causes everything to be out of alignment due to the extra characters.
So instead let's just go back to --:-- when the ETA is >= 100 minutes.
---
 src/pacman/callback.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..3c1d3f14 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -811,12 +811,8 @@ static void draw_pacman_progress_bar(struct pacman_progress_bar *bar)
 	/* 1 space + filenamelen + 1 space + 6 for size + 1 space + 3 for label +
 	 * + 2 spaces + 4 for rate + 1 space + 3 for label + 2 for /s + 1 space +
-	 * 8 for eta, gives us the magic 33 */
-	filenamelen = infolen - 33;
-	/* see printf() code, we omit 'HH:' in these conditions */
-	if(eta_h == 0 || eta_h >= 100) {
-		filenamelen += 3;
-	}
+	 * 5 for eta, gives us the magic 33 */
I fixed this comment. Otherwise fine. A
Hi

On Mon, Apr 19, 2021, 02:46 Morgan Adamiec <morganamilo@archlinux.org> wrote:
On 19/04/2021 13:39, Andrew Gregory wrote:
On 04/19/21 at 01:32pm, Morgan Adamiec wrote:
Because the file may not be the same after we fall back to a different server.
That shouldn't be the case though. Do we actually support that use case?
Yes. Databases.
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
I guess, though the above point applies: it may be a different mirror and therefore the file may not be the same. So I feel that if you can resume between pacman calls, you should be able to resume between servers.
Ah yeah I guess databases makes sense. But why not just have allow_resume as false for those downloads and allow package downloads to continue?
If it cannot update the package database then there is not much sense in starting to download packages.
Hi

On Sun, Apr 18, 2021, 12:32 morganamilo <morganamilo@archlinux.org> wrote:
When a download fails on one mirror a new download is started on the next mirror. This new download will have the initial size of whatever has been downloaded so far, as well as the amount downloaded reset to 0.
Thanks for tracking down this bug. I wonder if it is the same issue that was reported a while ago ("UI is broken with slow downloads").
To account for this, when a download changes mirror, save how much has been downloaded so far and add that to dlcb calls.
---
 lib/libalpm/dload.c | 14 ++++++++++++--
 lib/libalpm/dload.h |  1 +
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..27a7748a 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -207,8 +207,8 @@ static int dload_progress_cb(void *file, curl_off_t dltotal, curl_off_t dlnow,
 	/* do NOT include initial_size since it wasn't part of the package's
 	 * download_size (nor included in the total download size callback) */
-	cb_data.total = dltotal;
-	cb_data.downloaded = dlnow;
+	cb_data.total = dltotal + payload->resetprogress;
+	cb_data.downloaded = dlnow + payload->resetprogress;
 	payload->handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_PROGRESS, &cb_data);
 
 	payload->prevprogress = current_size;
@@ -440,6 +440,16 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
+
+	/* reset progress for next server */
+	payload->resetprogress += payload->prevprogress - payload->initial_size;
+	payload->unlink_on_fail = 0;
+
+	if(payload->allow_resume) {
+		payload->initial_size = payload->prevprogress;
+	}
+
+
 	/* Set curl with the new URL */
 	curl_easy_setopt(curl, CURLOPT_URL, payload->fileurl);
 
diff --git a/lib/libalpm/dload.h b/lib/libalpm/dload.h
index 8f3d17b4..abfd9466 100644
--- a/lib/libalpm/dload.h
+++ b/lib/libalpm/dload.h
@@ -42,6 +42,7 @@ struct dload_payload {
 	off_t initial_size;
 	off_t max_size;
 	off_t prevprogress;
+	off_t resetprogress; /* progress made before we try a new server */
 	int force;
 	int allow_resume;
 	int errors_ok;
--
2.31.1
On 19/04/2021 17:08, Anatol Pomozov wrote:
Hi
On Mon, Apr 19, 2021, 02:46 Morgan Adamiec <morganamilo@archlinux.org> wrote:
On 19/04/2021 13:39, Andrew Gregory wrote:
On 04/19/21 at 01:32pm, Morgan Adamiec wrote:
Because the file may not be the same after we fall back to a different server.
That shouldn't be the case though. Do we actually support that use case?
Yes. Databases.
In fact I don't see the point of this variable at all. Why would we ever not want to unlink on fail?
So that it can be resumed later.
I guess, though the above point applies: it may be a different mirror and therefore the file may not be the same. So I feel that if you can resume between pacman calls, you should be able to resume between servers.
Ah yeah I guess databases makes sense. But why not just have allow_resume as false for those downloads and allow package downloads to continue?
If it cannot update the package database then there is not much sense in starting to download packages.
This is in curl_retry_next_server. It's about not clearing the progress when switching from one server to the next. A mirror may fail to download the database, but overall it can still succeed and continue on to the package downloads.
When a download fails on one mirror a new download is started on the next mirror. This causes the amount downloaded to reset, confusing the rate math and making it display a negative rate.

This is further complicated by the fact that a download may be resumed from where it is or started over.

To account for this we alert the frontend that the download was restarted. Pacman then starts the progress bar over.

diff --git a/lib/libalpm/alpm.h b/lib/libalpm/alpm.h
index 833df829..e9cad015 100644
--- a/lib/libalpm/alpm.h
+++ b/lib/libalpm/alpm.h
@@ -1169,6 +1169,8 @@ typedef enum _alpm_download_event_type_t {
 	ALPM_DOWNLOAD_INIT,
 	/** A download made progress */
 	ALPM_DOWNLOAD_PROGRESS,
+	/** Download will be retried */
+	ALPM_DOWNLOAD_RETRY,
 	/** A download completed */
 	ALPM_DOWNLOAD_COMPLETED
 } alpm_download_event_type_t;
@@ -1187,6 +1189,11 @@ typedef struct _alpm_download_event_progress_t {
 	off_t total;
 } alpm_download_event_progress_t;
 
+/** Context struct for when a download retries. */
+typedef struct _alpm_download_event_retry_t {
+	/** If the download will resume or start over */
+	int resume;
+} alpm_download_event_retry_t;
 
 /** Context struct for when a download completes. */
 typedef struct _alpm_download_event_completed_t {
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..98ea5b6f 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -431,7 +431,11 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 	MALLOC(payload->fileurl, len, RET_ERR(handle, ALPM_ERR_MEMORY, -1));
 	snprintf(payload->fileurl, len, "%s/%s", server, payload->filepath);
 
-	if(payload->unlink_on_fail) {
+	if(payload->allow_resume) {
+		/* reset progress for next server */
+		curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, payload->prevprogress);
+		payload->initial_size = payload->prevprogress;
+	} else {
 		/* we keep the file for a new retry but remove its data if any */
 		fflush(payload->localf);
 		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
@@ -440,6 +444,12 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 		fseek(payload->localf, 0, SEEK_SET);
 	}
 
+	if(handle->dlcb && !payload->signature) {
+		alpm_download_event_retry_t cb_data;
+		cb_data.resume = payload->allow_resume;
+		handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_RETRY, &cb_data);
+	}
+
 	/* Set curl with the new URL */
 	curl_easy_setopt(curl, CURLOPT_URL, payload->fileurl);
 
@@ -487,7 +497,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			_alpm_log(handle, ALPM_LOG_DEBUG, "%s: response code %ld\n",
 					payload->remote_name, payload->respcode);
 			if(payload->respcode >= 400) {
-				payload->unlink_on_fail = 1;
 				if(!payload->errors_ok) {
 					handle->pm_errno = ALPM_ERR_RETRIEVE;
 					/* non-translated message is same as libcurl */
@@ -502,6 +511,7 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 					(*active_downloads_num)++;
 					return 2;
 				} else {
+					payload->unlink_on_fail = 1;
 					goto cleanup;
 				}
 			}
@@ -519,7 +529,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			}
 			goto cleanup;
 		case CURLE_COULDNT_RESOLVE_HOST:
-			payload->unlink_on_fail = 1;
 			handle->pm_errno = ALPM_ERR_SERVER_BAD_URL;
 			_alpm_log(handle, ALPM_LOG_ERROR,
 					_("failed retrieving file '%s' from %s : %s\n"),
@@ -529,13 +538,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				payload->unlink_on_fail = 1;
 				goto cleanup;
 			}
 		default:
-			/* delete zero length downloads */
-			if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
-				payload->unlink_on_fail = 1;
-			}
 			if(!payload->errors_ok) {
 				handle->pm_errno = ALPM_ERR_LIBCURL;
 				_alpm_log(handle, ALPM_LOG_ERROR,
@@ -551,6 +557,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				/* delete zero length downloads */
+				if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
+					payload->unlink_on_fail = 1;
+				}
 				goto cleanup;
 			}
 	}
diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..19bcfe58 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -992,6 +992,34 @@ static void dload_progress_event(const char *filename, alpm_download_event_progr
 	fflush(stdout);
 }
 
+/* download retried */
+static void dload_retry_event(const char *filename, alpm_download_event_retry_t *data) {
+	if(!dload_progressbar_enabled()) {
+		return;
+	}
+
+	int index;
+	struct pacman_progress_bar *bar;
+	bool ok = find_bar_for_filename(filename, &index, &bar);
+	assert(ok);
+
+	if(!data->resume) {
+		if(total_enabled) {
+			/* TODO if !resume then we delete the file. this may make the new total larger */
+			totalbar->xfered -= bar->xfered;
+		}
+	}
+
+	bar->xfered = 0;
+	bar->total_size = 0;
+	bar->init_time = get_time_ms();
+	bar->sync_time = 0;
+	bar->sync_xfered = 0;
+	bar->rate = 0.0;
+	bar->eta = 0.0;
+}
+
+
 /* download completed */
 static void dload_complete_event(const char *filename, alpm_download_event_completed_t *data)
 {
@@ -1075,6 +1103,8 @@ void cb_download(const char *filename, alpm_download_event_type_t event, void *d
 		dload_init_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_PROGRESS) {
 		dload_progress_event(filename, data);
+	} else if(event == ALPM_DOWNLOAD_RETRY) {
+		dload_retry_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_COMPLETED) {
 		dload_complete_event(filename, data);
 	} else {
--
2.31.1
When a download fails on one mirror a new download is started on the next mirror. This causes the amount downloaded to reset, confusing the rate math and making it display a negative rate.

This is further complicated by the fact that a download may be resumed from where it is or started over.

To account for this we alert the frontend that the download was restarted. Pacman then starts the progress bar over.
---
v2: stat tempfile to get initial_size
---
 lib/libalpm/alpm.h    |  7 +++++++
 lib/libalpm/dload.c   | 33 +++++++++++++++++++++++++--------
 src/pacman/callback.c | 30 ++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 8 deletions(-)

diff --git a/lib/libalpm/alpm.h b/lib/libalpm/alpm.h
index 833df829..e9cad015 100644
--- a/lib/libalpm/alpm.h
+++ b/lib/libalpm/alpm.h
@@ -1169,6 +1169,8 @@ typedef enum _alpm_download_event_type_t {
 	ALPM_DOWNLOAD_INIT,
 	/** A download made progress */
 	ALPM_DOWNLOAD_PROGRESS,
+	/** Download will be retried */
+	ALPM_DOWNLOAD_RETRY,
 	/** A download completed */
 	ALPM_DOWNLOAD_COMPLETED
 } alpm_download_event_type_t;
@@ -1187,6 +1189,11 @@ typedef struct _alpm_download_event_progress_t {
 	off_t total;
 } alpm_download_event_progress_t;
 
+/** Context struct for when a download retries. */
+typedef struct _alpm_download_event_retry_t {
+	/** If the download will resume or start over */
+	int resume;
+} alpm_download_event_retry_t;
 
 /** Context struct for when a download completes. */
 typedef struct _alpm_download_event_completed_t {
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..5beea1d9 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -412,6 +412,7 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 {
 	const char *server;
 	size_t len;
+	struct stat st;
 	alpm_handle_t *handle = payload->handle;
 
 	while(payload->servers && should_skip_server(handle, payload->servers->data)) {
@@ -431,15 +432,31 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 	MALLOC(payload->fileurl, len, RET_ERR(handle, ALPM_ERR_MEMORY, -1));
 	snprintf(payload->fileurl, len, "%s/%s", server, payload->filepath);
 
-	if(payload->unlink_on_fail) {
+
+	fflush(payload->localf);
+
+	if(stat(payload->tempfile_name, &st) == 0 && payload->allow_resume) {
+		/* a previous partial download exists, resume from end of file. */
+		payload->tempfile_openmode = "ab";
+		curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)st.st_size);
+		_alpm_log(handle, ALPM_LOG_DEBUG,
+				"%s: tempfile found, attempting continuation from %jd bytes\n",
+				payload->remote_name, (intmax_t)st.st_size);
+		payload->initial_size = st.st_size;
+	} else {
 		/* we keep the file for a new retry but remove its data if any */
-		fflush(payload->localf);
 		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
 		}
 		fseek(payload->localf, 0, SEEK_SET);
 	}
 
+	if(handle->dlcb && !payload->signature) {
+		alpm_download_event_retry_t cb_data;
+		cb_data.resume = payload->allow_resume;
+		handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_RETRY, &cb_data);
+	}
+
 	/* Set curl with the new URL */
 	curl_easy_setopt(curl, CURLOPT_URL, payload->fileurl);
 
@@ -487,7 +504,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			_alpm_log(handle, ALPM_LOG_DEBUG, "%s: response code %ld\n",
 					payload->remote_name, payload->respcode);
 			if(payload->respcode >= 400) {
-				payload->unlink_on_fail = 1;
 				if(!payload->errors_ok) {
 					handle->pm_errno = ALPM_ERR_RETRIEVE;
 					/* non-translated message is same as libcurl */
@@ -502,6 +518,7 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 					(*active_downloads_num)++;
 					return 2;
 				} else {
+					payload->unlink_on_fail = 1;
 					goto cleanup;
 				}
 			}
@@ -519,7 +536,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			}
 			goto cleanup;
 		case CURLE_COULDNT_RESOLVE_HOST:
-			payload->unlink_on_fail = 1;
 			handle->pm_errno = ALPM_ERR_SERVER_BAD_URL;
 			_alpm_log(handle, ALPM_LOG_ERROR,
 					_("failed retrieving file '%s' from %s : %s\n"),
@@ -529,13 +545,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				payload->unlink_on_fail = 1;
 				goto cleanup;
 			}
 		default:
-			/* delete zero length downloads */
-			if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
-				payload->unlink_on_fail = 1;
-			}
 			if(!payload->errors_ok) {
 				handle->pm_errno = ALPM_ERR_LIBCURL;
 				_alpm_log(handle, ALPM_LOG_ERROR,
@@ -551,6 +564,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				/* delete zero length downloads */
+				if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
+					payload->unlink_on_fail = 1;
+				}
 				goto cleanup;
 			}
 	}
diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..19bcfe58 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -992,6 +992,34 @@ static void dload_progress_event(const char *filename, alpm_download_event_progr
 	fflush(stdout);
 }
 
+/* download retried */
+static void dload_retry_event(const char *filename, alpm_download_event_retry_t *data) {
+	if(!dload_progressbar_enabled()) {
+		return;
+	}
+
+	int index;
+	struct pacman_progress_bar *bar;
+	bool ok = find_bar_for_filename(filename, &index, &bar);
+	assert(ok);
+
+	if(!data->resume) {
+		if(total_enabled) {
+			/* TODO if !resume then we delete the file. this may make the new total larger */
+			totalbar->xfered -= bar->xfered;
+		}
+	}
+
+	bar->xfered = 0;
+	bar->total_size = 0;
+	bar->init_time = get_time_ms();
+	bar->sync_time = 0;
+	bar->sync_xfered = 0;
+	bar->rate = 0.0;
+	bar->eta = 0.0;
+}
+
+
 /* download completed */
 static void dload_complete_event(const char *filename, alpm_download_event_completed_t *data)
 {
@@ -1075,6 +1103,8 @@ void cb_download(const char *filename, alpm_download_event_type_t event, void *d
 		dload_init_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_PROGRESS) {
 		dload_progress_event(filename, data);
+	} else if(event == ALPM_DOWNLOAD_RETRY) {
+		dload_retry_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_COMPLETED) {
 		dload_complete_event(filename, data);
 	} else {
--
2.31.1
On 3/5/21 1:41 pm, morganamilo wrote:
When a download fails on one mirror a new download is started on the next mirror. This causes the amount downloaded to reset, confusing the rate math and making it display a negative rate.
This is further complicated by the fact that a download may be resumed from where it is or started over.
To account for this we alert the frontend that the download was restarted. Pacman then starts the progress bar over.
---
v2: stat tempfile to get initial_size
---
 lib/libalpm/alpm.h    |  7 +++++++
 lib/libalpm/dload.c   | 33 +++++++++++++++++++++++++--------
 src/pacman/callback.c | 30 ++++++++++++++++++++++++++++++
 3 files changed, 62 insertions(+), 8 deletions(-)
diff --git a/lib/libalpm/alpm.h b/lib/libalpm/alpm.h
index 833df829..e9cad015 100644
--- a/lib/libalpm/alpm.h
+++ b/lib/libalpm/alpm.h
@@ -1169,6 +1169,8 @@ typedef enum _alpm_download_event_type_t {
 	ALPM_DOWNLOAD_INIT,
 	/** A download made progress */
 	ALPM_DOWNLOAD_PROGRESS,
+	/** Download will be retried */
+	ALPM_DOWNLOAD_RETRY,
 	/** A download completed */
 	ALPM_DOWNLOAD_COMPLETED
 } alpm_download_event_type_t;
OK.
@@ -1187,6 +1189,11 @@ typedef struct _alpm_download_event_progress_t {
 	off_t total;
 } alpm_download_event_progress_t;
+/** Context struct for when a download retries. */
+typedef struct _alpm_download_event_retry_t {
+	/** If the download will resume or start over */
+	int resume;
+} alpm_download_event_retry_t;
 /** Context struct for when a download completes. */
 typedef struct _alpm_download_event_completed_t {
OK.
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c42f8d..5beea1d9 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -412,6 +412,7 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 {
 	const char *server;
 	size_t len;
+	struct stat st;
 	alpm_handle_t *handle = payload->handle;
 	while(payload->servers && should_skip_server(handle, payload->servers->data)) {
@@ -431,15 +432,31 @@ static int curl_retry_next_server(CURLM *curlm, CURL *curl, struct dload_payload
 	MALLOC(payload->fileurl, len, RET_ERR(handle, ALPM_ERR_MEMORY, -1));
 	snprintf(payload->fileurl, len, "%s/%s", server, payload->filepath);
-	if(payload->unlink_on_fail) {
+
+	fflush(payload->localf);
+
+	if(stat(payload->tempfile_name, &st) == 0 && payload->allow_resume) {
No point doing the stat unless we are resuming.

	if(payload->allow_resume) {
		if(stat(payload....)
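(One way to read that suggestion while keeping the behaviour of the quoted hunk, with the stat() short-circuited behind allow_resume; field names are the ones from the patch, the debug log line is elided, and this is only a sketch:)

	fflush(payload->localf);

	if(payload->allow_resume && stat(payload->tempfile_name, &st) == 0) {
		/* a previous partial download exists, resume from end of file */
		payload->tempfile_openmode = "ab";
		curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)st.st_size);
		payload->initial_size = st.st_size;
	} else {
		/* we keep the file for a new retry but remove its data if any */
		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
		}
		fseek(payload->localf, 0, SEEK_SET);
	}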
+		/* a previous partial download exists, resume from end of file. */
+		payload->tempfile_openmode = "ab";
+		curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t)st.st_size);
+		_alpm_log(handle, ALPM_LOG_DEBUG,
+				"%s: tempfile found, attempting continuation from %jd bytes\n",
+				payload->remote_name, (intmax_t)st.st_size);
+		payload->initial_size = st.st_size;
+	} else {
 		/* we keep the file for a new retry but remove its data if any */
-		fflush(payload->localf);
 		if(ftruncate(fileno(payload->localf), 0)) {
			RET_ERR(handle, ALPM_ERR_SYSTEM, -1);
 		}
 		fseek(payload->localf, 0, SEEK_SET);
 	}
OK.
+	if(handle->dlcb && !payload->signature) {
+		alpm_download_event_retry_t cb_data;
+		cb_data.resume = payload->allow_resume;
+		handle->dlcb(payload->remote_name, ALPM_DOWNLOAD_RETRY, &cb_data);
This was changed by Andrew's recent patch:

	handle->dlcb(handle->dlcb_ctx, payload->remote_name, ...
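(After rebasing on that change the hunk would presumably end up something like the following, with handle->dlcb_ctx taken from Andrew's patch; just a sketch of the expected result:)

	if(handle->dlcb && !payload->signature) {
		alpm_download_event_retry_t cb_data;
		cb_data.resume = payload->allow_resume;
		handle->dlcb(handle->dlcb_ctx, payload->remote_name, ALPM_DOWNLOAD_RETRY, &cb_data);
	}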
+	}
+
 	/* Set curl with the new URL */
 	curl_easy_setopt(curl, CURLOPT_URL, payload->fileurl);
@@ -487,7 +504,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			_alpm_log(handle, ALPM_LOG_DEBUG, "%s: response code %ld\n",
 					payload->remote_name, payload->respcode);
 			if(payload->respcode >= 400) {
-				payload->unlink_on_fail = 1;
 				if(!payload->errors_ok) {
 					handle->pm_errno = ALPM_ERR_RETRIEVE;
 					/* non-translated message is same as libcurl */
@@ -502,6 +518,7 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 					(*active_downloads_num)++;
 					return 2;
 				} else {
+					payload->unlink_on_fail = 1;
 					goto cleanup;
 				}
 			}
Hrm... This looks OK. But possibly this needs to be fixed in a separate patch?
@@ -519,7 +536,6 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 			}
 			goto cleanup;
 		case CURLE_COULDNT_RESOLVE_HOST:
-			payload->unlink_on_fail = 1;
 			handle->pm_errno = ALPM_ERR_SERVER_BAD_URL;
 			_alpm_log(handle, ALPM_LOG_ERROR,
 					_("failed retrieving file '%s' from %s : %s\n"),
@@ -529,13 +545,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				payload->unlink_on_fail = 1;
 				goto cleanup;
 			}
OK
 		default:
-			/* delete zero length downloads */
-			if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
-				payload->unlink_on_fail = 1;
-			}
 			if(!payload->errors_ok) {
 				handle->pm_errno = ALPM_ERR_LIBCURL;
 				_alpm_log(handle, ALPM_LOG_ERROR,
@@ -551,6 +564,10 @@ static int curl_check_finished_download(CURLM *curlm, CURLMsg *msg,
 				(*active_downloads_num)++;
 				return 2;
 			} else {
+				/* delete zero length downloads */
+				if(fstat(fileno(payload->localf), &st) == 0 && st.st_size == 0) {
+					payload->unlink_on_fail = 1;
+				}
 				goto cleanup;
 			}
 	}
OK
diff --git a/src/pacman/callback.c b/src/pacman/callback.c
index a28a79a9..19bcfe58 100644
--- a/src/pacman/callback.c
+++ b/src/pacman/callback.c
@@ -992,6 +992,34 @@ static void dload_progress_event(const char *filename, alpm_download_event_progr
 	fflush(stdout);
 }
+/* download retried */
+static void dload_retry_event(const char *filename, alpm_download_event_retry_t *data) {
+	if(!dload_progressbar_enabled()) {
+		return;
+	}
+
+	int index;
+	struct pacman_progress_bar *bar;
+	bool ok = find_bar_for_filename(filename, &index, &bar);
+	assert(ok);
+
+	if(!data->resume) {
+		if(total_enabled) {
This is always enabled now.
+ /* TODO if !resume then we delete the file. this may make the new total larger */
I don't think this is a TODO, but rather a note. Change this to a comment:

	/* note total download does not reflect partial downloads that are restarted */
+			totalbar->xfered -= bar->xfered;
+		}
+	}
+
+	bar->xfered = 0;
+	bar->total_size = 0;
+	bar->init_time = get_time_ms();
+	bar->sync_time = 0;
+	bar->sync_xfered = 0;
+	bar->rate = 0.0;
+	bar->eta = 0.0;
OK.
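(Folding in the earlier note that total_enabled is always on, plus the comment wording above, the handler body would presumably end up roughly like this; a sketch, not the committed version:)

	if(!data->resume) {
		/* note total download does not reflect partial downloads that are restarted */
		totalbar->xfered -= bar->xfered;
	}

	bar->xfered = 0;
	bar->total_size = 0;
	bar->init_time = get_time_ms();
	bar->sync_time = 0;
	bar->sync_xfered = 0;
	bar->rate = 0.0;
	bar->eta = 0.0;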
+}
+
+
 /* download completed */
 static void dload_complete_event(const char *filename, alpm_download_event_completed_t *data)
 {
@@ -1075,6 +1103,8 @@ void cb_download(const char *filename, alpm_download_event_type_t event, void *d
 		dload_init_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_PROGRESS) {
 		dload_progress_event(filename, data);
+	} else if(event == ALPM_DOWNLOAD_RETRY) {
+		dload_retry_event(filename, data);
 	} else if(event == ALPM_DOWNLOAD_COMPLETED) {
 		dload_complete_event(filename, data);
 	} else {
OK
participants (5)
- Allan McRae
- Anatol Pomozov
- Andrew Gregory
- Morgan Adamiec
- morganamilo