PERFORCE change 181439 for review
Julien LAFFAYE
jlaffaye at freebsd.org
Sun Jul 25 12:52:21 UTC 2010
On Sun, Jul 25, 2010 at 1:09 PM, Garrett Cooper <gcooper at freebsd.org> wrote:
> On Sat, Jul 24, 2010 at 1:51 PM, Julien Laffaye <jlaffaye at freebsd.org> wrote:
>> http://p4web.freebsd.org/@@181439?ac=10
>>
>> Change 181439 by jlaffaye at jlaffaye-chulak on 2010/07/24 20:51:13
>>
>> Add support for fetching packages. Read them on the fly thanks
>> to libarchive(3) callbacks.
>> Fix segfault by not plist_free'ing an unitialised plist.
>
> Was this perhaps a problem with the structure not being NULL'ed out
> after it was free'd?
Actually, on a particular error path I was trying to clean up the plist, but
it had not been initialized yet.
So pkg.head contained an arbitrary value (but not NULL), and
plist_free() started to follow
that memory address... which was indeed garbage.
>
>> Affected files ...
>>
>> .. //depot/projects/soc2010/pkg_complete/lib/libpkg/pkg.h#7 edit
>> .. //depot/projects/soc2010/pkg_complete/lib/libpkg/url.c#3 edit
>> .. //depot/projects/soc2010/pkg_complete/usr.sbin/pkg_install/add/extract.c#6 edit
>> .. //depot/projects/soc2010/pkg_complete/usr.sbin/pkg_install/add/perform.c#6 edit
>>
>> Differences ...
>>
>> ==== //depot/projects/soc2010/pkg_complete/lib/libpkg/pkg.h#7 (text+ko) ====
>>
>> @@ -33,6 +33,7 @@
>> #include <sys/stat.h>
>> #include <sys/queue.h>
>> #include <sys/utsname.h>
>> +#include <archive.h>
>
> This should be moved down below all of the standard system headers
> (libarchive might require some of the headers above.. fetch.h has
> similar requirements)...
From style(9), I can read:
"Leave a blank line before the next group, the /usr/include files,
which should be sorted
alphabetically by name".
But that's right, fetch(3) uses FILE, which is defined in <stdio.h>.
So I guess the point here is 'standard' as in C standard, or as in
"shipped with base".
Anyway, I must admit that the advised blank line is missing here ;p
>
>> #include <ctype.h>
>> #include <dirent.h>
>> #include <stdarg.h>
>> @@ -144,6 +145,12 @@
>> };
>> STAILQ_HEAD(reqr_by_head, reqr_by_entry);
>>
>> +struct fetch_data {
>> + FILE *ftp;
>> + int pkgfd;
>> + char buf[8192];
>> +};
>
> Using BUFSIZ might be a better idea.
I am not sure that BUFSIZ is meant to be used in this case.
On my system, its value is 1024, which is pretty low for network I/O IMHO.
Furthermore, I've experienced something "strange" (I might ask Tim):
on open, libarchive called the read callback ~50 times, which means
that libarchive
stored ~400KB somewhere in a buffer, while I had not yet called
next_header() nor read_data()...
Likewise, libarchive called the read callback ~10 times, then
extracted ~6 files, and so on.
I am confused because it's advertised as a no-copy architecture.
Anyway, it works and that's
all that matters.
>
>> +
>> /* Prototypes */
>> /* Misc */
>> int vsystem(const char *, ...);
>> @@ -173,6 +180,8 @@
>> Boolean issymlink(const char *);
>> Boolean isURL(const char *);
>> const char *fileGetURL(const char *, const char *, int);
>> +int fetch_archive(struct archive *, const char *, const char *,
>> + Boolean);
>> char *fileFindByPath(const char *, const char *);
>> char *fileGetContents(const char *);
>> ssize_t write_file(const char *, const char *);
>>
>> ==== //depot/projects/soc2010/pkg_complete/lib/libpkg/url.c#3 (text+ko) ====
>>
>> @@ -23,15 +23,23 @@
>>
>> #include <sys/param.h>
>> #include <sys/wait.h>
>> +#include <archive.h>
>> #include <err.h>
>> +#include <errno.h>
>> #include <libgen.h>
>> #include <stdio.h>
>> #include <fetch.h> /* NOTE: stdio must come before fetch. */
>> #include "pkg.h"
>>
>> +static ssize_t archive_read_cb(struct archive *, void *, const void **);
>> +static int archive_open_cb(struct archive *a, void *);
>> +static int archive_close_cb(struct archive *, void *);
>> +
>> /*
>> * Try and fetch a file by URL, returning the directory name for where
>> * it's unpacked, if successful.
>> + * XXX (jlaffaye): to be removed when all call to fileGetURL() are converted to
>> + * fetch_archive()
>> */
>> const char *
>> fileGetURL(const char *base, const char *spec, int keep_package)
>> @@ -113,11 +121,11 @@
>> fetchDebug = (Verbose > 0);
>> if ((ftp = fetchGetURL(fname, Verbose ? "v" : NULL)) == NULL) {
>> warnx("Error: Unable to get %s: %s\n", fname,
>> - fetchLastErrString);
>> + fetchLastErrString);
>> /* If the fetch fails, yank the package. */
>> if (keep_package && unlink(pkg) < 0) {
>> warnx("failed to remove partially fetched package: %s",
>> - pkg);
>> + pkg);
>> }
>> return (NULL);
>> }
>> @@ -182,3 +190,170 @@
>> return (rp);
>>
>> }
>> +
>> +/*
>> + * Setup the archive `a' callbacks to read data from an URL `spec' via fetch(3).
>> + * If `spec' is not an URL, the function try to find the location of the file
>> + * via `base' or via the environment variable `PKG_ADD_BASE'.
>> + * Returns 0 on success, 1 otherwise.
>> + */
>> +int
>> +fetch_archive(struct archive *a, const char *base, const char *spec,
>> + Boolean keep_package)
>> +{
>> + struct fetch_data *data = NULL;
>> + char *cp, *hint, *tmp;
>> + char fname[FILENAME_MAX];
>> + char pkg[FILENAME_MAX];
>> + int retcode = 0;
>> +
>> + if ((data = malloc(sizeof(struct fetch_data))) == NULL)
>> + err(EXIT_FAILURE, "malloc()");
>> +
>> + if (!isURL(spec)) {
>> + /*
>> + * We've been given an existing URL (that's known-good) and now
>> + * we need to construct a composite one out of that and the
>> + * basename we were handed as a dependency.
>> + */
>> + if (base != NULL) {
>> + strcpy(fname, base);
>> + /*
>> + * Advance back two slashes to get to the root of the
>> + * package hierarchy
>> + */
>> + cp = strrchr(fname, '/');
>> + if (cp) {
>> + *cp = '\0'; /* chop name */
>> + cp = strrchr(fname, '/');
>> + }
>> + if (cp != NULL) {
>> + *(cp + 1) = '\0';
>> + strcat(cp, "All/");
>> + strcat(cp, spec);
>> + strcat(cp, ".tbz");
>> + } else {
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> + }
>> + /* Special tip that sysinstall left for us */
>> + else if ((hint = getenv("PKG_ADD_BASE")) != NULL) {
>> + /*
>> + * Otherwise, we've been given an environment variable
>> + * hinting at the right location from sysinstall
>> + */
>> + strcpy(fname, hint);
>> + strcat(fname, spec);
>> + strcat(fname, ".tbz");
>> + }
>> + /* We dont have an url and are unable to guess one */
>> + else {
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> + }
>> + else
>> + strcpy(fname, spec);
>> +
>> + if (keep_package) {
>> + tmp = getenv("PKGDIR");
>> + strlcpy(pkg, tmp ? tmp : ".", sizeof(pkg));
>> + tmp = basename(fname);
>> + strlcat(pkg, "/", sizeof(pkg));
>> + strlcat(pkg, tmp, sizeof(pkg));
>> +
>> + data->pkgfd = open(pkg, O_WRONLY|O_CREAT|O_TRUNC, 0644);
>> + if (data->pkgfd == -1) {
>> + warn("Error: Unable to open %s", pkg);
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> + } else
>> + data->pkgfd = 0;
>> +
>> + fetchDebug = (Verbose > 0);
>> + if ((data->ftp = fetchGetURL(fname, Verbose ? "v" : NULL)) == NULL) {
>> + warnx("Error: Unable to get %s: %s\n", fname, fetchLastErrString);
>> + /* If the fetch fails, yank the package. */
>> + if (keep_package && unlink(pkg) < 0)
>> + warnx("failed to remove partially fetched package: %s", pkg);
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> +
>> + if (isatty(0) || Verbose) {
>> + printf("Fetching %s...", fname);
>> + fflush(stdout);
>> + }
>> +
>> + if (archive_read_open(a, data, archive_open_cb, archive_read_cb,
>> + archive_close_cb) != ARCHIVE_OK) {
>> + warnx("Can not open '%s': %s", pkg, archive_error_string(a));
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> +
>> + cleanup:
>> + if (retcode == 1 && data != NULL)
>> + free(data);
>> +
>> + return (retcode);
>> +}
>> +
>> +/*
>> + * Libarchive callback called when more data is needed.
>> + * Read the data from the fetch(3) file descriptor and store it into buf.
>> + * If `pkgfd' is a valid file descriptor, also write the data on disk.
>> + * Returns the read size, 0 on EOF, -1 on error.
>> + */
>> +static ssize_t
>> +archive_read_cb(struct archive *a, void *client_data, const void **buf)
>> +{
>> + ssize_t r;
>> + struct fetch_data *data = client_data;
>> +
>> + *buf = data->buf;
>> + if ((r = fread(data->buf, 1, sizeof(data->buf), data->ftp)) < 1)
>> + if (ferror(data->ftp)) {
>> + archive_set_error(a, 0, "error while fetching : %s",
>> + fetchLastErrString);
>> + return (-1);
>> + }
>> +
>> + if (data->pkgfd > 0 && r > 0)
>
> What if the pkgfd is <= 0 and r is > 0?
If pkgfd is <= 0 then we did not open the file on disk, because
KeepPackage was set to FALSE.
So we do nothing.
>
>> + if (write(data->pkgfd, buf, r) != r) {
>> + archive_set_error(a, 0, "can not write to package file: %s",
>> + strerror(errno));
>> + return (-1);
>> + }
>> +
>> + return (r);
>> +}
>> +
>> +/*
>> + * Libarchive callback called by archive_open()
>> + * Since all the job is done in fetch_archive(), always return success.
>> + */
>> +static int
>> +archive_open_cb(struct archive *a, void *client_data)
>> +{
>> + return (ARCHIVE_OK);
>> +}
>> +
>> +/*
>> + * Libarchive callback called by archive_close().
>> + * Release the file descriptors and free the structure.
>> + */
>> +static int
>> +archive_close_cb(struct archive *a, void *client_data)
>> +{
>> + struct fetch_data *data = client_data;
>> +
>> + fclose(data->ftp);
>> + if (data->pkgfd > 0)
>> + close(data->pkgfd);
>> + free(data);
>> +
>> + return (ARCHIVE_OK);
>> +}
>>
>> ==== //depot/projects/soc2010/pkg_complete/usr.sbin/pkg_install/add/extract.c#6 (text+ko) ====
>>
>> @@ -43,7 +43,8 @@
>> if ((plist_buf = malloc(s+1)) == NULL)
>> err(EXIT_FAILURE, "malloc()");
>> if (archive_read_data(a, plist_buf, s) != s) {
>> - warnx("Can not extract plist: %s", archive_error_string(a));
>> + warnx("Can not extract %s: %s", CONTENTS_FNAME,
>> + archive_error_string(a));
>> return (1);
>> }
>> plist_buf[s] = '\0';
>> @@ -52,7 +53,7 @@
>> retcode = read_plist_from_buffer(pkg, plist_buf, s);
>> free(plist_buf);
>> if (retcode != 0) {
>> - warnx("Unable to parse plist!");
>> + warnx("Unable to parse %s!", CONTENTS_FNAME);
>> return (1);
>> }
>>
>>
>> ==== //depot/projects/soc2010/pkg_complete/usr.sbin/pkg_install/add/perform.c#6 (text+ko) ====
>>
>> @@ -73,20 +73,23 @@
>>
>> /*
>> * TODO:
>> - * dowload the package if it is an URL, read from stdin if "-"
>> * Deal with master/slave modes.
>> * add support for complete packages
>> */
>> if (isURL(fname)) {
>> - /* TODO: add support */
>> - return (1);
>> + if (fetch_archive(a, NULL, fname, KeepPackage) != 0) {
>> + warnx("Can not fetch '%s' - aborting", fname);
>> + retcode = 1;
>> + goto cleanup;
>> + }
>> } else {
>> if (strcmp(fname, "-") == 0)
>> fd = fileno(stdin);
>> else {
>> if ((fd = open(fname, O_RDONLY)) == -1) {
>> - warn("open(%s)", fname);
>> - return (1);
>> + warn("Can not open '%s' for reading", fname);
>> + retcode = 1;
>> + goto cleanup;
>> }
>> }
>>
>> @@ -102,11 +105,12 @@
>> pathname = archive_entry_pathname(entry);
>> if (strcmp(pathname, CONTENTS_FNAME) == 0) {
>> if (extract_plist(a, entry, &pkg) != 0) {
>> - warnx("Can not extract & parse " CONTENTS_FNAME);
>> + warnx("Can not proceed without packing list - aborting");
>> retcode = 1;
>> goto cleanup;
>> }
>> - extract_package(a, &pkg);
>> + retcode = extract_package(a, &pkg);
>> + free_plist(&pkg);
>> } else if (strcmp(pathname, "+PKG_COMPLETE") == 0) {
>> if (Verbose)
>> printf("'%s' is a complete package...\n", fname);
>> @@ -126,8 +130,6 @@
>> cleanup:
>> if (a != NULL)
>> archive_read_finish(a);
>> - if (pkg.head != NULL)
>> - free_plist(&pkg);
>> return (retcode);
>>
>> # if 0
>>
>
More information about the p4-projects
mailing list