diff --git a/Documentation/config.txt b/Documentation/config.txt index bf82766a6a272d..a1efd744cd88d3 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -440,6 +440,8 @@ include::config/rerere.txt[] include::config/reset.txt[] +include::config/safe.txt[] + include::config/sendemail.txt[] include::config/sequencer.txt[] diff --git a/Documentation/config.txt.orig b/Documentation/config.txt.orig new file mode 100644 index 00000000000000..bf82766a6a272d --- /dev/null +++ b/Documentation/config.txt.orig @@ -0,0 +1,479 @@ +CONFIGURATION FILE +------------------ + +The Git configuration file contains a number of variables that affect +the Git commands' behavior. The files `.git/config` and optionally +`config.worktree` (see the "CONFIGURATION FILE" section of +linkgit:git-worktree[1]) in each repository are used to store the +configuration for that repository, and `$HOME/.gitconfig` is used to +store a per-user configuration as fallback values for the `.git/config` +file. The file `/etc/gitconfig` can be used to store a system-wide +default configuration. + +The configuration variables are used by both the Git plumbing +and the porcelains. The variables are divided into sections, wherein +the fully qualified variable name of the variable itself is the last +dot-separated segment and the section name is everything before the last +dot. The variable names are case-insensitive, allow only alphanumeric +characters and `-`, and must start with an alphabetic character. Some +variables may appear multiple times; we say then that the variable is +multivalued. + +Syntax +~~~~~~ + +The syntax is fairly flexible and permissive; whitespaces are mostly +ignored. The '#' and ';' characters begin comments to the end of line, +blank lines are ignored. + +The file consists of sections and variables. A section begins with +the name of the section in square brackets and continues until the next +section begins. Section names are case-insensitive. Only alphanumeric +characters, `-` and `.` are allowed in section names. Each variable +must belong to some section, which means that there must be a section +header before the first setting of a variable. + +Sections can be further divided into subsections. To begin a subsection +put its name in double quotes, separated by space from the section name, +in the section header, like in the example below: + +-------- + [section "subsection"] + +-------- + +Subsection names are case sensitive and can contain any characters except +newline and the null byte. Doublequote `"` and backslash can be included +by escaping them as `\"` and `\\`, respectively. Backslashes preceding +other characters are dropped when reading; for example, `\t` is read as +`t` and `\0` is read as `0`. Section headers cannot span multiple lines. +Variables may belong directly to a section or to a given subsection. You +can have `[section]` if you have `[section "subsection"]`, but you don't +need to. + +There is also a deprecated `[section.subsection]` syntax. With this +syntax, the subsection name is converted to lower-case and is also +compared case sensitively. These subsection names follow the same +restrictions as section names. + +All the other lines (and the remainder of the line after the section +header) are recognized as setting variables, in the form +'name = value' (or just 'name', which is a short-hand to say that +the variable is the boolean "true"). 
+The variable names are case-insensitive, allow only alphanumeric characters +and `-`, and must start with an alphabetic character. + +A line that defines a value can be continued to the next line by +ending it with a `\`; the backslash and the end-of-line are +stripped. Leading whitespaces after 'name =', the remainder of the +line after the first comment character '#' or ';', and trailing +whitespaces of the line are discarded unless they are enclosed in +double quotes. Internal whitespaces within the value are retained +verbatim. + +Inside double quotes, double quote `"` and backslash `\` characters +must be escaped: use `\"` for `"` and `\\` for `\`. + +The following escape sequences (beside `\"` and `\\`) are recognized: +`\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) +and `\b` for backspace (BS). Other char escape sequences (including octal +escape sequences) are invalid. + + +Includes +~~~~~~~~ + +The `include` and `includeIf` sections allow you to include config +directives from another source. These sections behave identically to +each other with the exception that `includeIf` sections may be ignored +if their condition does not evaluate to true; see "Conditional includes" +below. + +You can include a config file from another by setting the special +`include.path` (or `includeIf.*.path`) variable to the name of the file +to be included. The variable takes a pathname as its value, and is +subject to tilde expansion. These variables can be given multiple times. + +The contents of the included file are inserted immediately, as if they +had been found at the location of the include directive. If the value of the +variable is a relative path, the path is considered to +be relative to the configuration file in which the include directive +was found. See below for examples. + +Conditional includes +~~~~~~~~~~~~~~~~~~~~ + +You can include a config file from another conditionally by setting a +`includeIf.<condition>.path` variable to the name of the file to be +included. + +The condition starts with a keyword followed by a colon and some data +whose format and meaning depends on the keyword. Supported keywords +are: + +`gitdir`:: + + The data that follows the keyword `gitdir:` is used as a glob + pattern. If the location of the .git directory matches the + pattern, the include condition is met. ++ +The .git location may be auto-discovered, or come from `$GIT_DIR` +environment variable. If the repository is auto discovered via a .git +file (e.g. from submodules, or a linked worktree), the .git location +would be the final location where the .git directory is, not where the +.git file is. ++ +The pattern can contain standard globbing wildcards and two additional +ones, `**/` and `/**`, that can match multiple path components. Please +refer to linkgit:gitignore[5] for details. For convenience: + + * If the pattern starts with `~/`, `~` will be substituted with the + content of the environment variable `HOME`. + + * If the pattern starts with `./`, it is replaced with the directory + containing the current config file. + + * If the pattern does not start with either `~/`, `./` or `/`, `**/` + will be automatically prepended. For example, the pattern `foo/bar` + becomes `**/foo/bar` and would match `/any/path/to/foo/bar`. + + * If the pattern ends with `/`, `**` will be automatically added. For + example, the pattern `foo/` becomes `foo/**`. In other words, it + matches "foo" and everything inside, recursively.
+ +`gitdir/i`:: + This is the same as `gitdir` except that matching is done + case-insensitively (e.g. on case-insensitive file systems) + +`onbranch`:: + The data that follows the keyword `onbranch:` is taken to be a + pattern with standard globbing wildcards and two additional + ones, `**/` and `/**`, that can match multiple path components. + If we are in a worktree where the name of the branch that is + currently checked out matches the pattern, the include condition + is met. ++ +If the pattern ends with `/`, `**` will be automatically added. For +example, the pattern `foo/` becomes `foo/**`. In other words, it matches +all branches that begin with `foo/`. This is useful if your branches are +organized hierarchically and you would like to apply a configuration to +all the branches in that hierarchy. + +A few more notes on matching via `gitdir` and `gitdir/i`: + + * Symlinks in `$GIT_DIR` are not resolved before matching. + + * Both the symlink & realpath versions of paths will be matched + outside of `$GIT_DIR`. E.g. if ~/git is a symlink to + /mnt/storage/git, both `gitdir:~/git` and `gitdir:/mnt/storage/git` + will match. ++ +This was not the case in the initial release of this feature in +v2.13.0, which only matched the realpath version. Configuration that +wants to be compatible with the initial release of this feature needs +to either specify only the realpath version, or both versions. + + * Note that "../" is not special and will match literally, which is + unlikely what you want. + +Example +~~~~~~~ + +---- +# Core variables +[core] + ; Don't trust file modes + filemode = false + +# Our diff algorithm +[diff] + external = /usr/local/bin/diff-wrapper + renames = true + +[branch "devel"] + remote = origin + merge = refs/heads/devel + +# Proxy settings +[core] + gitProxy="ssh" for "kernel.org" + gitProxy=default-proxy ; for the rest + +[include] + path = /path/to/foo.inc ; include by absolute path + path = foo.inc ; find "foo.inc" relative to the current file + path = ~/foo.inc ; find "foo.inc" in your `$HOME` directory + +; include if $GIT_DIR is /path/to/foo/.git +[includeIf "gitdir:/path/to/foo/.git"] + path = /path/to/foo.inc + +; include for all repositories inside /path/to/group +[includeIf "gitdir:/path/to/group/"] + path = /path/to/foo.inc + +; include for all repositories inside $HOME/to/group +[includeIf "gitdir:~/to/group/"] + path = /path/to/foo.inc + +; relative paths are always relative to the including +; file (if the condition is true); their location is not +; affected by the condition +[includeIf "gitdir:/path/to/group/"] + path = foo.inc + +; include only if we are in a worktree where foo-branch is +; currently checked out +[includeIf "onbranch:foo-branch"] + path = foo.inc +---- + +Values +~~~~~~ + +Values of many variables are treated as a simple string, but there +are variables that take values of specific types and there are rules +as to how to spell them. + +boolean:: + + When a variable is said to take a boolean value, many + synonyms are accepted for 'true' and 'false'; these are all + case-insensitive. + + true;; Boolean true literals are `yes`, `on`, `true`, + and `1`. Also, a variable defined without `= ` + is taken as true. + + false;; Boolean false literals are `no`, `off`, `false`, + `0` and the empty string. ++ +When converting a value to its canonical form using the `--type=bool` type +specifier, 'git config' will ensure that the output is "true" or +"false" (spelled in lowercase). 
+ +integer:: + The value for many variables that specify various sizes can + be suffixed with `k`, `M`,... to mean "scale the number by + 1024", "by 1024x1024", etc. + +color:: + The value for a variable that takes a color is a list of + colors (at most two, one for foreground and one for background) + and attributes (as many as you want), separated by spaces. ++ +The basic colors accepted are `normal`, `black`, `red`, `green`, `yellow`, +`blue`, `magenta`, `cyan` and `white`. The first color given is the +foreground; the second is the background. All the basic colors except +`normal` have a bright variant that can be specified by prefixing the +color with `bright`, like `brightred`. ++ +Colors may also be given as numbers between 0 and 255; these use ANSI +256-color mode (but note that not all terminals may support this). If +your terminal supports it, you may also specify 24-bit RGB values as +hex, like `#ff0ab3`. ++ +The accepted attributes are `bold`, `dim`, `ul`, `blink`, `reverse`, +`italic`, and `strike` (for crossed-out or "strikethrough" letters). +The position of any attributes with respect to the colors +(before, after, or in between), doesn't matter. Specific attributes may +be turned off by prefixing them with `no` or `no-` (e.g., `noreverse`, +`no-ul`, etc). ++ +An empty color string produces no color effect at all. This can be used +to avoid coloring specific elements without disabling color entirely. ++ +For git's pre-defined color slots, the attributes are meant to be reset +at the beginning of each item in the colored output. So setting +`color.decorate.branch` to `black` will paint that branch name in a +plain `black`, even if the previous thing on the same output line (e.g. +opening parenthesis before the list of branch names in `log --decorate` +output) is set to be painted with `bold` or some other attribute. +However, custom log formats may do more complicated and layered +coloring, and the negated forms may be useful there. + +pathname:: + A variable that takes a pathname value can be given a + string that begins with "`~/`" or "`~user/`", and the usual + tilde expansion happens to such a string: `~/` + is expanded to the value of `$HOME`, and `~user/` to the + specified user's home directory. + + +Variables +~~~~~~~~~ + +Note that this list is non-comprehensive and not necessarily complete. +For command-specific variables, you will find a more detailed description +in the appropriate manual page. + +Other git-related tools may and do use their own variables. When +inventing new variables for use in your own tool, make sure their +names do not conflict with those that are used by Git itself and +other popular tools, and describe them in your documentation. 
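As a quick illustration of the value types described above (boolean, integer, color and pathname), a configuration file using them might look like the sketch below. This is an illustrative aside in the same spirit as the Example section earlier, not part of the patch itself: the variable names (`core.filemode`, `core.packedGitLimit`, `color.diff.meta`, `commit.template`) are real Git settings chosen only as plausible carriers for each type, and the values are arbitrary.

----
[core]
	; boolean: `yes`, `on`, `true` and `1` all count as "true"
	filemode = yes
	; integer: size values take a `k`/`m`/`g` scaling suffix
	packedGitLimit = 128m
[color "diff"]
	; color: attributes plus foreground (and optional background) colors
	meta = bold yellow
[commit]
	; pathname: `~/` is tilde-expanded to `$HOME`
	template = ~/commit-template.txt
----

Reading such a value back with `git config --type=bool core.filemode` would print the canonical form `true`, as described under "boolean" above.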
+ +include::config/advice.txt[] + +include::config/core.txt[] + +include::config/add.txt[] + +include::config/alias.txt[] + +include::config/am.txt[] + +include::config/apply.txt[] + +include::config/blame.txt[] + +include::config/branch.txt[] + +include::config/browser.txt[] + +include::config/checkout.txt[] + +include::config/clean.txt[] + +include::config/clone.txt[] + +include::config/color.txt[] + +include::config/column.txt[] + +include::config/commit.txt[] + +include::config/commitgraph.txt[] + +include::config/credential.txt[] + +include::config/completion.txt[] + +include::config/diff.txt[] + +include::config/difftool.txt[] + +include::config/extensions.txt[] + +include::config/fastimport.txt[] + +include::config/feature.txt[] + +include::config/fetch.txt[] + +include::config/format.txt[] + +include::config/filter.txt[] + +include::config/fsck.txt[] + +include::config/gc.txt[] + +include::config/gitcvs.txt[] + +include::config/gitweb.txt[] + +include::config/grep.txt[] + +include::config/gpg.txt[] + +include::config/gui.txt[] + +include::config/guitool.txt[] + +include::config/help.txt[] + +include::config/http.txt[] + +include::config/i18n.txt[] + +include::config/imap.txt[] + +include::config/index.txt[] + +include::config/init.txt[] + +include::config/instaweb.txt[] + +include::config/interactive.txt[] + +include::config/log.txt[] + +include::config/lsrefs.txt[] + +include::config/mailinfo.txt[] + +include::config/mailmap.txt[] + +include::config/maintenance.txt[] + +include::config/man.txt[] + +include::config/merge.txt[] + +include::config/mergetool.txt[] + +include::config/notes.txt[] + +include::config/pack.txt[] + +include::config/pager.txt[] + +include::config/pretty.txt[] + +include::config/protocol.txt[] + +include::config/pull.txt[] + +include::config/push.txt[] + +include::config/rebase.txt[] + +include::config/receive.txt[] + +include::config/remote.txt[] + +include::config/remotes.txt[] + +include::config/repack.txt[] + +include::config/rerere.txt[] + +include::config/reset.txt[] + +include::config/sendemail.txt[] + +include::config/sequencer.txt[] + +include::config/showbranch.txt[] + +include::config/splitindex.txt[] + +include::config/ssh.txt[] + +include::config/status.txt[] + +include::config/stash.txt[] + +include::config/submodule.txt[] + +include::config/tag.txt[] + +include::config/tar.txt[] + +include::config/trace2.txt[] + +include::config/transfer.txt[] + +include::config/uploadarchive.txt[] + +include::config/uploadpack.txt[] + +include::config/url.txt[] + +include::config/user.txt[] + +include::config/versionsort.txt[] + +include::config/web.txt[] + +include::config/worktree.txt[] diff --git a/Documentation/config/protocol.txt b/Documentation/config/protocol.txt index 756591d77b080c..799389132f07b5 100644 --- a/Documentation/config/protocol.txt +++ b/Documentation/config/protocol.txt @@ -1,10 +1,10 @@ protocol.allow:: If set, provide a user defined default policy for all protocols which don't explicitly have a policy (`protocol..allow`). By default, - if unset, known-safe protocols (http, https, git, ssh, file) have a + if unset, known-safe protocols (http, https, git, ssh) have a default policy of `always`, known-dangerous protocols (ext) have a - default policy of `never`, and all other protocols have a default - policy of `user`. Supported policies: + default policy of `never`, and all other protocols (including file) + have a default policy of `user`. 
Supported policies: + -- diff --git a/Documentation/config/safe.txt b/Documentation/config/safe.txt new file mode 100644 index 00000000000000..6d764fe0ccf3a8 --- /dev/null +++ b/Documentation/config/safe.txt @@ -0,0 +1,28 @@ +safe.directory:: + These config entries specify Git-tracked directories that are + considered safe even if they are owned by someone other than the + current user. By default, Git will refuse to even parse a Git + config of a repository owned by someone else, let alone run its + hooks, and this config setting allows users to specify exceptions, + e.g. for intentionally shared repositories (see the `--shared` + option in linkgit:git-init[1]). ++ +This is a multi-valued setting, i.e. you can add more than one directory +via `git config --add`. To reset the list of safe directories (e.g. to +override any such directories specified in the system config), add a +`safe.directory` entry with an empty value. ++ +This config setting is only respected when specified in a system or global +config, not when it is specified in a repository config or via the command +line option `-c safe.directory=<path>`. ++ +The value of this setting is interpolated, i.e. `~/` expands to a +path relative to the home directory and `%(prefix)/` expands to a +path relative to Git's (runtime) prefix. ++ +To completely opt-out of this security check, set `safe.directory` to the +string `*`. This will allow all repositories to be treated as if their +directory was listed in the `safe.directory` list. If `safe.directory=*` +is set in system config and you want to re-enable this protection, then +initialize your list with an empty value before listing the repositories +that you deem safe. diff --git a/alias.c b/alias.c index c4715380205b5f..00abde08173943 100644 --- a/alias.c +++ b/alias.c @@ -46,14 +46,16 @@ void list_aliases(struct string_list *list) #define SPLIT_CMDLINE_BAD_ENDING 1 #define SPLIT_CMDLINE_UNCLOSED_QUOTE 2 +#define SPLIT_CMDLINE_ARGC_OVERFLOW 3 static const char *split_cmdline_errors[] = { N_("cmdline ends with \\"), - N_("unclosed quote") + N_("unclosed quote"), + N_("too many arguments"), }; int split_cmdline(char *cmdline, const char ***argv) { - int src, dst, count = 0, size = 16; + size_t src, dst, count = 0, size = 16; char quoted = 0; ALLOC_ARRAY(*argv, size); @@ -96,6 +98,11 @@ int split_cmdline(char *cmdline, const char ***argv) return -SPLIT_CMDLINE_UNCLOSED_QUOTE; } + if (count >= INT_MAX) { + FREE_AND_NULL(*argv); + return -SPLIT_CMDLINE_ARGC_OVERFLOW; + } + ALLOC_GROW(*argv, count + 1, size); (*argv)[count] = NULL; diff --git a/apply.c b/apply.c index 43a0aebf4eec85..81ac28fd7430df 100644 --- a/apply.c +++ b/apply.c @@ -4424,6 +4424,33 @@ static int create_one_file(struct apply_state *state, if (state->cached) return 0; + /* + * We already try to detect whether files are beyond a symlink in our + * up-front checks. But in the case where symlinks are created by any + * of the intermediate hunks it can happen that our up-front checks + * didn't yet see the symlink, but at the point of arriving here there + * in fact is one. We thus repeat the check for symlinks here. + * + * Note that this does not make the up-front check obsolete as the + * failure mode is different: + * + * - The up-front checks cause us to abort before we have written + * anything into the working directory. So when we exit this way the + * working directory remains clean. + * + * - The checks here happen in the middle of the action where we have + * already started to apply the patch.
The end result will be a dirty + * working directory. + * + * Ideally, we should update the up-front checks to catch what would + * happen when we apply the patch before we damage the working tree. + * We have all the information necessary to do so. But for now, as a + * part of embargoed security work, having this check would serve as a + * reasonable first step. + */ + if (path_is_beyond_symlink(state, path)) + return error(_("affected file '%s' is beyond a symbolic link"), path); + res = try_create_file(state, path, mode, buf, size); if (res < 0) return -1; @@ -4555,7 +4582,7 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch) FILE *rej; char namebuf[PATH_MAX]; struct fragment *frag; - int cnt = 0; + int fd, cnt = 0; struct strbuf sb = STRBUF_INIT; for (cnt = 0, frag = patch->fragments; frag; frag = frag->next) { @@ -4595,7 +4622,17 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch) memcpy(namebuf, patch->new_name, cnt); memcpy(namebuf + cnt, ".rej", 5); - rej = fopen(namebuf, "w"); + fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666); + if (fd < 0) { + if (errno != EEXIST) + return error_errno(_("cannot open %s"), namebuf); + if (unlink(namebuf)) + return error_errno(_("cannot unlink '%s'"), namebuf); + fd = open(namebuf, O_CREAT | O_EXCL | O_WRONLY, 0666); + if (fd < 0) + return error_errno(_("cannot open %s"), namebuf); + } + rej = fdopen(fd, "w"); if (!rej) return error_errno(_("cannot open %s"), namebuf); diff --git a/apply.c.orig b/apply.c.orig new file mode 100644 index 00000000000000..98505d1bfea01b --- /dev/null +++ b/apply.c.orig @@ -0,0 +1,5115 @@ +/* + * apply.c + * + * Copyright (C) Linus Torvalds, 2005 + * + * This applies patches on top of some (arbitrary) version of the SCM. + * + */ + +#include "cache.h" +#include "config.h" +#include "object-store.h" +#include "blob.h" +#include "delta.h" +#include "diff.h" +#include "dir.h" +#include "xdiff-interface.h" +#include "ll-merge.h" +#include "lockfile.h" +#include "parse-options.h" +#include "quote.h" +#include "rerere.h" +#include "apply.h" +#include "entry.h" + +struct gitdiff_data { + struct strbuf *root; + int linenr; + int p_value; +}; + +static void git_apply_config(void) +{ + git_config_get_string("apply.whitespace", &apply_default_whitespace); + git_config_get_string("apply.ignorewhitespace", &apply_default_ignorewhitespace); + git_config(git_xmerge_config, NULL); +} + +static int parse_whitespace_option(struct apply_state *state, const char *option) +{ + if (!option) { + state->ws_error_action = warn_on_ws_error; + return 0; + } + if (!strcmp(option, "warn")) { + state->ws_error_action = warn_on_ws_error; + return 0; + } + if (!strcmp(option, "nowarn")) { + state->ws_error_action = nowarn_ws_error; + return 0; + } + if (!strcmp(option, "error")) { + state->ws_error_action = die_on_ws_error; + return 0; + } + if (!strcmp(option, "error-all")) { + state->ws_error_action = die_on_ws_error; + state->squelch_whitespace_errors = 0; + return 0; + } + if (!strcmp(option, "strip") || !strcmp(option, "fix")) { + state->ws_error_action = correct_ws_error; + return 0; + } + /* + * Please update $__git_whitespacelist in git-completion.bash + * when you add new options. 
+ */ + return error(_("unrecognized whitespace option '%s'"), option); +} + +static int parse_ignorewhitespace_option(struct apply_state *state, + const char *option) +{ + if (!option || !strcmp(option, "no") || + !strcmp(option, "false") || !strcmp(option, "never") || + !strcmp(option, "none")) { + state->ws_ignore_action = ignore_ws_none; + return 0; + } + if (!strcmp(option, "change")) { + state->ws_ignore_action = ignore_ws_change; + return 0; + } + return error(_("unrecognized whitespace ignore option '%s'"), option); +} + +int init_apply_state(struct apply_state *state, + struct repository *repo, + const char *prefix) +{ + memset(state, 0, sizeof(*state)); + state->prefix = prefix; + state->repo = repo; + state->apply = 1; + state->line_termination = '\n'; + state->p_value = 1; + state->p_context = UINT_MAX; + state->squelch_whitespace_errors = 5; + state->ws_error_action = warn_on_ws_error; + state->ws_ignore_action = ignore_ws_none; + state->linenr = 1; + string_list_init_nodup(&state->fn_table); + string_list_init_nodup(&state->limit_by_name); + string_list_init_nodup(&state->symlink_changes); + strbuf_init(&state->root, 0); + + git_apply_config(); + if (apply_default_whitespace && parse_whitespace_option(state, apply_default_whitespace)) + return -1; + if (apply_default_ignorewhitespace && parse_ignorewhitespace_option(state, apply_default_ignorewhitespace)) + return -1; + return 0; +} + +void clear_apply_state(struct apply_state *state) +{ + string_list_clear(&state->limit_by_name, 0); + string_list_clear(&state->symlink_changes, 0); + strbuf_release(&state->root); + + /* &state->fn_table is cleared at the end of apply_patch() */ +} + +static void mute_routine(const char *msg, va_list params) +{ + /* do nothing */ +} + +int check_apply_state(struct apply_state *state, int force_apply) +{ + int is_not_gitdir = !startup_info->have_repository; + + if (state->apply_with_reject && state->threeway) + return error(_("--reject and --3way cannot be used together.")); + if (state->threeway) { + if (is_not_gitdir) + return error(_("--3way outside a repository")); + state->check_index = 1; + } + if (state->apply_with_reject) { + state->apply = 1; + if (state->apply_verbosity == verbosity_normal) + state->apply_verbosity = verbosity_verbose; + } + if (!force_apply && (state->diffstat || state->numstat || state->summary || state->check || state->fake_ancestor)) + state->apply = 0; + if (state->check_index && is_not_gitdir) + return error(_("--index outside a repository")); + if (state->cached) { + if (is_not_gitdir) + return error(_("--cached outside a repository")); + state->check_index = 1; + } + if (state->ita_only && (state->check_index || is_not_gitdir)) + state->ita_only = 0; + if (state->check_index) + state->unsafe_paths = 0; + + if (state->apply_verbosity <= verbosity_silent) { + state->saved_error_routine = get_error_routine(); + state->saved_warn_routine = get_warn_routine(); + set_error_routine(mute_routine); + set_warn_routine(mute_routine); + } + + return 0; +} + +static void set_default_whitespace_mode(struct apply_state *state) +{ + if (!state->whitespace_option && !apply_default_whitespace) + state->ws_error_action = (state->apply ? warn_on_ws_error : nowarn_ws_error); +} + +/* + * This represents one "hunk" from a patch, starting with + * "@@ -oldpos,oldlines +newpos,newlines @@" marker. The + * patch text is pointed at by patch, and its byte length + * is stored in size. leading and trailing are the number + * of context lines. 
+ */ +struct fragment { + unsigned long leading, trailing; + unsigned long oldpos, oldlines; + unsigned long newpos, newlines; + /* + * 'patch' is usually borrowed from buf in apply_patch(), + * but some codepaths store an allocated buffer. + */ + const char *patch; + unsigned free_patch:1, + rejected:1; + int size; + int linenr; + struct fragment *next; +}; + +/* + * When dealing with a binary patch, we reuse "leading" field + * to store the type of the binary hunk, either deflated "delta" + * or deflated "literal". + */ +#define binary_patch_method leading +#define BINARY_DELTA_DEFLATED 1 +#define BINARY_LITERAL_DEFLATED 2 + +static void free_fragment_list(struct fragment *list) +{ + while (list) { + struct fragment *next = list->next; + if (list->free_patch) + free((char *)list->patch); + free(list); + list = next; + } +} + +static void free_patch(struct patch *patch) +{ + free_fragment_list(patch->fragments); + free(patch->def_name); + free(patch->old_name); + free(patch->new_name); + free(patch->result); + free(patch); +} + +static void free_patch_list(struct patch *list) +{ + while (list) { + struct patch *next = list->next; + free_patch(list); + list = next; + } +} + +/* + * A line in a file, len-bytes long (includes the terminating LF, + * except for an incomplete line at the end if the file ends with + * one), and its contents hashes to 'hash'. + */ +struct line { + size_t len; + unsigned hash : 24; + unsigned flag : 8; +#define LINE_COMMON 1 +#define LINE_PATCHED 2 +}; + +/* + * This represents a "file", which is an array of "lines". + */ +struct image { + char *buf; + size_t len; + size_t nr; + size_t alloc; + struct line *line_allocated; + struct line *line; +}; + +static uint32_t hash_line(const char *cp, size_t len) +{ + size_t i; + uint32_t h; + for (i = 0, h = 0; i < len; i++) { + if (!isspace(cp[i])) { + h = h * 3 + (cp[i] & 0xff); + } + } + return h; +} + +/* + * Compare lines s1 of length n1 and s2 of length n2, ignoring + * whitespace difference. Returns 1 if they match, 0 otherwise + */ +static int fuzzy_matchlines(const char *s1, size_t n1, + const char *s2, size_t n2) +{ + const char *end1 = s1 + n1; + const char *end2 = s2 + n2; + + /* ignore line endings */ + while (s1 < end1 && (end1[-1] == '\r' || end1[-1] == '\n')) + end1--; + while (s2 < end2 && (end2[-1] == '\r' || end2[-1] == '\n')) + end2--; + + while (s1 < end1 && s2 < end2) { + if (isspace(*s1)) { + /* + * Skip whitespace. We check on both buffers + * because we don't want "a b" to match "ab". + */ + if (!isspace(*s2)) + return 0; + while (s1 < end1 && isspace(*s1)) + s1++; + while (s2 < end2 && isspace(*s2)) + s2++; + } else if (*s1++ != *s2++) + return 0; + } + + /* If we reached the end on one side only, lines don't match. */ + return s1 == end1 && s2 == end2; +} + +static void add_line_info(struct image *img, const char *bol, size_t len, unsigned flag) +{ + ALLOC_GROW(img->line_allocated, img->nr + 1, img->alloc); + img->line_allocated[img->nr].len = len; + img->line_allocated[img->nr].hash = hash_line(bol, len); + img->line_allocated[img->nr].flag = flag; + img->nr++; +} + +/* + * "buf" has the file contents to be patched (read from various sources). + * attach it to "image" and add line-based index to it. + * "image" now owns the "buf". 
+ */ +static void prepare_image(struct image *image, char *buf, size_t len, + int prepare_linetable) +{ + const char *cp, *ep; + + memset(image, 0, sizeof(*image)); + image->buf = buf; + image->len = len; + + if (!prepare_linetable) + return; + + ep = image->buf + image->len; + cp = image->buf; + while (cp < ep) { + const char *next; + for (next = cp; next < ep && *next != '\n'; next++) + ; + if (next < ep) + next++; + add_line_info(image, cp, next - cp, 0); + cp = next; + } + image->line = image->line_allocated; +} + +static void clear_image(struct image *image) +{ + free(image->buf); + free(image->line_allocated); + memset(image, 0, sizeof(*image)); +} + +/* fmt must contain _one_ %s and no other substitution */ +static void say_patch_name(FILE *output, const char *fmt, struct patch *patch) +{ + struct strbuf sb = STRBUF_INIT; + + if (patch->old_name && patch->new_name && + strcmp(patch->old_name, patch->new_name)) { + quote_c_style(patch->old_name, &sb, NULL, 0); + strbuf_addstr(&sb, " => "); + quote_c_style(patch->new_name, &sb, NULL, 0); + } else { + const char *n = patch->new_name; + if (!n) + n = patch->old_name; + quote_c_style(n, &sb, NULL, 0); + } + fprintf(output, fmt, sb.buf); + fputc('\n', output); + strbuf_release(&sb); +} + +#define SLOP (16) + +static int read_patch_file(struct strbuf *sb, int fd) +{ + if (strbuf_read(sb, fd, 0) < 0) + return error_errno("git apply: failed to read"); + + /* + * Make sure that we have some slop in the buffer + * so that we can do speculative "memcmp" etc, and + * see to it that it is NUL-filled. + */ + strbuf_grow(sb, SLOP); + memset(sb->buf + sb->len, 0, SLOP); + return 0; +} + +static unsigned long linelen(const char *buffer, unsigned long size) +{ + unsigned long len = 0; + while (size--) { + len++; + if (*buffer++ == '\n') + break; + } + return len; +} + +static int is_dev_null(const char *str) +{ + return skip_prefix(str, "/dev/null", &str) && isspace(*str); +} + +#define TERM_SPACE 1 +#define TERM_TAB 2 + +static int name_terminate(int c, int terminate) +{ + if (c == ' ' && !(terminate & TERM_SPACE)) + return 0; + if (c == '\t' && !(terminate & TERM_TAB)) + return 0; + + return 1; +} + +/* remove double slashes to make --index work with such filenames */ +static char *squash_slash(char *name) +{ + int i = 0, j = 0; + + if (!name) + return NULL; + + while (name[i]) { + if ((name[j++] = name[i++]) == '/') + while (name[i] == '/') + i++; + } + name[j] = '\0'; + return name; +} + +static char *find_name_gnu(struct strbuf *root, + const char *line, + int p_value) +{ + struct strbuf name = STRBUF_INIT; + char *cp; + + /* + * Proposed "new-style" GNU patch/diff format; see + * https://lore.kernel.org/git/7vll0wvb2a.fsf@assigned-by-dhcp.cox.net/ + */ + if (unquote_c_style(&name, line, NULL)) { + strbuf_release(&name); + return NULL; + } + + for (cp = name.buf; p_value; p_value--) { + cp = strchr(cp, '/'); + if (!cp) { + strbuf_release(&name); + return NULL; + } + cp++; + } + + strbuf_remove(&name, 0, cp - name.buf); + if (root->len) + strbuf_insert(&name, 0, root->buf, root->len); + return squash_slash(strbuf_detach(&name, NULL)); +} + +static size_t sane_tz_len(const char *line, size_t len) +{ + const char *tz, *p; + + if (len < strlen(" +0500") || line[len-strlen(" +0500")] != ' ') + return 0; + tz = line + len - strlen(" +0500"); + + if (tz[1] != '+' && tz[1] != '-') + return 0; + + for (p = tz + 2; p != line + len; p++) + if (!isdigit(*p)) + return 0; + + return line + len - tz; +} + +static size_t tz_with_colon_len(const char *line, 
size_t len) +{ + const char *tz, *p; + + if (len < strlen(" +08:00") || line[len - strlen(":00")] != ':') + return 0; + tz = line + len - strlen(" +08:00"); + + if (tz[0] != ' ' || (tz[1] != '+' && tz[1] != '-')) + return 0; + p = tz + 2; + if (!isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || + !isdigit(*p++) || !isdigit(*p++)) + return 0; + + return line + len - tz; +} + +static size_t date_len(const char *line, size_t len) +{ + const char *date, *p; + + if (len < strlen("72-02-05") || line[len-strlen("-05")] != '-') + return 0; + p = date = line + len - strlen("72-02-05"); + + if (!isdigit(*p++) || !isdigit(*p++) || *p++ != '-' || + !isdigit(*p++) || !isdigit(*p++) || *p++ != '-' || + !isdigit(*p++) || !isdigit(*p++)) /* Not a date. */ + return 0; + + if (date - line >= strlen("19") && + isdigit(date[-1]) && isdigit(date[-2])) /* 4-digit year */ + date -= strlen("19"); + + return line + len - date; +} + +static size_t short_time_len(const char *line, size_t len) +{ + const char *time, *p; + + if (len < strlen(" 07:01:32") || line[len-strlen(":32")] != ':') + return 0; + p = time = line + len - strlen(" 07:01:32"); + + /* Permit 1-digit hours? */ + if (*p++ != ' ' || + !isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || + !isdigit(*p++) || !isdigit(*p++) || *p++ != ':' || + !isdigit(*p++) || !isdigit(*p++)) /* Not a time. */ + return 0; + + return line + len - time; +} + +static size_t fractional_time_len(const char *line, size_t len) +{ + const char *p; + size_t n; + + /* Expected format: 19:41:17.620000023 */ + if (!len || !isdigit(line[len - 1])) + return 0; + p = line + len - 1; + + /* Fractional seconds. */ + while (p > line && isdigit(*p)) + p--; + if (*p != '.') + return 0; + + /* Hours, minutes, and whole seconds. */ + n = short_time_len(line, p - line); + if (!n) + return 0; + + return line + len - p + n; +} + +static size_t trailing_spaces_len(const char *line, size_t len) +{ + const char *p; + + /* Expected format: ' ' x (1 or more) */ + if (!len || line[len - 1] != ' ') + return 0; + + p = line + len; + while (p != line) { + p--; + if (*p != ' ') + return line + len - (p + 1); + } + + /* All spaces! */ + return len; +} + +static size_t diff_timestamp_len(const char *line, size_t len) +{ + const char *end = line + len; + size_t n; + + /* + * Posix: 2010-07-05 19:41:17 + * GNU: 2010-07-05 19:41:17.620000023 -0500 + */ + + if (!isdigit(end[-1])) + return 0; + + n = sane_tz_len(line, end - line); + if (!n) + n = tz_with_colon_len(line, end - line); + end -= n; + + n = short_time_len(line, end - line); + if (!n) + n = fractional_time_len(line, end - line); + end -= n; + + n = date_len(line, end - line); + if (!n) /* No date. Too bad. */ + return 0; + end -= n; + + if (end == line) /* No space before date. */ + return 0; + if (end[-1] == '\t') { /* Success! */ + end--; + return line + len - end; + } + if (end[-1] != ' ') /* No space before date. */ + return 0; + + /* Whitespace damage. 
*/ + end -= trailing_spaces_len(line, end - line); + return line + len - end; +} + +static char *find_name_common(struct strbuf *root, + const char *line, + const char *def, + int p_value, + const char *end, + int terminate) +{ + int len; + const char *start = NULL; + + if (p_value == 0) + start = line; + while (line != end) { + char c = *line; + + if (!end && isspace(c)) { + if (c == '\n') + break; + if (name_terminate(c, terminate)) + break; + } + line++; + if (c == '/' && !--p_value) + start = line; + } + if (!start) + return squash_slash(xstrdup_or_null(def)); + len = line - start; + if (!len) + return squash_slash(xstrdup_or_null(def)); + + /* + * Generally we prefer the shorter name, especially + * if the other one is just a variation of that with + * something else tacked on to the end (ie "file.orig" + * or "file~"). + */ + if (def) { + int deflen = strlen(def); + if (deflen < len && !strncmp(start, def, deflen)) + return squash_slash(xstrdup(def)); + } + + if (root->len) { + char *ret = xstrfmt("%s%.*s", root->buf, len, start); + return squash_slash(ret); + } + + return squash_slash(xmemdupz(start, len)); +} + +static char *find_name(struct strbuf *root, + const char *line, + char *def, + int p_value, + int terminate) +{ + if (*line == '"') { + char *name = find_name_gnu(root, line, p_value); + if (name) + return name; + } + + return find_name_common(root, line, def, p_value, NULL, terminate); +} + +static char *find_name_traditional(struct strbuf *root, + const char *line, + char *def, + int p_value) +{ + size_t len; + size_t date_len; + + if (*line == '"') { + char *name = find_name_gnu(root, line, p_value); + if (name) + return name; + } + + len = strchrnul(line, '\n') - line; + date_len = diff_timestamp_len(line, len); + if (!date_len) + return find_name_common(root, line, def, p_value, NULL, TERM_TAB); + len -= date_len; + + return find_name_common(root, line, def, p_value, line + len, 0); +} + +/* + * Given the string after "--- " or "+++ ", guess the appropriate + * p_value for the given patch. + */ +static int guess_p_value(struct apply_state *state, const char *nameline) +{ + char *name, *cp; + int val = -1; + + if (is_dev_null(nameline)) + return -1; + name = find_name_traditional(&state->root, nameline, NULL, 0); + if (!name) + return -1; + cp = strchr(name, '/'); + if (!cp) + val = 0; + else if (state->prefix) { + /* + * Does it begin with "a/$our-prefix" and such? Then this is + * very likely to apply to our directory. + */ + if (starts_with(name, state->prefix)) + val = count_slashes(state->prefix); + else { + cp++; + if (starts_with(cp, state->prefix)) + val = count_slashes(state->prefix) + 1; + } + } + free(name); + return val; +} + +/* + * Does the ---/+++ line have the POSIX timestamp after the last HT? + * GNU diff puts epoch there to signal a creation/deletion event. Is + * this such a timestamp? + */ +static int has_epoch_timestamp(const char *nameline) +{ + /* + * We are only interested in epoch timestamp; any non-zero + * fraction cannot be one, hence "(\.0+)?" in the regexp below. + * For the same reason, the date must be either 1969-12-31 or + * 1970-01-01, and the seconds part must be "00". + */ + const char stamp_regexp[] = + "^[0-2][0-9]:([0-5][0-9]):00(\\.0+)?" 
+ " " + "([-+][0-2][0-9]:?[0-5][0-9])\n"; + const char *timestamp = NULL, *cp, *colon; + static regex_t *stamp; + regmatch_t m[10]; + int zoneoffset, epoch_hour, hour, minute; + int status; + + for (cp = nameline; *cp != '\n'; cp++) { + if (*cp == '\t') + timestamp = cp + 1; + } + if (!timestamp) + return 0; + + /* + * YYYY-MM-DD hh:mm:ss must be from either 1969-12-31 + * (west of GMT) or 1970-01-01 (east of GMT) + */ + if (skip_prefix(timestamp, "1969-12-31 ", &timestamp)) + epoch_hour = 24; + else if (skip_prefix(timestamp, "1970-01-01 ", &timestamp)) + epoch_hour = 0; + else + return 0; + + if (!stamp) { + stamp = xmalloc(sizeof(*stamp)); + if (regcomp(stamp, stamp_regexp, REG_EXTENDED)) { + warning(_("Cannot prepare timestamp regexp %s"), + stamp_regexp); + return 0; + } + } + + status = regexec(stamp, timestamp, ARRAY_SIZE(m), m, 0); + if (status) { + if (status != REG_NOMATCH) + warning(_("regexec returned %d for input: %s"), + status, timestamp); + return 0; + } + + hour = strtol(timestamp, NULL, 10); + minute = strtol(timestamp + m[1].rm_so, NULL, 10); + + zoneoffset = strtol(timestamp + m[3].rm_so + 1, (char **) &colon, 10); + if (*colon == ':') + zoneoffset = zoneoffset * 60 + strtol(colon + 1, NULL, 10); + else + zoneoffset = (zoneoffset / 100) * 60 + (zoneoffset % 100); + if (timestamp[m[3].rm_so] == '-') + zoneoffset = -zoneoffset; + + return hour * 60 + minute - zoneoffset == epoch_hour * 60; +} + +/* + * Get the name etc info from the ---/+++ lines of a traditional patch header + * + * FIXME! The end-of-filename heuristics are kind of screwy. For existing + * files, we can happily check the index for a match, but for creating a + * new file we should try to match whatever "patch" does. I have no idea. + */ +static int parse_traditional_patch(struct apply_state *state, + const char *first, + const char *second, + struct patch *patch) +{ + char *name; + + first += 4; /* skip "--- " */ + second += 4; /* skip "+++ " */ + if (!state->p_value_known) { + int p, q; + p = guess_p_value(state, first); + q = guess_p_value(state, second); + if (p < 0) p = q; + if (0 <= p && p == q) { + state->p_value = p; + state->p_value_known = 1; + } + } + if (is_dev_null(first)) { + patch->is_new = 1; + patch->is_delete = 0; + name = find_name_traditional(&state->root, second, NULL, state->p_value); + patch->new_name = name; + } else if (is_dev_null(second)) { + patch->is_new = 0; + patch->is_delete = 1; + name = find_name_traditional(&state->root, first, NULL, state->p_value); + patch->old_name = name; + } else { + char *first_name; + first_name = find_name_traditional(&state->root, first, NULL, state->p_value); + name = find_name_traditional(&state->root, second, first_name, state->p_value); + free(first_name); + if (has_epoch_timestamp(first)) { + patch->is_new = 1; + patch->is_delete = 0; + patch->new_name = name; + } else if (has_epoch_timestamp(second)) { + patch->is_new = 0; + patch->is_delete = 1; + patch->old_name = name; + } else { + patch->old_name = name; + patch->new_name = xstrdup_or_null(name); + } + } + if (!name) + return error(_("unable to find filename in patch at line %d"), state->linenr); + + return 0; +} + +static int gitdiff_hdrend(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return 1; +} + +/* + * We're anal about diff header consistency, to make + * sure that we don't end up having strange ambiguous + * patches floating around.
+ * + * As a result, gitdiff_{old|new}name() will check + * their names against any previous information, just + * to make sure.. + */ +#define DIFF_OLD_NAME 0 +#define DIFF_NEW_NAME 1 + +static int gitdiff_verify_name(struct gitdiff_data *state, + const char *line, + int isnull, + char **name, + int side) +{ + if (!*name && !isnull) { + *name = find_name(state->root, line, NULL, state->p_value, TERM_TAB); + return 0; + } + + if (*name) { + char *another; + if (isnull) + return error(_("git apply: bad git-diff - expected /dev/null, got %s on line %d"), + *name, state->linenr); + another = find_name(state->root, line, NULL, state->p_value, TERM_TAB); + if (!another || strcmp(another, *name)) { + free(another); + return error((side == DIFF_NEW_NAME) ? + _("git apply: bad git-diff - inconsistent new filename on line %d") : + _("git apply: bad git-diff - inconsistent old filename on line %d"), state->linenr); + } + free(another); + } else { + if (!is_dev_null(line)) + return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr); + } + + return 0; +} + +static int gitdiff_oldname(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return gitdiff_verify_name(state, line, + patch->is_new, &patch->old_name, + DIFF_OLD_NAME); +} + +static int gitdiff_newname(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return gitdiff_verify_name(state, line, + patch->is_delete, &patch->new_name, + DIFF_NEW_NAME); +} + +static int parse_mode_line(const char *line, int linenr, unsigned int *mode) +{ + char *end; + *mode = strtoul(line, &end, 8); + if (end == line || !isspace(*end)) + return error(_("invalid mode on line %d: %s"), linenr, line); + return 0; +} + +static int gitdiff_oldmode(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return parse_mode_line(line, state->linenr, &patch->old_mode); +} + +static int gitdiff_newmode(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return parse_mode_line(line, state->linenr, &patch->new_mode); +} + +static int gitdiff_delete(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_delete = 1; + free(patch->old_name); + patch->old_name = xstrdup_or_null(patch->def_name); + return gitdiff_oldmode(state, line, patch); +} + +static int gitdiff_newfile(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_new = 1; + free(patch->new_name); + patch->new_name = xstrdup_or_null(patch->def_name); + return gitdiff_newmode(state, line, patch); +} + +static int gitdiff_copysrc(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_copy = 1; + free(patch->old_name); + patch->old_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); + return 0; +} + +static int gitdiff_copydst(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_copy = 1; + free(patch->new_name); + patch->new_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); + return 0; +} + +static int gitdiff_renamesrc(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_rename = 1; + free(patch->old_name); + patch->old_name = find_name(state->root, line, NULL, state->p_value ? 
state->p_value - 1 : 0, 0); + return 0; +} + +static int gitdiff_renamedst(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + patch->is_rename = 1; + free(patch->new_name); + patch->new_name = find_name(state->root, line, NULL, state->p_value ? state->p_value - 1 : 0, 0); + return 0; +} + +static int gitdiff_similarity(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + unsigned long val = strtoul(line, NULL, 10); + if (val <= 100) + patch->score = val; + return 0; +} + +static int gitdiff_dissimilarity(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + unsigned long val = strtoul(line, NULL, 10); + if (val <= 100) + patch->score = val; + return 0; +} + +static int gitdiff_index(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + /* + * index line is N hexadecimal, "..", N hexadecimal, + * and optional space with octal mode. + */ + const char *ptr, *eol; + int len; + const unsigned hexsz = the_hash_algo->hexsz; + + ptr = strchr(line, '.'); + if (!ptr || ptr[1] != '.' || hexsz < ptr - line) + return 0; + len = ptr - line; + memcpy(patch->old_oid_prefix, line, len); + patch->old_oid_prefix[len] = 0; + + line = ptr + 2; + ptr = strchr(line, ' '); + eol = strchrnul(line, '\n'); + + if (!ptr || eol < ptr) + ptr = eol; + len = ptr - line; + + if (hexsz < len) + return 0; + memcpy(patch->new_oid_prefix, line, len); + patch->new_oid_prefix[len] = 0; + if (*ptr == ' ') + return gitdiff_oldmode(state, ptr + 1, patch); + return 0; +} + +/* + * This is normal for a diff that doesn't change anything: we'll fall through + * into the next diff. Tell the parser to break out. + */ +static int gitdiff_unrecognized(struct gitdiff_data *state, + const char *line, + struct patch *patch) +{ + return 1; +} + +/* + * Skip p_value leading components from "line"; as we do not accept + * absolute paths, return NULL in that case. + */ +static const char *skip_tree_prefix(int p_value, + const char *line, + int llen) +{ + int nslash; + int i; + + if (!p_value) + return (llen && line[0] == '/') ? NULL : line; + + nslash = p_value; + for (i = 0; i < llen; i++) { + int ch = line[i]; + if (ch == '/' && --nslash <= 0) + return (i == 0) ? NULL : &line[i + 1]; + } + return NULL; +} + +/* + * This is to extract the same name that appears on "diff --git" + * line. We do not find and return anything if it is a rename + * patch, and it is OK because we will find the name elsewhere. + * We need to reliably find name only when it is mode-change only, + * creation or deletion of an empty file. In any of these cases, + * both sides are the same name under a/ and b/ respectively. + */ +static char *git_header_name(int p_value, + const char *line, + int llen) +{ + const char *name; + const char *second = NULL; + size_t len, line_len; + + line += strlen("diff --git "); + llen -= strlen("diff --git "); + + if (*line == '"') { + const char *cp; + struct strbuf first = STRBUF_INIT; + struct strbuf sp = STRBUF_INIT; + + if (unquote_c_style(&first, line, &second)) + goto free_and_fail1; + + /* strip the a/b prefix including trailing slash */ + cp = skip_tree_prefix(p_value, first.buf, first.len); + if (!cp) + goto free_and_fail1; + strbuf_remove(&first, 0, cp - first.buf); + + /* + * second points at one past closing dq of name. + * find the second name. 
+ */ + while ((second < line + llen) && isspace(*second)) + second++; + + if (line + llen <= second) + goto free_and_fail1; + if (*second == '"') { + if (unquote_c_style(&sp, second, NULL)) + goto free_and_fail1; + cp = skip_tree_prefix(p_value, sp.buf, sp.len); + if (!cp) + goto free_and_fail1; + /* They must match, otherwise ignore */ + if (strcmp(cp, first.buf)) + goto free_and_fail1; + strbuf_release(&sp); + return strbuf_detach(&first, NULL); + } + + /* unquoted second */ + cp = skip_tree_prefix(p_value, second, line + llen - second); + if (!cp) + goto free_and_fail1; + if (line + llen - cp != first.len || + memcmp(first.buf, cp, first.len)) + goto free_and_fail1; + return strbuf_detach(&first, NULL); + + free_and_fail1: + strbuf_release(&first); + strbuf_release(&sp); + return NULL; + } + + /* unquoted first name */ + name = skip_tree_prefix(p_value, line, llen); + if (!name) + return NULL; + + /* + * since the first name is unquoted, a dq if exists must be + * the beginning of the second name. + */ + for (second = name; second < line + llen; second++) { + if (*second == '"') { + struct strbuf sp = STRBUF_INIT; + const char *np; + + if (unquote_c_style(&sp, second, NULL)) + goto free_and_fail2; + + np = skip_tree_prefix(p_value, sp.buf, sp.len); + if (!np) + goto free_and_fail2; + + len = sp.buf + sp.len - np; + if (len < second - name && + !strncmp(np, name, len) && + isspace(name[len])) { + /* Good */ + strbuf_remove(&sp, 0, np - sp.buf); + return strbuf_detach(&sp, NULL); + } + + free_and_fail2: + strbuf_release(&sp); + return NULL; + } + } + + /* + * Accept a name only if it shows up twice, exactly the same + * form. + */ + second = strchr(name, '\n'); + if (!second) + return NULL; + line_len = second - name; + for (len = 0 ; ; len++) { + switch (name[len]) { + default: + continue; + case '\n': + return NULL; + case '\t': case ' ': + /* + * Is this the separator between the preimage + * and the postimage pathname? Again, we are + * only interested in the case where there is + * no rename, as this is only to set def_name + * and a rename patch has the names elsewhere + * in an unambiguous form. + */ + if (!name[len + 1]) + return NULL; /* no postimage name */ + second = skip_tree_prefix(p_value, name + len + 1, + line_len - (len + 1)); + if (!second) + return NULL; + /* + * Does len bytes starting at "name" and "second" + * (that are separated by one HT or SP we just + * found) exactly match? + */ + if (second[len] == '\n' && !strncmp(name, second, len)) + return xmemdupz(name, len); + } + } +} + +static int check_header_line(int linenr, struct patch *patch) +{ + int extensions = (patch->is_delete == 1) + (patch->is_new == 1) + + (patch->is_rename == 1) + (patch->is_copy == 1); + if (extensions > 1) + return error(_("inconsistent header lines %d and %d"), + patch->extension_linenr, linenr); + if (extensions && !patch->extension_linenr) + patch->extension_linenr = linenr; + return 0; +} + +int parse_git_diff_header(struct strbuf *root, + int *linenr, + int p_value, + const char *line, + int len, + unsigned int size, + struct patch *patch) +{ + unsigned long offset; + struct gitdiff_data parse_hdr_state; + + /* A git diff has explicit new/delete information, so we don't guess */ + patch->is_new = 0; + patch->is_delete = 0; + + /* + * Some things may not have the old name in the + * rest of the headers anywhere (pure mode changes, + * or removing or adding empty files), so we get + * the default name from the header. 
+ */ + patch->def_name = git_header_name(p_value, line, len); + if (patch->def_name && root->len) { + char *s = xstrfmt("%s%s", root->buf, patch->def_name); + free(patch->def_name); + patch->def_name = s; + } + + line += len; + size -= len; + (*linenr)++; + parse_hdr_state.root = root; + parse_hdr_state.linenr = *linenr; + parse_hdr_state.p_value = p_value; + + for (offset = len ; size > 0 ; offset += len, size -= len, line += len, (*linenr)++) { + static const struct opentry { + const char *str; + int (*fn)(struct gitdiff_data *, const char *, struct patch *); + } optable[] = { + { "@@ -", gitdiff_hdrend }, + { "--- ", gitdiff_oldname }, + { "+++ ", gitdiff_newname }, + { "old mode ", gitdiff_oldmode }, + { "new mode ", gitdiff_newmode }, + { "deleted file mode ", gitdiff_delete }, + { "new file mode ", gitdiff_newfile }, + { "copy from ", gitdiff_copysrc }, + { "copy to ", gitdiff_copydst }, + { "rename old ", gitdiff_renamesrc }, + { "rename new ", gitdiff_renamedst }, + { "rename from ", gitdiff_renamesrc }, + { "rename to ", gitdiff_renamedst }, + { "similarity index ", gitdiff_similarity }, + { "dissimilarity index ", gitdiff_dissimilarity }, + { "index ", gitdiff_index }, + { "", gitdiff_unrecognized }, + }; + int i; + + len = linelen(line, size); + if (!len || line[len-1] != '\n') + break; + for (i = 0; i < ARRAY_SIZE(optable); i++) { + const struct opentry *p = optable + i; + int oplen = strlen(p->str); + int res; + if (len < oplen || memcmp(p->str, line, oplen)) + continue; + res = p->fn(&parse_hdr_state, line + oplen, patch); + if (res < 0) + return -1; + if (check_header_line(*linenr, patch)) + return -1; + if (res > 0) + goto done; + break; + } + } + +done: + if (!patch->old_name && !patch->new_name) { + if (!patch->def_name) { + error(Q_("git diff header lacks filename information when removing " + "%d leading pathname component (line %d)", + "git diff header lacks filename information when removing " + "%d leading pathname components (line %d)", + parse_hdr_state.p_value), + parse_hdr_state.p_value, *linenr); + return -128; + } + patch->old_name = xstrdup(patch->def_name); + patch->new_name = xstrdup(patch->def_name); + } + if ((!patch->new_name && !patch->is_delete) || + (!patch->old_name && !patch->is_new)) { + error(_("git diff header lacks filename information " + "(line %d)"), *linenr); + return -128; + } + patch->is_toplevel_relative = 1; + return offset; +} + +static int parse_num(const char *line, unsigned long *p) +{ + char *ptr; + + if (!isdigit(*line)) + return 0; + *p = strtoul(line, &ptr, 10); + return ptr - line; +} + +static int parse_range(const char *line, int len, int offset, const char *expect, + unsigned long *p1, unsigned long *p2) +{ + int digits, ex; + + if (offset < 0 || offset >= len) + return -1; + line += offset; + len -= offset; + + digits = parse_num(line, p1); + if (!digits) + return -1; + + offset += digits; + line += digits; + len -= digits; + + *p2 = 1; + if (*line == ',') { + digits = parse_num(line+1, p2); + if (!digits) + return -1; + + offset += digits+1; + line += digits+1; + len -= digits+1; + } + + ex = strlen(expect); + if (ex > len) + return -1; + if (memcmp(line, expect, ex)) + return -1; + + return offset + ex; +} + +static void recount_diff(const char *line, int size, struct fragment *fragment) +{ + int oldlines = 0, newlines = 0, ret = 0; + + if (size < 1) { + warning("recount: ignore empty hunk"); + return; + } + + for (;;) { + int len = linelen(line, size); + size -= len; + line += len; + + if (size < 1) + break; + + switch 
(*line) { + case ' ': case '\n': + newlines++; + /* fall through */ + case '-': + oldlines++; + continue; + case '+': + newlines++; + continue; + case '\\': + continue; + case '@': + ret = size < 3 || !starts_with(line, "@@ "); + break; + case 'd': + ret = size < 5 || !starts_with(line, "diff "); + break; + default: + ret = -1; + break; + } + if (ret) { + warning(_("recount: unexpected line: %.*s"), + (int)linelen(line, size), line); + return; + } + break; + } + fragment->oldlines = oldlines; + fragment->newlines = newlines; +} + +/* + * Parse a unified diff fragment header of the + * form "@@ -a,b +c,d @@" + */ +static int parse_fragment_header(const char *line, int len, struct fragment *fragment) +{ + int offset; + + if (!len || line[len-1] != '\n') + return -1; + + /* Figure out the number of lines in a fragment */ + offset = parse_range(line, len, 4, " +", &fragment->oldpos, &fragment->oldlines); + offset = parse_range(line, len, offset, " @@", &fragment->newpos, &fragment->newlines); + + return offset; +} + +/* + * Find file diff header + * + * Returns: + * -1 if no header was found + * -128 in case of error + * the size of the header in bytes (called "offset") otherwise + */ +static int find_header(struct apply_state *state, + const char *line, + unsigned long size, + int *hdrsize, + struct patch *patch) +{ + unsigned long offset, len; + + patch->is_toplevel_relative = 0; + patch->is_rename = patch->is_copy = 0; + patch->is_new = patch->is_delete = -1; + patch->old_mode = patch->new_mode = 0; + patch->old_name = patch->new_name = NULL; + for (offset = 0; size > 0; offset += len, size -= len, line += len, state->linenr++) { + unsigned long nextlen; + + len = linelen(line, size); + if (!len) + break; + + /* Testing this early allows us to take a few shortcuts.. */ + if (len < 6) + continue; + + /* + * Make sure we don't find any unconnected patch fragments. + * That's a sign that we didn't find a header, and that a + * patch has become corrupted/broken up. + */ + if (!memcmp("@@ -", line, 4)) { + struct fragment dummy; + if (parse_fragment_header(line, len, &dummy) < 0) + continue; + error(_("patch fragment without header at line %d: %.*s"), + state->linenr, (int)len-1, line); + return -128; + } + + if (size < len + 6) + break; + + /* + * Git patch? It might not have a real patch, just a rename + * or mode change, so we handle that specially + */ + if (!memcmp("diff --git ", line, 11)) { + int git_hdr_len = parse_git_diff_header(&state->root, &state->linenr, + state->p_value, line, len, + size, patch); + if (git_hdr_len < 0) + return -128; + if (git_hdr_len <= len) + continue; + *hdrsize = git_hdr_len; + return offset; + } + + /* --- followed by +++ ? */ + if (memcmp("--- ", line, 4) || memcmp("+++ ", line + len, 4)) + continue; + + /* + * We only accept unified patches, so we want it to + * at least have "@@ -a,b +c,d @@\n", which is 14 chars + * minimum ("@@ -0,0 +1 @@\n" is the shortest). 
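+		 * ("+1" with the ",d" count omitted denotes a one-line range,
+		 * which is why the shortest legal header may drop it; the 13
+		 * characters of "@@ -0,0 +1 @@" plus the newline give exactly 14.)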
+ */ + nextlen = linelen(line + len, size - len); + if (size < nextlen + 14 || memcmp("@@ -", line + len + nextlen, 4)) + continue; + + /* Ok, we'll consider it a patch */ + if (parse_traditional_patch(state, line, line+len, patch)) + return -128; + *hdrsize = len + nextlen; + state->linenr += 2; + return offset; + } + return -1; +} + +static void record_ws_error(struct apply_state *state, + unsigned result, + const char *line, + int len, + int linenr) +{ + char *err; + + if (!result) + return; + + state->whitespace_error++; + if (state->squelch_whitespace_errors && + state->squelch_whitespace_errors < state->whitespace_error) + return; + + err = whitespace_error_string(result); + if (state->apply_verbosity > verbosity_silent) + fprintf(stderr, "%s:%d: %s.\n%.*s\n", + state->patch_input_file, linenr, err, len, line); + free(err); +} + +static void check_whitespace(struct apply_state *state, + const char *line, + int len, + unsigned ws_rule) +{ + unsigned result = ws_check(line + 1, len - 1, ws_rule); + + record_ws_error(state, result, line + 1, len - 2, state->linenr); +} + +/* + * Check if the patch has context lines with CRLF or + * the patch wants to remove lines with CRLF. + */ +static void check_old_for_crlf(struct patch *patch, const char *line, int len) +{ + if (len >= 2 && line[len-1] == '\n' && line[len-2] == '\r') { + patch->ws_rule |= WS_CR_AT_EOL; + patch->crlf_in_old = 1; + } +} + + +/* + * Parse a unified diff. Note that this really needs to parse each + * fragment separately, since the only way to know the difference + * between a "---" that is part of a patch, and a "---" that starts + * the next patch is to look at the line counts.. + */ +static int parse_fragment(struct apply_state *state, + const char *line, + unsigned long size, + struct patch *patch, + struct fragment *fragment) +{ + int added, deleted; + int len = linelen(line, size), offset; + unsigned long oldlines, newlines; + unsigned long leading, trailing; + + offset = parse_fragment_header(line, len, fragment); + if (offset < 0) + return -1; + if (offset > 0 && patch->recount) + recount_diff(line + offset, size - offset, fragment); + oldlines = fragment->oldlines; + newlines = fragment->newlines; + leading = 0; + trailing = 0; + + /* Parse the thing.. */ + line += len; + size -= len; + state->linenr++; + added = deleted = 0; + for (offset = len; + 0 < size; + offset += len, size -= len, line += len, state->linenr++) { + if (!oldlines && !newlines) + break; + len = linelen(line, size); + if (!len || line[len-1] != '\n') + return -1; + switch (*line) { + default: + return -1; + case '\n': /* newer GNU diff, an empty context line */ + case ' ': + oldlines--; + newlines--; + if (!deleted && !added) + leading++; + trailing++; + check_old_for_crlf(patch, line, len); + if (!state->apply_in_reverse && + state->ws_error_action == correct_ws_error) + check_whitespace(state, line, len, patch->ws_rule); + break; + case '-': + if (!state->apply_in_reverse) + check_old_for_crlf(patch, line, len); + if (state->apply_in_reverse && + state->ws_error_action != nowarn_ws_error) + check_whitespace(state, line, len, patch->ws_rule); + deleted++; + oldlines--; + trailing = 0; + break; + case '+': + if (state->apply_in_reverse) + check_old_for_crlf(patch, line, len); + if (!state->apply_in_reverse && + state->ws_error_action != nowarn_ws_error) + check_whitespace(state, line, len, patch->ws_rule); + added++; + newlines--; + trailing = 0; + break; + + /* + * We allow "\ No newline at end of file". 
Depending + * on locale settings when the patch was produced we + * don't know what this line looks like. The only + * thing we do know is that it begins with "\ ". + * Checking for 12 is just for sanity check -- any + * l10n of "\ No newline..." is at least that long. + */ + case '\\': + if (len < 12 || memcmp(line, "\\ ", 2)) + return -1; + break; + } + } + if (oldlines || newlines) + return -1; + if (!patch->recount && !deleted && !added) + return -1; + + fragment->leading = leading; + fragment->trailing = trailing; + + /* + * If a fragment ends with an incomplete line, we failed to include + * it in the above loop because we hit oldlines == newlines == 0 + * before seeing it. + */ + if (12 < size && !memcmp(line, "\\ ", 2)) + offset += linelen(line, size); + + patch->lines_added += added; + patch->lines_deleted += deleted; + + if (0 < patch->is_new && oldlines) + return error(_("new file depends on old contents")); + if (0 < patch->is_delete && newlines) + return error(_("deleted file still has contents")); + return offset; +} + +/* + * We have seen "diff --git a/... b/..." header (or a traditional patch + * header). Read hunks that belong to this patch into fragments and hang + * them to the given patch structure. + * + * The (fragment->patch, fragment->size) pair points into the memory given + * by the caller, not a copy, when we return. + * + * Returns: + * -1 in case of error, + * the number of bytes in the patch otherwise. + */ +static int parse_single_patch(struct apply_state *state, + const char *line, + unsigned long size, + struct patch *patch) +{ + unsigned long offset = 0; + unsigned long oldlines = 0, newlines = 0, context = 0; + struct fragment **fragp = &patch->fragments; + + while (size > 4 && !memcmp(line, "@@ -", 4)) { + struct fragment *fragment; + int len; + + CALLOC_ARRAY(fragment, 1); + fragment->linenr = state->linenr; + len = parse_fragment(state, line, size, patch, fragment); + if (len <= 0) { + free(fragment); + return error(_("corrupt patch at line %d"), state->linenr); + } + fragment->patch = line; + fragment->size = len; + oldlines += fragment->oldlines; + newlines += fragment->newlines; + context += fragment->leading + fragment->trailing; + + *fragp = fragment; + fragp = &fragment->next; + + offset += len; + line += len; + size -= len; + } + + /* + * If something was removed (i.e. we have old-lines) it cannot + * be creation, and if something was added it cannot be + * deletion. However, the reverse is not true; --unified=0 + * patches that only add are not necessarily creation even + * though they do not have any old lines, and ones that only + * delete are not necessarily deletion. + * + * Unfortunately, a real creation/deletion patch do _not_ have + * any context line by definition, so we cannot safely tell it + * apart with --unified=0 insanity. At least if the patch has + * more than one hunk it is not creation or deletion. 
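	 * For illustration, with --unified=0 a hunk inserting three lines at
+	 * the top of an existing file reads "@@ -0,0 +1,3 @@" followed by
+	 * three '+' lines, which is byte-for-byte what the single hunk of a
+	 * three-line file creation looks like.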
+ */ + if (patch->is_new < 0 && + (oldlines || (patch->fragments && patch->fragments->next))) + patch->is_new = 0; + if (patch->is_delete < 0 && + (newlines || (patch->fragments && patch->fragments->next))) + patch->is_delete = 0; + + if (0 < patch->is_new && oldlines) + return error(_("new file %s depends on old contents"), patch->new_name); + if (0 < patch->is_delete && newlines) + return error(_("deleted file %s still has contents"), patch->old_name); + if (!patch->is_delete && !newlines && context && state->apply_verbosity > verbosity_silent) + fprintf_ln(stderr, + _("** warning: " + "file %s becomes empty but is not deleted"), + patch->new_name); + + return offset; +} + +static inline int metadata_changes(struct patch *patch) +{ + return patch->is_rename > 0 || + patch->is_copy > 0 || + patch->is_new > 0 || + patch->is_delete || + (patch->old_mode && patch->new_mode && + patch->old_mode != patch->new_mode); +} + +static char *inflate_it(const void *data, unsigned long size, + unsigned long inflated_size) +{ + git_zstream stream; + void *out; + int st; + + memset(&stream, 0, sizeof(stream)); + + stream.next_in = (unsigned char *)data; + stream.avail_in = size; + stream.next_out = out = xmalloc(inflated_size); + stream.avail_out = inflated_size; + git_inflate_init(&stream); + st = git_inflate(&stream, Z_FINISH); + git_inflate_end(&stream); + if ((st != Z_STREAM_END) || stream.total_out != inflated_size) { + free(out); + return NULL; + } + return out; +} + +/* + * Read a binary hunk and return a new fragment; fragment->patch + * points at an allocated memory that the caller must free, so + * it is marked as "->free_patch = 1". + */ +static struct fragment *parse_binary_hunk(struct apply_state *state, + char **buf_p, + unsigned long *sz_p, + int *status_p, + int *used_p) +{ + /* + * Expect a line that begins with binary patch method ("literal" + * or "delta"), followed by the length of data before deflating. + * a sequence of 'length-byte' followed by base-85 encoded data + * should follow, terminated by a newline. + * + * Each 5-byte sequence of base-85 encodes up to 4 bytes, + * and we would limit the patch line to 66 characters, + * so one line can fit up to 13 groups that would decode + * to 52 bytes max. The length byte 'A'-'Z' corresponds + * to 1-26 bytes, and 'a'-'z' corresponds to 27-52 bytes. + */ + int llen, used; + unsigned long size = *sz_p; + char *buffer = *buf_p; + int patch_method; + unsigned long origlen; + char *data = NULL; + int hunk_size = 0; + struct fragment *frag; + + llen = linelen(buffer, size); + used = llen; + + *status_p = 0; + + if (starts_with(buffer, "delta ")) { + patch_method = BINARY_DELTA_DEFLATED; + origlen = strtoul(buffer + 6, NULL, 10); + } + else if (starts_with(buffer, "literal ")) { + patch_method = BINARY_LITERAL_DEFLATED; + origlen = strtoul(buffer + 8, NULL, 10); + } + else + return NULL; + + state->linenr++; + buffer += llen; + size -= llen; + while (1) { + int byte_length, max_byte_length, newsize; + llen = linelen(buffer, size); + used += llen; + state->linenr++; + if (llen == 1) { + /* consume the blank line */ + buffer++; + size--; + break; + } + /* + * Minimum line is "A00000\n" which is 7-byte long, + * and the line length must be multiple of 5 plus 2. 
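+		 * ("A" announces a single decoded byte, which still occupies one
+		 * full 5-character base-85 group, so 1 length byte + 5 data
+		 * characters + '\n' gives the 7-byte minimum.)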
+ */ + if ((llen < 7) || (llen-2) % 5) + goto corrupt; + max_byte_length = (llen - 2) / 5 * 4; + byte_length = *buffer; + if ('A' <= byte_length && byte_length <= 'Z') + byte_length = byte_length - 'A' + 1; + else if ('a' <= byte_length && byte_length <= 'z') + byte_length = byte_length - 'a' + 27; + else + goto corrupt; + /* if the input length was not multiple of 4, we would + * have filler at the end but the filler should never + * exceed 3 bytes + */ + if (max_byte_length < byte_length || + byte_length <= max_byte_length - 4) + goto corrupt; + newsize = hunk_size + byte_length; + data = xrealloc(data, newsize); + if (decode_85(data + hunk_size, buffer + 1, byte_length)) + goto corrupt; + hunk_size = newsize; + buffer += llen; + size -= llen; + } + + CALLOC_ARRAY(frag, 1); + frag->patch = inflate_it(data, hunk_size, origlen); + frag->free_patch = 1; + if (!frag->patch) + goto corrupt; + free(data); + frag->size = origlen; + *buf_p = buffer; + *sz_p = size; + *used_p = used; + frag->binary_patch_method = patch_method; + return frag; + + corrupt: + free(data); + *status_p = -1; + error(_("corrupt binary patch at line %d: %.*s"), + state->linenr-1, llen-1, buffer); + return NULL; +} + +/* + * Returns: + * -1 in case of error, + * the length of the parsed binary patch otherwise + */ +static int parse_binary(struct apply_state *state, + char *buffer, + unsigned long size, + struct patch *patch) +{ + /* + * We have read "GIT binary patch\n"; what follows is a line + * that says the patch method (currently, either "literal" or + * "delta") and the length of data before deflating; a + * sequence of 'length-byte' followed by base-85 encoded data + * follows. + * + * When a binary patch is reversible, there is another binary + * hunk in the same format, starting with patch method (either + * "literal" or "delta") with the length of data, and a sequence + * of length-byte + base-85 encoded data, terminated with another + * empty line. This data, when applied to the postimage, produces + * the preimage. + */ + struct fragment *forward; + struct fragment *reverse; + int status; + int used, used_1; + + forward = parse_binary_hunk(state, &buffer, &size, &status, &used); + if (!forward && !status) + /* there has to be one hunk (forward hunk) */ + return error(_("unrecognized binary patch at line %d"), state->linenr-1); + if (status) + /* otherwise we already gave an error message */ + return status; + + reverse = parse_binary_hunk(state, &buffer, &size, &status, &used_1); + if (reverse) + used += used_1; + else if (status) { + /* + * Not having reverse hunk is not an error, but having + * a corrupt reverse hunk is. + */ + free((void*) forward->patch); + free(forward); + return status; + } + forward->next = reverse; + patch->fragments = forward; + patch->is_binary = 1; + return used; +} + +static void prefix_one(struct apply_state *state, char **name) +{ + char *old_name = *name; + if (!old_name) + return; + *name = prefix_filename(state->prefix, *name); + free(old_name); +} + +static void prefix_patch(struct apply_state *state, struct patch *p) +{ + if (!state->prefix || p->is_toplevel_relative) + return; + prefix_one(state, &p->new_name); + prefix_one(state, &p->old_name); +} + +/* + * include/exclude + */ + +static void add_name_limit(struct apply_state *state, + const char *name, + int exclude) +{ + struct string_list_item *it; + + it = string_list_append(&state->limit_by_name, name); + it->util = exclude ? 
NULL : (void *) 1; +} + +static int use_patch(struct apply_state *state, struct patch *p) +{ + const char *pathname = p->new_name ? p->new_name : p->old_name; + int i; + + /* Paths outside are not touched regardless of "--include" */ + if (state->prefix && *state->prefix) { + const char *rest; + if (!skip_prefix(pathname, state->prefix, &rest) || !*rest) + return 0; + } + + /* See if it matches any of exclude/include rule */ + for (i = 0; i < state->limit_by_name.nr; i++) { + struct string_list_item *it = &state->limit_by_name.items[i]; + if (!wildmatch(it->string, pathname, 0)) + return (it->util != NULL); + } + + /* + * If we had any include, a path that does not match any rule is + * not used. Otherwise, we saw bunch of exclude rules (or none) + * and such a path is used. + */ + return !state->has_include; +} + +/* + * Read the patch text in "buffer" that extends for "size" bytes; stop + * reading after seeing a single patch (i.e. changes to a single file). + * Create fragments (i.e. patch hunks) and hang them to the given patch. + * + * Returns: + * -1 if no header was found or parse_binary() failed, + * -128 on another error, + * the number of bytes consumed otherwise, + * so that the caller can call us again for the next patch. + */ +static int parse_chunk(struct apply_state *state, char *buffer, unsigned long size, struct patch *patch) +{ + int hdrsize, patchsize; + int offset = find_header(state, buffer, size, &hdrsize, patch); + + if (offset < 0) + return offset; + + prefix_patch(state, patch); + + if (!use_patch(state, patch)) + patch->ws_rule = 0; + else if (patch->new_name) + patch->ws_rule = whitespace_rule(state->repo->index, + patch->new_name); + else + patch->ws_rule = whitespace_rule(state->repo->index, + patch->old_name); + + patchsize = parse_single_patch(state, + buffer + offset + hdrsize, + size - offset - hdrsize, + patch); + + if (patchsize < 0) + return -128; + + if (!patchsize) { + static const char git_binary[] = "GIT binary patch\n"; + int hd = hdrsize + offset; + unsigned long llen = linelen(buffer + hd, size - hd); + + if (llen == sizeof(git_binary) - 1 && + !memcmp(git_binary, buffer + hd, llen)) { + int used; + state->linenr++; + used = parse_binary(state, buffer + hd + llen, + size - hd - llen, patch); + if (used < 0) + return -1; + if (used) + patchsize = used + llen; + else + patchsize = 0; + } + else if (!memcmp(" differ\n", buffer + hd + llen - 8, 8)) { + static const char *binhdr[] = { + "Binary files ", + "Files ", + NULL, + }; + int i; + for (i = 0; binhdr[i]; i++) { + int len = strlen(binhdr[i]); + if (len < size - hd && + !memcmp(binhdr[i], buffer + hd, len)) { + state->linenr++; + patch->is_binary = 1; + patchsize = llen; + break; + } + } + } + + /* Empty patch cannot be applied if it is a text patch + * without metadata change. A binary patch appears + * empty to us here. 
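+		 * (A pure rename, copy, or mode-only change legitimately carries
+		 * no hunks at all; the metadata_changes() check below is what
+		 * lets those through.)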
+ */ + if ((state->apply || state->check) && + (!patch->is_binary && !metadata_changes(patch))) { + error(_("patch with only garbage at line %d"), state->linenr); + return -128; + } + } + + return offset + hdrsize + patchsize; +} + +static void reverse_patches(struct patch *p) +{ + for (; p; p = p->next) { + struct fragment *frag = p->fragments; + + SWAP(p->new_name, p->old_name); + SWAP(p->new_mode, p->old_mode); + SWAP(p->is_new, p->is_delete); + SWAP(p->lines_added, p->lines_deleted); + SWAP(p->old_oid_prefix, p->new_oid_prefix); + + for (; frag; frag = frag->next) { + SWAP(frag->newpos, frag->oldpos); + SWAP(frag->newlines, frag->oldlines); + } + } +} + +static const char pluses[] = +"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"; +static const char minuses[]= +"----------------------------------------------------------------------"; + +static void show_stats(struct apply_state *state, struct patch *patch) +{ + struct strbuf qname = STRBUF_INIT; + char *cp = patch->new_name ? patch->new_name : patch->old_name; + int max, add, del; + + quote_c_style(cp, &qname, NULL, 0); + + /* + * "scale" the filename + */ + max = state->max_len; + if (max > 50) + max = 50; + + if (qname.len > max) { + cp = strchr(qname.buf + qname.len + 3 - max, '/'); + if (!cp) + cp = qname.buf + qname.len + 3 - max; + strbuf_splice(&qname, 0, cp - qname.buf, "...", 3); + } + + if (patch->is_binary) { + printf(" %-*s | Bin\n", max, qname.buf); + strbuf_release(&qname); + return; + } + + printf(" %-*s |", max, qname.buf); + strbuf_release(&qname); + + /* + * scale the add/delete + */ + max = max + state->max_change > 70 ? 70 - max : state->max_change; + add = patch->lines_added; + del = patch->lines_deleted; + + if (state->max_change > 0) { + int total = ((add + del) * max + state->max_change / 2) / state->max_change; + add = (add * max + state->max_change / 2) / state->max_change; + del = total - add; + } + printf("%5d %.*s%.*s\n", patch->lines_added + patch->lines_deleted, + add, pluses, del, minuses); +} + +static int read_old_data(struct stat *st, struct patch *patch, + const char *path, struct strbuf *buf) +{ + int conv_flags = patch->crlf_in_old ? + CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE; + switch (st->st_mode & S_IFMT) { + case S_IFLNK: + if (strbuf_readlink(buf, path, st->st_size) < 0) + return error(_("unable to read symlink %s"), path); + return 0; + case S_IFREG: + if (strbuf_read_file(buf, path, st->st_size) != st->st_size) + return error(_("unable to open or read %s"), path); + /* + * "git apply" without "--index/--cached" should never look + * at the index; the target file may not have been added to + * the index yet, and we may not even be in any Git repository. + * Pass NULL to convert_to_git() to stress this; the function + * should never look at the index when explicit crlf option + * is given. + */ + convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags); + return 0; + default: + return -1; + } +} + +/* + * Update the preimage, and the common lines in postimage, + * from buffer buf of length len. If postlen is 0 the postimage + * is updated in place, otherwise it's updated on a new buffer + * of length postlen + */ + +static void update_pre_post_images(struct image *preimage, + struct image *postimage, + char *buf, + size_t len, size_t postlen) +{ + int i, ctx, reduced; + char *new_buf, *old_buf, *fixed; + struct image fixed_preimage; + + /* + * Update the preimage with whitespace fixes. 
Note that we + * are not losing preimage->buf -- apply_one_fragment() will + * free "oldlines". + */ + prepare_image(&fixed_preimage, buf, len, 1); + assert(postlen + ? fixed_preimage.nr == preimage->nr + : fixed_preimage.nr <= preimage->nr); + for (i = 0; i < fixed_preimage.nr; i++) + fixed_preimage.line[i].flag = preimage->line[i].flag; + free(preimage->line_allocated); + *preimage = fixed_preimage; + + /* + * Adjust the common context lines in postimage. This can be + * done in-place when we are shrinking it with whitespace + * fixing, but needs a new buffer when ignoring whitespace or + * expanding leading tabs to spaces. + * + * We trust the caller to tell us if the update can be done + * in place (postlen==0) or not. + */ + old_buf = postimage->buf; + if (postlen) + new_buf = postimage->buf = xmalloc(postlen); + else + new_buf = old_buf; + fixed = preimage->buf; + + for (i = reduced = ctx = 0; i < postimage->nr; i++) { + size_t l_len = postimage->line[i].len; + if (!(postimage->line[i].flag & LINE_COMMON)) { + /* an added line -- no counterparts in preimage */ + memmove(new_buf, old_buf, l_len); + old_buf += l_len; + new_buf += l_len; + continue; + } + + /* a common context -- skip it in the original postimage */ + old_buf += l_len; + + /* and find the corresponding one in the fixed preimage */ + while (ctx < preimage->nr && + !(preimage->line[ctx].flag & LINE_COMMON)) { + fixed += preimage->line[ctx].len; + ctx++; + } + + /* + * preimage is expected to run out, if the caller + * fixed addition of trailing blank lines. + */ + if (preimage->nr <= ctx) { + reduced++; + continue; + } + + /* and copy it in, while fixing the line length */ + l_len = preimage->line[ctx].len; + memcpy(new_buf, fixed, l_len); + new_buf += l_len; + fixed += l_len; + postimage->line[i].len = l_len; + ctx++; + } + + if (postlen + ? postlen < new_buf - postimage->buf + : postimage->len < new_buf - postimage->buf) + BUG("caller miscounted postlen: asked %d, orig = %d, used = %d", + (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf)); + + /* Fix the length of the whole thing */ + postimage->len = new_buf - postimage->buf; + postimage->nr -= reduced; +} + +static int line_by_line_fuzzy_match(struct image *img, + struct image *preimage, + struct image *postimage, + unsigned long current, + int current_lno, + int preimage_limit) +{ + int i; + size_t imgoff = 0; + size_t preoff = 0; + size_t postlen = postimage->len; + size_t extra_chars; + char *buf; + char *preimage_eof; + char *preimage_end; + struct strbuf fixed; + char *fixed_buf; + size_t fixed_len; + + for (i = 0; i < preimage_limit; i++) { + size_t prelen = preimage->line[i].len; + size_t imglen = img->line[current_lno+i].len; + + if (!fuzzy_matchlines(img->buf + current + imgoff, imglen, + preimage->buf + preoff, prelen)) + return 0; + if (preimage->line[i].flag & LINE_COMMON) + postlen += imglen - prelen; + imgoff += imglen; + preoff += prelen; + } + + /* + * Ok, the preimage matches with whitespace fuzz. + * + * imgoff now holds the true length of the target that + * matches the preimage before the end of the file. + * + * Count the number of characters in the preimage that fall + * beyond the end of the file and make sure that all of them + * are whitespace characters. (This can only happen if + * we are removing blank lines at the end of the file.) 
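+	 * (The caller, match_fragment(), only lets the preimage overhang
+	 * the end of the file while correcting WS_BLANK_AT_EOF breakage,
+	 * so the overhanging part is expected to be blank.)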
+ */ + buf = preimage_eof = preimage->buf + preoff; + for ( ; i < preimage->nr; i++) + preoff += preimage->line[i].len; + preimage_end = preimage->buf + preoff; + for ( ; buf < preimage_end; buf++) + if (!isspace(*buf)) + return 0; + + /* + * Update the preimage and the common postimage context + * lines to use the same whitespace as the target. + * If whitespace is missing in the target (i.e. + * if the preimage extends beyond the end of the file), + * use the whitespace from the preimage. + */ + extra_chars = preimage_end - preimage_eof; + strbuf_init(&fixed, imgoff + extra_chars); + strbuf_add(&fixed, img->buf + current, imgoff); + strbuf_add(&fixed, preimage_eof, extra_chars); + fixed_buf = strbuf_detach(&fixed, &fixed_len); + update_pre_post_images(preimage, postimage, + fixed_buf, fixed_len, postlen); + return 1; +} + +static int match_fragment(struct apply_state *state, + struct image *img, + struct image *preimage, + struct image *postimage, + unsigned long current, + int current_lno, + unsigned ws_rule, + int match_beginning, int match_end) +{ + int i; + char *fixed_buf, *buf, *orig, *target; + struct strbuf fixed; + size_t fixed_len, postlen; + int preimage_limit; + + if (preimage->nr + current_lno <= img->nr) { + /* + * The hunk falls within the boundaries of img. + */ + preimage_limit = preimage->nr; + if (match_end && (preimage->nr + current_lno != img->nr)) + return 0; + } else if (state->ws_error_action == correct_ws_error && + (ws_rule & WS_BLANK_AT_EOF)) { + /* + * This hunk extends beyond the end of img, and we are + * removing blank lines at the end of the file. This + * many lines from the beginning of the preimage must + * match with img, and the remainder of the preimage + * must be blank. + */ + preimage_limit = img->nr - current_lno; + } else { + /* + * The hunk extends beyond the end of the img and + * we are not removing blanks at the end, so we + * should reject the hunk at this position. + */ + return 0; + } + + if (match_beginning && current_lno) + return 0; + + /* Quick hash check */ + for (i = 0; i < preimage_limit; i++) + if ((img->line[current_lno + i].flag & LINE_PATCHED) || + (preimage->line[i].hash != img->line[current_lno + i].hash)) + return 0; + + if (preimage_limit == preimage->nr) { + /* + * Do we have an exact match? If we were told to match + * at the end, size must be exactly at current+fragsize, + * otherwise current+fragsize must be still within the preimage, + * and either case, the old piece should match the preimage + * exactly. + */ + if ((match_end + ? (current + preimage->len == img->len) + : (current + preimage->len <= img->len)) && + !memcmp(img->buf + current, preimage->buf, preimage->len)) + return 1; + } else { + /* + * The preimage extends beyond the end of img, so + * there cannot be an exact match. + * + * There must be one non-blank context line that match + * a line before the end of img. + */ + char *buf_end; + + buf = preimage->buf; + buf_end = buf; + for (i = 0; i < preimage_limit; i++) + buf_end += preimage->line[i].len; + + for ( ; buf < buf_end; buf++) + if (!isspace(*buf)) + break; + if (buf == buf_end) + return 0; + } + + /* + * No exact match. If we are ignoring whitespace, run a line-by-line + * fuzzy matching. We collect all the line length information because + * we need it to adjust whitespace if we match. 
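+	 * (This is the path taken when whitespace differences are ignored,
+	 * e.g. with --ignore-whitespace; the correction path further below
+	 * is used for --whitespace=fix instead.)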
+ */ + if (state->ws_ignore_action == ignore_ws_change) + return line_by_line_fuzzy_match(img, preimage, postimage, + current, current_lno, preimage_limit); + + if (state->ws_error_action != correct_ws_error) + return 0; + + /* + * The hunk does not apply byte-by-byte, but the hash says + * it might with whitespace fuzz. We weren't asked to + * ignore whitespace, we were asked to correct whitespace + * errors, so let's try matching after whitespace correction. + * + * While checking the preimage against the target, whitespace + * errors in both fixed, we count how large the corresponding + * postimage needs to be. The postimage prepared by + * apply_one_fragment() has whitespace errors fixed on added + * lines already, but the common lines were propagated as-is, + * which may become longer when their whitespace errors are + * fixed. + */ + + /* First count added lines in postimage */ + postlen = 0; + for (i = 0; i < postimage->nr; i++) { + if (!(postimage->line[i].flag & LINE_COMMON)) + postlen += postimage->line[i].len; + } + + /* + * The preimage may extend beyond the end of the file, + * but in this loop we will only handle the part of the + * preimage that falls within the file. + */ + strbuf_init(&fixed, preimage->len + 1); + orig = preimage->buf; + target = img->buf + current; + for (i = 0; i < preimage_limit; i++) { + size_t oldlen = preimage->line[i].len; + size_t tgtlen = img->line[current_lno + i].len; + size_t fixstart = fixed.len; + struct strbuf tgtfix; + int match; + + /* Try fixing the line in the preimage */ + ws_fix_copy(&fixed, orig, oldlen, ws_rule, NULL); + + /* Try fixing the line in the target */ + strbuf_init(&tgtfix, tgtlen); + ws_fix_copy(&tgtfix, target, tgtlen, ws_rule, NULL); + + /* + * If they match, either the preimage was based on + * a version before our tree fixed whitespace breakage, + * or we are lacking a whitespace-fix patch the tree + * the preimage was based on already had (i.e. target + * has whitespace breakage, the preimage doesn't). + * In either case, we are fixing the whitespace breakages + * so we might as well take the fix together with their + * real change. + */ + match = (tgtfix.len == fixed.len - fixstart && + !memcmp(tgtfix.buf, fixed.buf + fixstart, + fixed.len - fixstart)); + + /* Add the length if this is common with the postimage */ + if (preimage->line[i].flag & LINE_COMMON) + postlen += tgtfix.len; + + strbuf_release(&tgtfix); + if (!match) + goto unmatch_exit; + + orig += oldlen; + target += tgtlen; + } + + + /* + * Now handle the lines in the preimage that falls beyond the + * end of the file (if any). They will only match if they are + * empty or only contain whitespace (if WS_BLANK_AT_EOL is + * false). + */ + for ( ; i < preimage->nr; i++) { + size_t fixstart = fixed.len; /* start of the fixed preimage */ + size_t oldlen = preimage->line[i].len; + int j; + + /* Try fixing the line in the preimage */ + ws_fix_copy(&fixed, orig, oldlen, ws_rule, NULL); + + for (j = fixstart; j < fixed.len; j++) + if (!isspace(fixed.buf[j])) + goto unmatch_exit; + + orig += oldlen; + } + + /* + * Yes, the preimage is based on an older version that still + * has whitespace breakages unfixed, and fixing them makes the + * hunk match. Update the context lines in the postimage. 
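+	 * (When the computed postlen ends up smaller than the current
+	 * postimage, it is reset to 0 below so that update_pre_post_images()
+	 * rewrites the buffer in place instead of allocating a new one.)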
+ */ + fixed_buf = strbuf_detach(&fixed, &fixed_len); + if (postlen < postimage->len) + postlen = 0; + update_pre_post_images(preimage, postimage, + fixed_buf, fixed_len, postlen); + return 1; + + unmatch_exit: + strbuf_release(&fixed); + return 0; +} + +static int find_pos(struct apply_state *state, + struct image *img, + struct image *preimage, + struct image *postimage, + int line, + unsigned ws_rule, + int match_beginning, int match_end) +{ + int i; + unsigned long backwards, forwards, current; + int backwards_lno, forwards_lno, current_lno; + + /* + * When running with --allow-overlap, it is possible that a hunk is + * seen that pretends to start at the beginning (but no longer does), + * and that *still* needs to match the end. So trust `match_end` more + * than `match_beginning`. + */ + if (state->allow_overlap && match_beginning && match_end && + img->nr - preimage->nr != 0) + match_beginning = 0; + + /* + * If match_beginning or match_end is specified, there is no + * point starting from a wrong line that will never match and + * wander around and wait for a match at the specified end. + */ + if (match_beginning) + line = 0; + else if (match_end) + line = img->nr - preimage->nr; + + /* + * Because the comparison is unsigned, the following test + * will also take care of a negative line number that can + * result when match_end and preimage is larger than the target. + */ + if ((size_t) line > img->nr) + line = img->nr; + + current = 0; + for (i = 0; i < line; i++) + current += img->line[i].len; + + /* + * There's probably some smart way to do this, but I'll leave + * that to the smart and beautiful people. I'm simple and stupid. + */ + backwards = current; + backwards_lno = line; + forwards = current; + forwards_lno = line; + current_lno = line; + + for (i = 0; ; i++) { + if (match_fragment(state, img, preimage, postimage, + current, current_lno, ws_rule, + match_beginning, match_end)) + return current_lno; + + again: + if (backwards_lno == 0 && forwards_lno == img->nr) + break; + + if (i & 1) { + if (backwards_lno == 0) { + i++; + goto again; + } + backwards_lno--; + backwards -= img->line[backwards_lno].len; + current = backwards; + current_lno = backwards_lno; + } else { + if (forwards_lno == img->nr) { + i++; + goto again; + } + forwards += img->line[forwards_lno].len; + forwards_lno++; + current = forwards; + current_lno = forwards_lno; + } + + } + return -1; +} + +static void remove_first_line(struct image *img) +{ + img->buf += img->line[0].len; + img->len -= img->line[0].len; + img->line++; + img->nr--; +} + +static void remove_last_line(struct image *img) +{ + img->len -= img->line[--img->nr].len; +} + +/* + * The change from "preimage" and "postimage" has been found to + * apply at applied_pos (counts in line numbers) in "img". + * Update "img" to remove "preimage" and replace it with "postimage". + */ +static void update_image(struct apply_state *state, + struct image *img, + int applied_pos, + struct image *preimage, + struct image *postimage) +{ + /* + * remove the copy of preimage at offset in img + * and replace it with postimage + */ + int i, nr; + size_t remove_count, insert_count, applied_at = 0; + char *result; + int preimage_limit; + + /* + * If we are removing blank lines at the end of img, + * the preimage may extend beyond the end. + * If that is the case, we must be careful only to + * remove the part of the preimage that falls within + * the boundaries of img. 
Initialize preimage_limit + * to the number of lines in the preimage that falls + * within the boundaries. + */ + preimage_limit = preimage->nr; + if (preimage_limit > img->nr - applied_pos) + preimage_limit = img->nr - applied_pos; + + for (i = 0; i < applied_pos; i++) + applied_at += img->line[i].len; + + remove_count = 0; + for (i = 0; i < preimage_limit; i++) + remove_count += img->line[applied_pos + i].len; + insert_count = postimage->len; + + /* Adjust the contents */ + result = xmalloc(st_add3(st_sub(img->len, remove_count), insert_count, 1)); + memcpy(result, img->buf, applied_at); + memcpy(result + applied_at, postimage->buf, postimage->len); + memcpy(result + applied_at + postimage->len, + img->buf + (applied_at + remove_count), + img->len - (applied_at + remove_count)); + free(img->buf); + img->buf = result; + img->len += insert_count - remove_count; + result[img->len] = '\0'; + + /* Adjust the line table */ + nr = img->nr + postimage->nr - preimage_limit; + if (preimage_limit < postimage->nr) { + /* + * NOTE: this knows that we never call remove_first_line() + * on anything other than pre/post image. + */ + REALLOC_ARRAY(img->line, nr); + img->line_allocated = img->line; + } + if (preimage_limit != postimage->nr) + MOVE_ARRAY(img->line + applied_pos + postimage->nr, + img->line + applied_pos + preimage_limit, + img->nr - (applied_pos + preimage_limit)); + COPY_ARRAY(img->line + applied_pos, postimage->line, postimage->nr); + if (!state->allow_overlap) + for (i = 0; i < postimage->nr; i++) + img->line[applied_pos + i].flag |= LINE_PATCHED; + img->nr = nr; +} + +/* + * Use the patch-hunk text in "frag" to prepare two images (preimage and + * postimage) for the hunk. Find lines that match "preimage" in "img" and + * replace the part of "img" with "postimage" text. + */ +static int apply_one_fragment(struct apply_state *state, + struct image *img, struct fragment *frag, + int inaccurate_eof, unsigned ws_rule, + int nth_fragment) +{ + int match_beginning, match_end; + const char *patch = frag->patch; + int size = frag->size; + char *old, *oldlines; + struct strbuf newlines; + int new_blank_lines_at_end = 0; + int found_new_blank_lines_at_end = 0; + int hunk_linenr = frag->linenr; + unsigned long leading, trailing; + int pos, applied_pos; + struct image preimage; + struct image postimage; + + memset(&preimage, 0, sizeof(preimage)); + memset(&postimage, 0, sizeof(postimage)); + oldlines = xmalloc(size); + strbuf_init(&newlines, size); + + old = oldlines; + while (size > 0) { + char first; + int len = linelen(patch, size); + int plen; + int added_blank_line = 0; + int is_blank_context = 0; + size_t start; + + if (!len) + break; + + /* + * "plen" is how much of the line we should use for + * the actual patch data. Normally we just remove the + * first character on the line, but if the line is + * followed by "\ No newline", then we also remove the + * last one (which is the newline, of course). + */ + plen = len - 1; + if (len < size && patch[len] == '\\') + plen--; + first = *patch; + if (state->apply_in_reverse) { + if (first == '-') + first = '+'; + else if (first == '+') + first = '-'; + } + + switch (first) { + case '\n': + /* Newer GNU diff, empty context line */ + if (plen < 0) + /* ... 
followed by '\No newline'; nothing */ + break; + *old++ = '\n'; + strbuf_addch(&newlines, '\n'); + add_line_info(&preimage, "\n", 1, LINE_COMMON); + add_line_info(&postimage, "\n", 1, LINE_COMMON); + is_blank_context = 1; + break; + case ' ': + if (plen && (ws_rule & WS_BLANK_AT_EOF) && + ws_blank_line(patch + 1, plen, ws_rule)) + is_blank_context = 1; + /* fallthrough */ + case '-': + memcpy(old, patch + 1, plen); + add_line_info(&preimage, old, plen, + (first == ' ' ? LINE_COMMON : 0)); + old += plen; + if (first == '-') + break; + /* fallthrough */ + case '+': + /* --no-add does not add new lines */ + if (first == '+' && state->no_add) + break; + + start = newlines.len; + if (first != '+' || + !state->whitespace_error || + state->ws_error_action != correct_ws_error) { + strbuf_add(&newlines, patch + 1, plen); + } + else { + ws_fix_copy(&newlines, patch + 1, plen, ws_rule, &state->applied_after_fixing_ws); + } + add_line_info(&postimage, newlines.buf + start, newlines.len - start, + (first == '+' ? 0 : LINE_COMMON)); + if (first == '+' && + (ws_rule & WS_BLANK_AT_EOF) && + ws_blank_line(patch + 1, plen, ws_rule)) + added_blank_line = 1; + break; + case '@': case '\\': + /* Ignore it, we already handled it */ + break; + default: + if (state->apply_verbosity > verbosity_normal) + error(_("invalid start of line: '%c'"), first); + applied_pos = -1; + goto out; + } + if (added_blank_line) { + if (!new_blank_lines_at_end) + found_new_blank_lines_at_end = hunk_linenr; + new_blank_lines_at_end++; + } + else if (is_blank_context) + ; + else + new_blank_lines_at_end = 0; + patch += len; + size -= len; + hunk_linenr++; + } + if (inaccurate_eof && + old > oldlines && old[-1] == '\n' && + newlines.len > 0 && newlines.buf[newlines.len - 1] == '\n') { + old--; + strbuf_setlen(&newlines, newlines.len - 1); + preimage.line_allocated[preimage.nr - 1].len--; + postimage.line_allocated[postimage.nr - 1].len--; + } + + leading = frag->leading; + trailing = frag->trailing; + + /* + * A hunk to change lines at the beginning would begin with + * @@ -1,L +N,M @@ + * but we need to be careful. -U0 that inserts before the second + * line also has this pattern. + * + * And a hunk to add to an empty file would begin with + * @@ -0,0 +N,M @@ + * + * In other words, a hunk that is (frag->oldpos <= 1) with or + * without leading context must match at the beginning. + */ + match_beginning = (!frag->oldpos || + (frag->oldpos == 1 && !state->unidiff_zero)); + + /* + * A hunk without trailing lines must match at the end. + * However, we simply cannot tell if a hunk must match end + * from the lack of trailing lines if the patch was generated + * with unidiff without any context. + */ + match_end = !state->unidiff_zero && !trailing; + + pos = frag->newpos ? (frag->newpos - 1) : 0; + preimage.buf = oldlines; + preimage.len = old - oldlines; + postimage.buf = newlines.buf; + postimage.len = newlines.len; + preimage.line = preimage.line_allocated; + postimage.line = postimage.line_allocated; + + for (;;) { + + applied_pos = find_pos(state, img, &preimage, &postimage, pos, + ws_rule, match_beginning, match_end); + + if (applied_pos >= 0) + break; + + /* Am I at my context limits? */ + if ((leading <= state->p_context) && (trailing <= state->p_context)) + break; + if (match_beginning || match_end) { + match_beginning = match_end = 0; + continue; + } + + /* + * Reduce the number of context lines; reduce both + * leading and trailing if they are equal otherwise + * just reduce the larger context. 
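+		 * (When leading == trailing, the first branch fires and
+		 * decrements "leading", which in turn makes the second branch
+		 * fire as well, so both ends lose one context line in the same
+		 * pass.)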
+ */ + if (leading >= trailing) { + remove_first_line(&preimage); + remove_first_line(&postimage); + pos--; + leading--; + } + if (trailing > leading) { + remove_last_line(&preimage); + remove_last_line(&postimage); + trailing--; + } + } + + if (applied_pos >= 0) { + if (new_blank_lines_at_end && + preimage.nr + applied_pos >= img->nr && + (ws_rule & WS_BLANK_AT_EOF) && + state->ws_error_action != nowarn_ws_error) { + record_ws_error(state, WS_BLANK_AT_EOF, "+", 1, + found_new_blank_lines_at_end); + if (state->ws_error_action == correct_ws_error) { + while (new_blank_lines_at_end--) + remove_last_line(&postimage); + } + /* + * We would want to prevent write_out_results() + * from taking place in apply_patch() that follows + * the callchain led us here, which is: + * apply_patch->check_patch_list->check_patch-> + * apply_data->apply_fragments->apply_one_fragment + */ + if (state->ws_error_action == die_on_ws_error) + state->apply = 0; + } + + if (state->apply_verbosity > verbosity_normal && applied_pos != pos) { + int offset = applied_pos - pos; + if (state->apply_in_reverse) + offset = 0 - offset; + fprintf_ln(stderr, + Q_("Hunk #%d succeeded at %d (offset %d line).", + "Hunk #%d succeeded at %d (offset %d lines).", + offset), + nth_fragment, applied_pos + 1, offset); + } + + /* + * Warn if it was necessary to reduce the number + * of context lines. + */ + if ((leading != frag->leading || + trailing != frag->trailing) && state->apply_verbosity > verbosity_silent) + fprintf_ln(stderr, _("Context reduced to (%ld/%ld)" + " to apply fragment at %d"), + leading, trailing, applied_pos+1); + update_image(state, img, applied_pos, &preimage, &postimage); + } else { + if (state->apply_verbosity > verbosity_normal) + error(_("while searching for:\n%.*s"), + (int)(old - oldlines), oldlines); + } + +out: + free(oldlines); + strbuf_release(&newlines); + free(preimage.line_allocated); + free(postimage.line_allocated); + + return (applied_pos < 0); +} + +static int apply_binary_fragment(struct apply_state *state, + struct image *img, + struct patch *patch) +{ + struct fragment *fragment = patch->fragments; + unsigned long len; + void *dst; + + if (!fragment) + return error(_("missing binary patch data for '%s'"), + patch->new_name ? + patch->new_name : + patch->old_name); + + /* Binary patch is irreversible without the optional second hunk */ + if (state->apply_in_reverse) { + if (!fragment->next) + return error(_("cannot reverse-apply a binary patch " + "without the reverse hunk to '%s'"), + patch->new_name + ? patch->new_name : patch->old_name); + fragment = fragment->next; + } + switch (fragment->binary_patch_method) { + case BINARY_DELTA_DEFLATED: + dst = patch_delta(img->buf, img->len, fragment->patch, + fragment->size, &len); + if (!dst) + return -1; + clear_image(img); + img->buf = dst; + img->len = len; + return 0; + case BINARY_LITERAL_DEFLATED: + clear_image(img); + img->len = fragment->size; + img->buf = xmemdupz(fragment->patch, img->len); + return 0; + } + return -1; +} + +/* + * Replace "img" with the result of applying the binary patch. + * The binary patch data itself in patch->fragment is still kept + * but the preimage prepared by the caller in "img" is freed here + * or in the helper function apply_binary_fragment() this calls. + */ +static int apply_binary(struct apply_state *state, + struct image *img, + struct patch *patch) +{ + const char *name = patch->old_name ? 
patch->old_name : patch->new_name; + struct object_id oid; + const unsigned hexsz = the_hash_algo->hexsz; + + /* + * For safety, we require patch index line to contain + * full hex textual object ID for old and new, at least for now. + */ + if (strlen(patch->old_oid_prefix) != hexsz || + strlen(patch->new_oid_prefix) != hexsz || + get_oid_hex(patch->old_oid_prefix, &oid) || + get_oid_hex(patch->new_oid_prefix, &oid)) + return error(_("cannot apply binary patch to '%s' " + "without full index line"), name); + + if (patch->old_name) { + /* + * See if the old one matches what the patch + * applies to. + */ + hash_object_file(the_hash_algo, img->buf, img->len, blob_type, + &oid); + if (strcmp(oid_to_hex(&oid), patch->old_oid_prefix)) + return error(_("the patch applies to '%s' (%s), " + "which does not match the " + "current contents."), + name, oid_to_hex(&oid)); + } + else { + /* Otherwise, the old one must be empty. */ + if (img->len) + return error(_("the patch applies to an empty " + "'%s' but it is not empty"), name); + } + + get_oid_hex(patch->new_oid_prefix, &oid); + if (is_null_oid(&oid)) { + clear_image(img); + return 0; /* deletion patch */ + } + + if (has_object(the_repository, &oid, 0)) { + /* We already have the postimage */ + enum object_type type; + unsigned long size; + char *result; + + result = read_object_file(&oid, &type, &size); + if (!result) + return error(_("the necessary postimage %s for " + "'%s' cannot be read"), + patch->new_oid_prefix, name); + clear_image(img); + img->buf = result; + img->len = size; + } else { + /* + * We have verified buf matches the preimage; + * apply the patch data to it, which is stored + * in the patch->fragments->{patch,size}. + */ + if (apply_binary_fragment(state, img, patch)) + return error(_("binary patch does not apply to '%s'"), + name); + + /* verify that the result matches */ + hash_object_file(the_hash_algo, img->buf, img->len, blob_type, + &oid); + if (strcmp(oid_to_hex(&oid), patch->new_oid_prefix)) + return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"), + name, patch->new_oid_prefix, oid_to_hex(&oid)); + } + + return 0; +} + +static int apply_fragments(struct apply_state *state, struct image *img, struct patch *patch) +{ + struct fragment *frag = patch->fragments; + const char *name = patch->old_name ? 
patch->old_name : patch->new_name; + unsigned ws_rule = patch->ws_rule; + unsigned inaccurate_eof = patch->inaccurate_eof; + int nth = 0; + + if (patch->is_binary) + return apply_binary(state, img, patch); + + while (frag) { + nth++; + if (apply_one_fragment(state, img, frag, inaccurate_eof, ws_rule, nth)) { + error(_("patch failed: %s:%ld"), name, frag->oldpos); + if (!state->apply_with_reject) + return -1; + frag->rejected = 1; + } + frag = frag->next; + } + return 0; +} + +static int read_blob_object(struct strbuf *buf, const struct object_id *oid, unsigned mode) +{ + if (S_ISGITLINK(mode)) { + strbuf_grow(buf, 100); + strbuf_addf(buf, "Subproject commit %s\n", oid_to_hex(oid)); + } else { + enum object_type type; + unsigned long sz; + char *result; + + result = read_object_file(oid, &type, &sz); + if (!result) + return -1; + /* XXX read_sha1_file NUL-terminates */ + strbuf_attach(buf, result, sz, sz + 1); + } + return 0; +} + +static int read_file_or_gitlink(const struct cache_entry *ce, struct strbuf *buf) +{ + if (!ce) + return 0; + return read_blob_object(buf, &ce->oid, ce->ce_mode); +} + +static struct patch *in_fn_table(struct apply_state *state, const char *name) +{ + struct string_list_item *item; + + if (name == NULL) + return NULL; + + item = string_list_lookup(&state->fn_table, name); + if (item != NULL) + return (struct patch *)item->util; + + return NULL; +} + +/* + * item->util in the filename table records the status of the path. + * Usually it points at a patch (whose result records the contents + * of it after applying it), but it could be PATH_WAS_DELETED for a + * path that a previously applied patch has already removed, or + * PATH_TO_BE_DELETED for a path that a later patch would remove. + * + * The latter is needed to deal with a case where two paths A and B + * are swapped by first renaming A to B and then renaming B to A; + * moving A to B should not be prevented due to presence of B as we + * will remove it in a later patch. 
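+ * (prepare_fn_table() walks the whole patch series up front and seeds
+ * PATH_TO_BE_DELETED for every old_name that is renamed away or
+ * deleted, which is what makes the swap case work.)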
+ */ +#define PATH_TO_BE_DELETED ((struct patch *) -2) +#define PATH_WAS_DELETED ((struct patch *) -1) + +static int to_be_deleted(struct patch *patch) +{ + return patch == PATH_TO_BE_DELETED; +} + +static int was_deleted(struct patch *patch) +{ + return patch == PATH_WAS_DELETED; +} + +static void add_to_fn_table(struct apply_state *state, struct patch *patch) +{ + struct string_list_item *item; + + /* + * Always add new_name unless patch is a deletion + * This should cover the cases for normal diffs, + * file creations and copies + */ + if (patch->new_name != NULL) { + item = string_list_insert(&state->fn_table, patch->new_name); + item->util = patch; + } + + /* + * store a failure on rename/deletion cases because + * later chunks shouldn't patch old names + */ + if ((patch->new_name == NULL) || (patch->is_rename)) { + item = string_list_insert(&state->fn_table, patch->old_name); + item->util = PATH_WAS_DELETED; + } +} + +static void prepare_fn_table(struct apply_state *state, struct patch *patch) +{ + /* + * store information about incoming file deletion + */ + while (patch) { + if ((patch->new_name == NULL) || (patch->is_rename)) { + struct string_list_item *item; + item = string_list_insert(&state->fn_table, patch->old_name); + item->util = PATH_TO_BE_DELETED; + } + patch = patch->next; + } +} + +static int checkout_target(struct index_state *istate, + struct cache_entry *ce, struct stat *st) +{ + struct checkout costate = CHECKOUT_INIT; + + costate.refresh_cache = 1; + costate.istate = istate; + if (checkout_entry(ce, &costate, NULL, NULL) || + lstat(ce->name, st)) + return error(_("cannot checkout %s"), ce->name); + return 0; +} + +static struct patch *previous_patch(struct apply_state *state, + struct patch *patch, + int *gone) +{ + struct patch *previous; + + *gone = 0; + if (patch->is_copy || patch->is_rename) + return NULL; /* "git" patches do not depend on the order */ + + previous = in_fn_table(state, patch->old_name); + if (!previous) + return NULL; + + if (to_be_deleted(previous)) + return NULL; /* the deletion hasn't happened yet */ + + if (was_deleted(previous)) + *gone = 1; + + return previous; +} + +static int verify_index_match(struct apply_state *state, + const struct cache_entry *ce, + struct stat *st) +{ + if (S_ISGITLINK(ce->ce_mode)) { + if (!S_ISDIR(st->st_mode)) + return -1; + return 0; + } + return ie_match_stat(state->repo->index, ce, st, + CE_MATCH_IGNORE_VALID | CE_MATCH_IGNORE_SKIP_WORKTREE); +} + +#define SUBMODULE_PATCH_WITHOUT_INDEX 1 + +static int load_patch_target(struct apply_state *state, + struct strbuf *buf, + const struct cache_entry *ce, + struct stat *st, + struct patch *patch, + const char *name, + unsigned expected_mode) +{ + if (state->cached || state->check_index) { + if (read_file_or_gitlink(ce, buf)) + return error(_("failed to read %s"), name); + } else if (name) { + if (S_ISGITLINK(expected_mode)) { + if (ce) + return read_file_or_gitlink(ce, buf); + else + return SUBMODULE_PATCH_WITHOUT_INDEX; + } else if (has_symlink_leading_path(name, strlen(name))) { + return error(_("reading from '%s' beyond a symbolic link"), name); + } else { + if (read_old_data(st, patch, name, buf)) + return error(_("failed to read %s"), name); + } + } + return 0; +} + +/* + * We are about to apply "patch"; populate the "image" with the + * current version we have, from the working tree or from the index, + * depending on the situation e.g. --cached/--index. 
If we are + * applying a non-git patch that incrementally updates the tree, + * we read from the result of a previous diff. + */ +static int load_preimage(struct apply_state *state, + struct image *image, + struct patch *patch, struct stat *st, + const struct cache_entry *ce) +{ + struct strbuf buf = STRBUF_INIT; + size_t len; + char *img; + struct patch *previous; + int status; + + previous = previous_patch(state, patch, &status); + if (status) + return error(_("path %s has been renamed/deleted"), + patch->old_name); + if (previous) { + /* We have a patched copy in memory; use that. */ + strbuf_add(&buf, previous->result, previous->resultsize); + } else { + status = load_patch_target(state, &buf, ce, st, patch, + patch->old_name, patch->old_mode); + if (status < 0) + return status; + else if (status == SUBMODULE_PATCH_WITHOUT_INDEX) { + /* + * There is no way to apply subproject + * patch without looking at the index. + * NEEDSWORK: shouldn't this be flagged + * as an error??? + */ + free_fragment_list(patch->fragments); + patch->fragments = NULL; + } else if (status) { + return error(_("failed to read %s"), patch->old_name); + } + } + + img = strbuf_detach(&buf, &len); + prepare_image(image, img, len, !patch->is_binary); + return 0; +} + +static int resolve_to(struct image *image, const struct object_id *result_id) +{ + unsigned long size; + enum object_type type; + + clear_image(image); + + image->buf = read_object_file(result_id, &type, &size); + if (!image->buf || type != OBJ_BLOB) + die("unable to read blob object %s", oid_to_hex(result_id)); + image->len = size; + + return 0; +} + +static int three_way_merge(struct apply_state *state, + struct image *image, + char *path, + const struct object_id *base, + const struct object_id *ours, + const struct object_id *theirs) +{ + mmfile_t base_file, our_file, their_file; + mmbuffer_t result = { NULL }; + int status; + + /* resolve trivial cases first */ + if (oideq(base, ours)) + return resolve_to(image, theirs); + else if (oideq(base, theirs) || oideq(ours, theirs)) + return resolve_to(image, ours); + + read_mmblob(&base_file, base); + read_mmblob(&our_file, ours); + read_mmblob(&their_file, theirs); + status = ll_merge(&result, path, + &base_file, "base", + &our_file, "ours", + &their_file, "theirs", + state->repo->index, + NULL); + free(base_file.ptr); + free(our_file.ptr); + free(their_file.ptr); + if (status < 0 || !result.ptr) { + free(result.ptr); + return -1; + } + clear_image(image); + image->buf = result.ptr; + image->len = result.size; + + return status; +} + +/* + * When directly falling back to add/add three-way merge, we read from + * the current contents of the new_name. In no cases other than that + * this function will be called. 
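+ * (Within this file the only caller is try_threeway(), for a creation
+ * patch whose target already exists; what we read here becomes "ours"
+ * in the three-way merge.)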
+ */ +static int load_current(struct apply_state *state, + struct image *image, + struct patch *patch) +{ + struct strbuf buf = STRBUF_INIT; + int status, pos; + size_t len; + char *img; + struct stat st; + struct cache_entry *ce; + char *name = patch->new_name; + unsigned mode = patch->new_mode; + + if (!patch->is_new) + BUG("patch to %s is not a creation", patch->old_name); + + pos = index_name_pos(state->repo->index, name, strlen(name)); + if (pos < 0) + return error(_("%s: does not exist in index"), name); + ce = state->repo->index->cache[pos]; + if (lstat(name, &st)) { + if (errno != ENOENT) + return error_errno("%s", name); + if (checkout_target(state->repo->index, ce, &st)) + return -1; + } + if (verify_index_match(state, ce, &st)) + return error(_("%s: does not match index"), name); + + status = load_patch_target(state, &buf, ce, &st, patch, name, mode); + if (status < 0) + return status; + else if (status) + return -1; + img = strbuf_detach(&buf, &len); + prepare_image(image, img, len, !patch->is_binary); + return 0; +} + +static int try_threeway(struct apply_state *state, + struct image *image, + struct patch *patch, + struct stat *st, + const struct cache_entry *ce) +{ + struct object_id pre_oid, post_oid, our_oid; + struct strbuf buf = STRBUF_INIT; + size_t len; + int status; + char *img; + struct image tmp_image; + + /* No point falling back to 3-way merge in these cases */ + if (patch->is_delete || + S_ISGITLINK(patch->old_mode) || S_ISGITLINK(patch->new_mode)) + return -1; + + /* Preimage the patch was prepared for */ + if (patch->is_new) + write_object_file("", 0, blob_type, &pre_oid); + else if (get_oid(patch->old_oid_prefix, &pre_oid) || + read_blob_object(&buf, &pre_oid, patch->old_mode)) + return error(_("repository lacks the necessary blob to perform 3-way merge.")); + + if (state->apply_verbosity > verbosity_silent && patch->direct_to_threeway) + fprintf(stderr, _("Performing three-way merge...\n")); + + img = strbuf_detach(&buf, &len); + prepare_image(&tmp_image, img, len, 1); + /* Apply the patch to get the post image */ + if (apply_fragments(state, &tmp_image, patch) < 0) { + clear_image(&tmp_image); + return -1; + } + /* post_oid is theirs */ + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid); + clear_image(&tmp_image); + + /* our_oid is ours */ + if (patch->is_new) { + if (load_current(state, &tmp_image, patch)) + return error(_("cannot read the current contents of '%s'"), + patch->new_name); + } else { + if (load_preimage(state, &tmp_image, patch, st, ce)) + return error(_("cannot read the current contents of '%s'"), + patch->old_name); + } + write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid); + clear_image(&tmp_image); + + /* in-core three-way merge between post and our using pre as base */ + status = three_way_merge(state, image, patch->new_name, + &pre_oid, &our_oid, &post_oid); + if (status < 0) { + if (state->apply_verbosity > verbosity_silent) + fprintf(stderr, + _("Failed to perform three-way merge...\n")); + return status; + } + + if (status) { + patch->conflicted_threeway = 1; + if (patch->is_new) + oidclr(&patch->threeway_stage[0]); + else + oidcpy(&patch->threeway_stage[0], &pre_oid); + oidcpy(&patch->threeway_stage[1], &our_oid); + oidcpy(&patch->threeway_stage[2], &post_oid); + if (state->apply_verbosity > verbosity_silent) + fprintf(stderr, + _("Applied patch to '%s' with conflicts.\n"), + patch->new_name); + } else { + if (state->apply_verbosity > verbosity_silent) + fprintf(stderr, + _("Applied patch to '%s' 
cleanly.\n"), + patch->new_name); + } + return 0; +} + +static int apply_data(struct apply_state *state, struct patch *patch, + struct stat *st, const struct cache_entry *ce) +{ + struct image image; + + if (load_preimage(state, &image, patch, st, ce) < 0) + return -1; + + if (!state->threeway || try_threeway(state, &image, patch, st, ce) < 0) { + if (state->apply_verbosity > verbosity_silent && + state->threeway && !patch->direct_to_threeway) + fprintf(stderr, _("Falling back to direct application...\n")); + + /* Note: with --reject, apply_fragments() returns 0 */ + if (patch->direct_to_threeway || apply_fragments(state, &image, patch) < 0) + return -1; + } + patch->result = image.buf; + patch->resultsize = image.len; + add_to_fn_table(state, patch); + free(image.line_allocated); + + if (0 < patch->is_delete && patch->resultsize) + return error(_("removal patch leaves file contents")); + + return 0; +} + +/* + * If "patch" that we are looking at modifies or deletes what we have, + * we would want it not to lose any local modification we have, either + * in the working tree or in the index. + * + * This also decides if a non-git patch is a creation patch or a + * modification to an existing empty file. We do not check the state + * of the current tree for a creation patch in this function; the caller + * check_patch() separately makes sure (and errors out otherwise) that + * the path the patch creates does not exist in the current tree. + */ +static int check_preimage(struct apply_state *state, + struct patch *patch, + struct cache_entry **ce, + struct stat *st) +{ + const char *old_name = patch->old_name; + struct patch *previous = NULL; + int stat_ret = 0, status; + unsigned st_mode = 0; + + if (!old_name) + return 0; + + assert(patch->is_new <= 0); + previous = previous_patch(state, patch, &status); + + if (status) + return error(_("path %s has been renamed/deleted"), old_name); + if (previous) { + st_mode = previous->new_mode; + } else if (!state->cached) { + stat_ret = lstat(old_name, st); + if (stat_ret && errno != ENOENT) + return error_errno("%s", old_name); + } + + if (state->check_index && !previous) { + int pos = index_name_pos(state->repo->index, old_name, + strlen(old_name)); + if (pos < 0) { + if (patch->is_new < 0) + goto is_new; + return error(_("%s: does not exist in index"), old_name); + } + *ce = state->repo->index->cache[pos]; + if (stat_ret < 0) { + if (checkout_target(state->repo->index, *ce, st)) + return -1; + } + if (!state->cached && verify_index_match(state, *ce, st)) + return error(_("%s: does not match index"), old_name); + if (state->cached) + st_mode = (*ce)->ce_mode; + } else if (stat_ret < 0) { + if (patch->is_new < 0) + goto is_new; + return error_errno("%s", old_name); + } + + if (!state->cached && !previous) + st_mode = ce_mode_from_stat(*ce, st->st_mode); + + if (patch->is_new < 0) + patch->is_new = 0; + if (!patch->old_mode) + patch->old_mode = st_mode; + if ((st_mode ^ patch->old_mode) & S_IFMT) + return error(_("%s: wrong type"), old_name); + if (st_mode != patch->old_mode) + warning(_("%s has type %o, expected %o"), + old_name, st_mode, patch->old_mode); + if (!patch->new_mode && !patch->is_delete) + patch->new_mode = st_mode; + return 0; + + is_new: + patch->is_new = 1; + patch->is_delete = 0; + FREE_AND_NULL(patch->old_name); + return 0; +} + + +#define EXISTS_IN_INDEX 1 +#define EXISTS_IN_WORKTREE 2 +#define EXISTS_IN_INDEX_AS_ITA 3 + +static int check_to_create(struct apply_state *state, + const char *new_name, + int ok_if_exists) +{ + struct 
stat nst; + + if (state->check_index && (!ok_if_exists || !state->cached)) { + int pos; + + pos = index_name_pos(state->repo->index, new_name, strlen(new_name)); + if (pos >= 0) { + struct cache_entry *ce = state->repo->index->cache[pos]; + + /* allow ITA, as they do not yet exist in the index */ + if (!ok_if_exists && !(ce->ce_flags & CE_INTENT_TO_ADD)) + return EXISTS_IN_INDEX; + + /* ITA entries can never match working tree files */ + if (!state->cached && (ce->ce_flags & CE_INTENT_TO_ADD)) + return EXISTS_IN_INDEX_AS_ITA; + } + } + + if (state->cached) + return 0; + + if (!lstat(new_name, &nst)) { + if (S_ISDIR(nst.st_mode) || ok_if_exists) + return 0; + /* + * A leading component of new_name might be a symlink + * that is going to be removed with this patch, but + * still pointing at somewhere that has the path. + * In such a case, path "new_name" does not exist as + * far as git is concerned. + */ + if (has_symlink_leading_path(new_name, strlen(new_name))) + return 0; + + return EXISTS_IN_WORKTREE; + } else if (!is_missing_file_error(errno)) { + return error_errno("%s", new_name); + } + return 0; +} + +static uintptr_t register_symlink_changes(struct apply_state *state, + const char *path, + uintptr_t what) +{ + struct string_list_item *ent; + + ent = string_list_lookup(&state->symlink_changes, path); + if (!ent) { + ent = string_list_insert(&state->symlink_changes, path); + ent->util = (void *)0; + } + ent->util = (void *)(what | ((uintptr_t)ent->util)); + return (uintptr_t)ent->util; +} + +static uintptr_t check_symlink_changes(struct apply_state *state, const char *path) +{ + struct string_list_item *ent; + + ent = string_list_lookup(&state->symlink_changes, path); + if (!ent) + return 0; + return (uintptr_t)ent->util; +} + +static void prepare_symlink_changes(struct apply_state *state, struct patch *patch) +{ + for ( ; patch; patch = patch->next) { + if ((patch->old_name && S_ISLNK(patch->old_mode)) && + (patch->is_rename || patch->is_delete)) + /* the symlink at patch->old_name is removed */ + register_symlink_changes(state, patch->old_name, APPLY_SYMLINK_GOES_AWAY); + + if (patch->new_name && S_ISLNK(patch->new_mode)) + /* the symlink at patch->new_name is created or remains */ + register_symlink_changes(state, patch->new_name, APPLY_SYMLINK_IN_RESULT); + } +} + +static int path_is_beyond_symlink_1(struct apply_state *state, struct strbuf *name) +{ + do { + unsigned int change; + + while (--name->len && name->buf[name->len] != '/') + ; /* scan backwards */ + if (!name->len) + break; + name->buf[name->len] = '\0'; + change = check_symlink_changes(state, name->buf); + if (change & APPLY_SYMLINK_IN_RESULT) + return 1; + if (change & APPLY_SYMLINK_GOES_AWAY) + /* + * This cannot be "return 0", because we may + * see a new one created at a higher level. 
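The loop above walks every leading directory of the path, deepest first, by repeatedly truncating the buffer at the previous '/'. A standalone sketch of that walk using only the standard library (the function names here are illustrative, not git's):

#include <stdio.h>
#include <string.h>

/* Call fn() for every leading directory of "path", deepest first. */
static void for_each_leading_dir(const char *path,
                                 void (*fn)(const char *dir))
{
        char buf[4096];
        size_t len = strlen(path);

        if (len >= sizeof(buf))
                return;
        memcpy(buf, path, len + 1);

        while (len) {
                /* scan backwards to the previous '/' */
                while (--len && buf[len] != '/')
                        ;
                if (!len)
                        break;
                buf[len] = '\0';        /* truncate to the leading directory */
                fn(buf);
        }
}

static void show(const char *dir)
{
        printf("check %s\n", dir);
}

int main(void)
{
        /* prints "check a/b/c", "check a/b", "check a" */
        for_each_leading_dir("a/b/c/file.txt", show);
        return 0;
}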
+ */ + continue; + + /* otherwise, check the preimage */ + if (state->check_index) { + struct cache_entry *ce; + + ce = index_file_exists(state->repo->index, name->buf, + name->len, ignore_case); + if (ce && S_ISLNK(ce->ce_mode)) + return 1; + } else { + struct stat st; + if (!lstat(name->buf, &st) && S_ISLNK(st.st_mode)) + return 1; + } + } while (1); + return 0; +} + +static int path_is_beyond_symlink(struct apply_state *state, const char *name_) +{ + int ret; + struct strbuf name = STRBUF_INIT; + + assert(*name_ != '\0'); + strbuf_addstr(&name, name_); + ret = path_is_beyond_symlink_1(state, &name); + strbuf_release(&name); + + return ret; +} + +static int check_unsafe_path(struct patch *patch) +{ + const char *old_name = NULL; + const char *new_name = NULL; + if (patch->is_delete) + old_name = patch->old_name; + else if (!patch->is_new && !patch->is_copy) + old_name = patch->old_name; + if (!patch->is_delete) + new_name = patch->new_name; + + if (old_name && !verify_path(old_name, patch->old_mode)) + return error(_("invalid path '%s'"), old_name); + if (new_name && !verify_path(new_name, patch->new_mode)) + return error(_("invalid path '%s'"), new_name); + return 0; +} + +/* + * Check and apply the patch in-core; leave the result in patch->result + * for the caller to write it out to the final destination. + */ +static int check_patch(struct apply_state *state, struct patch *patch) +{ + struct stat st; + const char *old_name = patch->old_name; + const char *new_name = patch->new_name; + const char *name = old_name ? old_name : new_name; + struct cache_entry *ce = NULL; + struct patch *tpatch; + int ok_if_exists; + int status; + + patch->rejected = 1; /* we will drop this after we succeed */ + + status = check_preimage(state, patch, &ce, &st); + if (status) + return status; + old_name = patch->old_name; + + /* + * A type-change diff is always split into a patch to delete + * old, immediately followed by a patch to create new (see + * diff.c::run_diff()); in such a case it is Ok that the entry + * to be deleted by the previous patch is still in the working + * tree and in the index. + * + * A patch to swap-rename between A and B would first rename A + * to B and then rename B to A. While applying the first one, + * the presence of B should not stop A from getting renamed to + * B; ask to_be_deleted() about the later rename. Removal of + * B and rename from A to B is handled the same way by asking + * was_deleted(). 
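The ordering this comment leans on comes from the two-phase write-out further down in this file: phase zero removes, phase one creates, so the deletion half of a type change or swap-rename is flushed before the creation half needs the path. A minimal sketch of that phase loop, with a made-up struct op standing in for git's struct patch:

#include <stdio.h>

struct op { const char *path; int is_delete; };

/* Phase 0 removes, phase 1 creates: a type-change pair
 * (delete old, create new) applied in this order never
 * trips over its own leftover working-tree entry. */
static void write_out(const struct op *ops, int nr)
{
        int phase, i;

        for (phase = 0; phase < 2; phase++)
                for (i = 0; i < nr; i++) {
                        if (ops[i].is_delete && phase == 0)
                                printf("remove %s\n", ops[i].path);
                        if (!ops[i].is_delete && phase == 1)
                                printf("create %s\n", ops[i].path);
                }
}

int main(void)
{
        struct op typechange[] = {
                { "path/to/entry", 1 },  /* old entry (e.g. symlink) goes away */
                { "path/to/entry", 0 },  /* new entry (e.g. regular file) appears */
        };

        write_out(typechange, 2);
        return 0;
}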
+ */ + if ((tpatch = in_fn_table(state, new_name)) && + (was_deleted(tpatch) || to_be_deleted(tpatch))) + ok_if_exists = 1; + else + ok_if_exists = 0; + + if (new_name && + ((0 < patch->is_new) || patch->is_rename || patch->is_copy)) { + int err = check_to_create(state, new_name, ok_if_exists); + + if (err && state->threeway) { + patch->direct_to_threeway = 1; + } else switch (err) { + case 0: + break; /* happy */ + case EXISTS_IN_INDEX: + return error(_("%s: already exists in index"), new_name); + case EXISTS_IN_INDEX_AS_ITA: + return error(_("%s: does not match index"), new_name); + case EXISTS_IN_WORKTREE: + return error(_("%s: already exists in working directory"), + new_name); + default: + return err; + } + + if (!patch->new_mode) { + if (0 < patch->is_new) + patch->new_mode = S_IFREG | 0644; + else + patch->new_mode = patch->old_mode; + } + } + + if (new_name && old_name) { + int same = !strcmp(old_name, new_name); + if (!patch->new_mode) + patch->new_mode = patch->old_mode; + if ((patch->old_mode ^ patch->new_mode) & S_IFMT) { + if (same) + return error(_("new mode (%o) of %s does not " + "match old mode (%o)"), + patch->new_mode, new_name, + patch->old_mode); + else + return error(_("new mode (%o) of %s does not " + "match old mode (%o) of %s"), + patch->new_mode, new_name, + patch->old_mode, old_name); + } + } + + if (!state->unsafe_paths && check_unsafe_path(patch)) + return -128; + + /* + * An attempt to read from or delete a path that is beyond a + * symbolic link will be prevented by load_patch_target() that + * is called at the beginning of apply_data() so we do not + * have to worry about a patch marked with "is_delete" bit + * here. We however need to make sure that the patch result + * is not deposited to a path that is beyond a symbolic link + * here. + */ + if (!patch->is_delete && path_is_beyond_symlink(state, patch->new_name)) + return error(_("affected file '%s' is beyond a symbolic link"), + patch->new_name); + + if (apply_data(state, patch, &st, ce) < 0) + return error(_("%s: patch does not apply"), name); + patch->rejected = 0; + return 0; +} + +static int check_patch_list(struct apply_state *state, struct patch *patch) +{ + int err = 0; + + prepare_symlink_changes(state, patch); + prepare_fn_table(state, patch); + while (patch) { + int res; + if (state->apply_verbosity > verbosity_normal) + say_patch_name(stderr, + _("Checking patch %s..."), patch); + res = check_patch(state, patch); + if (res == -128) + return -128; + err |= res; + patch = patch->next; + } + return err; +} + +static int read_apply_cache(struct apply_state *state) +{ + if (state->index_file) + return read_index_from(state->repo->index, state->index_file, + get_git_dir()); + else + return repo_read_index(state->repo); +} + +/* This function tries to read the object name from the current index */ +static int get_current_oid(struct apply_state *state, const char *path, + struct object_id *oid) +{ + int pos; + + if (read_apply_cache(state) < 0) + return -1; + pos = index_name_pos(state->repo->index, path, strlen(path)); + if (pos < 0) + return -1; + oidcpy(oid, &state->repo->index->cache[pos]->oid); + return 0; +} + +static int preimage_oid_in_gitlink_patch(struct patch *p, struct object_id *oid) +{ + /* + * A usable gitlink patch has only one fragment (hunk) that looks like: + * @@ -1 +1 @@ + * -Subproject commit + * +Subproject commit + * or + * @@ -1 +0,0 @@ + * -Subproject commit + * for a removal patch. 
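A standalone sketch of the textual check just described: pull the full object name out of a "-Subproject commit <hex>" preimage line. It hard-codes the 40-character SHA-1 length and skips the cross-check against the abbreviated name on the index line that the real code also performs:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define HEXSZ 40  /* SHA-1; would be 64 for SHA-256 */

/* Extract the object name from a "-Subproject commit <hex>" preimage
 * line; return 0 and fill hex[] on success, -1 otherwise. */
static int parse_gitlink_preimage(const char *line, char *hex)
{
        static const char heading[] = "-Subproject commit ";
        size_t i;

        if (strncmp(line, heading, strlen(heading)))
                return -1;
        line += strlen(heading);
        for (i = 0; i < HEXSZ; i++) {
                if (!isxdigit((unsigned char)line[i]))
                        return -1;
                hex[i] = line[i];
        }
        hex[HEXSZ] = '\0';
        return (line[HEXSZ] == '\n' || line[HEXSZ] == '\0') ? 0 : -1;
}

int main(void)
{
        char hex[HEXSZ + 1];
        const char *line =
                "-Subproject commit 0123456789abcdef0123456789abcdef01234567\n";

        if (!parse_gitlink_preimage(line, hex))
                printf("preimage gitlink: %s\n", hex);
        return 0;
}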
+ */ + struct fragment *hunk = p->fragments; + static const char heading[] = "-Subproject commit "; + char *preimage; + + if (/* does the patch have only one hunk? */ + hunk && !hunk->next && + /* is its preimage one line? */ + hunk->oldpos == 1 && hunk->oldlines == 1 && + /* does preimage begin with the heading? */ + (preimage = memchr(hunk->patch, '\n', hunk->size)) != NULL && + starts_with(++preimage, heading) && + /* does it record full SHA-1? */ + !get_oid_hex(preimage + sizeof(heading) - 1, oid) && + preimage[sizeof(heading) + the_hash_algo->hexsz - 1] == '\n' && + /* does the abbreviated name on the index line agree with it? */ + starts_with(preimage + sizeof(heading) - 1, p->old_oid_prefix)) + return 0; /* it all looks fine */ + + /* we may have full object name on the index line */ + return get_oid_hex(p->old_oid_prefix, oid); +} + +/* Build an index that contains just the files needed for a 3way merge */ +static int build_fake_ancestor(struct apply_state *state, struct patch *list) +{ + struct patch *patch; + struct index_state result = { NULL }; + struct lock_file lock = LOCK_INIT; + int res; + + /* Once we start supporting the reverse patch, it may be + * worth showing the new sha1 prefix, but until then... + */ + for (patch = list; patch; patch = patch->next) { + struct object_id oid; + struct cache_entry *ce; + const char *name; + + name = patch->old_name ? patch->old_name : patch->new_name; + if (0 < patch->is_new) + continue; + + if (S_ISGITLINK(patch->old_mode)) { + if (!preimage_oid_in_gitlink_patch(patch, &oid)) + ; /* ok, the textual part looks sane */ + else + return error(_("sha1 information is lacking or " + "useless for submodule %s"), name); + } else if (!get_oid_blob(patch->old_oid_prefix, &oid)) { + ; /* ok */ + } else if (!patch->lines_added && !patch->lines_deleted) { + /* mode-only change: update the current */ + if (get_current_oid(state, patch->old_name, &oid)) + return error(_("mode change for %s, which is not " + "in current HEAD"), name); + } else + return error(_("sha1 information is lacking or useless " + "(%s)."), name); + + ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0); + if (!ce) + return error(_("make_cache_entry failed for path '%s'"), + name); + if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) { + discard_cache_entry(ce); + return error(_("could not add %s to temporary index"), + name); + } + } + + hold_lock_file_for_update(&lock, state->fake_ancestor, LOCK_DIE_ON_ERROR); + res = write_locked_index(&result, &lock, COMMIT_LOCK); + discard_index(&result); + + if (res) + return error(_("could not write temporary index to %s"), + state->fake_ancestor); + + return 0; +} + +static void stat_patch_list(struct apply_state *state, struct patch *patch) +{ + int files, adds, dels; + + for (files = adds = dels = 0 ; patch ; patch = patch->next) { + files++; + adds += patch->lines_added; + dels += patch->lines_deleted; + show_stats(state, patch); + } + + print_stat_summary(stdout, files, adds, dels); +} + +static void numstat_patch_list(struct apply_state *state, + struct patch *patch) +{ + for ( ; patch; patch = patch->next) { + const char *name; + name = patch->new_name ? 
patch->new_name : patch->old_name; + if (patch->is_binary) + printf("-\t-\t"); + else + printf("%d\t%d\t", patch->lines_added, patch->lines_deleted); + write_name_quoted(name, stdout, state->line_termination); + } +} + +static void show_file_mode_name(const char *newdelete, unsigned int mode, const char *name) +{ + if (mode) + printf(" %s mode %06o %s\n", newdelete, mode, name); + else + printf(" %s %s\n", newdelete, name); +} + +static void show_mode_change(struct patch *p, int show_name) +{ + if (p->old_mode && p->new_mode && p->old_mode != p->new_mode) { + if (show_name) + printf(" mode change %06o => %06o %s\n", + p->old_mode, p->new_mode, p->new_name); + else + printf(" mode change %06o => %06o\n", + p->old_mode, p->new_mode); + } +} + +static void show_rename_copy(struct patch *p) +{ + const char *renamecopy = p->is_rename ? "rename" : "copy"; + const char *old_name, *new_name; + + /* Find common prefix */ + old_name = p->old_name; + new_name = p->new_name; + while (1) { + const char *slash_old, *slash_new; + slash_old = strchr(old_name, '/'); + slash_new = strchr(new_name, '/'); + if (!slash_old || + !slash_new || + slash_old - old_name != slash_new - new_name || + memcmp(old_name, new_name, slash_new - new_name)) + break; + old_name = slash_old + 1; + new_name = slash_new + 1; + } + /* p->old_name through old_name is the common prefix, and old_name and + * new_name through the end of names are renames + */ + if (old_name != p->old_name) + printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy, + (int)(old_name - p->old_name), p->old_name, + old_name, new_name, p->score); + else + printf(" %s %s => %s (%d%%)\n", renamecopy, + p->old_name, p->new_name, p->score); + show_mode_change(p, 0); +} + +static void summary_patch_list(struct patch *patch) +{ + struct patch *p; + + for (p = patch; p; p = p->next) { + if (p->is_new) + show_file_mode_name("create", p->new_mode, p->new_name); + else if (p->is_delete) + show_file_mode_name("delete", p->old_mode, p->old_name); + else { + if (p->is_rename || p->is_copy) + show_rename_copy(p); + else { + if (p->score) { + printf(" rewrite %s (%d%%)\n", + p->new_name, p->score); + show_mode_change(p, 0); + } + else + show_mode_change(p, 1); + } + } + } +} + +static void patch_stats(struct apply_state *state, struct patch *patch) +{ + int lines = patch->lines_added + patch->lines_deleted; + + if (lines > state->max_change) + state->max_change = lines; + if (patch->old_name) { + int len = quote_c_style(patch->old_name, NULL, NULL, 0); + if (!len) + len = strlen(patch->old_name); + if (len > state->max_len) + state->max_len = len; + } + if (patch->new_name) { + int len = quote_c_style(patch->new_name, NULL, NULL, 0); + if (!len) + len = strlen(patch->new_name); + if (len > state->max_len) + state->max_len = len; + } +} + +static int remove_file(struct apply_state *state, struct patch *patch, int rmdir_empty) +{ + if (state->update_index && !state->ita_only) { + if (remove_file_from_index(state->repo->index, patch->old_name) < 0) + return error(_("unable to remove %s from index"), patch->old_name); + } + if (!state->cached) { + if (!remove_or_warn(patch->old_mode, patch->old_name) && rmdir_empty) { + remove_path(patch->old_name); + } + } + return 0; +} + +static int add_index_file(struct apply_state *state, + const char *path, + unsigned mode, + void *buf, + unsigned long size) +{ + struct stat st; + struct cache_entry *ce; + int namelen = strlen(path); + + ce = make_empty_cache_entry(state->repo->index, namelen); + memcpy(ce->name, path, namelen); + ce->ce_mode 
= create_ce_mode(mode); + ce->ce_flags = create_ce_flags(0); + ce->ce_namelen = namelen; + if (state->ita_only) { + ce->ce_flags |= CE_INTENT_TO_ADD; + set_object_name_for_intent_to_add_entry(ce); + } else if (S_ISGITLINK(mode)) { + const char *s; + + if (!skip_prefix(buf, "Subproject commit ", &s) || + get_oid_hex(s, &ce->oid)) { + discard_cache_entry(ce); + return error(_("corrupt patch for submodule %s"), path); + } + } else { + if (!state->cached) { + if (lstat(path, &st) < 0) { + discard_cache_entry(ce); + return error_errno(_("unable to stat newly " + "created file '%s'"), + path); + } + fill_stat_cache_info(state->repo->index, ce, &st); + } + if (write_object_file(buf, size, blob_type, &ce->oid) < 0) { + discard_cache_entry(ce); + return error(_("unable to create backing store " + "for newly created file %s"), path); + } + } + if (add_index_entry(state->repo->index, ce, ADD_CACHE_OK_TO_ADD) < 0) { + discard_cache_entry(ce); + return error(_("unable to add cache entry for %s"), path); + } + + return 0; +} + +/* + * Returns: + * -1 if an unrecoverable error happened + * 0 if everything went well + * 1 if a recoverable error happened + */ +static int try_create_file(struct apply_state *state, const char *path, + unsigned int mode, const char *buf, + unsigned long size) +{ + int fd, res; + struct strbuf nbuf = STRBUF_INIT; + + if (S_ISGITLINK(mode)) { + struct stat st; + if (!lstat(path, &st) && S_ISDIR(st.st_mode)) + return 0; + return !!mkdir(path, 0777); + } + + if (has_symlinks && S_ISLNK(mode)) + /* Although buf:size is counted string, it also is NUL + * terminated. + */ + return !!symlink(buf, path); + + fd = open(path, O_CREAT | O_EXCL | O_WRONLY, (mode & 0100) ? 0777 : 0666); + if (fd < 0) + return 1; + + if (convert_to_working_tree(state->repo->index, path, buf, size, &nbuf, NULL)) { + size = nbuf.len; + buf = nbuf.buf; + } + + res = write_in_full(fd, buf, size) < 0; + if (res) + error_errno(_("failed to write to '%s'"), path); + strbuf_release(&nbuf); + + if (close(fd) < 0 && !res) + return error_errno(_("closing file '%s'"), path); + + return res ? -1 : 0; +} + +/* + * We optimistically assume that the directories exist, + * which is true 99% of the time anyway. If they don't, + * we create them and try again. + * + * Returns: + * -1 on error + * 0 otherwise + */ +static int create_one_file(struct apply_state *state, + char *path, + unsigned mode, + const char *buf, + unsigned long size) +{ + int res; + + if (state->cached) + return 0; + + /* + * We already try to detect whether files are beyond a symlink in our + * up-front checks. But in the case where symlinks are created by any + * of the intermediate hunks it can happen that our up-front checks + * didn't yet see the symlink, but at the point of arriving here there + * in fact is one. We thus repeat the check for symlinks here. + * + * Note that this does not make the up-front check obsolete as the + * failure mode is different: + * + * - The up-front checks cause us to abort before we have written + * anything into the working directory. So when we exit this way the + * working directory remains clean. + * + * - The checks here happen in the middle of the action where we have + * already started to apply the patch. The end result will be a dirty + * working directory. + * + * Ideally, we should update the up-front checks to catch what would + * happen when we apply the patch before we damage the working tree. + * We have all the information necessary to do so. 
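(As an aside on the creation path below: the file is first opened with O_CREAT|O_EXCL on the optimistic assumption that its directories exist, and only on ENOENT are the leading directories created and the open retried. A simplified standalone sketch of that create-and-retry pattern, without the symlink guard or the EEXIST/EACCES handling of the real code:)

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Create "path" with O_CREAT|O_EXCL, optimistically assuming its
 * directories already exist; on ENOENT, make the missing leading
 * directories and retry once.  Returns an open fd or -1.
 */
static int create_optimistically(const char *path, mode_t mode)
{
        int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, mode);

        if (fd < 0 && errno == ENOENT) {
                char buf[4096];
                char *slash;
                size_t len = strlen(path);

                if (len >= sizeof(buf))
                        return -1;
                memcpy(buf, path, len + 1);
                /* create each missing leading directory in turn */
                for (slash = buf; (slash = strchr(slash + 1, '/')); ) {
                        *slash = '\0';
                        if (mkdir(buf, 0777) < 0 && errno != EEXIST)
                                return -1;
                        *slash = '/';
                }
                fd = open(path, O_CREAT | O_EXCL | O_WRONLY, mode);
        }
        return fd;
}

int main(void)
{
        int fd = create_optimistically("demo/dir/file.txt", 0666);

        if (fd < 0)
                perror("create");
        else
                close(fd);
        return 0;
}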
But for now, as a + * part of embargoed security work, having this check would serve as a + * reasonable first step. + */ + if (path_is_beyond_symlink(state, path)) + return error(_("affected file '%s' is beyond a symbolic link"), path); + + res = try_create_file(state, path, mode, buf, size); + if (res < 0) + return -1; + if (!res) + return 0; + + if (errno == ENOENT) { + if (safe_create_leading_directories_no_share(path)) + return 0; + res = try_create_file(state, path, mode, buf, size); + if (res < 0) + return -1; + if (!res) + return 0; + } + + if (errno == EEXIST || errno == EACCES) { + /* We may be trying to create a file where a directory + * used to be. + */ + struct stat st; + if (!lstat(path, &st) && (!S_ISDIR(st.st_mode) || !rmdir(path))) + errno = EEXIST; + } + + if (errno == EEXIST) { + unsigned int nr = getpid(); + + for (;;) { + char newpath[PATH_MAX]; + mksnpath(newpath, sizeof(newpath), "%s~%u", path, nr); + res = try_create_file(state, newpath, mode, buf, size); + if (res < 0) + return -1; + if (!res) { + if (!rename(newpath, path)) + return 0; + unlink_or_warn(newpath); + break; + } + if (errno != EEXIST) + break; + ++nr; + } + } + return error_errno(_("unable to write file '%s' mode %o"), + path, mode); +} + +static int add_conflicted_stages_file(struct apply_state *state, + struct patch *patch) +{ + int stage, namelen; + unsigned mode; + struct cache_entry *ce; + + if (!state->update_index) + return 0; + namelen = strlen(patch->new_name); + mode = patch->new_mode ? patch->new_mode : (S_IFREG | 0644); + + remove_file_from_index(state->repo->index, patch->new_name); + for (stage = 1; stage < 4; stage++) { + if (is_null_oid(&patch->threeway_stage[stage - 1])) + continue; + ce = make_empty_cache_entry(state->repo->index, namelen); + memcpy(ce->name, patch->new_name, namelen); + ce->ce_mode = create_ce_mode(mode); + ce->ce_flags = create_ce_flags(stage); + ce->ce_namelen = namelen; + oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]); + if (add_index_entry(state->repo->index, ce, ADD_CACHE_OK_TO_ADD) < 0) { + discard_cache_entry(ce); + return error(_("unable to add cache entry for %s"), + patch->new_name); + } + } + + return 0; +} + +static int create_file(struct apply_state *state, struct patch *patch) +{ + char *path = patch->new_name; + unsigned mode = patch->new_mode; + unsigned long size = patch->resultsize; + char *buf = patch->result; + + if (!mode) + mode = S_IFREG | 0644; + if (create_one_file(state, path, mode, buf, size)) + return -1; + + if (patch->conflicted_threeway) + return add_conflicted_stages_file(state, patch); + else if (state->update_index) + return add_index_file(state, path, mode, buf, size); + return 0; +} + +/* phase zero is to remove, phase one is to create */ +static int write_out_one_result(struct apply_state *state, + struct patch *patch, + int phase) +{ + if (patch->is_delete > 0) { + if (phase == 0) + return remove_file(state, patch, 1); + return 0; + } + if (patch->is_new > 0 || patch->is_copy) { + if (phase == 1) + return create_file(state, patch); + return 0; + } + /* + * Rename or modification boils down to the same + * thing: remove the old, write the new + */ + if (phase == 0) + return remove_file(state, patch, patch->is_rename); + if (phase == 1) + return create_file(state, patch); + return 0; +} + +static int write_out_one_reject(struct apply_state *state, struct patch *patch) +{ + FILE *rej; + char namebuf[PATH_MAX]; + struct fragment *frag; + int cnt = 0; + struct strbuf sb = STRBUF_INIT; + + for (cnt = 0, frag = patch->fragments; 
frag; frag = frag->next) { + if (!frag->rejected) + continue; + cnt++; + } + + if (!cnt) { + if (state->apply_verbosity > verbosity_normal) + say_patch_name(stderr, + _("Applied patch %s cleanly."), patch); + return 0; + } + + /* This should not happen, because a removal patch that leaves + * contents are marked "rejected" at the patch level. + */ + if (!patch->new_name) + die(_("internal error")); + + /* Say this even without --verbose */ + strbuf_addf(&sb, Q_("Applying patch %%s with %d reject...", + "Applying patch %%s with %d rejects...", + cnt), + cnt); + if (state->apply_verbosity > verbosity_silent) + say_patch_name(stderr, sb.buf, patch); + strbuf_release(&sb); + + cnt = strlen(patch->new_name); + if (ARRAY_SIZE(namebuf) <= cnt + 5) { + cnt = ARRAY_SIZE(namebuf) - 5; + warning(_("truncating .rej filename to %.*s.rej"), + cnt - 1, patch->new_name); + } + memcpy(namebuf, patch->new_name, cnt); + memcpy(namebuf + cnt, ".rej", 5); + + rej = fopen(namebuf, "w"); + if (!rej) + return error_errno(_("cannot open %s"), namebuf); + + /* Normal git tools never deal with .rej, so do not pretend + * this is a git patch by saying --git or giving extended + * headers. While at it, maybe please "kompare" that wants + * the trailing TAB and some garbage at the end of line ;-). + */ + fprintf(rej, "diff a/%s b/%s\t(rejected hunks)\n", + patch->new_name, patch->new_name); + for (cnt = 1, frag = patch->fragments; + frag; + cnt++, frag = frag->next) { + if (!frag->rejected) { + if (state->apply_verbosity > verbosity_silent) + fprintf_ln(stderr, _("Hunk #%d applied cleanly."), cnt); + continue; + } + if (state->apply_verbosity > verbosity_silent) + fprintf_ln(stderr, _("Rejected hunk #%d."), cnt); + fprintf(rej, "%.*s", frag->size, frag->patch); + if (frag->patch[frag->size-1] != '\n') + fputc('\n', rej); + } + fclose(rej); + return -1; +} + +/* + * Returns: + * -1 if an error happened + * 0 if the patch applied cleanly + * 1 if the patch did not apply cleanly + */ +static int write_out_results(struct apply_state *state, struct patch *list) +{ + int phase; + int errs = 0; + struct patch *l; + struct string_list cpath = STRING_LIST_INIT_DUP; + + for (phase = 0; phase < 2; phase++) { + l = list; + while (l) { + if (l->rejected) + errs = 1; + else { + if (write_out_one_result(state, l, phase)) { + string_list_clear(&cpath, 0); + return -1; + } + if (phase == 1) { + if (write_out_one_reject(state, l)) + errs = 1; + if (l->conflicted_threeway) { + string_list_append(&cpath, l->new_name); + errs = 1; + } + } + } + l = l->next; + } + } + + if (cpath.nr) { + struct string_list_item *item; + + string_list_sort(&cpath); + if (state->apply_verbosity > verbosity_silent) { + for_each_string_list_item(item, &cpath) + fprintf(stderr, "U %s\n", item->string); + } + string_list_clear(&cpath, 0); + + /* + * rerere relies on the partially merged result being in the working + * tree with conflict markers, but that isn't written with --cached. + */ + if (!state->cached) + repo_rerere(state->repo, 0); + } + + return errs; +} + +/* + * Try to apply a patch. 
+ * + * Returns: + * -128 if a bad error happened (like patch unreadable) + * -1 if patch did not apply and user cannot deal with it + * 0 if the patch applied + * 1 if the patch did not apply but user might fix it + */ +static int apply_patch(struct apply_state *state, + int fd, + const char *filename, + int options) +{ + size_t offset; + struct strbuf buf = STRBUF_INIT; /* owns the patch text */ + struct patch *list = NULL, **listp = &list; + int skipped_patch = 0; + int res = 0; + int flush_attributes = 0; + + state->patch_input_file = filename; + if (read_patch_file(&buf, fd) < 0) + return -128; + offset = 0; + while (offset < buf.len) { + struct patch *patch; + int nr; + + CALLOC_ARRAY(patch, 1); + patch->inaccurate_eof = !!(options & APPLY_OPT_INACCURATE_EOF); + patch->recount = !!(options & APPLY_OPT_RECOUNT); + nr = parse_chunk(state, buf.buf + offset, buf.len - offset, patch); + if (nr < 0) { + free_patch(patch); + if (nr == -128) { + res = -128; + goto end; + } + break; + } + if (state->apply_in_reverse) + reverse_patches(patch); + if (use_patch(state, patch)) { + patch_stats(state, patch); + if (!list || !state->apply_in_reverse) { + *listp = patch; + listp = &patch->next; + } else { + patch->next = list; + list = patch; + } + + if ((patch->new_name && + ends_with_path_components(patch->new_name, + GITATTRIBUTES_FILE)) || + (patch->old_name && + ends_with_path_components(patch->old_name, + GITATTRIBUTES_FILE))) + flush_attributes = 1; + } + else { + if (state->apply_verbosity > verbosity_normal) + say_patch_name(stderr, _("Skipped patch '%s'."), patch); + free_patch(patch); + skipped_patch++; + } + offset += nr; + } + + if (!list && !skipped_patch) { + error(_("unrecognized input")); + res = -128; + goto end; + } + + if (state->whitespace_error && (state->ws_error_action == die_on_ws_error)) + state->apply = 0; + + state->update_index = (state->check_index || state->ita_only) && state->apply; + if (state->update_index && !is_lock_file_locked(&state->lock_file)) { + if (state->index_file) + hold_lock_file_for_update(&state->lock_file, + state->index_file, + LOCK_DIE_ON_ERROR); + else + repo_hold_locked_index(state->repo, &state->lock_file, + LOCK_DIE_ON_ERROR); + } + + if (state->check_index && read_apply_cache(state) < 0) { + error(_("unable to read index file")); + res = -128; + goto end; + } + + if (state->check || state->apply) { + int r = check_patch_list(state, list); + if (r == -128) { + res = -128; + goto end; + } + if (r < 0 && !state->apply_with_reject) { + res = -1; + goto end; + } + } + + if (state->apply) { + int write_res = write_out_results(state, list); + if (write_res < 0) { + res = -128; + goto end; + } + if (write_res > 0) { + /* with --3way, we still need to write the index out */ + res = state->apply_with_reject ? 
-1 : 1; + goto end; + } + } + + if (state->fake_ancestor && + build_fake_ancestor(state, list)) { + res = -128; + goto end; + } + + if (state->diffstat && state->apply_verbosity > verbosity_silent) + stat_patch_list(state, list); + + if (state->numstat && state->apply_verbosity > verbosity_silent) + numstat_patch_list(state, list); + + if (state->summary && state->apply_verbosity > verbosity_silent) + summary_patch_list(list); + + if (flush_attributes) + reset_parsed_attributes(); +end: + free_patch_list(list); + strbuf_release(&buf); + string_list_clear(&state->fn_table, 0); + return res; +} + +static int apply_option_parse_exclude(const struct option *opt, + const char *arg, int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + + add_name_limit(state, arg, 1); + return 0; +} + +static int apply_option_parse_include(const struct option *opt, + const char *arg, int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + + add_name_limit(state, arg, 0); + state->has_include = 1; + return 0; +} + +static int apply_option_parse_p(const struct option *opt, + const char *arg, + int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + + state->p_value = atoi(arg); + state->p_value_known = 1; + return 0; +} + +static int apply_option_parse_space_change(const struct option *opt, + const char *arg, int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_ARG(arg); + + if (unset) + state->ws_ignore_action = ignore_ws_none; + else + state->ws_ignore_action = ignore_ws_change; + return 0; +} + +static int apply_option_parse_whitespace(const struct option *opt, + const char *arg, int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + + state->whitespace_option = arg; + if (parse_whitespace_option(state, arg)) + return -1; + return 0; +} + +static int apply_option_parse_directory(const struct option *opt, + const char *arg, int unset) +{ + struct apply_state *state = opt->value; + + BUG_ON_OPT_NEG(unset); + + strbuf_reset(&state->root); + strbuf_addstr(&state->root, arg); + strbuf_complete(&state->root, '/'); + return 0; +} + +int apply_all_patches(struct apply_state *state, + int argc, + const char **argv, + int options) +{ + int i; + int res; + int errs = 0; + int read_stdin = 1; + + for (i = 0; i < argc; i++) { + const char *arg = argv[i]; + char *to_free = NULL; + int fd; + + if (!strcmp(arg, "-")) { + res = apply_patch(state, 0, "", options); + if (res < 0) + goto end; + errs |= res; + read_stdin = 0; + continue; + } else + arg = to_free = prefix_filename(state->prefix, arg); + + fd = open(arg, O_RDONLY); + if (fd < 0) { + error(_("can't open patch '%s': %s"), arg, strerror(errno)); + res = -128; + free(to_free); + goto end; + } + read_stdin = 0; + set_default_whitespace_mode(state); + res = apply_patch(state, fd, arg, options); + close(fd); + free(to_free); + if (res < 0) + goto end; + errs |= res; + } + set_default_whitespace_mode(state); + if (read_stdin) { + res = apply_patch(state, 0, "", options); + if (res < 0) + goto end; + errs |= res; + } + + if (state->whitespace_error) { + if (state->squelch_whitespace_errors && + state->squelch_whitespace_errors < state->whitespace_error) { + int squelched = + state->whitespace_error - state->squelch_whitespace_errors; + warning(Q_("squelched %d whitespace error", + "squelched %d whitespace errors", + squelched), + squelched); + } + if (state->ws_error_action == die_on_ws_error) { + error(Q_("%d line adds whitespace errors.", 
+ "%d lines add whitespace errors.", + state->whitespace_error), + state->whitespace_error); + res = -128; + goto end; + } + if (state->applied_after_fixing_ws && state->apply) + warning(Q_("%d line applied after" + " fixing whitespace errors.", + "%d lines applied after" + " fixing whitespace errors.", + state->applied_after_fixing_ws), + state->applied_after_fixing_ws); + else if (state->whitespace_error) + warning(Q_("%d line adds whitespace errors.", + "%d lines add whitespace errors.", + state->whitespace_error), + state->whitespace_error); + } + + if (state->update_index) { + res = write_locked_index(state->repo->index, &state->lock_file, COMMIT_LOCK); + if (res) { + error(_("Unable to write new index file")); + res = -128; + goto end; + } + } + + res = !!errs; + +end: + rollback_lock_file(&state->lock_file); + + if (state->apply_verbosity <= verbosity_silent) { + set_error_routine(state->saved_error_routine); + set_warn_routine(state->saved_warn_routine); + } + + if (res > -1) + return res; + return (res == -1 ? 1 : 128); +} + +int apply_parse_options(int argc, const char **argv, + struct apply_state *state, + int *force_apply, int *options, + const char * const *apply_usage) +{ + struct option builtin_apply_options[] = { + OPT_CALLBACK_F(0, "exclude", state, N_("path"), + N_("don't apply changes matching the given path"), + PARSE_OPT_NONEG, apply_option_parse_exclude), + OPT_CALLBACK_F(0, "include", state, N_("path"), + N_("apply changes matching the given path"), + PARSE_OPT_NONEG, apply_option_parse_include), + OPT_CALLBACK('p', NULL, state, N_("num"), + N_("remove leading slashes from traditional diff paths"), + apply_option_parse_p), + OPT_BOOL(0, "no-add", &state->no_add, + N_("ignore additions made by the patch")), + OPT_BOOL(0, "stat", &state->diffstat, + N_("instead of applying the patch, output diffstat for the input")), + OPT_NOOP_NOARG(0, "allow-binary-replacement"), + OPT_NOOP_NOARG(0, "binary"), + OPT_BOOL(0, "numstat", &state->numstat, + N_("show number of added and deleted lines in decimal notation")), + OPT_BOOL(0, "summary", &state->summary, + N_("instead of applying the patch, output a summary for the input")), + OPT_BOOL(0, "check", &state->check, + N_("instead of applying the patch, see if the patch is applicable")), + OPT_BOOL(0, "index", &state->check_index, + N_("make sure the patch is applicable to the current index")), + OPT_BOOL('N', "intent-to-add", &state->ita_only, + N_("mark new files with `git add --intent-to-add`")), + OPT_BOOL(0, "cached", &state->cached, + N_("apply a patch without touching the working tree")), + OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths, + N_("accept a patch that touches outside the working area"), + PARSE_OPT_NOCOMPLETE), + OPT_BOOL(0, "apply", force_apply, + N_("also apply the patch (use with --stat/--summary/--check)")), + OPT_BOOL('3', "3way", &state->threeway, + N_( "attempt three-way merge, fall back on normal patch if that fails")), + OPT_FILENAME(0, "build-fake-ancestor", &state->fake_ancestor, + N_("build a temporary index based on embedded index information")), + /* Think twice before adding "--nul" synonym to this */ + OPT_SET_INT('z', NULL, &state->line_termination, + N_("paths are separated with NUL character"), '\0'), + OPT_INTEGER('C', NULL, &state->p_context, + N_("ensure at least lines of context match")), + OPT_CALLBACK(0, "whitespace", state, N_("action"), + N_("detect new or modified lines that have whitespace errors"), + apply_option_parse_whitespace), + OPT_CALLBACK_F(0, "ignore-space-change", state, 
NULL, + N_("ignore changes in whitespace when finding context"), + PARSE_OPT_NOARG, apply_option_parse_space_change), + OPT_CALLBACK_F(0, "ignore-whitespace", state, NULL, + N_("ignore changes in whitespace when finding context"), + PARSE_OPT_NOARG, apply_option_parse_space_change), + OPT_BOOL('R', "reverse", &state->apply_in_reverse, + N_("apply the patch in reverse")), + OPT_BOOL(0, "unidiff-zero", &state->unidiff_zero, + N_("don't expect at least one line of context")), + OPT_BOOL(0, "reject", &state->apply_with_reject, + N_("leave the rejected hunks in corresponding *.rej files")), + OPT_BOOL(0, "allow-overlap", &state->allow_overlap, + N_("allow overlapping hunks")), + OPT__VERBOSE(&state->apply_verbosity, N_("be verbose")), + OPT_BIT(0, "inaccurate-eof", options, + N_("tolerate incorrectly detected missing new-line at the end of file"), + APPLY_OPT_INACCURATE_EOF), + OPT_BIT(0, "recount", options, + N_("do not trust the line counts in the hunk headers"), + APPLY_OPT_RECOUNT), + OPT_CALLBACK(0, "directory", state, N_("root"), + N_("prepend to all filenames"), + apply_option_parse_directory), + OPT_END() + }; + + return parse_options(argc, argv, state->prefix, builtin_apply_options, apply_usage, 0); +} diff --git a/attr.c b/attr.c index d029e681f2880a..47f32e7b3be851 100644 --- a/attr.c +++ b/attr.c @@ -28,7 +28,7 @@ static const char git_attr__unknown[] = "(builtin)unknown"; #endif struct git_attr { - int attr_nr; /* unique attribute number */ + unsigned int attr_nr; /* unique attribute number */ char name[FLEX_ARRAY]; /* attribute name */ }; @@ -210,7 +210,7 @@ static void report_invalid_attr(const char *name, size_t len, * dictionary. If no entry is found, create a new attribute and store it in * the dictionary. */ -static const struct git_attr *git_attr_internal(const char *name, int namelen) +static const struct git_attr *git_attr_internal(const char *name, size_t namelen) { struct git_attr *a; @@ -226,8 +226,8 @@ static const struct git_attr *git_attr_internal(const char *name, int namelen) a->attr_nr = hashmap_get_size(&g_attr_hashmap.map); attr_hashmap_add(&g_attr_hashmap, a->name, namelen, a); - assert(a->attr_nr == - (hashmap_get_size(&g_attr_hashmap.map) - 1)); + if (a->attr_nr != hashmap_get_size(&g_attr_hashmap.map) - 1) + die(_("unable to add additional attribute")); } hashmap_unlock(&g_attr_hashmap); @@ -272,7 +272,7 @@ struct match_attr { const struct git_attr *attr; } u; char is_macro; - unsigned num_attr; + size_t num_attr; struct attr_state state[FLEX_ARRAY]; }; @@ -293,7 +293,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp, struct attr_state *e) { const char *ep, *equals; - int len; + size_t len; ep = cp + strcspn(cp, blank); equals = strchr(cp, '='); @@ -337,8 +337,7 @@ static const char *parse_attr(const char *src, int lineno, const char *cp, static struct match_attr *parse_attr_line(const char *line, const char *src, int lineno, unsigned flags) { - int namelen; - int num_attr, i; + size_t namelen, num_attr, i; const char *cp, *name, *states; struct match_attr *res = NULL; int is_macro; @@ -385,10 +384,9 @@ static struct match_attr *parse_attr_line(const char *line, const char *src, goto fail_return; } - res = xcalloc(1, - sizeof(*res) + - sizeof(struct attr_state) * num_attr + - (is_macro ? 0 : namelen + 1)); + res = xcalloc(1, st_add3(sizeof(*res), + st_mult(sizeof(struct attr_state), num_attr), + is_macro ? 
0 : namelen + 1)); if (is_macro) { res->u.attr = git_attr_internal(name, namelen); } else { @@ -451,11 +449,12 @@ struct attr_stack { static void attr_stack_free(struct attr_stack *e) { - int i; + unsigned i; free(e->origin); for (i = 0; i < e->num_matches; i++) { struct match_attr *a = e->attrs[i]; - int j; + size_t j; + for (j = 0; j < a->num_attr; j++) { const char *setto = a->state[j].setto; if (setto == ATTR__TRUE || @@ -664,8 +663,8 @@ static void handle_attr_line(struct attr_stack *res, a = parse_attr_line(line, src, lineno, flags); if (!a) return; - ALLOC_GROW(res->attrs, res->num_matches + 1, res->alloc); - res->attrs[res->num_matches++] = a; + ALLOC_GROW_BY(res->attrs, res->num_matches, 1, res->alloc); + res->attrs[res->num_matches - 1] = a; } static struct attr_stack *read_attr_from_array(const char **list) @@ -1017,12 +1016,12 @@ static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem); static int fill_one(const char *what, struct all_attrs_item *all_attrs, const struct match_attr *a, int rem) { - int i; + size_t i; - for (i = a->num_attr - 1; rem > 0 && i >= 0; i--) { - const struct git_attr *attr = a->state[i].attr; + for (i = a->num_attr; rem > 0 && i > 0; i--) { + const struct git_attr *attr = a->state[i - 1].attr; const char **n = &(all_attrs[attr->attr_nr].value); - const char *v = a->state[i].setto; + const char *v = a->state[i - 1].setto; if (*n == ATTR__UNKNOWN) { debug_set(what, @@ -1041,11 +1040,11 @@ static int fill(const char *path, int pathlen, int basename_offset, struct all_attrs_item *all_attrs, int rem) { for (; rem > 0 && stack; stack = stack->prev) { - int i; + unsigned i; const char *base = stack->origin ? stack->origin : ""; - for (i = stack->num_matches - 1; 0 < rem && 0 <= i; i--) { - const struct match_attr *a = stack->attrs[i]; + for (i = stack->num_matches; 0 < rem && 0 < i; i--) { + const struct match_attr *a = stack->attrs[i - 1]; if (a->is_macro) continue; if (path_matches(path, pathlen, basename_offset, @@ -1076,11 +1075,11 @@ static void determine_macros(struct all_attrs_item *all_attrs, const struct attr_stack *stack) { for (; stack; stack = stack->prev) { - int i; - for (i = stack->num_matches - 1; i >= 0; i--) { - const struct match_attr *ma = stack->attrs[i]; + unsigned i; + for (i = stack->num_matches; i > 0; i--) { + const struct match_attr *ma = stack->attrs[i - 1]; if (ma->is_macro) { - int n = ma->u.attr->attr_nr; + unsigned int n = ma->u.attr->attr_nr; if (!all_attrs[n].macro) { all_attrs[n].macro = ma; } @@ -1132,7 +1131,7 @@ void git_check_attr(struct index_state *istate, collect_some_attrs(istate, path, check); for (i = 0; i < check->nr; i++) { - size_t n = check->items[i].attr->attr_nr; + unsigned int n = check->items[i].attr->attr_nr; const char *value = check->all_attrs[n].value; if (value == ATTR__UNKNOWN) value = ATTR__UNSET; diff --git a/attr.c.orig b/attr.c.orig new file mode 100644 index 00000000000000..ab8d3273f730c8 --- /dev/null +++ b/attr.c.orig @@ -0,0 +1,1166 @@ +/* + * Handle git attributes. See gitattributes(5) for a description of + * the file syntax, and attr.h for a description of the API. + * + * One basic design decision here is that we are not going to support + * an insanely large number of attributes. 
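The xcalloc() hunk above replaces open-coded size arithmetic with st_add3() and st_mult() so that a hostile attribute count cannot overflow the allocation size. A standalone sketch of the same checked arithmetic, with stand-in struct names (demo_rule, demo_state) instead of git's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked size arithmetic in the spirit of st_mult()/st_add3(). */
static size_t checked_mult(size_t a, size_t b)
{
        if (b && a > SIZE_MAX / b) {
                fprintf(stderr, "size_t overflow: %zu * %zu\n", a, b);
                exit(1);
        }
        return a * b;
}

static size_t checked_add(size_t a, size_t b)
{
        if (a > SIZE_MAX - b) {
                fprintf(stderr, "size_t overflow: %zu + %zu\n", a, b);
                exit(1);
        }
        return a + b;
}

/* Stand-ins for the real structs; only their sizes matter here. */
struct demo_rule  { char is_macro; size_t num_attr; };
struct demo_state { const void *attr, *setto; };

int main(void)
{
        size_t num_attr = 3, namelen = 16;
        /* header + per-attribute states + trailing pattern string */
        size_t bytes = checked_add(checked_add(sizeof(struct demo_rule),
                                               checked_mult(sizeof(struct demo_state),
                                                            num_attr)),
                                   namelen + 1);

        printf("allocating %zu bytes\n", bytes);
        free(calloc(1, bytes));
        return 0;
}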
+ */ + +#include "cache.h" +#include "config.h" +#include "exec-cmd.h" +#include "attr.h" +#include "dir.h" +#include "utf8.h" +#include "quote.h" +#include "thread-utils.h" + +const char git_attr__true[] = "(builtin)true"; +const char git_attr__false[] = "\0(builtin)false"; +static const char git_attr__unknown[] = "(builtin)unknown"; +#define ATTR__TRUE git_attr__true +#define ATTR__FALSE git_attr__false +#define ATTR__UNSET NULL +#define ATTR__UNKNOWN git_attr__unknown + +#ifndef DEBUG_ATTR +#define DEBUG_ATTR 0 +#endif + +struct git_attr { + unsigned int attr_nr; /* unique attribute number */ + char name[FLEX_ARRAY]; /* attribute name */ +}; + +const char *git_attr_name(const struct git_attr *attr) +{ + return attr->name; +} + +struct attr_hashmap { + struct hashmap map; + pthread_mutex_t mutex; +}; + +static inline void hashmap_lock(struct attr_hashmap *map) +{ + pthread_mutex_lock(&map->mutex); +} + +static inline void hashmap_unlock(struct attr_hashmap *map) +{ + pthread_mutex_unlock(&map->mutex); +} + +/* The container for objects stored in "struct attr_hashmap" */ +struct attr_hash_entry { + struct hashmap_entry ent; + const char *key; /* the key; memory should be owned by value */ + size_t keylen; /* length of the key */ + void *value; /* the stored value */ +}; + +/* attr_hashmap comparison function */ +static int attr_hash_entry_cmp(const void *unused_cmp_data, + const struct hashmap_entry *eptr, + const struct hashmap_entry *entry_or_key, + const void *unused_keydata) +{ + const struct attr_hash_entry *a, *b; + + a = container_of(eptr, const struct attr_hash_entry, ent); + b = container_of(entry_or_key, const struct attr_hash_entry, ent); + return (a->keylen != b->keylen) || strncmp(a->key, b->key, a->keylen); +} + +/* + * The global dictionary of all interned attributes. This + * is a singleton object which is shared between threads. + * Access to this dictionary must be surrounded with a mutex. + */ +static struct attr_hashmap g_attr_hashmap = { + HASHMAP_INIT(attr_hash_entry_cmp, NULL) +}; + +/* + * Retrieve the 'value' stored in a hashmap given the provided 'key'. + * If there is no matching entry, return NULL. + */ +static void *attr_hashmap_get(struct attr_hashmap *map, + const char *key, size_t keylen) +{ + struct attr_hash_entry k; + struct attr_hash_entry *e; + + hashmap_entry_init(&k.ent, memhash(key, keylen)); + k.key = key; + k.keylen = keylen; + e = hashmap_get_entry(&map->map, &k, ent, NULL); + + return e ? e->value : NULL; +} + +/* Add 'value' to a hashmap based on the provided 'key'. */ +static void attr_hashmap_add(struct attr_hashmap *map, + const char *key, size_t keylen, + void *value) +{ + struct attr_hash_entry *e; + + e = xmalloc(sizeof(struct attr_hash_entry)); + hashmap_entry_init(&e->ent, memhash(key, keylen)); + e->key = key; + e->keylen = keylen; + e->value = value; + + hashmap_add(&map->map, &e->ent); +} + +struct all_attrs_item { + const struct git_attr *attr; + const char *value; + /* + * If 'macro' is non-NULL, indicates that 'attr' is a macro based on + * the current attribute stack and contains a pointer to the match_attr + * definition of the macro + */ + const struct match_attr *macro; +}; + +/* + * Reallocate and reinitialize the array of all attributes (which is used in + * the attribute collection process) in 'check' based on the global dictionary + * of attributes. 
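The hashmap above interns each attribute name once, and git_attr_internal() (further down) hands every new name a stable attr_nr taken from the dictionary size at the moment it is added. A toy model of that interning scheme, using a linear-scan table instead of the mutex-protected hashmap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Each distinct name gets a stable, sequential number. */
struct interned { char *name; unsigned nr; };

static struct interned *table;
static unsigned table_nr, table_alloc;

static unsigned intern(const char *name)
{
        unsigned i;

        for (i = 0; i < table_nr; i++)
                if (!strcmp(table[i].name, name))
                        return table[i].nr;
        if (table_nr == table_alloc) {
                table_alloc = table_alloc ? 2 * table_alloc : 8;
                table = realloc(table, table_alloc * sizeof(*table));
                if (!table)
                        exit(1);
        }
        table[table_nr].name = strdup(name);
        table[table_nr].nr = table_nr;
        return table_nr++;
}

int main(void)
{
        /* prints "0 1 0": "text" keeps its first number */
        printf("%u %u %u\n", intern("text"), intern("eol"), intern("text"));
        return 0;
}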
+ */ +static void all_attrs_init(struct attr_hashmap *map, struct attr_check *check) +{ + int i; + unsigned int size; + + hashmap_lock(map); + + size = hashmap_get_size(&map->map); + if (size < check->all_attrs_nr) + BUG("interned attributes shouldn't be deleted"); + + /* + * If the number of attributes in the global dictionary has increased + * (or this attr_check instance doesn't have an initialized all_attrs + * field), reallocate the provided attr_check instance's all_attrs + * field and fill each entry with its corresponding git_attr. + */ + if (size != check->all_attrs_nr) { + struct attr_hash_entry *e; + struct hashmap_iter iter; + + REALLOC_ARRAY(check->all_attrs, size); + check->all_attrs_nr = size; + + hashmap_for_each_entry(&map->map, &iter, e, + ent /* member name */) { + const struct git_attr *a = e->value; + check->all_attrs[a->attr_nr].attr = a; + } + } + + hashmap_unlock(map); + + /* + * Re-initialize every entry in check->all_attrs. + * This re-initialization can live outside of the locked region since + * the attribute dictionary is no longer being accessed. + */ + for (i = 0; i < check->all_attrs_nr; i++) { + check->all_attrs[i].value = ATTR__UNKNOWN; + check->all_attrs[i].macro = NULL; + } +} + +static int attr_name_valid(const char *name, size_t namelen) +{ + /* + * Attribute name cannot begin with '-' and must consist of + * characters from [-A-Za-z0-9_.]. + */ + if (namelen <= 0 || *name == '-') + return 0; + while (namelen--) { + char ch = *name++; + if (! (ch == '-' || ch == '.' || ch == '_' || + ('0' <= ch && ch <= '9') || + ('a' <= ch && ch <= 'z') || + ('A' <= ch && ch <= 'Z')) ) + return 0; + } + return 1; +} + +static void report_invalid_attr(const char *name, size_t len, + const char *src, int lineno) +{ + struct strbuf err = STRBUF_INIT; + strbuf_addf(&err, _("%.*s is not a valid attribute name"), + (int) len, name); + fprintf(stderr, "%s: %s:%d\n", err.buf, src, lineno); + strbuf_release(&err); +} + +/* + * Given a 'name', lookup and return the corresponding attribute in the global + * dictionary. If no entry is found, create a new attribute and store it in + * the dictionary. + */ +static const struct git_attr *git_attr_internal(const char *name, size_t namelen) +{ + struct git_attr *a; + + if (!attr_name_valid(name, namelen)) + return NULL; + + hashmap_lock(&g_attr_hashmap); + + a = attr_hashmap_get(&g_attr_hashmap, name, namelen); + + if (!a) { + FLEX_ALLOC_MEM(a, name, name, namelen); + a->attr_nr = hashmap_get_size(&g_attr_hashmap.map); + + attr_hashmap_add(&g_attr_hashmap, a->name, namelen, a); + if (a->attr_nr != hashmap_get_size(&g_attr_hashmap.map) - 1) + die(_("unable to add additional attribute")); + } + + hashmap_unlock(&g_attr_hashmap); + + return a; +} + +const struct git_attr *git_attr(const char *name) +{ + return git_attr_internal(name, strlen(name)); +} + +/* What does a matched pattern decide? */ +struct attr_state { + const struct git_attr *attr; + const char *setto; +}; + +struct pattern { + const char *pattern; + int patternlen; + int nowildcardlen; + unsigned flags; /* PATTERN_FLAG_* */ +}; + +/* + * One rule, as from a .gitattributes file. + * + * If is_macro is true, then u.attr is a pointer to the git_attr being + * defined. + * + * If is_macro is false, then u.pat is the filename pattern to which the + * rule applies. + * + * In either case, num_attr is the number of attributes affected by + * this rule, and state is an array listing them. The attributes are + * listed as they appear in the file (macros unexpanded). 
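For reference, a single rule carries a pattern followed by attribute states. A throwaway tokenizer showing the four state forms, far looser than parse_attr_line() (no name validation, no quoting, no macro handling):

#include <stdio.h>
#include <string.h>

/*
 * Minimal tokenizer for one .gitattributes rule:
 *   "pattern attr -attr !attr attr=value"
 * "attr" sets, "-attr" unsets, "!attr" makes it unspecified,
 * "attr=value" assigns a string value.
 */
static void parse_rule(char *line)
{
        char *tok = strtok(line, " \t");

        if (!tok)
                return;
        printf("pattern: %s\n", tok);
        while ((tok = strtok(NULL, " \t"))) {
                char *eq = strchr(tok, '=');

                if (*tok == '-')
                        printf("  %-6s -> unset\n", tok + 1);
                else if (*tok == '!')
                        printf("  %-6s -> unspecified\n", tok + 1);
                else if (eq) {
                        *eq = '\0';
                        printf("  %-6s -> \"%s\"\n", tok, eq + 1);
                } else
                        printf("  %-6s -> set\n", tok);
        }
}

int main(void)
{
        char line[] = "*.txt text -diff !merge eol=lf";

        parse_rule(line);
        return 0;
}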
+ */ +struct match_attr { + union { + struct pattern pat; + const struct git_attr *attr; + } u; + char is_macro; + size_t num_attr; + struct attr_state state[FLEX_ARRAY]; +}; + +static const char blank[] = " \t\r\n"; + +/* Flags usable in read_attr() and parse_attr_line() family of functions. */ +#define READ_ATTR_MACRO_OK (1<<0) +#define READ_ATTR_NOFOLLOW (1<<1) + +/* + * Parse a whitespace-delimited attribute state (i.e., "attr", + * "-attr", "!attr", or "attr=value") from the string starting at src. + * If e is not NULL, write the results to *e. Return a pointer to the + * remainder of the string (with leading whitespace removed), or NULL + * if there was an error. + */ +static const char *parse_attr(const char *src, int lineno, const char *cp, + struct attr_state *e) +{ + const char *ep, *equals; + size_t len; + + ep = cp + strcspn(cp, blank); + equals = strchr(cp, '='); + if (equals && ep < equals) + equals = NULL; + if (equals) + len = equals - cp; + else + len = ep - cp; + if (!e) { + if (*cp == '-' || *cp == '!') { + cp++; + len--; + } + if (!attr_name_valid(cp, len)) { + report_invalid_attr(cp, len, src, lineno); + return NULL; + } + } else { + /* + * As this function is always called twice, once with + * e == NULL in the first pass and then e != NULL in + * the second pass, no need for attr_name_valid() + * check here. + */ + if (*cp == '-' || *cp == '!') { + e->setto = (*cp == '-') ? ATTR__FALSE : ATTR__UNSET; + cp++; + len--; + } + else if (!equals) + e->setto = ATTR__TRUE; + else { + e->setto = xmemdupz(equals + 1, ep - equals - 1); + } + e->attr = git_attr_internal(cp, len); + } + return ep + strspn(ep, blank); +} + +static struct match_attr *parse_attr_line(const char *line, const char *src, + int lineno, unsigned flags) +{ + size_t namelen, num_attr, i; + const char *cp, *name, *states; + struct match_attr *res = NULL; + int is_macro; + struct strbuf pattern = STRBUF_INIT; + + cp = line + strspn(line, blank); + if (!*cp || *cp == '#') + return NULL; + name = cp; + + if (*cp == '"' && !unquote_c_style(&pattern, name, &states)) { + name = pattern.buf; + namelen = pattern.len; + } else { + namelen = strcspn(name, blank); + states = name + namelen; + } + + if (strlen(ATTRIBUTE_MACRO_PREFIX) < namelen && + starts_with(name, ATTRIBUTE_MACRO_PREFIX)) { + if (!(flags & READ_ATTR_MACRO_OK)) { + fprintf_ln(stderr, _("%s not allowed: %s:%d"), + name, src, lineno); + goto fail_return; + } + is_macro = 1; + name += strlen(ATTRIBUTE_MACRO_PREFIX); + name += strspn(name, blank); + namelen = strcspn(name, blank); + if (!attr_name_valid(name, namelen)) { + report_invalid_attr(name, namelen, src, lineno); + goto fail_return; + } + } + else + is_macro = 0; + + states += strspn(states, blank); + + /* First pass to count the attr_states */ + for (cp = states, num_attr = 0; *cp; num_attr++) { + cp = parse_attr(src, lineno, cp, NULL); + if (!cp) + goto fail_return; + } + + res = xcalloc(1, + sizeof(*res) + + sizeof(struct attr_state) * num_attr + + (is_macro ? 0 : namelen + 1)); + if (is_macro) { + res->u.attr = git_attr_internal(name, namelen); + } else { + char *p = (char *)&(res->state[num_attr]); + memcpy(p, name, namelen); + res->u.pat.pattern = p; + parse_path_pattern(&res->u.pat.pattern, + &res->u.pat.patternlen, + &res->u.pat.flags, + &res->u.pat.nowildcardlen); + if (res->u.pat.flags & PATTERN_FLAG_NEGATIVE) { + warning(_("Negative patterns are ignored in git attributes\n" + "Use '\\!' 
for literal leading exclamation.")); + goto fail_return; + } + } + res->is_macro = is_macro; + res->num_attr = num_attr; + + /* Second pass to fill the attr_states */ + for (cp = states, i = 0; *cp; i++) { + cp = parse_attr(src, lineno, cp, &(res->state[i])); + } + + strbuf_release(&pattern); + return res; + +fail_return: + strbuf_release(&pattern); + free(res); + return NULL; +} + +/* + * Like info/exclude and .gitignore, the attribute information can + * come from many places. + * + * (1) .gitattributes file of the same directory; + * (2) .gitattributes file of the parent directory if (1) does not have + * any match; this goes recursively upwards, just like .gitignore. + * (3) $GIT_DIR/info/attributes, which overrides both of the above. + * + * In the same file, later entries override the earlier match, so in the + * global list, we would have entries from info/attributes the earliest + * (reading the file from top to bottom), .gitattributes of the root + * directory (again, reading the file from top to bottom) down to the + * current directory, and then scan the list backwards to find the first match. + * This is exactly the same as what is_excluded() does in dir.c to deal with + * .gitignore file and info/excludes file as a fallback. + */ + +struct attr_stack { + struct attr_stack *prev; + char *origin; + size_t originlen; + unsigned num_matches; + unsigned alloc; + struct match_attr **attrs; +}; + +static void attr_stack_free(struct attr_stack *e) +{ + unsigned i; + free(e->origin); + for (i = 0; i < e->num_matches; i++) { + struct match_attr *a = e->attrs[i]; + size_t j; + + for (j = 0; j < a->num_attr; j++) { + const char *setto = a->state[j].setto; + if (setto == ATTR__TRUE || + setto == ATTR__FALSE || + setto == ATTR__UNSET || + setto == ATTR__UNKNOWN) + ; + else + free((char *) setto); + } + free(a); + } + free(e->attrs); + free(e); +} + +static void drop_attr_stack(struct attr_stack **stack) +{ + while (*stack) { + struct attr_stack *elem = *stack; + *stack = elem->prev; + attr_stack_free(elem); + } +} + +/* List of all attr_check structs; access should be surrounded by mutex */ +static struct check_vector { + size_t nr; + size_t alloc; + struct attr_check **checks; + pthread_mutex_t mutex; +} check_vector; + +static inline void vector_lock(void) +{ + pthread_mutex_lock(&check_vector.mutex); +} + +static inline void vector_unlock(void) +{ + pthread_mutex_unlock(&check_vector.mutex); +} + +static void check_vector_add(struct attr_check *c) +{ + vector_lock(); + + ALLOC_GROW(check_vector.checks, + check_vector.nr + 1, + check_vector.alloc); + check_vector.checks[check_vector.nr++] = c; + + vector_unlock(); +} + +static void check_vector_remove(struct attr_check *check) +{ + int i; + + vector_lock(); + + /* Find entry */ + for (i = 0; i < check_vector.nr; i++) + if (check_vector.checks[i] == check) + break; + + if (i >= check_vector.nr) + BUG("no entry found"); + + /* shift entries over */ + for (; i < check_vector.nr - 1; i++) + check_vector.checks[i] = check_vector.checks[i + 1]; + + check_vector.nr--; + + vector_unlock(); +} + +/* Iterate through all attr_check instances and drop their stacks */ +static void drop_all_attr_stacks(void) +{ + int i; + + vector_lock(); + + for (i = 0; i < check_vector.nr; i++) { + drop_attr_stack(&check_vector.checks[i]->stack); + } + + vector_unlock(); +} + +struct attr_check *attr_check_alloc(void) +{ + struct attr_check *c = xcalloc(1, sizeof(struct attr_check)); + + /* save pointer to the check struct */ + check_vector_add(c); + + return c; +} + 
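attr_check_initl() (next) takes a NULL-terminated list of attribute names and walks the varargs twice: once to count, once to fill. A self-contained sketch of that two-pass idiom (collect() is a made-up name, not git's API):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Count a NULL-terminated argument list first, then walk it a second
 * time to fill the array -- the same two-pass shape attr_check_initl()
 * uses.
 */
static const char **collect(const char *first, ...)
{
        va_list ap;
        const char **out;
        int cnt, i;

        va_start(ap, first);
        for (cnt = 1; va_arg(ap, const char *); cnt++)
                ;
        va_end(ap);

        out = calloc(cnt + 1, sizeof(*out));
        if (!out)
                exit(1);
        out[0] = first;
        va_start(ap, first);
        for (i = 1; i < cnt; i++)
                out[i] = va_arg(ap, const char *);
        va_end(ap);
        return out;     /* NULL-terminated */
}

int main(void)
{
        const char **v = collect("crlf", "ident", "filter", "eol", (char *)NULL);
        int i;

        for (i = 0; v[i]; i++)
                printf("%s\n", v[i]);
        free((void *)v);
        return 0;
}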
+struct attr_check *attr_check_initl(const char *one, ...) +{ + struct attr_check *check; + int cnt; + va_list params; + const char *param; + + va_start(params, one); + for (cnt = 1; (param = va_arg(params, const char *)) != NULL; cnt++) + ; + va_end(params); + + check = attr_check_alloc(); + check->nr = cnt; + check->alloc = cnt; + CALLOC_ARRAY(check->items, cnt); + + check->items[0].attr = git_attr(one); + va_start(params, one); + for (cnt = 1; cnt < check->nr; cnt++) { + const struct git_attr *attr; + param = va_arg(params, const char *); + if (!param) + BUG("counted %d != ended at %d", + check->nr, cnt); + attr = git_attr(param); + if (!attr) + BUG("%s: not a valid attribute name", param); + check->items[cnt].attr = attr; + } + va_end(params); + return check; +} + +struct attr_check *attr_check_dup(const struct attr_check *check) +{ + struct attr_check *ret; + + if (!check) + return NULL; + + ret = attr_check_alloc(); + + ret->nr = check->nr; + ret->alloc = check->alloc; + ALLOC_ARRAY(ret->items, ret->nr); + COPY_ARRAY(ret->items, check->items, ret->nr); + + return ret; +} + +struct attr_check_item *attr_check_append(struct attr_check *check, + const struct git_attr *attr) +{ + struct attr_check_item *item; + + ALLOC_GROW(check->items, check->nr + 1, check->alloc); + item = &check->items[check->nr++]; + item->attr = attr; + return item; +} + +void attr_check_reset(struct attr_check *check) +{ + check->nr = 0; +} + +void attr_check_clear(struct attr_check *check) +{ + FREE_AND_NULL(check->items); + check->alloc = 0; + check->nr = 0; + + FREE_AND_NULL(check->all_attrs); + check->all_attrs_nr = 0; + + drop_attr_stack(&check->stack); +} + +void attr_check_free(struct attr_check *check) +{ + if (check) { + /* Remove check from the check vector */ + check_vector_remove(check); + + attr_check_clear(check); + free(check); + } +} + +static const char *builtin_attr[] = { + "[attr]binary -diff -merge -text", + NULL, +}; + +static void handle_attr_line(struct attr_stack *res, + const char *line, + const char *src, + int lineno, + unsigned flags) +{ + struct match_attr *a; + + a = parse_attr_line(line, src, lineno, flags); + if (!a) + return; + ALLOC_GROW_BY(res->attrs, res->num_matches, 1, res->alloc); + res->attrs[res->num_matches - 1] = a; +} + +static struct attr_stack *read_attr_from_array(const char **list) +{ + struct attr_stack *res; + const char *line; + int lineno = 0; + + CALLOC_ARRAY(res, 1); + while ((line = *(list++)) != NULL) + handle_attr_line(res, line, "[builtin]", ++lineno, + READ_ATTR_MACRO_OK); + return res; +} + +/* + * Callers into the attribute system assume there is a single, system-wide + * global state where attributes are read from and when the state is flipped by + * calling git_attr_set_direction(), the stack frames that have been + * constructed need to be discarded so that subsequent calls into the + * attribute system will lazily read from the right place. Since changing + * direction causes a global paradigm shift, it should not ever be called while + * another thread could potentially be calling into the attribute system. 
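For reference, a typical caller of the attr_check API defined in this file looks like the following. This is an illustrative in-tree sketch rather than a standalone program: it assumes the declarations from cache.h and attr.h, and the attribute names are arbitrary examples.

/*
 * Illustrative caller of the attr_check API above (assumes cache.h and
 * attr.h declarations; not a standalone program).
 */
#include "cache.h"
#include "attr.h"

static void report_text_attr(struct index_state *istate, const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("text", "eol", NULL);

	git_check_attr(istate, path, check);

	if (ATTR_TRUE(check->items[0].value))
		fprintf(stderr, "%s: text is set\n", path);
	else if (ATTR_FALSE(check->items[0].value))
		fprintf(stderr, "%s: text is unset\n", path);
	else if (ATTR_UNSET(check->items[0].value))
		fprintf(stderr, "%s: text is unspecified\n", path);
	else
		fprintf(stderr, "%s: text=%s\n", path, check->items[0].value);
}

The trailing NULL is what the first va_arg() counting pass in attr_check_initl() above relies on.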
+ */ +static enum git_attr_direction direction; + +void git_attr_set_direction(enum git_attr_direction new_direction) +{ + if (is_bare_repository() && new_direction != GIT_ATTR_INDEX) + BUG("non-INDEX attr direction in a bare repo"); + + if (new_direction != direction) + drop_all_attr_stacks(); + + direction = new_direction; +} + +static struct attr_stack *read_attr_from_file(const char *path, unsigned flags) +{ + int fd; + FILE *fp; + struct attr_stack *res; + char buf[2048]; + int lineno = 0; + + if (flags & READ_ATTR_NOFOLLOW) + fd = open_nofollow(path, O_RDONLY); + else + fd = open(path, O_RDONLY); + + if (fd < 0) { + warn_on_fopen_errors(path); + return NULL; + } + fp = xfdopen(fd, "r"); + + CALLOC_ARRAY(res, 1); + while (fgets(buf, sizeof(buf), fp)) { + char *bufp = buf; + if (!lineno) + skip_utf8_bom(&bufp, strlen(bufp)); + handle_attr_line(res, bufp, path, ++lineno, flags); + } + fclose(fp); + return res; +} + +static struct attr_stack *read_attr_from_index(struct index_state *istate, + const char *path, + unsigned flags) +{ + struct attr_stack *res; + char *buf, *sp; + int lineno = 0; + + if (!istate) + return NULL; + + buf = read_blob_data_from_index(istate, path, NULL); + if (!buf) + return NULL; + + CALLOC_ARRAY(res, 1); + for (sp = buf; *sp; ) { + char *ep; + int more; + + ep = strchrnul(sp, '\n'); + more = (*ep == '\n'); + *ep = '\0'; + handle_attr_line(res, sp, path, ++lineno, flags); + sp = ep + more; + } + free(buf); + return res; +} + +static struct attr_stack *read_attr(struct index_state *istate, + const char *path, unsigned flags) +{ + struct attr_stack *res = NULL; + + if (direction == GIT_ATTR_INDEX) { + res = read_attr_from_index(istate, path, flags); + } else if (!is_bare_repository()) { + if (direction == GIT_ATTR_CHECKOUT) { + res = read_attr_from_index(istate, path, flags); + if (!res) + res = read_attr_from_file(path, flags); + } else if (direction == GIT_ATTR_CHECKIN) { + res = read_attr_from_file(path, flags); + if (!res) + /* + * There is no checked out .gitattributes file + * there, but we might have it in the index. + * We allow operation in a sparsely checked out + * work tree, so read from it. + */ + res = read_attr_from_index(istate, path, flags); + } + } + + if (!res) + CALLOC_ARRAY(res, 1); + return res; +} + +#if DEBUG_ATTR +static void debug_info(const char *what, struct attr_stack *elem) +{ + fprintf(stderr, "%s: %s\n", what, elem->origin ? 
elem->origin : "()"); +} +static void debug_set(const char *what, const char *match, struct git_attr *attr, const void *v) +{ + const char *value = v; + + if (ATTR_TRUE(value)) + value = "set"; + else if (ATTR_FALSE(value)) + value = "unset"; + else if (ATTR_UNSET(value)) + value = "unspecified"; + + fprintf(stderr, "%s: %s => %s (%s)\n", + what, attr->name, (char *) value, match); +} +#define debug_push(a) debug_info("push", (a)) +#define debug_pop(a) debug_info("pop", (a)) +#else +#define debug_push(a) do { ; } while (0) +#define debug_pop(a) do { ; } while (0) +#define debug_set(a,b,c,d) do { ; } while (0) +#endif /* DEBUG_ATTR */ + +static const char *git_etc_gitattributes(void) +{ + static const char *system_wide; + if (!system_wide) + system_wide = system_path(ETC_GITATTRIBUTES); + return system_wide; +} + +static const char *get_home_gitattributes(void) +{ + if (!git_attributes_file) + git_attributes_file = xdg_config_home("attributes"); + + return git_attributes_file; +} + +static int git_attr_system(void) +{ + return !git_env_bool("GIT_ATTR_NOSYSTEM", 0); +} + +static GIT_PATH_FUNC(git_path_info_attributes, INFOATTRIBUTES_FILE) + +static void push_stack(struct attr_stack **attr_stack_p, + struct attr_stack *elem, char *origin, size_t originlen) +{ + if (elem) { + elem->origin = origin; + if (origin) + elem->originlen = originlen; + elem->prev = *attr_stack_p; + *attr_stack_p = elem; + } +} + +static void bootstrap_attr_stack(struct index_state *istate, + struct attr_stack **stack) +{ + struct attr_stack *e; + unsigned flags = READ_ATTR_MACRO_OK; + + if (*stack) + return; + + /* builtin frame */ + e = read_attr_from_array(builtin_attr); + push_stack(stack, e, NULL, 0); + + /* system-wide frame */ + if (git_attr_system()) { + e = read_attr_from_file(git_etc_gitattributes(), flags); + push_stack(stack, e, NULL, 0); + } + + /* home directory */ + if (get_home_gitattributes()) { + e = read_attr_from_file(get_home_gitattributes(), flags); + push_stack(stack, e, NULL, 0); + } + + /* root directory */ + e = read_attr(istate, GITATTRIBUTES_FILE, flags | READ_ATTR_NOFOLLOW); + push_stack(stack, e, xstrdup(""), 0); + + /* info frame */ + if (startup_info->have_repository) + e = read_attr_from_file(git_path_info_attributes(), flags); + else + e = NULL; + if (!e) + CALLOC_ARRAY(e, 1); + push_stack(stack, e, NULL, 0); +} + +static void prepare_attr_stack(struct index_state *istate, + const char *path, int dirlen, + struct attr_stack **stack) +{ + struct attr_stack *info; + struct strbuf pathbuf = STRBUF_INIT; + + /* + * At the bottom of the attribute stack is the built-in + * set of attribute definitions, followed by the contents + * of $(prefix)/etc/gitattributes and a file specified by + * core.attributesfile. Then, contents from + * .gitattributes files from directories closer to the + * root to the ones in deeper directories are pushed + * to the stack. Finally, at the very top of the stack + * we always keep the contents of $GIT_DIR/info/attributes. + * + * When checking, we use entries from near the top of the + * stack, preferring $GIT_DIR/info/attributes, then + * .gitattributes in deeper directories to shallower ones, + * and finally use the built-in set as the default. + */ + bootstrap_attr_stack(istate, stack); + + /* + * Pop the "info" one that is always at the top of the stack. + */ + info = *stack; + *stack = info->prev; + + /* + * Pop the ones from directories that are not the prefix of + * the path we are checking. 
Break out of the loop when we see + * the root one (whose origin is an empty string "") or the builtin + * one (whose origin is NULL) without popping it. + */ + while ((*stack)->origin) { + int namelen = (*stack)->originlen; + struct attr_stack *elem; + + elem = *stack; + if (namelen <= dirlen && + !strncmp(elem->origin, path, namelen) && + (!namelen || path[namelen] == '/')) + break; + + debug_pop(elem); + *stack = elem->prev; + attr_stack_free(elem); + } + + /* + * bootstrap_attr_stack() should have added, and the + * above loop should have stopped before popping, the + * root element whose attr_stack->origin is set to an + * empty string. + */ + assert((*stack)->origin); + + strbuf_addstr(&pathbuf, (*stack)->origin); + /* Build up to the directory 'path' is in */ + while (pathbuf.len < dirlen) { + size_t len = pathbuf.len; + struct attr_stack *next; + char *origin; + + /* Skip path-separator */ + if (len < dirlen && is_dir_sep(path[len])) + len++; + /* Find the end of the next component */ + while (len < dirlen && !is_dir_sep(path[len])) + len++; + + if (pathbuf.len > 0) + strbuf_addch(&pathbuf, '/'); + strbuf_add(&pathbuf, path + pathbuf.len, (len - pathbuf.len)); + strbuf_addf(&pathbuf, "/%s", GITATTRIBUTES_FILE); + + next = read_attr(istate, pathbuf.buf, READ_ATTR_NOFOLLOW); + + /* reset the pathbuf to not include "/.gitattributes" */ + strbuf_setlen(&pathbuf, len); + + origin = xstrdup(pathbuf.buf); + push_stack(stack, next, origin, len); + } + + /* + * Finally push the "info" one at the top of the stack. + */ + push_stack(stack, info, NULL, 0); + + strbuf_release(&pathbuf); +} + +static int path_matches(const char *pathname, int pathlen, + int basename_offset, + const struct pattern *pat, + const char *base, int baselen) +{ + const char *pattern = pat->pattern; + int prefix = pat->nowildcardlen; + int isdir = (pathlen && pathname[pathlen - 1] == '/'); + + if ((pat->flags & PATTERN_FLAG_MUSTBEDIR) && !isdir) + return 0; + + if (pat->flags & PATTERN_FLAG_NODIR) { + return match_basename(pathname + basename_offset, + pathlen - basename_offset - isdir, + pattern, prefix, + pat->patternlen, pat->flags); + } + return match_pathname(pathname, pathlen - isdir, + base, baselen, + pattern, prefix, pat->patternlen, pat->flags); +} + +static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem); + +static int fill_one(const char *what, struct all_attrs_item *all_attrs, + const struct match_attr *a, int rem) +{ + size_t i; + + for (i = a->num_attr; rem > 0 && i > 0; i--) { + const struct git_attr *attr = a->state[i - 1].attr; + const char **n = &(all_attrs[attr->attr_nr].value); + const char *v = a->state[i - 1].setto; + + if (*n == ATTR__UNKNOWN) { + debug_set(what, + a->is_macro ? a->u.attr->name : a->u.pat.pattern, + attr, v); + *n = v; + rem--; + rem = macroexpand_one(all_attrs, attr->attr_nr, rem); + } + } + return rem; +} + +static int fill(const char *path, int pathlen, int basename_offset, + const struct attr_stack *stack, + struct all_attrs_item *all_attrs, int rem) +{ + for (; rem > 0 && stack; stack = stack->prev) { + unsigned i; + const char *base = stack->origin ? 
stack->origin : ""; + + for (i = stack->num_matches; 0 < rem && 0 < i; i--) { + const struct match_attr *a = stack->attrs[i - 1]; + if (a->is_macro) + continue; + if (path_matches(path, pathlen, basename_offset, + &a->u.pat, base, stack->originlen)) + rem = fill_one("fill", all_attrs, a, rem); + } + } + + return rem; +} + +static int macroexpand_one(struct all_attrs_item *all_attrs, int nr, int rem) +{ + const struct all_attrs_item *item = &all_attrs[nr]; + + if (item->macro && item->value == ATTR__TRUE) + return fill_one("expand", all_attrs, item->macro, rem); + else + return rem; +} + +/* + * Marks the attributes which are macros based on the attribute stack. + * This prevents having to search through the attribute stack each time + * a macro needs to be expanded during the fill stage. + */ +static void determine_macros(struct all_attrs_item *all_attrs, + const struct attr_stack *stack) +{ + for (; stack; stack = stack->prev) { + unsigned i; + for (i = stack->num_matches; i > 0; i--) { + const struct match_attr *ma = stack->attrs[i - 1]; + if (ma->is_macro) { + unsigned int n = ma->u.attr->attr_nr; + if (!all_attrs[n].macro) { + all_attrs[n].macro = ma; + } + } + } + } +} + +/* + * Collect attributes for path into the array pointed to by check->all_attrs. + * If check->check_nr is non-zero, only attributes in check[] are collected. + * Otherwise all attributes are collected. + */ +static void collect_some_attrs(struct index_state *istate, + const char *path, + struct attr_check *check) +{ + int pathlen, rem, dirlen; + const char *cp, *last_slash = NULL; + int basename_offset; + + for (cp = path; *cp; cp++) { + if (*cp == '/' && cp[1]) + last_slash = cp; + } + pathlen = cp - path; + if (last_slash) { + basename_offset = last_slash + 1 - path; + dirlen = last_slash - path; + } else { + basename_offset = 0; + dirlen = 0; + } + + prepare_attr_stack(istate, path, dirlen, &check->stack); + all_attrs_init(&g_attr_hashmap, check); + determine_macros(check->all_attrs, check->stack); + + rem = check->all_attrs_nr; + fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem); +} + +void git_check_attr(struct index_state *istate, + const char *path, + struct attr_check *check) +{ + int i; + + collect_some_attrs(istate, path, check); + + for (i = 0; i < check->nr; i++) { + unsigned int n = check->items[i].attr->attr_nr; + const char *value = check->all_attrs[n].value; + if (value == ATTR__UNKNOWN) + value = ATTR__UNSET; + check->items[i].value = value; + } +} + +void git_all_attrs(struct index_state *istate, + const char *path, struct attr_check *check) +{ + int i; + + attr_check_reset(check); + collect_some_attrs(istate, path, check); + + for (i = 0; i < check->all_attrs_nr; i++) { + const char *name = check->all_attrs[i].attr->name; + const char *value = check->all_attrs[i].value; + struct attr_check_item *item; + if (value == ATTR__UNSET || value == ATTR__UNKNOWN) + continue; + item = attr_check_append(check, git_attr(name)); + item->value = value; + } +} + +void attr_start(void) +{ + pthread_mutex_init(&g_attr_hashmap.mutex, NULL); + pthread_mutex_init(&check_vector.mutex, NULL); +} diff --git a/builtin/clone.c b/builtin/clone.c index 7743dc07d2bdb4..326b8729f8e220 100644 --- a/builtin/clone.c +++ b/builtin/clone.c @@ -424,13 +424,11 @@ static void copy_or_link_directory(struct strbuf *src, struct strbuf *dest, int src_len, dest_len; struct dir_iterator *iter; int iter_status; - unsigned int flags; struct strbuf realpath = STRBUF_INIT; mkdir_if_missing(dest->buf, 0777); - flags = 
DIR_ITERATOR_PEDANTIC | DIR_ITERATOR_FOLLOW_SYMLINKS; - iter = dir_iterator_begin(src->buf, flags); + iter = dir_iterator_begin(src->buf, DIR_ITERATOR_PEDANTIC); if (!iter) die_errno(_("failed to start iterator over '%s'"), src->buf); @@ -446,6 +444,10 @@ static void copy_or_link_directory(struct strbuf *src, struct strbuf *dest, strbuf_setlen(dest, dest_len); strbuf_addstr(dest, iter->relative_path); + if (S_ISLNK(iter->st.st_mode)) + die(_("symlink '%s' exists, refusing to clone with --local"), + iter->relative_path); + if (S_ISDIR(iter->st.st_mode)) { mkdir_if_missing(dest->buf, 0777); continue; @@ -1218,10 +1220,6 @@ int cmd_clone(int argc, const char **argv, const char *prefix) refspec_appendf(&remote->fetch, "+%s*:%s*", src_ref_prefix, branch_top.buf); - transport = transport_get(remote, remote->url[0]); - transport_set_verbosity(transport, option_verbosity, option_progress); - transport->family = family; - path = get_repo_path(remote->url[0], &is_bundle); is_local = option_local != 0 && path && !is_bundle; if (is_local) { @@ -1243,6 +1241,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix) } if (option_local > 0 && !is_local) warning(_("--local is ignored")); + + transport = transport_get(remote, path ? path : remote->url[0]); + transport_set_verbosity(transport, option_verbosity, option_progress); + transport->family = family; transport->cloning = 1; transport_set_option(transport, TRANS_OPT_KEEP, "yes"); diff --git a/builtin/clone.c.orig b/builtin/clone.c.orig new file mode 100644 index 00000000000000..4c5f35f6c153ef --- /dev/null +++ b/builtin/clone.c.orig @@ -0,0 +1,1428 @@ +/* + * Builtin "git clone" + * + * Copyright (c) 2007 Kristian Høgsberg , + * 2008 Daniel Barkalow + * Based on git-commit.sh by Junio C Hamano and Linus Torvalds + * + * Clone a repository into a different directory that does not yet exist. + */ + +#define USE_THE_INDEX_COMPATIBILITY_MACROS +#include "builtin.h" +#include "config.h" +#include "lockfile.h" +#include "parse-options.h" +#include "fetch-pack.h" +#include "refs.h" +#include "refspec.h" +#include "object-store.h" +#include "tree.h" +#include "tree-walk.h" +#include "unpack-trees.h" +#include "transport.h" +#include "strbuf.h" +#include "dir.h" +#include "dir-iterator.h" +#include "iterator.h" +#include "sigchain.h" +#include "branch.h" +#include "remote.h" +#include "run-command.h" +#include "connected.h" +#include "packfile.h" +#include "list-objects-filter-options.h" + +/* + * Overall FIXMEs: + * - respect DB_ENVIRONMENT for .git/objects. 
+ * + * Implementation notes: + * - dropping use-separate-remote and no-separate-remote compatibility + * + */ +static const char * const builtin_clone_usage[] = { + N_("git clone [] [--] []"), + NULL +}; + +static int option_no_checkout, option_bare, option_mirror, option_single_branch = -1; +static int option_local = -1, option_no_hardlinks, option_shared; +static int option_no_tags; +static int option_shallow_submodules; +static int option_reject_shallow = -1; /* unspecified */ +static int config_reject_shallow = -1; /* unspecified */ +static int deepen; +static char *option_template, *option_depth, *option_since; +static char *option_origin = NULL; +static char *remote_name = NULL; +static char *option_branch = NULL; +static struct string_list option_not = STRING_LIST_INIT_NODUP; +static const char *real_git_dir; +static char *option_upload_pack = "git-upload-pack"; +static int option_verbosity; +static int option_progress = -1; +static int option_sparse_checkout; +static enum transport_family family; +static struct string_list option_config = STRING_LIST_INIT_NODUP; +static struct string_list option_required_reference = STRING_LIST_INIT_NODUP; +static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP; +static int option_dissociate; +static int max_jobs = -1; +static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP; +static struct list_objects_filter_options filter_options; +static struct string_list server_options = STRING_LIST_INIT_NODUP; +static int option_remote_submodules; + +static int recurse_submodules_cb(const struct option *opt, + const char *arg, int unset) +{ + if (unset) + string_list_clear((struct string_list *)opt->value, 0); + else if (arg) + string_list_append((struct string_list *)opt->value, arg); + else + string_list_append((struct string_list *)opt->value, + (const char *)opt->defval); + + return 0; +} + +static struct option builtin_clone_options[] = { + OPT__VERBOSITY(&option_verbosity), + OPT_BOOL(0, "progress", &option_progress, + N_("force progress reporting")), + OPT_BOOL(0, "reject-shallow", &option_reject_shallow, + N_("don't clone shallow repository")), + OPT_BOOL('n', "no-checkout", &option_no_checkout, + N_("don't create a checkout")), + OPT_BOOL(0, "bare", &option_bare, N_("create a bare repository")), + OPT_HIDDEN_BOOL(0, "naked", &option_bare, + N_("create a bare repository")), + OPT_BOOL(0, "mirror", &option_mirror, + N_("create a mirror repository (implies bare)")), + OPT_BOOL('l', "local", &option_local, + N_("to clone from a local repository")), + OPT_BOOL(0, "no-hardlinks", &option_no_hardlinks, + N_("don't use local hardlinks, always copy")), + OPT_BOOL('s', "shared", &option_shared, + N_("setup as shared repository")), + { OPTION_CALLBACK, 0, "recurse-submodules", &option_recurse_submodules, + N_("pathspec"), N_("initialize submodules in the clone"), + PARSE_OPT_OPTARG, recurse_submodules_cb, (intptr_t)"." 
}, + OPT_ALIAS(0, "recursive", "recurse-submodules"), + OPT_INTEGER('j', "jobs", &max_jobs, + N_("number of submodules cloned in parallel")), + OPT_STRING(0, "template", &option_template, N_("template-directory"), + N_("directory from which templates will be used")), + OPT_STRING_LIST(0, "reference", &option_required_reference, N_("repo"), + N_("reference repository")), + OPT_STRING_LIST(0, "reference-if-able", &option_optional_reference, + N_("repo"), N_("reference repository")), + OPT_BOOL(0, "dissociate", &option_dissociate, + N_("use --reference only while cloning")), + OPT_STRING('o', "origin", &option_origin, N_("name"), + N_("use instead of 'origin' to track upstream")), + OPT_STRING('b', "branch", &option_branch, N_("branch"), + N_("checkout instead of the remote's HEAD")), + OPT_STRING('u', "upload-pack", &option_upload_pack, N_("path"), + N_("path to git-upload-pack on the remote")), + OPT_STRING(0, "depth", &option_depth, N_("depth"), + N_("create a shallow clone of that depth")), + OPT_STRING(0, "shallow-since", &option_since, N_("time"), + N_("create a shallow clone since a specific time")), + OPT_STRING_LIST(0, "shallow-exclude", &option_not, N_("revision"), + N_("deepen history of shallow clone, excluding rev")), + OPT_BOOL(0, "single-branch", &option_single_branch, + N_("clone only one branch, HEAD or --branch")), + OPT_BOOL(0, "no-tags", &option_no_tags, + N_("don't clone any tags, and make later fetches not to follow them")), + OPT_BOOL(0, "shallow-submodules", &option_shallow_submodules, + N_("any cloned submodules will be shallow")), + OPT_STRING(0, "separate-git-dir", &real_git_dir, N_("gitdir"), + N_("separate git dir from working tree")), + OPT_STRING_LIST('c', "config", &option_config, N_("key=value"), + N_("set config inside the new repository")), + OPT_STRING_LIST(0, "server-option", &server_options, + N_("server-specific"), N_("option to transmit")), + OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"), + TRANSPORT_FAMILY_IPV4), + OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"), + TRANSPORT_FAMILY_IPV6), + OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options), + OPT_BOOL(0, "remote-submodules", &option_remote_submodules, + N_("any cloned submodules will use their remote-tracking branch")), + OPT_BOOL(0, "sparse", &option_sparse_checkout, + N_("initialize sparse-checkout file to include only files at root")), + OPT_END() +}; + +static const char *get_repo_path_1(struct strbuf *path, int *is_bundle) +{ + static char *suffix[] = { "/.git", "", ".git/.git", ".git" }; + static char *bundle_suffix[] = { ".bundle", "" }; + size_t baselen = path->len; + struct stat st; + int i; + + for (i = 0; i < ARRAY_SIZE(suffix); i++) { + strbuf_setlen(path, baselen); + strbuf_addstr(path, suffix[i]); + if (stat(path->buf, &st)) + continue; + if (S_ISDIR(st.st_mode) && is_git_directory(path->buf)) { + *is_bundle = 0; + return path->buf; + } else if (S_ISREG(st.st_mode) && st.st_size > 8) { + /* Is it a "gitfile"? 
*/ + char signature[8]; + const char *dst; + int len, fd = open(path->buf, O_RDONLY); + if (fd < 0) + continue; + len = read_in_full(fd, signature, 8); + close(fd); + if (len != 8 || strncmp(signature, "gitdir: ", 8)) + continue; + dst = read_gitfile(path->buf); + if (dst) { + *is_bundle = 0; + return dst; + } + } + } + + for (i = 0; i < ARRAY_SIZE(bundle_suffix); i++) { + strbuf_setlen(path, baselen); + strbuf_addstr(path, bundle_suffix[i]); + if (!stat(path->buf, &st) && S_ISREG(st.st_mode)) { + *is_bundle = 1; + return path->buf; + } + } + + return NULL; +} + +static char *get_repo_path(const char *repo, int *is_bundle) +{ + struct strbuf path = STRBUF_INIT; + const char *raw; + char *canon; + + strbuf_addstr(&path, repo); + raw = get_repo_path_1(&path, is_bundle); + canon = raw ? absolute_pathdup(raw) : NULL; + strbuf_release(&path); + return canon; +} + +static char *guess_dir_name(const char *repo, int is_bundle, int is_bare) +{ + const char *end = repo + strlen(repo), *start, *ptr; + size_t len; + char *dir; + + /* + * Skip scheme. + */ + start = strstr(repo, "://"); + if (start == NULL) + start = repo; + else + start += 3; + + /* + * Skip authentication data. The stripping does happen + * greedily, such that we strip up to the last '@' inside + * the host part. + */ + for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) { + if (*ptr == '@') + start = ptr + 1; + } + + /* + * Strip trailing spaces, slashes and /.git + */ + while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1]))) + end--; + if (end - start > 5 && is_dir_sep(end[-5]) && + !strncmp(end - 4, ".git", 4)) { + end -= 5; + while (start < end && is_dir_sep(end[-1])) + end--; + } + + /* + * Strip trailing port number if we've got only a + * hostname (that is, there is no dir separator but a + * colon). This check is required such that we do not + * strip URI's like '/foo/bar:2222.git', which should + * result in a dir '2222' being guessed due to backwards + * compatibility. + */ + if (memchr(start, '/', end - start) == NULL + && memchr(start, ':', end - start) != NULL) { + ptr = end; + while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':') + ptr--; + if (start < ptr && ptr[-1] == ':') + end = ptr - 1; + } + + /* + * Find last component. To remain backwards compatible we + * also regard colons as path separators, such that + * cloning a repository 'foo:bar.git' would result in a + * directory 'bar' being guessed. + */ + ptr = end; + while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':') + ptr--; + start = ptr; + + /* + * Strip .{bundle,git}. + */ + len = end - start; + strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git"); + + if (!len || (len == 1 && *start == '/')) + die(_("No directory name could be guessed.\n" + "Please specify a directory on the command line")); + + if (is_bare) + dir = xstrfmt("%.*s.git", (int)len, start); + else + dir = xstrndup(start, len); + /* + * Replace sequences of 'control' characters and whitespace + * with one ascii space, remove leading and trailing spaces. 
+ */ + if (*dir) { + char *out = dir; + int prev_space = 1 /* strip leading whitespace */; + for (end = dir; *end; ++end) { + char ch = *end; + if ((unsigned char)ch < '\x20') + ch = '\x20'; + if (isspace(ch)) { + if (prev_space) + continue; + prev_space = 1; + } else + prev_space = 0; + *out++ = ch; + } + *out = '\0'; + if (out > dir && prev_space) + out[-1] = '\0'; + } + return dir; +} + +static void strip_trailing_slashes(char *dir) +{ + char *end = dir + strlen(dir); + + while (dir < end - 1 && is_dir_sep(end[-1])) + end--; + *end = '\0'; +} + +static int add_one_reference(struct string_list_item *item, void *cb_data) +{ + struct strbuf err = STRBUF_INIT; + int *required = cb_data; + char *ref_git = compute_alternate_path(item->string, &err); + + if (!ref_git) { + if (*required) + die("%s", err.buf); + else + fprintf(stderr, + _("info: Could not add alternate for '%s': %s\n"), + item->string, err.buf); + } else { + struct strbuf sb = STRBUF_INIT; + strbuf_addf(&sb, "%s/objects", ref_git); + add_to_alternates_file(sb.buf); + strbuf_release(&sb); + } + + strbuf_release(&err); + free(ref_git); + return 0; +} + +static void setup_reference(void) +{ + int required = 1; + for_each_string_list(&option_required_reference, + add_one_reference, &required); + required = 0; + for_each_string_list(&option_optional_reference, + add_one_reference, &required); +} + +static void copy_alternates(struct strbuf *src, const char *src_repo) +{ + /* + * Read from the source objects/info/alternates file + * and copy the entries to corresponding file in the + * destination repository with add_to_alternates_file(). + * Both src and dst have "$path/objects/info/alternates". + * + * Instead of copying bit-for-bit from the original, + * we need to append to existing one so that the already + * created entry via "clone -s" is not lost, and also + * to turn entries with paths relative to the original + * absolute, so that they can be used in the new repository. 
+ */ + FILE *in = xfopen(src->buf, "r"); + struct strbuf line = STRBUF_INIT; + + while (strbuf_getline(&line, in) != EOF) { + char *abs_path; + if (!line.len || line.buf[0] == '#') + continue; + if (is_absolute_path(line.buf)) { + add_to_alternates_file(line.buf); + continue; + } + abs_path = mkpathdup("%s/objects/%s", src_repo, line.buf); + if (!normalize_path_copy(abs_path, abs_path)) + add_to_alternates_file(abs_path); + else + warning("skipping invalid relative alternate: %s/%s", + src_repo, line.buf); + free(abs_path); + } + strbuf_release(&line); + fclose(in); +} + +static void mkdir_if_missing(const char *pathname, mode_t mode) +{ + struct stat st; + + if (!mkdir(pathname, mode)) + return; + + if (errno != EEXIST) + die_errno(_("failed to create directory '%s'"), pathname); + else if (stat(pathname, &st)) + die_errno(_("failed to stat '%s'"), pathname); + else if (!S_ISDIR(st.st_mode)) + die(_("%s exists and is not a directory"), pathname); +} + +static void copy_or_link_directory(struct strbuf *src, struct strbuf *dest, + const char *src_repo) +{ + int src_len, dest_len; + struct dir_iterator *iter; + int iter_status; + struct strbuf realpath = STRBUF_INIT; + + mkdir_if_missing(dest->buf, 0777); + + iter = dir_iterator_begin(src->buf, DIR_ITERATOR_PEDANTIC); + + if (!iter) + die_errno(_("failed to start iterator over '%s'"), src->buf); + + strbuf_addch(src, '/'); + src_len = src->len; + strbuf_addch(dest, '/'); + dest_len = dest->len; + + while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) { + strbuf_setlen(src, src_len); + strbuf_addstr(src, iter->relative_path); + strbuf_setlen(dest, dest_len); + strbuf_addstr(dest, iter->relative_path); + + if (S_ISLNK(iter->st.st_mode)) + die(_("symlink '%s' exists, refusing to clone with --local"), + iter->relative_path); + + if (S_ISDIR(iter->st.st_mode)) { + mkdir_if_missing(dest->buf, 0777); + continue; + } + + /* Files that cannot be copied bit-for-bit... 
*/ + if (!fspathcmp(iter->relative_path, "info/alternates")) { + copy_alternates(src, src_repo); + continue; + } + + if (unlink(dest->buf) && errno != ENOENT) + die_errno(_("failed to unlink '%s'"), dest->buf); + if (!option_no_hardlinks) { + strbuf_realpath(&realpath, src->buf, 1); + if (!link(realpath.buf, dest->buf)) + continue; + if (option_local > 0) + die_errno(_("failed to create link '%s'"), dest->buf); + option_no_hardlinks = 1; + } + if (copy_file_with_time(dest->buf, src->buf, 0666)) + die_errno(_("failed to copy file to '%s'"), dest->buf); + } + + if (iter_status != ITER_DONE) { + strbuf_setlen(src, src_len); + die(_("failed to iterate over '%s'"), src->buf); + } + + strbuf_release(&realpath); +} + +static void clone_local(const char *src_repo, const char *dest_repo) +{ + if (option_shared) { + struct strbuf alt = STRBUF_INIT; + get_common_dir(&alt, src_repo); + strbuf_addstr(&alt, "/objects"); + add_to_alternates_file(alt.buf); + strbuf_release(&alt); + } else { + struct strbuf src = STRBUF_INIT; + struct strbuf dest = STRBUF_INIT; + get_common_dir(&src, src_repo); + get_common_dir(&dest, dest_repo); + strbuf_addstr(&src, "/objects"); + strbuf_addstr(&dest, "/objects"); + copy_or_link_directory(&src, &dest, src_repo); + strbuf_release(&src); + strbuf_release(&dest); + } + + if (0 <= option_verbosity) + fprintf(stderr, _("done.\n")); +} + +static const char *junk_work_tree; +static int junk_work_tree_flags; +static const char *junk_git_dir; +static int junk_git_dir_flags; +static enum { + JUNK_LEAVE_NONE, + JUNK_LEAVE_REPO, + JUNK_LEAVE_ALL +} junk_mode = JUNK_LEAVE_NONE; + +static const char junk_leave_repo_msg[] = +N_("Clone succeeded, but checkout failed.\n" + "You can inspect what was checked out with 'git status'\n" + "and retry with 'git restore --source=HEAD :/'\n"); + +static void remove_junk(void) +{ + struct strbuf sb = STRBUF_INIT; + + switch (junk_mode) { + case JUNK_LEAVE_REPO: + warning("%s", _(junk_leave_repo_msg)); + /* fall-through */ + case JUNK_LEAVE_ALL: + return; + default: + /* proceed to removal */ + break; + } + + if (junk_git_dir) { + strbuf_addstr(&sb, junk_git_dir); + remove_dir_recursively(&sb, junk_git_dir_flags); + strbuf_reset(&sb); + } + if (junk_work_tree) { + strbuf_addstr(&sb, junk_work_tree); + remove_dir_recursively(&sb, junk_work_tree_flags); + } + strbuf_release(&sb); +} + +static void remove_junk_on_signal(int signo) +{ + remove_junk(); + sigchain_pop(signo); + raise(signo); +} + +static struct ref *find_remote_branch(const struct ref *refs, const char *branch) +{ + struct ref *ref; + struct strbuf head = STRBUF_INIT; + strbuf_addstr(&head, "refs/heads/"); + strbuf_addstr(&head, branch); + ref = find_ref_by_name(refs, head.buf); + strbuf_release(&head); + + if (ref) + return ref; + + strbuf_addstr(&head, "refs/tags/"); + strbuf_addstr(&head, branch); + ref = find_ref_by_name(refs, head.buf); + strbuf_release(&head); + + return ref; +} + +static struct ref *wanted_peer_refs(const struct ref *refs, + struct refspec *refspec) +{ + struct ref *head = copy_ref(find_ref_by_name(refs, "HEAD")); + struct ref *local_refs = head; + struct ref **tail = head ? 
&head->next : &local_refs; + + if (option_single_branch) { + struct ref *remote_head = NULL; + + if (!option_branch) + remote_head = guess_remote_head(head, refs, 0); + else { + local_refs = NULL; + tail = &local_refs; + remote_head = copy_ref(find_remote_branch(refs, option_branch)); + } + + if (!remote_head && option_branch) + warning(_("Could not find remote branch %s to clone."), + option_branch); + else { + int i; + for (i = 0; i < refspec->nr; i++) + get_fetch_map(remote_head, &refspec->items[i], + &tail, 0); + + /* if --branch=tag, pull the requested tag explicitly */ + get_fetch_map(remote_head, tag_refspec, &tail, 0); + } + } else { + int i; + for (i = 0; i < refspec->nr; i++) + get_fetch_map(refs, &refspec->items[i], &tail, 0); + } + + if (!option_mirror && !option_single_branch && !option_no_tags) + get_fetch_map(refs, tag_refspec, &tail, 0); + + return local_refs; +} + +static void write_remote_refs(const struct ref *local_refs) +{ + const struct ref *r; + + struct ref_transaction *t; + struct strbuf err = STRBUF_INIT; + + t = ref_transaction_begin(&err); + if (!t) + die("%s", err.buf); + + for (r = local_refs; r; r = r->next) { + if (!r->peer_ref) + continue; + if (ref_transaction_create(t, r->peer_ref->name, &r->old_oid, + 0, NULL, &err)) + die("%s", err.buf); + } + + if (initial_ref_transaction_commit(t, &err)) + die("%s", err.buf); + + strbuf_release(&err); + ref_transaction_free(t); +} + +static void write_followtags(const struct ref *refs, const char *msg) +{ + const struct ref *ref; + for (ref = refs; ref; ref = ref->next) { + if (!starts_with(ref->name, "refs/tags/")) + continue; + if (ends_with(ref->name, "^{}")) + continue; + if (!has_object_file_with_flags(&ref->old_oid, + OBJECT_INFO_QUICK | + OBJECT_INFO_SKIP_FETCH_OBJECT)) + continue; + update_ref(msg, ref->name, &ref->old_oid, NULL, 0, + UPDATE_REFS_DIE_ON_ERR); + } +} + +static int iterate_ref_map(void *cb_data, struct object_id *oid) +{ + struct ref **rm = cb_data; + struct ref *ref = *rm; + + /* + * Skip anything missing a peer_ref, which we are not + * actually going to write a ref for. + */ + while (ref && !ref->peer_ref) + ref = ref->next; + /* Returning -1 notes "end of list" to the caller. 
*/ + if (!ref) + return -1; + + oidcpy(oid, &ref->old_oid); + *rm = ref->next; + return 0; +} + +static void update_remote_refs(const struct ref *refs, + const struct ref *mapped_refs, + const struct ref *remote_head_points_at, + const char *branch_top, + const char *msg, + struct transport *transport, + int check_connectivity) +{ + const struct ref *rm = mapped_refs; + + if (check_connectivity) { + struct check_connected_options opt = CHECK_CONNECTED_INIT; + + opt.transport = transport; + opt.progress = transport->progress; + + if (check_connected(iterate_ref_map, &rm, &opt)) + die(_("remote did not send all necessary objects")); + } + + if (refs) { + write_remote_refs(mapped_refs); + if (option_single_branch && !option_no_tags) + write_followtags(refs, msg); + } + + if (remote_head_points_at && !option_bare) { + struct strbuf head_ref = STRBUF_INIT; + strbuf_addstr(&head_ref, branch_top); + strbuf_addstr(&head_ref, "HEAD"); + if (create_symref(head_ref.buf, + remote_head_points_at->peer_ref->name, + msg) < 0) + die(_("unable to update %s"), head_ref.buf); + strbuf_release(&head_ref); + } +} + +static void update_head(const struct ref *our, const struct ref *remote, + const char *msg) +{ + const char *head; + if (our && skip_prefix(our->name, "refs/heads/", &head)) { + /* Local default branch link */ + if (create_symref("HEAD", our->name, NULL) < 0) + die(_("unable to update HEAD")); + if (!option_bare) { + update_ref(msg, "HEAD", &our->old_oid, NULL, 0, + UPDATE_REFS_DIE_ON_ERR); + install_branch_config(0, head, remote_name, our->name); + } + } else if (our) { + struct commit *c = lookup_commit_reference(the_repository, + &our->old_oid); + /* --branch specifies a non-branch (i.e. tags), detach HEAD */ + update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NO_DEREF, + UPDATE_REFS_DIE_ON_ERR); + } else if (remote) { + /* + * We know remote HEAD points to a non-branch, or + * HEAD points to a branch but we don't know which one. + * Detach HEAD in all these cases. + */ + update_ref(msg, "HEAD", &remote->old_oid, NULL, REF_NO_DEREF, + UPDATE_REFS_DIE_ON_ERR); + } +} + +static int git_sparse_checkout_init(const char *repo) +{ + struct strvec argv = STRVEC_INIT; + int result = 0; + strvec_pushl(&argv, "-C", repo, "sparse-checkout", "init", NULL); + + /* + * We must apply the setting in the current process + * for the later checkout to use the sparse-checkout file. 
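The iterate_ref_map() callback above follows the pull-style iterator convention that check_connected() expects: each call yields the next object id and returns 0, skipping entries without a peer_ref, and returns -1 once the list is exhausted. A standalone sketch of that convention with hypothetical types (not Git's code):

/*
 * Standalone sketch of the pull-style iterator convention used by
 * iterate_ref_map(): write the next item and return 0, or return -1
 * to signal "end of list".  Types and names here are hypothetical.
 */
#include <stdio.h>

struct node { int value; int wanted; struct node *next; };

static int next_wanted(void *cb_data, int *out)
{
	struct node **pos = cb_data;
	struct node *n = *pos;

	/* skip entries the consumer is not interested in */
	while (n && !n->wanted)
		n = n->next;
	if (!n)
		return -1;	/* end of list */

	*out = n->value;
	*pos = n->next;		/* remember where to resume next time */
	return 0;
}

int main(void)
{
	struct node c = { 3, 1, NULL }, b = { 2, 0, &c }, a = { 1, 1, &b };
	struct node *cursor = &a;
	int v;

	while (!next_wanted(&cursor, &v))
		printf("%d\n", v);	/* prints 1 then 3; 2 is skipped */
	return 0;
}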
+ */ + core_apply_sparse_checkout = 1; + + if (run_command_v_opt(argv.v, RUN_GIT_CMD)) { + error(_("failed to initialize sparse-checkout")); + result = 1; + } + + strvec_clear(&argv); + return result; +} + +static int checkout(int submodule_progress) +{ + struct object_id oid; + char *head; + struct lock_file lock_file = LOCK_INIT; + struct unpack_trees_options opts; + struct tree *tree; + struct tree_desc t; + int err = 0; + + if (option_no_checkout) + return 0; + + head = resolve_refdup("HEAD", RESOLVE_REF_READING, &oid, NULL); + if (!head) { + warning(_("remote HEAD refers to nonexistent ref, " + "unable to checkout.\n")); + return 0; + } + if (!strcmp(head, "HEAD")) { + if (advice_detached_head) + detach_advice(oid_to_hex(&oid)); + FREE_AND_NULL(head); + } else { + if (!starts_with(head, "refs/heads/")) + die(_("HEAD not found below refs/heads!")); + } + + /* We need to be in the new work tree for the checkout */ + setup_work_tree(); + + hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR); + + memset(&opts, 0, sizeof opts); + opts.update = 1; + opts.merge = 1; + opts.clone = 1; + opts.fn = oneway_merge; + opts.verbose_update = (option_verbosity >= 0); + opts.src_index = &the_index; + opts.dst_index = &the_index; + init_checkout_metadata(&opts.meta, head, &oid, NULL); + + tree = parse_tree_indirect(&oid); + parse_tree(tree); + init_tree_desc(&t, tree->buffer, tree->size); + if (unpack_trees(1, &t, &opts) < 0) + die(_("unable to checkout working tree")); + + free(head); + + if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) + die(_("unable to write new index file")); + + err |= run_hook_le(NULL, "post-checkout", oid_to_hex(null_oid()), + oid_to_hex(&oid), "1", NULL); + + if (!err && (option_recurse_submodules.nr > 0)) { + struct strvec args = STRVEC_INIT; + strvec_pushl(&args, "submodule", "update", "--require-init", "--recursive", NULL); + + if (option_shallow_submodules == 1) + strvec_push(&args, "--depth=1"); + + if (max_jobs != -1) + strvec_pushf(&args, "--jobs=%d", max_jobs); + + if (submodule_progress) + strvec_push(&args, "--progress"); + + if (option_verbosity < 0) + strvec_push(&args, "--quiet"); + + if (option_remote_submodules) { + strvec_push(&args, "--remote"); + strvec_push(&args, "--no-fetch"); + } + + if (option_single_branch >= 0) + strvec_push(&args, option_single_branch ? + "--single-branch" : + "--no-single-branch"); + + err = run_command_v_opt(args.v, RUN_GIT_CMD); + strvec_clear(&args); + } + + return err; +} + +static int git_clone_config(const char *k, const char *v, void *cb) +{ + if (!strcmp(k, "clone.defaultremotename")) { + free(remote_name); + remote_name = xstrdup(v); + } + if (!strcmp(k, "clone.rejectshallow")) + config_reject_shallow = git_config_bool(k, v); + + return git_default_config(k, v, cb); +} + +static int write_one_config(const char *key, const char *value, void *data) +{ + /* + * give git_clone_config a chance to write config values back to the + * environment, since git_config_set_multivar_gently only deals with + * config-file writes + */ + int apply_failed = git_clone_config(key, value, data); + if (apply_failed) + return apply_failed; + + return git_config_set_multivar_gently(key, + value ? 
value : "true", + CONFIG_REGEX_NONE, 0); +} + +static void write_config(struct string_list *config) +{ + int i; + + for (i = 0; i < config->nr; i++) { + if (git_config_parse_parameter(config->items[i].string, + write_one_config, NULL) < 0) + die(_("unable to write parameters to config file")); + } +} + +static void write_refspec_config(const char *src_ref_prefix, + const struct ref *our_head_points_at, + const struct ref *remote_head_points_at, + struct strbuf *branch_top) +{ + struct strbuf key = STRBUF_INIT; + struct strbuf value = STRBUF_INIT; + + if (option_mirror || !option_bare) { + if (option_single_branch && !option_mirror) { + if (option_branch) { + if (starts_with(our_head_points_at->name, "refs/tags/")) + strbuf_addf(&value, "+%s:%s", our_head_points_at->name, + our_head_points_at->name); + else + strbuf_addf(&value, "+%s:%s%s", our_head_points_at->name, + branch_top->buf, option_branch); + } else if (remote_head_points_at) { + const char *head = remote_head_points_at->name; + if (!skip_prefix(head, "refs/heads/", &head)) + BUG("remote HEAD points at non-head?"); + + strbuf_addf(&value, "+%s:%s%s", remote_head_points_at->name, + branch_top->buf, head); + } + /* + * otherwise, the next "git fetch" will + * simply fetch from HEAD without updating + * any remote-tracking branch, which is what + * we want. + */ + } else { + strbuf_addf(&value, "+%s*:%s*", src_ref_prefix, branch_top->buf); + } + /* Configure the remote */ + if (value.len) { + strbuf_addf(&key, "remote.%s.fetch", remote_name); + git_config_set_multivar(key.buf, value.buf, "^$", 0); + strbuf_reset(&key); + + if (option_mirror) { + strbuf_addf(&key, "remote.%s.mirror", remote_name); + git_config_set(key.buf, "true"); + strbuf_reset(&key); + } + } + } + + strbuf_release(&key); + strbuf_release(&value); +} + +static void dissociate_from_references(void) +{ + static const char* argv[] = { "repack", "-a", "-d", NULL }; + char *alternates = git_pathdup("objects/info/alternates"); + + if (!access(alternates, F_OK)) { + if (run_command_v_opt(argv, RUN_GIT_CMD|RUN_COMMAND_NO_STDIN)) + die(_("cannot repack to clean up")); + if (unlink(alternates) && errno != ENOENT) + die_errno(_("cannot unlink temporary alternates file")); + } + free(alternates); +} + +static int path_exists(const char *path) +{ + struct stat sb; + return !stat(path, &sb); +} + +int cmd_clone(int argc, const char **argv, const char *prefix) +{ + int is_bundle = 0, is_local; + int reject_shallow = 0; + const char *repo_name, *repo, *work_tree, *git_dir; + char *path = NULL, *dir, *display_repo = NULL; + int dest_exists, real_dest_exists = 0; + const struct ref *refs, *remote_head; + struct ref *remote_head_points_at = NULL; + const struct ref *our_head_points_at; + struct ref *mapped_refs; + const struct ref *ref; + struct strbuf key = STRBUF_INIT; + struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT; + struct transport *transport = NULL; + const char *src_ref_prefix = "refs/heads/"; + struct remote *remote; + int err = 0, complete_refs_before_fetch = 1; + int submodule_progress; + + struct transport_ls_refs_options transport_ls_refs_options = + TRANSPORT_LS_REFS_OPTIONS_INIT; + + packet_trace_identity("clone"); + + git_config(git_clone_config, NULL); + + argc = parse_options(argc, argv, prefix, builtin_clone_options, + builtin_clone_usage, 0); + + if (argc > 2) + usage_msg_opt(_("Too many arguments."), + builtin_clone_usage, builtin_clone_options); + + if (argc == 0) + usage_msg_opt(_("You must specify a repository to clone."), + 
builtin_clone_usage, builtin_clone_options); + + if (option_depth || option_since || option_not.nr) + deepen = 1; + if (option_single_branch == -1) + option_single_branch = deepen ? 1 : 0; + + if (option_mirror) + option_bare = 1; + + if (option_bare) { + if (option_origin) + die(_("--bare and --origin %s options are incompatible."), + option_origin); + if (real_git_dir) + die(_("--bare and --separate-git-dir are incompatible.")); + option_no_checkout = 1; + } + + repo_name = argv[0]; + + path = get_repo_path(repo_name, &is_bundle); + if (path) { + FREE_AND_NULL(path); + repo = absolute_pathdup(repo_name); + } else if (strchr(repo_name, ':')) { + repo = repo_name; + display_repo = transport_anonymize_url(repo); + } else + die(_("repository '%s' does not exist"), repo_name); + + /* no need to be strict, transport_set_option() will validate it again */ + if (option_depth && atoi(option_depth) < 1) + die(_("depth %s is not a positive number"), option_depth); + + if (argc == 2) + dir = xstrdup(argv[1]); + else + dir = guess_dir_name(repo_name, is_bundle, option_bare); + strip_trailing_slashes(dir); + + dest_exists = path_exists(dir); + if (dest_exists && !is_empty_dir(dir)) + die(_("destination path '%s' already exists and is not " + "an empty directory."), dir); + + if (real_git_dir) { + real_dest_exists = path_exists(real_git_dir); + if (real_dest_exists && !is_empty_dir(real_git_dir)) + die(_("repository path '%s' already exists and is not " + "an empty directory."), real_git_dir); + } + + + strbuf_addf(&reflog_msg, "clone: from %s", + display_repo ? display_repo : repo); + free(display_repo); + + if (option_bare) + work_tree = NULL; + else { + work_tree = getenv("GIT_WORK_TREE"); + if (work_tree && path_exists(work_tree)) + die(_("working tree '%s' already exists."), work_tree); + } + + if (option_bare || work_tree) + git_dir = xstrdup(dir); + else { + work_tree = dir; + git_dir = mkpathdup("%s/.git", dir); + } + + atexit(remove_junk); + sigchain_push_common(remove_junk_on_signal); + + if (!option_bare) { + if (safe_create_leading_directories_const(work_tree) < 0) + die_errno(_("could not create leading directories of '%s'"), + work_tree); + if (dest_exists) + junk_work_tree_flags |= REMOVE_DIR_KEEP_TOPLEVEL; + else if (mkdir(work_tree, 0777)) + die_errno(_("could not create work tree dir '%s'"), + work_tree); + junk_work_tree = work_tree; + set_git_work_tree(work_tree); + } + + if (real_git_dir) { + if (real_dest_exists) + junk_git_dir_flags |= REMOVE_DIR_KEEP_TOPLEVEL; + junk_git_dir = real_git_dir; + } else { + if (dest_exists) + junk_git_dir_flags |= REMOVE_DIR_KEEP_TOPLEVEL; + junk_git_dir = git_dir; + } + if (safe_create_leading_directories_const(git_dir) < 0) + die(_("could not create leading directories of '%s'"), git_dir); + + if (0 <= option_verbosity) { + if (option_bare) + fprintf(stderr, _("Cloning into bare repository '%s'...\n"), dir); + else + fprintf(stderr, _("Cloning into '%s'...\n"), dir); + } + + if (option_recurse_submodules.nr > 0) { + struct string_list_item *item; + struct strbuf sb = STRBUF_INIT; + + /* remove duplicates */ + string_list_sort(&option_recurse_submodules); + string_list_remove_duplicates(&option_recurse_submodules, 0); + + /* + * NEEDSWORK: In a multi-working-tree world, this needs to be + * set in the per-worktree config. 
+ */ + for_each_string_list_item(item, &option_recurse_submodules) { + strbuf_addf(&sb, "submodule.active=%s", + item->string); + string_list_append(&option_config, + strbuf_detach(&sb, NULL)); + } + + if (option_required_reference.nr && + option_optional_reference.nr) + die(_("clone --recursive is not compatible with " + "both --reference and --reference-if-able")); + else if (option_required_reference.nr) { + string_list_append(&option_config, + "submodule.alternateLocation=superproject"); + string_list_append(&option_config, + "submodule.alternateErrorStrategy=die"); + } else if (option_optional_reference.nr) { + string_list_append(&option_config, + "submodule.alternateLocation=superproject"); + string_list_append(&option_config, + "submodule.alternateErrorStrategy=info"); + } + } + + init_db(git_dir, real_git_dir, option_template, GIT_HASH_UNKNOWN, NULL, + INIT_DB_QUIET); + + if (real_git_dir) + git_dir = real_git_dir; + + /* + * additional config can be injected with -c, make sure it's included + * after init_db, which clears the entire config environment. + */ + write_config(&option_config); + + /* + * re-read config after init_db and write_config to pick up any config + * injected by --template and --config, respectively. + */ + git_config(git_clone_config, NULL); + + /* + * If option_reject_shallow is specified from CLI option, + * ignore config_reject_shallow from git_clone_config. + */ + if (config_reject_shallow != -1) + reject_shallow = config_reject_shallow; + if (option_reject_shallow != -1) + reject_shallow = option_reject_shallow; + + /* + * apply the remote name provided by --origin only after this second + * call to git_config, to ensure it overrides all config-based values. + */ + if (option_origin != NULL) + remote_name = xstrdup(option_origin); + + if (remote_name == NULL) + remote_name = xstrdup("origin"); + + if (!valid_remote_name(remote_name)) + die(_("'%s' is not a valid remote name"), remote_name); + + if (option_bare) { + if (option_mirror) + src_ref_prefix = "refs/"; + strbuf_addstr(&branch_top, src_ref_prefix); + + git_config_set("core.bare", "true"); + } else { + strbuf_addf(&branch_top, "refs/remotes/%s/", remote_name); + } + + strbuf_addf(&key, "remote.%s.url", remote_name); + git_config_set(key.buf, repo); + strbuf_reset(&key); + + if (option_no_tags) { + strbuf_addf(&key, "remote.%s.tagOpt", remote_name); + git_config_set(key.buf, "--no-tags"); + strbuf_reset(&key); + } + + if (option_required_reference.nr || option_optional_reference.nr) + setup_reference(); + + if (option_sparse_checkout && git_sparse_checkout_init(dir)) + return 1; + + remote = remote_get(remote_name); + + refspec_appendf(&remote->fetch, "+%s*:%s*", src_ref_prefix, + branch_top.buf); + + transport = transport_get(remote, remote->url[0]); + transport_set_verbosity(transport, option_verbosity, option_progress); + transport->family = family; + + path = get_repo_path(remote->url[0], &is_bundle); + is_local = option_local != 0 && path && !is_bundle; + if (is_local) { + if (option_depth) + warning(_("--depth is ignored in local clones; use file:// instead.")); + if (option_since) + warning(_("--shallow-since is ignored in local clones; use file:// instead.")); + if (option_not.nr) + warning(_("--shallow-exclude is ignored in local clones; use file:// instead.")); + if (filter_options.choice) + warning(_("--filter is ignored in local clones; use file:// instead.")); + if (!access(mkpath("%s/shallow", path), F_OK)) { + if (reject_shallow) + die(_("source repository is shallow, reject to 
clone.")); + if (option_local > 0) + warning(_("source repository is shallow, ignoring --local")); + is_local = 0; + } + } + if (option_local > 0 && !is_local) + warning(_("--local is ignored")); + transport->cloning = 1; + + transport_set_option(transport, TRANS_OPT_KEEP, "yes"); + + if (reject_shallow) + transport_set_option(transport, TRANS_OPT_REJECT_SHALLOW, "1"); + if (option_depth) + transport_set_option(transport, TRANS_OPT_DEPTH, + option_depth); + if (option_since) + transport_set_option(transport, TRANS_OPT_DEEPEN_SINCE, + option_since); + if (option_not.nr) + transport_set_option(transport, TRANS_OPT_DEEPEN_NOT, + (const char *)&option_not); + if (option_single_branch) + transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1"); + + if (option_upload_pack) + transport_set_option(transport, TRANS_OPT_UPLOADPACK, + option_upload_pack); + + if (server_options.nr) + transport->server_options = &server_options; + + if (filter_options.choice) { + const char *spec = + expand_list_objects_filter_spec(&filter_options); + transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER, + spec); + transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1"); + } + + if (transport->smart_options && !deepen && !filter_options.choice) + transport->smart_options->check_self_contained_and_connected = 1; + + + strvec_push(&transport_ls_refs_options.ref_prefixes, "HEAD"); + refspec_ref_prefixes(&remote->fetch, + &transport_ls_refs_options.ref_prefixes); + if (option_branch) + expand_ref_prefix(&transport_ls_refs_options.ref_prefixes, + option_branch); + if (!option_no_tags) + strvec_push(&transport_ls_refs_options.ref_prefixes, + "refs/tags/"); + + refs = transport_get_remote_refs(transport, &transport_ls_refs_options); + + if (refs) { + int hash_algo = hash_algo_by_ptr(transport_get_hash_algo(transport)); + + /* + * Now that we know what algorithm the remote side is using, + * let's set ours to the same thing. + */ + initialize_repository_version(hash_algo, 1); + repo_set_hash_algo(the_repository, hash_algo); + + mapped_refs = wanted_peer_refs(refs, &remote->fetch); + /* + * transport_get_remote_refs() may return refs with null sha-1 + * in mapped_refs (see struct transport->get_refs_list + * comment). In that case we need fetch it early because + * remote_head code below relies on it. + * + * for normal clones, transport_get_remote_refs() should + * return reliable ref set, we can delay cloning until after + * remote HEAD check. 
+ */ + for (ref = refs; ref; ref = ref->next) + if (is_null_oid(&ref->old_oid)) { + complete_refs_before_fetch = 0; + break; + } + + if (!is_local && !complete_refs_before_fetch) { + if (transport_fetch_refs(transport, mapped_refs)) + die(_("remote transport reported error")); + } + + remote_head = find_ref_by_name(refs, "HEAD"); + remote_head_points_at = + guess_remote_head(remote_head, mapped_refs, 0); + + if (option_branch) { + our_head_points_at = + find_remote_branch(mapped_refs, option_branch); + + if (!our_head_points_at) + die(_("Remote branch %s not found in upstream %s"), + option_branch, remote_name); + } + else + our_head_points_at = remote_head_points_at; + } + else { + const char *branch; + char *ref; + + if (option_branch) + die(_("Remote branch %s not found in upstream %s"), + option_branch, remote_name); + + warning(_("You appear to have cloned an empty repository.")); + mapped_refs = NULL; + our_head_points_at = NULL; + remote_head_points_at = NULL; + remote_head = NULL; + option_no_checkout = 1; + + if (transport_ls_refs_options.unborn_head_target && + skip_prefix(transport_ls_refs_options.unborn_head_target, + "refs/heads/", &branch)) { + ref = transport_ls_refs_options.unborn_head_target; + transport_ls_refs_options.unborn_head_target = NULL; + create_symref("HEAD", ref, reflog_msg.buf); + } else { + branch = git_default_branch_name(0); + ref = xstrfmt("refs/heads/%s", branch); + } + + if (!option_bare) + install_branch_config(0, branch, remote_name, ref); + + free(ref); + } + + write_refspec_config(src_ref_prefix, our_head_points_at, + remote_head_points_at, &branch_top); + + if (filter_options.choice) + partial_clone_register(remote_name, &filter_options); + + if (is_local) + clone_local(path, git_dir); + else if (refs && complete_refs_before_fetch) { + if (transport_fetch_refs(transport, mapped_refs)) + die(_("remote transport reported error")); + } + + update_remote_refs(refs, mapped_refs, remote_head_points_at, + branch_top.buf, reflog_msg.buf, transport, + !is_local); + + update_head(our_head_points_at, remote_head, reflog_msg.buf); + + /* + * We want to show progress for recursive submodule clones iff + * we did so for the main clone. But only the transport knows + * the final decision for this flag, so we need to rescue the value + * before we free the transport. 
+	 */
+	submodule_progress = transport->progress;
+
+	transport_unlock_pack(transport);
+	transport_disconnect(transport);
+
+	if (option_dissociate) {
+		close_object_store(the_repository->objects);
+		dissociate_from_references();
+	}
+
+	junk_mode = JUNK_LEAVE_REPO;
+	err = checkout(submodule_progress);
+
+	free(remote_name);
+	strbuf_release(&reflog_msg);
+	strbuf_release(&branch_top);
+	strbuf_release(&key);
+	free_refs(mapped_refs);
+	free_refs(remote_head_points_at);
+	free(dir);
+	free(path);
+	UNLEAK(repo);
+	junk_mode = JUNK_LEAVE_ALL;
+
+	strvec_clear(&transport_ls_refs_options.ref_prefixes);
+	free(transport_ls_refs_options.unborn_head_target);
+	return err;
+}
diff --git a/column.c b/column.c
index 1261e18a72e972..fbf88639aaed93 100644
--- a/column.c
+++ b/column.c
@@ -23,7 +23,7 @@ struct column_data {
 /* return length of 's' in letters, ANSI escapes stripped */
 static int item_length(const char *s)
 {
-	return utf8_strnwidth(s, -1, 1);
+	return utf8_strnwidth(s, strlen(s), 1);
 }
 
 /*
diff --git a/compat/mingw.c b/compat/mingw.c
index 9e0cd1e097f25f..5e42191fa4acb4 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -1,5 +1,6 @@
 #include "../git-compat-util.h"
 #include "win32.h"
+#include <aclapi.h>
 #include <conio.h>
 #include <wchar.h>
 #include "../strbuf.h"
@@ -1083,6 +1084,7 @@ int pipe(int filedes[2])
 	return 0;
 }
 
+#ifndef __MINGW64__
 struct tm *gmtime_r(const time_t *timep, struct tm *result)
 {
 	if (gmtime_s(result, timep) == 0)
@@ -1096,6 +1098,7 @@ struct tm *localtime_r(const time_t *timep, struct tm *result)
 		return result;
 	return NULL;
 }
+#endif
 
 char *mingw_getcwd(char *pointer, int len)
 {
@@ -2622,6 +2625,92 @@ static void setup_windows_environment(void)
 	}
 }
 
+static PSID get_current_user_sid(void)
+{
+	HANDLE token;
+	DWORD len = 0;
+	PSID result = NULL;
+
+	if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &token))
+		return NULL;
+
+	if (!GetTokenInformation(token, TokenUser, NULL, 0, &len)) {
+		TOKEN_USER *info = xmalloc((size_t)len);
+		if (GetTokenInformation(token, TokenUser, info, len, &len)) {
+			len = GetLengthSid(info->User.Sid);
+			result = xmalloc(len);
+			if (!CopySid(len, result, info->User.Sid)) {
+				error(_("failed to copy SID (%ld)"),
+				      GetLastError());
+				FREE_AND_NULL(result);
+			}
+		}
+		FREE_AND_NULL(info);
+	}
+	CloseHandle(token);
+
+	return result;
+}
+
+int is_path_owned_by_current_sid(const char *path)
+{
+	WCHAR wpath[MAX_PATH];
+	PSID sid = NULL;
+	PSECURITY_DESCRIPTOR descriptor = NULL;
+	DWORD err;
+
+	static wchar_t home[MAX_PATH];
+
+	int result = 0;
+
+	if (xutftowcs_path(wpath, path) < 0)
+		return 0;
+
+	/*
+	 * On Windows, the home directory is owned by the administrator, but for
+	 * all practical purposes, it belongs to the user. Do pretend that it is
+	 * owned by the user.
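+	 * (The home directory is found via the HOME environment variable and is
+	 * compared case-insensitively.)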
+ */ + if (!*home) { + DWORD size = ARRAY_SIZE(home); + DWORD len = GetEnvironmentVariableW(L"HOME", home, size); + if (!len || len > size) + wcscpy(home, L"::N/A::"); + } + if (!wcsicmp(wpath, home)) + return 1; + + /* Get the owner SID */ + err = GetNamedSecurityInfoW(wpath, SE_FILE_OBJECT, + OWNER_SECURITY_INFORMATION | + DACL_SECURITY_INFORMATION, + &sid, NULL, NULL, NULL, &descriptor); + + if (err != ERROR_SUCCESS) + error(_("failed to get owner for '%s' (%ld)"), path, err); + else if (sid && IsValidSid(sid)) { + /* Now, verify that the SID matches the current user's */ + static PSID current_user_sid; + + if (!current_user_sid) + current_user_sid = get_current_user_sid(); + + if (current_user_sid && + IsValidSid(current_user_sid) && + EqualSid(sid, current_user_sid)) + result = 1; + } + + /* + * We can release the security descriptor struct only now because `sid` + * actually points into this struct. + */ + if (descriptor) + LocalFree(descriptor); + + return result; +} + int is_valid_win32_path(const char *path, int allow_literal_nul) { const char *p = path; diff --git a/compat/mingw.c.orig b/compat/mingw.c.orig new file mode 100644 index 00000000000000..e14f2d5f77ce7c --- /dev/null +++ b/compat/mingw.c.orig @@ -0,0 +1,2930 @@ +#include "../git-compat-util.h" +#include "win32.h" +#include +#include +#include "../strbuf.h" +#include "../run-command.h" +#include "../cache.h" +#include "win32/lazyload.h" +#include "../config.h" +#include "dir.h" + +#define HCAST(type, handle) ((type)(intptr_t)handle) + +static const int delay[] = { 0, 1, 10, 20, 40 }; + +void open_in_gdb(void) +{ + static struct child_process cp = CHILD_PROCESS_INIT; + extern char *_pgmptr; + + strvec_pushl(&cp.args, "mintty", "gdb", NULL); + strvec_pushf(&cp.args, "--pid=%d", getpid()); + cp.clean_on_exit = 1; + if (start_command(&cp) < 0) + die_errno("Could not start gdb"); + sleep(1); +} + +int err_win_to_posix(DWORD winerr) +{ + int error = ENOSYS; + switch(winerr) { + case ERROR_ACCESS_DENIED: error = EACCES; break; + case ERROR_ACCOUNT_DISABLED: error = EACCES; break; + case ERROR_ACCOUNT_RESTRICTION: error = EACCES; break; + case ERROR_ALREADY_ASSIGNED: error = EBUSY; break; + case ERROR_ALREADY_EXISTS: error = EEXIST; break; + case ERROR_ARITHMETIC_OVERFLOW: error = ERANGE; break; + case ERROR_BAD_COMMAND: error = EIO; break; + case ERROR_BAD_DEVICE: error = ENODEV; break; + case ERROR_BAD_DRIVER_LEVEL: error = ENXIO; break; + case ERROR_BAD_EXE_FORMAT: error = ENOEXEC; break; + case ERROR_BAD_FORMAT: error = ENOEXEC; break; + case ERROR_BAD_LENGTH: error = EINVAL; break; + case ERROR_BAD_PATHNAME: error = ENOENT; break; + case ERROR_BAD_PIPE: error = EPIPE; break; + case ERROR_BAD_UNIT: error = ENODEV; break; + case ERROR_BAD_USERNAME: error = EINVAL; break; + case ERROR_BROKEN_PIPE: error = EPIPE; break; + case ERROR_BUFFER_OVERFLOW: error = ENAMETOOLONG; break; + case ERROR_BUSY: error = EBUSY; break; + case ERROR_BUSY_DRIVE: error = EBUSY; break; + case ERROR_CALL_NOT_IMPLEMENTED: error = ENOSYS; break; + case ERROR_CANNOT_MAKE: error = EACCES; break; + case ERROR_CANTOPEN: error = EIO; break; + case ERROR_CANTREAD: error = EIO; break; + case ERROR_CANTWRITE: error = EIO; break; + case ERROR_CRC: error = EIO; break; + case ERROR_CURRENT_DIRECTORY: error = EACCES; break; + case ERROR_DEVICE_IN_USE: error = EBUSY; break; + case ERROR_DEV_NOT_EXIST: error = ENODEV; break; + case ERROR_DIRECTORY: error = EINVAL; break; + case ERROR_DIR_NOT_EMPTY: error = ENOTEMPTY; break; + case ERROR_DISK_CHANGE: error = EIO; 
break; + case ERROR_DISK_FULL: error = ENOSPC; break; + case ERROR_DRIVE_LOCKED: error = EBUSY; break; + case ERROR_ENVVAR_NOT_FOUND: error = EINVAL; break; + case ERROR_EXE_MARKED_INVALID: error = ENOEXEC; break; + case ERROR_FILENAME_EXCED_RANGE: error = ENAMETOOLONG; break; + case ERROR_FILE_EXISTS: error = EEXIST; break; + case ERROR_FILE_INVALID: error = ENODEV; break; + case ERROR_FILE_NOT_FOUND: error = ENOENT; break; + case ERROR_GEN_FAILURE: error = EIO; break; + case ERROR_HANDLE_DISK_FULL: error = ENOSPC; break; + case ERROR_INSUFFICIENT_BUFFER: error = ENOMEM; break; + case ERROR_INVALID_ACCESS: error = EACCES; break; + case ERROR_INVALID_ADDRESS: error = EFAULT; break; + case ERROR_INVALID_BLOCK: error = EFAULT; break; + case ERROR_INVALID_DATA: error = EINVAL; break; + case ERROR_INVALID_DRIVE: error = ENODEV; break; + case ERROR_INVALID_EXE_SIGNATURE: error = ENOEXEC; break; + case ERROR_INVALID_FLAGS: error = EINVAL; break; + case ERROR_INVALID_FUNCTION: error = ENOSYS; break; + case ERROR_INVALID_HANDLE: error = EBADF; break; + case ERROR_INVALID_LOGON_HOURS: error = EACCES; break; + case ERROR_INVALID_NAME: error = EINVAL; break; + case ERROR_INVALID_OWNER: error = EINVAL; break; + case ERROR_INVALID_PARAMETER: error = EINVAL; break; + case ERROR_INVALID_PASSWORD: error = EPERM; break; + case ERROR_INVALID_PRIMARY_GROUP: error = EINVAL; break; + case ERROR_INVALID_SIGNAL_NUMBER: error = EINVAL; break; + case ERROR_INVALID_TARGET_HANDLE: error = EIO; break; + case ERROR_INVALID_WORKSTATION: error = EACCES; break; + case ERROR_IO_DEVICE: error = EIO; break; + case ERROR_IO_INCOMPLETE: error = EINTR; break; + case ERROR_LOCKED: error = EBUSY; break; + case ERROR_LOCK_VIOLATION: error = EACCES; break; + case ERROR_LOGON_FAILURE: error = EACCES; break; + case ERROR_MAPPED_ALIGNMENT: error = EINVAL; break; + case ERROR_META_EXPANSION_TOO_LONG: error = E2BIG; break; + case ERROR_MORE_DATA: error = EPIPE; break; + case ERROR_NEGATIVE_SEEK: error = ESPIPE; break; + case ERROR_NOACCESS: error = EFAULT; break; + case ERROR_NONE_MAPPED: error = EINVAL; break; + case ERROR_NOT_ENOUGH_MEMORY: error = ENOMEM; break; + case ERROR_NOT_READY: error = EAGAIN; break; + case ERROR_NOT_SAME_DEVICE: error = EXDEV; break; + case ERROR_NO_DATA: error = EPIPE; break; + case ERROR_NO_MORE_SEARCH_HANDLES: error = EIO; break; + case ERROR_NO_PROC_SLOTS: error = EAGAIN; break; + case ERROR_NO_SUCH_PRIVILEGE: error = EACCES; break; + case ERROR_OPEN_FAILED: error = EIO; break; + case ERROR_OPEN_FILES: error = EBUSY; break; + case ERROR_OPERATION_ABORTED: error = EINTR; break; + case ERROR_OUTOFMEMORY: error = ENOMEM; break; + case ERROR_PASSWORD_EXPIRED: error = EACCES; break; + case ERROR_PATH_BUSY: error = EBUSY; break; + case ERROR_PATH_NOT_FOUND: error = ENOENT; break; + case ERROR_PIPE_BUSY: error = EBUSY; break; + case ERROR_PIPE_CONNECTED: error = EPIPE; break; + case ERROR_PIPE_LISTENING: error = EPIPE; break; + case ERROR_PIPE_NOT_CONNECTED: error = EPIPE; break; + case ERROR_PRIVILEGE_NOT_HELD: error = EACCES; break; + case ERROR_READ_FAULT: error = EIO; break; + case ERROR_SEEK: error = EIO; break; + case ERROR_SEEK_ON_DEVICE: error = ESPIPE; break; + case ERROR_SHARING_BUFFER_EXCEEDED: error = ENFILE; break; + case ERROR_SHARING_VIOLATION: error = EACCES; break; + case ERROR_STACK_OVERFLOW: error = ENOMEM; break; + case ERROR_SUCCESS: BUG("err_win_to_posix() called without an error!"); + case ERROR_SWAPERROR: error = ENOENT; break; + case ERROR_TOO_MANY_MODULES: error = EMFILE; break; + 
case ERROR_TOO_MANY_OPEN_FILES: error = EMFILE; break; + case ERROR_UNRECOGNIZED_MEDIA: error = ENXIO; break; + case ERROR_UNRECOGNIZED_VOLUME: error = ENODEV; break; + case ERROR_WAIT_NO_CHILDREN: error = ECHILD; break; + case ERROR_WRITE_FAULT: error = EIO; break; + case ERROR_WRITE_PROTECT: error = EROFS; break; + } + return error; +} + +static inline int is_file_in_use_error(DWORD errcode) +{ + switch (errcode) { + case ERROR_SHARING_VIOLATION: + case ERROR_ACCESS_DENIED: + return 1; + } + + return 0; +} + +static int read_yes_no_answer(void) +{ + char answer[1024]; + + if (fgets(answer, sizeof(answer), stdin)) { + size_t answer_len = strlen(answer); + int got_full_line = 0, c; + + /* remove the newline */ + if (answer_len >= 2 && answer[answer_len-2] == '\r') { + answer[answer_len-2] = '\0'; + got_full_line = 1; + } else if (answer_len >= 1 && answer[answer_len-1] == '\n') { + answer[answer_len-1] = '\0'; + got_full_line = 1; + } + /* flush the buffer in case we did not get the full line */ + if (!got_full_line) + while ((c = getchar()) != EOF && c != '\n') + ; + } else + /* we could not read, return the + * default answer which is no */ + return 0; + + if (tolower(answer[0]) == 'y' && !answer[1]) + return 1; + if (!strncasecmp(answer, "yes", sizeof(answer))) + return 1; + if (tolower(answer[0]) == 'n' && !answer[1]) + return 0; + if (!strncasecmp(answer, "no", sizeof(answer))) + return 0; + + /* did not find an answer we understand */ + return -1; +} + +static int ask_yes_no_if_possible(const char *format, ...) +{ + char question[4096]; + const char *retry_hook[] = { NULL, NULL, NULL }; + va_list args; + + va_start(args, format); + vsnprintf(question, sizeof(question), format, args); + va_end(args); + + if ((retry_hook[0] = mingw_getenv("GIT_ASK_YESNO"))) { + retry_hook[1] = question; + return !run_command_v_opt(retry_hook, 0); + } + + if (!isatty(_fileno(stdin)) || !isatty(_fileno(stderr))) + return 0; + + while (1) { + int answer; + fprintf(stderr, "%s (y/n) ", question); + + if ((answer = read_yes_no_answer()) >= 0) + return answer; + + fprintf(stderr, "Sorry, I did not understand your answer. " + "Please type 'y' or 'n'\n"); + } +} + +/* Windows only */ +enum hide_dotfiles_type { + HIDE_DOTFILES_FALSE = 0, + HIDE_DOTFILES_TRUE, + HIDE_DOTFILES_DOTGITONLY +}; + +static int core_restrict_inherited_handles = -1; +static enum hide_dotfiles_type hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; +static char *unset_environment_variables; + +int mingw_core_config(const char *var, const char *value, void *cb) +{ + if (!strcmp(var, "core.hidedotfiles")) { + if (value && !strcasecmp(value, "dotgitonly")) + hide_dotfiles = HIDE_DOTFILES_DOTGITONLY; + else + hide_dotfiles = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.unsetenvvars")) { + free(unset_environment_variables); + unset_environment_variables = xstrdup(value); + return 0; + } + + if (!strcmp(var, "core.restrictinheritedhandles")) { + if (value && !strcasecmp(value, "auto")) + core_restrict_inherited_handles = -1; + else + core_restrict_inherited_handles = + git_config_bool(var, value); + return 0; + } + + return 0; +} + +/* Normalizes NT paths as returned by some low-level APIs. 
*/ +static wchar_t *normalize_ntpath(wchar_t *wbuf) +{ + int i; + /* fix absolute path prefixes */ + if (wbuf[0] == '\\') { + /* strip NT namespace prefixes */ + if (!wcsncmp(wbuf, L"\\??\\", 4) || + !wcsncmp(wbuf, L"\\\\?\\", 4)) + wbuf += 4; + else if (!wcsnicmp(wbuf, L"\\DosDevices\\", 12)) + wbuf += 12; + /* replace remaining '...UNC\' with '\\' */ + if (!wcsnicmp(wbuf, L"UNC\\", 4)) { + wbuf += 2; + *wbuf = '\\'; + } + } + /* convert backslashes to slashes */ + for (i = 0; wbuf[i]; i++) + if (wbuf[i] == '\\') + wbuf[i] = '/'; + return wbuf; +} + +int mingw_unlink(const char *pathname) +{ + int ret, tries = 0; + wchar_t wpathname[MAX_PATH]; + if (xutftowcs_path(wpathname, pathname) < 0) + return -1; + + if (DeleteFileW(wpathname)) + return 0; + + /* read-only files cannot be removed */ + _wchmod(wpathname, 0666); + while ((ret = _wunlink(wpathname)) == -1 && tries < ARRAY_SIZE(delay)) { + if (!is_file_in_use_error(GetLastError())) + break; + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. + */ + Sleep(delay[tries]); + tries++; + } + while (ret == -1 && is_file_in_use_error(GetLastError()) && + ask_yes_no_if_possible("Unlink of file '%s' failed. " + "Should I try again?", pathname)) + ret = _wunlink(wpathname); + return ret; +} + +static int is_dir_empty(const wchar_t *wpath) +{ + WIN32_FIND_DATAW findbuf; + HANDLE handle; + wchar_t wbuf[MAX_PATH + 2]; + wcscpy(wbuf, wpath); + wcscat(wbuf, L"\\*"); + handle = FindFirstFileW(wbuf, &findbuf); + if (handle == INVALID_HANDLE_VALUE) + return GetLastError() == ERROR_NO_MORE_FILES; + + while (!wcscmp(findbuf.cFileName, L".") || + !wcscmp(findbuf.cFileName, L"..")) + if (!FindNextFileW(handle, &findbuf)) { + DWORD err = GetLastError(); + FindClose(handle); + return err == ERROR_NO_MORE_FILES; + } + FindClose(handle); + return 0; +} + +int mingw_rmdir(const char *pathname) +{ + int ret, tries = 0; + wchar_t wpathname[MAX_PATH]; + struct stat st; + + /* + * Contrary to Linux' `rmdir()`, Windows' _wrmdir() and _rmdir() + * (and `RemoveDirectoryW()`) will attempt to remove the target of a + * symbolic link (if it points to a directory). + * + * This behavior breaks the assumption of e.g. `remove_path()` which + * upon successful deletion of a file will attempt to remove its parent + * directories recursively until failure (which usually happens when + * the directory is not empty). + * + * Therefore, before calling `_wrmdir()`, we first check if the path is + * a symbolic link. If it is, we exit and return the same error as + * Linux' `rmdir()` would, i.e. `ENOTDIR`. + */ + if (!mingw_lstat(pathname, &st) && S_ISLNK(st.st_mode)) { + errno = ENOTDIR; + return -1; + } + + if (xutftowcs_path(wpathname, pathname) < 0) + return -1; + + while ((ret = _wrmdir(wpathname)) == -1 && tries < ARRAY_SIZE(delay)) { + if (!is_file_in_use_error(GetLastError())) + errno = err_win_to_posix(GetLastError()); + if (errno != EACCES) + break; + if (!is_dir_empty(wpathname)) { + errno = ENOTEMPTY; + break; + } + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. 
+ */ + Sleep(delay[tries]); + tries++; + } + while (ret == -1 && errno == EACCES && is_file_in_use_error(GetLastError()) && + ask_yes_no_if_possible("Deletion of directory '%s' failed. " + "Should I try again?", pathname)) + ret = _wrmdir(wpathname); + if (!ret) + invalidate_lstat_cache(); + return ret; +} + +static inline int needs_hiding(const char *path) +{ + const char *basename; + + if (hide_dotfiles == HIDE_DOTFILES_FALSE) + return 0; + + /* We cannot use basename(), as it would remove trailing slashes */ + win32_skip_dos_drive_prefix((char **)&path); + if (!*path) + return 0; + + for (basename = path; *path; path++) + if (is_dir_sep(*path)) { + do { + path++; + } while (is_dir_sep(*path)); + /* ignore trailing slashes */ + if (*path) + basename = path; + else + break; + } + + if (hide_dotfiles == HIDE_DOTFILES_TRUE) + return *basename == '.'; + + assert(hide_dotfiles == HIDE_DOTFILES_DOTGITONLY); + return !strncasecmp(".git", basename, 4) && + (!basename[4] || is_dir_sep(basename[4])); +} + +static int set_hidden_flag(const wchar_t *path, int set) +{ + DWORD original = GetFileAttributesW(path), modified; + if (set) + modified = original | FILE_ATTRIBUTE_HIDDEN; + else + modified = original & ~FILE_ATTRIBUTE_HIDDEN; + if (original == modified || SetFileAttributesW(path, modified)) + return 0; + errno = err_win_to_posix(GetLastError()); + return -1; +} + +int mingw_mkdir(const char *path, int mode) +{ + int ret; + wchar_t wpath[MAX_PATH]; + + if (!is_valid_win32_path(path, 0)) { + errno = EINVAL; + return -1; + } + + if (xutftowcs_path(wpath, path) < 0) + return -1; + ret = _wmkdir(wpath); + if (!ret && needs_hiding(path)) + return set_hidden_flag(wpath, 1); + return ret; +} + +/* + * Calling CreateFile() using FILE_APPEND_DATA and without FILE_WRITE_DATA + * is documented in [1] as opening a writable file handle in append mode. + * (It is believed that) this is atomic since it is maintained by the + * kernel unlike the O_APPEND flag which is racily maintained by the CRT. + * + * [1] https://docs.microsoft.com/en-us/windows/desktop/fileio/file-access-rights-constants + * + * This trick does not appear to work for named pipes. Instead it creates + * a named pipe client handle that cannot be written to. Callers should + * just use the regular _wopen() for them. (And since client handle gets + * bound to a unique server handle, it isn't really an issue.) + */ +static int mingw_open_append(wchar_t const *wfilename, int oflags, ...) +{ + HANDLE handle; + int fd; + DWORD create = (oflags & O_CREAT) ? OPEN_ALWAYS : OPEN_EXISTING; + + /* only these flags are supported */ + if ((oflags & ~O_CREAT) != (O_WRONLY | O_APPEND)) + return errno = ENOSYS, -1; + + /* + * FILE_SHARE_WRITE is required to permit child processes + * to append to the file. + */ + handle = CreateFileW(wfilename, FILE_APPEND_DATA, + FILE_SHARE_WRITE | FILE_SHARE_READ, + NULL, create, FILE_ATTRIBUTE_NORMAL, NULL); + if (handle == INVALID_HANDLE_VALUE) { + DWORD err = GetLastError(); + + /* + * Some network storage solutions (e.g. Isilon) might return + * ERROR_INVALID_PARAMETER instead of expected error + * ERROR_PATH_NOT_FOUND, which results in an unknown error. If + * so, let's turn the error to ERROR_PATH_NOT_FOUND instead. 
+ */ + if (err == ERROR_INVALID_PARAMETER) + err = ERROR_PATH_NOT_FOUND; + + errno = err_win_to_posix(err); + return -1; + } + + /* + * No O_APPEND here, because the CRT uses it only to reset the + * file pointer to EOF before each write(); but that is not + * necessary (and may lead to races) for a file created with + * FILE_APPEND_DATA. + */ + fd = _open_osfhandle((intptr_t)handle, O_BINARY); + if (fd < 0) + CloseHandle(handle); + return fd; +} + +/* + * Does the pathname map to the local named pipe filesystem? + * That is, does it have a "//./pipe/" prefix? + */ +static int is_local_named_pipe_path(const char *filename) +{ + return (is_dir_sep(filename[0]) && + is_dir_sep(filename[1]) && + filename[2] == '.' && + is_dir_sep(filename[3]) && + !strncasecmp(filename+4, "pipe", 4) && + is_dir_sep(filename[8]) && + filename[9]); +} + +int mingw_open (const char *filename, int oflags, ...) +{ + typedef int (*open_fn_t)(wchar_t const *wfilename, int oflags, ...); + va_list args; + unsigned mode; + int fd, create = (oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL); + wchar_t wfilename[MAX_PATH]; + open_fn_t open_fn; + + va_start(args, oflags); + mode = va_arg(args, int); + va_end(args); + + if (!is_valid_win32_path(filename, !create)) { + errno = create ? EINVAL : ENOENT; + return -1; + } + + if ((oflags & O_APPEND) && !is_local_named_pipe_path(filename)) + open_fn = mingw_open_append; + else + open_fn = _wopen; + + if (filename && !strcmp(filename, "/dev/null")) + wcscpy(wfilename, L"nul"); + else if (xutftowcs_path(wfilename, filename) < 0) + return -1; + + fd = open_fn(wfilename, oflags, mode); + + if (fd < 0 && (oflags & O_ACCMODE) != O_RDONLY && errno == EACCES) { + DWORD attrs = GetFileAttributesW(wfilename); + if (attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY)) + errno = EISDIR; + } + if ((oflags & O_CREAT) && needs_hiding(filename)) { + /* + * Internally, _wopen() uses the CreateFile() API which errors + * out with an ERROR_ACCESS_DENIED if CREATE_ALWAYS was + * specified and an already existing file's attributes do not + * match *exactly*. As there is no mode or flag we can set that + * would correspond to FILE_ATTRIBUTE_HIDDEN, let's just try + * again *without* the O_CREAT flag (that corresponds to the + * CREATE_ALWAYS flag of CreateFile()). + */ + if (fd < 0 && errno == EACCES) + fd = open_fn(wfilename, oflags & ~O_CREAT, mode); + if (fd >= 0 && set_hidden_flag(wfilename, 1)) + warning("could not mark '%s' as hidden.", filename); + } + return fd; +} + +static BOOL WINAPI ctrl_ignore(DWORD type) +{ + return TRUE; +} + +#undef fgetc +int mingw_fgetc(FILE *stream) +{ + int ch; + if (!isatty(_fileno(stream))) + return fgetc(stream); + + SetConsoleCtrlHandler(ctrl_ignore, TRUE); + while (1) { + ch = fgetc(stream); + if (ch != EOF || GetLastError() != ERROR_OPERATION_ABORTED) + break; + + /* Ctrl+C was pressed, simulate SIGINT and retry */ + mingw_raise(SIGINT); + } + SetConsoleCtrlHandler(ctrl_ignore, FALSE); + return ch; +} + +#undef fopen +FILE *mingw_fopen (const char *filename, const char *otype) +{ + int hide = needs_hiding(filename); + FILE *file; + wchar_t wfilename[MAX_PATH], wotype[4]; + if (filename && !strcmp(filename, "/dev/null")) + wcscpy(wfilename, L"nul"); + else if (!is_valid_win32_path(filename, 1)) { + int create = otype && strchr(otype, 'w'); + errno = create ? 
EINVAL : ENOENT; + return NULL; + } else if (xutftowcs_path(wfilename, filename) < 0) + return NULL; + + if (xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0) + return NULL; + + if (hide && !access(filename, F_OK) && set_hidden_flag(wfilename, 0)) { + error("could not unhide %s", filename); + return NULL; + } + file = _wfopen(wfilename, wotype); + if (!file && GetLastError() == ERROR_INVALID_NAME) + errno = ENOENT; + if (file && hide && set_hidden_flag(wfilename, 1)) + warning("could not mark '%s' as hidden.", filename); + return file; +} + +FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream) +{ + int hide = needs_hiding(filename); + FILE *file; + wchar_t wfilename[MAX_PATH], wotype[4]; + if (filename && !strcmp(filename, "/dev/null")) + wcscpy(wfilename, L"nul"); + else if (!is_valid_win32_path(filename, 1)) { + int create = otype && strchr(otype, 'w'); + errno = create ? EINVAL : ENOENT; + return NULL; + } else if (xutftowcs_path(wfilename, filename) < 0) + return NULL; + + if (xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0) + return NULL; + + if (hide && !access(filename, F_OK) && set_hidden_flag(wfilename, 0)) { + error("could not unhide %s", filename); + return NULL; + } + file = _wfreopen(wfilename, wotype, stream); + if (file && hide && set_hidden_flag(wfilename, 1)) + warning("could not mark '%s' as hidden.", filename); + return file; +} + +#undef fflush +int mingw_fflush(FILE *stream) +{ + int ret = fflush(stream); + + /* + * write() is used behind the scenes of stdio output functions. + * Since git code does not check for errors after each stdio write + * operation, it can happen that write() is called by a later + * stdio function even if an earlier write() call failed. In the + * case of a pipe whose readable end was closed, only the first + * call to write() reports EPIPE on Windows. Subsequent write() + * calls report EINVAL. It is impossible to notice whether this + * fflush invocation triggered such a case, therefore, we have to + * catch all EINVAL errors whole-sale. + */ + if (ret && errno == EINVAL) + errno = EPIPE; + + return ret; +} + +#undef write +ssize_t mingw_write(int fd, const void *buf, size_t len) +{ + ssize_t result = write(fd, buf, len); + + if (result < 0 && errno == EINVAL && buf) { + /* check if fd is a pipe */ + HANDLE h = (HANDLE) _get_osfhandle(fd); + if (GetFileType(h) == FILE_TYPE_PIPE) + errno = EPIPE; + else + errno = EINVAL; + } + + return result; +} + +int mingw_access(const char *filename, int mode) +{ + wchar_t wfilename[MAX_PATH]; + if (!strcmp("nul", filename) || !strcmp("/dev/null", filename)) + return 0; + if (xutftowcs_path(wfilename, filename) < 0) + return -1; + /* X_OK is not supported by the MSVCRT version */ + return _waccess(wfilename, mode & ~X_OK); +} + +int mingw_chdir(const char *dirname) +{ + wchar_t wdirname[MAX_PATH]; + if (xutftowcs_path(wdirname, dirname) < 0) + return -1; + return _wchdir(wdirname); +} + +int mingw_chmod(const char *filename, int mode) +{ + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, filename) < 0) + return -1; + return _wchmod(wfilename, mode); +} + +/* + * The unit of FILETIME is 100-nanoseconds since January 1, 1601, UTC. + * Returns the 100-nanoseconds ("hekto nanoseconds") since the epoch. 
+ */ +static inline long long filetime_to_hnsec(const FILETIME *ft) +{ + long long winTime = ((long long)ft->dwHighDateTime << 32) + ft->dwLowDateTime; + /* Windows to Unix Epoch conversion */ + return winTime - 116444736000000000LL; +} + +static inline void filetime_to_timespec(const FILETIME *ft, struct timespec *ts) +{ + long long hnsec = filetime_to_hnsec(ft); + ts->tv_sec = (time_t)(hnsec / 10000000); + ts->tv_nsec = (hnsec % 10000000) * 100; +} + +/** + * Verifies that safe_create_leading_directories() would succeed. + */ +static int has_valid_directory_prefix(wchar_t *wfilename) +{ + int n = wcslen(wfilename); + + while (n > 0) { + wchar_t c = wfilename[--n]; + DWORD attributes; + + if (!is_dir_sep(c)) + continue; + + wfilename[n] = L'\0'; + attributes = GetFileAttributesW(wfilename); + wfilename[n] = c; + if (attributes == FILE_ATTRIBUTE_DIRECTORY || + attributes == FILE_ATTRIBUTE_DEVICE) + return 1; + if (attributes == INVALID_FILE_ATTRIBUTES) + switch (GetLastError()) { + case ERROR_PATH_NOT_FOUND: + continue; + case ERROR_FILE_NOT_FOUND: + /* This implies parent directory exists. */ + return 1; + } + return 0; + } + return 1; +} + +/* We keep the do_lstat code in a separate function to avoid recursion. + * When a path ends with a slash, the stat will fail with ENOENT. In + * this case, we strip the trailing slashes and stat again. + * + * If follow is true then act like stat() and report on the link + * target. Otherwise report on the link itself. + */ +static int do_lstat(int follow, const char *file_name, struct stat *buf) +{ + WIN32_FILE_ATTRIBUTE_DATA fdata; + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, file_name) < 0) + return -1; + + if (GetFileAttributesExW(wfilename, GetFileExInfoStandard, &fdata)) { + buf->st_ino = 0; + buf->st_gid = 0; + buf->st_uid = 0; + buf->st_nlink = 1; + buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); + buf->st_size = fdata.nFileSizeLow | + (((off_t)fdata.nFileSizeHigh)<<32); + buf->st_dev = buf->st_rdev = 0; /* not used by Git */ + filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim)); + filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim)); + filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim)); + if (fdata.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + WIN32_FIND_DATAW findbuf; + HANDLE handle = FindFirstFileW(wfilename, &findbuf); + if (handle != INVALID_HANDLE_VALUE) { + if ((findbuf.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) && + (findbuf.dwReserved0 == IO_REPARSE_TAG_SYMLINK)) { + if (follow) { + char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + buf->st_size = readlink(file_name, buffer, MAXIMUM_REPARSE_DATA_BUFFER_SIZE); + } else { + buf->st_mode = S_IFLNK; + } + buf->st_mode |= S_IREAD; + if (!(findbuf.dwFileAttributes & FILE_ATTRIBUTE_READONLY)) + buf->st_mode |= S_IWRITE; + } + FindClose(handle); + } + } + return 0; + } + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_LOCK_VIOLATION: + case ERROR_SHARING_BUFFER_EXCEEDED: + errno = EACCES; + break; + case ERROR_BUFFER_OVERFLOW: + errno = ENAMETOOLONG; + break; + case ERROR_NOT_ENOUGH_MEMORY: + errno = ENOMEM; + break; + case ERROR_PATH_NOT_FOUND: + if (!has_valid_directory_prefix(wfilename)) { + errno = ENOTDIR; + break; + } + /* fallthru */ + default: + errno = ENOENT; + break; + } + return -1; +} + +/* We provide our own lstat/fstat functions, since the provided + * lstat/fstat functions are so slow. 
These stat functions are + * tailored for Git's usage (read: fast), and are not meant to be + * complete. Note that Git stat()s are redirected to mingw_lstat() + * too, since Windows doesn't really handle symlinks that well. + */ +static int do_stat_internal(int follow, const char *file_name, struct stat *buf) +{ + int namelen; + char alt_name[PATH_MAX]; + + if (!do_lstat(follow, file_name, buf)) + return 0; + + /* if file_name ended in a '/', Windows returned ENOENT; + * try again without trailing slashes + */ + if (errno != ENOENT) + return -1; + + namelen = strlen(file_name); + if (namelen && file_name[namelen-1] != '/') + return -1; + while (namelen && file_name[namelen-1] == '/') + --namelen; + if (!namelen || namelen >= PATH_MAX) + return -1; + + memcpy(alt_name, file_name, namelen); + alt_name[namelen] = 0; + return do_lstat(follow, alt_name, buf); +} + +static int get_file_info_by_handle(HANDLE hnd, struct stat *buf) +{ + BY_HANDLE_FILE_INFORMATION fdata; + + if (!GetFileInformationByHandle(hnd, &fdata)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + + buf->st_ino = 0; + buf->st_gid = 0; + buf->st_uid = 0; + buf->st_nlink = 1; + buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); + buf->st_size = fdata.nFileSizeLow | + (((off_t)fdata.nFileSizeHigh)<<32); + buf->st_dev = buf->st_rdev = 0; /* not used by Git */ + filetime_to_timespec(&(fdata.ftLastAccessTime), &(buf->st_atim)); + filetime_to_timespec(&(fdata.ftLastWriteTime), &(buf->st_mtim)); + filetime_to_timespec(&(fdata.ftCreationTime), &(buf->st_ctim)); + return 0; +} + +int mingw_lstat(const char *file_name, struct stat *buf) +{ + return do_stat_internal(0, file_name, buf); +} +int mingw_stat(const char *file_name, struct stat *buf) +{ + return do_stat_internal(1, file_name, buf); +} + +int mingw_fstat(int fd, struct stat *buf) +{ + HANDLE fh = (HANDLE)_get_osfhandle(fd); + DWORD avail, type = GetFileType(fh) & ~FILE_TYPE_REMOTE; + + switch (type) { + case FILE_TYPE_DISK: + return get_file_info_by_handle(fh, buf); + + case FILE_TYPE_CHAR: + case FILE_TYPE_PIPE: + /* initialize stat fields */ + memset(buf, 0, sizeof(*buf)); + buf->st_nlink = 1; + + if (type == FILE_TYPE_CHAR) { + buf->st_mode = _S_IFCHR; + } else { + buf->st_mode = _S_IFIFO; + if (PeekNamedPipe(fh, NULL, 0, NULL, &avail, NULL)) + buf->st_size = avail; + } + return 0; + + default: + errno = EBADF; + return -1; + } +} + +static inline void time_t_to_filetime(time_t t, FILETIME *ft) +{ + long long winTime = t * 10000000LL + 116444736000000000LL; + ft->dwLowDateTime = winTime; + ft->dwHighDateTime = winTime >> 32; +} + +int mingw_utime (const char *file_name, const struct utimbuf *times) +{ + FILETIME mft, aft; + int fh, rc; + DWORD attrs; + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, file_name) < 0) + return -1; + + /* must have write permission */ + attrs = GetFileAttributesW(wfilename); + if (attrs != INVALID_FILE_ATTRIBUTES && + (attrs & FILE_ATTRIBUTE_READONLY)) { + /* ignore errors here; open() will report them */ + SetFileAttributesW(wfilename, attrs & ~FILE_ATTRIBUTE_READONLY); + } + + if ((fh = _wopen(wfilename, O_RDWR | O_BINARY)) < 0) { + rc = -1; + goto revert_attrs; + } + + if (times) { + time_t_to_filetime(times->modtime, &mft); + time_t_to_filetime(times->actime, &aft); + } else { + GetSystemTimeAsFileTime(&mft); + aft = mft; + } + if (!SetFileTime((HANDLE)_get_osfhandle(fh), NULL, &aft, &mft)) { + errno = EINVAL; + rc = -1; + } else + rc = 0; + close(fh); + +revert_attrs: + if (attrs != 
INVALID_FILE_ATTRIBUTES && + (attrs & FILE_ATTRIBUTE_READONLY)) { + /* ignore errors again */ + SetFileAttributesW(wfilename, attrs); + } + return rc; +} + +#undef strftime +size_t mingw_strftime(char *s, size_t max, + const char *format, const struct tm *tm) +{ + /* a pointer to the original strftime in case we can't find the UCRT version */ + static size_t (*fallback)(char *, size_t, const char *, const struct tm *) = strftime; + size_t ret; + DECLARE_PROC_ADDR(ucrtbase.dll, size_t, strftime, char *, size_t, + const char *, const struct tm *); + + if (INIT_PROC_ADDR(strftime)) + ret = strftime(s, max, format, tm); + else + ret = fallback(s, max, format, tm); + + if (!ret && errno == EINVAL) + die("invalid strftime format: '%s'", format); + return ret; +} + +unsigned int sleep (unsigned int seconds) +{ + Sleep(seconds*1000); + return 0; +} + +char *mingw_mktemp(char *template) +{ + wchar_t wtemplate[MAX_PATH]; + if (xutftowcs_path(wtemplate, template) < 0) + return NULL; + if (!_wmktemp(wtemplate)) + return NULL; + if (xwcstoutf(template, wtemplate, strlen(template) + 1) < 0) + return NULL; + return template; +} + +int mkstemp(char *template) +{ + char *filename = mktemp(template); + if (filename == NULL) + return -1; + return open(filename, O_RDWR | O_CREAT, 0600); +} + +int gettimeofday(struct timeval *tv, void *tz) +{ + FILETIME ft; + long long hnsec; + + GetSystemTimeAsFileTime(&ft); + hnsec = filetime_to_hnsec(&ft); + tv->tv_sec = hnsec / 10000000; + tv->tv_usec = (hnsec % 10000000) / 10; + return 0; +} + +int pipe(int filedes[2]) +{ + HANDLE h[2]; + + /* this creates non-inheritable handles */ + if (!CreatePipe(&h[0], &h[1], NULL, 8192)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + filedes[0] = _open_osfhandle(HCAST(int, h[0]), O_NOINHERIT); + if (filedes[0] < 0) { + CloseHandle(h[0]); + CloseHandle(h[1]); + return -1; + } + filedes[1] = _open_osfhandle(HCAST(int, h[1]), O_NOINHERIT); + if (filedes[1] < 0) { + close(filedes[0]); + CloseHandle(h[1]); + return -1; + } + return 0; +} + +#ifndef __MINGW64__ +struct tm *gmtime_r(const time_t *timep, struct tm *result) +{ + if (gmtime_s(result, timep) == 0) + return result; + return NULL; +} + +struct tm *localtime_r(const time_t *timep, struct tm *result) +{ + if (localtime_s(result, timep) == 0) + return result; + return NULL; +} +#endif + +char *mingw_getcwd(char *pointer, int len) +{ + wchar_t cwd[MAX_PATH], wpointer[MAX_PATH]; + DWORD ret = GetCurrentDirectoryW(ARRAY_SIZE(cwd), cwd); + + if (!ret || ret >= ARRAY_SIZE(cwd)) { + errno = ret ? 
ENAMETOOLONG : err_win_to_posix(GetLastError()); + return NULL; + } + ret = GetLongPathNameW(cwd, wpointer, ARRAY_SIZE(wpointer)); + if (!ret && GetLastError() == ERROR_ACCESS_DENIED) { + HANDLE hnd = CreateFileW(cwd, 0, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + if (hnd == INVALID_HANDLE_VALUE) + return NULL; + ret = GetFinalPathNameByHandleW(hnd, wpointer, ARRAY_SIZE(wpointer), 0); + CloseHandle(hnd); + if (!ret || ret >= ARRAY_SIZE(wpointer)) + return NULL; + if (xwcstoutf(pointer, normalize_ntpath(wpointer), len) < 0) + return NULL; + return pointer; + } + if (!ret || ret >= ARRAY_SIZE(wpointer)) + return NULL; + if (xwcstoutf(pointer, wpointer, len) < 0) + return NULL; + convert_slashes(pointer); + return pointer; +} + +/* + * See "Parsing C++ Command-Line Arguments" at Microsoft's Docs: + * https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments + */ +static const char *quote_arg_msvc(const char *arg) +{ + /* count chars to quote */ + int len = 0, n = 0; + int force_quotes = 0; + char *q, *d; + const char *p = arg; + if (!*p) force_quotes = 1; + while (*p) { + if (isspace(*p) || *p == '*' || *p == '?' || *p == '{' || *p == '\'') + force_quotes = 1; + else if (*p == '"') + n++; + else if (*p == '\\') { + int count = 0; + while (*p == '\\') { + count++; + p++; + len++; + } + if (*p == '"' || !*p) + n += count*2 + 1; + continue; + } + len++; + p++; + } + if (!force_quotes && n == 0) + return arg; + + /* insert \ where necessary */ + d = q = xmalloc(st_add3(len, n, 3)); + *d++ = '"'; + while (*arg) { + if (*arg == '"') + *d++ = '\\'; + else if (*arg == '\\') { + int count = 0; + while (*arg == '\\') { + count++; + *d++ = *arg++; + } + if (*arg == '"' || !*arg) { + while (count-- > 0) + *d++ = '\\'; + /* don't escape the surrounding end quote */ + if (!*arg) + break; + *d++ = '\\'; + } + } + *d++ = *arg++; + } + *d++ = '"'; + *d++ = '\0'; + return q; +} + +#include "quote.h" + +static const char *quote_arg_msys2(const char *arg) +{ + struct strbuf buf = STRBUF_INIT; + const char *p2 = arg, *p; + + for (p = arg; *p; p++) { + int ws = isspace(*p); + if (!ws && *p != '\\' && *p != '"' && *p != '{' && *p != '\'' && + *p != '?' 
&& *p != '*' && *p != '~') + continue; + if (!buf.len) + strbuf_addch(&buf, '"'); + if (p != p2) + strbuf_add(&buf, p2, p - p2); + if (*p == '\\' || *p == '"') + strbuf_addch(&buf, '\\'); + p2 = p; + } + + if (p == arg) + strbuf_addch(&buf, '"'); + else if (!buf.len) + return arg; + else + strbuf_add(&buf, p2, p - p2); + + strbuf_addch(&buf, '"'); + return strbuf_detach(&buf, 0); +} + +static const char *parse_interpreter(const char *cmd) +{ + static char buf[100]; + char *p, *opt; + int n, fd; + + /* don't even try a .exe */ + n = strlen(cmd); + if (n >= 4 && !strcasecmp(cmd+n-4, ".exe")) + return NULL; + + fd = open(cmd, O_RDONLY); + if (fd < 0) + return NULL; + n = read(fd, buf, sizeof(buf)-1); + close(fd); + if (n < 4) /* at least '#!/x' and not error */ + return NULL; + + if (buf[0] != '#' || buf[1] != '!') + return NULL; + buf[n] = '\0'; + p = buf + strcspn(buf, "\r\n"); + if (!*p) + return NULL; + + *p = '\0'; + if (!(p = strrchr(buf+2, '/')) && !(p = strrchr(buf+2, '\\'))) + return NULL; + /* strip options */ + if ((opt = strchr(p+1, ' '))) + *opt = '\0'; + return p+1; +} + +/* + * exe_only means that we only want to detect .exe files, but not scripts + * (which do not have an extension) + */ +static char *lookup_prog(const char *dir, int dirlen, const char *cmd, + int isexe, int exe_only) +{ + char path[MAX_PATH]; + wchar_t wpath[MAX_PATH]; + snprintf(path, sizeof(path), "%.*s\\%s.exe", dirlen, dir, cmd); + + if (xutftowcs_path(wpath, path) < 0) + return NULL; + + if (!isexe && _waccess(wpath, F_OK) == 0) + return xstrdup(path); + wpath[wcslen(wpath)-4] = '\0'; + if ((!exe_only || isexe) && _waccess(wpath, F_OK) == 0) { + if (!(GetFileAttributesW(wpath) & FILE_ATTRIBUTE_DIRECTORY)) { + path[strlen(path)-4] = '\0'; + return xstrdup(path); + } + } + return NULL; +} + +/* + * Determines the absolute path of cmd using the split path in path. + * If cmd contains a slash or backslash, no lookup is performed. + */ +static char *path_lookup(const char *cmd, int exe_only) +{ + const char *path; + char *prog = NULL; + int len = strlen(cmd); + int isexe = len >= 4 && !strcasecmp(cmd+len-4, ".exe"); + + if (strpbrk(cmd, "/\\")) + return xstrdup(cmd); + + path = mingw_getenv("PATH"); + if (!path) + return NULL; + + while (!prog) { + const char *sep = strchrnul(path, ';'); + int dirlen = sep - path; + if (dirlen) + prog = lookup_prog(path, dirlen, cmd, isexe, exe_only); + if (!*sep) + break; + path = sep + 1; + } + + return prog; +} + +static const wchar_t *wcschrnul(const wchar_t *s, wchar_t c) +{ + while (*s && *s != c) + s++; + return s; +} + +/* Compare only keys */ +static int wenvcmp(const void *a, const void *b) +{ + wchar_t *p = *(wchar_t **)a, *q = *(wchar_t **)b; + size_t p_len, q_len; + + /* Find the keys */ + p_len = wcschrnul(p, L'=') - p; + q_len = wcschrnul(q, L'=') - q; + + /* If the length differs, include the shorter key's NUL */ + if (p_len < q_len) + p_len++; + else if (p_len > q_len) + p_len = q_len + 1; + + return _wcsnicmp(p, q, p_len); +} + +/* + * Build an environment block combining the inherited environment + * merged with the given list of settings. + * + * Values of the form "KEY=VALUE" in deltaenv override inherited values. + * Values of the form "KEY" in deltaenv delete inherited values. + * + * Multiple entries in deltaenv for the same key are explicitly allowed. + * + * We return a contiguous block of UNICODE strings with a final trailing + * zero word. 
+ */ +static wchar_t *make_environment_block(char **deltaenv) +{ + wchar_t *wenv = GetEnvironmentStringsW(), *wdeltaenv, *result, *p; + size_t wlen, s, delta_size, size; + + wchar_t **array = NULL; + size_t alloc = 0, nr = 0, i; + + size = 1; /* for extra NUL at the end */ + + /* If there is no deltaenv to apply, simply return a copy. */ + if (!deltaenv || !*deltaenv) { + for (p = wenv; p && *p; ) { + size_t s = wcslen(p) + 1; + size += s; + p += s; + } + + ALLOC_ARRAY(result, size); + COPY_ARRAY(result, wenv, size); + FreeEnvironmentStringsW(wenv); + return result; + } + + /* + * If there is a deltaenv, let's accumulate all keys into `array`, + * sort them using the stable git_stable_qsort() and then copy, + * skipping duplicate keys + */ + for (p = wenv; p && *p; ) { + ALLOC_GROW(array, nr + 1, alloc); + s = wcslen(p) + 1; + array[nr++] = p; + p += s; + size += s; + } + + /* (over-)assess size needed for wchar version of deltaenv */ + for (delta_size = 0, i = 0; deltaenv[i]; i++) + delta_size += strlen(deltaenv[i]) * 2 + 1; + ALLOC_ARRAY(wdeltaenv, delta_size); + + /* convert the deltaenv, appending to array */ + for (i = 0, p = wdeltaenv; deltaenv[i]; i++) { + ALLOC_GROW(array, nr + 1, alloc); + wlen = xutftowcs(p, deltaenv[i], wdeltaenv + delta_size - p); + array[nr++] = p; + p += wlen + 1; + } + + git_stable_qsort(array, nr, sizeof(*array), wenvcmp); + ALLOC_ARRAY(result, size + delta_size); + + for (p = result, i = 0; i < nr; i++) { + /* Skip any duplicate keys; last one wins */ + while (i + 1 < nr && !wenvcmp(array + i, array + i + 1)) + i++; + + /* Skip "to delete" entry */ + if (!wcschr(array[i], L'=')) + continue; + + size = wcslen(array[i]) + 1; + COPY_ARRAY(p, array[i], size); + p += size; + } + *p = L'\0'; + + free(array); + free(wdeltaenv); + FreeEnvironmentStringsW(wenv); + return result; +} + +static void do_unset_environment_variables(void) +{ + static int done; + char *p = unset_environment_variables; + + if (done || !p) + return; + done = 1; + + for (;;) { + char *comma = strchr(p, ','); + + if (comma) + *comma = '\0'; + unsetenv(p); + if (!comma) + break; + p = comma + 1; + } +} + +struct pinfo_t { + struct pinfo_t *next; + pid_t pid; + HANDLE proc; +}; +static struct pinfo_t *pinfo = NULL; +CRITICAL_SECTION pinfo_cs; + +/* Used to match and chomp off path components */ +static inline int match_last_path_component(const char *path, size_t *len, + const char *component) +{ + size_t component_len = strlen(component); + if (*len < component_len + 1 || + !is_dir_sep(path[*len - component_len - 1]) || + fspathncmp(path + *len - component_len, component, component_len)) + return 0; + *len -= component_len + 1; + /* chomp off repeated dir separators */ + while (*len > 0 && is_dir_sep(path[*len - 1])) + (*len)--; + return 1; +} + +static int is_msys2_sh(const char *cmd) +{ + if (!cmd) + return 0; + + if (!strcmp(cmd, "sh")) { + static int ret = -1; + char *p; + + if (ret >= 0) + return ret; + + p = path_lookup(cmd, 0); + if (!p) + ret = 0; + else { + size_t len = strlen(p); + + ret = match_last_path_component(p, &len, "sh.exe") && + match_last_path_component(p, &len, "bin") && + match_last_path_component(p, &len, "usr"); + free(p); + } + return ret; + } + + if (ends_with(cmd, "\\sh.exe")) { + static char *sh; + + if (!sh) + sh = path_lookup("sh", 0); + + return !fspathcmp(cmd, sh); + } + + return 0; +} + +static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaenv, + const char *dir, + int prepend_cmd, int fhin, int fhout, int fherr) +{ + static int 
restrict_handle_inheritance = -1; + STARTUPINFOEXW si; + PROCESS_INFORMATION pi; + LPPROC_THREAD_ATTRIBUTE_LIST attr_list = NULL; + HANDLE stdhandles[3]; + DWORD stdhandles_count = 0; + SIZE_T size; + struct strbuf args; + wchar_t wcmd[MAX_PATH], wdir[MAX_PATH], *wargs, *wenvblk = NULL; + unsigned flags = CREATE_UNICODE_ENVIRONMENT; + BOOL ret; + HANDLE cons; + const char *(*quote_arg)(const char *arg) = + is_msys2_sh(cmd ? cmd : *argv) ? + quote_arg_msys2 : quote_arg_msvc; + const char *strace_env; + + /* Make sure to override previous errors, if any */ + errno = 0; + + if (restrict_handle_inheritance < 0) + restrict_handle_inheritance = core_restrict_inherited_handles; + /* + * The following code to restrict which handles are inherited seems + * to work properly only on Windows 7 and later, so let's disable it + * on Windows Vista and 2008. + */ + if (restrict_handle_inheritance < 0) + restrict_handle_inheritance = GetVersion() >> 16 >= 7601; + + do_unset_environment_variables(); + + /* Determine whether or not we are associated to a console */ + cons = CreateFileW(L"CONOUT$", GENERIC_WRITE, + FILE_SHARE_WRITE, NULL, OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, NULL); + if (cons == INVALID_HANDLE_VALUE) { + /* There is no console associated with this process. + * Since the child is a console process, Windows + * would normally create a console window. But + * since we'll be redirecting std streams, we do + * not need the console. + * It is necessary to use DETACHED_PROCESS + * instead of CREATE_NO_WINDOW to make ssh + * recognize that it has no console. + */ + flags |= DETACHED_PROCESS; + } else { + /* There is already a console. If we specified + * DETACHED_PROCESS here, too, Windows would + * disassociate the child from the console. + * The same is true for CREATE_NO_WINDOW. + * Go figure! 
+ */ + CloseHandle(cons); + } + memset(&si, 0, sizeof(si)); + si.StartupInfo.cb = sizeof(si); + si.StartupInfo.hStdInput = winansi_get_osfhandle(fhin); + si.StartupInfo.hStdOutput = winansi_get_osfhandle(fhout); + si.StartupInfo.hStdError = winansi_get_osfhandle(fherr); + + /* The list of handles cannot contain duplicates */ + if (si.StartupInfo.hStdInput != INVALID_HANDLE_VALUE) + stdhandles[stdhandles_count++] = si.StartupInfo.hStdInput; + if (si.StartupInfo.hStdOutput != INVALID_HANDLE_VALUE && + si.StartupInfo.hStdOutput != si.StartupInfo.hStdInput) + stdhandles[stdhandles_count++] = si.StartupInfo.hStdOutput; + if (si.StartupInfo.hStdError != INVALID_HANDLE_VALUE && + si.StartupInfo.hStdError != si.StartupInfo.hStdInput && + si.StartupInfo.hStdError != si.StartupInfo.hStdOutput) + stdhandles[stdhandles_count++] = si.StartupInfo.hStdError; + if (stdhandles_count) + si.StartupInfo.dwFlags |= STARTF_USESTDHANDLES; + + if (*argv && !strcmp(cmd, *argv)) + wcmd[0] = L'\0'; + else if (xutftowcs_path(wcmd, cmd) < 0) + return -1; + if (dir && xutftowcs_path(wdir, dir) < 0) + return -1; + + /* concatenate argv, quoting args as we go */ + strbuf_init(&args, 0); + if (prepend_cmd) { + char *quoted = (char *)quote_arg(cmd); + strbuf_addstr(&args, quoted); + if (quoted != cmd) + free(quoted); + } + for (; *argv; argv++) { + char *quoted = (char *)quote_arg(*argv); + if (*args.buf) + strbuf_addch(&args, ' '); + strbuf_addstr(&args, quoted); + if (quoted != *argv) + free(quoted); + } + + strace_env = getenv("GIT_STRACE_COMMANDS"); + if (strace_env) { + char *p = path_lookup("strace.exe", 1); + if (!p) + return error("strace not found!"); + if (xutftowcs_path(wcmd, p) < 0) { + free(p); + return -1; + } + free(p); + if (!strcmp("1", strace_env) || + !strcasecmp("yes", strace_env) || + !strcasecmp("true", strace_env)) + strbuf_insert(&args, 0, "strace ", 7); + else { + const char *quoted = quote_arg(strace_env); + struct strbuf buf = STRBUF_INIT; + strbuf_addf(&buf, "strace -o %s ", quoted); + if (quoted != strace_env) + free((char *)quoted); + strbuf_insert(&args, 0, buf.buf, buf.len); + strbuf_release(&buf); + } + } + + ALLOC_ARRAY(wargs, st_add(st_mult(2, args.len), 1)); + xutftowcs(wargs, args.buf, 2 * args.len + 1); + strbuf_release(&args); + + wenvblk = make_environment_block(deltaenv); + + memset(&pi, 0, sizeof(pi)); + if (restrict_handle_inheritance && stdhandles_count && + (InitializeProcThreadAttributeList(NULL, 1, 0, &size) || + GetLastError() == ERROR_INSUFFICIENT_BUFFER) && + (attr_list = (LPPROC_THREAD_ATTRIBUTE_LIST) + (HeapAlloc(GetProcessHeap(), 0, size))) && + InitializeProcThreadAttributeList(attr_list, 1, 0, &size) && + UpdateProcThreadAttribute(attr_list, 0, + PROC_THREAD_ATTRIBUTE_HANDLE_LIST, + stdhandles, + stdhandles_count * sizeof(HANDLE), + NULL, NULL)) { + si.lpAttributeList = attr_list; + flags |= EXTENDED_STARTUPINFO_PRESENT; + } + + ret = CreateProcessW(*wcmd ? wcmd : NULL, wargs, NULL, NULL, + stdhandles_count ? TRUE : FALSE, + flags, wenvblk, dir ? wdir : NULL, + &si.StartupInfo, &pi); + + /* + * On Windows 2008 R2, it seems that specifying certain types of handles + * (such as FILE_TYPE_CHAR or FILE_TYPE_PIPE) will always produce an + * error. Rather than playing finicky and fragile games, let's just try + * to detect this situation and simply try again without restricting any + * handle inheritance. This is still better than failing to create + * processes. 
+ */ + if (!ret && restrict_handle_inheritance && stdhandles_count) { + DWORD err = GetLastError(); + struct strbuf buf = STRBUF_INIT; + + if (err != ERROR_NO_SYSTEM_RESOURCES && + /* + * On Windows 7 and earlier, handles on pipes and character + * devices are inherited automatically, and cannot be + * specified in the thread handle list. Rather than trying + * to catch each and every corner case (and running the + * chance of *still* forgetting a few), let's just fall + * back to creating the process without trying to limit the + * handle inheritance. + */ + !(err == ERROR_INVALID_PARAMETER && + GetVersion() >> 16 < 9200) && + !getenv("SUPPRESS_HANDLE_INHERITANCE_WARNING")) { + DWORD fl = 0; + int i; + + setenv("SUPPRESS_HANDLE_INHERITANCE_WARNING", "1", 1); + + for (i = 0; i < stdhandles_count; i++) { + HANDLE h = stdhandles[i]; + strbuf_addf(&buf, "handle #%d: %p (type %lx, " + "handle info (%d) %lx\n", i, h, + GetFileType(h), + GetHandleInformation(h, &fl), + fl); + } + strbuf_addstr(&buf, "\nThis is a bug; please report it " + "at\nhttps://github.com/git-for-windows/" + "git/issues/new\n\n" + "To suppress this warning, please set " + "the environment variable\n\n" + "\tSUPPRESS_HANDLE_INHERITANCE_WARNING=1" + "\n"); + } + restrict_handle_inheritance = 0; + flags &= ~EXTENDED_STARTUPINFO_PRESENT; + ret = CreateProcessW(*wcmd ? wcmd : NULL, wargs, NULL, NULL, + TRUE, flags, wenvblk, dir ? wdir : NULL, + &si.StartupInfo, &pi); + if (!ret) + errno = err_win_to_posix(GetLastError()); + if (ret && buf.len) { + warning("failed to restrict file handles (%ld)\n\n%s", + err, buf.buf); + } + strbuf_release(&buf); + } else if (!ret) + errno = err_win_to_posix(GetLastError()); + + if (si.lpAttributeList) + DeleteProcThreadAttributeList(si.lpAttributeList); + if (attr_list) + HeapFree(GetProcessHeap(), 0, attr_list); + + free(wenvblk); + free(wargs); + + if (!ret) + return -1; + + CloseHandle(pi.hThread); + + /* + * The process ID is the human-readable identifier of the process + * that we want to present in log and error messages. The handle + * is not useful for this purpose. But we cannot close it, either, + * because it is not possible to turn a process ID into a process + * handle after the process terminated. + * Keep the handle in a list for waitpid. 
+ */ + EnterCriticalSection(&pinfo_cs); + { + struct pinfo_t *info = xmalloc(sizeof(struct pinfo_t)); + info->pid = pi.dwProcessId; + info->proc = pi.hProcess; + info->next = pinfo; + pinfo = info; + } + LeaveCriticalSection(&pinfo_cs); + + return (pid_t)pi.dwProcessId; +} + +static pid_t mingw_spawnv(const char *cmd, const char **argv, int prepend_cmd) +{ + return mingw_spawnve_fd(cmd, argv, NULL, NULL, prepend_cmd, 0, 1, 2); +} + +pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **deltaenv, + const char *dir, + int fhin, int fhout, int fherr) +{ + pid_t pid; + char *prog = path_lookup(cmd, 0); + + if (!prog) { + errno = ENOENT; + pid = -1; + } + else { + const char *interpr = parse_interpreter(prog); + + if (interpr) { + const char *argv0 = argv[0]; + char *iprog = path_lookup(interpr, 1); + argv[0] = prog; + if (!iprog) { + errno = ENOENT; + pid = -1; + } + else { + pid = mingw_spawnve_fd(iprog, argv, deltaenv, dir, 1, + fhin, fhout, fherr); + free(iprog); + } + argv[0] = argv0; + } + else + pid = mingw_spawnve_fd(prog, argv, deltaenv, dir, 0, + fhin, fhout, fherr); + free(prog); + } + return pid; +} + +static int try_shell_exec(const char *cmd, char *const *argv) +{ + const char *interpr = parse_interpreter(cmd); + char *prog; + int pid = 0; + + if (!interpr) + return 0; + prog = path_lookup(interpr, 1); + if (prog) { + int exec_id; + int argc = 0; +#ifndef _MSC_VER + const +#endif + char **argv2; + while (argv[argc]) argc++; + ALLOC_ARRAY(argv2, argc + 1); + argv2[0] = (char *)cmd; /* full path to the script file */ + COPY_ARRAY(&argv2[1], &argv[1], argc); + exec_id = trace2_exec(prog, argv2); + pid = mingw_spawnv(prog, argv2, 1); + if (pid >= 0) { + int status; + if (waitpid(pid, &status, 0) < 0) + status = 255; + trace2_exec_result(exec_id, status); + exit(status); + } + trace2_exec_result(exec_id, -1); + pid = 1; /* indicate that we tried but failed */ + free(prog); + free(argv2); + } + return pid; +} + +int mingw_execv(const char *cmd, char *const *argv) +{ + /* check if git_command is a shell script */ + if (!try_shell_exec(cmd, argv)) { + int pid, status; + int exec_id; + + exec_id = trace2_exec(cmd, (const char **)argv); + pid = mingw_spawnv(cmd, (const char **)argv, 0); + if (pid < 0) { + trace2_exec_result(exec_id, -1); + return -1; + } + if (waitpid(pid, &status, 0) < 0) + status = 255; + trace2_exec_result(exec_id, status); + exit(status); + } + return -1; +} + +int mingw_execvp(const char *cmd, char *const *argv) +{ + char *prog = path_lookup(cmd, 0); + + if (prog) { + mingw_execv(prog, argv); + free(prog); + } else + errno = ENOENT; + + return -1; +} + +int mingw_kill(pid_t pid, int sig) +{ + if (pid > 0 && sig == SIGTERM) { + HANDLE h = OpenProcess(PROCESS_TERMINATE, FALSE, pid); + + if (TerminateProcess(h, -1)) { + CloseHandle(h); + return 0; + } + + errno = err_win_to_posix(GetLastError()); + CloseHandle(h); + return -1; + } else if (pid > 0 && sig == 0) { + HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid); + if (h) { + CloseHandle(h); + return 0; + } + } + + errno = EINVAL; + return -1; +} + +/* + * UTF-8 versions of getenv(), putenv() and unsetenv(). + * Internally, they use the CRT's stock UNICODE routines + * to avoid data loss. 
+ */ +char *mingw_getenv(const char *name) +{ +#define GETENV_MAX_RETAIN 64 + static char *values[GETENV_MAX_RETAIN]; + static int value_counter; + int len_key, len_value; + wchar_t *w_key; + char *value; + wchar_t w_value[32768]; + + if (!name || !*name) + return NULL; + + len_key = strlen(name) + 1; + /* We cannot use xcalloc() here because that uses getenv() itself */ + w_key = calloc(len_key, sizeof(wchar_t)); + if (!w_key) + die("Out of memory, (tried to allocate %u wchar_t's)", len_key); + xutftowcs(w_key, name, len_key); + /* GetEnvironmentVariableW() only sets the last error upon failure */ + SetLastError(ERROR_SUCCESS); + len_value = GetEnvironmentVariableW(w_key, w_value, ARRAY_SIZE(w_value)); + if (!len_value && GetLastError() == ERROR_ENVVAR_NOT_FOUND) { + free(w_key); + return NULL; + } + free(w_key); + + len_value = len_value * 3 + 1; + /* We cannot use xcalloc() here because that uses getenv() itself */ + value = calloc(len_value, sizeof(char)); + if (!value) + die("Out of memory, (tried to allocate %u bytes)", len_value); + xwcstoutf(value, w_value, len_value); + + /* + * We return `value` which is an allocated value and the caller is NOT + * expecting to have to free it, so we keep a round-robin array, + * invalidating the buffer after GETENV_MAX_RETAIN getenv() calls. + */ + free(values[value_counter]); + values[value_counter++] = value; + if (value_counter >= ARRAY_SIZE(values)) + value_counter = 0; + + return value; +} + +int mingw_putenv(const char *namevalue) +{ + int size; + wchar_t *wide, *equal; + BOOL result; + + if (!namevalue || !*namevalue) + return 0; + + size = strlen(namevalue) * 2 + 1; + wide = calloc(size, sizeof(wchar_t)); + if (!wide) + die("Out of memory, (tried to allocate %u wchar_t's)", size); + xutftowcs(wide, namevalue, size); + equal = wcschr(wide, L'='); + if (!equal) + result = SetEnvironmentVariableW(wide, NULL); + else { + *equal = L'\0'; + result = SetEnvironmentVariableW(wide, equal + 1); + } + free(wide); + + if (!result) + errno = err_win_to_posix(GetLastError()); + + return result ? 0 : -1; +} + +static void ensure_socket_initialization(void) +{ + WSADATA wsa; + static int initialized = 0; + + if (initialized) + return; + + if (WSAStartup(MAKEWORD(2,2), &wsa)) + die("unable to initialize winsock subsystem, error %d", + WSAGetLastError()); + + atexit((void(*)(void)) WSACleanup); + initialized = 1; +} + +#undef gethostname +int mingw_gethostname(char *name, int namelen) +{ + ensure_socket_initialization(); + return gethostname(name, namelen); +} + +#undef gethostbyname +struct hostent *mingw_gethostbyname(const char *host) +{ + ensure_socket_initialization(); + return gethostbyname(host); +} + +#undef getaddrinfo +int mingw_getaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, struct addrinfo **res) +{ + ensure_socket_initialization(); + return getaddrinfo(node, service, hints, res); +} + +int mingw_socket(int domain, int type, int protocol) +{ + int sockfd; + SOCKET s; + + ensure_socket_initialization(); + s = WSASocket(domain, type, protocol, NULL, 0, 0); + if (s == INVALID_SOCKET) { + /* + * WSAGetLastError() values are regular BSD error codes + * biased by WSABASEERR. + * However, strerror() does not know about networking + * specific errors, which are values beginning at 38 or so. + * Therefore, we choose to leave the biased error code + * in errno so that _if_ someone looks up the code somewhere, + * then it is at least the number that are usually listed. 
+ */ + errno = WSAGetLastError(); + return -1; + } + /* convert into a file descriptor */ + if ((sockfd = _open_osfhandle(s, O_RDWR|O_BINARY)) < 0) { + closesocket(s); + return error("unable to make a socket file descriptor: %s", + strerror(errno)); + } + return sockfd; +} + +#undef connect +int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return connect(s, sa, sz); +} + +#undef bind +int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return bind(s, sa, sz); +} + +#undef setsockopt +int mingw_setsockopt(int sockfd, int lvl, int optname, void *optval, int optlen) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return setsockopt(s, lvl, optname, (const char*)optval, optlen); +} + +#undef shutdown +int mingw_shutdown(int sockfd, int how) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return shutdown(s, how); +} + +#undef listen +int mingw_listen(int sockfd, int backlog) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return listen(s, backlog); +} + +#undef accept +int mingw_accept(int sockfd1, struct sockaddr *sa, socklen_t *sz) +{ + int sockfd2; + + SOCKET s1 = (SOCKET)_get_osfhandle(sockfd1); + SOCKET s2 = accept(s1, sa, sz); + + /* convert into a file descriptor */ + if ((sockfd2 = _open_osfhandle(s2, O_RDWR|O_BINARY)) < 0) { + int err = errno; + closesocket(s2); + return error("unable to make a socket file descriptor: %s", + strerror(err)); + } + return sockfd2; +} + +#undef rename +int mingw_rename(const char *pold, const char *pnew) +{ + DWORD attrs, gle; + int tries = 0; + wchar_t wpold[MAX_PATH], wpnew[MAX_PATH]; + if (xutftowcs_path(wpold, pold) < 0 || xutftowcs_path(wpnew, pnew) < 0) + return -1; + + /* + * Try native rename() first to get errno right. + * It is based on MoveFile(), which cannot overwrite existing files. + */ + if (!_wrename(wpold, wpnew)) + return 0; + if (errno != EEXIST) + return -1; +repeat: + if (MoveFileExW(wpold, wpnew, MOVEFILE_REPLACE_EXISTING)) + return 0; + /* TODO: translate more errors */ + gle = GetLastError(); + if (gle == ERROR_ACCESS_DENIED && + (attrs = GetFileAttributesW(wpnew)) != INVALID_FILE_ATTRIBUTES) { + if (attrs & FILE_ATTRIBUTE_DIRECTORY) { + DWORD attrsold = GetFileAttributesW(wpold); + if (attrsold == INVALID_FILE_ATTRIBUTES || + !(attrsold & FILE_ATTRIBUTE_DIRECTORY)) + errno = EISDIR; + else if (!_wrmdir(wpnew)) + goto repeat; + return -1; + } + if ((attrs & FILE_ATTRIBUTE_READONLY) && + SetFileAttributesW(wpnew, attrs & ~FILE_ATTRIBUTE_READONLY)) { + if (MoveFileExW(wpold, wpnew, MOVEFILE_REPLACE_EXISTING)) + return 0; + gle = GetLastError(); + /* revert file attributes on failure */ + SetFileAttributesW(wpnew, attrs); + } + } + if (tries < ARRAY_SIZE(delay) && gle == ERROR_ACCESS_DENIED) { + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. + */ + Sleep(delay[tries]); + tries++; + goto repeat; + } + if (gle == ERROR_ACCESS_DENIED && + ask_yes_no_if_possible("Rename from '%s' to '%s' failed. " + "Should I try again?", pold, pnew)) + goto repeat; + + errno = EACCES; + return -1; +} + +/* + * Note that this doesn't return the actual pagesize, but + * the allocation granularity. 
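 * (Editorial note, not part of the patch: on typical x86/x64 Windows
 * systems si.dwAllocationGranularity is 64 KiB, whereas the real page
 * size, si.dwPageSize from the very same GetSystemInfo() call, is
 * 4 KiB. The granularity is what MapViewOfFile()-based code such as
 * the mmap emulation must align offsets to, which is presumably why
 * it is the value returned here.)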
If future Windows specific git code + * needs the real getpagesize function, we need to find another solution. + */ +int mingw_getpagesize(void) +{ + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwAllocationGranularity; +} + +/* See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724435.aspx */ +enum EXTENDED_NAME_FORMAT { + NameDisplay = 3, + NameUserPrincipal = 8 +}; + +static char *get_extended_user_info(enum EXTENDED_NAME_FORMAT type) +{ + DECLARE_PROC_ADDR(secur32.dll, BOOL, GetUserNameExW, + enum EXTENDED_NAME_FORMAT, LPCWSTR, PULONG); + static wchar_t wbuffer[1024]; + DWORD len; + + if (!INIT_PROC_ADDR(GetUserNameExW)) + return NULL; + + len = ARRAY_SIZE(wbuffer); + if (GetUserNameExW(type, wbuffer, &len)) { + char *converted = xmalloc((len *= 3)); + if (xwcstoutf(converted, wbuffer, len) >= 0) + return converted; + free(converted); + } + + return NULL; +} + +char *mingw_query_user_email(void) +{ + return get_extended_user_info(NameUserPrincipal); +} + +struct passwd *getpwuid(int uid) +{ + static unsigned initialized; + static char user_name[100]; + static struct passwd *p; + wchar_t buf[100]; + DWORD len; + + if (initialized) + return p; + + len = ARRAY_SIZE(buf); + if (!GetUserNameW(buf, &len)) { + initialized = 1; + return NULL; + } + + if (xwcstoutf(user_name, buf, sizeof(user_name)) < 0) { + initialized = 1; + return NULL; + } + + p = xmalloc(sizeof(*p)); + p->pw_name = user_name; + p->pw_gecos = get_extended_user_info(NameDisplay); + if (!p->pw_gecos) + p->pw_gecos = "unknown"; + p->pw_dir = NULL; + + initialized = 1; + return p; +} + +static HANDLE timer_event; +static HANDLE timer_thread; +static int timer_interval; +static int one_shot; +static sig_handler_t timer_fn = SIG_DFL, sigint_fn = SIG_DFL; + +/* The timer works like this: + * The thread, ticktack(), is a trivial routine that most of the time + * only waits to receive the signal to terminate. The main thread tells + * the thread to terminate by setting the timer_event to the signalled + * state. + * But ticktack() interrupts the wait state after the timer's interval + * length to call the signal handler. 
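 *
 * Sketch (editorial, not part of the patch): roughly how a caller is
 * expected to drive this emulation, following the POSIX-style pattern
 * used by code such as Git's progress display; my_handler is a
 * placeholder for the application callback, and the field layout is
 * the struct itimerval declared in compat/mingw.h of this patch:
 *
 *     struct sigaction sa;
 *     struct itimerval v;
 *
 *     memset(&sa, 0, sizeof(sa));
 *     sa.sa_handler = my_handler;
 *     sigaction(SIGALRM, &sa, NULL);    stores the handler in timer_fn
 *
 *     memset(&v, 0, sizeof(v));
 *     v.it_value.tv_sec = 1;            first tick after one second
 *     v.it_interval = v.it_value;       then every second
 *     setitimer(ITIMER_REAL, &v, NULL); starts the ticktack() thread
 *
 * Note that this setitimer() only accepts an it_interval that is
 * either zero (one-shot) or equal to it_value, as enforced below.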
+ */ + +static unsigned __stdcall ticktack(void *dummy) +{ + while (WaitForSingleObject(timer_event, timer_interval) == WAIT_TIMEOUT) { + mingw_raise(SIGALRM); + if (one_shot) + break; + } + return 0; +} + +static int start_timer_thread(void) +{ + timer_event = CreateEvent(NULL, FALSE, FALSE, NULL); + if (timer_event) { + timer_thread = (HANDLE) _beginthreadex(NULL, 0, ticktack, NULL, 0, NULL); + if (!timer_thread ) + return errno = ENOMEM, + error("cannot start timer thread"); + } else + return errno = ENOMEM, + error("cannot allocate resources for timer"); + return 0; +} + +static void stop_timer_thread(void) +{ + if (timer_event) + SetEvent(timer_event); /* tell thread to terminate */ + if (timer_thread) { + int rc = WaitForSingleObject(timer_thread, 10000); + if (rc == WAIT_TIMEOUT) + error("timer thread did not terminate timely"); + else if (rc != WAIT_OBJECT_0) + error("waiting for timer thread failed: %lu", + GetLastError()); + CloseHandle(timer_thread); + } + if (timer_event) + CloseHandle(timer_event); + timer_event = NULL; + timer_thread = NULL; +} + +static inline int is_timeval_eq(const struct timeval *i1, const struct timeval *i2) +{ + return i1->tv_sec == i2->tv_sec && i1->tv_usec == i2->tv_usec; +} + +int setitimer(int type, struct itimerval *in, struct itimerval *out) +{ + static const struct timeval zero; + static int atexit_done; + + if (out != NULL) + return errno = EINVAL, + error("setitimer param 3 != NULL not implemented"); + if (!is_timeval_eq(&in->it_interval, &zero) && + !is_timeval_eq(&in->it_interval, &in->it_value)) + return errno = EINVAL, + error("setitimer: it_interval must be zero or eq it_value"); + + if (timer_thread) + stop_timer_thread(); + + if (is_timeval_eq(&in->it_value, &zero) && + is_timeval_eq(&in->it_interval, &zero)) + return 0; + + timer_interval = in->it_value.tv_sec * 1000 + in->it_value.tv_usec / 1000; + one_shot = is_timeval_eq(&in->it_interval, &zero); + if (!atexit_done) { + atexit(stop_timer_thread); + atexit_done = 1; + } + return start_timer_thread(); +} + +int sigaction(int sig, struct sigaction *in, struct sigaction *out) +{ + if (sig != SIGALRM) + return errno = EINVAL, + error("sigaction only implemented for SIGALRM"); + if (out != NULL) + return errno = EINVAL, + error("sigaction: param 3 != NULL not implemented"); + + timer_fn = in->sa_handler; + return 0; +} + +#undef signal +sig_handler_t mingw_signal(int sig, sig_handler_t handler) +{ + sig_handler_t old; + + switch (sig) { + case SIGALRM: + old = timer_fn; + timer_fn = handler; + break; + + case SIGINT: + old = sigint_fn; + sigint_fn = handler; + break; + + default: + return signal(sig, handler); + } + + return old; +} + +#undef raise +int mingw_raise(int sig) +{ + switch (sig) { + case SIGALRM: + if (timer_fn == SIG_DFL) { + if (isatty(STDERR_FILENO)) + fputs("Alarm clock\n", stderr); + exit(128 + SIGALRM); + } else if (timer_fn != SIG_IGN) + timer_fn(SIGALRM); + return 0; + + case SIGINT: + if (sigint_fn == SIG_DFL) + exit(128 + SIGINT); + else if (sigint_fn != SIG_IGN) + sigint_fn(SIGINT); + return 0; + +#if defined(_MSC_VER) + case SIGILL: + case SIGFPE: + case SIGSEGV: + case SIGTERM: + case SIGBREAK: + case SIGABRT: + case SIGABRT_COMPAT: + /* + * The header in the MS C Runtime defines 8 signals + * as being supported on the platform. Anything else causes an + * "Invalid signal or error" (which in DEBUG builds causes the + * Abort/Retry/Ignore dialog). We by-pass the CRT for things we + * already know will fail. 
+ */ + return raise(sig); + default: + errno = EINVAL; + return -1; + +#else + + default: + return raise(sig); + +#endif + + } +} + +int link(const char *oldpath, const char *newpath) +{ + wchar_t woldpath[MAX_PATH], wnewpath[MAX_PATH]; + if (xutftowcs_path(woldpath, oldpath) < 0 || + xutftowcs_path(wnewpath, newpath) < 0) + return -1; + + if (!CreateHardLinkW(wnewpath, woldpath, NULL)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + return 0; +} + +pid_t waitpid(pid_t pid, int *status, int options) +{ + HANDLE h = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, + FALSE, pid); + if (!h) { + errno = ECHILD; + return -1; + } + + if (pid > 0 && options & WNOHANG) { + if (WAIT_OBJECT_0 != WaitForSingleObject(h, 0)) { + CloseHandle(h); + return 0; + } + options &= ~WNOHANG; + } + + if (options == 0) { + struct pinfo_t **ppinfo; + if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) { + CloseHandle(h); + return 0; + } + + if (status) + GetExitCodeProcess(h, (LPDWORD)status); + + EnterCriticalSection(&pinfo_cs); + + ppinfo = &pinfo; + while (*ppinfo) { + struct pinfo_t *info = *ppinfo; + if (info->pid == pid) { + CloseHandle(info->proc); + *ppinfo = info->next; + free(info); + break; + } + ppinfo = &info->next; + } + + LeaveCriticalSection(&pinfo_cs); + + CloseHandle(h); + return pid; + } + CloseHandle(h); + + errno = EINVAL; + return -1; +} + +int xutftowcsn(wchar_t *wcs, const char *utfs, size_t wcslen, int utflen) +{ + int upos = 0, wpos = 0; + const unsigned char *utf = (const unsigned char*) utfs; + if (!utf || !wcs || wcslen < 1) { + errno = EINVAL; + return -1; + } + /* reserve space for \0 */ + wcslen--; + if (utflen < 0) + utflen = INT_MAX; + + while (upos < utflen) { + int c = utf[upos++] & 0xff; + if (utflen == INT_MAX && c == 0) + break; + + if (wpos >= wcslen) { + wcs[wpos] = 0; + errno = ERANGE; + return -1; + } + + if (c < 0x80) { + /* ASCII */ + wcs[wpos++] = c; + } else if (c >= 0xc2 && c < 0xe0 && upos < utflen && + (utf[upos] & 0xc0) == 0x80) { + /* 2-byte utf-8 */ + c = ((c & 0x1f) << 6); + c |= (utf[upos++] & 0x3f); + wcs[wpos++] = c; + } else if (c >= 0xe0 && c < 0xf0 && upos + 1 < utflen && + !(c == 0xe0 && utf[upos] < 0xa0) && /* over-long encoding */ + (utf[upos] & 0xc0) == 0x80 && + (utf[upos + 1] & 0xc0) == 0x80) { + /* 3-byte utf-8 */ + c = ((c & 0x0f) << 12); + c |= ((utf[upos++] & 0x3f) << 6); + c |= (utf[upos++] & 0x3f); + wcs[wpos++] = c; + } else if (c >= 0xf0 && c < 0xf5 && upos + 2 < utflen && + wpos + 1 < wcslen && + !(c == 0xf0 && utf[upos] < 0x90) && /* over-long encoding */ + !(c == 0xf4 && utf[upos] >= 0x90) && /* > \u10ffff */ + (utf[upos] & 0xc0) == 0x80 && + (utf[upos + 1] & 0xc0) == 0x80 && + (utf[upos + 2] & 0xc0) == 0x80) { + /* 4-byte utf-8: convert to \ud8xx \udcxx surrogate pair */ + c = ((c & 0x07) << 18); + c |= ((utf[upos++] & 0x3f) << 12); + c |= ((utf[upos++] & 0x3f) << 6); + c |= (utf[upos++] & 0x3f); + c -= 0x10000; + wcs[wpos++] = 0xd800 | (c >> 10); + wcs[wpos++] = 0xdc00 | (c & 0x3ff); + } else if (c >= 0xa0) { + /* invalid utf-8 byte, printable unicode char: convert 1:1 */ + wcs[wpos++] = c; + } else { + /* invalid utf-8 byte, non-printable unicode: convert to hex */ + static const char *hex = "0123456789abcdef"; + wcs[wpos++] = hex[c >> 4]; + if (wpos < wcslen) + wcs[wpos++] = hex[c & 0x0f]; + } + } + wcs[wpos] = 0; + return wpos; +} + +int xwcstoutf(char *utf, const wchar_t *wcs, size_t utflen) +{ + if (!wcs || !utf || utflen < 1) { + errno = EINVAL; + return -1; + } + utflen = WideCharToMultiByte(CP_UTF8, 0, 
wcs, -1, utf, utflen, NULL, NULL); + if (utflen) + return utflen - 1; + errno = ERANGE; + return -1; +} + +static void setup_windows_environment(void) +{ + char *tmp = getenv("TMPDIR"); + + /* on Windows it is TMP and TEMP */ + if (!tmp) { + if (!(tmp = getenv("TMP"))) + tmp = getenv("TEMP"); + if (tmp) { + setenv("TMPDIR", tmp, 1); + tmp = getenv("TMPDIR"); + } + } + + if (tmp) { + /* + * Convert all dir separators to forward slashes, + * to help shell commands called from the Git + * executable (by not mistaking the dir separators + * for escape characters). + */ + convert_slashes(tmp); + } + + /* simulate TERM to enable auto-color (see color.c) */ + if (!getenv("TERM")) + setenv("TERM", "cygwin", 1); + + /* calculate HOME if not set */ + if (!getenv("HOME")) { + /* + * try $HOMEDRIVE$HOMEPATH - the home share may be a network + * location, thus also check if the path exists (i.e. is not + * disconnected) + */ + if ((tmp = getenv("HOMEDRIVE"))) { + struct strbuf buf = STRBUF_INIT; + strbuf_addstr(&buf, tmp); + if ((tmp = getenv("HOMEPATH"))) { + strbuf_addstr(&buf, tmp); + if (is_directory(buf.buf)) + setenv("HOME", buf.buf, 1); + else + tmp = NULL; /* use $USERPROFILE */ + } + strbuf_release(&buf); + } + /* use $USERPROFILE if the home share is not available */ + if (!tmp && (tmp = getenv("USERPROFILE"))) + setenv("HOME", tmp, 1); + } +} + +int is_valid_win32_path(const char *path, int allow_literal_nul) +{ + const char *p = path; + int preceding_space_or_period = 0, i = 0, periods = 0; + + if (!protect_ntfs) + return 1; + + skip_dos_drive_prefix((char **)&path); + goto segment_start; + + for (;;) { + char c = *(path++); + switch (c) { + case '\0': + case '/': case '\\': + /* cannot end in ` ` or `.`, except for `.` and `..` */ + if (preceding_space_or_period && + (i != periods || periods > 2)) + return 0; + if (!c) + return 1; + + i = periods = preceding_space_or_period = 0; + +segment_start: + switch (*path) { + case 'a': case 'A': /* AUX */ + if (((c = path[++i]) != 'u' && c != 'U') || + ((c = path[++i]) != 'x' && c != 'X')) { +not_a_reserved_name: + path += i; + continue; + } + break; + case 'c': case 'C': + /* COM1 ... COM9, CON, CONIN$, CONOUT$ */ + if ((c = path[++i]) != 'o' && c != 'O') + goto not_a_reserved_name; + c = path[++i]; + if (c == 'm' || c == 'M') { /* COM1 ... COM9 */ + c = path[++i]; + if (c < '1' || c > '9') + goto not_a_reserved_name; + } else if (c == 'n' || c == 'N') { /* CON */ + c = path[i + 1]; + if ((c == 'i' || c == 'I') && + ((c = path[i + 2]) == 'n' || + c == 'N') && + path[i + 3] == '$') + i += 3; /* CONIN$ */ + else if ((c == 'o' || c == 'O') && + ((c = path[i + 2]) == 'u' || + c == 'U') && + ((c = path[i + 3]) == 't' || + c == 'T') && + path[i + 4] == '$') + i += 4; /* CONOUT$ */ + } else + goto not_a_reserved_name; + break; + case 'l': case 'L': /* LPT */ + if (((c = path[++i]) != 'p' && c != 'P') || + ((c = path[++i]) != 't' && c != 'T') || + !isdigit(path[++i])) + goto not_a_reserved_name; + break; + case 'n': case 'N': /* NUL */ + if (((c = path[++i]) != 'u' && c != 'U') || + ((c = path[++i]) != 'l' && c != 'L') || + (allow_literal_nul && + !path[i + 1] && p == path)) + goto not_a_reserved_name; + break; + case 'p': case 'P': /* PRN */ + if (((c = path[++i]) != 'r' && c != 'R') || + ((c = path[++i]) != 'n' && c != 'N')) + goto not_a_reserved_name; + break; + default: + continue; + } + + /* + * So far, this looks like a reserved name. 
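 * (Editorial illustration, not part of the patch: assuming the
 * protect_ntfs check above is in effect, segments such as "NUL",
 * "nul.txt", "aux.c" or "COM1" name reserved DOS devices and are
 * rejected, while ordinary names like "null", "nul0" or "com0" pass.)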
Let's see + * whether it actually is one: trailing spaces, a file + * extension, or an NTFS Alternate Data Stream do not + * matter, the name is still reserved if any of those + * follow immediately after the actual name. + */ + i++; + if (path[i] == ' ') { + preceding_space_or_period = 1; + while (path[++i] == ' ') + ; /* skip all spaces */ + } + + c = path[i]; + if (c && c != '.' && c != ':' && c != '/' && c != '\\') + goto not_a_reserved_name; + + /* contains reserved name */ + return 0; + case '.': + periods++; + /* fallthru */ + case ' ': + preceding_space_or_period = 1; + i++; + continue; + case ':': /* DOS drive prefix was already skipped */ + case '<': case '>': case '"': case '|': case '?': case '*': + /* illegal character */ + return 0; + default: + if (c > '\0' && c < '\x20') + /* illegal character */ + return 0; + } + preceding_space_or_period = 0; + i++; + } +} + +#if !defined(_MSC_VER) +/* + * Disable MSVCRT command line wildcard expansion (__getmainargs called from + * mingw startup code, see init.c in mingw runtime). + */ +int _CRT_glob = 0; +#endif + +static NORETURN void die_startup(void) +{ + fputs("fatal: not enough memory for initialization", stderr); + exit(128); +} + +static void *malloc_startup(size_t size) +{ + void *result = malloc(size); + if (!result) + die_startup(); + return result; +} + +static char *wcstoutfdup_startup(char *buffer, const wchar_t *wcs, size_t len) +{ + len = xwcstoutf(buffer, wcs, len) + 1; + return memcpy(malloc_startup(len), buffer, len); +} + +static void maybe_redirect_std_handle(const wchar_t *key, DWORD std_id, int fd, + DWORD desired_access, DWORD flags) +{ + DWORD create_flag = fd ? OPEN_ALWAYS : OPEN_EXISTING; + wchar_t buf[MAX_PATH]; + DWORD max = ARRAY_SIZE(buf); + HANDLE handle; + DWORD ret = GetEnvironmentVariableW(key, buf, max); + + if (!ret || ret >= max) + return; + + /* make sure this does not leak into child processes */ + SetEnvironmentVariableW(key, NULL); + if (!wcscmp(buf, L"off")) { + close(fd); + handle = GetStdHandle(std_id); + if (handle != INVALID_HANDLE_VALUE) + CloseHandle(handle); + return; + } + if (std_id == STD_ERROR_HANDLE && !wcscmp(buf, L"2>&1")) { + handle = GetStdHandle(STD_OUTPUT_HANDLE); + if (handle == INVALID_HANDLE_VALUE) { + close(fd); + handle = GetStdHandle(std_id); + if (handle != INVALID_HANDLE_VALUE) + CloseHandle(handle); + } else { + int new_fd = _open_osfhandle((intptr_t)handle, O_BINARY); + SetStdHandle(std_id, handle); + dup2(new_fd, fd); + /* do *not* close the new_fd: that would close stdout */ + } + return; + } + handle = CreateFileW(buf, desired_access, 0, NULL, create_flag, + flags, NULL); + if (handle != INVALID_HANDLE_VALUE) { + int new_fd = _open_osfhandle((intptr_t)handle, O_BINARY); + SetStdHandle(std_id, handle); + dup2(new_fd, fd); + close(new_fd); + } +} + +static void maybe_redirect_std_handles(void) +{ + maybe_redirect_std_handle(L"GIT_REDIRECT_STDIN", STD_INPUT_HANDLE, 0, + GENERIC_READ, FILE_ATTRIBUTE_NORMAL); + maybe_redirect_std_handle(L"GIT_REDIRECT_STDOUT", STD_OUTPUT_HANDLE, 1, + GENERIC_WRITE, FILE_ATTRIBUTE_NORMAL); + maybe_redirect_std_handle(L"GIT_REDIRECT_STDERR", STD_ERROR_HANDLE, 2, + GENERIC_WRITE, FILE_FLAG_NO_BUFFERING); +} + +#ifdef _MSC_VER +#ifdef _DEBUG +#include +#endif +#endif + +/* + * We implement wmain() and compile with -municode, which would + * normally ignore main(), but we call the latter from the former + * so that we can handle non-ASCII command-line parameters + * appropriately. 
+ * + * To be more compatible with the core git code, we convert + * argv into UTF8 and pass them directly to main(). + */ +int wmain(int argc, const wchar_t **wargv) +{ + int i, maxlen, exit_status; + char *buffer, **save; + const char **argv; + + trace2_initialize_clock(); + +#ifdef _MSC_VER +#ifdef _DEBUG + _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG); +#endif + +#ifdef USE_MSVC_CRTDBG + _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF); +#endif +#endif + + maybe_redirect_std_handles(); + + /* determine size of argv and environ conversion buffer */ + maxlen = wcslen(wargv[0]); + for (i = 1; i < argc; i++) + maxlen = max(maxlen, wcslen(wargv[i])); + + /* allocate buffer (wchar_t encodes to max 3 UTF-8 bytes) */ + maxlen = 3 * maxlen + 1; + buffer = malloc_startup(maxlen); + + /* + * Create a UTF-8 version of w_argv. Also create a "save" copy + * to remember all the string pointers because parse_options() + * will remove claimed items from the argv that we pass down. + */ + ALLOC_ARRAY(argv, argc + 1); + ALLOC_ARRAY(save, argc + 1); + for (i = 0; i < argc; i++) + argv[i] = save[i] = wcstoutfdup_startup(buffer, wargv[i], maxlen); + argv[i] = save[i] = NULL; + free(buffer); + + /* fix Windows specific environment settings */ + setup_windows_environment(); + + unset_environment_variables = xstrdup("PERL5LIB"); + + /* initialize critical section for waitpid pinfo_t list */ + InitializeCriticalSection(&pinfo_cs); + + /* set up default file mode and file modes for stdin/out/err */ + _fmode = _O_BINARY; + _setmode(_fileno(stdin), _O_BINARY); + _setmode(_fileno(stdout), _O_BINARY); + _setmode(_fileno(stderr), _O_BINARY); + + /* initialize Unicode console */ + winansi_init(); + + /* invoke the real main() using our utf8 version of argv. */ + exit_status = main(argc, argv); + + for (i = 0; i < argc; i++) + free(save[i]); + free(save); + free(argv); + + return exit_status; +} + +int uname(struct utsname *buf) +{ + unsigned v = (unsigned)GetVersion(); + memset(buf, 0, sizeof(*buf)); + xsnprintf(buf->sysname, sizeof(buf->sysname), "Windows"); + xsnprintf(buf->release, sizeof(buf->release), + "%u.%u", v & 0xff, (v >> 8) & 0xff); + /* assuming NT variants only.. */ + xsnprintf(buf->version, sizeof(buf->version), + "%u", (v >> 16) & 0x7fff); + return 0; +} diff --git a/compat/mingw.h b/compat/mingw.h index c9a52ad64a681f..ffa53a44b01659 100644 --- a/compat/mingw.h +++ b/compat/mingw.h @@ -453,6 +453,13 @@ char *mingw_query_user_email(void); #include #endif +/** + * Verifies that the specified path is owned by the user running the + * current process. + */ +int is_path_owned_by_current_sid(const char *path); +#define is_path_owned_by_current_user is_path_owned_by_current_sid + /** * Verifies that the given path is a valid one on Windows. * diff --git a/compat/mingw.h.orig b/compat/mingw.h.orig new file mode 100644 index 00000000000000..c9a52ad64a681f --- /dev/null +++ b/compat/mingw.h.orig @@ -0,0 +1,615 @@ +#ifdef __MINGW64_VERSION_MAJOR +#include +#include +typedef _sigset_t sigset_t; +#endif +#include +#include + +/* MinGW-w64 reports to have flockfile, but it does not actually have it. 
*/ +#ifdef __MINGW64_VERSION_MAJOR +#undef _POSIX_THREAD_SAFE_FUNCTIONS +#endif + +int mingw_core_config(const char *var, const char *value, void *cb); +#define platform_core_config mingw_core_config + +/* + * things that are not available in header files + */ + +typedef int uid_t; +typedef int socklen_t; +#ifndef __MINGW64_VERSION_MAJOR +typedef int pid_t; +#define hstrerror strerror +#endif + +#define S_IFLNK 0120000 /* Symbolic link */ +#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK) +#define S_ISSOCK(x) 0 + +#ifndef S_IRWXG +#define S_IRGRP 0 +#define S_IWGRP 0 +#define S_IXGRP 0 +#define S_IRWXG (S_IRGRP | S_IWGRP | S_IXGRP) +#endif +#ifndef S_IRWXO +#define S_IROTH 0 +#define S_IWOTH 0 +#define S_IXOTH 0 +#define S_IRWXO (S_IROTH | S_IWOTH | S_IXOTH) +#endif + +#define S_ISUID 0004000 +#define S_ISGID 0002000 +#define S_ISVTX 0001000 + +#define WIFEXITED(x) 1 +#define WIFSIGNALED(x) 0 +#define WEXITSTATUS(x) ((x) & 0xff) +#define WTERMSIG(x) SIGTERM + +#ifndef EWOULDBLOCK +#define EWOULDBLOCK EAGAIN +#endif +#ifndef ELOOP +#define ELOOP EMLINK +#endif +#define SHUT_WR SD_SEND + +#define SIGHUP 1 +#define SIGQUIT 3 +#define SIGKILL 9 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGCHLD 17 + +#define F_GETFD 1 +#define F_SETFD 2 +#define FD_CLOEXEC 0x1 + +#if !defined O_CLOEXEC && defined O_NOINHERIT +#define O_CLOEXEC O_NOINHERIT +#endif + +#ifndef EAFNOSUPPORT +#define EAFNOSUPPORT WSAEAFNOSUPPORT +#endif +#ifndef ECONNABORTED +#define ECONNABORTED WSAECONNABORTED +#endif +#ifndef ENOTSOCK +#define ENOTSOCK WSAENOTSOCK +#endif + +struct passwd { + char *pw_name; + char *pw_gecos; + char *pw_dir; +}; + +typedef void (__cdecl *sig_handler_t)(int); +struct sigaction { + sig_handler_t sa_handler; + unsigned sa_flags; +}; +#define SA_RESTART 0 + +struct itimerval { + struct timeval it_value, it_interval; +}; +#define ITIMER_REAL 0 + +struct utsname { + char sysname[16]; + char nodename[1]; + char release[16]; + char version[16]; + char machine[1]; +}; + +/* + * sanitize preprocessor namespace polluted by Windows headers defining + * macros which collide with git local versions + */ +#undef HELP_COMMAND /* from winuser.h */ + +/* + * trivial stubs + */ + +static inline int readlink(const char *path, char *buf, size_t bufsiz) +{ errno = ENOSYS; return -1; } +static inline int symlink(const char *oldpath, const char *newpath) +{ errno = ENOSYS; return -1; } +static inline int fchmod(int fildes, mode_t mode) +{ errno = ENOSYS; return -1; } +#ifndef __MINGW64_VERSION_MAJOR +static inline pid_t fork(void) +{ errno = ENOSYS; return -1; } +#endif +static inline unsigned int alarm(unsigned int seconds) +{ return 0; } +static inline int fsync(int fd) +{ return _commit(fd); } +static inline void sync(void) +{} +static inline uid_t getuid(void) +{ return 1; } +static inline struct passwd *getpwnam(const char *name) +{ return NULL; } +static inline int fcntl(int fd, int cmd, ...) +{ + if (cmd == F_GETFD || cmd == F_SETFD) + return 0; + errno = EINVAL; + return -1; +} + +#define sigemptyset(x) (void)0 +static inline int sigaddset(sigset_t *set, int signum) +{ return 0; } +#define SIG_BLOCK 0 +#define SIG_UNBLOCK 0 +static inline int sigprocmask(int how, const sigset_t *set, sigset_t *oldset) +{ return 0; } +static inline pid_t getppid(void) +{ return 1; } +static inline pid_t getpgid(pid_t pid) +{ return pid == 0 ? 
getpid() : pid; } +static inline pid_t tcgetpgrp(int fd) +{ return getpid(); } + +/* + * simple adaptors + */ + +int mingw_mkdir(const char *path, int mode); +#define mkdir mingw_mkdir + +#define WNOHANG 1 +pid_t waitpid(pid_t pid, int *status, int options); + +#define kill mingw_kill +int mingw_kill(pid_t pid, int sig); + +#ifndef NO_OPENSSL +#include +static inline int mingw_SSL_set_fd(SSL *ssl, int fd) +{ + return SSL_set_fd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_fd mingw_SSL_set_fd + +static inline int mingw_SSL_set_rfd(SSL *ssl, int fd) +{ + return SSL_set_rfd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_rfd mingw_SSL_set_rfd + +static inline int mingw_SSL_set_wfd(SSL *ssl, int fd) +{ + return SSL_set_wfd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_wfd mingw_SSL_set_wfd +#endif + +/* + * implementations of missing functions + */ + +int pipe(int filedes[2]); +unsigned int sleep (unsigned int seconds); +int mkstemp(char *template); +int gettimeofday(struct timeval *tv, void *tz); +#ifndef __MINGW64_VERSION_MAJOR +struct tm *gmtime_r(const time_t *timep, struct tm *result); +struct tm *localtime_r(const time_t *timep, struct tm *result); +#endif +int getpagesize(void); /* defined in MinGW's libgcc.a */ +struct passwd *getpwuid(uid_t uid); +int setitimer(int type, struct itimerval *in, struct itimerval *out); +int sigaction(int sig, struct sigaction *in, struct sigaction *out); +int link(const char *oldpath, const char *newpath); +int uname(struct utsname *buf); + +/* + * replacements of existing functions + */ + +int mingw_unlink(const char *pathname); +#define unlink mingw_unlink + +int mingw_rmdir(const char *path); +#define rmdir mingw_rmdir + +int mingw_open (const char *filename, int oflags, ...); +#define open mingw_open +#undef OPEN_RETURNS_EINTR + +int mingw_fgetc(FILE *stream); +#define fgetc mingw_fgetc + +FILE *mingw_fopen (const char *filename, const char *otype); +#define fopen mingw_fopen + +FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream); +#define freopen mingw_freopen + +int mingw_fflush(FILE *stream); +#define fflush mingw_fflush + +ssize_t mingw_write(int fd, const void *buf, size_t len); +#define write mingw_write + +int mingw_access(const char *filename, int mode); +#undef access +#define access mingw_access + +int mingw_chdir(const char *dirname); +#define chdir mingw_chdir + +int mingw_chmod(const char *filename, int mode); +#define chmod mingw_chmod + +char *mingw_mktemp(char *template); +#define mktemp mingw_mktemp + +char *mingw_getcwd(char *pointer, int len); +#define getcwd mingw_getcwd + +#ifdef NO_UNSETENV +#error "NO_UNSETENV is incompatible with the Windows-specific startup code!" +#endif + +/* + * We bind *env() routines (even the mingw_ ones) to private mingw_ versions. + * These talk to the CRT using UNICODE/wchar_t, but maintain the original + * narrow-char API. + * + * Note that the MSCRT maintains both ANSI (getenv()) and UNICODE (_wgetenv()) + * routines and stores both versions of each environment variable in parallel + * (and secretly updates both when you set one or the other), but it uses CP_ACP + * to do the conversion rather than CP_UTF8. + * + * Since everything in the git code base is UTF8, we define the mingw_ routines + * to access the CRT using the UNICODE routines and manually convert them to + * UTF8. This also avoids round-trip problems. + * + * This also helps with our linkage, since "_wenviron" is publicly exported + * from the CRT. 
But to access "_environ" we would have to statically link + * to the CRT (/MT). + * + * We require NO_SETENV (and let gitsetenv() call our mingw_putenv). + */ +#define getenv mingw_getenv +#define putenv mingw_putenv +#define unsetenv mingw_putenv +char *mingw_getenv(const char *name); +int mingw_putenv(const char *name); + +int mingw_gethostname(char *host, int namelen); +#define gethostname mingw_gethostname + +struct hostent *mingw_gethostbyname(const char *host); +#define gethostbyname mingw_gethostbyname + +int mingw_getaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, struct addrinfo **res); +#define getaddrinfo mingw_getaddrinfo + +int mingw_socket(int domain, int type, int protocol); +#define socket mingw_socket + +int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz); +#define connect mingw_connect + +int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz); +#define bind mingw_bind + +int mingw_setsockopt(int sockfd, int lvl, int optname, void *optval, int optlen); +#define setsockopt mingw_setsockopt + +int mingw_shutdown(int sockfd, int how); +#define shutdown mingw_shutdown + +int mingw_listen(int sockfd, int backlog); +#define listen mingw_listen + +int mingw_accept(int sockfd, struct sockaddr *sa, socklen_t *sz); +#define accept mingw_accept + +int mingw_rename(const char*, const char*); +#define rename mingw_rename + +#if defined(USE_WIN32_MMAP) || defined(_MSC_VER) +int mingw_getpagesize(void); +#define getpagesize mingw_getpagesize +#endif + +struct rlimit { + unsigned int rlim_cur; +}; +#define RLIMIT_NOFILE 0 + +static inline int getrlimit(int resource, struct rlimit *rlp) +{ + if (resource != RLIMIT_NOFILE) { + errno = EINVAL; + return -1; + } + + rlp->rlim_cur = 2048; + return 0; +} + +/* + * Use mingw specific stat()/lstat()/fstat() implementations on Windows, + * including our own struct stat with 64 bit st_size and nanosecond-precision + * file times. 
+ */ +#ifndef __MINGW64_VERSION_MAJOR +#define off_t off64_t +#define lseek _lseeki64 +#ifndef _MSC_VER +struct timespec { + time_t tv_sec; + long tv_nsec; +}; +#endif +#endif + +struct mingw_stat { + _dev_t st_dev; + _ino_t st_ino; + _mode_t st_mode; + short st_nlink; + short st_uid; + short st_gid; + _dev_t st_rdev; + off64_t st_size; + struct timespec st_atim; + struct timespec st_mtim; + struct timespec st_ctim; +}; + +#define st_atime st_atim.tv_sec +#define st_mtime st_mtim.tv_sec +#define st_ctime st_ctim.tv_sec + +#ifdef stat +#undef stat +#endif +#define stat mingw_stat +int mingw_lstat(const char *file_name, struct stat *buf); +int mingw_stat(const char *file_name, struct stat *buf); +int mingw_fstat(int fd, struct stat *buf); +#ifdef fstat +#undef fstat +#endif +#define fstat mingw_fstat +#ifdef lstat +#undef lstat +#endif +#define lstat mingw_lstat + + +int mingw_utime(const char *file_name, const struct utimbuf *times); +#define utime mingw_utime +size_t mingw_strftime(char *s, size_t max, + const char *format, const struct tm *tm); +#define strftime mingw_strftime + +pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **env, + const char *dir, + int fhin, int fhout, int fherr); +int mingw_execvp(const char *cmd, char *const *argv); +#define execvp mingw_execvp +int mingw_execv(const char *cmd, char *const *argv); +#define execv mingw_execv + +static inline unsigned int git_ntohl(unsigned int x) +{ return (unsigned int)ntohl(x); } +#define ntohl git_ntohl + +sig_handler_t mingw_signal(int sig, sig_handler_t handler); +#define signal mingw_signal + +int mingw_raise(int sig); +#define raise mingw_raise + +/* + * ANSI emulation wrappers + */ + +int winansi_isatty(int fd); +#define isatty winansi_isatty + +int winansi_dup2(int oldfd, int newfd); +#define dup2 winansi_dup2 + +void winansi_init(void); +HANDLE winansi_get_osfhandle(int fd); + +/* + * git specific compatibility + */ + +static inline void convert_slashes(char *path) +{ + for (; *path; path++) + if (*path == '\\') + *path = '/'; +} +#define PATH_SEP ';' +char *mingw_query_user_email(void); +#define query_user_email mingw_query_user_email +#if !defined(__MINGW64_VERSION_MAJOR) && (!defined(_MSC_VER) || _MSC_VER < 1800) +#define PRIuMAX "I64u" +#define PRId64 "I64d" +#else +#include +#endif + +/** + * Verifies that the given path is a valid one on Windows. + * + * In particular, path segments are disallowed which + * + * - end in a period or a space (except the special directories `.` and `..`). + * + * - contain any of the reserved characters, e.g. `:`, `;`, `*`, etc + * + * - correspond to reserved names (such as `AUX`, `PRN`, etc) + * + * The `allow_literal_nul` parameter controls whether the path `NUL` should + * be considered valid (this makes sense e.g. before opening files, as it is + * perfectly legitimate to open `NUL` on Windows, just as it is to open + * `/dev/null` on Unix/Linux). + * + * Returns 1 upon success, otherwise 0. + */ +int is_valid_win32_path(const char *path, int allow_literal_nul); +#define is_valid_path(path) is_valid_win32_path(path, 0) + +/** + * Converts UTF-8 encoded string to UTF-16LE. + * + * To support repositories with legacy-encoded file names, invalid UTF-8 bytes + * 0xa0 - 0xff are converted to corresponding printable Unicode chars \u00a0 - + * \u00ff, and invalid UTF-8 bytes 0x80 - 0x9f (which would make non-printable + * Unicode) are converted to hex-code. 
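 *
 * Worked example (editorial, not part of the patch): the valid UTF-8
 * byte pair C3 A9 ("é") becomes the single wide char U+00E9; the lone
 * invalid byte A0 is passed through one-to-one as U+00A0; and the
 * lone invalid byte 80 expands to the two wide chars '8' and '0'.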
+ * + * Lead-bytes not followed by an appropriate number of trail-bytes, over-long + * encodings and 4-byte encodings > \u10ffff are detected as invalid UTF-8. + * + * Maximum space requirement for the target buffer is two wide chars per UTF-8 + * char (((strlen(utf) * 2) + 1) [* sizeof(wchar_t)]). + * + * The maximum space is needed only if the entire input string consists of + * invalid UTF-8 bytes in range 0x80-0x9f, as per the following table: + * + * | | UTF-8 | UTF-16 | + * Code point | UTF-8 sequence | bytes | words | ratio + * --------------+-------------------+-------+--------+------- + * 000000-00007f | 0-7f | 1 | 1 | 1 + * 000080-0007ff | c2-df + 80-bf | 2 | 1 | 0.5 + * 000800-00ffff | e0-ef + 2 * 80-bf | 3 | 1 | 0.33 + * 010000-10ffff | f0-f4 + 3 * 80-bf | 4 | 2 (a) | 0.5 + * invalid | 80-9f | 1 | 2 (b) | 2 + * invalid | a0-ff | 1 | 1 | 1 + * + * (a) encoded as UTF-16 surrogate pair + * (b) encoded as two hex digits + * + * Note that, while the UTF-8 encoding scheme can be extended to 5-byte, 6-byte + * or even indefinite-byte sequences, the largest valid code point \u10ffff + * encodes as only 4 UTF-8 bytes. + * + * Parameters: + * wcs: wide char target buffer + * utf: string to convert + * wcslen: size of target buffer (in wchar_t's) + * utflen: size of string to convert, or -1 if 0-terminated + * + * Returns: + * length of converted string (_wcslen(wcs)), or -1 on failure + * + * Errors: + * EINVAL: one of the input parameters is invalid (e.g. NULL) + * ERANGE: the output buffer is too small + */ +int xutftowcsn(wchar_t *wcs, const char *utf, size_t wcslen, int utflen); + +/** + * Simplified variant of xutftowcsn, assumes input string is \0-terminated. + */ +static inline int xutftowcs(wchar_t *wcs, const char *utf, size_t wcslen) +{ + return xutftowcsn(wcs, utf, wcslen, -1); +} + +/** + * Simplified file system specific variant of xutftowcsn, assumes output + * buffer size is MAX_PATH wide chars and input string is \0-terminated, + * fails with ENAMETOOLONG if input string is too long. + */ +static inline int xutftowcs_path(wchar_t *wcs, const char *utf) +{ + int result = xutftowcsn(wcs, utf, MAX_PATH, -1); + if (result < 0 && errno == ERANGE) + errno = ENAMETOOLONG; + return result; +} + +/** + * Converts UTF-16LE encoded string to UTF-8. + * + * Maximum space requirement for the target buffer is three UTF-8 chars per + * wide char ((_wcslen(wcs) * 3) + 1). + * + * The maximum space is needed only if the entire input string consists of + * UTF-16 words in range 0x0800-0xd7ff or 0xe000-0xffff (i.e. \u0800-\uffff + * modulo surrogate pairs), as per the following table: + * + * | | UTF-16 | UTF-8 | + * Code point | UTF-16 sequence | words | bytes | ratio + * --------------+-----------------------+--------+-------+------- + * 000000-00007f | 0000-007f | 1 | 1 | 1 + * 000080-0007ff | 0080-07ff | 1 | 2 | 2 + * 000800-00ffff | 0800-d7ff / e000-ffff | 1 | 3 | 3 + * 010000-10ffff | d800-dbff + dc00-dfff | 2 | 4 | 2 + * + * Note that invalid code points > 10ffff cannot be represented in UTF-16. + * + * Parameters: + * utf: target buffer + * wcs: wide string to convert + * utflen: size of target buffer + * + * Returns: + * length of converted string, or -1 on failure + * + * Errors: + * EINVAL: one of the input parameters is invalid (e.g. NULL) + * ERANGE: the output buffer is too small + */ +int xwcstoutf(char *utf, const wchar_t *wcs, size_t utflen); + +/* + * A critical section used in the implementation of the spawn + * functions (mingw_spawnv[p]e()) and waitpid(). 
Initialised in + * the replacement main() macro below. + */ +extern CRITICAL_SECTION pinfo_cs; + +/* + * Git, like most portable C applications, implements a main() function. On + * Windows, this main() function would receive parameters encoded in the + * current locale, but Git for Windows would prefer UTF-8 encoded parameters. + * + * To make that happen, we still declare main() here, and then declare and + * implement wmain() (which is the Unicode variant of main()) and compile with + * -municode. This wmain() function reencodes the parameters from UTF-16 to + * UTF-8 format, sets up a couple of other things as required on Windows, and + * then hands off to the main() function. + */ +int wmain(int argc, const wchar_t **w_argv); +int main(int argc, const char **argv); + +/* + * For debugging: if a problem occurs, say, in a Git process that is spawned + * from another Git process which in turn is spawned from yet another Git + * process, it can be quite daunting to figure out what is going on. + * + * Call this function to open a new MinTTY (this assumes you are in Git for + * Windows' SDK) with a GDB that attaches to the current process right away. + */ +void open_in_gdb(void); + +/* + * Used by Pthread API implementation for Windows + */ +int err_win_to_posix(DWORD winerr); diff --git a/config.c b/config.c index 7a6ff18ffa6c46..ea39153e31bc8e 100644 --- a/config.c +++ b/config.c @@ -3197,9 +3197,10 @@ void git_config_set_multivar(const char *key, const char *value, flags); } -static int section_name_match (const char *buf, const char *name) +static size_t section_name_match (const char *buf, const char *name) { - int i = 0, j = 0, dot = 0; + size_t i = 0, j = 0; + int dot = 0; if (buf[i] != '[') return 0; for (i = 1; buf[i] && buf[i] != ']'; i++) { @@ -3252,6 +3253,8 @@ static int section_name_is_ok(const char *name) return 1; } +#define GIT_CONFIG_MAX_LINE_LEN (512 * 1024) + /* if new_name == NULL, the section is removed instead */ static int git_config_copy_or_rename_section_in_file(const char *config_filename, const char *old_name, @@ -3261,11 +3264,12 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename char *filename_buf = NULL; struct lock_file lock = LOCK_INIT; int out_fd; - char buf[1024]; + struct strbuf buf = STRBUF_INIT; FILE *config_file = NULL; struct stat st; struct strbuf copystr = STRBUF_INIT; struct config_store_data store; + uint32_t line_nr = 0; memset(&store, 0, sizeof(store)); @@ -3302,16 +3306,25 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename goto out; } - while (fgets(buf, sizeof(buf), config_file)) { - unsigned i; - int length; + while (!strbuf_getwholeline(&buf, config_file, '\n')) { + size_t i, length; int is_section = 0; - char *output = buf; - for (i = 0; buf[i] && isspace(buf[i]); i++) + char *output = buf.buf; + + line_nr++; + + if (buf.len >= GIT_CONFIG_MAX_LINE_LEN) { + ret = error(_("refusing to work with overly long line " + "in '%s' on line %"PRIuMAX), + config_filename, (uintmax_t)line_nr); + goto out; + } + + for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++) ; /* do nothing */ - if (buf[i] == '[') { + if (buf.buf[i] == '[') { /* it's a section */ - int offset; + size_t offset; is_section = 1; /* @@ -3328,7 +3341,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename strbuf_reset(©str); } - offset = section_name_match(&buf[i], old_name); + offset = section_name_match(&buf.buf[i], old_name); if (offset > 0) { ret++; if (new_name == NULL) { @@ -3403,6 
+3416,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename out_no_rollback: free(filename_buf); config_store_data_clear(&store); + strbuf_release(&buf); return ret; } diff --git a/config.c.orig b/config.c.orig new file mode 100644 index 00000000000000..028a47a35f9f60 --- /dev/null +++ b/config.c.orig @@ -0,0 +1,3565 @@ +/* + * GIT - The information manager from hell + * + * Copyright (C) Linus Torvalds, 2005 + * Copyright (C) Johannes Schindelin, 2005 + * + */ +#include "cache.h" +#include "branch.h" +#include "config.h" +#include "environment.h" +#include "repository.h" +#include "lockfile.h" +#include "exec-cmd.h" +#include "strbuf.h" +#include "quote.h" +#include "hashmap.h" +#include "string-list.h" +#include "object-store.h" +#include "utf8.h" +#include "dir.h" +#include "color.h" +#include "refs.h" + +struct config_source { + struct config_source *prev; + union { + FILE *file; + struct config_buf { + const char *buf; + size_t len; + size_t pos; + } buf; + } u; + enum config_origin_type origin_type; + const char *name; + const char *path; + enum config_error_action default_error_action; + int linenr; + int eof; + size_t total_len; + struct strbuf value; + struct strbuf var; + unsigned subsection_case_sensitive : 1; + + int (*do_fgetc)(struct config_source *c); + int (*do_ungetc)(int c, struct config_source *conf); + long (*do_ftell)(struct config_source *c); +}; + +/* + * These variables record the "current" config source, which + * can be accessed by parsing callbacks. + * + * The "cf" variable will be non-NULL only when we are actually parsing a real + * config source (file, blob, cmdline, etc). + * + * The "current_config_kvi" variable will be non-NULL only when we are feeding + * cached config from a configset into a callback. + * + * They should generally never be non-NULL at the same time. If they are both + * NULL, then we aren't parsing anything (and depending on the function looking + * at the variables, it's either a bug for it to be called in the first place, + * or it's a function which can be reused for non-config purposes, and should + * fall back to some sane behavior). + */ +static struct config_source *cf; +static struct key_value_info *current_config_kvi; + +/* + * Similar to the variables above, this gives access to the "scope" of the + * current value (repo, global, etc). For cached values, it can be found via + * the current_config_kvi as above. During parsing, the current value can be + * found in this variable. It's not part of "cf" because it transcends a single + * file (i.e., a file included from .git/config is still in "repo" scope). 
+ */ +static enum config_scope current_parsing_scope; + +static int pack_compression_seen; +static int zlib_compression_seen; + +static int config_file_fgetc(struct config_source *conf) +{ + return getc_unlocked(conf->u.file); +} + +static int config_file_ungetc(int c, struct config_source *conf) +{ + return ungetc(c, conf->u.file); +} + +static long config_file_ftell(struct config_source *conf) +{ + return ftell(conf->u.file); +} + + +static int config_buf_fgetc(struct config_source *conf) +{ + if (conf->u.buf.pos < conf->u.buf.len) + return conf->u.buf.buf[conf->u.buf.pos++]; + + return EOF; +} + +static int config_buf_ungetc(int c, struct config_source *conf) +{ + if (conf->u.buf.pos > 0) { + conf->u.buf.pos--; + if (conf->u.buf.buf[conf->u.buf.pos] != c) + BUG("config_buf can only ungetc the same character"); + return c; + } + + return EOF; +} + +static long config_buf_ftell(struct config_source *conf) +{ + return conf->u.buf.pos; +} + +#define MAX_INCLUDE_DEPTH 10 +static const char include_depth_advice[] = N_( +"exceeded maximum include depth (%d) while including\n" +" %s\n" +"from\n" +" %s\n" +"This might be due to circular includes."); +static int handle_path_include(const char *path, struct config_include_data *inc) +{ + int ret = 0; + struct strbuf buf = STRBUF_INIT; + char *expanded; + + if (!path) + return config_error_nonbool("include.path"); + + expanded = expand_user_path(path, 0); + if (!expanded) + return error(_("could not expand include path '%s'"), path); + path = expanded; + + /* + * Use an absolute path as-is, but interpret relative paths + * based on the including config file. + */ + if (!is_absolute_path(path)) { + char *slash; + + if (!cf || !cf->path) + return error(_("relative config includes must come from files")); + + slash = find_last_dir_sep(cf->path); + if (slash) + strbuf_add(&buf, cf->path, slash - cf->path + 1); + strbuf_addstr(&buf, path); + path = buf.buf; + } + + if (!access_or_die(path, R_OK, 0)) { + if (++inc->depth > MAX_INCLUDE_DEPTH) + die(_(include_depth_advice), MAX_INCLUDE_DEPTH, path, + !cf ? "" : + cf->name ? cf->name : + "the command line"); + ret = git_config_from_file(git_config_include, path, inc); + inc->depth--; + } + strbuf_release(&buf); + free(expanded); + return ret; +} + +static void add_trailing_starstar_for_dir(struct strbuf *pat) +{ + if (pat->len && is_dir_sep(pat->buf[pat->len - 1])) + strbuf_addstr(pat, "**"); +} + +static int prepare_include_condition_pattern(struct strbuf *pat) +{ + struct strbuf path = STRBUF_INIT; + char *expanded; + int prefix = 0; + + expanded = expand_user_path(pat->buf, 1); + if (expanded) { + strbuf_reset(pat); + strbuf_addstr(pat, expanded); + free(expanded); + } + + if (pat->buf[0] == '.' 
&& is_dir_sep(pat->buf[1])) { + const char *slash; + + if (!cf || !cf->path) + return error(_("relative config include " + "conditionals must come from files")); + + strbuf_realpath(&path, cf->path, 1); + slash = find_last_dir_sep(path.buf); + if (!slash) + BUG("how is this possible?"); + strbuf_splice(pat, 0, 1, path.buf, slash - path.buf); + prefix = slash - path.buf + 1 /* slash */; + } else if (!is_absolute_path(pat->buf)) + strbuf_insertstr(pat, 0, "**/"); + + add_trailing_starstar_for_dir(pat); + + strbuf_release(&path); + return prefix; +} + +static int include_by_gitdir(const struct config_options *opts, + const char *cond, size_t cond_len, int icase) +{ + struct strbuf text = STRBUF_INIT; + struct strbuf pattern = STRBUF_INIT; + int ret = 0, prefix; + const char *git_dir; + int already_tried_absolute = 0; + + if (opts->git_dir) + git_dir = opts->git_dir; + else + goto done; + + strbuf_realpath(&text, git_dir, 1); + strbuf_add(&pattern, cond, cond_len); + prefix = prepare_include_condition_pattern(&pattern); + +again: + if (prefix < 0) + goto done; + + if (prefix > 0) { + /* + * perform literal matching on the prefix part so that + * any wildcard character in it can't create side effects. + */ + if (text.len < prefix) + goto done; + if (!icase && strncmp(pattern.buf, text.buf, prefix)) + goto done; + if (icase && strncasecmp(pattern.buf, text.buf, prefix)) + goto done; + } + + ret = !wildmatch(pattern.buf + prefix, text.buf + prefix, + WM_PATHNAME | (icase ? WM_CASEFOLD : 0)); + + if (!ret && !already_tried_absolute) { + /* + * We've tried e.g. matching gitdir:~/work, but if + * ~/work is a symlink to /mnt/storage/work + * strbuf_realpath() will expand it, so the rule won't + * match. Let's match against a + * strbuf_add_absolute_path() version of the path, + * which'll do the right thing + */ + strbuf_reset(&text); + strbuf_add_absolute_path(&text, git_dir); + already_tried_absolute = 1; + goto again; + } +done: + strbuf_release(&pattern); + strbuf_release(&text); + return ret; +} + +static int include_by_branch(const char *cond, size_t cond_len) +{ + int flags; + int ret; + struct strbuf pattern = STRBUF_INIT; + const char *refname = !the_repository->gitdir ? + NULL : resolve_ref_unsafe("HEAD", 0, NULL, &flags); + const char *shortname; + + if (!refname || !(flags & REF_ISSYMREF) || + !skip_prefix(refname, "refs/heads/", &shortname)) + return 0; + + strbuf_add(&pattern, cond, cond_len); + add_trailing_starstar_for_dir(&pattern); + ret = !wildmatch(pattern.buf, shortname, WM_PATHNAME); + strbuf_release(&pattern); + return ret; +} + +static int include_condition_is_true(const struct config_options *opts, + const char *cond, size_t cond_len) +{ + + if (skip_prefix_mem(cond, cond_len, "gitdir:", &cond, &cond_len)) + return include_by_gitdir(opts, cond, cond_len, 0); + else if (skip_prefix_mem(cond, cond_len, "gitdir/i:", &cond, &cond_len)) + return include_by_gitdir(opts, cond, cond_len, 1); + else if (skip_prefix_mem(cond, cond_len, "onbranch:", &cond, &cond_len)) + return include_by_branch(cond, cond_len); + + /* unknown conditionals are always false */ + return 0; +} + +int git_config_include(const char *var, const char *value, void *data) +{ + struct config_include_data *inc = data; + const char *cond, *key; + size_t cond_len; + int ret; + + /* + * Pass along all values, including "include" directives; this makes it + * possible to query information on the includes themselves. 
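 *
 * Illustration (editorial, not part of the patch): given a config
 * file containing
 *
 *     [include]
 *         path = ~/.gitconfig-extra
 *
 * a query like `git config --list --show-origin` reports both the
 * include.path entry itself and every key/value pair read from the
 * included file, each attributed to the file it actually came from.
 * The path above is only an example.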
+ */ + ret = inc->fn(var, value, inc->data); + if (ret < 0) + return ret; + + if (!strcmp(var, "include.path")) + ret = handle_path_include(value, inc); + + if (!parse_config_key(var, "includeif", &cond, &cond_len, &key) && + (cond && include_condition_is_true(inc->opts, cond, cond_len)) && + !strcmp(key, "path")) + ret = handle_path_include(value, inc); + + return ret; +} + +static void git_config_push_split_parameter(const char *key, const char *value) +{ + struct strbuf env = STRBUF_INIT; + const char *old = getenv(CONFIG_DATA_ENVIRONMENT); + if (old && *old) { + strbuf_addstr(&env, old); + strbuf_addch(&env, ' '); + } + sq_quote_buf(&env, key); + strbuf_addch(&env, '='); + if (value) + sq_quote_buf(&env, value); + setenv(CONFIG_DATA_ENVIRONMENT, env.buf, 1); + strbuf_release(&env); +} + +void git_config_push_parameter(const char *text) +{ + const char *value; + + /* + * When we see: + * + * section.subsection=with=equals.key=value + * + * we cannot tell if it means: + * + * [section "subsection=with=equals"] + * key = value + * + * or: + * + * [section] + * subsection = with=equals.key=value + * + * We parse left-to-right for the first "=", meaning we'll prefer to + * keep the value intact over the subsection. This is historical, but + * also sensible since values are more likely to contain odd or + * untrusted input than a section name. + * + * A missing equals is explicitly allowed (as a bool-only entry). + */ + value = strchr(text, '='); + if (value) { + char *key = xmemdupz(text, value - text); + git_config_push_split_parameter(key, value + 1); + free(key); + } else { + git_config_push_split_parameter(text, NULL); + } +} + +void git_config_push_env(const char *spec) +{ + char *key; + const char *env_name; + const char *env_value; + + env_name = strrchr(spec, '='); + if (!env_name) + die(_("invalid config format: %s"), spec); + key = xmemdupz(spec, env_name - spec); + env_name++; + if (!*env_name) + die(_("missing environment variable name for configuration '%.*s'"), + (int)(env_name - spec - 1), spec); + + env_value = getenv(env_name); + if (!env_value) + die(_("missing environment variable '%s' for configuration '%.*s'"), + env_name, (int)(env_name - spec - 1), spec); + + git_config_push_split_parameter(key, env_value); + free(key); +} + +static inline int iskeychar(int c) +{ + return isalnum(c) || c == '-'; +} + +/* + * Auxiliary function to sanity-check and split the key into the section + * identifier and variable name. + * + * Returns 0 on success, -1 when there is an invalid character in the key and + * -2 if there is no section name in the key. + * + * store_key - pointer to char* which will hold a copy of the key with + * lowercase section and variable name + * baselen - pointer to size_t which will hold the length of the + * section + subsection part, can be NULL + */ +static int git_config_parse_key_1(const char *key, char **store_key, size_t *baselen_, int quiet) +{ + size_t i, baselen; + int dot; + const char *last_dot = strrchr(key, '.'); + + /* + * Since "key" actually contains the section name and the real + * key name separated by a dot, we have to know where the dot is. 
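 *
 * Worked example (editorial, not part of the patch): for the key
 *
 *     branch.Topic/WIP.remote
 *
 * the last dot splits off the variable name "remote"; the section
 * ("branch") and the variable name are lowercased, while the
 * subsection in between ("Topic/WIP") is preserved as-is, so the
 * stored key is "branch.Topic/WIP.remote". A key with no dot at all,
 * such as "foo", fails with CONFIG_NO_SECTION_OR_NAME.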
+ */ + + if (last_dot == NULL || last_dot == key) { + if (!quiet) + error(_("key does not contain a section: %s"), key); + return -CONFIG_NO_SECTION_OR_NAME; + } + + if (!last_dot[1]) { + if (!quiet) + error(_("key does not contain variable name: %s"), key); + return -CONFIG_NO_SECTION_OR_NAME; + } + + baselen = last_dot - key; + if (baselen_) + *baselen_ = baselen; + + /* + * Validate the key and while at it, lower case it for matching. + */ + if (store_key) + *store_key = xmallocz(strlen(key)); + + dot = 0; + for (i = 0; key[i]; i++) { + unsigned char c = key[i]; + if (c == '.') + dot = 1; + /* Leave the extended basename untouched.. */ + if (!dot || i > baselen) { + if (!iskeychar(c) || + (i == baselen + 1 && !isalpha(c))) { + if (!quiet) + error(_("invalid key: %s"), key); + goto out_free_ret_1; + } + c = tolower(c); + } else if (c == '\n') { + if (!quiet) + error(_("invalid key (newline): %s"), key); + goto out_free_ret_1; + } + if (store_key) + (*store_key)[i] = c; + } + + return 0; + +out_free_ret_1: + if (store_key) { + FREE_AND_NULL(*store_key); + } + return -CONFIG_INVALID_KEY; +} + +int git_config_parse_key(const char *key, char **store_key, size_t *baselen) +{ + return git_config_parse_key_1(key, store_key, baselen, 0); +} + +int git_config_key_is_valid(const char *key) +{ + return !git_config_parse_key_1(key, NULL, NULL, 1); +} + +static int config_parse_pair(const char *key, const char *value, + config_fn_t fn, void *data) +{ + char *canonical_name; + int ret; + + if (!strlen(key)) + return error(_("empty config key")); + if (git_config_parse_key(key, &canonical_name, NULL)) + return -1; + + ret = (fn(canonical_name, value, data) < 0) ? -1 : 0; + free(canonical_name); + return ret; +} + +int git_config_parse_parameter(const char *text, + config_fn_t fn, void *data) +{ + const char *value; + struct strbuf **pair; + int ret; + + pair = strbuf_split_str(text, '=', 2); + if (!pair[0]) + return error(_("bogus config parameter: %s"), text); + + if (pair[0]->len && pair[0]->buf[pair[0]->len - 1] == '=') { + strbuf_setlen(pair[0], pair[0]->len - 1); + value = pair[1] ? 
pair[1]->buf : ""; + } else { + value = NULL; + } + + strbuf_trim(pair[0]); + if (!pair[0]->len) { + strbuf_list_free(pair); + return error(_("bogus config parameter: %s"), text); + } + + ret = config_parse_pair(pair[0]->buf, value, fn, data); + strbuf_list_free(pair); + return ret; +} + +static int parse_config_env_list(char *env, config_fn_t fn, void *data) +{ + char *cur = env; + while (cur && *cur) { + const char *key = sq_dequote_step(cur, &cur); + if (!key) + return error(_("bogus format in %s"), + CONFIG_DATA_ENVIRONMENT); + + if (!cur || isspace(*cur)) { + /* old-style 'key=value' */ + if (git_config_parse_parameter(key, fn, data) < 0) + return -1; + } + else if (*cur == '=') { + /* new-style 'key'='value' */ + const char *value; + + cur++; + if (*cur == '\'') { + /* quoted value */ + value = sq_dequote_step(cur, &cur); + if (!value || (cur && !isspace(*cur))) { + return error(_("bogus format in %s"), + CONFIG_DATA_ENVIRONMENT); + } + } else if (!*cur || isspace(*cur)) { + /* implicit bool: 'key'= */ + value = NULL; + } else { + return error(_("bogus format in %s"), + CONFIG_DATA_ENVIRONMENT); + } + + if (config_parse_pair(key, value, fn, data) < 0) + return -1; + } + else { + /* unknown format */ + return error(_("bogus format in %s"), + CONFIG_DATA_ENVIRONMENT); + } + + if (cur) { + while (isspace(*cur)) + cur++; + } + } + return 0; +} + +int git_config_from_parameters(config_fn_t fn, void *data) +{ + const char *env; + struct strbuf envvar = STRBUF_INIT; + struct strvec to_free = STRVEC_INIT; + int ret = 0; + char *envw = NULL; + struct config_source source; + + memset(&source, 0, sizeof(source)); + source.prev = cf; + source.origin_type = CONFIG_ORIGIN_CMDLINE; + cf = &source; + + env = getenv(CONFIG_COUNT_ENVIRONMENT); + if (env) { + unsigned long count; + char *endp; + int i; + + count = strtoul(env, &endp, 10); + if (*endp) { + ret = error(_("bogus count in %s"), CONFIG_COUNT_ENVIRONMENT); + goto out; + } + if (count > INT_MAX) { + ret = error(_("too many entries in %s"), CONFIG_COUNT_ENVIRONMENT); + goto out; + } + + for (i = 0; i < count; i++) { + const char *key, *value; + + strbuf_addf(&envvar, "GIT_CONFIG_KEY_%d", i); + key = getenv_safe(&to_free, envvar.buf); + if (!key) { + ret = error(_("missing config key %s"), envvar.buf); + goto out; + } + strbuf_reset(&envvar); + + strbuf_addf(&envvar, "GIT_CONFIG_VALUE_%d", i); + value = getenv_safe(&to_free, envvar.buf); + if (!value) { + ret = error(_("missing config value %s"), envvar.buf); + goto out; + } + strbuf_reset(&envvar); + + if (config_parse_pair(key, value, fn, data) < 0) { + ret = -1; + goto out; + } + } + } + + env = getenv(CONFIG_DATA_ENVIRONMENT); + if (env) { + /* sq_dequote will write over it */ + envw = xstrdup(env); + if (parse_config_env_list(envw, fn, data) < 0) { + ret = -1; + goto out; + } + } + +out: + strbuf_release(&envvar); + strvec_clear(&to_free); + free(envw); + cf = source.prev; + return ret; +} + +static int get_next_char(void) +{ + int c = cf->do_fgetc(cf); + + if (c == '\r') { + /* DOS like systems */ + c = cf->do_fgetc(cf); + if (c != '\n') { + if (c != EOF) + cf->do_ungetc(c, cf); + c = '\r'; + } + } + + if (c != EOF && ++cf->total_len > INT_MAX) { + /* + * This is an absurdly long config file; refuse to parse + * further in order to protect downstream code from integer + * overflows. Note that we can't return an error specifically, + * but we can mark EOF and put trash in the return value, + * which will trigger a parse error. 
+ */ + cf->eof = 1; + return 0; + } + + if (c == '\n') + cf->linenr++; + if (c == EOF) { + cf->eof = 1; + cf->linenr++; + c = '\n'; + } + return c; +} + +static char *parse_value(void) +{ + int quote = 0, comment = 0, space = 0; + + strbuf_reset(&cf->value); + for (;;) { + int c = get_next_char(); + if (c == '\n') { + if (quote) { + cf->linenr--; + return NULL; + } + return cf->value.buf; + } + if (comment) + continue; + if (isspace(c) && !quote) { + if (cf->value.len) + space++; + continue; + } + if (!quote) { + if (c == ';' || c == '#') { + comment = 1; + continue; + } + } + for (; space; space--) + strbuf_addch(&cf->value, ' '); + if (c == '\\') { + c = get_next_char(); + switch (c) { + case '\n': + continue; + case 't': + c = '\t'; + break; + case 'b': + c = '\b'; + break; + case 'n': + c = '\n'; + break; + /* Some characters escape as themselves */ + case '\\': case '"': + break; + /* Reject unknown escape sequences */ + default: + return NULL; + } + strbuf_addch(&cf->value, c); + continue; + } + if (c == '"') { + quote = 1-quote; + continue; + } + strbuf_addch(&cf->value, c); + } +} + +static int get_value(config_fn_t fn, void *data, struct strbuf *name) +{ + int c; + char *value; + int ret; + + /* Get the full name */ + for (;;) { + c = get_next_char(); + if (cf->eof) + break; + if (!iskeychar(c)) + break; + strbuf_addch(name, tolower(c)); + } + + while (c == ' ' || c == '\t') + c = get_next_char(); + + value = NULL; + if (c != '\n') { + if (c != '=') + return -1; + value = parse_value(); + if (!value) + return -1; + } + /* + * We already consumed the \n, but we need linenr to point to + * the line we just parsed during the call to fn to get + * accurate line number in error messages. + */ + cf->linenr--; + ret = fn(name->buf, value, data); + if (ret >= 0) + cf->linenr++; + return ret; +} + +static int get_extended_base_var(struct strbuf *name, int c) +{ + cf->subsection_case_sensitive = 0; + do { + if (c == '\n') + goto error_incomplete_line; + c = get_next_char(); + } while (isspace(c)); + + /* We require the format to be '[base "extension"]' */ + if (c != '"') + return -1; + strbuf_addch(name, '.'); + + for (;;) { + int c = get_next_char(); + if (c == '\n') + goto error_incomplete_line; + if (c == '"') + break; + if (c == '\\') { + c = get_next_char(); + if (c == '\n') + goto error_incomplete_line; + } + strbuf_addch(name, c); + } + + /* Final ']' */ + if (get_next_char() != ']') + return -1; + return 0; +error_incomplete_line: + cf->linenr--; + return -1; +} + +static int get_base_var(struct strbuf *name) +{ + cf->subsection_case_sensitive = 1; + for (;;) { + int c = get_next_char(); + if (cf->eof) + return -1; + if (c == ']') + return 0; + if (isspace(c)) + return get_extended_base_var(name, c); + if (!iskeychar(c) && c != '.') + return -1; + strbuf_addch(name, tolower(c)); + } +} + +struct parse_event_data { + enum config_event_t previous_type; + size_t previous_offset; + const struct config_options *opts; +}; + +static int do_event(enum config_event_t type, struct parse_event_data *data) +{ + size_t offset; + + if (!data->opts || !data->opts->event_fn) + return 0; + + if (type == CONFIG_EVENT_WHITESPACE && + data->previous_type == type) + return 0; + + offset = cf->do_ftell(cf); + /* + * At EOF, the parser always "inserts" an extra '\n', therefore + * the end offset of the event is the current file position, otherwise + * we will already have advanced to the next event. 
+ */ + if (type != CONFIG_EVENT_EOF) + offset--; + + if (data->previous_type != CONFIG_EVENT_EOF && + data->opts->event_fn(data->previous_type, data->previous_offset, + offset, data->opts->event_fn_data) < 0) + return -1; + + data->previous_type = type; + data->previous_offset = offset; + + return 0; +} + +static int git_parse_source(config_fn_t fn, void *data, + const struct config_options *opts) +{ + int comment = 0; + size_t baselen = 0; + struct strbuf *var = &cf->var; + int error_return = 0; + char *error_msg = NULL; + + /* U+FEFF Byte Order Mark in UTF8 */ + const char *bomptr = utf8_bom; + + /* For the parser event callback */ + struct parse_event_data event_data = { + CONFIG_EVENT_EOF, 0, opts + }; + + for (;;) { + int c; + + c = get_next_char(); + if (bomptr && *bomptr) { + /* We are at the file beginning; skip UTF8-encoded BOM + * if present. Sane editors won't put this in on their + * own, but e.g. Windows Notepad will do it happily. */ + if (c == (*bomptr & 0377)) { + bomptr++; + continue; + } else { + /* Do not tolerate partial BOM. */ + if (bomptr != utf8_bom) + break; + /* No BOM at file beginning. Cool. */ + bomptr = NULL; + } + } + if (c == '\n') { + if (cf->eof) { + if (do_event(CONFIG_EVENT_EOF, &event_data) < 0) + return -1; + return 0; + } + if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0) + return -1; + comment = 0; + continue; + } + if (comment) + continue; + if (isspace(c)) { + if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0) + return -1; + continue; + } + if (c == '#' || c == ';') { + if (do_event(CONFIG_EVENT_COMMENT, &event_data) < 0) + return -1; + comment = 1; + continue; + } + if (c == '[') { + if (do_event(CONFIG_EVENT_SECTION, &event_data) < 0) + return -1; + + /* Reset prior to determining a new stem */ + strbuf_reset(var); + if (get_base_var(var) < 0 || var->len < 1) + break; + strbuf_addch(var, '.'); + baselen = var->len; + continue; + } + if (!isalpha(c)) + break; + + if (do_event(CONFIG_EVENT_ENTRY, &event_data) < 0) + return -1; + + /* + * Truncate the var name back to the section header + * stem prior to grabbing the suffix part of the name + * and the value. + */ + strbuf_setlen(var, baselen); + strbuf_addch(var, tolower(c)); + if (get_value(fn, data, var) < 0) + break; + } + + if (do_event(CONFIG_EVENT_ERROR, &event_data) < 0) + return -1; + + switch (cf->origin_type) { + case CONFIG_ORIGIN_BLOB: + error_msg = xstrfmt(_("bad config line %d in blob %s"), + cf->linenr, cf->name); + break; + case CONFIG_ORIGIN_FILE: + error_msg = xstrfmt(_("bad config line %d in file %s"), + cf->linenr, cf->name); + break; + case CONFIG_ORIGIN_STDIN: + error_msg = xstrfmt(_("bad config line %d in standard input"), + cf->linenr); + break; + case CONFIG_ORIGIN_SUBMODULE_BLOB: + error_msg = xstrfmt(_("bad config line %d in submodule-blob %s"), + cf->linenr, cf->name); + break; + case CONFIG_ORIGIN_CMDLINE: + error_msg = xstrfmt(_("bad config line %d in command line %s"), + cf->linenr, cf->name); + break; + default: + error_msg = xstrfmt(_("bad config line %d in %s"), + cf->linenr, cf->name); + } + + switch (opts && opts->error_action ? 
+ opts->error_action : + cf->default_error_action) { + case CONFIG_ERROR_DIE: + die("%s", error_msg); + break; + case CONFIG_ERROR_ERROR: + error_return = error("%s", error_msg); + break; + case CONFIG_ERROR_SILENT: + error_return = -1; + break; + case CONFIG_ERROR_UNSET: + BUG("config error action unset"); + } + + free(error_msg); + return error_return; +} + +static uintmax_t get_unit_factor(const char *end) +{ + if (!*end) + return 1; + else if (!strcasecmp(end, "k")) + return 1024; + else if (!strcasecmp(end, "m")) + return 1024 * 1024; + else if (!strcasecmp(end, "g")) + return 1024 * 1024 * 1024; + return 0; +} + +static int git_parse_signed(const char *value, intmax_t *ret, intmax_t max) +{ + if (value && *value) { + char *end; + intmax_t val; + uintmax_t uval; + uintmax_t factor; + + errno = 0; + val = strtoimax(value, &end, 0); + if (errno == ERANGE) + return 0; + factor = get_unit_factor(end); + if (!factor) { + errno = EINVAL; + return 0; + } + uval = val < 0 ? -val : val; + if (unsigned_mult_overflows(factor, uval) || + factor * uval > max) { + errno = ERANGE; + return 0; + } + val *= factor; + *ret = val; + return 1; + } + errno = EINVAL; + return 0; +} + +static int git_parse_unsigned(const char *value, uintmax_t *ret, uintmax_t max) +{ + if (value && *value) { + char *end; + uintmax_t val; + uintmax_t factor; + + errno = 0; + val = strtoumax(value, &end, 0); + if (errno == ERANGE) + return 0; + factor = get_unit_factor(end); + if (!factor) { + errno = EINVAL; + return 0; + } + if (unsigned_mult_overflows(factor, val) || + factor * val > max) { + errno = ERANGE; + return 0; + } + val *= factor; + *ret = val; + return 1; + } + errno = EINVAL; + return 0; +} + +static int git_parse_int(const char *value, int *ret) +{ + intmax_t tmp; + if (!git_parse_signed(value, &tmp, maximum_signed_value_of_type(int))) + return 0; + *ret = tmp; + return 1; +} + +static int git_parse_int64(const char *value, int64_t *ret) +{ + intmax_t tmp; + if (!git_parse_signed(value, &tmp, maximum_signed_value_of_type(int64_t))) + return 0; + *ret = tmp; + return 1; +} + +int git_parse_ulong(const char *value, unsigned long *ret) +{ + uintmax_t tmp; + if (!git_parse_unsigned(value, &tmp, maximum_unsigned_value_of_type(long))) + return 0; + *ret = tmp; + return 1; +} + +int git_parse_ssize_t(const char *value, ssize_t *ret) +{ + intmax_t tmp; + if (!git_parse_signed(value, &tmp, maximum_signed_value_of_type(ssize_t))) + return 0; + *ret = tmp; + return 1; +} + +NORETURN +static void die_bad_number(const char *name, const char *value) +{ + const char *error_type = (errno == ERANGE) ? 
+ N_("out of range") : N_("invalid unit"); + const char *bad_numeric = N_("bad numeric config value '%s' for '%s': %s"); + + if (!value) + value = ""; + + if (!(cf && cf->name)) + die(_(bad_numeric), value, name, _(error_type)); + + switch (cf->origin_type) { + case CONFIG_ORIGIN_BLOB: + die(_("bad numeric config value '%s' for '%s' in blob %s: %s"), + value, name, cf->name, _(error_type)); + case CONFIG_ORIGIN_FILE: + die(_("bad numeric config value '%s' for '%s' in file %s: %s"), + value, name, cf->name, _(error_type)); + case CONFIG_ORIGIN_STDIN: + die(_("bad numeric config value '%s' for '%s' in standard input: %s"), + value, name, _(error_type)); + case CONFIG_ORIGIN_SUBMODULE_BLOB: + die(_("bad numeric config value '%s' for '%s' in submodule-blob %s: %s"), + value, name, cf->name, _(error_type)); + case CONFIG_ORIGIN_CMDLINE: + die(_("bad numeric config value '%s' for '%s' in command line %s: %s"), + value, name, cf->name, _(error_type)); + default: + die(_("bad numeric config value '%s' for '%s' in %s: %s"), + value, name, cf->name, _(error_type)); + } +} + +int git_config_int(const char *name, const char *value) +{ + int ret; + if (!git_parse_int(value, &ret)) + die_bad_number(name, value); + return ret; +} + +int64_t git_config_int64(const char *name, const char *value) +{ + int64_t ret; + if (!git_parse_int64(value, &ret)) + die_bad_number(name, value); + return ret; +} + +unsigned long git_config_ulong(const char *name, const char *value) +{ + unsigned long ret; + if (!git_parse_ulong(value, &ret)) + die_bad_number(name, value); + return ret; +} + +ssize_t git_config_ssize_t(const char *name, const char *value) +{ + ssize_t ret; + if (!git_parse_ssize_t(value, &ret)) + die_bad_number(name, value); + return ret; +} + +static int git_parse_maybe_bool_text(const char *value) +{ + if (!value) + return 1; + if (!*value) + return 0; + if (!strcasecmp(value, "true") + || !strcasecmp(value, "yes") + || !strcasecmp(value, "on")) + return 1; + if (!strcasecmp(value, "false") + || !strcasecmp(value, "no") + || !strcasecmp(value, "off")) + return 0; + return -1; +} + +int git_parse_maybe_bool(const char *value) +{ + int v = git_parse_maybe_bool_text(value); + if (0 <= v) + return v; + if (git_parse_int(value, &v)) + return !!v; + return -1; +} + +int git_config_bool_or_int(const char *name, const char *value, int *is_bool) +{ + int v = git_parse_maybe_bool_text(value); + if (0 <= v) { + *is_bool = 1; + return v; + } + *is_bool = 0; + return git_config_int(name, value); +} + +int git_config_bool(const char *name, const char *value) +{ + int v = git_parse_maybe_bool(value); + if (v < 0) + die(_("bad boolean config value '%s' for '%s'"), value, name); + return v; +} + +int git_config_string(const char **dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + *dest = xstrdup(value); + return 0; +} + +int git_config_pathname(const char **dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + *dest = expand_user_path(value, 0); + if (!*dest) + die(_("failed to expand user dir in: '%s'"), value); + return 0; +} + +int git_config_expiry_date(timestamp_t *timestamp, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + if (parse_expiry_date(value, timestamp)) + return error(_("'%s' for '%s' is not a valid timestamp"), + value, var); + return 0; +} + +int git_config_color(char *dest, const char *var, const char *value) +{ + if (!value) + return config_error_nonbool(var); + if 
(color_parse(value, dest) < 0) + return -1; + return 0; +} + +static int git_default_core_config(const char *var, const char *value, void *cb) +{ + /* This needs a better name */ + if (!strcmp(var, "core.filemode")) { + trust_executable_bit = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "core.trustctime")) { + trust_ctime = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "core.checkstat")) { + if (!strcasecmp(value, "default")) + check_stat = 1; + else if (!strcasecmp(value, "minimal")) + check_stat = 0; + } + + if (!strcmp(var, "core.quotepath")) { + quote_path_fully = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.symlinks")) { + has_symlinks = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.ignorecase")) { + ignore_case = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.attributesfile")) + return git_config_pathname(&git_attributes_file, var, value); + + if (!strcmp(var, "core.hookspath")) + return git_config_pathname(&git_hooks_path, var, value); + + if (!strcmp(var, "core.bare")) { + is_bare_repository_cfg = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.ignorestat")) { + assume_unchanged = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.prefersymlinkrefs")) { + prefer_symlink_refs = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.logallrefupdates")) { + if (value && !strcasecmp(value, "always")) + log_all_ref_updates = LOG_REFS_ALWAYS; + else if (git_config_bool(var, value)) + log_all_ref_updates = LOG_REFS_NORMAL; + else + log_all_ref_updates = LOG_REFS_NONE; + return 0; + } + + if (!strcmp(var, "core.warnambiguousrefs")) { + warn_ambiguous_refs = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.abbrev")) { + if (!value) + return config_error_nonbool(var); + if (!strcasecmp(value, "auto")) + default_abbrev = -1; + else if (!git_parse_maybe_bool_text(value)) + default_abbrev = the_hash_algo->hexsz; + else { + int abbrev = git_config_int(var, value); + if (abbrev < minimum_abbrev || abbrev > the_hash_algo->hexsz) + return error(_("abbrev length out of range: %d"), abbrev); + default_abbrev = abbrev; + } + return 0; + } + + if (!strcmp(var, "core.disambiguate")) + return set_disambiguate_hint_config(var, value); + + if (!strcmp(var, "core.loosecompression")) { + int level = git_config_int(var, value); + if (level == -1) + level = Z_DEFAULT_COMPRESSION; + else if (level < 0 || level > Z_BEST_COMPRESSION) + die(_("bad zlib compression level %d"), level); + zlib_compression_level = level; + zlib_compression_seen = 1; + return 0; + } + + if (!strcmp(var, "core.compression")) { + int level = git_config_int(var, value); + if (level == -1) + level = Z_DEFAULT_COMPRESSION; + else if (level < 0 || level > Z_BEST_COMPRESSION) + die(_("bad zlib compression level %d"), level); + if (!zlib_compression_seen) + zlib_compression_level = level; + if (!pack_compression_seen) + pack_compression_level = level; + return 0; + } + + if (!strcmp(var, "core.packedgitwindowsize")) { + int pgsz_x2 = getpagesize() * 2; + packed_git_window_size = git_config_ulong(var, value); + + /* This value must be multiple of (pagesize * 2) */ + packed_git_window_size /= pgsz_x2; + if (packed_git_window_size < 1) + packed_git_window_size = 1; + packed_git_window_size *= pgsz_x2; + return 0; + } + + if (!strcmp(var, "core.bigfilethreshold")) { + big_file_threshold = git_config_ulong(var, value); + return 0; + } + + if 
(!strcmp(var, "core.packedgitlimit")) { + packed_git_limit = git_config_ulong(var, value); + return 0; + } + + if (!strcmp(var, "core.deltabasecachelimit")) { + delta_base_cache_limit = git_config_ulong(var, value); + return 0; + } + + if (!strcmp(var, "core.autocrlf")) { + if (value && !strcasecmp(value, "input")) { + auto_crlf = AUTO_CRLF_INPUT; + return 0; + } + auto_crlf = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.safecrlf")) { + int eol_rndtrp_die; + if (value && !strcasecmp(value, "warn")) { + global_conv_flags_eol = CONV_EOL_RNDTRP_WARN; + return 0; + } + eol_rndtrp_die = git_config_bool(var, value); + global_conv_flags_eol = eol_rndtrp_die ? + CONV_EOL_RNDTRP_DIE : 0; + return 0; + } + + if (!strcmp(var, "core.eol")) { + if (value && !strcasecmp(value, "lf")) + core_eol = EOL_LF; + else if (value && !strcasecmp(value, "crlf")) + core_eol = EOL_CRLF; + else if (value && !strcasecmp(value, "native")) + core_eol = EOL_NATIVE; + else + core_eol = EOL_UNSET; + return 0; + } + + if (!strcmp(var, "core.checkroundtripencoding")) { + check_roundtrip_encoding = xstrdup(value); + return 0; + } + + if (!strcmp(var, "core.notesref")) { + notes_ref_name = xstrdup(value); + return 0; + } + + if (!strcmp(var, "core.editor")) + return git_config_string(&editor_program, var, value); + + if (!strcmp(var, "core.commentchar")) { + if (!value) + return config_error_nonbool(var); + else if (!strcasecmp(value, "auto")) + auto_comment_line_char = 1; + else if (value[0] && !value[1]) { + comment_line_char = value[0]; + auto_comment_line_char = 0; + } else + return error(_("core.commentChar should only be one character")); + return 0; + } + + if (!strcmp(var, "core.askpass")) + return git_config_string(&askpass_program, var, value); + + if (!strcmp(var, "core.excludesfile")) + return git_config_pathname(&excludes_file, var, value); + + if (!strcmp(var, "core.whitespace")) { + if (!value) + return config_error_nonbool(var); + whitespace_rule_cfg = parse_whitespace_rule(value); + return 0; + } + + if (!strcmp(var, "core.fsyncobjectfiles")) { + fsync_object_files = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.preloadindex")) { + core_preload_index = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.createobject")) { + if (!strcmp(value, "rename")) + object_creation_mode = OBJECT_CREATION_USES_RENAMES; + else if (!strcmp(value, "link")) + object_creation_mode = OBJECT_CREATION_USES_HARDLINKS; + else + die(_("invalid mode for object creation: %s"), value); + return 0; + } + + if (!strcmp(var, "core.sparsecheckout")) { + core_apply_sparse_checkout = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.sparsecheckoutcone")) { + core_sparse_checkout_cone = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.precomposeunicode")) { + precomposed_unicode = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.protecthfs")) { + protect_hfs = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.protectntfs")) { + protect_ntfs = git_config_bool(var, value); + return 0; + } + + if (!strcmp(var, "core.usereplacerefs")) { + read_replace_refs = git_config_bool(var, value); + return 0; + } + + /* Add other config variables here and to Documentation/config.txt. 
*/ + return platform_core_config(var, value, cb); +} + +static int git_default_i18n_config(const char *var, const char *value) +{ + if (!strcmp(var, "i18n.commitencoding")) + return git_config_string(&git_commit_encoding, var, value); + + if (!strcmp(var, "i18n.logoutputencoding")) + return git_config_string(&git_log_output_encoding, var, value); + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +static int git_default_branch_config(const char *var, const char *value) +{ + if (!strcmp(var, "branch.autosetupmerge")) { + if (value && !strcasecmp(value, "always")) { + git_branch_track = BRANCH_TRACK_ALWAYS; + return 0; + } + git_branch_track = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "branch.autosetuprebase")) { + if (!value) + return config_error_nonbool(var); + else if (!strcmp(value, "never")) + autorebase = AUTOREBASE_NEVER; + else if (!strcmp(value, "local")) + autorebase = AUTOREBASE_LOCAL; + else if (!strcmp(value, "remote")) + autorebase = AUTOREBASE_REMOTE; + else if (!strcmp(value, "always")) + autorebase = AUTOREBASE_ALWAYS; + else + return error(_("malformed value for %s"), var); + return 0; + } + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +static int git_default_push_config(const char *var, const char *value) +{ + if (!strcmp(var, "push.default")) { + if (!value) + return config_error_nonbool(var); + else if (!strcmp(value, "nothing")) + push_default = PUSH_DEFAULT_NOTHING; + else if (!strcmp(value, "matching")) + push_default = PUSH_DEFAULT_MATCHING; + else if (!strcmp(value, "simple")) + push_default = PUSH_DEFAULT_SIMPLE; + else if (!strcmp(value, "upstream")) + push_default = PUSH_DEFAULT_UPSTREAM; + else if (!strcmp(value, "tracking")) /* deprecated */ + push_default = PUSH_DEFAULT_UPSTREAM; + else if (!strcmp(value, "current")) + push_default = PUSH_DEFAULT_CURRENT; + else { + error(_("malformed value for %s: %s"), var, value); + return error(_("must be one of nothing, matching, simple, " + "upstream or current")); + } + return 0; + } + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +static int git_default_mailmap_config(const char *var, const char *value) +{ + if (!strcmp(var, "mailmap.file")) + return git_config_pathname(&git_mailmap_file, var, value); + if (!strcmp(var, "mailmap.blob")) + return git_config_string(&git_mailmap_blob, var, value); + + /* Add other config variables here and to Documentation/config.txt. 
*/ + return 0; +} + +int git_default_config(const char *var, const char *value, void *cb) +{ + if (starts_with(var, "core.")) + return git_default_core_config(var, value, cb); + + if (starts_with(var, "user.") || + starts_with(var, "author.") || + starts_with(var, "committer.")) + return git_ident_config(var, value, cb); + + if (starts_with(var, "i18n.")) + return git_default_i18n_config(var, value); + + if (starts_with(var, "branch.")) + return git_default_branch_config(var, value); + + if (starts_with(var, "push.")) + return git_default_push_config(var, value); + + if (starts_with(var, "mailmap.")) + return git_default_mailmap_config(var, value); + + if (starts_with(var, "advice.") || starts_with(var, "color.advice")) + return git_default_advice_config(var, value); + + if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) { + pager_use_color = git_config_bool(var,value); + return 0; + } + + if (!strcmp(var, "pack.packsizelimit")) { + pack_size_limit_cfg = git_config_ulong(var, value); + return 0; + } + + if (!strcmp(var, "pack.compression")) { + int level = git_config_int(var, value); + if (level == -1) + level = Z_DEFAULT_COMPRESSION; + else if (level < 0 || level > Z_BEST_COMPRESSION) + die(_("bad pack compression level %d"), level); + pack_compression_level = level; + pack_compression_seen = 1; + return 0; + } + + /* Add other config variables here and to Documentation/config.txt. */ + return 0; +} + +/* + * All source specific fields in the union, die_on_error, name and the callbacks + * fgetc, ungetc, ftell of top need to be initialized before calling + * this function. + */ +static int do_config_from(struct config_source *top, config_fn_t fn, void *data, + const struct config_options *opts) +{ + int ret; + + /* push config-file parsing state stack */ + top->prev = cf; + top->linenr = 1; + top->eof = 0; + top->total_len = 0; + strbuf_init(&top->value, 1024); + strbuf_init(&top->var, 1024); + cf = top; + + ret = git_parse_source(fn, data, opts); + + /* pop config-file parsing state stack */ + strbuf_release(&top->value); + strbuf_release(&top->var); + cf = top->prev; + + return ret; +} + +static int do_config_from_file(config_fn_t fn, + const enum config_origin_type origin_type, + const char *name, const char *path, FILE *f, + void *data, const struct config_options *opts) +{ + struct config_source top; + int ret; + + top.u.file = f; + top.origin_type = origin_type; + top.name = name; + top.path = path; + top.default_error_action = CONFIG_ERROR_DIE; + top.do_fgetc = config_file_fgetc; + top.do_ungetc = config_file_ungetc; + top.do_ftell = config_file_ftell; + + flockfile(f); + ret = do_config_from(&top, fn, data, opts); + funlockfile(f); + return ret; +} + +static int git_config_from_stdin(config_fn_t fn, void *data) +{ + return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin, + data, NULL); +} + +int git_config_from_file_with_options(config_fn_t fn, const char *filename, + void *data, + const struct config_options *opts) +{ + int ret = -1; + FILE *f; + + f = fopen_or_warn(filename, "r"); + if (f) { + ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, + filename, f, data, opts); + fclose(f); + } + return ret; +} + +int git_config_from_file(config_fn_t fn, const char *filename, void *data) +{ + return git_config_from_file_with_options(fn, filename, data, NULL); +} + +int git_config_from_mem(config_fn_t fn, + const enum config_origin_type origin_type, + const char *name, const char *buf, size_t len, + void *data, const struct config_options *opts) +{ + 
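+	/*
+	 * Parse configuration out of an in-memory buffer (e.g. blob
+	 * contents).  Unlike do_config_from_file(), which dies on a bad
+	 * line by default, the default error action here is
+	 * CONFIG_ERROR_ERROR, so a parse failure only reports an error,
+	 * unless the caller overrides the action through "opts".
+	 */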
struct config_source top; + + top.u.buf.buf = buf; + top.u.buf.len = len; + top.u.buf.pos = 0; + top.origin_type = origin_type; + top.name = name; + top.path = NULL; + top.default_error_action = CONFIG_ERROR_ERROR; + top.do_fgetc = config_buf_fgetc; + top.do_ungetc = config_buf_ungetc; + top.do_ftell = config_buf_ftell; + + return do_config_from(&top, fn, data, opts); +} + +int git_config_from_blob_oid(config_fn_t fn, + const char *name, + const struct object_id *oid, + void *data) +{ + enum object_type type; + char *buf; + unsigned long size; + int ret; + + buf = read_object_file(oid, &type, &size); + if (!buf) + return error(_("unable to load config blob object '%s'"), name); + if (type != OBJ_BLOB) { + free(buf); + return error(_("reference '%s' does not point to a blob"), name); + } + + ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size, + data, NULL); + free(buf); + + return ret; +} + +static int git_config_from_blob_ref(config_fn_t fn, + const char *name, + void *data) +{ + struct object_id oid; + + if (get_oid(name, &oid) < 0) + return error(_("unable to resolve config blob '%s'"), name); + return git_config_from_blob_oid(fn, name, &oid, data); +} + +char *git_system_config(void) +{ + char *system_config = xstrdup_or_null(getenv("GIT_CONFIG_SYSTEM")); + if (!system_config) + system_config = system_path(ETC_GITCONFIG); + normalize_path_copy(system_config, system_config); + return system_config; +} + +void git_global_config(char **user_out, char **xdg_out) +{ + char *user_config = xstrdup_or_null(getenv("GIT_CONFIG_GLOBAL")); + char *xdg_config = NULL; + + if (!user_config) { + user_config = expand_user_path("~/.gitconfig", 0); + xdg_config = xdg_config_home("config"); + } + + *user_out = user_config; + *xdg_out = xdg_config; +} + +/* + * Parse environment variable 'k' as a boolean (in various + * possible spellings); if missing, use the default value 'def'. + */ +int git_env_bool(const char *k, int def) +{ + const char *v = getenv(k); + return v ? git_config_bool(k, v) : def; +} + +/* + * Parse environment variable 'k' as ulong with possibly a unit + * suffix; if missing, use the default value 'val'. + */ +unsigned long git_env_ulong(const char *k, unsigned long val) +{ + const char *v = getenv(k); + if (v && !git_parse_ulong(v, &val)) + die(_("failed to parse %s"), k); + return val; +} + +int git_config_system(void) +{ + return !git_env_bool("GIT_CONFIG_NOSYSTEM", 0); +} + +static int do_git_config_sequence(const struct config_options *opts, + config_fn_t fn, void *data) +{ + int ret = 0; + char *system_config = git_system_config(); + char *xdg_config = NULL; + char *user_config = NULL; + char *repo_config; + enum config_scope prev_parsing_scope = current_parsing_scope; + + if (opts->commondir) + repo_config = mkpathdup("%s/config", opts->commondir); + else if (opts->git_dir) + BUG("git_dir without commondir"); + else + repo_config = NULL; + + current_parsing_scope = CONFIG_SCOPE_SYSTEM; + if (git_config_system() && system_config && + !access_or_die(system_config, R_OK, + opts->system_gently ? 
ACCESS_EACCES_OK : 0)) + ret += git_config_from_file(fn, system_config, data); + + current_parsing_scope = CONFIG_SCOPE_GLOBAL; + git_global_config(&user_config, &xdg_config); + + if (xdg_config && !access_or_die(xdg_config, R_OK, ACCESS_EACCES_OK)) + ret += git_config_from_file(fn, xdg_config, data); + + if (user_config && !access_or_die(user_config, R_OK, ACCESS_EACCES_OK)) + ret += git_config_from_file(fn, user_config, data); + + current_parsing_scope = CONFIG_SCOPE_LOCAL; + if (!opts->ignore_repo && repo_config && + !access_or_die(repo_config, R_OK, 0)) + ret += git_config_from_file(fn, repo_config, data); + + current_parsing_scope = CONFIG_SCOPE_WORKTREE; + if (!opts->ignore_worktree && repository_format_worktree_config) { + char *path = git_pathdup("config.worktree"); + if (!access_or_die(path, R_OK, 0)) + ret += git_config_from_file(fn, path, data); + free(path); + } + + current_parsing_scope = CONFIG_SCOPE_COMMAND; + if (!opts->ignore_cmdline && git_config_from_parameters(fn, data) < 0) + die(_("unable to parse command-line config")); + + current_parsing_scope = prev_parsing_scope; + free(system_config); + free(xdg_config); + free(user_config); + free(repo_config); + return ret; +} + +int config_with_options(config_fn_t fn, void *data, + struct git_config_source *config_source, + const struct config_options *opts) +{ + struct config_include_data inc = CONFIG_INCLUDE_INIT; + + if (opts->respect_includes) { + inc.fn = fn; + inc.data = data; + inc.opts = opts; + fn = git_config_include; + data = &inc; + } + + if (config_source) + current_parsing_scope = config_source->scope; + + /* + * If we have a specific filename, use it. Otherwise, follow the + * regular lookup sequence. + */ + if (config_source && config_source->use_stdin) + return git_config_from_stdin(fn, data); + else if (config_source && config_source->file) + return git_config_from_file(fn, config_source->file, data); + else if (config_source && config_source->blob) + return git_config_from_blob_ref(fn, config_source->blob, data); + + return do_git_config_sequence(opts, fn, data); +} + +static void configset_iter(struct config_set *cs, config_fn_t fn, void *data) +{ + int i, value_index; + struct string_list *values; + struct config_set_element *entry; + struct configset_list *list = &cs->list; + + for (i = 0; i < list->nr; i++) { + entry = list->items[i].e; + value_index = list->items[i].value_index; + values = &entry->value_list; + + current_config_kvi = values->items[value_index].util; + + if (fn(entry->key, values->items[value_index].string, data) < 0) + git_die_config_linenr(entry->key, + current_config_kvi->filename, + current_config_kvi->linenr); + + current_config_kvi = NULL; + } +} + +void read_early_config(config_fn_t cb, void *data) +{ + struct config_options opts = {0}; + struct strbuf commondir = STRBUF_INIT; + struct strbuf gitdir = STRBUF_INIT; + + opts.respect_includes = 1; + + if (have_git_dir()) { + opts.commondir = get_git_common_dir(); + opts.git_dir = get_git_dir(); + /* + * When setup_git_directory() was not yet asked to discover the + * GIT_DIR, we ask discover_git_directory() to figure out whether there + * is any repository config we should use (but unlike + * setup_git_directory_gently(), no global state is changed, most + * notably, the current working directory is still the same after the + * call). 
+ */ + } else if (!discover_git_directory(&commondir, &gitdir)) { + opts.commondir = commondir.buf; + opts.git_dir = gitdir.buf; + } + + config_with_options(cb, data, NULL, &opts); + + strbuf_release(&commondir); + strbuf_release(&gitdir); +} + +/* + * Read config but only enumerate system and global settings. + * Omit any repo-local, worktree-local, or command-line settings. + */ +void read_very_early_config(config_fn_t cb, void *data) +{ + struct config_options opts = { 0 }; + + opts.respect_includes = 1; + opts.ignore_repo = 1; + opts.ignore_worktree = 1; + opts.ignore_cmdline = 1; + opts.system_gently = 1; + + config_with_options(cb, data, NULL, &opts); +} + +static struct config_set_element *configset_find_element(struct config_set *cs, const char *key) +{ + struct config_set_element k; + struct config_set_element *found_entry; + char *normalized_key; + /* + * `key` may come from the user, so normalize it before using it + * for querying entries from the hashmap. + */ + if (git_config_parse_key(key, &normalized_key, NULL)) + return NULL; + + hashmap_entry_init(&k.ent, strhash(normalized_key)); + k.key = normalized_key; + found_entry = hashmap_get_entry(&cs->config_hash, &k, ent, NULL); + free(normalized_key); + return found_entry; +} + +static int configset_add_value(struct config_set *cs, const char *key, const char *value) +{ + struct config_set_element *e; + struct string_list_item *si; + struct configset_list_item *l_item; + struct key_value_info *kv_info = xmalloc(sizeof(*kv_info)); + + e = configset_find_element(cs, key); + /* + * Since the keys are being fed by git_config*() callback mechanism, they + * are already normalized. So simply add them without any further munging. + */ + if (!e) { + e = xmalloc(sizeof(*e)); + hashmap_entry_init(&e->ent, strhash(key)); + e->key = xstrdup(key); + string_list_init_dup(&e->value_list); + hashmap_add(&cs->config_hash, &e->ent); + } + si = string_list_append_nodup(&e->value_list, xstrdup_or_null(value)); + + ALLOC_GROW(cs->list.items, cs->list.nr + 1, cs->list.alloc); + l_item = &cs->list.items[cs->list.nr++]; + l_item->e = e; + l_item->value_index = e->value_list.nr - 1; + + if (!cf) + BUG("configset_add_value has no source"); + if (cf->name) { + kv_info->filename = strintern(cf->name); + kv_info->linenr = cf->linenr; + kv_info->origin_type = cf->origin_type; + } else { + /* for values read from `git_config_from_parameters()` */ + kv_info->filename = NULL; + kv_info->linenr = -1; + kv_info->origin_type = CONFIG_ORIGIN_CMDLINE; + } + kv_info->scope = current_parsing_scope; + si->util = kv_info; + + return 0; +} + +static int config_set_element_cmp(const void *unused_cmp_data, + const struct hashmap_entry *eptr, + const struct hashmap_entry *entry_or_key, + const void *unused_keydata) +{ + const struct config_set_element *e1, *e2; + + e1 = container_of(eptr, const struct config_set_element, ent); + e2 = container_of(entry_or_key, const struct config_set_element, ent); + + return strcmp(e1->key, e2->key); +} + +void git_configset_init(struct config_set *cs) +{ + hashmap_init(&cs->config_hash, config_set_element_cmp, NULL, 0); + cs->hash_initialized = 1; + cs->list.nr = 0; + cs->list.alloc = 0; + cs->list.items = NULL; +} + +void git_configset_clear(struct config_set *cs) +{ + struct config_set_element *entry; + struct hashmap_iter iter; + if (!cs->hash_initialized) + return; + + hashmap_for_each_entry(&cs->config_hash, &iter, entry, + ent /* member name */) { + free(entry->key); + string_list_clear(&entry->value_list, 1); + } + 
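+	/*
+	 * The loop above released each entry's key and value list; the
+	 * config_set_element structs themselves are freed together with
+	 * the hashmap storage below.
+	 */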
hashmap_clear_and_free(&cs->config_hash, struct config_set_element, ent); + cs->hash_initialized = 0; + free(cs->list.items); + cs->list.nr = 0; + cs->list.alloc = 0; + cs->list.items = NULL; +} + +static int config_set_callback(const char *key, const char *value, void *cb) +{ + struct config_set *cs = cb; + configset_add_value(cs, key, value); + return 0; +} + +int git_configset_add_file(struct config_set *cs, const char *filename) +{ + return git_config_from_file(config_set_callback, filename, cs); +} + +int git_configset_get_value(struct config_set *cs, const char *key, const char **value) +{ + const struct string_list *values = NULL; + /* + * Follows "last one wins" semantic, i.e., if there are multiple matches for the + * queried key in the files of the configset, the value returned will be the last + * value in the value list for that key. + */ + values = git_configset_get_value_multi(cs, key); + + if (!values) + return 1; + assert(values->nr > 0); + *value = values->items[values->nr - 1].string; + return 0; +} + +const struct string_list *git_configset_get_value_multi(struct config_set *cs, const char *key) +{ + struct config_set_element *e = configset_find_element(cs, key); + return e ? &e->value_list : NULL; +} + +int git_configset_get_string(struct config_set *cs, const char *key, char **dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) + return git_config_string((const char **)dest, key, value); + else + return 1; +} + +int git_configset_get_string_tmp(struct config_set *cs, const char *key, + const char **dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + if (!value) + return config_error_nonbool(key); + *dest = value; + return 0; + } else { + return 1; + } +} + +int git_configset_get_int(struct config_set *cs, const char *key, int *dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + *dest = git_config_int(key, value); + return 0; + } else + return 1; +} + +int git_configset_get_ulong(struct config_set *cs, const char *key, unsigned long *dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + *dest = git_config_ulong(key, value); + return 0; + } else + return 1; +} + +int git_configset_get_bool(struct config_set *cs, const char *key, int *dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + *dest = git_config_bool(key, value); + return 0; + } else + return 1; +} + +int git_configset_get_bool_or_int(struct config_set *cs, const char *key, + int *is_bool, int *dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + *dest = git_config_bool_or_int(key, value, is_bool); + return 0; + } else + return 1; +} + +int git_configset_get_maybe_bool(struct config_set *cs, const char *key, int *dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) { + *dest = git_parse_maybe_bool(value); + if (*dest == -1) + return -1; + return 0; + } else + return 1; +} + +int git_configset_get_pathname(struct config_set *cs, const char *key, const char **dest) +{ + const char *value; + if (!git_configset_get_value(cs, key, &value)) + return git_config_pathname(dest, key, value); + else + return 1; +} + +/* Functions use to read configuration from a repository */ +static void repo_read_config(struct repository *repo) +{ + struct config_options opts = { 0 }; + + opts.respect_includes = 1; + opts.commondir = repo->commondir; + opts.git_dir = repo->gitdir; + + if (!repo->config) + CALLOC_ARRAY(repo->config, 1); + 
else + git_configset_clear(repo->config); + + git_configset_init(repo->config); + + if (config_with_options(config_set_callback, repo->config, NULL, &opts) < 0) + /* + * config_with_options() normally returns only + * zero, as most errors are fatal, and + * non-fatal potential errors are guarded by "if" + * statements that are entered only when no error is + * possible. + * + * If we ever encounter a non-fatal error, it means + * something went really wrong and we should stop + * immediately. + */ + die(_("unknown error occurred while reading the configuration files")); +} + +static void git_config_check_init(struct repository *repo) +{ + if (repo->config && repo->config->hash_initialized) + return; + repo_read_config(repo); +} + +static void repo_config_clear(struct repository *repo) +{ + if (!repo->config || !repo->config->hash_initialized) + return; + git_configset_clear(repo->config); +} + +void repo_config(struct repository *repo, config_fn_t fn, void *data) +{ + git_config_check_init(repo); + configset_iter(repo->config, fn, data); +} + +int repo_config_get_value(struct repository *repo, + const char *key, const char **value) +{ + git_config_check_init(repo); + return git_configset_get_value(repo->config, key, value); +} + +const struct string_list *repo_config_get_value_multi(struct repository *repo, + const char *key) +{ + git_config_check_init(repo); + return git_configset_get_value_multi(repo->config, key); +} + +int repo_config_get_string(struct repository *repo, + const char *key, char **dest) +{ + int ret; + git_config_check_init(repo); + ret = git_configset_get_string(repo->config, key, dest); + if (ret < 0) + git_die_config(key, NULL); + return ret; +} + +int repo_config_get_string_tmp(struct repository *repo, + const char *key, const char **dest) +{ + int ret; + git_config_check_init(repo); + ret = git_configset_get_string_tmp(repo->config, key, dest); + if (ret < 0) + git_die_config(key, NULL); + return ret; +} + +int repo_config_get_int(struct repository *repo, + const char *key, int *dest) +{ + git_config_check_init(repo); + return git_configset_get_int(repo->config, key, dest); +} + +int repo_config_get_ulong(struct repository *repo, + const char *key, unsigned long *dest) +{ + git_config_check_init(repo); + return git_configset_get_ulong(repo->config, key, dest); +} + +int repo_config_get_bool(struct repository *repo, + const char *key, int *dest) +{ + git_config_check_init(repo); + return git_configset_get_bool(repo->config, key, dest); +} + +int repo_config_get_bool_or_int(struct repository *repo, + const char *key, int *is_bool, int *dest) +{ + git_config_check_init(repo); + return git_configset_get_bool_or_int(repo->config, key, is_bool, dest); +} + +int repo_config_get_maybe_bool(struct repository *repo, + const char *key, int *dest) +{ + git_config_check_init(repo); + return git_configset_get_maybe_bool(repo->config, key, dest); +} + +int repo_config_get_pathname(struct repository *repo, + const char *key, const char **dest) +{ + int ret; + git_config_check_init(repo); + ret = git_configset_get_pathname(repo->config, key, dest); + if (ret < 0) + git_die_config(key, NULL); + return ret; +} + +/* Functions used historically to read configuration from 'the_repository' */ +void git_config(config_fn_t fn, void *data) +{ + repo_config(the_repository, fn, data); +} + +void git_config_clear(void) +{ + repo_config_clear(the_repository); +} + +int git_config_get_value(const char *key, const char **value) +{ + return repo_config_get_value(the_repository, key, value); +} + 
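+/*
+ * Illustrative sketch: callers typically use either the callback
+ * interface or the configset getters wrapped above.  The helpers
+ * show_entry() and report_bare() below are hypothetical and exist only
+ * for illustration; note that "value" is NULL for a valueless key
+ * (implicit boolean true):
+ *
+ *	static int show_entry(const char *key, const char *value, void *data)
+ *	{
+ *		printf("%s=%s\n", key, value ? value : "(true)");
+ *		return 0;
+ *	}
+ *
+ *	static void report_bare(void)
+ *	{
+ *		int bare;
+ *
+ *		git_config(show_entry, NULL);
+ *		if (!git_config_get_bool("core.bare", &bare))
+ *			printf("core.bare is %d\n", bare);
+ *	}
+ *
+ * git_config_get_bool() returns 0 only when the key is present; a
+ * malformed value dies with an error message.
+ */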
+const struct string_list *git_config_get_value_multi(const char *key) +{ + return repo_config_get_value_multi(the_repository, key); +} + +int git_config_get_string(const char *key, char **dest) +{ + return repo_config_get_string(the_repository, key, dest); +} + +int git_config_get_string_tmp(const char *key, const char **dest) +{ + return repo_config_get_string_tmp(the_repository, key, dest); +} + +int git_config_get_int(const char *key, int *dest) +{ + return repo_config_get_int(the_repository, key, dest); +} + +int git_config_get_ulong(const char *key, unsigned long *dest) +{ + return repo_config_get_ulong(the_repository, key, dest); +} + +int git_config_get_bool(const char *key, int *dest) +{ + return repo_config_get_bool(the_repository, key, dest); +} + +int git_config_get_bool_or_int(const char *key, int *is_bool, int *dest) +{ + return repo_config_get_bool_or_int(the_repository, key, is_bool, dest); +} + +int git_config_get_maybe_bool(const char *key, int *dest) +{ + return repo_config_get_maybe_bool(the_repository, key, dest); +} + +int git_config_get_pathname(const char *key, const char **dest) +{ + return repo_config_get_pathname(the_repository, key, dest); +} + +int git_config_get_expiry(const char *key, const char **output) +{ + int ret = git_config_get_string(key, (char **)output); + if (ret) + return ret; + if (strcmp(*output, "now")) { + timestamp_t now = approxidate("now"); + if (approxidate(*output) >= now) + git_die_config(key, _("Invalid %s: '%s'"), key, *output); + } + return ret; +} + +int git_config_get_expiry_in_days(const char *key, timestamp_t *expiry, timestamp_t now) +{ + const char *expiry_string; + intmax_t days; + timestamp_t when; + + if (git_config_get_string_tmp(key, &expiry_string)) + return 1; /* no such thing */ + + if (git_parse_signed(expiry_string, &days, maximum_signed_value_of_type(int))) { + const int scale = 86400; + *expiry = now - days * scale; + return 0; + } + + if (!parse_expiry_date(expiry_string, &when)) { + *expiry = when; + return 0; + } + return -1; /* thing exists but cannot be parsed */ +} + +int git_config_get_split_index(void) +{ + int val; + + if (!git_config_get_maybe_bool("core.splitindex", &val)) + return val; + + return -1; /* default value */ +} + +int git_config_get_max_percent_split_change(void) +{ + int val = -1; + + if (!git_config_get_int("splitindex.maxpercentchange", &val)) { + if (0 <= val && val <= 100) + return val; + + return error(_("splitIndex.maxPercentChange value '%d' " + "should be between 0 and 100"), val); + } + + return -1; /* default value */ +} + +int git_config_get_fsmonitor(void) +{ + if (git_config_get_pathname("core.fsmonitor", &core_fsmonitor)) + core_fsmonitor = getenv("GIT_TEST_FSMONITOR"); + + if (core_fsmonitor && !*core_fsmonitor) + core_fsmonitor = NULL; + + if (core_fsmonitor) + return 1; + + return 0; +} + +int git_config_get_index_threads(int *dest) +{ + int is_bool, val; + + val = git_env_ulong("GIT_TEST_INDEX_THREADS", 0); + if (val) { + *dest = val; + return 0; + } + + if (!git_config_get_bool_or_int("index.threads", &is_bool, &val)) { + if (is_bool) + *dest = val ? 
0 : 1; + else + *dest = val; + return 0; + } + + return 1; +} + +NORETURN +void git_die_config_linenr(const char *key, const char *filename, int linenr) +{ + if (!filename) + die(_("unable to parse '%s' from command-line config"), key); + else + die(_("bad config variable '%s' in file '%s' at line %d"), + key, filename, linenr); +} + +NORETURN __attribute__((format(printf, 2, 3))) +void git_die_config(const char *key, const char *err, ...) +{ + const struct string_list *values; + struct key_value_info *kv_info; + + if (err) { + va_list params; + va_start(params, err); + vreportf("error: ", err, params); + va_end(params); + } + values = git_config_get_value_multi(key); + kv_info = values->items[values->nr - 1].util; + git_die_config_linenr(key, kv_info->filename, kv_info->linenr); +} + +/* + * Find all the stuff for git_config_set() below. + */ + +struct config_store_data { + size_t baselen; + char *key; + int do_not_match; + const char *fixed_value; + regex_t *value_pattern; + int multi_replace; + struct { + size_t begin, end; + enum config_event_t type; + int is_keys_section; + } *parsed; + unsigned int parsed_nr, parsed_alloc, *seen, seen_nr, seen_alloc; + unsigned int key_seen:1, section_seen:1, is_keys_section:1; +}; + +static void config_store_data_clear(struct config_store_data *store) +{ + free(store->key); + if (store->value_pattern != NULL && + store->value_pattern != CONFIG_REGEX_NONE) { + regfree(store->value_pattern); + free(store->value_pattern); + } + free(store->parsed); + free(store->seen); + memset(store, 0, sizeof(*store)); +} + +static int matches(const char *key, const char *value, + const struct config_store_data *store) +{ + if (strcmp(key, store->key)) + return 0; /* not ours */ + if (store->fixed_value) + return !strcmp(store->fixed_value, value); + if (!store->value_pattern) + return 1; /* always matches */ + if (store->value_pattern == CONFIG_REGEX_NONE) + return 0; /* never matches */ + + return store->do_not_match ^ + (value && !regexec(store->value_pattern, value, 0, NULL, 0)); +} + +static int store_aux_event(enum config_event_t type, + size_t begin, size_t end, void *data) +{ + struct config_store_data *store = data; + + ALLOC_GROW(store->parsed, store->parsed_nr + 1, store->parsed_alloc); + store->parsed[store->parsed_nr].begin = begin; + store->parsed[store->parsed_nr].end = end; + store->parsed[store->parsed_nr].type = type; + + if (type == CONFIG_EVENT_SECTION) { + int (*cmpfn)(const char *, const char *, size_t); + + if (cf->var.len < 2 || cf->var.buf[cf->var.len - 1] != '.') + return error(_("invalid section name '%s'"), cf->var.buf); + + if (cf->subsection_case_sensitive) + cmpfn = strncasecmp; + else + cmpfn = strncmp; + + /* Is this the section we were looking for? 
*/ + store->is_keys_section = + store->parsed[store->parsed_nr].is_keys_section = + cf->var.len - 1 == store->baselen && + !cmpfn(cf->var.buf, store->key, store->baselen); + if (store->is_keys_section) { + store->section_seen = 1; + ALLOC_GROW(store->seen, store->seen_nr + 1, + store->seen_alloc); + store->seen[store->seen_nr] = store->parsed_nr; + } + } + + store->parsed_nr++; + + return 0; +} + +static int store_aux(const char *key, const char *value, void *cb) +{ + struct config_store_data *store = cb; + + if (store->key_seen) { + if (matches(key, value, store)) { + if (store->seen_nr == 1 && store->multi_replace == 0) { + warning(_("%s has multiple values"), key); + } + + ALLOC_GROW(store->seen, store->seen_nr + 1, + store->seen_alloc); + + store->seen[store->seen_nr] = store->parsed_nr; + store->seen_nr++; + } + } else if (store->is_keys_section) { + /* + * Do not increment matches yet: this may not be a match, but we + * are in the desired section. + */ + ALLOC_GROW(store->seen, store->seen_nr + 1, store->seen_alloc); + store->seen[store->seen_nr] = store->parsed_nr; + store->section_seen = 1; + + if (matches(key, value, store)) { + store->seen_nr++; + store->key_seen = 1; + } + } + + return 0; +} + +static int write_error(const char *filename) +{ + error(_("failed to write new configuration file %s"), filename); + + /* Same error code as "failed to rename". */ + return 4; +} + +static struct strbuf store_create_section(const char *key, + const struct config_store_data *store) +{ + const char *dot; + size_t i; + struct strbuf sb = STRBUF_INIT; + + dot = memchr(key, '.', store->baselen); + if (dot) { + strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key); + for (i = dot - key + 1; i < store->baselen; i++) { + if (key[i] == '"' || key[i] == '\\') + strbuf_addch(&sb, '\\'); + strbuf_addch(&sb, key[i]); + } + strbuf_addstr(&sb, "\"]\n"); + } else { + strbuf_addch(&sb, '['); + strbuf_add(&sb, key, store->baselen); + strbuf_addstr(&sb, "]\n"); + } + + return sb; +} + +static ssize_t write_section(int fd, const char *key, + const struct config_store_data *store) +{ + struct strbuf sb = store_create_section(key, store); + ssize_t ret; + + ret = write_in_full(fd, sb.buf, sb.len); + strbuf_release(&sb); + + return ret; +} + +static ssize_t write_pair(int fd, const char *key, const char *value, + const struct config_store_data *store) +{ + int i; + ssize_t ret; + const char *quote = ""; + struct strbuf sb = STRBUF_INIT; + + /* + * Check to see if the value needs to be surrounded with a dq pair. + * Note that problematic characters are always backslash-quoted; this + * check is about not losing leading or trailing SP and strings that + * follow beginning-of-comment characters (i.e. ';' and '#') by the + * configuration parser. 
+ */ + if (value[0] == ' ') + quote = "\""; + for (i = 0; value[i]; i++) + if (value[i] == ';' || value[i] == '#') + quote = "\""; + if (i && value[i - 1] == ' ') + quote = "\""; + + strbuf_addf(&sb, "\t%s = %s", key + store->baselen + 1, quote); + + for (i = 0; value[i]; i++) + switch (value[i]) { + case '\n': + strbuf_addstr(&sb, "\\n"); + break; + case '\t': + strbuf_addstr(&sb, "\\t"); + break; + case '"': + case '\\': + strbuf_addch(&sb, '\\'); + /* fallthrough */ + default: + strbuf_addch(&sb, value[i]); + break; + } + strbuf_addf(&sb, "%s\n", quote); + + ret = write_in_full(fd, sb.buf, sb.len); + strbuf_release(&sb); + + return ret; +} + +/* + * If we are about to unset the last key(s) in a section, and if there are + * no comments surrounding (or included in) the section, we will want to + * extend begin/end to remove the entire section. + * + * Note: the parameter `seen_ptr` points to the index into the store.seen + * array. * This index may be incremented if a section has more than one + * entry (which all are to be removed). + */ +static void maybe_remove_section(struct config_store_data *store, + size_t *begin_offset, size_t *end_offset, + int *seen_ptr) +{ + size_t begin; + int i, seen, section_seen = 0; + + /* + * First, ensure that this is the first key, and that there are no + * comments before the entry nor before the section header. + */ + seen = *seen_ptr; + for (i = store->seen[seen]; i > 0; i--) { + enum config_event_t type = store->parsed[i - 1].type; + + if (type == CONFIG_EVENT_COMMENT) + /* There is a comment before this entry or section */ + return; + if (type == CONFIG_EVENT_ENTRY) { + if (!section_seen) + /* This is not the section's first entry. */ + return; + /* We encountered no comment before the section. */ + break; + } + if (type == CONFIG_EVENT_SECTION) { + if (!store->parsed[i - 1].is_keys_section) + break; + section_seen = 1; + } + } + begin = store->parsed[i].begin; + + /* + * Next, make sure that we are removing the last key(s) in the section, + * and that there are no comments that are possibly about the current + * section. + */ + for (i = store->seen[seen] + 1; i < store->parsed_nr; i++) { + enum config_event_t type = store->parsed[i].type; + + if (type == CONFIG_EVENT_COMMENT) + return; + if (type == CONFIG_EVENT_SECTION) { + if (store->parsed[i].is_keys_section) + continue; + break; + } + if (type == CONFIG_EVENT_ENTRY) { + if (++seen < store->seen_nr && + i == store->seen[seen]) + /* We want to remove this entry, too */ + continue; + /* There is another entry in this section. */ + return; + } + } + + /* + * We are really removing the last entry/entries from this section, and + * there are no enclosed or surrounding comments. Remove the entire, + * now-empty section. 
+ */ + *seen_ptr = seen; + *begin_offset = begin; + if (i < store->parsed_nr) + *end_offset = store->parsed[i].begin; + else + *end_offset = store->parsed[store->parsed_nr - 1].end; +} + +int git_config_set_in_file_gently(const char *config_filename, + const char *key, const char *value) +{ + return git_config_set_multivar_in_file_gently(config_filename, key, value, NULL, 0); +} + +void git_config_set_in_file(const char *config_filename, + const char *key, const char *value) +{ + git_config_set_multivar_in_file(config_filename, key, value, NULL, 0); +} + +int git_config_set_gently(const char *key, const char *value) +{ + return git_config_set_multivar_gently(key, value, NULL, 0); +} + +void git_config_set(const char *key, const char *value) +{ + git_config_set_multivar(key, value, NULL, 0); + + trace2_cmd_set_config(key, value); +} + +/* + * If value==NULL, unset in (remove from) config, + * if value_pattern!=NULL, disregard key/value pairs where value does not match. + * if value_pattern==CONFIG_REGEX_NONE, do not match any existing values + * (only add a new one) + * if flags contains the CONFIG_FLAGS_MULTI_REPLACE flag, all matching + * key/values are removed before a single new pair is written. If the + * flag is not present, then replace only the first match. + * + * Returns 0 on success. + * + * This function does this: + * + * - it locks the config file by creating ".git/config.lock" + * + * - it then parses the config using store_aux() as validator to find + * the position on the key/value pair to replace. If it is to be unset, + * it must be found exactly once. + * + * - the config file is mmap()ed and the part before the match (if any) is + * written to the lock file, then the changed part and the rest. + * + * - the config file is removed and the lock file rename()d to it. + * + */ +int git_config_set_multivar_in_file_gently(const char *config_filename, + const char *key, const char *value, + const char *value_pattern, + unsigned flags) +{ + int fd = -1, in_fd = -1; + int ret; + struct lock_file lock = LOCK_INIT; + char *filename_buf = NULL; + char *contents = NULL; + size_t contents_sz; + struct config_store_data store; + + memset(&store, 0, sizeof(store)); + + /* parse-key returns negative; flip the sign to feed exit(3) */ + ret = 0 - git_config_parse_key(key, &store.key, &store.baselen); + if (ret) + goto out_free; + + store.multi_replace = (flags & CONFIG_FLAGS_MULTI_REPLACE) != 0; + + if (!config_filename) + config_filename = filename_buf = git_pathdup("config"); + + /* + * The lock serves a purpose in addition to locking: the new + * contents of .git/config will be written into it. + */ + fd = hold_lock_file_for_update(&lock, config_filename, 0); + if (fd < 0) { + error_errno(_("could not lock config file %s"), config_filename); + ret = CONFIG_NO_LOCK; + goto out_free; + } + + /* + * If .git/config does not exist yet, write a minimal version. 
+ */ + in_fd = open(config_filename, O_RDONLY); + if ( in_fd < 0 ) { + if ( ENOENT != errno ) { + error_errno(_("opening %s"), config_filename); + ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */ + goto out_free; + } + /* if nothing to unset, error out */ + if (value == NULL) { + ret = CONFIG_NOTHING_SET; + goto out_free; + } + + free(store.key); + store.key = xstrdup(key); + if (write_section(fd, key, &store) < 0 || + write_pair(fd, key, value, &store) < 0) + goto write_err_out; + } else { + struct stat st; + size_t copy_begin, copy_end; + int i, new_line = 0; + struct config_options opts; + + if (value_pattern == NULL) + store.value_pattern = NULL; + else if (value_pattern == CONFIG_REGEX_NONE) + store.value_pattern = CONFIG_REGEX_NONE; + else if (flags & CONFIG_FLAGS_FIXED_VALUE) + store.fixed_value = value_pattern; + else { + if (value_pattern[0] == '!') { + store.do_not_match = 1; + value_pattern++; + } else + store.do_not_match = 0; + + store.value_pattern = (regex_t*)xmalloc(sizeof(regex_t)); + if (regcomp(store.value_pattern, value_pattern, + REG_EXTENDED)) { + error(_("invalid pattern: %s"), value_pattern); + FREE_AND_NULL(store.value_pattern); + ret = CONFIG_INVALID_PATTERN; + goto out_free; + } + } + + ALLOC_GROW(store.parsed, 1, store.parsed_alloc); + store.parsed[0].end = 0; + + memset(&opts, 0, sizeof(opts)); + opts.event_fn = store_aux_event; + opts.event_fn_data = &store; + + /* + * After this, store.parsed will contain offsets of all the + * parsed elements, and store.seen will contain a list of + * matches, as indices into store.parsed. + * + * As a side effect, we make sure to transform only a valid + * existing config file. + */ + if (git_config_from_file_with_options(store_aux, + config_filename, + &store, &opts)) { + error(_("invalid config file %s"), config_filename); + ret = CONFIG_INVALID_FILE; + goto out_free; + } + + /* if nothing to unset, or too many matches, error out */ + if ((store.seen_nr == 0 && value == NULL) || + (store.seen_nr > 1 && !store.multi_replace)) { + ret = CONFIG_NOTHING_SET; + goto out_free; + } + + if (fstat(in_fd, &st) == -1) { + error_errno(_("fstat on %s failed"), config_filename); + ret = CONFIG_INVALID_FILE; + goto out_free; + } + + contents_sz = xsize_t(st.st_size); + contents = xmmap_gently(NULL, contents_sz, PROT_READ, + MAP_PRIVATE, in_fd, 0); + if (contents == MAP_FAILED) { + if (errno == ENODEV && S_ISDIR(st.st_mode)) + errno = EISDIR; + error_errno(_("unable to mmap '%s'%s"), + config_filename, mmap_os_err()); + ret = CONFIG_INVALID_FILE; + contents = NULL; + goto out_free; + } + close(in_fd); + in_fd = -1; + + if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) { + error_errno(_("chmod on %s failed"), get_lock_file_path(&lock)); + ret = CONFIG_NO_WRITE; + goto out_free; + } + + if (store.seen_nr == 0) { + if (!store.seen_alloc) { + /* Did not see key nor section */ + ALLOC_GROW(store.seen, 1, store.seen_alloc); + store.seen[0] = store.parsed_nr + - !!store.parsed_nr; + } + store.seen_nr = 1; + } + + for (i = 0, copy_begin = 0; i < store.seen_nr; i++) { + size_t replace_end; + int j = store.seen[i]; + + new_line = 0; + if (!store.key_seen) { + copy_end = store.parsed[j].end; + /* include '\n' when copying section header */ + if (copy_end > 0 && copy_end < contents_sz && + contents[copy_end - 1] != '\n' && + contents[copy_end] == '\n') + copy_end++; + replace_end = copy_end; + } else { + replace_end = store.parsed[j].end; + copy_end = store.parsed[j].begin; + if (!value) + maybe_remove_section(&store, + 
©_end, + &replace_end, &i); + /* + * Swallow preceding white-space on the same + * line. + */ + while (copy_end > 0 ) { + char c = contents[copy_end - 1]; + + if (isspace(c) && c != '\n') + copy_end--; + else + break; + } + } + + if (copy_end > 0 && contents[copy_end-1] != '\n') + new_line = 1; + + /* write the first part of the config */ + if (copy_end > copy_begin) { + if (write_in_full(fd, contents + copy_begin, + copy_end - copy_begin) < 0) + goto write_err_out; + if (new_line && + write_str_in_full(fd, "\n") < 0) + goto write_err_out; + } + copy_begin = replace_end; + } + + /* write the pair (value == NULL means unset) */ + if (value != NULL) { + if (!store.section_seen) { + if (write_section(fd, key, &store) < 0) + goto write_err_out; + } + if (write_pair(fd, key, value, &store) < 0) + goto write_err_out; + } + + /* write the rest of the config */ + if (copy_begin < contents_sz) + if (write_in_full(fd, contents + copy_begin, + contents_sz - copy_begin) < 0) + goto write_err_out; + + munmap(contents, contents_sz); + contents = NULL; + } + + if (commit_lock_file(&lock) < 0) { + error_errno(_("could not write config file %s"), config_filename); + ret = CONFIG_NO_WRITE; + goto out_free; + } + + ret = 0; + + /* Invalidate the config cache */ + git_config_clear(); + +out_free: + rollback_lock_file(&lock); + free(filename_buf); + if (contents) + munmap(contents, contents_sz); + if (in_fd >= 0) + close(in_fd); + config_store_data_clear(&store); + return ret; + +write_err_out: + ret = write_error(get_lock_file_path(&lock)); + goto out_free; + +} + +void git_config_set_multivar_in_file(const char *config_filename, + const char *key, const char *value, + const char *value_pattern, unsigned flags) +{ + if (!git_config_set_multivar_in_file_gently(config_filename, key, value, + value_pattern, flags)) + return; + if (value) + die(_("could not set '%s' to '%s'"), key, value); + else + die(_("could not unset '%s'"), key); +} + +int git_config_set_multivar_gently(const char *key, const char *value, + const char *value_pattern, unsigned flags) +{ + return git_config_set_multivar_in_file_gently(NULL, key, value, value_pattern, + flags); +} + +void git_config_set_multivar(const char *key, const char *value, + const char *value_pattern, unsigned flags) +{ + git_config_set_multivar_in_file(NULL, key, value, value_pattern, + flags); +} + +static size_t section_name_match (const char *buf, const char *name) +{ + size_t i = 0, j = 0; + int dot = 0; + if (buf[i] != '[') + return 0; + for (i = 1; buf[i] && buf[i] != ']'; i++) { + if (!dot && isspace(buf[i])) { + dot = 1; + if (name[j++] != '.') + break; + for (i++; isspace(buf[i]); i++) + ; /* do nothing */ + if (buf[i] != '"') + break; + continue; + } + if (buf[i] == '\\' && dot) + i++; + else if (buf[i] == '"' && dot) { + for (i++; isspace(buf[i]); i++) + ; /* do_nothing */ + break; + } + if (buf[i] != name[j++]) + break; + } + if (buf[i] == ']' && name[j] == 0) { + /* + * We match, now just find the right length offset by + * gobbling up any whitespace after it, as well + */ + i++; + for (; buf[i] && isspace(buf[i]); i++) + ; /* do nothing */ + return i; + } + return 0; +} + +static int section_name_is_ok(const char *name) +{ + /* Empty section names are bogus. */ + if (!*name) + return 0; + + /* + * Before a dot, we must be alphanumeric or dash. After the first dot, + * anything goes, so we can stop checking. 
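+	 *
+	 * For example, "branch" and "remote-hg" are accepted, "bad!name" is
+	 * rejected, and "branch.topic/wip" is accepted because everything
+	 * after the first dot is a subsection name and is not checked here.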
+ */ + for (; *name && *name != '.'; name++) + if (*name != '-' && !isalnum(*name)) + return 0; + return 1; +} + +/* if new_name == NULL, the section is removed instead */ +static int git_config_copy_or_rename_section_in_file(const char *config_filename, + const char *old_name, + const char *new_name, int copy) +{ + int ret = 0, remove = 0; + char *filename_buf = NULL; + struct lock_file lock = LOCK_INIT; + int out_fd; + struct strbuf buf = STRBUF_INIT; + FILE *config_file = NULL; + struct stat st; + struct strbuf copystr = STRBUF_INIT; + struct config_store_data store; + + memset(&store, 0, sizeof(store)); + + if (new_name && !section_name_is_ok(new_name)) { + ret = error(_("invalid section name: %s"), new_name); + goto out_no_rollback; + } + + if (!config_filename) + config_filename = filename_buf = git_pathdup("config"); + + out_fd = hold_lock_file_for_update(&lock, config_filename, 0); + if (out_fd < 0) { + ret = error(_("could not lock config file %s"), config_filename); + goto out; + } + + if (!(config_file = fopen(config_filename, "rb"))) { + ret = warn_on_fopen_errors(config_filename); + if (ret) + goto out; + /* no config file means nothing to rename, no error */ + goto commit_and_out; + } + + if (fstat(fileno(config_file), &st) == -1) { + ret = error_errno(_("fstat on %s failed"), config_filename); + goto out; + } + + if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) { + ret = error_errno(_("chmod on %s failed"), + get_lock_file_path(&lock)); + goto out; + } + + while (!strbuf_getwholeline(&buf, config_file, '\n')) { + size_t i, length; + int is_section = 0; + char *output = buf.buf; + for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++) + ; /* do nothing */ + if (buf.buf[i] == '[') { + /* it's a section */ + size_t offset; + is_section = 1; + + /* + * When encountering a new section under -c we + * need to flush out any section we're already + * coping and begin anew. There might be + * multiple [branch "$name"] sections. + */ + if (copystr.len > 0) { + if (write_in_full(out_fd, copystr.buf, copystr.len) < 0) { + ret = write_error(get_lock_file_path(&lock)); + goto out; + } + strbuf_reset(©str); + } + + offset = section_name_match(&buf.buf[i], old_name); + if (offset > 0) { + ret++; + if (new_name == NULL) { + remove = 1; + continue; + } + store.baselen = strlen(new_name); + if (!copy) { + if (write_section(out_fd, new_name, &store) < 0) { + ret = write_error(get_lock_file_path(&lock)); + goto out; + } + /* + * We wrote out the new section, with + * a newline, now skip the old + * section's length + */ + output += offset + i; + if (strlen(output) > 0) { + /* + * More content means there's + * a declaration to put on the + * next line; indent with a + * tab + */ + output -= 1; + output[0] = '\t'; + } + } else { + copystr = store_create_section(new_name, &store); + } + } + remove = 0; + } + if (remove) + continue; + length = strlen(output); + + if (!is_section && copystr.len > 0) { + strbuf_add(©str, output, length); + } + + if (write_in_full(out_fd, output, length) < 0) { + ret = write_error(get_lock_file_path(&lock)); + goto out; + } + } + + /* + * Copy a trailing section at the end of the config, won't be + * flushed by the usual "flush because we have a new section + * logic in the loop above. 
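+	 * (That flush only happens when the loop sees the next section
+	 * header, so a matching section that runs to end-of-file still has
+	 * its buffered copy pending at this point.)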
+ */ + if (copystr.len > 0) { + if (write_in_full(out_fd, copystr.buf, copystr.len) < 0) { + ret = write_error(get_lock_file_path(&lock)); + goto out; + } + strbuf_reset(©str); + } + + fclose(config_file); + config_file = NULL; +commit_and_out: + if (commit_lock_file(&lock) < 0) + ret = error_errno(_("could not write config file %s"), + config_filename); +out: + if (config_file) + fclose(config_file); + rollback_lock_file(&lock); +out_no_rollback: + free(filename_buf); + config_store_data_clear(&store); + strbuf_release(&buf); + return ret; +} + +int git_config_rename_section_in_file(const char *config_filename, + const char *old_name, const char *new_name) +{ + return git_config_copy_or_rename_section_in_file(config_filename, + old_name, new_name, 0); +} + +int git_config_rename_section(const char *old_name, const char *new_name) +{ + return git_config_rename_section_in_file(NULL, old_name, new_name); +} + +int git_config_copy_section_in_file(const char *config_filename, + const char *old_name, const char *new_name) +{ + return git_config_copy_or_rename_section_in_file(config_filename, + old_name, new_name, 1); +} + +int git_config_copy_section(const char *old_name, const char *new_name) +{ + return git_config_copy_section_in_file(NULL, old_name, new_name); +} + +/* + * Call this to report error for your variable that should not + * get a boolean value (i.e. "[my] var" means "true"). + */ +#undef config_error_nonbool +int config_error_nonbool(const char *var) +{ + return error(_("missing value for '%s'"), var); +} + +int parse_config_key(const char *var, + const char *section, + const char **subsection, size_t *subsection_len, + const char **key) +{ + const char *dot; + + /* Does it start with "section." ? */ + if (!skip_prefix(var, section, &var) || *var != '.') + return -1; + + /* + * Find the key; we don't know yet if we have a subsection, but we must + * parse backwards from the end, since the subsection may have dots in + * it, too. + */ + dot = strrchr(var, '.'); + *key = dot + 1; + + /* Did we have a subsection at all? */ + if (dot == var) { + if (subsection) { + *subsection = NULL; + *subsection_len = 0; + } + } + else { + if (!subsection) + return -1; + *subsection = var + 1; + *subsection_len = dot - *subsection; + } + + return 0; +} + +const char *current_config_origin_type(void) +{ + int type; + if (current_config_kvi) + type = current_config_kvi->origin_type; + else if(cf) + type = cf->origin_type; + else + BUG("current_config_origin_type called outside config callback"); + + switch (type) { + case CONFIG_ORIGIN_BLOB: + return "blob"; + case CONFIG_ORIGIN_FILE: + return "file"; + case CONFIG_ORIGIN_STDIN: + return "standard input"; + case CONFIG_ORIGIN_SUBMODULE_BLOB: + return "submodule-blob"; + case CONFIG_ORIGIN_CMDLINE: + return "command line"; + default: + BUG("unknown config origin type"); + } +} + +const char *config_scope_name(enum config_scope scope) +{ + switch (scope) { + case CONFIG_SCOPE_SYSTEM: + return "system"; + case CONFIG_SCOPE_GLOBAL: + return "global"; + case CONFIG_SCOPE_LOCAL: + return "local"; + case CONFIG_SCOPE_WORKTREE: + return "worktree"; + case CONFIG_SCOPE_COMMAND: + return "command"; + case CONFIG_SCOPE_SUBMODULE: + return "submodule"; + default: + return "unknown"; + } +} + +const char *current_config_name(void) +{ + const char *name; + if (current_config_kvi) + name = current_config_kvi->filename; + else if (cf) + name = cf->name; + else + BUG("current_config_name called outside config callback"); + return name ? 
name : ""; +} + +enum config_scope current_config_scope(void) +{ + if (current_config_kvi) + return current_config_kvi->scope; + else + return current_parsing_scope; +} + +int current_config_line(void) +{ + if (current_config_kvi) + return current_config_kvi->linenr; + else + return cf->linenr; +} + +int lookup_config(const char **mapping, int nr_mapping, const char *var) +{ + int i; + + for (i = 0; i < nr_mapping; i++) { + const char *name = mapping[i]; + + if (name && !strcasecmp(var, name)) + return i; + } + return -1; +} diff --git a/dir-iterator.c b/dir-iterator.c index b17e9f970a747a..3764dd81a185b2 100644 --- a/dir-iterator.c +++ b/dir-iterator.c @@ -203,7 +203,7 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags) { struct dir_iterator_int *iter = xcalloc(1, sizeof(*iter)); struct dir_iterator *dir_iterator = &iter->base; - int saved_errno; + int saved_errno, err; strbuf_init(&iter->base.path, PATH_MAX); strbuf_addstr(&iter->base.path, path); @@ -213,10 +213,15 @@ struct dir_iterator *dir_iterator_begin(const char *path, unsigned int flags) iter->flags = flags; /* - * Note: stat already checks for NULL or empty strings and - * inexistent paths. + * Note: stat/lstat already checks for NULL or empty strings and + * nonexistent paths. */ - if (stat(iter->base.path.buf, &iter->base.st) < 0) { + if (iter->flags & DIR_ITERATOR_FOLLOW_SYMLINKS) + err = stat(iter->base.path.buf, &iter->base.st); + else + err = lstat(iter->base.path.buf, &iter->base.st); + + if (err < 0) { saved_errno = errno; goto error_out; } diff --git a/dir-iterator.h b/dir-iterator.h index 08229157c63804..e3b6ff28007366 100644 --- a/dir-iterator.h +++ b/dir-iterator.h @@ -61,6 +61,11 @@ * not the symlinks themselves, which is the default behavior. Broken * symlinks are ignored. * + * Note: setting DIR_ITERATOR_FOLLOW_SYMLINKS affects resolving the + * starting path as well (e.g., attempting to iterate starting at a + * symbolic link pointing to a directory without FOLLOW_SYMLINKS will + * result in an error). + * * Warning: circular symlinks are also followed when * DIR_ITERATOR_FOLLOW_SYMLINKS is set. The iteration may end up with * an ELOOP if they happen and DIR_ITERATOR_PEDANTIC is set. diff --git a/git-compat-util.h b/git-compat-util.h index b46605300abf81..1345eae3b1b86f 100644 --- a/git-compat-util.h +++ b/git-compat-util.h @@ -127,7 +127,9 @@ /* Approximation of the length of the decimal representation of this type. 
*/ #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) -#if defined(__sun__) +#ifdef __MINGW64__ +#define _POSIX_C_SOURCE 1 +#elif defined(__sun__) /* * On Solaris, when _XOPEN_EXTENDED is set, its header file * forces the programs to be XPG4v2, defeating any _XOPEN_SOURCE @@ -395,6 +397,18 @@ static inline int git_offset_1st_component(const char *path) #define is_valid_path(path) 1 #endif +#ifndef is_path_owned_by_current_user +static inline int is_path_owned_by_current_uid(const char *path) +{ + struct stat st; + if (lstat(path, &st)) + return 0; + return st.st_uid == geteuid(); +} + +#define is_path_owned_by_current_user is_path_owned_by_current_uid +#endif + #ifndef find_last_dir_sep static inline char *git_find_last_dir_sep(const char *path) { @@ -859,6 +873,14 @@ static inline size_t st_sub(size_t a, size_t b) return a - b; } +static inline int cast_size_t_to_int(size_t a) +{ + if (a > INT_MAX) + die("number too large to represent as int on this platform: %"PRIuMAX, + (uintmax_t)a); + return (int)a; +} + #ifdef HAVE_ALLOCA_H # include # define xalloca(size) (alloca(size)) diff --git a/git-compat-util.h.orig b/git-compat-util.h.orig new file mode 100644 index 00000000000000..dc52aeaf55e110 --- /dev/null +++ b/git-compat-util.h.orig @@ -0,0 +1,1398 @@ +#ifndef GIT_COMPAT_UTIL_H +#define GIT_COMPAT_UTIL_H + +#ifdef USE_MSVC_CRTDBG +/* + * For these to work they must appear very early in each + * file -- before most of the standard header files. + */ +#include +#include +#endif + +#define _FILE_OFFSET_BITS 64 + + +/* Derived from Linux "Features Test Macro" header + * Convenience macros to test the versions of gcc (or + * a compatible compiler). + * Use them like this: + * #if GIT_GNUC_PREREQ (2,8) + * ... code requiring gcc 2.8 or later ... + * #endif +*/ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define GIT_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else + #define GIT_GNUC_PREREQ(maj, min) 0 +#endif + + +#ifndef FLEX_ARRAY +/* + * See if our compiler is known to support flexible array members. + */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && (!defined(__SUNPRO_C) || (__SUNPRO_C > 0x580)) +# define FLEX_ARRAY /* empty */ +#elif defined(__GNUC__) +# if (__GNUC__ >= 3) +# define FLEX_ARRAY /* empty */ +# else +# define FLEX_ARRAY 0 /* older GNU extension */ +# endif +#endif + +/* + * Otherwise, default to safer but a bit wasteful traditional style + */ +#ifndef FLEX_ARRAY +# define FLEX_ARRAY 1 +#endif +#endif + + +/* + * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression. + * @cond: the compile-time condition which must be true. + * + * Your compile will fail if the condition isn't true, or can't be evaluated + * by the compiler. This can be used in an expression: its value is "0". + * + * Example: + * #define foo_to_char(foo) \ + * ((char *)(foo) \ + * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0)) + */ +#define BUILD_ASSERT_OR_ZERO(cond) \ + (sizeof(char [1 - 2*!(cond)]) - 1) + +#if GIT_GNUC_PREREQ(3, 1) + /* &arr[0] degrades to a pointer: a different type from an array */ +# define BARF_UNLESS_AN_ARRAY(arr) \ + BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(__typeof__(arr), \ + __typeof__(&(arr)[0]))) +#else +# define BARF_UNLESS_AN_ARRAY(arr) 0 +#endif +/* + * ARRAY_SIZE - get the number of elements in a visible array + * @x: the array whose size you want. + * + * This does not work on pointers, or arrays declared as [], or + * function parameters. 
With correct compiler support, such usage + * will cause a build error (see the build_assert_or_zero macro). + */ +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]) + BARF_UNLESS_AN_ARRAY(x)) + +#define bitsizeof(x) (CHAR_BIT * sizeof(x)) + +#define maximum_signed_value_of_type(a) \ + (INTMAX_MAX >> (bitsizeof(intmax_t) - bitsizeof(a))) + +#define maximum_unsigned_value_of_type(a) \ + (UINTMAX_MAX >> (bitsizeof(uintmax_t) - bitsizeof(a))) + +/* + * Signed integer overflow is undefined in C, so here's a helper macro + * to detect if the sum of two integers will overflow. + * + * Requires: a >= 0, typeof(a) equals typeof(b) + */ +#define signed_add_overflows(a, b) \ + ((b) > maximum_signed_value_of_type(a) - (a)) + +#define unsigned_add_overflows(a, b) \ + ((b) > maximum_unsigned_value_of_type(a) - (a)) + +/* + * Returns true if the multiplication of "a" and "b" will + * overflow. The types of "a" and "b" must match and must be unsigned. + * Note that this macro evaluates "a" twice! + */ +#define unsigned_mult_overflows(a, b) \ + ((a) && (b) > maximum_unsigned_value_of_type(a) / (a)) + +#ifdef __GNUC__ +#define TYPEOF(x) (__typeof__(x)) +#else +#define TYPEOF(x) +#endif + +#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (bitsizeof(x) - (bits)))) +#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +/* Approximation of the length of the decimal representation of this type. */ +#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) + +#ifdef __MINGW64__ +#define _POSIX_C_SOURCE 1 +#elif defined(__sun__) + /* + * On Solaris, when _XOPEN_EXTENDED is set, its header file + * forces the programs to be XPG4v2, defeating any _XOPEN_SOURCE + * setting to say we are XPG5 or XPG6. Also on Solaris, + * XPG6 programs must be compiled with a c99 compiler, while + * non XPG6 programs must be compiled with a pre-c99 compiler. 
+ */ +# if __STDC_VERSION__ - 0 >= 199901L +# define _XOPEN_SOURCE 600 +# else +# define _XOPEN_SOURCE 500 +# endif +#elif !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && \ + !defined(_M_UNIX) && !defined(__sgi) && !defined(__DragonFly__) && \ + !defined(__TANDEM) && !defined(__QNX__) && !defined(__MirBSD__) && \ + !defined(__CYGWIN__) +#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */ +#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */ +#endif +#define _ALL_SOURCE 1 +#define _GNU_SOURCE 1 +#define _BSD_SOURCE 1 +#define _DEFAULT_SOURCE 1 +#define _NETBSD_SOURCE 1 +#define _SGI_SOURCE 1 + +#if defined(WIN32) && !defined(__CYGWIN__) /* Both MinGW and MSVC */ +# if !defined(_WIN32_WINNT) +# define _WIN32_WINNT 0x0600 +# endif +#define WIN32_LEAN_AND_MEAN /* stops windows.h including winsock.h */ +#include +#include +#define GIT_WINDOWS_NATIVE +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_STRINGS_H +#include /* for strcasecmp() */ +#endif +#include +#include +#ifdef NEEDS_SYS_PARAM_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(NO_POLL_H) +#include +#elif !defined(NO_SYS_POLL_H) +#include +#else +/* Pull the compat stuff */ +#include +#endif +#ifdef HAVE_BSD_SYSCTL +#include +#endif + +#if defined(__CYGWIN__) +#include "compat/win32/path-utils.h" +#endif +#if defined(__MINGW32__) +/* pull in Windows compatibility stuff */ +#include "compat/win32/path-utils.h" +#include "compat/mingw.h" +#elif defined(_MSC_VER) +#include "compat/win32/path-utils.h" +#include "compat/msvc.h" +#else +#include +#include +#include +#include +#include +#include +#ifndef NO_SYS_SELECT_H +#include +#endif +#include +#include +#include +#include +#include +#include +#ifndef NO_INTTYPES_H +#include +#else +#include +#endif +#ifdef NO_INTPTR_T +/* + * On I16LP32, ILP32 and LP64 "long" is the safe bet, however + * on LLP86, IL33LLP64 and P64 it needs to be "long long", + * while on IP16 and IP16L32 it is "int" (resp. "short") + * Size needs to match (or exceed) 'sizeof(void *)'. + * We can't take "long long" here as not everybody has it. + */ +typedef long intptr_t; +typedef unsigned long uintptr_t; +#endif +#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. 
*/ +#include +#define _ALL_SOURCE 1 +#endif + +/* used on Mac OS X */ +#ifdef PRECOMPOSE_UNICODE +#include "compat/precompose_utf8.h" +#else +static inline const char *precompose_argv_prefix(int argc, const char **argv, const char *prefix) +{ + return prefix; +} +static inline const char *precompose_string_if_needed(const char *in) +{ + return in; +} + +#define probe_utf8_pathname_composition() +#endif + +#ifdef MKDIR_WO_TRAILING_SLASH +#define mkdir(a,b) compat_mkdir_wo_trailing_slash((a),(b)) +int compat_mkdir_wo_trailing_slash(const char*, mode_t); +#endif + +#ifdef NO_STRUCT_ITIMERVAL +struct itimerval { + struct timeval it_interval; + struct timeval it_value; +}; +#endif + +#ifdef NO_SETITIMER +static inline int setitimer(int which, const struct itimerval *value, struct itimerval *newvalue) { + return 0; /* pretend success */ +} +#endif + +#ifndef NO_LIBGEN_H +#include +#else +#define basename gitbasename +char *gitbasename(char *); +#define dirname gitdirname +char *gitdirname(char *); +#endif + +#ifndef NO_ICONV +#include +#endif + +#ifndef NO_OPENSSL +#ifdef __APPLE__ +#define __AVAILABILITY_MACROS_USES_AVAILABILITY 0 +#include +#undef DEPRECATED_ATTRIBUTE +#define DEPRECATED_ATTRIBUTE +#undef __AVAILABILITY_MACROS_USES_AVAILABILITY +#endif +#include +#include +#endif + +#ifdef HAVE_SYSINFO +# include +#endif + +/* On most systems would have given us this, but + * not on some systems (e.g. z/OS). + */ +#ifndef NI_MAXHOST +#define NI_MAXHOST 1025 +#endif + +#ifndef NI_MAXSERV +#define NI_MAXSERV 32 +#endif + +/* On most systems would have given us this, but + * not on some systems (e.g. GNU/Hurd). + */ +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +typedef uintmax_t timestamp_t; +#define PRItime PRIuMAX +#define parse_timestamp strtoumax +#define TIME_MAX UINTMAX_MAX +#define TIME_MIN 0 + +#ifndef PATH_SEP +#define PATH_SEP ':' +#endif + +#ifdef HAVE_PATHS_H +#include +#endif +#ifndef _PATH_DEFPATH +#define _PATH_DEFPATH "/usr/local/bin:/usr/bin:/bin" +#endif + +#ifndef platform_core_config +static inline int noop_core_config(const char *var, const char *value, void *cb) +{ + return 0; +} +#define platform_core_config noop_core_config +#endif + +int lstat_cache_aware_rmdir(const char *path); +#if !defined(__MINGW32__) && !defined(_MSC_VER) +#define rmdir lstat_cache_aware_rmdir +#endif + +#ifndef has_dos_drive_prefix +static inline int git_has_dos_drive_prefix(const char *path) +{ + return 0; +} +#define has_dos_drive_prefix git_has_dos_drive_prefix +#endif + +#ifndef skip_dos_drive_prefix +static inline int git_skip_dos_drive_prefix(char **path) +{ + return 0; +} +#define skip_dos_drive_prefix git_skip_dos_drive_prefix +#endif + +#ifndef is_dir_sep +static inline int git_is_dir_sep(int c) +{ + return c == '/'; +} +#define is_dir_sep git_is_dir_sep +#endif + +#ifndef offset_1st_component +static inline int git_offset_1st_component(const char *path) +{ + return is_dir_sep(path[0]); +} +#define offset_1st_component git_offset_1st_component +#endif + +#ifndef is_valid_path +#define is_valid_path(path) 1 +#endif + +#ifndef is_path_owned_by_current_user +static inline int is_path_owned_by_current_uid(const char *path) +{ + struct stat st; + if (lstat(path, &st)) + return 0; + return st.st_uid == geteuid(); +} + +#define is_path_owned_by_current_user is_path_owned_by_current_uid +#endif + +#ifndef find_last_dir_sep +static inline char *git_find_last_dir_sep(const char *path) +{ + return strrchr(path, '/'); +} +#define find_last_dir_sep git_find_last_dir_sep +#endif + +#ifndef 
has_dir_sep +static inline int git_has_dir_sep(const char *path) +{ + return !!strchr(path, '/'); +} +#define has_dir_sep(path) git_has_dir_sep(path) +#endif + +#ifndef query_user_email +#define query_user_email() NULL +#endif + +#ifdef __TANDEM +#include +#include +#ifndef NSIG +/* + * NonStop NSE and NSX do not provide NSIG. SIGGUARDIAN(99) is the highest + * known, by detective work using kill -l as a list is all signals + * instead of signal.h where it should be. + */ +# define NSIG 100 +#endif +#endif + +#if defined(__HP_cc) && (__HP_cc >= 61000) +#define NORETURN __attribute__((noreturn)) +#define NORETURN_PTR +#elif defined(__GNUC__) && !defined(NO_NORETURN) +#define NORETURN __attribute__((__noreturn__)) +#define NORETURN_PTR __attribute__((__noreturn__)) +#elif defined(_MSC_VER) +#define NORETURN __declspec(noreturn) +#define NORETURN_PTR +#else +#define NORETURN +#define NORETURN_PTR +#ifndef __GNUC__ +#ifndef __attribute__ +#define __attribute__(x) +#endif +#endif +#endif + +/* The sentinel attribute is valid from gcc version 4.0 */ +#if defined(__GNUC__) && (__GNUC__ >= 4) +#define LAST_ARG_MUST_BE_NULL __attribute__((sentinel)) +#else +#define LAST_ARG_MUST_BE_NULL +#endif + +#define MAYBE_UNUSED __attribute__((__unused__)) + +#include "compat/bswap.h" + +#include "wildmatch.h" + +struct strbuf; + +/* General helper functions */ +void vreportf(const char *prefix, const char *err, va_list params); +NORETURN void usage(const char *err); +NORETURN void usagef(const char *err, ...) __attribute__((format (printf, 1, 2))); +NORETURN void die(const char *err, ...) __attribute__((format (printf, 1, 2))); +NORETURN void die_errno(const char *err, ...) __attribute__((format (printf, 1, 2))); +int error(const char *err, ...) __attribute__((format (printf, 1, 2))); +int error_errno(const char *err, ...) __attribute__((format (printf, 1, 2))); +void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); +void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2))); + +#ifndef NO_OPENSSL +#ifdef APPLE_COMMON_CRYPTO +#include "compat/apple-common-crypto.h" +#else +#include +#include +#endif /* APPLE_COMMON_CRYPTO */ +#include +#endif /* NO_OPENSSL */ + +/* + * Let callers be aware of the constant return value; this can help + * gcc with -Wuninitialized analysis. We restrict this trick to gcc, though, + * because some compilers may not support variadic macros. Since we're only + * trying to help gcc, anyway, it's OK; other compilers will fall back to + * using the function as usual. + */ +#if defined(__GNUC__) +static inline int const_error(void) +{ + return -1; +} +#define error(...) (error(__VA_ARGS__), const_error()) +#define error_errno(...) (error_errno(__VA_ARGS__), const_error()) +#endif + +typedef void (*report_fn)(const char *, va_list params); + +void set_die_routine(NORETURN_PTR report_fn routine); +void set_error_routine(report_fn routine); +report_fn get_error_routine(void); +void set_warn_routine(report_fn routine); +report_fn get_warn_routine(void); +void set_die_is_recursing_routine(int (*routine)(void)); + +int starts_with(const char *str, const char *prefix); +int istarts_with(const char *str, const char *prefix); + +/* + * If the string "str" begins with the string found in "prefix", return 1. + * The "out" parameter is set to "str + strlen(prefix)" (i.e., to the point in + * the string right after the prefix). + * + * Otherwise, return 0 and leave "out" untouched. 
+ * + * Examples: + * + * [extract branch name, fail if not a branch] + * if (!skip_prefix(ref, "refs/heads/", &branch) + * return -1; + * + * [skip prefix if present, otherwise use whole string] + * skip_prefix(name, "refs/heads/", &name); + */ +static inline int skip_prefix(const char *str, const char *prefix, + const char **out) +{ + do { + if (!*prefix) { + *out = str; + return 1; + } + } while (*str++ == *prefix++); + return 0; +} + +/* + * If the string "str" is the same as the string in "prefix", then the "arg" + * parameter is set to the "def" parameter and 1 is returned. + * If the string "str" begins with the string found in "prefix" and then a + * "=" sign, then the "arg" parameter is set to "str + strlen(prefix) + 1" + * (i.e., to the point in the string right after the prefix and the "=" sign), + * and 1 is returned. + * + * Otherwise, return 0 and leave "arg" untouched. + * + * When we accept both a "--key" and a "--key=" option, this function + * can be used instead of !strcmp(arg, "--key") and then + * skip_prefix(arg, "--key=", &arg) to parse such an option. + */ +int skip_to_optional_arg_default(const char *str, const char *prefix, + const char **arg, const char *def); + +static inline int skip_to_optional_arg(const char *str, const char *prefix, + const char **arg) +{ + return skip_to_optional_arg_default(str, prefix, arg, ""); +} + +/* + * Like skip_prefix, but promises never to read past "len" bytes of the input + * buffer, and returns the remaining number of bytes in "out" via "outlen". + */ +static inline int skip_prefix_mem(const char *buf, size_t len, + const char *prefix, + const char **out, size_t *outlen) +{ + size_t prefix_len = strlen(prefix); + if (prefix_len <= len && !memcmp(buf, prefix, prefix_len)) { + *out = buf + prefix_len; + *outlen = len - prefix_len; + return 1; + } + return 0; +} + +/* + * If buf ends with suffix, return 1 and subtract the length of the suffix + * from *len. Otherwise, return 0 and leave *len untouched. + */ +static inline int strip_suffix_mem(const char *buf, size_t *len, + const char *suffix) +{ + size_t suflen = strlen(suffix); + if (*len < suflen || memcmp(buf + (*len - suflen), suffix, suflen)) + return 0; + *len -= suflen; + return 1; +} + +/* + * If str ends with suffix, return 1 and set *len to the size of the string + * without the suffix. Otherwise, return 0 and set *len to the size of the + * string. + * + * Note that we do _not_ NUL-terminate str to the new length. 
+ */ +static inline int strip_suffix(const char *str, const char *suffix, size_t *len) +{ + *len = strlen(str); + return strip_suffix_mem(str, len, suffix); +} + +static inline int ends_with(const char *str, const char *suffix) +{ + size_t len; + return strip_suffix(str, suffix, &len); +} + +#define SWAP(a, b) do { \ + void *_swap_a_ptr = &(a); \ + void *_swap_b_ptr = &(b); \ + unsigned char _swap_buffer[sizeof(a)]; \ + memcpy(_swap_buffer, _swap_a_ptr, sizeof(a)); \ + memcpy(_swap_a_ptr, _swap_b_ptr, sizeof(a) + \ + BUILD_ASSERT_OR_ZERO(sizeof(a) == sizeof(b))); \ + memcpy(_swap_b_ptr, _swap_buffer, sizeof(a)); \ +} while (0) + +#if defined(NO_MMAP) || defined(USE_WIN32_MMAP) + +#ifndef PROT_READ +#define PROT_READ 1 +#define PROT_WRITE 2 +#define MAP_PRIVATE 1 +#endif + +#define mmap git_mmap +#define munmap git_munmap +void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +int git_munmap(void *start, size_t length); + +#else /* NO_MMAP || USE_WIN32_MMAP */ + +#include + +#endif /* NO_MMAP || USE_WIN32_MMAP */ + +#ifdef NO_MMAP + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024) + +#else /* NO_MMAP */ + +/* This value must be multiple of (pagesize * 2) */ +#define DEFAULT_PACKED_GIT_WINDOW_SIZE \ + (sizeof(void*) >= 8 \ + ? 1 * 1024 * 1024 * 1024 \ + : 32 * 1024 * 1024) + +#endif /* NO_MMAP */ + +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT +#define on_disk_bytes(st) ((st).st_size) +#else +#define on_disk_bytes(st) ((st).st_blocks * 512) +#endif + +#ifdef NEEDS_MODE_TRANSLATION +#undef S_IFMT +#undef S_IFREG +#undef S_IFDIR +#undef S_IFLNK +#undef S_IFBLK +#undef S_IFCHR +#undef S_IFIFO +#undef S_IFSOCK +#define S_IFMT 0170000 +#define S_IFREG 0100000 +#define S_IFDIR 0040000 +#define S_IFLNK 0120000 +#define S_IFBLK 0060000 +#define S_IFCHR 0020000 +#define S_IFIFO 0010000 +#define S_IFSOCK 0140000 +#ifdef stat +#undef stat +#endif +#define stat(path, buf) git_stat(path, buf) +int git_stat(const char *, struct stat *); +#ifdef fstat +#undef fstat +#endif +#define fstat(fd, buf) git_fstat(fd, buf) +int git_fstat(int, struct stat *); +#ifdef lstat +#undef lstat +#endif +#define lstat(path, buf) git_lstat(path, buf) +int git_lstat(const char *, struct stat *); +#endif + +#define DEFAULT_PACKED_GIT_LIMIT \ + ((1024L * 1024L) * (size_t)(sizeof(void*) >= 8 ? (32 * 1024L * 1024L) : 256)) + +#ifdef NO_PREAD +#define pread git_pread +ssize_t git_pread(int fd, void *buf, size_t count, off_t offset); +#endif +/* + * Forward decl that will remind us if its twin in cache.h changes. + * This function is used in compat/pread.c. But we can't include + * cache.h there. 
+ */ +ssize_t read_in_full(int fd, void *buf, size_t count); + +#ifdef NO_SETENV +#define setenv gitsetenv +int gitsetenv(const char *, const char *, int); +#endif + +#ifdef NO_MKDTEMP +#define mkdtemp gitmkdtemp +char *gitmkdtemp(char *); +#endif + +#ifdef NO_UNSETENV +#define unsetenv gitunsetenv +void gitunsetenv(const char *); +#endif + +#ifdef NO_STRCASESTR +#define strcasestr gitstrcasestr +char *gitstrcasestr(const char *haystack, const char *needle); +#endif + +#ifdef NO_STRLCPY +#define strlcpy gitstrlcpy +size_t gitstrlcpy(char *, const char *, size_t); +#endif + +#ifdef NO_STRTOUMAX +#define strtoumax gitstrtoumax +uintmax_t gitstrtoumax(const char *, char **, int); +#define strtoimax gitstrtoimax +intmax_t gitstrtoimax(const char *, char **, int); +#endif + +#ifdef NO_HSTRERROR +#define hstrerror githstrerror +const char *githstrerror(int herror); +#endif + +#ifdef NO_MEMMEM +#define memmem gitmemmem +void *gitmemmem(const void *haystack, size_t haystacklen, + const void *needle, size_t needlelen); +#endif + +#ifdef OVERRIDE_STRDUP +#ifdef strdup +#undef strdup +#endif +#define strdup gitstrdup +char *gitstrdup(const char *s); +#endif + +#ifdef NO_GETPAGESIZE +#define getpagesize() sysconf(_SC_PAGESIZE) +#endif + +#ifndef O_CLOEXEC +#define O_CLOEXEC 0 +#endif + +#ifdef FREAD_READS_DIRECTORIES +# if !defined(SUPPRESS_FOPEN_REDEFINITION) +# ifdef fopen +# undef fopen +# endif +# define fopen(a,b) git_fopen(a,b) +# endif +FILE *git_fopen(const char*, const char*); +#endif + +#ifdef SNPRINTF_RETURNS_BOGUS +#ifdef snprintf +#undef snprintf +#endif +#define snprintf git_snprintf +int git_snprintf(char *str, size_t maxsize, + const char *format, ...); +#ifdef vsnprintf +#undef vsnprintf +#endif +#define vsnprintf git_vsnprintf +int git_vsnprintf(char *str, size_t maxsize, + const char *format, va_list ap); +#endif + +#ifdef OPEN_RETURNS_EINTR +#undef open +#define open git_open_with_retry +int git_open_with_retry(const char *path, int flag, ...); +#endif + +#ifdef __GLIBC_PREREQ +#if __GLIBC_PREREQ(2, 1) +#define HAVE_STRCHRNUL +#endif +#endif + +#ifndef HAVE_STRCHRNUL +#define strchrnul gitstrchrnul +static inline char *gitstrchrnul(const char *s, int c) +{ + while (*s && *s != c) + s++; + return (char *)s; +} +#endif + +#ifdef NO_INET_PTON +int inet_pton(int af, const char *src, void *dst); +#endif + +#ifdef NO_INET_NTOP +const char *inet_ntop(int af, const void *src, char *dst, size_t size); +#endif + +#ifdef NO_PTHREADS +#define atexit git_atexit +int git_atexit(void (*handler)(void)); +#endif + +static inline size_t st_add(size_t a, size_t b) +{ + if (unsigned_add_overflows(a, b)) + die("size_t overflow: %"PRIuMAX" + %"PRIuMAX, + (uintmax_t)a, (uintmax_t)b); + return a + b; +} +#define st_add3(a,b,c) st_add(st_add((a),(b)),(c)) +#define st_add4(a,b,c,d) st_add(st_add3((a),(b),(c)),(d)) + +static inline size_t st_mult(size_t a, size_t b) +{ + if (unsigned_mult_overflows(a, b)) + die("size_t overflow: %"PRIuMAX" * %"PRIuMAX, + (uintmax_t)a, (uintmax_t)b); + return a * b; +} + +static inline size_t st_sub(size_t a, size_t b) +{ + if (a < b) + die("size_t underflow: %"PRIuMAX" - %"PRIuMAX, + (uintmax_t)a, (uintmax_t)b); + return a - b; +} + +#ifdef HAVE_ALLOCA_H +# include +# define xalloca(size) (alloca(size)) +# define xalloca_free(p) do {} while (0) +#else +# define xalloca(size) (xmalloc(size)) +# define xalloca_free(p) (free(p)) +#endif +char *xstrdup(const char *str); +void *xmalloc(size_t size); +void *xmallocz(size_t size); +void *xmallocz_gently(size_t size); +void 
*xmemdupz(const void *data, size_t len); +char *xstrndup(const char *str, size_t len); +void *xrealloc(void *ptr, size_t size); +void *xcalloc(size_t nmemb, size_t size); +void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); +const char *mmap_os_err(void); +void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset); +int xopen(const char *path, int flags, ...); +ssize_t xread(int fd, void *buf, size_t len); +ssize_t xwrite(int fd, const void *buf, size_t len); +ssize_t xpread(int fd, void *buf, size_t len, off_t offset); +int xdup(int fd); +FILE *xfopen(const char *path, const char *mode); +FILE *xfdopen(int fd, const char *mode); +int xmkstemp(char *temp_filename); +int xmkstemp_mode(char *temp_filename, int mode); +char *xgetcwd(void); +FILE *fopen_for_writing(const char *path); +FILE *fopen_or_warn(const char *path, const char *mode); + +/* + * Like strncmp, but only return zero if s is NUL-terminated and exactly len + * characters long. If it is not, consider it greater than t. + */ +int xstrncmpz(const char *s, const char *t, size_t len); + +/* + * FREE_AND_NULL(ptr) is like free(ptr) followed by ptr = NULL. Note + * that ptr is used twice, so don't pass e.g. ptr++. + */ +#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0) + +#define ALLOC_ARRAY(x, alloc) (x) = xmalloc(st_mult(sizeof(*(x)), (alloc))) +#define CALLOC_ARRAY(x, alloc) (x) = xcalloc((alloc), sizeof(*(x))) +#define REALLOC_ARRAY(x, alloc) (x) = xrealloc((x), st_mult(sizeof(*(x)), (alloc))) + +#define COPY_ARRAY(dst, src, n) copy_array((dst), (src), (n), sizeof(*(dst)) + \ + BUILD_ASSERT_OR_ZERO(sizeof(*(dst)) == sizeof(*(src)))) +static inline void copy_array(void *dst, const void *src, size_t n, size_t size) +{ + if (n) + memcpy(dst, src, st_mult(size, n)); +} + +#define MOVE_ARRAY(dst, src, n) move_array((dst), (src), (n), sizeof(*(dst)) + \ + BUILD_ASSERT_OR_ZERO(sizeof(*(dst)) == sizeof(*(src)))) +static inline void move_array(void *dst, const void *src, size_t n, size_t size) +{ + if (n) + memmove(dst, src, st_mult(size, n)); +} + +/* + * These functions help you allocate structs with flex arrays, and copy + * the data directly into the array. For example, if you had: + * + * struct foo { + * int bar; + * char name[FLEX_ARRAY]; + * }; + * + * you can do: + * + * struct foo *f; + * FLEX_ALLOC_MEM(f, name, src, len); + * + * to allocate a "foo" with the contents of "src" in the "name" field. + * The resulting struct is automatically zero'd, and the flex-array field + * is NUL-terminated (whether the incoming src buffer was or not). + * + * The FLEXPTR_* variants operate on structs that don't use flex-arrays, + * but do want to store a pointer to some extra data in the same allocated + * block. For example, if you have: + * + * struct foo { + * char *name; + * int bar; + * }; + * + * you can do: + * + * struct foo *f; + * FLEXPTR_ALLOC_STR(f, name, src); + * + * and "name" will point to a block of memory after the struct, which will be + * freed along with the struct (but the pointer can be repointed anywhere). + * + * The *_STR variants accept a string parameter rather than a ptr/len + * combination. + * + * Note that these macros will evaluate the first parameter multiple + * times, and it must be assignable as an lvalue. 
+ */ +#define FLEX_ALLOC_MEM(x, flexname, buf, len) do { \ + size_t flex_array_len_ = (len); \ + (x) = xcalloc(1, st_add3(sizeof(*(x)), flex_array_len_, 1)); \ + memcpy((void *)(x)->flexname, (buf), flex_array_len_); \ +} while (0) +#define FLEXPTR_ALLOC_MEM(x, ptrname, buf, len) do { \ + size_t flex_array_len_ = (len); \ + (x) = xcalloc(1, st_add3(sizeof(*(x)), flex_array_len_, 1)); \ + memcpy((x) + 1, (buf), flex_array_len_); \ + (x)->ptrname = (void *)((x)+1); \ +} while(0) +#define FLEX_ALLOC_STR(x, flexname, str) \ + FLEX_ALLOC_MEM((x), flexname, (str), strlen(str)) +#define FLEXPTR_ALLOC_STR(x, ptrname, str) \ + FLEXPTR_ALLOC_MEM((x), ptrname, (str), strlen(str)) + +static inline char *xstrdup_or_null(const char *str) +{ + return str ? xstrdup(str) : NULL; +} + +static inline size_t xsize_t(off_t len) +{ + if (len < 0 || (uintmax_t) len > SIZE_MAX) + die("Cannot handle files this big"); + return (size_t) len; +} + +__attribute__((format (printf, 3, 4))) +int xsnprintf(char *dst, size_t max, const char *fmt, ...); + +#ifndef HOST_NAME_MAX +#define HOST_NAME_MAX 256 +#endif + +int xgethostname(char *buf, size_t len); + +/* in ctype.c, for kwset users */ +extern const unsigned char tolower_trans_tbl[256]; + +/* Sane ctype - no locale, and works with signed chars */ +#undef isascii +#undef isspace +#undef isdigit +#undef isalpha +#undef isalnum +#undef isprint +#undef islower +#undef isupper +#undef tolower +#undef toupper +#undef iscntrl +#undef ispunct +#undef isxdigit + +extern const unsigned char sane_ctype[256]; +#define GIT_SPACE 0x01 +#define GIT_DIGIT 0x02 +#define GIT_ALPHA 0x04 +#define GIT_GLOB_SPECIAL 0x08 +#define GIT_REGEX_SPECIAL 0x10 +#define GIT_PATHSPEC_MAGIC 0x20 +#define GIT_CNTRL 0x40 +#define GIT_PUNCT 0x80 +#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) +#define isascii(x) (((x) & ~0x7f) == 0) +#define isspace(x) sane_istest(x,GIT_SPACE) +#define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isalpha(x) sane_istest(x,GIT_ALPHA) +#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) +#define isprint(x) ((x) >= 0x20 && (x) <= 0x7e) +#define islower(x) sane_iscase(x, 1) +#define isupper(x) sane_iscase(x, 0) +#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) +#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL) +#define iscntrl(x) (sane_istest(x,GIT_CNTRL)) +#define ispunct(x) sane_istest(x, GIT_PUNCT | GIT_REGEX_SPECIAL | \ + GIT_GLOB_SPECIAL | GIT_PATHSPEC_MAGIC) +#define isxdigit(x) (hexval_table[(unsigned char)(x)] != -1) +#define tolower(x) sane_case((unsigned char)(x), 0x20) +#define toupper(x) sane_case((unsigned char)(x), 0) +#define is_pathspec_magic(x) sane_istest(x,GIT_PATHSPEC_MAGIC) + +static inline int sane_case(int x, int high) +{ + if (sane_istest(x, GIT_ALPHA)) + x = (x & ~0x20) | high; + return x; +} + +static inline int sane_iscase(int x, int is_lower) +{ + if (!sane_istest(x, GIT_ALPHA)) + return 0; + + if (is_lower) + return (x & 0x20) != 0; + else + return (x & 0x20) == 0; +} + +/* + * Like skip_prefix, but compare case-insensitively. Note that the comparison + * is done via tolower(), so it is strictly ASCII (no multi-byte characters or + * locale-specific conversions). 
+ */ +static inline int skip_iprefix(const char *str, const char *prefix, + const char **out) +{ + do { + if (!*prefix) { + *out = str; + return 1; + } + } while (tolower(*str++) == tolower(*prefix++)); + return 0; +} + +static inline int strtoul_ui(char const *s, int base, unsigned int *result) +{ + unsigned long ul; + char *p; + + errno = 0; + /* negative values would be accepted by strtoul */ + if (strchr(s, '-')) + return -1; + ul = strtoul(s, &p, base); + if (errno || *p || p == s || (unsigned int) ul != ul) + return -1; + *result = ul; + return 0; +} + +static inline int strtol_i(char const *s, int base, int *result) +{ + long ul; + char *p; + + errno = 0; + ul = strtol(s, &p, base); + if (errno || *p || p == s || (int) ul != ul) + return -1; + *result = ul; + return 0; +} + +void git_stable_qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)); +#ifdef INTERNAL_QSORT +#define qsort git_stable_qsort +#endif + +#define QSORT(base, n, compar) sane_qsort((base), (n), sizeof(*(base)), compar) +static inline void sane_qsort(void *base, size_t nmemb, size_t size, + int(*compar)(const void *, const void *)) +{ + if (nmemb > 1) + qsort(base, nmemb, size, compar); +} + +#define STABLE_QSORT(base, n, compar) \ + git_stable_qsort((base), (n), sizeof(*(base)), compar) + +#ifndef HAVE_ISO_QSORT_S +int git_qsort_s(void *base, size_t nmemb, size_t size, + int (*compar)(const void *, const void *, void *), void *ctx); +#define qsort_s git_qsort_s +#endif + +#define QSORT_S(base, n, compar, ctx) do { \ + if (qsort_s((base), (n), sizeof(*(base)), compar, ctx)) \ + BUG("qsort_s() failed"); \ +} while (0) + +#ifndef REG_STARTEND +#error "Git requires REG_STARTEND support. Compile with NO_REGEX=NeedsStartEnd" +#endif + +static inline int regexec_buf(const regex_t *preg, const char *buf, size_t size, + size_t nmatch, regmatch_t pmatch[], int eflags) +{ + assert(nmatch > 0 && pmatch); + pmatch[0].rm_so = 0; + pmatch[0].rm_eo = size; + return regexec(preg, buf, nmatch, pmatch, eflags | REG_STARTEND); +} + +#ifndef DIR_HAS_BSD_GROUP_SEMANTICS +# define FORCE_DIR_SET_GID S_ISGID +#else +# define FORCE_DIR_SET_GID 0 +#endif + +#ifdef NO_NSEC +#undef USE_NSEC +#define ST_CTIME_NSEC(st) 0 +#define ST_MTIME_NSEC(st) 0 +#else +#ifdef USE_ST_TIMESPEC +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) +#else +#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) +#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) +#endif +#endif + +#ifdef UNRELIABLE_FSTAT +#define fstat_is_reliable() 0 +#else +#define fstat_is_reliable() 1 +#endif + +#ifndef va_copy +/* + * Since an obvious implementation of va_list would be to make it a + * pointer into the stack frame, a simple assignment will work on + * many systems. But let's try to be more portable. + */ +#ifdef __va_copy +#define va_copy(dst, src) __va_copy(dst, src) +#else +#define va_copy(dst, src) ((dst) = (src)) +#endif +#endif + +/* + * This is always defined as a first step towards making the use of variadic + * macros unconditional. If it causes compilation problems on your platform, + * please report it to the Git mailing list at git@vger.kernel.org. 
+ */ +#define HAVE_VARIADIC_MACROS 1 + +/* usage.c: only to be used for testing BUG() implementation (see test-tool) */ +extern int BUG_exit_code; + +#ifdef HAVE_VARIADIC_MACROS +__attribute__((format (printf, 3, 4))) NORETURN +void BUG_fl(const char *file, int line, const char *fmt, ...); +#define BUG(...) BUG_fl(__FILE__, __LINE__, __VA_ARGS__) +#else +__attribute__((format (printf, 1, 2))) NORETURN +void BUG(const char *fmt, ...); +#endif + +/* + * Preserves errno, prints a message, but gives no warning for ENOENT. + * Returns 0 on success, which includes trying to unlink an object that does + * not exist. + */ +int unlink_or_warn(const char *path); + /* + * Tries to unlink file. Returns 0 if unlink succeeded + * or the file already didn't exist. Returns -1 and + * appends a message to err suitable for + * 'error("%s", err->buf)' on error. + */ +int unlink_or_msg(const char *file, struct strbuf *err); +/* + * Preserves errno, prints a message, but gives no warning for ENOENT. + * Returns 0 on success, which includes trying to remove a directory that does + * not exist. + */ +int rmdir_or_warn(const char *path); +/* + * Calls the correct function out of {unlink,rmdir}_or_warn based on + * the supplied file mode. + */ +int remove_or_warn(unsigned int mode, const char *path); + +/* + * Call access(2), but warn for any error except "missing file" + * (ENOENT or ENOTDIR). + */ +#define ACCESS_EACCES_OK (1U << 0) +int access_or_warn(const char *path, int mode, unsigned flag); +int access_or_die(const char *path, int mode, unsigned flag); + +/* Warn on an inaccessible file if errno indicates this is an error */ +int warn_on_fopen_errors(const char *path); + +/* + * Open with O_NOFOLLOW, or equivalent. Note that the fallback equivalent + * may be racy. Do not use this as protection against an attacker who can + * simultaneously create paths. + */ +int open_nofollow(const char *path, int flags); + +#if !defined(USE_PARENS_AROUND_GETTEXT_N) && defined(__GNUC__) +#define USE_PARENS_AROUND_GETTEXT_N 1 +#endif + +#ifndef SHELL_PATH +# define SHELL_PATH "/bin/sh" +#endif + +#ifndef _POSIX_THREAD_SAFE_FUNCTIONS +static inline void flockfile(FILE *fh) +{ + ; /* nothing */ +} +static inline void funlockfile(FILE *fh) +{ + ; /* nothing */ +} +#define getc_unlocked(fh) getc(fh) +#endif + +#ifdef FILENO_IS_A_MACRO +int git_fileno(FILE *stream); +# ifndef COMPAT_CODE_FILENO +# undef fileno +# define fileno(p) git_fileno(p) +# endif +#endif + +#ifdef NEED_ACCESS_ROOT_HANDLER +int git_access(const char *path, int mode); +# ifndef COMPAT_CODE_ACCESS +# ifdef access +# undef access +# endif +# define access(path, mode) git_access(path, mode) +# endif +#endif + +/* + * Our code often opens a path to an optional file, to work on its + * contents when we can successfully open it. We can ignore a failure + * to open if such an optional file does not exist, but we do want to + * report a failure in opening for other reasons (e.g. we got an I/O + * error, or the file is there, but we lack the permission to open). + * + * Call this function after seeing an error from open() or fopen() to + * see if the errno indicates a missing file that we can safely ignore. + */ +static inline int is_missing_file_error(int errno_) +{ + return (errno_ == ENOENT || errno_ == ENOTDIR); +} + +int cmd_main(int, const char **); + +/* + * Intercept all calls to exit() and route them to trace2 to + * optionally emit a message before calling the real exit(). 
+ */ +int trace2_cmd_exit_fl(const char *file, int line, int code); +#define exit(code) exit(trace2_cmd_exit_fl(__FILE__, __LINE__, (code))) + +/* + * You can mark a stack variable with UNLEAK(var) to avoid it being + * reported as a leak by tools like LSAN or valgrind. The argument + * should generally be the variable itself (not its address and not what + * it points to). It's safe to use this on pointers which may already + * have been freed, or on pointers which may still be in use. + * + * Use this _only_ for a variable that leaks by going out of scope at + * program exit (so only from cmd_* functions or their direct helpers). + * Normal functions, especially those which may be called multiple + * times, should actually free their memory. This is only meant as + * an annotation, and does nothing in non-leak-checking builds. + */ +#ifdef SUPPRESS_ANNOTATED_LEAKS +void unleak_memory(const void *ptr, size_t len); +#define UNLEAK(var) unleak_memory(&(var), sizeof(var)) +#else +#define UNLEAK(var) do {} while (0) +#endif + +/* + * This include must come after system headers, since it introduces macros that + * replace system names. + */ +#include "banned.h" + +/* + * container_of - Get the address of an object containing a field. + * + * @ptr: pointer to the field. + * @type: type of the object. + * @member: name of the field within the object. + */ +#define container_of(ptr, type, member) \ + ((type *) ((char *)(ptr) - offsetof(type, member))) + +/* + * helper function for `container_of_or_null' to avoid multiple + * evaluation of @ptr + */ +static inline void *container_of_or_null_offset(void *ptr, size_t offset) +{ + return ptr ? (char *)ptr - offset : NULL; +} + +/* + * like `container_of', but allows returned value to be NULL + */ +#define container_of_or_null(ptr, type, member) \ + (type *)container_of_or_null_offset(ptr, offsetof(type, member)) + +/* + * like offsetof(), but takes a pointer to a variable of type which + * contains @member, instead of a specified type. + * @ptr is subject to multiple evaluation since we can't rely on __typeof__ + * everywhere. + */ +#if defined(__GNUC__) /* clang sets this, too */ +#define OFFSETOF_VAR(ptr, member) offsetof(__typeof__(*ptr), member) +#else /* !__GNUC__ */ +#define OFFSETOF_VAR(ptr, member) \ + ((uintptr_t)&(ptr)->member - (uintptr_t)(ptr)) +#endif /* !__GNUC__ */ + +void sleep_millisec(int millisec); + +#endif diff --git a/path.c b/path.c index 7bccd830e95890..94ef6738999765 100644 --- a/path.c +++ b/path.c @@ -1218,11 +1218,15 @@ int longest_ancestor_length(const char *path, struct string_list *prefixes) const char *ceil = prefixes->items[i].string; int len = strlen(ceil); - if (len == 1 && ceil[0] == '/') - len = 0; /* root matches anything, with length 0 */ - else if (!strncmp(path, ceil, len) && path[len] == '/') - ; /* match of length len */ - else + /* + * For root directories (`/`, `C:/`, `//server/share/`) + * adjust the length to exclude the trailing slash. + */ + if (len > 0 && ceil[len - 1] == '/') + len--; + + if (strncmp(path, ceil, len) || + path[len] != '/' || !path[len + 1]) continue; /* no match */ if (len > max_len) diff --git a/pretty.c b/pretty.c index 73b5ead5099d3e..11ca08225216a2 100644 --- a/pretty.c +++ b/pretty.c @@ -14,6 +14,13 @@ #include "trailer.h" #include "run-command.h" +/* + * The limit for formatting directives, which enable the caller to append + * arbitrarily many bytes to the formatted buffer. This includes padding + * and wrapping formatters. 
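+ *
+ * For example, a width or indent passed to the padding placeholders
+ * (%<(...), %>(...), %><(...)) or to %w(...) is rejected below once it
+ * exceeds this many bytes (16 KiB).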
+ */ +#define FORMATTING_LIMIT (16 * 1024) + static char *user_format; static struct cmt_fmt_map { const char *name; @@ -924,7 +931,9 @@ static void strbuf_wrap(struct strbuf *sb, size_t pos, if (pos) strbuf_add(&tmp, sb->buf, pos); strbuf_add_wrapped_text(&tmp, sb->buf + pos, - (int) indent1, (int) indent2, (int) width); + cast_size_t_to_int(indent1), + cast_size_t_to_int(indent2), + cast_size_t_to_int(width)); strbuf_swap(&tmp, sb); strbuf_release(&tmp); } @@ -1050,9 +1059,18 @@ static size_t parse_padding_placeholder(const char *placeholder, const char *end = start + strcspn(start, ",)"); char *next; int width; - if (!end || end == start) + if (!*end || end == start) return 0; width = strtol(start, &next, 10); + + /* + * We need to limit the amount of padding, or otherwise this + * would allow the user to pad the buffer by arbitrarily many + * bytes and thus cause resource exhaustion. + */ + if (width < -FORMATTING_LIMIT || width > FORMATTING_LIMIT) + return 0; + if (next == start || width == 0) return 0; if (width < 0) { @@ -1297,6 +1315,16 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */ if (*next != ')') return 0; } + + /* + * We need to limit the format here as it allows the + * user to prepend arbitrarily many bytes to the buffer + * when rewrapping. + */ + if (width > FORMATTING_LIMIT || + indent1 > FORMATTING_LIMIT || + indent2 > FORMATTING_LIMIT) + return 0; rewrap_message_tail(sb, c, width, indent1, indent2); return end - placeholder + 1; } else @@ -1578,19 +1606,21 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */ struct format_commit_context *c) { struct strbuf local_sb = STRBUF_INIT; - int total_consumed = 0, len, padding = c->padding; + size_t total_consumed = 0; + int len, padding = c->padding; + if (padding < 0) { const char *start = strrchr(sb->buf, '\n'); int occupied; if (!start) start = sb->buf; - occupied = utf8_strnwidth(start, -1, 1); + occupied = utf8_strnwidth(start, strlen(start), 1); occupied += c->pretty_ctx->graph_width; padding = (-padding) - occupied; } while (1) { int modifier = *placeholder == 'C'; - int consumed = format_commit_one(&local_sb, placeholder, c); + size_t consumed = format_commit_one(&local_sb, placeholder, c); total_consumed += consumed; if (!modifier) @@ -1602,7 +1632,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */ placeholder++; total_consumed++; } - len = utf8_strnwidth(local_sb.buf, -1, 1); + len = utf8_strnwidth(local_sb.buf, local_sb.len, 1); if (c->flush_type == flush_left_and_steal) { const char *ch = sb->buf + sb->len - 1; @@ -1617,7 +1647,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */ if (*ch != 'm') break; p = ch - 1; - while (ch - p < 10 && *p != '\033') + while (p > sb->buf && ch - p < 10 && *p != '\033') p--; if (*p != '\033' || ch + 1 - p != display_mode_esc_sequence_len(p)) @@ -1656,7 +1686,7 @@ static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */ } strbuf_addbuf(sb, &local_sb); } else { - int sb_len = sb->len, offset = 0; + size_t sb_len = sb->len, offset = 0; if (c->flush_type == flush_left) offset = padding - len; else if (c->flush_type == flush_both) @@ -1679,8 +1709,7 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */ const char *placeholder, void *context) { - int consumed; - size_t orig_len; + size_t consumed, orig_len; enum { NO_MAGIC, ADD_LF_BEFORE_NON_EMPTY, @@ -1701,9 +1730,21 @@ static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */ default: break; } - if (magic != 
NO_MAGIC) + if (magic != NO_MAGIC) { placeholder++; + switch (placeholder[0]) { + case 'w': + /* + * `%+w()` cannot ever expand to a non-empty string, + * and it potentially changes the layout of preceding + * contents. We're thus not able to handle the magic in + * this combination and refuse the pattern. + */ + return 0; + }; + } + orig_len = sb->len; if (((struct format_commit_context *)context)->flush_type != no_flush) consumed = format_and_pad_commit(sb, placeholder, context); diff --git a/pretty.c.orig b/pretty.c.orig new file mode 100644 index 00000000000000..c14d7a041633e6 --- /dev/null +++ b/pretty.c.orig @@ -0,0 +1,2141 @@ +#include "cache.h" +#include "config.h" +#include "commit.h" +#include "utf8.h" +#include "diff.h" +#include "revision.h" +#include "string-list.h" +#include "mailmap.h" +#include "log-tree.h" +#include "notes.h" +#include "color.h" +#include "reflog-walk.h" +#include "gpg-interface.h" +#include "trailer.h" +#include "run-command.h" + +static char *user_format; +static struct cmt_fmt_map { + const char *name; + enum cmit_fmt format; + int is_tformat; + int expand_tabs_in_log; + int is_alias; + enum date_mode_type default_date_mode_type; + const char *user_format; +} *commit_formats; +static size_t builtin_formats_len; +static size_t commit_formats_len; +static size_t commit_formats_alloc; +static struct cmt_fmt_map *find_commit_format(const char *sought); + +int commit_format_is_empty(enum cmit_fmt fmt) +{ + return fmt == CMIT_FMT_USERFORMAT && !*user_format; +} + +static void save_user_format(struct rev_info *rev, const char *cp, int is_tformat) +{ + free(user_format); + user_format = xstrdup(cp); + if (is_tformat) + rev->use_terminator = 1; + rev->commit_format = CMIT_FMT_USERFORMAT; +} + +static int git_pretty_formats_config(const char *var, const char *value, void *cb) +{ + struct cmt_fmt_map *commit_format = NULL; + const char *name; + const char *fmt; + int i; + + if (!skip_prefix(var, "pretty.", &name)) + return 0; + + for (i = 0; i < builtin_formats_len; i++) { + if (!strcmp(commit_formats[i].name, name)) + return 0; + } + + for (i = builtin_formats_len; i < commit_formats_len; i++) { + if (!strcmp(commit_formats[i].name, name)) { + commit_format = &commit_formats[i]; + break; + } + } + + if (!commit_format) { + ALLOC_GROW(commit_formats, commit_formats_len+1, + commit_formats_alloc); + commit_format = &commit_formats[commit_formats_len]; + memset(commit_format, 0, sizeof(*commit_format)); + commit_formats_len++; + } + + commit_format->name = xstrdup(name); + commit_format->format = CMIT_FMT_USERFORMAT; + if (git_config_string(&fmt, var, value)) + return -1; + + if (skip_prefix(fmt, "format:", &fmt)) + commit_format->is_tformat = 0; + else if (skip_prefix(fmt, "tformat:", &fmt) || strchr(fmt, '%')) + commit_format->is_tformat = 1; + else + commit_format->is_alias = 1; + commit_format->user_format = fmt; + + return 0; +} + +static void setup_commit_formats(void) +{ + struct cmt_fmt_map builtin_formats[] = { + { "raw", CMIT_FMT_RAW, 0, 0 }, + { "medium", CMIT_FMT_MEDIUM, 0, 8 }, + { "short", CMIT_FMT_SHORT, 0, 0 }, + { "email", CMIT_FMT_EMAIL, 0, 0 }, + { "mboxrd", CMIT_FMT_MBOXRD, 0, 0 }, + { "fuller", CMIT_FMT_FULLER, 0, 8 }, + { "full", CMIT_FMT_FULL, 0, 8 }, + { "oneline", CMIT_FMT_ONELINE, 1, 0 }, + { "reference", CMIT_FMT_USERFORMAT, 1, 0, + 0, DATE_SHORT, "%C(auto)%h (%s, %ad)" }, + /* + * Please update $__git_log_pretty_formats in + * git-completion.bash when you add new formats. 
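(On the pretty.c hunks above: placeholders such as %<(width), %>(width) and %w(width,indent1,indent2) let a format string grow the output by "width" columns, so a crafted format like %<(2000000000)%s could previously request an enormous amount of padding; the new 16 * 1024 cap refuses such widths, and %+w(...) is now rejected outright since it can never expand to anything. A minimal sketch of the bounded width parse follows, using a hypothetical helper name; it is an illustration, not the in-tree parse_padding_placeholder().)

    #include <stdio.h>
    #include <stdlib.h>

    #define FORMATTING_LIMIT (16 * 1024)    /* same cap as the patch above */

    /*
     * Parse the numeric width of a "%<(width[,trunc])" style placeholder.
     * Returns the width, or 0 when it is missing, zero, or out of range.
     */
    static int parse_bounded_width(const char *start)
    {
            char *end;
            long width = strtol(start, &end, 10);

            if (end == start || width == 0)
                    return 0;       /* no number at all, or an explicit zero */
            if (width < -FORMATTING_LIMIT || width > FORMATTING_LIMIT)
                    return 0;       /* refuse resource-exhausting widths */
            return (int)width;
    }

    int main(void)
    {
            printf("%d\n", parse_bounded_width("40,trunc)"));   /* 40 */
            printf("%d\n", parse_bounded_width("2000000000)")); /* 0, rejected */
            return 0;
    }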
+ */ + }; + commit_formats_len = ARRAY_SIZE(builtin_formats); + builtin_formats_len = commit_formats_len; + ALLOC_GROW(commit_formats, commit_formats_len, commit_formats_alloc); + COPY_ARRAY(commit_formats, builtin_formats, + ARRAY_SIZE(builtin_formats)); + + git_config(git_pretty_formats_config, NULL); +} + +static struct cmt_fmt_map *find_commit_format_recursive(const char *sought, + const char *original, + int num_redirections) +{ + struct cmt_fmt_map *found = NULL; + size_t found_match_len = 0; + int i; + + if (num_redirections >= commit_formats_len) + die("invalid --pretty format: " + "'%s' references an alias which points to itself", + original); + + for (i = 0; i < commit_formats_len; i++) { + size_t match_len; + + if (!starts_with(commit_formats[i].name, sought)) + continue; + + match_len = strlen(commit_formats[i].name); + if (found == NULL || found_match_len > match_len) { + found = &commit_formats[i]; + found_match_len = match_len; + } + } + + if (found && found->is_alias) { + found = find_commit_format_recursive(found->user_format, + original, + num_redirections+1); + } + + return found; +} + +static struct cmt_fmt_map *find_commit_format(const char *sought) +{ + if (!commit_formats) + setup_commit_formats(); + + return find_commit_format_recursive(sought, sought, 0); +} + +void get_commit_format(const char *arg, struct rev_info *rev) +{ + struct cmt_fmt_map *commit_format; + + rev->use_terminator = 0; + if (!arg) { + rev->commit_format = CMIT_FMT_DEFAULT; + return; + } + if (skip_prefix(arg, "format:", &arg)) { + save_user_format(rev, arg, 0); + return; + } + + if (!*arg || skip_prefix(arg, "tformat:", &arg) || strchr(arg, '%')) { + save_user_format(rev, arg, 1); + return; + } + + commit_format = find_commit_format(arg); + if (!commit_format) + die("invalid --pretty format: %s", arg); + + rev->commit_format = commit_format->format; + rev->use_terminator = commit_format->is_tformat; + rev->expand_tabs_in_log_default = commit_format->expand_tabs_in_log; + if (!rev->date_mode_explicit && commit_format->default_date_mode_type) + rev->date_mode.type = commit_format->default_date_mode_type; + if (commit_format->format == CMIT_FMT_USERFORMAT) { + save_user_format(rev, commit_format->user_format, + commit_format->is_tformat); + } +} + +/* + * Generic support for pretty-printing the header + */ +static int get_one_line(const char *msg) +{ + int ret = 0; + + for (;;) { + char c = *msg++; + if (!c) + break; + ret++; + if (c == '\n') + break; + } + return ret; +} + +/* High bit set, or ISO-2022-INT */ +static int non_ascii(int ch) +{ + return !isascii(ch) || ch == '\033'; +} + +int has_non_ascii(const char *s) +{ + int ch; + if (!s) + return 0; + while ((ch = *s++) != '\0') { + if (non_ascii(ch)) + return 1; + } + return 0; +} + +static int is_rfc822_special(char ch) +{ + switch (ch) { + case '(': + case ')': + case '<': + case '>': + case '[': + case ']': + case ':': + case ';': + case '@': + case ',': + case '.': + case '"': + case '\\': + return 1; + default: + return 0; + } +} + +static int needs_rfc822_quoting(const char *s, int len) +{ + int i; + for (i = 0; i < len; i++) + if (is_rfc822_special(s[i])) + return 1; + return 0; +} + +static int last_line_length(struct strbuf *sb) +{ + int i; + + /* How many bytes are already used on the last line? 
*/ + for (i = sb->len - 1; i >= 0; i--) + if (sb->buf[i] == '\n') + break; + return sb->len - (i + 1); +} + +static void add_rfc822_quoted(struct strbuf *out, const char *s, int len) +{ + int i; + + /* just a guess, we may have to also backslash-quote */ + strbuf_grow(out, len + 2); + + strbuf_addch(out, '"'); + for (i = 0; i < len; i++) { + switch (s[i]) { + case '"': + case '\\': + strbuf_addch(out, '\\'); + /* fall through */ + default: + strbuf_addch(out, s[i]); + } + } + strbuf_addch(out, '"'); +} + +enum rfc2047_type { + RFC2047_SUBJECT, + RFC2047_ADDRESS +}; + +static int is_rfc2047_special(char ch, enum rfc2047_type type) +{ + /* + * rfc2047, section 4.2: + * + * 8-bit values which correspond to printable ASCII characters other + * than "=", "?", and "_" (underscore), MAY be represented as those + * characters. (But see section 5 for restrictions.) In + * particular, SPACE and TAB MUST NOT be represented as themselves + * within encoded words. + */ + + /* + * rule out non-ASCII characters and non-printable characters (the + * non-ASCII check should be redundant as isprint() is not localized + * and only knows about ASCII, but be defensive about that) + */ + if (non_ascii(ch) || !isprint(ch)) + return 1; + + /* + * rule out special printable characters (' ' should be the only + * whitespace character considered printable, but be defensive and use + * isspace()) + */ + if (isspace(ch) || ch == '=' || ch == '?' || ch == '_') + return 1; + + /* + * rfc2047, section 5.3: + * + * As a replacement for a 'word' entity within a 'phrase', for example, + * one that precedes an address in a From, To, or Cc header. The ABNF + * definition for 'phrase' from RFC 822 thus becomes: + * + * phrase = 1*( encoded-word / word ) + * + * In this case the set of characters that may be used in a "Q"-encoded + * 'encoded-word' is restricted to: . An 'encoded-word' that appears within a + * 'phrase' MUST be separated from any adjacent 'word', 'text' or + * 'special' by 'linear-white-space'. + */ + + if (type != RFC2047_ADDRESS) + return 0; + + /* '=' and '_' are special cases and have been checked above */ + return !(isalnum(ch) || ch == '!' || ch == '*' || ch == '+' || ch == '-' || ch == '/'); +} + +static int needs_rfc2047_encoding(const char *line, int len) +{ + int i; + + for (i = 0; i < len; i++) { + int ch = line[i]; + if (non_ascii(ch) || ch == '\n') + return 1; + if ((i + 1 < len) && (ch == '=' && line[i+1] == '?')) + return 1; + } + + return 0; +} + +static void add_rfc2047(struct strbuf *sb, const char *line, size_t len, + const char *encoding, enum rfc2047_type type) +{ + static const int max_encoded_length = 76; /* per rfc2047 */ + int i; + int line_len = last_line_length(sb); + + strbuf_grow(sb, len * 3 + strlen(encoding) + 100); + strbuf_addf(sb, "=?%s?q?", encoding); + line_len += strlen(encoding) + 5; /* 5 for =??q? */ + + while (len) { + /* + * RFC 2047, section 5 (3): + * + * Each 'encoded-word' MUST represent an integral number of + * characters. A multi-octet character may not be split across + * adjacent 'encoded- word's. + */ + const unsigned char *p = (const unsigned char *)line; + int chrlen = mbs_chrlen(&line, &len, encoding); + int is_special = (chrlen > 1) || is_rfc2047_special(*p, type); + + /* "=%02X" * chrlen, or the byte itself */ + const char *encoded_fmt = is_special ? "=%02X" : "%c"; + int encoded_len = is_special ? 3 * chrlen : 1; + + /* + * According to RFC 2047, we could encode the special character + * ' ' (space) with '_' (underscore) for readability. 
But many + * programs do not understand this and just leave the + * underscore in place. Thus, we do nothing special here, which + * causes ' ' to be encoded as '=20', avoiding this problem. + */ + + if (line_len + encoded_len + 2 > max_encoded_length) { + /* It won't fit with trailing "?=" --- break the line */ + strbuf_addf(sb, "?=\n =?%s?q?", encoding); + line_len = strlen(encoding) + 5 + 1; /* =??q? plus SP */ + } + + for (i = 0; i < chrlen; i++) + strbuf_addf(sb, encoded_fmt, p[i]); + line_len += encoded_len; + } + strbuf_addstr(sb, "?="); +} + +const char *show_ident_date(const struct ident_split *ident, + const struct date_mode *mode) +{ + timestamp_t date = 0; + long tz = 0; + + if (ident->date_begin && ident->date_end) + date = parse_timestamp(ident->date_begin, NULL, 10); + if (date_overflows(date)) + date = 0; + else { + if (ident->tz_begin && ident->tz_end) + tz = strtol(ident->tz_begin, NULL, 10); + if (tz >= INT_MAX || tz <= INT_MIN) + tz = 0; + } + return show_date(date, tz, mode); +} + +void pp_user_info(struct pretty_print_context *pp, + const char *what, struct strbuf *sb, + const char *line, const char *encoding) +{ + struct ident_split ident; + char *line_end; + const char *mailbuf, *namebuf; + size_t namelen, maillen; + int max_length = 78; /* per rfc2822 */ + + if (pp->fmt == CMIT_FMT_ONELINE) + return; + + line_end = strchrnul(line, '\n'); + if (split_ident_line(&ident, line, line_end - line)) + return; + + mailbuf = ident.mail_begin; + maillen = ident.mail_end - ident.mail_begin; + namebuf = ident.name_begin; + namelen = ident.name_end - ident.name_begin; + + if (pp->mailmap) + map_user(pp->mailmap, &mailbuf, &maillen, &namebuf, &namelen); + + if (cmit_fmt_is_mail(pp->fmt)) { + if (pp->from_ident && ident_cmp(pp->from_ident, &ident)) { + struct strbuf buf = STRBUF_INIT; + + strbuf_addstr(&buf, "From: "); + strbuf_add(&buf, namebuf, namelen); + strbuf_addstr(&buf, " <"); + strbuf_add(&buf, mailbuf, maillen); + strbuf_addstr(&buf, ">\n"); + string_list_append(&pp->in_body_headers, + strbuf_detach(&buf, NULL)); + + mailbuf = pp->from_ident->mail_begin; + maillen = pp->from_ident->mail_end - mailbuf; + namebuf = pp->from_ident->name_begin; + namelen = pp->from_ident->name_end - namebuf; + } + + strbuf_addstr(sb, "From: "); + if (pp->encode_email_headers && + needs_rfc2047_encoding(namebuf, namelen)) { + add_rfc2047(sb, namebuf, namelen, + encoding, RFC2047_ADDRESS); + max_length = 76; /* per rfc2047 */ + } else if (needs_rfc822_quoting(namebuf, namelen)) { + struct strbuf quoted = STRBUF_INIT; + add_rfc822_quoted("ed, namebuf, namelen); + strbuf_add_wrapped_bytes(sb, quoted.buf, quoted.len, + -6, 1, max_length); + strbuf_release("ed); + } else { + strbuf_add_wrapped_bytes(sb, namebuf, namelen, + -6, 1, max_length); + } + + if (max_length < + last_line_length(sb) + strlen(" <") + maillen + strlen(">")) + strbuf_addch(sb, '\n'); + strbuf_addf(sb, " <%.*s>\n", (int)maillen, mailbuf); + } else { + strbuf_addf(sb, "%s: %.*s%.*s <%.*s>\n", what, + (pp->fmt == CMIT_FMT_FULLER) ? 
4 : 0, " ", + (int)namelen, namebuf, (int)maillen, mailbuf); + } + + switch (pp->fmt) { + case CMIT_FMT_MEDIUM: + strbuf_addf(sb, "Date: %s\n", + show_ident_date(&ident, &pp->date_mode)); + break; + case CMIT_FMT_EMAIL: + case CMIT_FMT_MBOXRD: + strbuf_addf(sb, "Date: %s\n", + show_ident_date(&ident, DATE_MODE(RFC2822))); + break; + case CMIT_FMT_FULLER: + strbuf_addf(sb, "%sDate: %s\n", what, + show_ident_date(&ident, &pp->date_mode)); + break; + default: + /* notin' */ + break; + } +} + +static int is_blank_line(const char *line, int *len_p) +{ + int len = *len_p; + while (len && isspace(line[len - 1])) + len--; + *len_p = len; + return !len; +} + +const char *skip_blank_lines(const char *msg) +{ + for (;;) { + int linelen = get_one_line(msg); + int ll = linelen; + if (!linelen) + break; + if (!is_blank_line(msg, &ll)) + break; + msg += linelen; + } + return msg; +} + +static void add_merge_info(const struct pretty_print_context *pp, + struct strbuf *sb, const struct commit *commit) +{ + struct commit_list *parent = commit->parents; + + if ((pp->fmt == CMIT_FMT_ONELINE) || (cmit_fmt_is_mail(pp->fmt)) || + !parent || !parent->next) + return; + + strbuf_addstr(sb, "Merge:"); + + while (parent) { + struct object_id *oidp = &parent->item->object.oid; + strbuf_addch(sb, ' '); + if (pp->abbrev) + strbuf_add_unique_abbrev(sb, oidp, pp->abbrev); + else + strbuf_addstr(sb, oid_to_hex(oidp)); + parent = parent->next; + } + strbuf_addch(sb, '\n'); +} + +static char *get_header(const char *msg, const char *key) +{ + size_t len; + const char *v = find_commit_header(msg, key, &len); + return v ? xmemdupz(v, len) : NULL; +} + +static char *replace_encoding_header(char *buf, const char *encoding) +{ + struct strbuf tmp = STRBUF_INIT; + size_t start, len; + char *cp = buf; + + /* guess if there is an encoding header before a \n\n */ + while (!starts_with(cp, "encoding ")) { + cp = strchr(cp, '\n'); + if (!cp || *++cp == '\n') + return buf; + } + start = cp - buf; + cp = strchr(cp, '\n'); + if (!cp) + return buf; /* should not happen but be defensive */ + len = cp + 1 - (buf + start); + + strbuf_attach(&tmp, buf, strlen(buf), strlen(buf) + 1); + if (is_encoding_utf8(encoding)) { + /* we have re-coded to UTF-8; drop the header */ + strbuf_remove(&tmp, start, len); + } else { + /* just replaces XXXX in 'encoding XXXX\n' */ + strbuf_splice(&tmp, start + strlen("encoding "), + len - strlen("encoding \n"), + encoding, strlen(encoding)); + } + return strbuf_detach(&tmp, NULL); +} + +const char *repo_logmsg_reencode(struct repository *r, + const struct commit *commit, + char **commit_encoding, + const char *output_encoding) +{ + static const char *utf8 = "UTF-8"; + const char *use_encoding; + char *encoding; + const char *msg = repo_get_commit_buffer(r, commit, NULL); + char *out; + + if (!output_encoding || !*output_encoding) { + if (commit_encoding) + *commit_encoding = get_header(msg, "encoding"); + return msg; + } + encoding = get_header(msg, "encoding"); + if (commit_encoding) + *commit_encoding = encoding; + use_encoding = encoding ? encoding : utf8; + if (same_encoding(use_encoding, output_encoding)) { + /* + * No encoding work to be done. If we have no encoding header + * at all, then there's nothing to do, and we can return the + * message verbatim (whether newly allocated or not). + */ + if (!encoding) + return msg; + + /* + * Otherwise, we still want to munge the encoding header in the + * result, which will be done by modifying the buffer. If we + * are using a fresh copy, we can reuse it. 
But if we are using + * the cached copy from get_commit_buffer, we need to duplicate it + * to avoid munging the cached copy. + */ + if (msg == get_cached_commit_buffer(r, commit, NULL)) + out = xstrdup(msg); + else + out = (char *)msg; + } + else { + /* + * There's actual encoding work to do. Do the reencoding, which + * still leaves the header to be replaced in the next step. At + * this point, we are done with msg. If we allocated a fresh + * copy, we can free it. + */ + out = reencode_string(msg, output_encoding, use_encoding); + if (out) + repo_unuse_commit_buffer(r, commit, msg); + } + + /* + * This replacement actually consumes the buffer we hand it, so we do + * not have to worry about freeing the old "out" here. + */ + if (out) + out = replace_encoding_header(out, output_encoding); + + if (!commit_encoding) + free(encoding); + /* + * If the re-encoding failed, out might be NULL here; in that + * case we just return the commit message verbatim. + */ + if (!out) { + warning("unable to reencode commit to '%s'", output_encoding); + return msg; + } + return out; +} + +static int mailmap_name(const char **email, size_t *email_len, + const char **name, size_t *name_len) +{ + static struct string_list *mail_map; + if (!mail_map) { + CALLOC_ARRAY(mail_map, 1); + read_mailmap(mail_map); + } + return mail_map->nr && map_user(mail_map, email, email_len, name, name_len); +} + +static size_t format_person_part(struct strbuf *sb, char part, + const char *msg, int len, + const struct date_mode *dmode) +{ + /* currently all placeholders have same length */ + const int placeholder_len = 2; + struct ident_split s; + const char *name, *mail; + size_t maillen, namelen; + + if (split_ident_line(&s, msg, len) < 0) + goto skip; + + name = s.name_begin; + namelen = s.name_end - s.name_begin; + mail = s.mail_begin; + maillen = s.mail_end - s.mail_begin; + + if (part == 'N' || part == 'E' || part == 'L') /* mailmap lookup */ + mailmap_name(&mail, &maillen, &name, &namelen); + if (part == 'n' || part == 'N') { /* name */ + strbuf_add(sb, name, namelen); + return placeholder_len; + } + if (part == 'e' || part == 'E') { /* email */ + strbuf_add(sb, mail, maillen); + return placeholder_len; + } + if (part == 'l' || part == 'L') { /* local-part */ + const char *at = memchr(mail, '@', maillen); + if (at) + maillen = at - mail; + strbuf_add(sb, mail, maillen); + return placeholder_len; + } + + if (!s.date_begin) + goto skip; + + if (part == 't') { /* date, UNIX timestamp */ + strbuf_add(sb, s.date_begin, s.date_end - s.date_begin); + return placeholder_len; + } + + switch (part) { + case 'd': /* date */ + strbuf_addstr(sb, show_ident_date(&s, dmode)); + return placeholder_len; + case 'D': /* date, RFC2822 style */ + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(RFC2822))); + return placeholder_len; + case 'r': /* date, relative */ + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(RELATIVE))); + return placeholder_len; + case 'i': /* date, ISO 8601-like */ + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(ISO8601))); + return placeholder_len; + case 'I': /* date, ISO 8601 strict */ + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(ISO8601_STRICT))); + return placeholder_len; + case 'h': /* date, human */ + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(HUMAN))); + return placeholder_len; + case 's': + strbuf_addstr(sb, show_ident_date(&s, DATE_MODE(SHORT))); + return placeholder_len; + } + +skip: + /* + * reading from either a bogus commit, or a reflog entry with + * %gn, %ge, etc.; 'sb' cannot be updated, but 
we still need + * to compute a valid return value. + */ + if (part == 'n' || part == 'e' || part == 't' || part == 'd' + || part == 'D' || part == 'r' || part == 'i') + return placeholder_len; + + return 0; /* unknown placeholder */ +} + +struct chunk { + size_t off; + size_t len; +}; + +enum flush_type { + no_flush, + flush_right, + flush_left, + flush_left_and_steal, + flush_both +}; + +enum trunc_type { + trunc_none, + trunc_left, + trunc_middle, + trunc_right +}; + +struct format_commit_context { + struct repository *repository; + const struct commit *commit; + const struct pretty_print_context *pretty_ctx; + unsigned commit_header_parsed:1; + unsigned commit_message_parsed:1; + struct signature_check signature_check; + enum flush_type flush_type; + enum trunc_type truncate; + const char *message; + char *commit_encoding; + size_t width, indent1, indent2; + int auto_color; + int padding; + + /* These offsets are relative to the start of the commit message. */ + struct chunk author; + struct chunk committer; + size_t message_off; + size_t subject_off; + size_t body_off; + + /* The following ones are relative to the result struct strbuf. */ + size_t wrap_start; +}; + +static void parse_commit_header(struct format_commit_context *context) +{ + const char *msg = context->message; + int i; + + for (i = 0; msg[i]; i++) { + const char *name; + int eol; + for (eol = i; msg[eol] && msg[eol] != '\n'; eol++) + ; /* do nothing */ + + if (i == eol) { + break; + } else if (skip_prefix(msg + i, "author ", &name)) { + context->author.off = name - msg; + context->author.len = msg + eol - name; + } else if (skip_prefix(msg + i, "committer ", &name)) { + context->committer.off = name - msg; + context->committer.len = msg + eol - name; + } + i = eol; + } + context->message_off = i; + context->commit_header_parsed = 1; +} + +static int istitlechar(char c) +{ + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || c == '.' || c == '_'; +} + +void format_sanitized_subject(struct strbuf *sb, const char *msg, size_t len) +{ + size_t trimlen; + size_t start_len = sb->len; + int space = 2; + int i; + + for (i = 0; i < len; i++) { + if (istitlechar(msg[i])) { + if (space == 1) + strbuf_addch(sb, '-'); + space = 0; + strbuf_addch(sb, msg[i]); + if (msg[i] == '.') + while (msg[i+1] == '.') + i++; + } else + space |= 1; + } + + /* trim any trailing '.' or '-' characters */ + trimlen = 0; + while (sb->len - trimlen > start_len && + (sb->buf[sb->len - 1 - trimlen] == '.' 
+ || sb->buf[sb->len - 1 - trimlen] == '-')) + trimlen++; + strbuf_remove(sb, sb->len - trimlen, trimlen); +} + +const char *format_subject(struct strbuf *sb, const char *msg, + const char *line_separator) +{ + int first = 1; + + for (;;) { + const char *line = msg; + int linelen = get_one_line(line); + + msg += linelen; + if (!linelen || is_blank_line(line, &linelen)) + break; + + if (!sb) + continue; + strbuf_grow(sb, linelen + 2); + if (!first) + strbuf_addstr(sb, line_separator); + strbuf_add(sb, line, linelen); + first = 0; + } + return msg; +} + +static void parse_commit_message(struct format_commit_context *c) +{ + const char *msg = c->message + c->message_off; + const char *start = c->message; + + msg = skip_blank_lines(msg); + c->subject_off = msg - start; + + msg = format_subject(NULL, msg, NULL); + msg = skip_blank_lines(msg); + c->body_off = msg - start; + + c->commit_message_parsed = 1; +} + +static void strbuf_wrap(struct strbuf *sb, size_t pos, + size_t width, size_t indent1, size_t indent2) +{ + struct strbuf tmp = STRBUF_INIT; + + if (pos) + strbuf_add(&tmp, sb->buf, pos); + strbuf_add_wrapped_text(&tmp, sb->buf + pos, + cast_size_t_to_int(indent1), + cast_size_t_to_int(indent2), + cast_size_t_to_int(width)); + strbuf_swap(&tmp, sb); + strbuf_release(&tmp); +} + +static void rewrap_message_tail(struct strbuf *sb, + struct format_commit_context *c, + size_t new_width, size_t new_indent1, + size_t new_indent2) +{ + if (c->width == new_width && c->indent1 == new_indent1 && + c->indent2 == new_indent2) + return; + if (c->wrap_start < sb->len) + strbuf_wrap(sb, c->wrap_start, c->width, c->indent1, c->indent2); + c->wrap_start = sb->len; + c->width = new_width; + c->indent1 = new_indent1; + c->indent2 = new_indent2; +} + +static int format_reflog_person(struct strbuf *sb, + char part, + struct reflog_walk_info *log, + const struct date_mode *dmode) +{ + const char *ident; + + if (!log) + return 2; + + ident = get_reflog_ident(log); + if (!ident) + return 2; + + return format_person_part(sb, part, ident, strlen(ident), dmode); +} + +static size_t parse_color(struct strbuf *sb, /* in UTF-8 */ + const char *placeholder, + struct format_commit_context *c) +{ + const char *rest = placeholder; + const char *basic_color = NULL; + + if (placeholder[1] == '(') { + const char *begin = placeholder + 2; + const char *end = strchr(begin, ')'); + char color[COLOR_MAXLEN]; + + if (!end) + return 0; + + if (skip_prefix(begin, "auto,", &begin)) { + if (!want_color(c->pretty_ctx->color)) + return end - placeholder + 1; + } else if (skip_prefix(begin, "always,", &begin)) { + /* nothing to do; we do not respect want_color at all */ + } else { + /* the default is the same as "auto" */ + if (!want_color(c->pretty_ctx->color)) + return end - placeholder + 1; + } + + if (color_parse_mem(begin, end - begin, color) < 0) + die(_("unable to parse --pretty format")); + strbuf_addstr(sb, color); + return end - placeholder + 1; + } + + /* + * We handle things like "%C(red)" above; for historical reasons, there + * are a few colors that can be specified without parentheses (and + * they cannot support things like "auto" or "always" at all). 
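(Usage note for the color parsing above, with plain git invocations given only as illustration: a handful of colors still work without parentheses, everything else goes through %C(...), and the "auto," prefix makes the color conditional on color being enabled for the output.)

    git log --format='%Cred%h%Creset %s'            # legacy form, always emits color
    git log --format='%C(auto,red)%h%C(reset) %s'   # colored only when output wants color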
+ */ + if (skip_prefix(placeholder + 1, "red", &rest)) + basic_color = GIT_COLOR_RED; + else if (skip_prefix(placeholder + 1, "green", &rest)) + basic_color = GIT_COLOR_GREEN; + else if (skip_prefix(placeholder + 1, "blue", &rest)) + basic_color = GIT_COLOR_BLUE; + else if (skip_prefix(placeholder + 1, "reset", &rest)) + basic_color = GIT_COLOR_RESET; + + if (basic_color && want_color(c->pretty_ctx->color)) + strbuf_addstr(sb, basic_color); + + return rest - placeholder; +} + +static size_t parse_padding_placeholder(const char *placeholder, + struct format_commit_context *c) +{ + const char *ch = placeholder; + enum flush_type flush_type; + int to_column = 0; + + switch (*ch++) { + case '<': + flush_type = flush_right; + break; + case '>': + if (*ch == '<') { + flush_type = flush_both; + ch++; + } else if (*ch == '>') { + flush_type = flush_left_and_steal; + ch++; + } else + flush_type = flush_left; + break; + default: + return 0; + } + + /* the next value means "wide enough to that column" */ + if (*ch == '|') { + to_column = 1; + ch++; + } + + if (*ch == '(') { + const char *start = ch + 1; + const char *end = start + strcspn(start, ",)"); + char *next; + int width; + if (!*end || end == start) + return 0; + width = strtol(start, &next, 10); + if (next == start || width == 0) + return 0; + if (width < 0) { + if (to_column) + width += term_columns(); + if (width < 0) + return 0; + } + c->padding = to_column ? -width : width; + c->flush_type = flush_type; + + if (*end == ',') { + start = end + 1; + end = strchr(start, ')'); + if (!end || end == start) + return 0; + if (starts_with(start, "trunc)")) + c->truncate = trunc_right; + else if (starts_with(start, "ltrunc)")) + c->truncate = trunc_left; + else if (starts_with(start, "mtrunc)")) + c->truncate = trunc_middle; + else + return 0; + } else + c->truncate = trunc_none; + + return end - placeholder + 1; + } + return 0; +} + +static int match_placeholder_arg_value(const char *to_parse, const char *candidate, + const char **end, const char **valuestart, + size_t *valuelen) +{ + const char *p; + + if (!(skip_prefix(to_parse, candidate, &p))) + return 0; + if (valuestart) { + if (*p == '=') { + *valuestart = p + 1; + *valuelen = strcspn(*valuestart, ",)"); + p = *valuestart + *valuelen; + } else { + if (*p != ',' && *p != ')') + return 0; + *valuestart = NULL; + *valuelen = 0; + } + } + if (*p == ',') { + *end = p + 1; + return 1; + } + if (*p == ')') { + *end = p; + return 1; + } + return 0; +} + +static int match_placeholder_bool_arg(const char *to_parse, const char *candidate, + const char **end, int *val) +{ + const char *argval; + char *strval; + size_t arglen; + int v; + + if (!match_placeholder_arg_value(to_parse, candidate, end, &argval, &arglen)) + return 0; + + if (!argval) { + *val = 1; + return 1; + } + + strval = xstrndup(argval, arglen); + v = git_parse_maybe_bool(strval); + free(strval); + + if (v == -1) + return 0; + + *val = v; + + return 1; +} + +static int format_trailer_match_cb(const struct strbuf *key, void *ud) +{ + const struct string_list *list = ud; + const struct string_list_item *item; + + for_each_string_list_item (item, list) { + if (key->len == (uintptr_t)item->util && + !strncasecmp(item->string, key->buf, key->len)) + return 1; + } + return 0; +} + +int format_set_trailers_options(struct process_trailer_options *opts, + struct string_list *filter_list, + struct strbuf *sepbuf, + struct strbuf *kvsepbuf, + const char **arg, + char **invalid_arg) +{ + for (;;) { + const char *argval; + size_t arglen; + + if 
(**arg == ')') + break; + + if (match_placeholder_arg_value(*arg, "key", arg, &argval, &arglen)) { + uintptr_t len = arglen; + + if (!argval) + return -1; + + if (len && argval[len - 1] == ':') + len--; + string_list_append(filter_list, argval)->util = (char *)len; + + opts->filter = format_trailer_match_cb; + opts->filter_data = filter_list; + opts->only_trailers = 1; + } else if (match_placeholder_arg_value(*arg, "separator", arg, &argval, &arglen)) { + char *fmt; + + strbuf_reset(sepbuf); + fmt = xstrndup(argval, arglen); + strbuf_expand(sepbuf, fmt, strbuf_expand_literal_cb, NULL); + free(fmt); + opts->separator = sepbuf; + } else if (match_placeholder_arg_value(*arg, "key_value_separator", arg, &argval, &arglen)) { + char *fmt; + + strbuf_reset(kvsepbuf); + fmt = xstrndup(argval, arglen); + strbuf_expand(kvsepbuf, fmt, strbuf_expand_literal_cb, NULL); + free(fmt); + opts->key_value_separator = kvsepbuf; + } else if (!match_placeholder_bool_arg(*arg, "only", arg, &opts->only_trailers) && + !match_placeholder_bool_arg(*arg, "unfold", arg, &opts->unfold) && + !match_placeholder_bool_arg(*arg, "keyonly", arg, &opts->key_only) && + !match_placeholder_bool_arg(*arg, "valueonly", arg, &opts->value_only)) { + if (invalid_arg) { + size_t len = strcspn(*arg, ",)"); + *invalid_arg = xstrndup(*arg, len); + } + return -1; + } + } + return 0; +} + +static size_t parse_describe_args(const char *start, struct strvec *args) +{ + const char *options[] = { "match", "exclude" }; + const char *arg = start; + + for (;;) { + const char *matched = NULL; + const char *argval; + size_t arglen = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(options); i++) { + if (match_placeholder_arg_value(arg, options[i], &arg, + &argval, &arglen)) { + matched = options[i]; + break; + } + } + if (!matched) + break; + + if (!arglen) + return 0; + strvec_pushf(args, "--%s=%.*s", matched, (int)arglen, argval); + } + return arg - start; +} + +static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */ + const char *placeholder, + void *context) +{ + struct format_commit_context *c = context; + const struct commit *commit = c->commit; + const char *msg = c->message; + struct commit_list *p; + const char *arg, *eol; + size_t res; + char **slot; + + /* these are independent of the commit */ + res = strbuf_expand_literal_cb(sb, placeholder, NULL); + if (res) + return res; + + switch (placeholder[0]) { + case 'C': + if (starts_with(placeholder + 1, "(auto)")) { + c->auto_color = want_color(c->pretty_ctx->color); + if (c->auto_color && sb->len) + strbuf_addstr(sb, GIT_COLOR_RESET); + return 7; /* consumed 7 bytes, "C(auto)" */ + } else { + int ret = parse_color(sb, placeholder, c); + if (ret) + c->auto_color = 0; + /* + * Otherwise, we decided to treat %C + * as a literal string, and the previous + * %C(auto) is still valid. 
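(The helpers above, format_set_trailers_options() and parse_describe_args(), back the %(trailers:...) and %(describe:...) placeholders. Ordinary usage, shown only as illustration and not part of this patch, looks like:)

    git log -1 --format='%(trailers:key=Signed-off-by,valueonly)'
    git log -1 --format='%(describe:match=v*)'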
+ */ + return ret; + } + case 'w': + if (placeholder[1] == '(') { + unsigned long width = 0, indent1 = 0, indent2 = 0; + char *next; + const char *start = placeholder + 2; + const char *end = strchr(start, ')'); + if (!end) + return 0; + if (end > start) { + width = strtoul(start, &next, 10); + if (*next == ',') { + indent1 = strtoul(next + 1, &next, 10); + if (*next == ',') { + indent2 = strtoul(next + 1, + &next, 10); + } + } + if (*next != ')') + return 0; + } + rewrap_message_tail(sb, c, width, indent1, indent2); + return end - placeholder + 1; + } else + return 0; + + case '<': + case '>': + return parse_padding_placeholder(placeholder, c); + } + + if (skip_prefix(placeholder, "(describe", &arg)) { + struct child_process cmd = CHILD_PROCESS_INIT; + struct strbuf out = STRBUF_INIT; + struct strbuf err = STRBUF_INIT; + struct pretty_print_describe_status *describe_status; + + describe_status = c->pretty_ctx->describe_status; + if (describe_status) { + if (!describe_status->max_invocations) + return 0; + describe_status->max_invocations--; + } + + cmd.git_cmd = 1; + strvec_push(&cmd.args, "describe"); + + if (*arg == ':') { + arg++; + arg += parse_describe_args(arg, &cmd.args); + } + + if (*arg != ')') { + child_process_clear(&cmd); + return 0; + } + + strvec_push(&cmd.args, oid_to_hex(&commit->object.oid)); + pipe_command(&cmd, NULL, 0, &out, 0, &err, 0); + strbuf_rtrim(&out); + strbuf_addbuf(sb, &out); + strbuf_release(&out); + strbuf_release(&err); + return arg - placeholder + 1; + } + + /* these depend on the commit */ + if (!commit->object.parsed) + parse_object(the_repository, &commit->object.oid); + + switch (placeholder[0]) { + case 'H': /* commit hash */ + strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT)); + strbuf_addstr(sb, oid_to_hex(&commit->object.oid)); + strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_RESET)); + return 1; + case 'h': /* abbreviated commit hash */ + strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT)); + strbuf_add_unique_abbrev(sb, &commit->object.oid, + c->pretty_ctx->abbrev); + strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_RESET)); + return 1; + case 'T': /* tree hash */ + strbuf_addstr(sb, oid_to_hex(get_commit_tree_oid(commit))); + return 1; + case 't': /* abbreviated tree hash */ + strbuf_add_unique_abbrev(sb, + get_commit_tree_oid(commit), + c->pretty_ctx->abbrev); + return 1; + case 'P': /* parent hashes */ + for (p = commit->parents; p; p = p->next) { + if (p != commit->parents) + strbuf_addch(sb, ' '); + strbuf_addstr(sb, oid_to_hex(&p->item->object.oid)); + } + return 1; + case 'p': /* abbreviated parent hashes */ + for (p = commit->parents; p; p = p->next) { + if (p != commit->parents) + strbuf_addch(sb, ' '); + strbuf_add_unique_abbrev(sb, &p->item->object.oid, + c->pretty_ctx->abbrev); + } + return 1; + case 'm': /* left/right/bottom */ + strbuf_addstr(sb, get_revision_mark(NULL, commit)); + return 1; + case 'd': + format_decorations(sb, commit, c->auto_color); + return 1; + case 'D': + format_decorations_extended(sb, commit, c->auto_color, "", ", ", ""); + return 1; + case 'S': /* tag/branch like --source */ + if (!(c->pretty_ctx->rev && c->pretty_ctx->rev->sources)) + return 0; + slot = revision_sources_at(c->pretty_ctx->rev->sources, commit); + if (!(slot && *slot)) + return 0; + strbuf_addstr(sb, *slot); + return 1; + case 'g': /* reflog info */ + switch(placeholder[1]) { + case 'd': /* reflog selector */ + case 'D': + if (c->pretty_ctx->reflog_info) + get_reflog_selector(sb, + c->pretty_ctx->reflog_info, 
+ &c->pretty_ctx->date_mode, + c->pretty_ctx->date_mode_explicit, + (placeholder[1] == 'd')); + return 2; + case 's': /* reflog message */ + if (c->pretty_ctx->reflog_info) + get_reflog_message(sb, c->pretty_ctx->reflog_info); + return 2; + case 'n': + case 'N': + case 'e': + case 'E': + return format_reflog_person(sb, + placeholder[1], + c->pretty_ctx->reflog_info, + &c->pretty_ctx->date_mode); + } + return 0; /* unknown %g placeholder */ + case 'N': + if (c->pretty_ctx->notes_message) { + strbuf_addstr(sb, c->pretty_ctx->notes_message); + return 1; + } + return 0; + } + + if (placeholder[0] == 'G') { + if (!c->signature_check.result) + check_commit_signature(c->commit, &(c->signature_check)); + switch (placeholder[1]) { + case 'G': + if (c->signature_check.gpg_output) + strbuf_addstr(sb, c->signature_check.gpg_output); + break; + case '?': + switch (c->signature_check.result) { + case 'G': + switch (c->signature_check.trust_level) { + case TRUST_UNDEFINED: + case TRUST_NEVER: + strbuf_addch(sb, 'U'); + break; + default: + strbuf_addch(sb, 'G'); + break; + } + break; + case 'B': + case 'E': + case 'N': + case 'X': + case 'Y': + case 'R': + strbuf_addch(sb, c->signature_check.result); + } + break; + case 'S': + if (c->signature_check.signer) + strbuf_addstr(sb, c->signature_check.signer); + break; + case 'K': + if (c->signature_check.key) + strbuf_addstr(sb, c->signature_check.key); + break; + case 'F': + if (c->signature_check.fingerprint) + strbuf_addstr(sb, c->signature_check.fingerprint); + break; + case 'P': + if (c->signature_check.primary_key_fingerprint) + strbuf_addstr(sb, c->signature_check.primary_key_fingerprint); + break; + case 'T': + switch (c->signature_check.trust_level) { + case TRUST_UNDEFINED: + strbuf_addstr(sb, "undefined"); + break; + case TRUST_NEVER: + strbuf_addstr(sb, "never"); + break; + case TRUST_MARGINAL: + strbuf_addstr(sb, "marginal"); + break; + case TRUST_FULLY: + strbuf_addstr(sb, "fully"); + break; + case TRUST_ULTIMATE: + strbuf_addstr(sb, "ultimate"); + break; + } + break; + default: + return 0; + } + return 2; + } + + /* For the rest we have to parse the commit header. */ + if (!c->commit_header_parsed) { + msg = c->message = + repo_logmsg_reencode(c->repository, commit, + &c->commit_encoding, "UTF-8"); + parse_commit_header(c); + } + + switch (placeholder[0]) { + case 'a': /* author ... */ + return format_person_part(sb, placeholder[1], + msg + c->author.off, c->author.len, + &c->pretty_ctx->date_mode); + case 'c': /* committer ... */ + return format_person_part(sb, placeholder[1], + msg + c->committer.off, c->committer.len, + &c->pretty_ctx->date_mode); + case 'e': /* encoding */ + if (c->commit_encoding) + strbuf_addstr(sb, c->commit_encoding); + return 1; + case 'B': /* raw body */ + /* message_off is always left at the initial newline */ + strbuf_addstr(sb, msg + c->message_off + 1); + return 1; + } + + /* Now we need to parse the commit message. 
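(Illustration of the signature and ident placeholders handled above, ordinary git usage rather than patch content: %G? expands to a single status letter such as G, B, U or N, %GS to the signer, and %aN/%aE to the mailmap-resolved author name and address.)

    git log --format='%h %G? %GS'
    git log --format='%h %aN <%aE> %s'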
*/ + if (!c->commit_message_parsed) + parse_commit_message(c); + + switch (placeholder[0]) { + case 's': /* subject */ + format_subject(sb, msg + c->subject_off, " "); + return 1; + case 'f': /* sanitized subject */ + eol = strchrnul(msg + c->subject_off, '\n'); + format_sanitized_subject(sb, msg + c->subject_off, eol - (msg + c->subject_off)); + return 1; + case 'b': /* body */ + strbuf_addstr(sb, msg + c->body_off); + return 1; + } + + if (skip_prefix(placeholder, "(trailers", &arg)) { + struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT; + struct string_list filter_list = STRING_LIST_INIT_NODUP; + struct strbuf sepbuf = STRBUF_INIT; + struct strbuf kvsepbuf = STRBUF_INIT; + size_t ret = 0; + + opts.no_divider = 1; + + if (*arg == ':') { + arg++; + if (format_set_trailers_options(&opts, &filter_list, &sepbuf, &kvsepbuf, &arg, NULL)) + goto trailer_out; + } + if (*arg == ')') { + format_trailers_from_commit(sb, msg + c->subject_off, &opts); + ret = arg - placeholder + 1; + } + trailer_out: + string_list_clear(&filter_list, 0); + strbuf_release(&sepbuf); + return ret; + } + + return 0; /* unknown placeholder */ +} + +static size_t format_and_pad_commit(struct strbuf *sb, /* in UTF-8 */ + const char *placeholder, + struct format_commit_context *c) +{ + struct strbuf local_sb = STRBUF_INIT; + size_t total_consumed = 0; + int len, padding = c->padding; + + if (padding < 0) { + const char *start = strrchr(sb->buf, '\n'); + int occupied; + if (!start) + start = sb->buf; + occupied = utf8_strnwidth(start, strlen(start), 1); + occupied += c->pretty_ctx->graph_width; + padding = (-padding) - occupied; + } + while (1) { + int modifier = *placeholder == 'C'; + size_t consumed = format_commit_one(&local_sb, placeholder, c); + total_consumed += consumed; + + if (!modifier) + break; + + placeholder += consumed; + if (*placeholder != '%') + break; + placeholder++; + total_consumed++; + } + len = utf8_strnwidth(local_sb.buf, local_sb.len, 1); + + if (c->flush_type == flush_left_and_steal) { + const char *ch = sb->buf + sb->len - 1; + while (len > padding && ch > sb->buf) { + const char *p; + if (*ch == ' ') { + ch--; + padding++; + continue; + } + /* check for trailing ansi sequences */ + if (*ch != 'm') + break; + p = ch - 1; + while (p > sb->buf && ch - p < 10 && *p != '\033') + p--; + if (*p != '\033' || + ch + 1 - p != display_mode_esc_sequence_len(p)) + break; + /* + * got a good ansi sequence, put it back to + * local_sb as we're cutting sb + */ + strbuf_insert(&local_sb, 0, p, ch + 1 - p); + ch = p - 1; + } + strbuf_setlen(sb, ch + 1 - sb->buf); + c->flush_type = flush_left; + } + + if (len > padding) { + switch (c->truncate) { + case trunc_left: + strbuf_utf8_replace(&local_sb, + 0, len - (padding - 2), + ".."); + break; + case trunc_middle: + strbuf_utf8_replace(&local_sb, + padding / 2 - 1, + len - (padding - 2), + ".."); + break; + case trunc_right: + strbuf_utf8_replace(&local_sb, + padding - 2, len - (padding - 2), + ".."); + break; + case trunc_none: + break; + } + strbuf_addbuf(sb, &local_sb); + } else { + size_t sb_len = sb->len, offset = 0; + if (c->flush_type == flush_left) + offset = padding - len; + else if (c->flush_type == flush_both) + offset = (padding - len) / 2; + /* + * we calculate padding in columns, now + * convert it back to chars + */ + padding = padding - len + local_sb.len; + strbuf_addchars(sb, ' ', padding); + memcpy(sb->buf + sb_len + offset, local_sb.buf, + local_sb.len); + } + strbuf_release(&local_sb); + c->flush_type = no_flush; + return 
total_consumed; +} + +static size_t format_commit_item(struct strbuf *sb, /* in UTF-8 */ + const char *placeholder, + void *context) +{ + size_t consumed, orig_len; + enum { + NO_MAGIC, + ADD_LF_BEFORE_NON_EMPTY, + DEL_LF_BEFORE_EMPTY, + ADD_SP_BEFORE_NON_EMPTY + } magic = NO_MAGIC; + + switch (placeholder[0]) { + case '-': + magic = DEL_LF_BEFORE_EMPTY; + break; + case '+': + magic = ADD_LF_BEFORE_NON_EMPTY; + break; + case ' ': + magic = ADD_SP_BEFORE_NON_EMPTY; + break; + default: + break; + } + if (magic != NO_MAGIC) { + placeholder++; + + switch (placeholder[0]) { + case 'w': + /* + * `%+w()` cannot ever expand to a non-empty string, + * and it potentially changes the layout of preceding + * contents. We're thus not able to handle the magic in + * this combination and refuse the pattern. + */ + return 0; + }; + } + + orig_len = sb->len; + if (((struct format_commit_context *)context)->flush_type != no_flush) + consumed = format_and_pad_commit(sb, placeholder, context); + else + consumed = format_commit_one(sb, placeholder, context); + if (magic == NO_MAGIC) + return consumed; + + if ((orig_len == sb->len) && magic == DEL_LF_BEFORE_EMPTY) { + while (sb->len && sb->buf[sb->len - 1] == '\n') + strbuf_setlen(sb, sb->len - 1); + } else if (orig_len != sb->len) { + if (magic == ADD_LF_BEFORE_NON_EMPTY) + strbuf_insertstr(sb, orig_len, "\n"); + else if (magic == ADD_SP_BEFORE_NON_EMPTY) + strbuf_insertstr(sb, orig_len, " "); + } + return consumed + 1; +} + +static size_t userformat_want_item(struct strbuf *sb, const char *placeholder, + void *context) +{ + struct userformat_want *w = context; + + if (*placeholder == '+' || *placeholder == '-' || *placeholder == ' ') + placeholder++; + + switch (*placeholder) { + case 'N': + w->notes = 1; + break; + case 'S': + w->source = 1; + break; + case 'd': + case 'D': + w->decorate = 1; + break; + } + return 0; +} + +void userformat_find_requirements(const char *fmt, struct userformat_want *w) +{ + struct strbuf dummy = STRBUF_INIT; + + if (!fmt) { + if (!user_format) + return; + fmt = user_format; + } + strbuf_expand(&dummy, fmt, userformat_want_item, w); + strbuf_release(&dummy); +} + +void repo_format_commit_message(struct repository *r, + const struct commit *commit, + const char *format, struct strbuf *sb, + const struct pretty_print_context *pretty_ctx) +{ + struct format_commit_context context = { + .repository = r, + .commit = commit, + .pretty_ctx = pretty_ctx, + .wrap_start = sb->len + }; + const char *output_enc = pretty_ctx->output_encoding; + const char *utf8 = "UTF-8"; + + strbuf_expand(sb, format, format_commit_item, &context); + rewrap_message_tail(sb, &context, 0, 0, 0); + + /* + * Convert output to an actual output encoding; note that + * format_commit_item() will always use UTF-8, so we don't + * have to bother if that's what the output wants. 
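(On the '+', '-' and ' ' magic handled in format_commit_item() above, ordinary pretty-format usage rather than patch content: the prefix only acts when the placeholder expands to something, or, for '-', to nothing, which keeps formats tidy on commits without decorations or a body.)

    git log --format='%h %s%+d'   # decorations on their own line, only when present
    git log --format='%s%n%-b'    # drop the blank line when the commit has no body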
+ */ + if (output_enc) { + if (same_encoding(utf8, output_enc)) + output_enc = NULL; + } else { + if (context.commit_encoding && + !same_encoding(context.commit_encoding, utf8)) + output_enc = context.commit_encoding; + } + + if (output_enc) { + size_t outsz; + char *out = reencode_string_len(sb->buf, sb->len, + output_enc, utf8, &outsz); + if (out) + strbuf_attach(sb, out, outsz, outsz + 1); + } + + free(context.commit_encoding); + repo_unuse_commit_buffer(r, commit, context.message); +} + +static void pp_header(struct pretty_print_context *pp, + const char *encoding, + const struct commit *commit, + const char **msg_p, + struct strbuf *sb) +{ + int parents_shown = 0; + + for (;;) { + const char *name, *line = *msg_p; + int linelen = get_one_line(*msg_p); + + if (!linelen) + return; + *msg_p += linelen; + + if (linelen == 1) + /* End of header */ + return; + + if (pp->fmt == CMIT_FMT_RAW) { + strbuf_add(sb, line, linelen); + continue; + } + + if (starts_with(line, "parent ")) { + if (linelen != the_hash_algo->hexsz + 8) + die("bad parent line in commit"); + continue; + } + + if (!parents_shown) { + unsigned num = commit_list_count(commit->parents); + /* with enough slop */ + strbuf_grow(sb, num * (GIT_MAX_HEXSZ + 10) + 20); + add_merge_info(pp, sb, commit); + parents_shown = 1; + } + + /* + * MEDIUM == DEFAULT shows only author with dates. + * FULL shows both authors but not dates. + * FULLER shows both authors and dates. + */ + if (skip_prefix(line, "author ", &name)) { + strbuf_grow(sb, linelen + 80); + pp_user_info(pp, "Author", sb, name, encoding); + } + if (skip_prefix(line, "committer ", &name) && + (pp->fmt == CMIT_FMT_FULL || pp->fmt == CMIT_FMT_FULLER)) { + strbuf_grow(sb, linelen + 80); + pp_user_info(pp, "Commit", sb, name, encoding); + } + } +} + +void pp_title_line(struct pretty_print_context *pp, + const char **msg_p, + struct strbuf *sb, + const char *encoding, + int need_8bit_cte) +{ + static const int max_length = 78; /* per rfc2047 */ + struct strbuf title; + + strbuf_init(&title, 80); + *msg_p = format_subject(&title, *msg_p, + pp->preserve_subject ? 
"\n" : " "); + + strbuf_grow(sb, title.len + 1024); + if (pp->print_email_subject) { + if (pp->rev) + fmt_output_email_subject(sb, pp->rev); + if (pp->encode_email_headers && + needs_rfc2047_encoding(title.buf, title.len)) + add_rfc2047(sb, title.buf, title.len, + encoding, RFC2047_SUBJECT); + else + strbuf_add_wrapped_bytes(sb, title.buf, title.len, + -last_line_length(sb), 1, max_length); + } else { + strbuf_addbuf(sb, &title); + } + strbuf_addch(sb, '\n'); + + if (need_8bit_cte == 0) { + int i; + for (i = 0; i < pp->in_body_headers.nr; i++) { + if (has_non_ascii(pp->in_body_headers.items[i].string)) { + need_8bit_cte = 1; + break; + } + } + } + + if (need_8bit_cte > 0) { + const char *header_fmt = + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=%s\n" + "Content-Transfer-Encoding: 8bit\n"; + strbuf_addf(sb, header_fmt, encoding); + } + if (pp->after_subject) { + strbuf_addstr(sb, pp->after_subject); + } + if (cmit_fmt_is_mail(pp->fmt)) { + strbuf_addch(sb, '\n'); + } + + if (pp->in_body_headers.nr) { + int i; + for (i = 0; i < pp->in_body_headers.nr; i++) { + strbuf_addstr(sb, pp->in_body_headers.items[i].string); + free(pp->in_body_headers.items[i].string); + } + string_list_clear(&pp->in_body_headers, 0); + strbuf_addch(sb, '\n'); + } + + strbuf_release(&title); +} + +static int pp_utf8_width(const char *start, const char *end) +{ + int width = 0; + size_t remain = end - start; + + while (remain) { + int n = utf8_width(&start, &remain); + if (n < 0 || !start) + return -1; + width += n; + } + return width; +} + +static void strbuf_add_tabexpand(struct strbuf *sb, int tabwidth, + const char *line, int linelen) +{ + const char *tab; + + while ((tab = memchr(line, '\t', linelen)) != NULL) { + int width = pp_utf8_width(line, tab); + + /* + * If it wasn't well-formed utf8, or it + * had characters with badly defined + * width (control characters etc), just + * give up on trying to align things. + */ + if (width < 0) + break; + + /* Output the data .. */ + strbuf_add(sb, line, tab - line); + + /* .. and the de-tabified tab */ + strbuf_addchars(sb, ' ', tabwidth - (width % tabwidth)); + + /* Skip over the printed part .. */ + linelen -= tab + 1 - line; + line = tab + 1; + } + + /* + * Print out everything after the last tab without + * worrying about width - there's nothing more to + * align. + */ + strbuf_add(sb, line, linelen); +} + +/* + * pp_handle_indent() prints out the intendation, and + * the whole line (without the final newline), after + * de-tabifying. 
+ */ +static void pp_handle_indent(struct pretty_print_context *pp, + struct strbuf *sb, int indent, + const char *line, int linelen) +{ + strbuf_addchars(sb, ' ', indent); + if (pp->expand_tabs_in_log) + strbuf_add_tabexpand(sb, pp->expand_tabs_in_log, line, linelen); + else + strbuf_add(sb, line, linelen); +} + +static int is_mboxrd_from(const char *line, int len) +{ + /* + * a line matching /^From $/ here would only have len == 4 + * at this point because is_empty_line would've trimmed all + * trailing space + */ + return len > 4 && starts_with(line + strspn(line, ">"), "From "); +} + +void pp_remainder(struct pretty_print_context *pp, + const char **msg_p, + struct strbuf *sb, + int indent) +{ + int first = 1; + for (;;) { + const char *line = *msg_p; + int linelen = get_one_line(line); + *msg_p += linelen; + + if (!linelen) + break; + + if (is_blank_line(line, &linelen)) { + if (first) + continue; + if (pp->fmt == CMIT_FMT_SHORT) + break; + } + first = 0; + + strbuf_grow(sb, linelen + indent + 20); + if (indent) + pp_handle_indent(pp, sb, indent, line, linelen); + else if (pp->expand_tabs_in_log) + strbuf_add_tabexpand(sb, pp->expand_tabs_in_log, + line, linelen); + else { + if (pp->fmt == CMIT_FMT_MBOXRD && + is_mboxrd_from(line, linelen)) + strbuf_addch(sb, '>'); + + strbuf_add(sb, line, linelen); + } + strbuf_addch(sb, '\n'); + } +} + +void pretty_print_commit(struct pretty_print_context *pp, + const struct commit *commit, + struct strbuf *sb) +{ + unsigned long beginning_of_body; + int indent = 4; + const char *msg; + const char *reencoded; + const char *encoding; + int need_8bit_cte = pp->need_8bit_cte; + + if (pp->fmt == CMIT_FMT_USERFORMAT) { + format_commit_message(commit, user_format, sb, pp); + return; + } + + encoding = get_log_output_encoding(); + msg = reencoded = logmsg_reencode(commit, NULL, encoding); + + if (pp->fmt == CMIT_FMT_ONELINE || cmit_fmt_is_mail(pp->fmt)) + indent = 0; + + /* + * We need to check and emit Content-type: to mark it + * as 8-bit if we haven't done so. + */ + if (cmit_fmt_is_mail(pp->fmt) && need_8bit_cte == 0) { + int i, ch, in_body; + + for (in_body = i = 0; (ch = msg[i]); i++) { + if (!in_body) { + /* author could be non 7-bit ASCII but + * the log may be so; skip over the + * header part first. + */ + if (ch == '\n' && msg[i+1] == '\n') + in_body = 1; + } + else if (non_ascii(ch)) { + need_8bit_cte = 1; + break; + } + } + } + + pp_header(pp, encoding, commit, &msg, sb); + if (pp->fmt != CMIT_FMT_ONELINE && !pp->print_email_subject) { + strbuf_addch(sb, '\n'); + } + + /* Skip excess blank lines at the beginning of body, if any... */ + msg = skip_blank_lines(msg); + + /* These formats treat the title line specially. */ + if (pp->fmt == CMIT_FMT_ONELINE || cmit_fmt_is_mail(pp->fmt)) + pp_title_line(pp, &msg, sb, encoding, need_8bit_cte); + + beginning_of_body = sb->len; + if (pp->fmt != CMIT_FMT_ONELINE) + pp_remainder(pp, &msg, sb, indent); + strbuf_rtrim(sb); + + /* Make sure there is an EOLN for the non-oneline case */ + if (pp->fmt != CMIT_FMT_ONELINE) + strbuf_addch(sb, '\n'); + + /* + * The caller may append additional body text in e-mail + * format. Make sure we did not strip the blank line + * between the header and the body. 
+ */ + if (cmit_fmt_is_mail(pp->fmt) && sb->len <= beginning_of_body) + strbuf_addch(sb, '\n'); + + unuse_commit_buffer(commit, reencoded); +} + +void pp_commit_easy(enum cmit_fmt fmt, const struct commit *commit, + struct strbuf *sb) +{ + struct pretty_print_context pp = {0}; + pp.fmt = fmt; + pretty_print_commit(&pp, commit, sb); +} diff --git a/setup.c b/setup.c index 347d7181ae907c..a9161acbe204fe 100644 --- a/setup.c +++ b/setup.c @@ -5,6 +5,7 @@ #include "string-list.h" #include "chdir-notify.h" #include "promisor-remote.h" +#include "quote.h" static int inside_git_dir = -1; static int inside_work_tree = -1; @@ -1025,6 +1026,48 @@ static int canonicalize_ceiling_entry(struct string_list_item *item, } } +struct safe_directory_data { + const char *path; + int is_safe; +}; + +static int safe_directory_cb(const char *key, const char *value, void *d) +{ + struct safe_directory_data *data = d; + + if (strcmp(key, "safe.directory")) + return 0; + + if (!value || !*value) { + data->is_safe = 0; + } else if (!strcmp(value, "*")) { + data->is_safe = 1; + } else { + const char *interpolated = NULL; + + if (!git_config_pathname(&interpolated, key, value) && + !fspathcmp(data->path, interpolated ? interpolated : value)) + data->is_safe = 1; + + free((char *)interpolated); + } + + return 0; +} + +static int ensure_valid_ownership(const char *path) +{ + struct safe_directory_data data = { .path = path }; + + if (!git_env_bool("GIT_TEST_ASSUME_DIFFERENT_OWNER", 0) && + is_path_owned_by_current_user(path)) + return 1; + + read_very_early_config(safe_directory_cb, &data); + + return data.is_safe; +} + enum discovery_result { GIT_DIR_NONE = 0, GIT_DIR_EXPLICIT, @@ -1033,7 +1076,8 @@ enum discovery_result { /* these are errors */ GIT_DIR_HIT_CEILING = -1, GIT_DIR_HIT_MOUNT_POINT = -2, - GIT_DIR_INVALID_GITFILE = -3 + GIT_DIR_INVALID_GITFILE = -3, + GIT_DIR_INVALID_OWNERSHIP = -4 }; /* @@ -1123,11 +1167,15 @@ static enum discovery_result setup_git_directory_gently_1(struct strbuf *dir, } strbuf_setlen(dir, offset); if (gitdirenv) { + if (!ensure_valid_ownership(dir->buf)) + return GIT_DIR_INVALID_OWNERSHIP; strbuf_addstr(gitdir, gitdirenv); return GIT_DIR_DISCOVERED; } if (is_git_directory(dir->buf)) { + if (!ensure_valid_ownership(dir->buf)) + return GIT_DIR_INVALID_OWNERSHIP; strbuf_addstr(gitdir, "."); return GIT_DIR_BARE; } @@ -1259,6 +1307,19 @@ const char *setup_git_directory_gently(int *nongit_ok) dir.buf); *nongit_ok = 1; break; + case GIT_DIR_INVALID_OWNERSHIP: + if (!nongit_ok) { + struct strbuf quoted = STRBUF_INIT; + + sq_quote_buf_pretty("ed, dir.buf); + die(_("unsafe repository ('%s' is owned by someone else)\n" + "To add an exception for this directory, call:\n" + "\n" + "\tgit config --global --add safe.directory %s"), + dir.buf, quoted.buf); + } + *nongit_ok = 1; + break; case GIT_DIR_NONE: /* * As a safeguard against setup_git_directory_gently_1 returning diff --git a/setup.c.orig b/setup.c.orig new file mode 100644 index 00000000000000..5285a4dfac62ed --- /dev/null +++ b/setup.c.orig @@ -0,0 +1,1514 @@ +#include "cache.h" +#include "repository.h" +#include "config.h" +#include "dir.h" +#include "string-list.h" +#include "chdir-notify.h" +#include "promisor-remote.h" +#include "quote.h" + +static int inside_git_dir = -1; +static int inside_work_tree = -1; +static int work_tree_config_is_bogus; + +static struct startup_info the_startup_info; +struct startup_info *startup_info = &the_startup_info; + +/* + * The input parameter must contain an absolute path, and it must already be + * 
normalized. + * + * Find the part of an absolute path that lies inside the work tree by + * dereferencing symlinks outside the work tree, for example: + * /dir1/repo/dir2/file (work tree is /dir1/repo) -> dir2/file + * /dir/file (work tree is /) -> dir/file + * /dir/symlink1/symlink2 (symlink1 points to work tree) -> symlink2 + * /dir/repolink/file (repolink points to /dir/repo) -> file + * /dir/repo (exactly equal to work tree) -> (empty string) + */ +static int abspath_part_inside_repo(char *path) +{ + size_t len; + size_t wtlen; + char *path0; + int off; + const char *work_tree = get_git_work_tree(); + struct strbuf realpath = STRBUF_INIT; + + if (!work_tree) + return -1; + wtlen = strlen(work_tree); + len = strlen(path); + off = offset_1st_component(path); + + /* check if work tree is already the prefix */ + if (wtlen <= len && !fspathncmp(path, work_tree, wtlen)) { + if (path[wtlen] == '/') { + memmove(path, path + wtlen + 1, len - wtlen); + return 0; + } else if (path[wtlen - 1] == '/' || path[wtlen] == '\0') { + /* work tree is the root, or the whole path */ + memmove(path, path + wtlen, len - wtlen + 1); + return 0; + } + /* work tree might match beginning of a symlink to work tree */ + off = wtlen; + } + path0 = path; + path += off; + + /* check each '/'-terminated level */ + while (*path) { + path++; + if (*path == '/') { + *path = '\0'; + strbuf_realpath(&realpath, path0, 1); + if (fspathcmp(realpath.buf, work_tree) == 0) { + memmove(path0, path + 1, len - (path - path0)); + strbuf_release(&realpath); + return 0; + } + *path = '/'; + } + } + + /* check whole path */ + strbuf_realpath(&realpath, path0, 1); + if (fspathcmp(realpath.buf, work_tree) == 0) { + *path0 = '\0'; + strbuf_release(&realpath); + return 0; + } + + strbuf_release(&realpath); + return -1; +} + +/* + * Normalize "path", prepending the "prefix" for relative paths. If + * remaining_prefix is not NULL, return the actual prefix still + * remains in the path. For example, prefix = sub1/sub2/ and path is + * + * foo -> sub1/sub2/foo (full prefix) + * ../foo -> sub1/foo (remaining prefix is sub1/) + * ../../bar -> bar (no remaining prefix) + * ../../sub1/sub2/foo -> sub1/sub2/foo (but no remaining prefix) + * `pwd`/../bar -> sub1/bar (no remaining prefix) + */ +char *prefix_path_gently(const char *prefix, int len, + int *remaining_prefix, const char *path) +{ + const char *orig = path; + char *sanitized; + if (is_absolute_path(orig)) { + sanitized = xmallocz(strlen(path)); + if (remaining_prefix) + *remaining_prefix = 0; + if (normalize_path_copy_len(sanitized, path, remaining_prefix)) { + free(sanitized); + return NULL; + } + if (abspath_part_inside_repo(sanitized)) { + free(sanitized); + return NULL; + } + } else { + sanitized = xstrfmt("%.*s%s", len, len ? prefix : "", path); + if (remaining_prefix) + *remaining_prefix = len; + if (normalize_path_copy_len(sanitized, sanitized, remaining_prefix)) { + free(sanitized); + return NULL; + } + } + return sanitized; +} + +char *prefix_path(const char *prefix, int len, const char *path) +{ + char *r = prefix_path_gently(prefix, len, NULL, path); + if (!r) { + const char *hint_path = get_git_work_tree(); + if (!hint_path) + hint_path = get_git_dir(); + die(_("'%s' is outside repository at '%s'"), path, + absolute_path(hint_path)); + } + return r; +} + +int path_inside_repo(const char *prefix, const char *path) +{ + int len = prefix ? 
strlen(prefix) : 0; + char *r = prefix_path_gently(prefix, len, NULL, path); + if (r) { + free(r); + return 1; + } + return 0; +} + +int check_filename(const char *prefix, const char *arg) +{ + char *to_free = NULL; + struct stat st; + + if (skip_prefix(arg, ":/", &arg)) { + if (!*arg) /* ":/" is root dir, always exists */ + return 1; + prefix = NULL; + } else if (skip_prefix(arg, ":!", &arg) || + skip_prefix(arg, ":^", &arg)) { + if (!*arg) /* excluding everything is silly, but allowed */ + return 1; + } + + if (prefix) + arg = to_free = prefix_filename(prefix, arg); + + if (!lstat(arg, &st)) { + free(to_free); + return 1; /* file exists */ + } + if (is_missing_file_error(errno)) { + free(to_free); + return 0; /* file does not exist */ + } + die_errno(_("failed to stat '%s'"), arg); +} + +static void NORETURN die_verify_filename(struct repository *r, + const char *prefix, + const char *arg, + int diagnose_misspelt_rev) +{ + if (!diagnose_misspelt_rev) + die(_("%s: no such path in the working tree.\n" + "Use 'git -- ...' to specify paths that do not exist locally."), + arg); + /* + * Saying "'(icase)foo' does not exist in the index" when the + * user gave us ":(icase)foo" is just stupid. A magic pathspec + * begins with a colon and is followed by a non-alnum; do not + * let maybe_die_on_misspelt_object_name() even trigger. + */ + if (!(arg[0] == ':' && !isalnum(arg[1]))) + maybe_die_on_misspelt_object_name(r, arg, prefix); + + /* ... or fall back the most general message. */ + die(_("ambiguous argument '%s': unknown revision or path not in the working tree.\n" + "Use '--' to separate paths from revisions, like this:\n" + "'git [...] -- [...]'"), arg); + +} + +/* + * Check for arguments that don't resolve as actual files, + * but which look sufficiently like pathspecs that we'll consider + * them such for the purposes of rev/pathspec DWIM parsing. + */ +static int looks_like_pathspec(const char *arg) +{ + const char *p; + int escaped = 0; + + /* + * Wildcard characters imply the user is looking to match pathspecs + * that aren't in the filesystem. Note that this doesn't include + * backslash even though it's a glob special; by itself it doesn't + * cause any increase in the match. Likewise ignore backslash-escaped + * wildcard characters. + */ + for (p = arg; *p; p++) { + if (escaped) { + escaped = 0; + } else if (is_glob_special(*p)) { + if (*p == '\\') + escaped = 1; + else + return 1; + } + } + + /* long-form pathspec magic */ + if (starts_with(arg, ":(")) + return 1; + + return 0; +} + +/* + * Verify a filename that we got as an argument for a pathspec + * entry. Note that a filename that begins with "-" never verifies + * as true, because even if such a filename were to exist, we want + * it to be preceded by the "--" marker (or we want the user to + * use a format like "./-filename") + * + * The "diagnose_misspelt_rev" is used to provide a user-friendly + * diagnosis when dying upon finding that "name" is not a pathname. + * If set to 1, the diagnosis will try to diagnose "name" as an + * invalid object name (e.g. HEAD:foo). If set to 0, the diagnosis + * will only complain about an inexisting file. + * + * This function is typically called to check that a "file or rev" + * argument is unambiguous. In this case, the caller will want + * diagnose_misspelt_rev == 1 when verifying the first non-rev + * argument (which could have been a revision), and + * diagnose_misspelt_rev == 0 for the next ones (because we already + * saw a filename, there's not ambiguity anymore). 
+ */ +void verify_filename(const char *prefix, + const char *arg, + int diagnose_misspelt_rev) +{ + if (*arg == '-') + die(_("option '%s' must come before non-option arguments"), arg); + if (looks_like_pathspec(arg) || check_filename(prefix, arg)) + return; + die_verify_filename(the_repository, prefix, arg, diagnose_misspelt_rev); +} + +/* + * Opposite of the above: the command line did not have -- marker + * and we parsed the arg as a refname. It should not be interpretable + * as a filename. + */ +void verify_non_filename(const char *prefix, const char *arg) +{ + if (!is_inside_work_tree() || is_inside_git_dir()) + return; + if (*arg == '-') + return; /* flag */ + if (!check_filename(prefix, arg)) + return; + die(_("ambiguous argument '%s': both revision and filename\n" + "Use '--' to separate paths from revisions, like this:\n" + "'git [...] -- [...]'"), arg); +} + +int get_common_dir(struct strbuf *sb, const char *gitdir) +{ + const char *git_env_common_dir = getenv(GIT_COMMON_DIR_ENVIRONMENT); + if (git_env_common_dir) { + strbuf_addstr(sb, git_env_common_dir); + return 1; + } else { + return get_common_dir_noenv(sb, gitdir); + } +} + +int get_common_dir_noenv(struct strbuf *sb, const char *gitdir) +{ + struct strbuf data = STRBUF_INIT; + struct strbuf path = STRBUF_INIT; + int ret = 0; + + strbuf_addf(&path, "%s/commondir", gitdir); + if (file_exists(path.buf)) { + if (strbuf_read_file(&data, path.buf, 0) <= 0) + die_errno(_("failed to read %s"), path.buf); + while (data.len && (data.buf[data.len - 1] == '\n' || + data.buf[data.len - 1] == '\r')) + data.len--; + data.buf[data.len] = '\0'; + strbuf_reset(&path); + if (!is_absolute_path(data.buf)) + strbuf_addf(&path, "%s/", gitdir); + strbuf_addbuf(&path, &data); + strbuf_add_real_path(sb, path.buf); + ret = 1; + } else { + strbuf_addstr(sb, gitdir); + } + + strbuf_release(&data); + strbuf_release(&path); + return ret; +} + +/* + * Test if it looks like we're at a git directory. + * We want to see: + * + * - either an objects/ directory _or_ the proper + * GIT_OBJECT_DIRECTORY environment variable + * - a refs/ directory + * - either a HEAD symlink or a HEAD file that is formatted as + * a proper "ref:", or a regular file HEAD that has a properly + * formatted sha1 object name. 
+ */ +int is_git_directory(const char *suspect) +{ + struct strbuf path = STRBUF_INIT; + int ret = 0; + size_t len; + + /* Check worktree-related signatures */ + strbuf_addstr(&path, suspect); + strbuf_complete(&path, '/'); + strbuf_addstr(&path, "HEAD"); + if (validate_headref(path.buf)) + goto done; + + strbuf_reset(&path); + get_common_dir(&path, suspect); + len = path.len; + + /* Check non-worktree-related signatures */ + if (getenv(DB_ENVIRONMENT)) { + if (access(getenv(DB_ENVIRONMENT), X_OK)) + goto done; + } + else { + strbuf_setlen(&path, len); + strbuf_addstr(&path, "/objects"); + if (access(path.buf, X_OK)) + goto done; + } + + strbuf_setlen(&path, len); + strbuf_addstr(&path, "/refs"); + if (access(path.buf, X_OK)) + goto done; + + ret = 1; +done: + strbuf_release(&path); + return ret; +} + +int is_nonbare_repository_dir(struct strbuf *path) +{ + int ret = 0; + int gitfile_error; + size_t orig_path_len = path->len; + assert(orig_path_len != 0); + strbuf_complete(path, '/'); + strbuf_addstr(path, ".git"); + if (read_gitfile_gently(path->buf, &gitfile_error) || is_git_directory(path->buf)) + ret = 1; + if (gitfile_error == READ_GITFILE_ERR_OPEN_FAILED || + gitfile_error == READ_GITFILE_ERR_READ_FAILED) + ret = 1; + strbuf_setlen(path, orig_path_len); + return ret; +} + +int is_inside_git_dir(void) +{ + if (inside_git_dir < 0) + inside_git_dir = is_inside_dir(get_git_dir()); + return inside_git_dir; +} + +int is_inside_work_tree(void) +{ + if (inside_work_tree < 0) + inside_work_tree = is_inside_dir(get_git_work_tree()); + return inside_work_tree; +} + +void setup_work_tree(void) +{ + const char *work_tree; + static int initialized = 0; + + if (initialized) + return; + + if (work_tree_config_is_bogus) + die(_("unable to set up work tree using invalid config")); + + work_tree = get_git_work_tree(); + if (!work_tree || chdir_notify(work_tree)) + die(_("this operation must be run in a work tree")); + + /* + * Make sure subsequent git processes find correct worktree + * if $GIT_WORK_TREE is set relative + */ + if (getenv(GIT_WORK_TREE_ENVIRONMENT)) + setenv(GIT_WORK_TREE_ENVIRONMENT, ".", 1); + + initialized = 1; +} + +static int read_worktree_config(const char *var, const char *value, void *vdata) +{ + struct repository_format *data = vdata; + + if (strcmp(var, "core.bare") == 0) { + data->is_bare = git_config_bool(var, value); + } else if (strcmp(var, "core.worktree") == 0) { + if (!value) + return config_error_nonbool(var); + free(data->work_tree); + data->work_tree = xstrdup(value); + } + return 0; +} + +enum extension_result { + EXTENSION_ERROR = -1, /* compatible with error(), etc */ + EXTENSION_UNKNOWN = 0, + EXTENSION_OK = 1 +}; + +/* + * Do not add new extensions to this function. It handles extensions which are + * respected even in v0-format repositories for historical compatibility. + */ +static enum extension_result handle_extension_v0(const char *var, + const char *value, + const char *ext, + struct repository_format *data) +{ + if (!strcmp(ext, "noop")) { + return EXTENSION_OK; + } else if (!strcmp(ext, "preciousobjects")) { + data->precious_objects = git_config_bool(var, value); + return EXTENSION_OK; + } else if (!strcmp(ext, "partialclone")) { + data->partial_clone = xstrdup(value); + return EXTENSION_OK; + } else if (!strcmp(ext, "worktreeconfig")) { + data->worktree_config = git_config_bool(var, value); + return EXTENSION_OK; + } + + return EXTENSION_UNKNOWN; +} + +/* + * Record any new extensions in this function. 
+ */ +static enum extension_result handle_extension(const char *var, + const char *value, + const char *ext, + struct repository_format *data) +{ + if (!strcmp(ext, "noop-v1")) { + return EXTENSION_OK; + } else if (!strcmp(ext, "objectformat")) { + int format; + + if (!value) + return config_error_nonbool(var); + format = hash_algo_by_name(value); + if (format == GIT_HASH_UNKNOWN) + return error("invalid value for 'extensions.objectformat'"); + data->hash_algo = format; + return EXTENSION_OK; + } + return EXTENSION_UNKNOWN; +} + +static int check_repo_format(const char *var, const char *value, void *vdata) +{ + struct repository_format *data = vdata; + const char *ext; + + if (strcmp(var, "core.repositoryformatversion") == 0) + data->version = git_config_int(var, value); + else if (skip_prefix(var, "extensions.", &ext)) { + switch (handle_extension_v0(var, value, ext, data)) { + case EXTENSION_ERROR: + return -1; + case EXTENSION_OK: + return 0; + case EXTENSION_UNKNOWN: + break; + } + + switch (handle_extension(var, value, ext, data)) { + case EXTENSION_ERROR: + return -1; + case EXTENSION_OK: + string_list_append(&data->v1_only_extensions, ext); + return 0; + case EXTENSION_UNKNOWN: + string_list_append(&data->unknown_extensions, ext); + return 0; + } + } + + return read_worktree_config(var, value, vdata); +} + +static int check_repository_format_gently(const char *gitdir, struct repository_format *candidate, int *nongit_ok) +{ + struct strbuf sb = STRBUF_INIT; + struct strbuf err = STRBUF_INIT; + int has_common; + + has_common = get_common_dir(&sb, gitdir); + strbuf_addstr(&sb, "/config"); + read_repository_format(candidate, sb.buf); + strbuf_release(&sb); + + /* + * For historical use of check_repository_format() in git-init, + * we treat a missing config as a silent "ok", even when nongit_ok + * is unset. 
+ */ + if (candidate->version < 0) + return 0; + + if (verify_repository_format(candidate, &err) < 0) { + if (nongit_ok) { + warning("%s", err.buf); + strbuf_release(&err); + *nongit_ok = -1; + return -1; + } + die("%s", err.buf); + } + + repository_format_precious_objects = candidate->precious_objects; + repository_format_worktree_config = candidate->worktree_config; + string_list_clear(&candidate->unknown_extensions, 0); + string_list_clear(&candidate->v1_only_extensions, 0); + + if (repository_format_worktree_config) { + /* + * pick up core.bare and core.worktree from per-worktree + * config if present + */ + strbuf_addf(&sb, "%s/config.worktree", gitdir); + git_config_from_file(read_worktree_config, sb.buf, candidate); + strbuf_release(&sb); + has_common = 0; + } + + if (!has_common) { + if (candidate->is_bare != -1) { + is_bare_repository_cfg = candidate->is_bare; + if (is_bare_repository_cfg == 1) + inside_work_tree = -1; + } + if (candidate->work_tree) { + free(git_work_tree_cfg); + git_work_tree_cfg = xstrdup(candidate->work_tree); + inside_work_tree = -1; + } + } + + return 0; +} + +int upgrade_repository_format(int target_version) +{ + struct strbuf sb = STRBUF_INIT; + struct strbuf err = STRBUF_INIT; + struct strbuf repo_version = STRBUF_INIT; + struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT; + + strbuf_git_common_path(&sb, the_repository, "config"); + read_repository_format(&repo_fmt, sb.buf); + strbuf_release(&sb); + + if (repo_fmt.version >= target_version) + return 0; + + if (verify_repository_format(&repo_fmt, &err) < 0) { + error("cannot upgrade repository format from %d to %d: %s", + repo_fmt.version, target_version, err.buf); + strbuf_release(&err); + return -1; + } + if (!repo_fmt.version && repo_fmt.unknown_extensions.nr) + return error("cannot upgrade repository format: " + "unknown extension %s", + repo_fmt.unknown_extensions.items[0].string); + + strbuf_addf(&repo_version, "%d", target_version); + git_config_set("core.repositoryformatversion", repo_version.buf); + strbuf_release(&repo_version); + return 1; +} + +static void init_repository_format(struct repository_format *format) +{ + const struct repository_format fresh = REPOSITORY_FORMAT_INIT; + + memcpy(format, &fresh, sizeof(fresh)); +} + +int read_repository_format(struct repository_format *format, const char *path) +{ + clear_repository_format(format); + git_config_from_file(check_repo_format, path, format); + if (format->version == -1) + clear_repository_format(format); + return format->version; +} + +void clear_repository_format(struct repository_format *format) +{ + string_list_clear(&format->unknown_extensions, 0); + string_list_clear(&format->v1_only_extensions, 0); + free(format->work_tree); + free(format->partial_clone); + init_repository_format(format); +} + +int verify_repository_format(const struct repository_format *format, + struct strbuf *err) +{ + if (GIT_REPO_VERSION_READ < format->version) { + strbuf_addf(err, _("Expected git repo version <= %d, found %d"), + GIT_REPO_VERSION_READ, format->version); + return -1; + } + + if (format->version >= 1 && format->unknown_extensions.nr) { + int i; + + strbuf_addstr(err, Q_("unknown repository extension found:", + "unknown repository extensions found:", + format->unknown_extensions.nr)); + + for (i = 0; i < format->unknown_extensions.nr; i++) + strbuf_addf(err, "\n\t%s", + format->unknown_extensions.items[i].string); + return -1; + } + + if (format->version == 0 && format->v1_only_extensions.nr) { + int i; + + strbuf_addstr(err, + Q_("repo 
version is 0, but v1-only extension found:", + "repo version is 0, but v1-only extensions found:", + format->v1_only_extensions.nr)); + + for (i = 0; i < format->v1_only_extensions.nr; i++) + strbuf_addf(err, "\n\t%s", + format->v1_only_extensions.items[i].string); + return -1; + } + + return 0; +} + +void read_gitfile_error_die(int error_code, const char *path, const char *dir) +{ + switch (error_code) { + case READ_GITFILE_ERR_STAT_FAILED: + case READ_GITFILE_ERR_NOT_A_FILE: + /* non-fatal; follow return path */ + break; + case READ_GITFILE_ERR_OPEN_FAILED: + die_errno(_("error opening '%s'"), path); + case READ_GITFILE_ERR_TOO_LARGE: + die(_("too large to be a .git file: '%s'"), path); + case READ_GITFILE_ERR_READ_FAILED: + die(_("error reading %s"), path); + case READ_GITFILE_ERR_INVALID_FORMAT: + die(_("invalid gitfile format: %s"), path); + case READ_GITFILE_ERR_NO_PATH: + die(_("no path in gitfile: %s"), path); + case READ_GITFILE_ERR_NOT_A_REPO: + die(_("not a git repository: %s"), dir); + default: + BUG("unknown error code"); + } +} + +/* + * Try to read the location of the git directory from the .git file, + * return path to git directory if found. The return value comes from + * a shared buffer. + * + * On failure, if return_error_code is not NULL, return_error_code + * will be set to an error code and NULL will be returned. If + * return_error_code is NULL the function will die instead (for most + * cases). + */ +const char *read_gitfile_gently(const char *path, int *return_error_code) +{ + const int max_file_size = 1 << 20; /* 1MB */ + int error_code = 0; + char *buf = NULL; + char *dir = NULL; + const char *slash; + struct stat st; + int fd; + ssize_t len; + static struct strbuf realpath = STRBUF_INIT; + + if (stat(path, &st)) { + /* NEEDSWORK: discern between ENOENT vs other errors */ + error_code = READ_GITFILE_ERR_STAT_FAILED; + goto cleanup_return; + } + if (!S_ISREG(st.st_mode)) { + error_code = READ_GITFILE_ERR_NOT_A_FILE; + goto cleanup_return; + } + if (st.st_size > max_file_size) { + error_code = READ_GITFILE_ERR_TOO_LARGE; + goto cleanup_return; + } + fd = open(path, O_RDONLY); + if (fd < 0) { + error_code = READ_GITFILE_ERR_OPEN_FAILED; + goto cleanup_return; + } + buf = xmallocz(st.st_size); + len = read_in_full(fd, buf, st.st_size); + close(fd); + if (len != st.st_size) { + error_code = READ_GITFILE_ERR_READ_FAILED; + goto cleanup_return; + } + if (!starts_with(buf, "gitdir: ")) { + error_code = READ_GITFILE_ERR_INVALID_FORMAT; + goto cleanup_return; + } + while (buf[len - 1] == '\n' || buf[len - 1] == '\r') + len--; + if (len < 9) { + error_code = READ_GITFILE_ERR_NO_PATH; + goto cleanup_return; + } + buf[len] = '\0'; + dir = buf + 8; + + if (!is_absolute_path(dir) && (slash = strrchr(path, '/'))) { + size_t pathlen = slash+1 - path; + dir = xstrfmt("%.*s%.*s", (int)pathlen, path, + (int)(len - 8), buf + 8); + free(buf); + buf = dir; + } + if (!is_git_directory(dir)) { + error_code = READ_GITFILE_ERR_NOT_A_REPO; + goto cleanup_return; + } + + strbuf_realpath(&realpath, dir, 1); + path = realpath.buf; + +cleanup_return: + if (return_error_code) + *return_error_code = error_code; + else if (error_code) + read_gitfile_error_die(error_code, path, dir); + + free(buf); + return error_code ? 
NULL : path; +} + +static const char *setup_explicit_git_dir(const char *gitdirenv, + struct strbuf *cwd, + struct repository_format *repo_fmt, + int *nongit_ok) +{ + const char *work_tree_env = getenv(GIT_WORK_TREE_ENVIRONMENT); + const char *worktree; + char *gitfile; + int offset; + + if (PATH_MAX - 40 < strlen(gitdirenv)) + die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT); + + gitfile = (char*)read_gitfile(gitdirenv); + if (gitfile) { + gitfile = xstrdup(gitfile); + gitdirenv = gitfile; + } + + if (!is_git_directory(gitdirenv)) { + if (nongit_ok) { + *nongit_ok = 1; + free(gitfile); + return NULL; + } + die(_("not a git repository: '%s'"), gitdirenv); + } + + if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) { + free(gitfile); + return NULL; + } + + /* #3, #7, #11, #15, #19, #23, #27, #31 (see t1510) */ + if (work_tree_env) + set_git_work_tree(work_tree_env); + else if (is_bare_repository_cfg > 0) { + if (git_work_tree_cfg) { + /* #22.2, #30 */ + warning("core.bare and core.worktree do not make sense"); + work_tree_config_is_bogus = 1; + } + + /* #18, #26 */ + set_git_dir(gitdirenv, 0); + free(gitfile); + return NULL; + } + else if (git_work_tree_cfg) { /* #6, #14 */ + if (is_absolute_path(git_work_tree_cfg)) + set_git_work_tree(git_work_tree_cfg); + else { + char *core_worktree; + if (chdir(gitdirenv)) + die_errno(_("cannot chdir to '%s'"), gitdirenv); + if (chdir(git_work_tree_cfg)) + die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg); + core_worktree = xgetcwd(); + if (chdir(cwd->buf)) + die_errno(_("cannot come back to cwd")); + set_git_work_tree(core_worktree); + free(core_worktree); + } + } + else if (!git_env_bool(GIT_IMPLICIT_WORK_TREE_ENVIRONMENT, 1)) { + /* #16d */ + set_git_dir(gitdirenv, 0); + free(gitfile); + return NULL; + } + else /* #2, #10 */ + set_git_work_tree("."); + + /* set_git_work_tree() must have been called by now */ + worktree = get_git_work_tree(); + + /* both get_git_work_tree() and cwd are already normalized */ + if (!strcmp(cwd->buf, worktree)) { /* cwd == worktree */ + set_git_dir(gitdirenv, 0); + free(gitfile); + return NULL; + } + + offset = dir_inside_of(cwd->buf, worktree); + if (offset >= 0) { /* cwd inside worktree? 
*/ + set_git_dir(gitdirenv, 1); + if (chdir(worktree)) + die_errno(_("cannot chdir to '%s'"), worktree); + strbuf_addch(cwd, '/'); + free(gitfile); + return cwd->buf + offset; + } + + /* cwd outside worktree */ + set_git_dir(gitdirenv, 0); + free(gitfile); + return NULL; +} + +static const char *setup_discovered_git_dir(const char *gitdir, + struct strbuf *cwd, int offset, + struct repository_format *repo_fmt, + int *nongit_ok) +{ + if (check_repository_format_gently(gitdir, repo_fmt, nongit_ok)) + return NULL; + + /* --work-tree is set without --git-dir; use discovered one */ + if (getenv(GIT_WORK_TREE_ENVIRONMENT) || git_work_tree_cfg) { + char *to_free = NULL; + const char *ret; + + if (offset != cwd->len && !is_absolute_path(gitdir)) + gitdir = to_free = real_pathdup(gitdir, 1); + if (chdir(cwd->buf)) + die_errno(_("cannot come back to cwd")); + ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); + free(to_free); + return ret; + } + + /* #16.2, #17.2, #20.2, #21.2, #24, #25, #28, #29 (see t1510) */ + if (is_bare_repository_cfg > 0) { + set_git_dir(gitdir, (offset != cwd->len)); + if (chdir(cwd->buf)) + die_errno(_("cannot come back to cwd")); + return NULL; + } + + /* #0, #1, #5, #8, #9, #12, #13 */ + set_git_work_tree("."); + if (strcmp(gitdir, DEFAULT_GIT_DIR_ENVIRONMENT)) + set_git_dir(gitdir, 0); + inside_git_dir = 0; + inside_work_tree = 1; + if (offset >= cwd->len) + return NULL; + + /* Make "offset" point past the '/' (already the case for root dirs) */ + if (offset != offset_1st_component(cwd->buf)) + offset++; + /* Add a '/' at the end */ + strbuf_addch(cwd, '/'); + return cwd->buf + offset; +} + +/* #16.1, #17.1, #20.1, #21.1, #22.1 (see t1510) */ +static const char *setup_bare_git_dir(struct strbuf *cwd, int offset, + struct repository_format *repo_fmt, + int *nongit_ok) +{ + int root_len; + + if (check_repository_format_gently(".", repo_fmt, nongit_ok)) + return NULL; + + setenv(GIT_IMPLICIT_WORK_TREE_ENVIRONMENT, "0", 1); + + /* --work-tree is set without --git-dir; use discovered one */ + if (getenv(GIT_WORK_TREE_ENVIRONMENT) || git_work_tree_cfg) { + static const char *gitdir; + + gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset); + if (chdir(cwd->buf)) + die_errno(_("cannot come back to cwd")); + return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok); + } + + inside_git_dir = 1; + inside_work_tree = 0; + if (offset != cwd->len) { + if (chdir(cwd->buf)) + die_errno(_("cannot come back to cwd")); + root_len = offset_1st_component(cwd->buf); + strbuf_setlen(cwd, offset > root_len ? offset : root_len); + set_git_dir(cwd->buf, 0); + } + else + set_git_dir(".", 0); + return NULL; +} + +static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_len) +{ + struct stat buf; + if (stat(path, &buf)) { + die_errno(_("failed to stat '%*s%s%s'"), + prefix_len, + prefix ? prefix : "", + prefix ? "/" : "", path); + } + return buf.st_dev; +} + +/* + * A "string_list_each_func_t" function that canonicalizes an entry + * from GIT_CEILING_DIRECTORIES using real_pathdup(), or + * discards it if unusable. The presence of an empty entry in + * GIT_CEILING_DIRECTORIES turns off canonicalization for all + * subsequent entries. 
+ */ +static int canonicalize_ceiling_entry(struct string_list_item *item, + void *cb_data) +{ + int *empty_entry_found = cb_data; + char *ceil = item->string; + + if (!*ceil) { + *empty_entry_found = 1; + return 0; + } else if (!is_absolute_path(ceil)) { + return 0; + } else if (*empty_entry_found) { + /* Keep entry but do not canonicalize it */ + return 1; + } else { + char *real_path = real_pathdup(ceil, 0); + if (!real_path) { + return 0; + } + free(item->string); + item->string = real_path; + return 1; + } +} + +struct safe_directory_data { + const char *path; + int is_safe; +}; + +static int safe_directory_cb(const char *key, const char *value, void *d) +{ + struct safe_directory_data *data = d; + + if (strcmp(key, "safe.directory")) + return 0; + + if (!value || !*value) + data->is_safe = 0; + else { + const char *interpolated = NULL; + + if (!git_config_pathname(&interpolated, key, value) && + !fspathcmp(data->path, interpolated ? interpolated : value)) + data->is_safe = 1; + + free((char *)interpolated); + } + + return 0; +} + +static int ensure_valid_ownership(const char *path) +{ + struct safe_directory_data data = { .path = path }; + + if (!git_env_bool("GIT_TEST_ASSUME_DIFFERENT_OWNER", 0) && + is_path_owned_by_current_user(path)) + return 1; + + read_very_early_config(safe_directory_cb, &data); + + return data.is_safe; +} + +enum discovery_result { + GIT_DIR_NONE = 0, + GIT_DIR_EXPLICIT, + GIT_DIR_DISCOVERED, + GIT_DIR_BARE, + /* these are errors */ + GIT_DIR_HIT_CEILING = -1, + GIT_DIR_HIT_MOUNT_POINT = -2, + GIT_DIR_INVALID_GITFILE = -3, + GIT_DIR_INVALID_OWNERSHIP = -4 +}; + +/* + * We cannot decide in this function whether we are in the work tree or + * not, since the config can only be read _after_ this function was called. + * + * Also, we avoid changing any global state (such as the current working + * directory) to allow early callers. + * + * The directory where the search should start needs to be passed in via the + * `dir` parameter; upon return, the `dir` buffer will contain the path of + * the directory where the search ended, and `gitdir` will contain the path of + * the discovered .git/ directory, if any. If `gitdir` is not absolute, it + * is relative to `dir` (i.e. *not* necessarily the cwd). + */ +static enum discovery_result setup_git_directory_gently_1(struct strbuf *dir, + struct strbuf *gitdir, + int die_on_error) +{ + const char *env_ceiling_dirs = getenv(CEILING_DIRECTORIES_ENVIRONMENT); + struct string_list ceiling_dirs = STRING_LIST_INIT_DUP; + const char *gitdirenv; + int ceil_offset = -1, min_offset = offset_1st_component(dir->buf); + dev_t current_device = 0; + int one_filesystem = 1; + + /* + * If GIT_DIR is set explicitly, we're not going + * to do any discovery, but we still do repository + * validation. 
+ */ + gitdirenv = getenv(GIT_DIR_ENVIRONMENT); + if (gitdirenv) { + strbuf_addstr(gitdir, gitdirenv); + return GIT_DIR_EXPLICIT; + } + + if (env_ceiling_dirs) { + int empty_entry_found = 0; + + string_list_split(&ceiling_dirs, env_ceiling_dirs, PATH_SEP, -1); + filter_string_list(&ceiling_dirs, 0, + canonicalize_ceiling_entry, &empty_entry_found); + ceil_offset = longest_ancestor_length(dir->buf, &ceiling_dirs); + string_list_clear(&ceiling_dirs, 0); + } + + if (ceil_offset < 0) + ceil_offset = min_offset - 2; + + if (min_offset && min_offset == dir->len && + !is_dir_sep(dir->buf[min_offset - 1])) { + strbuf_addch(dir, '/'); + min_offset++; + } + + /* + * Test in the following order (relative to the dir): + * - .git (file containing "gitdir: ") + * - .git/ + * - ./ (bare) + * - ../.git + * - ../.git/ + * - ../ (bare) + * - ../../.git + * etc. + */ + one_filesystem = !git_env_bool("GIT_DISCOVERY_ACROSS_FILESYSTEM", 0); + if (one_filesystem) + current_device = get_device_or_die(dir->buf, NULL, 0); + for (;;) { + int offset = dir->len, error_code = 0; + + if (offset > min_offset) + strbuf_addch(dir, '/'); + strbuf_addstr(dir, DEFAULT_GIT_DIR_ENVIRONMENT); + gitdirenv = read_gitfile_gently(dir->buf, die_on_error ? + NULL : &error_code); + if (!gitdirenv) { + if (die_on_error || + error_code == READ_GITFILE_ERR_NOT_A_FILE) { + /* NEEDSWORK: fail if .git is not file nor dir */ + if (is_git_directory(dir->buf)) + gitdirenv = DEFAULT_GIT_DIR_ENVIRONMENT; + } else if (error_code != READ_GITFILE_ERR_STAT_FAILED) + return GIT_DIR_INVALID_GITFILE; + } + strbuf_setlen(dir, offset); + if (gitdirenv) { + if (!ensure_valid_ownership(dir->buf)) + return GIT_DIR_INVALID_OWNERSHIP; + strbuf_addstr(gitdir, gitdirenv); + return GIT_DIR_DISCOVERED; + } + + if (is_git_directory(dir->buf)) { + if (!ensure_valid_ownership(dir->buf)) + return GIT_DIR_INVALID_OWNERSHIP; + strbuf_addstr(gitdir, "."); + return GIT_DIR_BARE; + } + + if (offset <= min_offset) + return GIT_DIR_HIT_CEILING; + + while (--offset > ceil_offset && !is_dir_sep(dir->buf[offset])) + ; /* continue */ + if (offset <= ceil_offset) + return GIT_DIR_HIT_CEILING; + + strbuf_setlen(dir, offset > min_offset ? offset : min_offset); + if (one_filesystem && + current_device != get_device_or_die(dir->buf, NULL, offset)) + return GIT_DIR_HIT_MOUNT_POINT; + } +} + +int discover_git_directory(struct strbuf *commondir, + struct strbuf *gitdir) +{ + struct strbuf dir = STRBUF_INIT, err = STRBUF_INIT; + size_t gitdir_offset = gitdir->len, cwd_len; + size_t commondir_offset = commondir->len; + struct repository_format candidate = REPOSITORY_FORMAT_INIT; + + if (strbuf_getcwd(&dir)) + return -1; + + cwd_len = dir.len; + if (setup_git_directory_gently_1(&dir, gitdir, 0) <= 0) { + strbuf_release(&dir); + return -1; + } + + /* + * The returned gitdir is relative to dir, and if dir does not reflect + * the current working directory, we simply make the gitdir absolute. + */ + if (dir.len < cwd_len && !is_absolute_path(gitdir->buf + gitdir_offset)) { + /* Avoid a trailing "/." 
*/
+		if (!strcmp(".", gitdir->buf + gitdir_offset))
+			strbuf_setlen(gitdir, gitdir_offset);
+		else
+			strbuf_addch(&dir, '/');
+		strbuf_insert(gitdir, gitdir_offset, dir.buf, dir.len);
+	}
+
+	get_common_dir(commondir, gitdir->buf + gitdir_offset);
+
+	strbuf_reset(&dir);
+	strbuf_addf(&dir, "%s/config", commondir->buf + commondir_offset);
+	read_repository_format(&candidate, dir.buf);
+	strbuf_release(&dir);
+
+	if (verify_repository_format(&candidate, &err) < 0) {
+		warning("ignoring git dir '%s': %s",
+			gitdir->buf + gitdir_offset, err.buf);
+		strbuf_release(&err);
+		strbuf_setlen(commondir, commondir_offset);
+		strbuf_setlen(gitdir, gitdir_offset);
+		clear_repository_format(&candidate);
+		return -1;
+	}
+
+	/* take ownership of candidate.partial_clone */
+	the_repository->repository_format_partial_clone =
+		candidate.partial_clone;
+	candidate.partial_clone = NULL;
+
+	clear_repository_format(&candidate);
+	return 0;
+}
+
+const char *setup_git_directory_gently(int *nongit_ok)
+{
+	static struct strbuf cwd = STRBUF_INIT;
+	struct strbuf dir = STRBUF_INIT, gitdir = STRBUF_INIT;
+	const char *prefix = NULL;
+	struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
+
+	/*
+	 * We may have read an incomplete configuration before
+	 * setting-up the git directory. If so, clear the cache so
+	 * that the next queries to the configuration reload complete
+	 * configuration (including the per-repo config file that we
+	 * ignored previously).
+	 */
+	git_config_clear();
+
+	/*
+	 * Let's assume that we are in a git repository.
+	 * If it turns out later that we are somewhere else, the value will be
+	 * updated accordingly.
+	 */
+	if (nongit_ok)
+		*nongit_ok = 0;
+
+	if (strbuf_getcwd(&cwd))
+		die_errno(_("Unable to read current working directory"));
+	strbuf_addbuf(&dir, &cwd);
+
+	switch (setup_git_directory_gently_1(&dir, &gitdir, 1)) {
+	case GIT_DIR_EXPLICIT:
+		prefix = setup_explicit_git_dir(gitdir.buf, &cwd, &repo_fmt, nongit_ok);
+		break;
+	case GIT_DIR_DISCOVERED:
+		if (dir.len < cwd.len && chdir(dir.buf))
+			die(_("cannot change to '%s'"), dir.buf);
+		prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len,
+						  &repo_fmt, nongit_ok);
+		break;
+	case GIT_DIR_BARE:
+		if (dir.len < cwd.len && chdir(dir.buf))
+			die(_("cannot change to '%s'"), dir.buf);
+		prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
+		break;
+	case GIT_DIR_HIT_CEILING:
+		if (!nongit_ok)
+			die(_("not a git repository (or any of the parent directories): %s"),
+			    DEFAULT_GIT_DIR_ENVIRONMENT);
+		*nongit_ok = 1;
+		break;
+	case GIT_DIR_HIT_MOUNT_POINT:
+		if (!nongit_ok)
+			die(_("not a git repository (or any parent up to mount point %s)\n"
+			      "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
+			    dir.buf);
+		*nongit_ok = 1;
+		break;
+	case GIT_DIR_INVALID_OWNERSHIP:
+		if (!nongit_ok) {
+			struct strbuf quoted = STRBUF_INIT;
+
+			sq_quote_buf_pretty(&quoted, dir.buf);
+			die(_("unsafe repository ('%s' is owned by someone else)\n"
+			      "To add an exception for this directory, call:\n"
+			      "\n"
+			      "\tgit config --global --add safe.directory %s"),
+			    dir.buf, quoted.buf);
+		}
+		*nongit_ok = 1;
+		break;
+	case GIT_DIR_NONE:
+		/*
+		 * As a safeguard against setup_git_directory_gently_1 returning
+		 * this value, fallthrough to BUG. Otherwise it is possible to
+		 * set startup_info->have_repository to 1 when we did nothing to
+		 * find a repository.
+		 */
+	default:
+		BUG("unhandled setup_git_directory_1() result");
+	}
+
+	/*
+	 * At this point, nongit_ok is stable.
If it is non-NULL and points + * to a non-zero value, then this means that we haven't found a + * repository and that the caller expects startup_info to reflect + * this. + * + * Regardless of the state of nongit_ok, startup_info->prefix and + * the GIT_PREFIX environment variable must always match. For details + * see Documentation/config/alias.txt. + */ + if (nongit_ok && *nongit_ok) + startup_info->have_repository = 0; + else + startup_info->have_repository = 1; + + /* + * Not all paths through the setup code will call 'set_git_dir()' (which + * directly sets up the environment) so in order to guarantee that the + * environment is in a consistent state after setup, explicitly setup + * the environment if we have a repository. + * + * NEEDSWORK: currently we allow bogus GIT_DIR values to be set in some + * code paths so we also need to explicitly setup the environment if + * the user has set GIT_DIR. It may be beneficial to disallow bogus + * GIT_DIR values at some point in the future. + */ + if (/* GIT_DIR_EXPLICIT, GIT_DIR_DISCOVERED, GIT_DIR_BARE */ + startup_info->have_repository || + /* GIT_DIR_EXPLICIT */ + getenv(GIT_DIR_ENVIRONMENT)) { + if (!the_repository->gitdir) { + const char *gitdir = getenv(GIT_DIR_ENVIRONMENT); + if (!gitdir) + gitdir = DEFAULT_GIT_DIR_ENVIRONMENT; + setup_git_env(gitdir); + } + if (startup_info->have_repository) { + repo_set_hash_algo(the_repository, repo_fmt.hash_algo); + /* take ownership of repo_fmt.partial_clone */ + the_repository->repository_format_partial_clone = + repo_fmt.partial_clone; + repo_fmt.partial_clone = NULL; + } + } + /* + * Since precompose_string_if_needed() needs to look at + * the core.precomposeunicode configuration, this + * has to happen after the above block that finds + * out where the repository is, i.e. a preparation + * for calling git_config_get_bool(). + */ + if (prefix) { + prefix = precompose_string_if_needed(prefix); + startup_info->prefix = prefix; + setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1); + } else { + startup_info->prefix = NULL; + setenv(GIT_PREFIX_ENVIRONMENT, "", 1); + } + + + strbuf_release(&dir); + strbuf_release(&gitdir); + clear_repository_format(&repo_fmt); + + return prefix; +} + +int git_config_perm(const char *var, const char *value) +{ + int i; + char *endptr; + + if (value == NULL) + return PERM_GROUP; + + if (!strcmp(value, "umask")) + return PERM_UMASK; + if (!strcmp(value, "group")) + return PERM_GROUP; + if (!strcmp(value, "all") || + !strcmp(value, "world") || + !strcmp(value, "everybody")) + return PERM_EVERYBODY; + + /* Parse octal numbers */ + i = strtol(value, &endptr, 8); + + /* If not an octal number, maybe true/false? */ + if (*endptr != 0) + return git_config_bool(var, value) ? PERM_GROUP : PERM_UMASK; + + /* + * Treat values 0, 1 and 2 as compatibility cases, otherwise it is + * a chmod value to restrict to. + */ + switch (i) { + case PERM_UMASK: /* 0 */ + return PERM_UMASK; + case OLD_PERM_GROUP: /* 1 */ + return PERM_GROUP; + case OLD_PERM_EVERYBODY: /* 2 */ + return PERM_EVERYBODY; + } + + /* A filemode value was given: 0xxx */ + + if ((i & 0600) != 0600) + die(_("problem with core.sharedRepository filemode value " + "(0%.3o).\nThe owner of files must always have " + "read and write permissions."), i); + + /* + * Mask filemode value. Others can not get write permission. + * x flags for directories are handled separately. 
+ */ + return -(i & 0666); +} + +void check_repository_format(struct repository_format *fmt) +{ + struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT; + if (!fmt) + fmt = &repo_fmt; + check_repository_format_gently(get_git_dir(), fmt, NULL); + startup_info->have_repository = 1; + repo_set_hash_algo(the_repository, fmt->hash_algo); + the_repository->repository_format_partial_clone = + xstrdup_or_null(fmt->partial_clone); + clear_repository_format(&repo_fmt); +} + +/* + * Returns the "prefix", a path to the current working directory + * relative to the work tree root, or NULL, if the current working + * directory is not a strict subdirectory of the work tree root. The + * prefix always ends with a '/' character. + */ +const char *setup_git_directory(void) +{ + return setup_git_directory_gently(NULL); +} + +const char *resolve_gitdir_gently(const char *suspect, int *return_error_code) +{ + if (is_git_directory(suspect)) + return suspect; + return read_gitfile_gently(suspect, return_error_code); +} + +/* if any standard file descriptor is missing open it to /dev/null */ +void sanitize_stdfds(void) +{ + int fd = xopen("/dev/null", O_RDWR); + while (fd < 2) + fd = xdup(fd); + if (fd > 2) + close(fd); +} + +int daemonize(void) +{ +#ifdef NO_POSIX_GOODIES + errno = ENOSYS; + return -1; +#else + switch (fork()) { + case 0: + break; + case -1: + die_errno(_("fork failed")); + default: + exit(0); + } + if (setsid() == -1) + die_errno(_("setsid failed")); + close(0); + close(1); + close(2); + sanitize_stdfds(); + return 0; +#endif +} diff --git a/shell.c b/shell.c index 811e13b9c9597e..7ff4109db7058b 100644 --- a/shell.c +++ b/shell.c @@ -47,6 +47,8 @@ static void cd_to_homedir(void) die("could not chdir to user's home directory"); } +#define MAX_INTERACTIVE_COMMAND (4*1024*1024) + static void run_shell(void) { int done = 0; @@ -67,22 +69,46 @@ static void run_shell(void) run_command_v_opt(help_argv, RUN_SILENT_EXEC_FAILURE); do { - struct strbuf line = STRBUF_INIT; const char *prog; char *full_cmd; char *rawargs; + size_t len; char *split_args; const char **argv; int code; int count; fprintf(stderr, "git> "); - if (git_read_line_interactively(&line) == EOF) { + + /* + * Avoid using a strbuf or git_read_line_interactively() here. + * We don't want to allocate arbitrary amounts of memory on + * behalf of a possibly untrusted client, and we're subject to + * OS limits on command length anyway. + */ + fflush(stdout); + rawargs = xmalloc(MAX_INTERACTIVE_COMMAND); + if (!fgets(rawargs, MAX_INTERACTIVE_COMMAND, stdin)) { fprintf(stderr, "\n"); - strbuf_release(&line); + free(rawargs); break; } - rawargs = strbuf_detach(&line, NULL); + len = strlen(rawargs); + + /* + * If we truncated due to our input buffer size, reject the + * command. That's better than running bogus input, and + * there's a good chance it's just malicious garbage anyway. 
+ */ + if (len >= MAX_INTERACTIVE_COMMAND - 1) + die("invalid command format: input too long"); + + if (len > 0 && rawargs[len - 1] == '\n') { + if (--len > 0 && rawargs[len - 1] == '\r') + --len; + rawargs[len] = '\0'; + } + split_args = xstrdup(rawargs); count = split_cmdline(split_args, &argv); if (count < 0) { diff --git a/symlink/file b/symlink/file new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/t/lib-submodule-update.sh b/t/lib-submodule-update.sh index f7c7df0ca427b3..25a5f3016f08d5 100644 --- a/t/lib-submodule-update.sh +++ b/t/lib-submodule-update.sh @@ -197,6 +197,7 @@ test_git_directory_exists () { # the submodule repo if it doesn't exist and configures the most problematic # settings for diff.ignoreSubmodules. prolog () { + test_config_global protocol.file.allow always && (test -d submodule_update_repo || create_lib_submodule_repo) && test_config_global diff.ignoreSubmodules all && test_config diff.ignoreSubmodules all diff --git a/t/lib-submodule-update.sh.orig b/t/lib-submodule-update.sh.orig new file mode 100644 index 00000000000000..f7c7df0ca427b3 --- /dev/null +++ b/t/lib-submodule-update.sh.orig @@ -0,0 +1,1097 @@ +# Create a submodule layout used for all tests below. +# +# The following use cases are covered: +# - New submodule (no_submodule => add_sub1) +# - Removed submodule (add_sub1 => remove_sub1) +# - Updated submodule (add_sub1 => modify_sub1) +# - Updated submodule recursively (add_nested_sub => modify_sub1_recursively) +# - Submodule updated to invalid commit (add_sub1 => invalid_sub1) +# - Submodule updated from invalid commit (invalid_sub1 => valid_sub1) +# - Submodule replaced by tracked files in directory (add_sub1 => +# replace_sub1_with_directory) +# - Directory containing tracked files replaced by submodule +# (replace_sub1_with_directory => replace_directory_with_sub1) +# - Submodule replaced by tracked file with the same name (add_sub1 => +# replace_sub1_with_file) +# - Tracked file replaced by submodule (replace_sub1_with_file => +# replace_file_with_sub1) +# +# ----O +# / ^ +# / remove_sub1 +# / +# add_sub1 /-------O---------O--------O modify_sub1_recursively +# | / ^ add_nested_sub +# | / modify_sub1 +# v/ +# O------O-----------O---------O +# ^ \ ^ replace_directory_with_sub1 +# | \ replace_sub1_with_directory +# no_submodule \ +# --------O---------O +# \ ^ replace_file_with_sub1 +# \ replace_sub1_with_file +# \ +# ----O---------O +# ^ valid_sub1 +# invalid_sub1 +# + +create_lib_submodule_repo () { + git init submodule_update_sub1 && + ( + cd submodule_update_sub1 && + echo "expect" >>.gitignore && + echo "actual" >>.gitignore && + echo "x" >file1 && + echo "y" >file2 && + git add .gitignore file1 file2 && + git commit -m "Base inside first submodule" && + git branch "no_submodule" + ) && + git init submodule_update_sub2 && + ( + cd submodule_update_sub2 + echo "expect" >>.gitignore && + echo "actual" >>.gitignore && + echo "x" >file1 && + echo "y" >file2 && + git add .gitignore file1 file2 && + git commit -m "nested submodule base" && + git branch "no_submodule" + ) && + git init submodule_update_repo && + ( + cd submodule_update_repo && + branch=$(git symbolic-ref --short HEAD) && + echo "expect" >>.gitignore && + echo "actual" >>.gitignore && + echo "x" >file1 && + echo "y" >file2 && + git add .gitignore file1 file2 && + git commit -m "Base" && + git branch "no_submodule" && + + git checkout -b "add_sub1" && + git submodule add ../submodule_update_sub1 sub1 && + git submodule add ../submodule_update_sub1 uninitialized_sub 
&& + git config -f .gitmodules submodule.sub1.ignore all && + git config submodule.sub1.ignore all && + git add .gitmodules && + git commit -m "Add sub1" && + + git checkout -b remove_sub1 add_sub1 && + git revert HEAD && + + git checkout -b modify_sub1 add_sub1 && + git submodule update && + ( + cd sub1 && + git fetch && + git checkout -b "modifications" && + echo "z" >file2 && + echo "x" >file3 && + git add file2 file3 && + git commit -m "modified file2 and added file3" && + git push origin modifications + ) && + git add sub1 && + git commit -m "Modify sub1" && + + git checkout -b add_nested_sub modify_sub1 && + git -C sub1 checkout -b "add_nested_sub" && + git -C sub1 submodule add --branch no_submodule ../submodule_update_sub2 sub2 && + git -C sub1 commit -a -m "add a nested submodule" && + git add sub1 && + git commit -a -m "update submodule, that updates a nested submodule" && + git checkout -b modify_sub1_recursively && + git -C sub1 checkout -b modify_sub1_recursively && + git -C sub1/sub2 checkout -b modify_sub1_recursively && + echo change >sub1/sub2/file3 && + git -C sub1/sub2 add file3 && + git -C sub1/sub2 commit -m "make a change in nested sub" && + git -C sub1 add sub2 && + git -C sub1 commit -m "update nested sub" && + git add sub1 && + git commit -m "update sub1, that updates nested sub" && + git -C sub1 push origin modify_sub1_recursively && + git -C sub1/sub2 push origin modify_sub1_recursively && + git -C sub1 submodule deinit -f --all && + + git checkout -b replace_sub1_with_directory add_sub1 && + git submodule update && + git -C sub1 checkout modifications && + git rm --cached sub1 && + rm sub1/.git* && + git config -f .gitmodules --remove-section "submodule.sub1" && + git add .gitmodules sub1/* && + git commit -m "Replace sub1 with directory" && + + git checkout -b replace_directory_with_sub1 && + git revert HEAD && + + git checkout -b replace_sub1_with_file add_sub1 && + git rm sub1 && + echo "content" >sub1 && + git add sub1 && + git commit -m "Replace sub1 with file" && + + git checkout -b replace_file_with_sub1 && + git revert HEAD && + + git checkout -b invalid_sub1 add_sub1 && + git update-index --cacheinfo 160000 $(test_oid numeric) sub1 && + git commit -m "Invalid sub1 commit" && + git checkout -b valid_sub1 && + git revert HEAD && + + git checkout "$branch" + ) +} + +# Helper function to replace gitfile with .git directory +replace_gitfile_with_git_dir () { + ( + cd "$1" && + git_dir="$(git rev-parse --git-dir)" && + rm -f .git && + cp -R "$git_dir" .git && + GIT_WORK_TREE=. git config --unset core.worktree + ) +} + +# Test that the .git directory in the submodule is unchanged (except for the +# core.worktree setting, which appears only in $GIT_DIR/modules/$1/config). +# Call this function before test_submodule_content as the latter might +# write the index file leading to false positive index differences. +# +# Note that this only supports submodules at the root level of the +# superproject, with the default name, i.e. same as its path. +test_git_directory_is_unchanged () { + ( + cd ".git/modules/$1" && + # does core.worktree point at the right place? + test "$(git config core.worktree)" = "../../../$1" && + # remove it temporarily before comparing, as + # "$1/.git/config" lacks it... + git config --unset core.worktree + ) && + diff -r ".git/modules/$1" "$1/.git" && + ( + # ... and then restore. 
+ cd ".git/modules/$1" && + git config core.worktree "../../../$1" + ) +} + +test_git_directory_exists () { + test -e ".git/modules/$1" && + if test -f sub1/.git + then + # does core.worktree point at the right place? + test "$(git -C .git/modules/$1 config core.worktree)" = "../../../$1" + fi +} + +# Helper function to be executed at the start of every test below, it sets up +# the submodule repo if it doesn't exist and configures the most problematic +# settings for diff.ignoreSubmodules. +prolog () { + (test -d submodule_update_repo || create_lib_submodule_repo) && + test_config_global diff.ignoreSubmodules all && + test_config diff.ignoreSubmodules all +} + +# Helper function to bring work tree back into the state given by the +# commit. This includes trying to populate sub1 accordingly if it exists and +# should be updated to an existing commit. +reset_work_tree_to () { + rm -rf submodule_update && + git clone submodule_update_repo submodule_update && + ( + cd submodule_update && + rm -rf sub1 && + git checkout -f "$1" && + git status -u -s >actual && + test_must_be_empty actual && + hash=$(git rev-parse --revs-only HEAD:sub1) && + if test -n "$hash" && + test $(cd "../submodule_update_sub1" && git rev-parse --verify "$hash^{commit}") + then + git submodule update --init --recursive "sub1" + fi + ) +} + +reset_work_tree_to_interested () { + reset_work_tree_to $1 && + # make the submodule git dirs available + if ! test -d submodule_update/.git/modules/sub1 + then + mkdir -p submodule_update/.git/modules && + cp -r submodule_update_repo/.git/modules/sub1 submodule_update/.git/modules/sub1 + GIT_WORK_TREE=. git -C submodule_update/.git/modules/sub1 config --unset core.worktree + fi && + if ! test -d submodule_update/.git/modules/sub1/modules/sub2 + then + mkdir -p submodule_update/.git/modules/sub1/modules && + cp -r submodule_update_repo/.git/modules/sub1/modules/sub2 submodule_update/.git/modules/sub1/modules/sub2 + # core.worktree is unset for sub2 as it is not checked out + fi && + # indicate we are interested in the submodule: + git -C submodule_update config submodule.sub1.url "bogus" && + # sub1 might not be checked out, so use the git dir + git -C submodule_update/.git/modules/sub1 config submodule.sub2.url "bogus" +} + +# Test that the superproject contains the content according to commit "$1" +# (the work tree must match the index for everything but submodules but the +# index must exactly match the given commit including any submodule SHA-1s). +test_superproject_content () { + git diff-index --cached "$1" >actual && + test_must_be_empty actual && + git diff-files --ignore-submodules >actual && + test_must_be_empty actual +} + +# Test that the given submodule at path "$1" contains the content according +# to the submodule commit recorded in the superproject's commit "$2" +test_submodule_content () { + if test x"$1" = "x-C" + then + cd "$2" + shift; shift; + fi + if test $# != 2 + then + echo "test_submodule_content needs two arguments" + return 1 + fi && + submodule="$1" && + commit="$2" && + test -d "$submodule"/ && + if ! test -f "$submodule"/.git && ! 
test -d "$submodule"/.git + then + echo "Submodule $submodule is not populated" + return 1 + fi && + sha1=$(git rev-parse --verify "$commit:$submodule") && + if test -z "$sha1" + then + echo "Couldn't retrieve SHA-1 of $submodule for $commit" + return 1 + fi && + ( + cd "$submodule" && + git status -u -s >actual && + test_must_be_empty actual && + git diff "$sha1" >actual && + test_must_be_empty actual + ) +} + +# Test that the following transitions are correctly handled: +# - Updated submodule +# - New submodule +# - Removed submodule +# - Directory containing tracked files replaced by submodule +# - Submodule replaced by tracked files in directory +# - Submodule replaced by tracked file with the same name +# - Tracked file replaced by submodule +# +# The default is that submodule contents aren't changed until "git submodule +# update" is run. And even then that command doesn't delete the work tree of +# a removed submodule. +# +# The first argument of the callback function will be the name of the submodule. +# +# Removing a submodule containing a .git directory must fail even when forced +# to protect the history! If we are testing this case, the second argument of +# the callback function will be 'test_must_fail', else it will be the empty +# string. +# + +# Internal function; use test_submodule_switch_func(), test_submodule_switch(), +# or test_submodule_forced_switch() instead. +test_submodule_switch_common () { + command="$1" + ######################### Appearing submodule ######################### + # Switching to a commit letting a submodule appear creates empty dir ... + test_expect_success "$command: added submodule creates empty directory" ' + prolog && + reset_work_tree_to no_submodule && + ( + cd submodule_update && + git branch -t add_sub1 origin/add_sub1 && + $command add_sub1 && + test_superproject_content origin/add_sub1 && + test_dir_is_empty sub1 && + git submodule update --init --recursive && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # ... and doesn't care if it already exists. + if test "$KNOWN_FAILURE_STASH_DOES_IGNORE_SUBMODULE_CHANGES" = 1 + then + # Restoring stash fails to restore submodule index entry + RESULT="failure" + else + RESULT="success" + fi + test_expect_$RESULT "$command: added submodule leaves existing empty directory alone" ' + prolog && + reset_work_tree_to no_submodule && + ( + cd submodule_update && + mkdir sub1 && + git branch -t add_sub1 origin/add_sub1 && + $command add_sub1 && + test_superproject_content origin/add_sub1 && + test_dir_is_empty sub1 && + git submodule update --init --recursive && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # Replacing a tracked file with a submodule produces an empty + # directory ... + test_expect_$RESULT "$command: replace tracked file with submodule creates empty directory" ' + prolog && + reset_work_tree_to replace_sub1_with_file && + ( + cd submodule_update && + git branch -t replace_file_with_sub1 origin/replace_file_with_sub1 && + $command replace_file_with_sub1 && + test_superproject_content origin/replace_file_with_sub1 && + test_dir_is_empty sub1 && + git submodule update --init --recursive && + test_submodule_content sub1 origin/replace_file_with_sub1 + ) + ' + # ... as does removing a directory with tracked files with a + # submodule. + if test "$KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR" = 1 + then + # Non fast-forward merges fail with "Directory sub1 doesn't + # exist. 
sub1" because the empty submodule directory is not + # created + RESULT="failure" + else + RESULT="success" + fi + test_expect_$RESULT "$command: replace directory with submodule" ' + prolog && + reset_work_tree_to replace_sub1_with_directory && + ( + cd submodule_update && + git branch -t replace_directory_with_sub1 origin/replace_directory_with_sub1 && + $command replace_directory_with_sub1 && + test_superproject_content origin/replace_directory_with_sub1 && + test_dir_is_empty sub1 && + git submodule update --init --recursive && + test_submodule_content sub1 origin/replace_directory_with_sub1 + ) + ' + + ######################## Disappearing submodule ####################### + # Removing a submodule doesn't remove its work tree ... + if test "$KNOWN_FAILURE_STASH_DOES_IGNORE_SUBMODULE_CHANGES" = 1 + then + RESULT="failure" + else + RESULT="success" + fi + test_expect_$RESULT "$command: removed submodule leaves submodule directory and its contents in place" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t remove_sub1 origin/remove_sub1 && + $command remove_sub1 && + test_superproject_content origin/remove_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # ... especially when it contains a .git directory. + test_expect_$RESULT "$command: removed submodule leaves submodule containing a .git directory alone" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t remove_sub1 origin/remove_sub1 && + replace_gitfile_with_git_dir sub1 && + $command remove_sub1 && + test_superproject_content origin/remove_sub1 && + test_git_directory_is_unchanged sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # Replacing a submodule with files in a directory must fail as the + # submodule work tree isn't removed ... + if test "$KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES" = 1 + then + # Non fast-forward merges attempt to merge the former + # submodule files with the newly checked out ones in the + # directory of the same name while it shouldn't. + RESULT="failure" + elif test "$KNOWN_FAILURE_FORCED_SWITCH_TESTS" = 1 + then + # All existing tests that use test_submodule_forced_switch() + # require this. + RESULT="failure" + else + RESULT="success" + fi + test_expect_$RESULT "$command: replace submodule with a directory must fail" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory && + $command replace_sub1_with_directory test_must_fail && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # ... especially when it contains a .git directory. + test_expect_$RESULT "$command: replace submodule containing a .git directory with a directory must fail" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory && + replace_gitfile_with_git_dir sub1 && + $command replace_sub1_with_directory test_must_fail && + test_superproject_content origin/add_sub1 && + test_git_directory_is_unchanged sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # Replacing it with a file must fail as it could throw away any local + # work tree changes ... 
+ test_expect_failure "$command: replace submodule with a file must fail" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_file origin/replace_sub1_with_file && + $command replace_sub1_with_file test_must_fail && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # ... or even destroy unpushed parts of submodule history if that + # still uses a .git directory. + test_expect_failure "$command: replace submodule containing a .git directory with a file must fail" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_file origin/replace_sub1_with_file && + replace_gitfile_with_git_dir sub1 && + $command replace_sub1_with_file test_must_fail && + test_superproject_content origin/add_sub1 && + test_git_directory_is_unchanged sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + + ########################## Modified submodule ######################### + # Updating a submodule sha1 doesn't update the submodule's work tree + if test "$KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT" = 1 + then + # When cherry picking a SHA-1 update for an ignored submodule + # the commit incorrectly fails with "The previous cherry-pick + # is now empty, possibly due to conflict resolution." + RESULT="failure" + else + RESULT="success" + fi + test_expect_$RESULT "$command: modified submodule does not update submodule work tree" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t modify_sub1 origin/modify_sub1 && + $command modify_sub1 && + test_superproject_content origin/modify_sub1 && + test_submodule_content sub1 origin/add_sub1 && + git submodule update && + test_submodule_content sub1 origin/modify_sub1 + ) + ' + # Updating a submodule to an invalid sha1 doesn't update the + # submodule's work tree, subsequent update will fail + test_expect_$RESULT "$command: modified submodule does not update submodule work tree to invalid commit" ' + prolog && + reset_work_tree_to add_sub1 && + ( + cd submodule_update && + git branch -t invalid_sub1 origin/invalid_sub1 && + $command invalid_sub1 && + test_superproject_content origin/invalid_sub1 && + test_submodule_content sub1 origin/add_sub1 && + test_must_fail git submodule update && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # Updating a submodule from an invalid sha1 doesn't update the + # submodule's work tree, subsequent update will succeed + test_expect_$RESULT "$command: modified submodule does not update submodule work tree from invalid commit" ' + prolog && + reset_work_tree_to invalid_sub1 && + ( + cd submodule_update && + git branch -t valid_sub1 origin/valid_sub1 && + $command valid_sub1 && + test_superproject_content origin/valid_sub1 && + test_dir_is_empty sub1 && + git submodule update --init --recursive && + test_submodule_content sub1 origin/valid_sub1 + ) + ' +} + +# Declares and invokes several tests that, in various situations, checks that +# the provided transition function: +# - succeeds in updating the worktree and index of a superproject to a target +# commit, or fails atomically (depending on the test situation) +# - if succeeds, the contents of submodule directories are unchanged +# - if succeeds, once "git submodule update" is invoked, the contents of +# submodule directories are updated +# +# If the command under test is known to not work with submodules in certain +# conditions, set the appropriate KNOWN_FAILURE_* 
variable used in the tests
+# below to 1.
+#
+# The first argument of the callback function will be the name of the submodule.
+#
+# Removing a submodule containing a .git directory must fail even when forced
+# to protect the history! If we are testing this case, the second argument of
+# the callback function will be 'test_must_fail', else it will be the empty
+# string.
+#
+# The following example uses `git some-command` as the command to be tested.
+# It updates the worktree and index to match a target, but not any
+# submodule directories.
+#
+# my_func () {
+#	...prepare for `git some-command` to be run...
+#	$2 git some-command "$1" &&
+#	if test -n "$2"
+#	then
+#		return
+#	fi &&
+#	...check the state after git some-command is run...
+# }
+# test_submodule_switch_func "my_func"
+test_submodule_switch_func () {
+	command="$1"
+	test_submodule_switch_common "$command"
+
+	# An empty directory does not prevent the creation of a submodule of
+	# the same name, but a file does.
+	test_expect_success "$command: added submodule doesn't remove untracked unignored file with same name" '
+		prolog &&
+		reset_work_tree_to no_submodule &&
+		(
+			cd submodule_update &&
+			git branch -t add_sub1 origin/add_sub1 &&
+			>sub1 &&
+			$command add_sub1 test_must_fail &&
+			test_superproject_content origin/no_submodule &&
+			test_must_be_empty sub1
+		)
+	'
+}
+
+# Ensures that the arg either contains "test_must_fail" or is empty.
+may_only_be_test_must_fail () {
+	test -z "$1" || test "$1" = test_must_fail || die
+}
+
+git_test_func () {
+	may_only_be_test_must_fail "$2" &&
+	$2 git $gitcmd "$1"
+}
+
+test_submodule_switch () {
+	gitcmd="$1"
+	test_submodule_switch_func "git_test_func"
+}
+
+# Same as test_submodule_switch(), except that throwing away local changes in
+# the superproject is allowed.
+test_submodule_forced_switch () {
+	gitcmd="$1"
+	command="git_test_func"
+	KNOWN_FAILURE_FORCED_SWITCH_TESTS=1
+	test_submodule_switch_common "$command"
+
+	# When forced, a file in the superproject does not prevent creating a
+	# submodule of the same name.
+	test_expect_success "$command: added submodule does remove untracked unignored file with same name when forced" '
+		prolog &&
+		reset_work_tree_to no_submodule &&
+		(
+			cd submodule_update &&
+			git branch -t add_sub1 origin/add_sub1 &&
+			>sub1 &&
+			$command add_sub1 &&
+			test_superproject_content origin/add_sub1 &&
+			test_dir_is_empty sub1
+		)
+	'
+}
+
+# Test that submodule contents are correctly updated when switching
+# between commits that change a submodule.
+# Test that the following transitions are correctly handled:
+# (These tests are also above in the case where we expect no change
+# in the submodule)
+# - Updated submodule
+# - New submodule
+# - Removed submodule
+# - Directory containing tracked files replaced by submodule
+# - Submodule replaced by tracked files in directory
+# - Submodule replaced by tracked file with the same name
+# - Tracked file replaced by submodule
+#
+# New test cases
+# - Removing a submodule with a git directory absorbs the submodule's
+#   git directory first into the superproject.
+# - Switching from no submodule to nested submodules
+# - Switching from nested submodules to no submodule
+
+# Internal function; use test_submodule_switch_recursing_with_args() or
+# test_submodule_forced_switch_recursing_with_args() instead.
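+#
+# For example, an illustrative (hypothetical) test script for a switching
+# command would source this library and call the public helpers; "checkout"
+# below merely stands in for whichever command is under test, and the library
+# path assumes this file keeps its usual name:
+#
+#	. ./test-lib.sh
+#	. "$TEST_DIRECTORY"/lib-submodule-update.sh
+#
+#	test_submodule_switch "checkout"
+#	test_submodule_forced_switch "checkout -f"
+#	test_submodule_switch_recursing_with_args "checkout"
+#	test_submodule_forced_switch_recursing_with_args "checkout -f"
+#
+#	test_done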
+test_submodule_recursing_with_args_common () { + command="$1 --recurse-submodules" + + ######################### Appearing submodule ######################### + # Switching to a commit letting a submodule appear checks it out ... + test_expect_success "$command: added submodule is checked out" ' + prolog && + reset_work_tree_to_interested no_submodule && + ( + cd submodule_update && + git branch -t add_sub1 origin/add_sub1 && + $command add_sub1 && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # ... ignoring an empty existing directory. + test_expect_success "$command: added submodule is checked out in empty dir" ' + prolog && + reset_work_tree_to_interested no_submodule && + ( + cd submodule_update && + mkdir sub1 && + git branch -t add_sub1 origin/add_sub1 && + $command add_sub1 && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + + # Replacing a tracked file with a submodule produces a checked out submodule + test_expect_success "$command: replace tracked file with submodule checks out submodule" ' + prolog && + reset_work_tree_to_interested replace_sub1_with_file && + ( + cd submodule_update && + git branch -t replace_file_with_sub1 origin/replace_file_with_sub1 && + $command replace_file_with_sub1 && + test_superproject_content origin/replace_file_with_sub1 && + test_submodule_content sub1 origin/replace_file_with_sub1 + ) + ' + # ... as does removing a directory with tracked files with a submodule. + test_expect_success "$command: replace directory with submodule" ' + prolog && + reset_work_tree_to_interested replace_sub1_with_directory && + ( + cd submodule_update && + git branch -t replace_directory_with_sub1 origin/replace_directory_with_sub1 && + $command replace_directory_with_sub1 && + test_superproject_content origin/replace_directory_with_sub1 && + test_submodule_content sub1 origin/replace_directory_with_sub1 + ) + ' + # Switching to a commit with nested submodules recursively checks them out + test_expect_success "$command: nested submodules are checked out" ' + prolog && + reset_work_tree_to_interested no_submodule && + ( + cd submodule_update && + git branch -t modify_sub1_recursively origin/modify_sub1_recursively && + $command modify_sub1_recursively && + test_superproject_content origin/modify_sub1_recursively && + test_submodule_content sub1 origin/modify_sub1_recursively && + test_submodule_content -C sub1 sub2 origin/modify_sub1_recursively + ) + ' + + ######################## Disappearing submodule ####################### + # Removing a submodule removes its work tree ... + test_expect_success "$command: removed submodule removes submodules working tree" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t remove_sub1 origin/remove_sub1 && + $command remove_sub1 && + test_superproject_content origin/remove_sub1 && + ! test -e sub1 && + test_must_fail git config -f .git/modules/sub1/config core.worktree + ) + ' + # ... absorbing a .git directory along the way. + test_expect_success "$command: removed submodule absorbs submodules .git directory" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t remove_sub1 origin/remove_sub1 && + replace_gitfile_with_git_dir sub1 && + rm -rf .git/modules && + $command remove_sub1 && + test_superproject_content origin/remove_sub1 && + ! 
test -e sub1 &&
+			test_git_directory_exists sub1
+		)
+	'
+
+	# Replacing it with a file ...
+	test_expect_success "$command: replace submodule with a file" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
+			$command replace_sub1_with_file &&
+			test_superproject_content origin/replace_sub1_with_file &&
+			test -f sub1
+		)
+	'
+	RESULTDS=success
+	if test "$KNOWN_FAILURE_DIRECTORY_SUBMODULE_CONFLICTS" = 1
+	then
+		RESULTDS=failure
+	fi
+	# ... must check its local work tree for untracked files
+	test_expect_$RESULTDS "$command: replace submodule with a file must fail with untracked files" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
+			: >sub1/untrackedfile &&
+			test_must_fail $command replace_sub1_with_file &&
+			test_superproject_content origin/add_sub1 &&
+			test_submodule_content sub1 origin/add_sub1 &&
+			test -f sub1/untrackedfile
+		)
+	'
+
+	# Switching to a commit without nested submodules removes their worktrees
+	test_expect_success "$command: worktrees of nested submodules are removed" '
+		prolog &&
+		reset_work_tree_to_interested add_nested_sub &&
+		(
+			cd submodule_update &&
+			git branch -t no_submodule origin/no_submodule &&
+			$command no_submodule &&
+			test_superproject_content origin/no_submodule &&
+			! test_path_is_dir sub1 &&
+			test_must_fail git config -f .git/modules/sub1/config core.worktree &&
+			test_must_fail git config -f .git/modules/sub1/modules/sub2/config core.worktree
+		)
+	'
+
+	########################## Modified submodule #########################
+	# Updating a submodule sha1 updates the submodule's work tree
+	test_expect_success "$command: modified submodule updates submodule work tree" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t modify_sub1 origin/modify_sub1 &&
+			$command modify_sub1 &&
+			test_superproject_content origin/modify_sub1 &&
+			test_submodule_content sub1 origin/modify_sub1
+		)
+	'
+	# Updating a submodule to an invalid sha1 doesn't update either the
+	# superproject or the submodule's work tree.
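+	# (Illustration, not part of the tests: such an "invalid" pointer
+	# typically arises when a submodule commit is never published, e.g.
+	# in a hypothetical superproject
+	#
+	#	git -C sub1 commit --allow-empty -m "local only" &&
+	#	git add sub1 &&
+	#	git commit -m "point at unpublished submodule commit"
+	#
+	# after which a fresh clone of the superproject cannot fetch the
+	# recorded submodule commit, which is what the next test simulates.)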
+ test_expect_success "$command: updating to a missing submodule commit fails" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t invalid_sub1 origin/invalid_sub1 && + test_must_fail $command invalid_sub1 2>err && + test_i18ngrep sub1 err && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + # Updating a submodule does not touch the currently checked out branch in the submodule + test_expect_success "$command: submodule branch is not changed, detach HEAD instead" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git -C sub1 checkout -b keep_branch && + git -C sub1 rev-parse HEAD >expect && + git branch -t modify_sub1 origin/modify_sub1 && + $command modify_sub1 && + test_superproject_content origin/modify_sub1 && + test_submodule_content sub1 origin/modify_sub1 && + git -C sub1 rev-parse keep_branch >actual && + test_cmp expect actual && + test_must_fail git -C sub1 symbolic-ref HEAD + ) + ' +} + +# Declares and invokes several tests that, in various situations, checks that +# the provided Git command, when invoked with --recurse-submodules: +# - succeeds in updating the worktree and index of a superproject to a target +# commit, or fails atomically (depending on the test situation) +# - if succeeds, the contents of submodule directories are updated +# +# Specify the Git command so that "git $GIT_COMMAND --recurse-submodules" +# works. +# +# If the command under test is known to not work with submodules in certain +# conditions, set the appropriate KNOWN_FAILURE_* variable used in the tests +# below to 1. +# +# Use as follows: +# +# test_submodule_switch_recursing_with_args "$GIT_COMMAND" +test_submodule_switch_recursing_with_args () { + cmd_args="$1" + command="git $cmd_args" + test_submodule_recursing_with_args_common "$command" + + RESULTDS=success + if test "$KNOWN_FAILURE_DIRECTORY_SUBMODULE_CONFLICTS" = 1 + then + RESULTDS=failure + fi + RESULTOI=success + if test "$KNOWN_FAILURE_SUBMODULE_OVERWRITE_IGNORED_UNTRACKED" = 1 + then + RESULTOI=failure + fi + # Switching to a commit letting a submodule appear cannot override an + # untracked file. + test_expect_success "$command: added submodule doesn't remove untracked file with same name" ' + prolog && + reset_work_tree_to_interested no_submodule && + ( + cd submodule_update && + git branch -t add_sub1 origin/add_sub1 && + : >sub1 && + test_must_fail $command add_sub1 && + test_superproject_content origin/no_submodule && + test_must_be_empty sub1 + ) + ' + # ... but an ignored file is fine. 
+	test_expect_$RESULTOI "$command: added submodule removes an untracked ignored file" '
+		test_when_finished "rm submodule_update/.git/info/exclude" &&
+		prolog &&
+		reset_work_tree_to_interested no_submodule &&
+		(
+			cd submodule_update &&
+			git branch -t add_sub1 origin/add_sub1 &&
+			: >sub1 &&
+			echo sub1 >.git/info/exclude &&
+			$command add_sub1 &&
+			test_superproject_content origin/add_sub1 &&
+			test_submodule_content sub1 origin/add_sub1
+		)
+	'
+
+	# Replacing a submodule with files in a directory must succeed
+	# when the submodule is clean
+	test_expect_$RESULTDS "$command: replace submodule with a directory" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory &&
+			$command replace_sub1_with_directory &&
+			test_superproject_content origin/replace_sub1_with_directory &&
+			test_submodule_content sub1 origin/replace_sub1_with_directory
+		)
+	'
+	# ... absorbing a .git directory.
+	test_expect_$RESULTDS "$command: replace submodule containing a .git directory with a directory must absorb the git dir" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory &&
+			replace_gitfile_with_git_dir sub1 &&
+			rm -rf .git/modules &&
+			$command replace_sub1_with_directory &&
+			test_superproject_content origin/replace_sub1_with_directory &&
+			test_git_directory_exists sub1
+		)
+	'
+
+	# ... and ignored files are ignored
+	test_expect_success "$command: replace submodule with a file ignores ignored files in submodule" '
+		test_when_finished "rm submodule_update/.git/modules/sub1/info/exclude" &&
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
+			echo ignored >.git/modules/sub1/info/exclude &&
+			: >sub1/ignored &&
+			$command replace_sub1_with_file &&
+			test_superproject_content origin/replace_sub1_with_file &&
+			test -f sub1
+		)
+	'
+
+	test_expect_success "git -c submodule.recurse=true $cmd_args: modified submodule updates submodule work tree" '
+		prolog &&
+		reset_work_tree_to_interested add_sub1 &&
+		(
+			cd submodule_update &&
+			git branch -t modify_sub1 origin/modify_sub1 &&
+			git -c submodule.recurse=true $cmd_args modify_sub1 &&
+			test_superproject_content origin/modify_sub1 &&
+			test_submodule_content sub1 origin/modify_sub1
+		)
+	'
+
+	test_expect_success "$command: modified submodule updates submodule recursively" '
+		prolog &&
+		reset_work_tree_to_interested add_nested_sub &&
+		(
+			cd submodule_update &&
+			git branch -t modify_sub1_recursively origin/modify_sub1_recursively &&
+			$command modify_sub1_recursively &&
+			test_superproject_content origin/modify_sub1_recursively &&
+			test_submodule_content sub1 origin/modify_sub1_recursively &&
+			test_submodule_content -C sub1 sub2 origin/modify_sub1_recursively
+		)
+	'
+}
+
+# Same as test_submodule_switch_recursing_with_args(), except that throwing
+# away local changes in the superproject is allowed.
+test_submodule_forced_switch_recursing_with_args () {
+	cmd_args="$1"
+	command="git $cmd_args"
+	test_submodule_recursing_with_args_common "$command"
+
+	RESULT=success
+	if test "$KNOWN_FAILURE_DIRECTORY_SUBMODULE_CONFLICTS" = 1
+	then
+		RESULT=failure
+	fi
+	# Switching to a commit letting a submodule appear does not care about
+	# an untracked file.
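+	# (For comparison, an illustrative sketch only: in the non-forced
+	# variant above the same situation has to fail, e.g. assuming the
+	# command under test is "checkout",
+	#
+	#	>sub1 &&
+	#	test_must_fail git checkout --recurse-submodules add_sub1
+	#
+	# while the forced command below is allowed to overwrite sub1.)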
+ test_expect_success "$command: added submodule does remove untracked unignored file with same name when forced" ' + prolog && + reset_work_tree_to_interested no_submodule && + ( + cd submodule_update && + git branch -t add_sub1 origin/add_sub1 && + >sub1 && + $command add_sub1 && + test_superproject_content origin/add_sub1 && + test_submodule_content sub1 origin/add_sub1 + ) + ' + + # Replacing a submodule with files in a directory ... + test_expect_success "$command: replace submodule with a directory" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory && + $command replace_sub1_with_directory && + test_superproject_content origin/replace_sub1_with_directory + ) + ' + # ... absorbing a .git directory. + test_expect_success "$command: replace submodule containing a .git directory with a directory must fail" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory && + replace_gitfile_with_git_dir sub1 && + rm -rf .git/modules/sub1 && + $command replace_sub1_with_directory && + test_superproject_content origin/replace_sub1_with_directory && + test_git_directory_exists sub1 + ) + ' + + # ... even if the submodule contains ignored files + test_expect_success "$command: replace submodule with a file ignoring ignored files" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t replace_sub1_with_file origin/replace_sub1_with_file && + : >sub1/expect && + $command replace_sub1_with_file && + test_superproject_content origin/replace_sub1_with_file + ) + ' + + # Updating a submodule from an invalid sha1 updates + test_expect_success "$command: modified submodule does update submodule work tree from invalid commit" ' + prolog && + reset_work_tree_to_interested invalid_sub1 && + ( + cd submodule_update && + git branch -t valid_sub1 origin/valid_sub1 && + $command valid_sub1 && + test_superproject_content origin/valid_sub1 && + test_submodule_content sub1 origin/valid_sub1 + ) + ' + + # Old versions of Git were buggy writing the .git link file + # (e.g. before f8eaa0ba98b and then moving the superproject repo + # whose submodules contained absolute paths) + test_expect_success "$command: updating submodules fixes .git links" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + git branch -t modify_sub1 origin/modify_sub1 && + echo "gitdir: bogus/path" >sub1/.git && + $command modify_sub1 && + test_superproject_content origin/modify_sub1 && + test_submodule_content sub1 origin/modify_sub1 + ) + ' + + test_expect_success "$command: changed submodule worktree is reset" ' + prolog && + reset_work_tree_to_interested add_sub1 && + ( + cd submodule_update && + rm sub1/file1 && + : >sub1/new_file && + git -C sub1 add new_file && + $command HEAD && + test_path_is_file sub1/file1 && + test_path_is_missing sub1/new_file + ) + ' +} diff --git a/t/t0033-safe-directory.sh b/t/t0033-safe-directory.sh new file mode 100755 index 00000000000000..239d93f4d21141 --- /dev/null +++ b/t/t0033-safe-directory.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +test_description='verify safe.directory checks' + +. 
./test-lib.sh + +GIT_TEST_ASSUME_DIFFERENT_OWNER=1 +export GIT_TEST_ASSUME_DIFFERENT_OWNER + +expect_rejected_dir () { + test_must_fail git status 2>err && + grep "safe.directory" err +} + +test_expect_success 'safe.directory is not set' ' + expect_rejected_dir +' + +test_expect_success 'safe.directory does not match' ' + git config --global safe.directory bogus && + expect_rejected_dir +' + +test_expect_success 'path exist as different key' ' + git config --global foo.bar "$(pwd)" && + expect_rejected_dir +' + +test_expect_success 'safe.directory matches' ' + git config --global --add safe.directory "$(pwd)" && + git status +' + +test_expect_success 'safe.directory matches, but is reset' ' + git config --global --add safe.directory "" && + expect_rejected_dir +' + +test_expect_success 'safe.directory=*' ' + git config --global --add safe.directory "*" && + git status +' + +test_expect_success 'safe.directory=*, but is reset' ' + git config --global --add safe.directory "" && + expect_rejected_dir +' + +test_done diff --git a/t/t0060-path-utils.sh b/t/t0060-path-utils.sh index de4960783f071a..9e2219f37aa68f 100755 --- a/t/t0060-path-utils.sh +++ b/t/t0060-path-utils.sh @@ -55,12 +55,15 @@ fi ancestor() { # We do some math with the expected ancestor length. expected=$3 - if test -n "$rootoff" && test "x$expected" != x-1; then - expected=$(($expected-$rootslash)) - test $expected -lt 0 || - expected=$(($expected+$rootoff)) - fi - test_expect_success "longest ancestor: $1 $2 => $expected" \ + case "$rootoff,$expected,$2" in + *,*,//*) ;; # leave UNC paths alone + [0-9]*,[0-9]*,/*) + # On Windows, expect MSYS2 pseudo root translation for + # Unix-style absolute paths + expected=$(($expected-$rootslash+$rootoff)) + ;; + esac + test_expect_success $4 "longest ancestor: $1 $2 => $expected" \ "actual=\$(test-tool path-utils longest_ancestor_length '$1' '$2') && test \"\$actual\" = '$expected'" } @@ -156,6 +159,11 @@ ancestor /foo/bar /foo 4 ancestor /foo/bar /foo:/bar 4 ancestor /foo/bar /bar -1 +# Windows-specific: DOS drives, network shares +ancestor C:/Users/me C:/ 2 MINGW +ancestor D:/Users/me C:/ -1 MINGW +ancestor //server/share/my-directory //server/share/ 14 MINGW + test_expect_success 'strip_path_suffix' ' test c:/msysgit = $(test-tool path-utils strip_path_suffix \ c:/msysgit/libexec//git-core libexec/git-core) diff --git a/t/t0066-dir-iterator.sh b/t/t0066-dir-iterator.sh index 92910e4e6c13ce..c826f60f6d6f89 100755 --- a/t/t0066-dir-iterator.sh +++ b/t/t0066-dir-iterator.sh @@ -109,7 +109,9 @@ test_expect_success SYMLINKS 'setup dirs with symlinks' ' mkdir -p dir5/a/c && ln -s ../c dir5/a/b/d && ln -s ../ dir5/a/b/e && - ln -s ../../ dir5/a/b/f + ln -s ../../ dir5/a/b/f && + + ln -s dir4 dir6 ' test_expect_success SYMLINKS 'dir-iterator should not follow symlinks by default' ' @@ -145,4 +147,27 @@ test_expect_success SYMLINKS 'dir-iterator should follow symlinks w/ follow flag test_cmp expected-follow-sorted-output actual-follow-sorted-output ' +test_expect_success SYMLINKS 'dir-iterator does not resolve top-level symlinks' ' + test_must_fail test-tool dir-iterator ./dir6 >out && + + grep "ENOTDIR" out +' + +test_expect_success SYMLINKS 'dir-iterator resolves top-level symlinks w/ follow flag' ' + cat >expected-follow-sorted-output <<-EOF && + [d] (a) [a] ./dir6/a + [d] (a/f) [f] ./dir6/a/f + [d] (a/f/c) [c] ./dir6/a/f/c + [d] (b) [b] ./dir6/b + [d] (b/c) [c] ./dir6/b/c + [f] (a/d) [d] ./dir6/a/d + [f] (a/e) [e] ./dir6/a/e + EOF + + test-tool dir-iterator --follow-symlinks ./dir6 >out 
&& + sort out >actual-follow-sorted-output && + + test_cmp expected-follow-sorted-output actual-follow-sorted-output +' + test_done diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh index 38fc8340f5c9b7..ab7b6049983e4a 100755 --- a/t/t1091-sparse-checkout-builtin.sh +++ b/t/t1091-sparse-checkout-builtin.sh @@ -465,7 +465,8 @@ test_expect_success 'interaction with submodules' ' ( cd super && mkdir modules && - git submodule add ../repo modules/child && + git -c protocol.file.allow=always \ + submodule add ../repo modules/child && git add . && git commit -m "add submodule" && git sparse-checkout init --cone && diff --git a/t/t1091-sparse-checkout-builtin.sh.orig b/t/t1091-sparse-checkout-builtin.sh.orig new file mode 100755 index 00000000000000..38fc8340f5c9b7 --- /dev/null +++ b/t/t1091-sparse-checkout-builtin.sh.orig @@ -0,0 +1,645 @@ +#!/bin/sh + +test_description='sparse checkout builtin tests' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +list_files() { + # Do not replace this with 'ls "$1"', as "ls" with BSD-lineage + # enables "-A" by default for root and ends up including ".git" and + # such in its output. (Note, though, that running the test suite as + # root is generally not recommended.) + (cd "$1" && printf '%s\n' *) +} + +check_files() { + list_files "$1" >actual && + shift && + printf "%s\n" $@ >expect && + test_cmp expect actual +} + +test_expect_success 'setup' ' + git init repo && + ( + cd repo && + echo "initial" >a && + mkdir folder1 folder2 deep && + mkdir deep/deeper1 deep/deeper2 && + mkdir deep/deeper1/deepest && + cp a folder1 && + cp a folder2 && + cp a deep && + cp a deep/deeper1 && + cp a deep/deeper2 && + cp a deep/deeper1/deepest && + git add . 
&& + git commit -m "initial commit" + ) +' + +test_expect_success 'git sparse-checkout list (empty)' ' + git -C repo sparse-checkout list >list 2>err && + test_must_be_empty list && + test_i18ngrep "this worktree is not sparse (sparse-checkout file may not exist)" err +' + +test_expect_success 'git sparse-checkout list (populated)' ' + test_when_finished rm -f repo/.git/info/sparse-checkout && + cat >repo/.git/info/sparse-checkout <<-\EOF && + /folder1/* + /deep/ + **/a + !*bin* + EOF + cp repo/.git/info/sparse-checkout expect && + git -C repo sparse-checkout list >list && + test_cmp expect list +' + +test_expect_success 'git sparse-checkout init' ' + git -C repo sparse-checkout init && + cat >expect <<-\EOF && + /* + !/*/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + test_cmp_config -C repo true core.sparsecheckout && + check_files repo a +' + +test_expect_success 'git sparse-checkout list after init' ' + git -C repo sparse-checkout list >actual && + cat >expect <<-\EOF && + /* + !/*/ + EOF + test_cmp expect actual +' + +test_expect_success 'init with existing sparse-checkout' ' + echo "*folder*" >> repo/.git/info/sparse-checkout && + git -C repo sparse-checkout init && + cat >expect <<-\EOF && + /* + !/*/ + *folder* + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a folder1 folder2 +' + +test_expect_success 'clone --sparse' ' + git clone --sparse "file://$(pwd)/repo" clone && + git -C clone sparse-checkout list >actual && + cat >expect <<-\EOF && + /* + !/*/ + EOF + test_cmp expect actual && + check_files clone a +' + +test_expect_success 'interaction with clone --no-checkout (unborn index)' ' + git clone --no-checkout "file://$(pwd)/repo" clone_no_checkout && + git -C clone_no_checkout sparse-checkout init --cone && + git -C clone_no_checkout sparse-checkout set folder1 && + + git -C clone_no_checkout sparse-checkout list >actual && + cat >expect <<-\EOF && + folder1 + EOF + test_cmp expect actual && + + # nothing checked out, expect "No such file or directory" + ! 
ls clone_no_checkout/* >actual && + test_must_be_empty actual && + test_path_is_missing clone_no_checkout/.git/index && + + # No branch is checked out until we manually switch to one + git -C clone_no_checkout switch main && + test_path_is_file clone_no_checkout/.git/index && + check_files clone_no_checkout a folder1 +' + +test_expect_success 'set enables config' ' + git init empty-config && + ( + cd empty-config && + test_commit test file && + test_path_is_missing .git/config.worktree && + git sparse-checkout set nothing && + test_path_is_file .git/config.worktree && + test_cmp_config true core.sparseCheckout + ) +' + +test_expect_success 'set sparse-checkout using builtin' ' + git -C repo sparse-checkout set "/*" "!/*/" "*folder*" && + cat >expect <<-\EOF && + /* + !/*/ + *folder* + EOF + git -C repo sparse-checkout list >actual && + test_cmp expect actual && + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a folder1 folder2 +' + +test_expect_success 'set sparse-checkout using --stdin' ' + cat >expect <<-\EOF && + /* + !/*/ + /folder1/ + /folder2/ + EOF + git -C repo sparse-checkout set --stdin actual && + test_cmp expect actual && + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo "a folder1 folder2" +' + +test_expect_success 'add to sparse-checkout' ' + cat repo/.git/info/sparse-checkout >expect && + cat >add <<-\EOF && + pattern1 + /folder1/ + pattern2 + EOF + cat add >>expect && + git -C repo sparse-checkout add --stdin actual && + test_cmp expect actual && + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo "a folder1 folder2" +' + +test_expect_success 'cone mode: match patterns' ' + git -C repo config --worktree core.sparseCheckoutCone true && + rm -rf repo/a repo/folder1 repo/folder2 && + git -C repo read-tree -mu HEAD 2>err && + test_i18ngrep ! "disabling cone patterns" err && + git -C repo reset --hard && + check_files repo a folder1 folder2 +' + +test_expect_success 'cone mode: warn on bad pattern' ' + test_when_finished mv sparse-checkout repo/.git/info/ && + cp repo/.git/info/sparse-checkout . && + echo "!/deep/deeper/*" >>repo/.git/info/sparse-checkout && + git -C repo read-tree -mu HEAD 2>err && + test_i18ngrep "unrecognized negative pattern" err +' + +test_expect_success 'sparse-checkout disable' ' + test_when_finished rm -rf repo/.git/info/sparse-checkout && + git -C repo sparse-checkout disable && + test_path_is_file repo/.git/info/sparse-checkout && + git -C repo config --list >config && + test_must_fail git config core.sparseCheckout && + check_files repo a deep folder1 folder2 +' + +test_expect_success 'sparse-index enabled and disabled' ' + git -C repo sparse-checkout init --cone --sparse-index && + test_cmp_config -C repo true index.sparse && + test-tool -C repo read-cache --table >cache && + grep " tree " cache && + + git -C repo sparse-checkout disable && + test-tool -C repo read-cache --table >cache && + ! grep " tree " cache && + git -C repo config --list >config && + ! 
grep index.sparse config +' + +test_expect_success 'cone mode: init and set' ' + git -C repo sparse-checkout init --cone && + git -C repo config --list >config && + test_i18ngrep "core.sparsecheckoutcone=true" config && + list_files repo >dir && + echo a >expect && + test_cmp expect dir && + git -C repo sparse-checkout set deep/deeper1/deepest/ 2>err && + test_must_be_empty err && + check_files repo a deep && + check_files repo/deep a deeper1 && + check_files repo/deep/deeper1 a deepest && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + !/deep/*/ + /deep/deeper1/ + !/deep/deeper1/*/ + /deep/deeper1/deepest/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + git -C repo sparse-checkout set --stdin 2>err <<-\EOF && + folder1 + folder2 + EOF + test_must_be_empty err && + check_files repo a folder1 folder2 +' + +test_expect_success 'cone mode: list' ' + cat >expect <<-\EOF && + folder1 + folder2 + EOF + git -C repo sparse-checkout set --stdin actual 2>err && + test_must_be_empty err && + test_cmp expect actual +' + +test_expect_success 'cone mode: set with nested folders' ' + git -C repo sparse-checkout set deep deep/deeper1/deepest 2>err && + test_line_count = 0 err && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + EOF + test_cmp repo/.git/info/sparse-checkout expect +' + +test_expect_success 'cone mode: add independent path' ' + git -C repo sparse-checkout set deep/deeper1 && + git -C repo sparse-checkout add folder1 && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + !/deep/*/ + /deep/deeper1/ + /folder1/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a deep folder1 +' + +test_expect_success 'cone mode: add sibling path' ' + git -C repo sparse-checkout set deep/deeper1 && + git -C repo sparse-checkout add deep/deeper2 && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + !/deep/*/ + /deep/deeper1/ + /deep/deeper2/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a deep +' + +test_expect_success 'cone mode: add parent path' ' + git -C repo sparse-checkout set deep/deeper1 folder1 && + git -C repo sparse-checkout add deep && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + /folder1/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a deep folder1 +' + +test_expect_success 'not-up-to-date does not block rest of sparsification' ' + test_when_finished git -C repo sparse-checkout disable && + test_when_finished git -C repo reset --hard && + git -C repo sparse-checkout set deep && + + echo update >repo/deep/deeper2/a && + cp repo/.git/info/sparse-checkout expect && + test_write_lines "!/deep/*/" "/deep/deeper1/" >>expect && + + git -C repo sparse-checkout set deep/deeper1 2>err && + + test_i18ngrep "The following paths are not up to date" err && + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo/deep a deeper1 deeper2 && + check_files repo/deep/deeper1 a deepest && + check_files repo/deep/deeper1/deepest a && + check_files repo/deep/deeper2 a +' + +test_expect_success 'revert to old sparse-checkout on empty update' ' + git init empty-test && + ( + echo >file && + git add file && + git commit -m "test" && + git sparse-checkout set nothing 2>err && + test_i18ngrep ! "Sparse checkout leaves no entry on working directory" err && + test_i18ngrep ! 
".git/index.lock" err && + git sparse-checkout set file + ) +' + +test_expect_success 'fail when lock is taken' ' + test_when_finished rm -rf repo/.git/info/sparse-checkout.lock && + touch repo/.git/info/sparse-checkout.lock && + test_must_fail git -C repo sparse-checkout set deep 2>err && + test_i18ngrep "Unable to create .*\.lock" err +' + +test_expect_success '.gitignore should not warn about cone mode' ' + git -C repo config --worktree core.sparseCheckoutCone true && + echo "**/bin/*" >repo/.gitignore && + git -C repo reset --hard 2>err && + test_i18ngrep ! "disabling cone patterns" err +' + +test_expect_success 'sparse-checkout (init|set|disable) warns with dirty status' ' + git clone repo dirty && + echo dirty >dirty/folder1/a && + + git -C dirty sparse-checkout init 2>err && + test_i18ngrep "warning.*The following paths are not up to date" err && + + git -C dirty sparse-checkout set /folder2/* /deep/deeper1/* 2>err && + test_i18ngrep "warning.*The following paths are not up to date" err && + test_path_is_file dirty/folder1/a && + + git -C dirty sparse-checkout disable 2>err && + test_must_be_empty err && + + git -C dirty reset --hard && + git -C dirty sparse-checkout init && + git -C dirty sparse-checkout set /folder2/* /deep/deeper1/* && + test_path_is_missing dirty/folder1/a && + git -C dirty sparse-checkout disable && + test_path_is_file dirty/folder1/a +' + +test_expect_success 'sparse-checkout (init|set|disable) warns with unmerged status' ' + git clone repo unmerged && + + cat >input <<-EOF && + 0 $ZERO_OID folder1/a + 100644 $(git -C unmerged rev-parse HEAD:folder1/a) 1 folder1/a + EOF + git -C unmerged update-index --index-info err && + test_i18ngrep "warning.*The following paths are unmerged" err && + + git -C unmerged sparse-checkout set /folder2/* /deep/deeper1/* 2>err && + test_i18ngrep "warning.*The following paths are unmerged" err && + test_path_is_file dirty/folder1/a && + + git -C unmerged sparse-checkout disable 2>err && + test_i18ngrep "warning.*The following paths are unmerged" err && + + git -C unmerged reset --hard && + git -C unmerged sparse-checkout init && + git -C unmerged sparse-checkout set /folder2/* /deep/deeper1/* && + git -C unmerged sparse-checkout disable +' + +test_expect_success 'sparse-checkout reapply' ' + git clone repo tweak && + + echo dirty >tweak/deep/deeper2/a && + + cat >input <<-EOF && + 0 $ZERO_OID folder1/a + 100644 $(git -C tweak rev-parse HEAD:folder1/a) 1 folder1/a + EOF + git -C tweak update-index --index-info err && + test_i18ngrep "warning.*The following paths are not up to date" err && + test_i18ngrep "warning.*The following paths are unmerged" err && + + git -C tweak sparse-checkout set folder2 deep/deeper1 2>err && + test_i18ngrep "warning.*The following paths are not up to date" err && + test_i18ngrep "warning.*The following paths are unmerged" err && + + git -C tweak sparse-checkout reapply 2>err && + test_i18ngrep "warning.*The following paths are not up to date" err && + test_path_is_file tweak/deep/deeper2/a && + test_i18ngrep "warning.*The following paths are unmerged" err && + test_path_is_file tweak/folder1/a && + + git -C tweak checkout HEAD deep/deeper2/a && + git -C tweak sparse-checkout reapply 2>err && + test_i18ngrep ! 
"warning.*The following paths are not up to date" err && + test_path_is_missing tweak/deep/deeper2/a && + test_i18ngrep "warning.*The following paths are unmerged" err && + test_path_is_file tweak/folder1/a && + + git -C tweak add folder1/a && + git -C tweak sparse-checkout reapply 2>err && + test_must_be_empty err && + test_path_is_missing tweak/deep/deeper2/a && + test_path_is_missing tweak/folder1/a && + + git -C tweak sparse-checkout disable +' + +test_expect_success 'cone mode: set with core.ignoreCase=true' ' + rm repo/.git/info/sparse-checkout && + git -C repo sparse-checkout init --cone && + git -C repo -c core.ignoreCase=true sparse-checkout set folder1 && + cat >expect <<-\EOF && + /* + !/*/ + /folder1/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a folder1 +' + +test_expect_success 'interaction with submodules' ' + git clone repo super && + ( + cd super && + mkdir modules && + git submodule add ../repo modules/child && + git add . && + git commit -m "add submodule" && + git sparse-checkout init --cone && + git sparse-checkout set folder1 + ) && + check_files super a folder1 modules && + check_files super/modules/child a deep folder1 folder2 +' + +test_expect_success 'different sparse-checkouts with worktrees' ' + git -C repo worktree add --detach ../worktree && + check_files worktree "a deep folder1 folder2" && + git -C worktree sparse-checkout init --cone && + git -C repo sparse-checkout set folder1 && + git -C worktree sparse-checkout set deep/deeper1 && + check_files repo a folder1 && + check_files worktree a deep +' + +test_expect_success 'set using filename keeps file on-disk' ' + git -C repo sparse-checkout set a deep && + cat >expect <<-\EOF && + /* + !/*/ + /a/ + /deep/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a deep +' + +check_read_tree_errors () { + REPO=$1 + FILES=$2 + ERRORS=$3 + git -C $REPO -c core.sparseCheckoutCone=false read-tree -mu HEAD 2>err && + test_must_be_empty err && + check_files $REPO "$FILES" && + git -C $REPO read-tree -mu HEAD 2>err && + if test -z "$ERRORS" + then + test_must_be_empty err + else + test_i18ngrep "$ERRORS" err + fi && + check_files $REPO $FILES +} + +test_expect_success 'pattern-checks: /A/**' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + /folder1/** + EOF + check_read_tree_errors repo "a folder1" "disabling cone pattern matching" +' + +test_expect_success 'pattern-checks: /A/**/B/' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + /deep/**/deepest + EOF + check_read_tree_errors repo "a deep" "disabling cone pattern matching" && + check_files repo/deep "deeper1" && + check_files repo/deep/deeper1 "deepest" +' + +test_expect_success 'pattern-checks: too short' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + / + EOF + check_read_tree_errors repo "a" "disabling cone pattern matching" +' +test_expect_success 'pattern-checks: not too short' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + /b/ + EOF + git -C repo read-tree -mu HEAD 2>err && + test_must_be_empty err && + check_files repo a +' + +test_expect_success 'pattern-checks: trailing "*"' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + /a* + EOF + check_read_tree_errors repo "a" "disabling cone pattern matching" +' + +test_expect_success 'pattern-checks: starting "*"' ' + cat >repo/.git/info/sparse-checkout <<-\EOF && + /* + !/*/ + *eep/ + EOF + check_read_tree_errors repo "a deep" "disabling cone pattern matching" +' + 
+test_expect_success 'pattern-checks: contained glob characters' ' + for c in "[a]" "\\" "?" "*" + do + cat >repo/.git/info/sparse-checkout <<-EOF && + /* + !/*/ + something$c-else/ + EOF + check_read_tree_errors repo "a" "disabling cone pattern matching" + done +' + +test_expect_success BSLASHPSPEC 'pattern-checks: escaped characters' ' + git clone repo escaped && + TREEOID=$(git -C escaped rev-parse HEAD:folder1) && + NEWTREE=$(git -C escaped mktree <<-EOF + $(git -C escaped ls-tree HEAD) + 040000 tree $TREEOID zbad\\dir + 040000 tree $TREEOID zdoes*exist + 040000 tree $TREEOID zglob[!a]? + EOF + ) && + COMMIT=$(git -C escaped commit-tree $NEWTREE -p HEAD) && + git -C escaped reset --hard $COMMIT && + check_files escaped "a deep folder1 folder2 zbad\\dir zdoes*exist" zglob[!a]? && + git -C escaped sparse-checkout init --cone && + git -C escaped sparse-checkout set zbad\\dir/bogus "zdoes*not*exist" "zdoes*exist" "zglob[!a]?" && + cat >expect <<-\EOF && + /* + !/*/ + /zbad\\dir/ + !/zbad\\dir/*/ + /zbad\\dir/bogus/ + /zdoes\*exist/ + /zdoes\*not\*exist/ + /zglob\[!a]\?/ + EOF + test_cmp expect escaped/.git/info/sparse-checkout && + check_read_tree_errors escaped "a zbad\\dir zdoes*exist zglob[!a]?" && + git -C escaped ls-tree -d --name-only HEAD >list-expect && + git -C escaped sparse-checkout set --stdin expect <<-\EOF && + /* + !/*/ + /deep/ + /folder1/ + /folder2/ + /zbad\\dir/ + /zdoes\*exist/ + /zglob\[!a]\?/ + EOF + test_cmp expect escaped/.git/info/sparse-checkout && + check_files escaped "a deep folder1 folder2 zbad\\dir zdoes*exist" zglob[!a]? && + git -C escaped sparse-checkout list >list-actual && + test_cmp list-expect list-actual +' + +test_expect_success MINGW 'cone mode replaces backslashes with slashes' ' + git -C repo sparse-checkout set deep\\deeper1 && + cat >expect <<-\EOF && + /* + !/*/ + /deep/ + !/deep/*/ + /deep/deeper1/ + EOF + test_cmp expect repo/.git/info/sparse-checkout && + check_files repo a deep && + check_files repo/deep a deeper1 +' + +test_done diff --git a/t/t1300-config.sh b/t/t1300-config.sh index 9ff46f3b0471fb..1b6188a89f1ab5 100755 --- a/t/t1300-config.sh +++ b/t/t1300-config.sh @@ -616,6 +616,36 @@ test_expect_success 'renaming to bogus section is rejected' ' test_must_fail git config --rename-section branch.zwei "bogus name" ' +test_expect_success 'renaming a section with a long line' ' + { + printf "[b]\\n" && + printf " c = d %1024s [a] e = f\\n" " " && + printf "[a] g = h\\n" + } >y && + git config -f y --rename-section a xyz && + test_must_fail git config -f y b.e +' + +test_expect_success 'renaming an embedded section with a long line' ' + { + printf "[b]\\n" && + printf " c = d %1024s [a] [foo] e = f\\n" " " && + printf "[a] g = h\\n" + } >y && + git config -f y --rename-section a xyz && + test_must_fail git config -f y foo.e +' + +test_expect_success 'renaming a section with an overly-long line' ' + { + printf "[b]\\n" && + printf " c = d %525000s e" " " && + printf "[a] g = h\\n" + } >y && + test_must_fail git config -f y --rename-section a xyz 2>err && + test_i18ngrep "refusing to work with overly long line in .y. 
on line 2" err +' + cat >> .git/config << EOF [branch "zwei"] a = 1 [branch "vier"] EOF diff --git a/t/t1300-config.sh.orig b/t/t1300-config.sh.orig new file mode 100755 index 00000000000000..fd306a9000885c --- /dev/null +++ b/t/t1300-config.sh.orig @@ -0,0 +1,2410 @@ +#!/bin/sh +# +# Copyright (c) 2005 Johannes Schindelin +# + +test_description='Test git config in different settings' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +test_expect_success 'clear default config' ' + rm -f .git/config +' + +cat > expect << EOF +[section] + penguin = little blue +EOF +test_expect_success 'initial' ' + git config section.penguin "little blue" && + test_cmp expect .git/config +' + +cat > expect << EOF +[section] + penguin = little blue + Movie = BadPhysics +EOF +test_expect_success 'mixed case' ' + git config Section.Movie BadPhysics && + test_cmp expect .git/config +' + +cat > expect << EOF +[section] + penguin = little blue + Movie = BadPhysics +[Sections] + WhatEver = Second +EOF +test_expect_success 'similar section' ' + git config Sections.WhatEver Second && + test_cmp expect .git/config +' + +cat > expect << EOF +[section] + penguin = little blue + Movie = BadPhysics + UPPERCASE = true +[Sections] + WhatEver = Second +EOF +test_expect_success 'uppercase section' ' + git config SECTION.UPPERCASE true && + test_cmp expect .git/config +' + +test_expect_success 'replace with non-match' ' + git config section.penguin kingpin !blue +' + +test_expect_success 'replace with non-match (actually matching)' ' + git config section.penguin "very blue" !kingpin +' + +cat > expect << EOF +[section] + penguin = very blue + Movie = BadPhysics + UPPERCASE = true + penguin = kingpin +[Sections] + WhatEver = Second +EOF + +test_expect_success 'non-match result' 'test_cmp expect .git/config' + +test_expect_success 'find mixed-case key by canonical name' ' + test_cmp_config Second sections.whatever +' + +test_expect_success 'find mixed-case key by non-canonical name' ' + test_cmp_config Second SeCtIoNs.WhAtEvEr +' + +test_expect_success 'subsections are not canonicalized by git-config' ' + cat >>.git/config <<-\EOF && + [section.SubSection] + key = one + [section "SubSection"] + key = two + EOF + test_cmp_config one section.subsection.key && + test_cmp_config two section.SubSection.key +' + +cat > .git/config <<\EOF +[alpha] +bar = foo +[beta] +baz = multiple \ +lines +foo = bar +EOF + +test_expect_success 'unset with cont. lines' ' + git config --unset beta.baz +' + +cat > expect <<\EOF +[alpha] +bar = foo +[beta] +foo = bar +EOF + +test_expect_success 'unset with cont. 
lines is correct' 'test_cmp expect .git/config' + +cat > .git/config << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment + haha ="beta" # last silly comment +haha = hello + haha = bello +[nextSection] noNewline = ouch +EOF + +cp .git/config .git/config2 + +test_expect_success 'multiple unset' ' + git config --unset-all beta.haha +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] noNewline = ouch +EOF + +test_expect_success 'multiple unset is correct' ' + test_cmp expect .git/config +' + +cp .git/config2 .git/config + +test_expect_success '--replace-all missing value' ' + test_must_fail git config --replace-all beta.haha && + test_cmp .git/config2 .git/config +' + +rm .git/config2 + +test_expect_success '--replace-all' ' + git config --replace-all beta.haha gamma +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment + haha = gamma +[nextSection] noNewline = ouch +EOF + +test_expect_success 'all replaced' ' + test_cmp expect .git/config +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment + haha = alpha +[nextSection] noNewline = ouch +EOF +test_expect_success 'really mean test' ' + git config beta.haha alpha && + test_cmp expect .git/config +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment + haha = alpha +[nextSection] + nonewline = wow +EOF +test_expect_success 'really really mean test' ' + git config nextsection.nonewline wow && + test_cmp expect .git/config +' + +test_expect_success 'get value' ' + test_cmp_config alpha beta.haha +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] + nonewline = wow +EOF +test_expect_success 'unset' ' + git config --unset beta.haha && + test_cmp expect .git/config +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] + nonewline = wow + NoNewLine = wow2 for me +EOF +test_expect_success 'multivar' ' + git config nextsection.NoNewLine "wow2 for me" "for me$" && + test_cmp expect .git/config +' + +test_expect_success 'non-match' ' + git config --get nextsection.nonewline !for +' + +test_expect_success 'non-match value' ' + test_cmp_config wow --get nextsection.nonewline !for +' + +test_expect_success 'multi-valued get returns final one' ' + test_cmp_config "wow2 for me" --get nextsection.nonewline +' + +test_expect_success 'multi-valued get-all returns all' ' + cat >expect <<-\EOF && + wow + wow2 for me + EOF + git config --get-all nextsection.nonewline >actual && + test_cmp expect actual +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] + nonewline = wow3 + NoNewLine = wow2 for me +EOF +test_expect_success 'multivar replace' ' + git config nextsection.nonewline "wow3" "wow$" && + test_cmp expect .git/config +' + +test_expect_success 'ambiguous unset' ' + test_must_fail git config --unset nextsection.nonewline +' + +test_expect_success 'invalid unset' ' + test_must_fail git config --unset somesection.nonewline 
+' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] + NoNewLine = wow2 for me +EOF + +test_expect_success 'multivar unset' ' + git config --unset nextsection.nonewline "wow3$" && + test_cmp expect .git/config +' + +test_expect_success 'invalid key' 'test_must_fail git config inval.2key blabla' + +test_expect_success 'correct key' 'git config 123456.a123 987' + +test_expect_success 'hierarchical section' ' + git config Version.1.2.3eX.Alpha beta +' + +cat > expect << EOF +[beta] ; silly comment # another comment +noIndent= sillyValue ; 'nother silly comment + +# empty line + ; comment +[nextSection] + NoNewLine = wow2 for me +[123456] + a123 = 987 +[Version "1.2.3eX"] + Alpha = beta +EOF + +test_expect_success 'hierarchical section value' ' + test_cmp expect .git/config +' + +cat > expect << EOF +beta.noindent=sillyValue +nextsection.nonewline=wow2 for me +123456.a123=987 +version.1.2.3eX.alpha=beta +EOF + +test_expect_success 'working --list' ' + git config --list > output && + test_cmp expect output +' +test_expect_success '--list without repo produces empty output' ' + git --git-dir=nonexistent config --list >output && + test_must_be_empty output +' + +cat > expect << EOF +beta.noindent +nextsection.nonewline +123456.a123 +version.1.2.3eX.alpha +EOF + +test_expect_success '--name-only --list' ' + git config --name-only --list >output && + test_cmp expect output +' + +cat > expect << EOF +beta.noindent sillyValue +nextsection.nonewline wow2 for me +EOF + +test_expect_success '--get-regexp' ' + git config --get-regexp in >output && + test_cmp expect output +' + +cat > expect << EOF +beta.noindent +nextsection.nonewline +EOF + +test_expect_success '--name-only --get-regexp' ' + git config --name-only --get-regexp in >output && + test_cmp expect output +' + +cat > expect << EOF +wow2 for me +wow4 for you +EOF + +test_expect_success '--add' ' + git config --add nextsection.nonewline "wow4 for you" && + git config --get-all nextsection.nonewline > output && + test_cmp expect output +' + +cat > .git/config << EOF +[novalue] + variable +[emptyvalue] + variable = +EOF + +test_expect_success 'get variable with no value' ' + git config --get novalue.variable ^$ +' + +test_expect_success 'get variable with empty value' ' + git config --get emptyvalue.variable ^$ +' + +echo novalue.variable > expect + +test_expect_success 'get-regexp variable with no value' ' + git config --get-regexp novalue > output && + test_cmp expect output +' + +echo 'novalue.variable true' > expect + +test_expect_success 'get-regexp --bool variable with no value' ' + git config --bool --get-regexp novalue > output && + test_cmp expect output +' + +echo 'emptyvalue.variable ' > expect + +test_expect_success 'get-regexp variable with empty value' ' + git config --get-regexp emptyvalue > output && + test_cmp expect output +' + +echo true > expect + +test_expect_success 'get bool variable with no value' ' + git config --bool novalue.variable > output && + test_cmp expect output +' + +echo false > expect + +test_expect_success 'get bool variable with empty value' ' + git config --bool emptyvalue.variable > output && + test_cmp expect output +' + +test_expect_success 'no arguments, but no crash' ' + test_must_fail git config >output 2>&1 && + test_i18ngrep usage output +' + +cat > .git/config << EOF +[a.b] + c = d +EOF + +cat > expect << EOF +[a.b] + c = d +[a] + x = y +EOF + +test_expect_success 'new section is partial match of 
another' ' + git config a.x y && + test_cmp expect .git/config +' + +cat > expect << EOF +[a.b] + c = d +[a] + x = y + b = c +[b] + x = y +EOF + +test_expect_success 'new variable inserts into proper section' ' + git config b.x y && + git config a.b c && + test_cmp expect .git/config +' + +test_expect_success 'alternative --file (non-existing file should fail)' ' + test_must_fail git config --file non-existing-config -l && + test_must_fail git config --file non-existing-config test.xyzzy +' + +cat > other-config << EOF +[ein] + bahn = strasse +EOF + +cat > expect << EOF +ein.bahn=strasse +EOF + +test_expect_success 'alternative GIT_CONFIG' ' + GIT_CONFIG=other-config git config --list >output && + test_cmp expect output +' + +test_expect_success 'alternative GIT_CONFIG (--file)' ' + git config --file other-config --list >output && + test_cmp expect output +' + +test_expect_success 'alternative GIT_CONFIG (--file=-)' ' + git config --file - --list output && + test_cmp expect output +' + +test_expect_success 'setting a value in stdin is an error' ' + test_must_fail git config --file - some.value foo +' + +test_expect_success 'editing stdin is an error' ' + test_must_fail git config --file - --edit +' + +test_expect_success 'refer config from subdirectory' ' + mkdir x && + test_cmp_config -C x strasse --file=../other-config --get ein.bahn +' + +cat > expect << EOF +[ein] + bahn = strasse +[anwohner] + park = ausweis +EOF + +test_expect_success '--set in alternative file' ' + git config --file=other-config anwohner.park ausweis && + test_cmp expect other-config +' + +cat > .git/config << EOF +# Hallo + #Bello +[branch "eins"] + x = 1 +[branch.eins] + y = 1 + [branch "1 234 blabl/a"] +weird +EOF + +test_expect_success 'rename section' ' + git config --rename-section branch.eins branch.zwei +' + +cat > expect << EOF +# Hallo + #Bello +[branch "zwei"] + x = 1 +[branch "zwei"] + y = 1 + [branch "1 234 blabl/a"] +weird +EOF + +test_expect_success 'rename succeeded' ' + test_cmp expect .git/config +' + +test_expect_success 'rename non-existing section' ' + test_must_fail git config --rename-section \ + branch."world domination" branch.drei +' + +test_expect_success 'rename succeeded' ' + test_cmp expect .git/config +' + +test_expect_success 'rename another section' ' + git config --rename-section branch."1 234 blabl/a" branch.drei +' + +cat > expect << EOF +# Hallo + #Bello +[branch "zwei"] + x = 1 +[branch "zwei"] + y = 1 +[branch "drei"] +weird +EOF + +test_expect_success 'rename succeeded' ' + test_cmp expect .git/config +' + +cat >> .git/config << EOF +[branch "vier"] z = 1 +EOF + +test_expect_success 'rename a section with a var on the same line' ' + git config --rename-section branch.vier branch.zwei +' + +cat > expect << EOF +# Hallo + #Bello +[branch "zwei"] + x = 1 +[branch "zwei"] + y = 1 +[branch "drei"] +weird +[branch "zwei"] + z = 1 +EOF + +test_expect_success 'rename succeeded' ' + test_cmp expect .git/config +' + +test_expect_success 'renaming empty section name is rejected' ' + test_must_fail git config --rename-section branch.zwei "" +' + +test_expect_success 'renaming to bogus section is rejected' ' + test_must_fail git config --rename-section branch.zwei "bogus name" +' + +test_expect_success 'renaming a section with a long line' ' + { + printf "[b]\\n" && + printf " c = d %1024s [a] e = f\\n" " " && + printf "[a] g = h\\n" + } >y && + git config -f y --rename-section a xyz && + test_must_fail git config -f y b.e +' + +test_expect_success 'renaming an embedded section with a long 
line' ' + { + printf "[b]\\n" && + printf " c = d %1024s [a] [foo] e = f\\n" " " && + printf "[a] g = h\\n" + } >y && + git config -f y --rename-section a xyz && + test_must_fail git config -f y foo.e +' + +cat >> .git/config << EOF + [branch "zwei"] a = 1 [branch "vier"] +EOF + +test_expect_success 'remove section' ' + git config --remove-section branch.zwei +' + +cat > expect << EOF +# Hallo + #Bello +[branch "drei"] +weird +EOF + +test_expect_success 'section was removed properly' ' + test_cmp expect .git/config +' + +cat > expect << EOF +[gitcvs] + enabled = true + dbname = %Ggitcvs2.%a.%m.sqlite +[gitcvs "ext"] + dbname = %Ggitcvs1.%a.%m.sqlite +EOF + +test_expect_success 'section ending' ' + rm -f .git/config && + git config gitcvs.enabled true && + git config gitcvs.ext.dbname %Ggitcvs1.%a.%m.sqlite && + git config gitcvs.dbname %Ggitcvs2.%a.%m.sqlite && + test_cmp expect .git/config + +' + +test_expect_success numbers ' + git config kilo.gram 1k && + git config mega.ton 1m && + echo 1024 >expect && + echo 1048576 >>expect && + git config --int --get kilo.gram >actual && + git config --int --get mega.ton >>actual && + test_cmp expect actual +' + +test_expect_success '--int is at least 64 bits' ' + git config giga.watts 121g && + echo >expect && + test_cmp_config 129922760704 --int --get giga.watts +' + +test_expect_success 'invalid unit' ' + git config aninvalid.unit "1auto" && + test_cmp_config 1auto aninvalid.unit && + test_must_fail git config --int --get aninvalid.unit 2>actual && + test_i18ngrep "bad numeric config value .1auto. for .aninvalid.unit. in file .git/config: invalid unit" actual +' + +test_expect_success 'invalid unit boolean' ' + git config commit.gpgsign "1true" && + test_cmp_config 1true commit.gpgsign && + test_must_fail git config --bool --get commit.gpgsign 2>actual && + test_i18ngrep "bad boolean config value .1true. for .commit.gpgsign." 
actual +' + +test_expect_success 'line number is reported correctly' ' + printf "[bool]\n\tvar\n" >invalid && + test_must_fail git config -f invalid --path bool.var 2>actual && + test_i18ngrep "line 2" actual +' + +test_expect_success 'invalid stdin config' ' + echo "[broken" | test_must_fail git config --list --file - >output 2>&1 && + test_i18ngrep "bad config line 1 in standard input" output +' + +cat > expect << EOF +true +false +true +false +true +false +true +false +EOF + +test_expect_success bool ' + + git config bool.true1 01 && + git config bool.true2 -1 && + git config bool.true3 YeS && + git config bool.true4 true && + git config bool.false1 000 && + git config bool.false2 "" && + git config bool.false3 nO && + git config bool.false4 FALSE && + rm -f result && + for i in 1 2 3 4 + do + git config --bool --get bool.true$i >>result + git config --bool --get bool.false$i >>result + done && + test_cmp expect result' + +test_expect_success 'invalid bool (--get)' ' + + git config bool.nobool foobar && + test_must_fail git config --bool --get bool.nobool' + +test_expect_success 'invalid bool (set)' ' + + test_must_fail git config --bool bool.nobool foobar' + +cat > expect <<\EOF +[bool] + true1 = true + true2 = true + true3 = true + true4 = true + false1 = false + false2 = false + false3 = false + false4 = false +EOF + +test_expect_success 'set --bool' ' + + rm -f .git/config && + git config --bool bool.true1 01 && + git config --bool bool.true2 -1 && + git config --bool bool.true3 YeS && + git config --bool bool.true4 true && + git config --bool bool.false1 000 && + git config --bool bool.false2 "" && + git config --bool bool.false3 nO && + git config --bool bool.false4 FALSE && + test_cmp expect .git/config' + +cat > expect <<\EOF +[int] + val1 = 1 + val2 = -1 + val3 = 5242880 +EOF + +test_expect_success 'set --int' ' + + rm -f .git/config && + git config --int int.val1 01 && + git config --int int.val2 -1 && + git config --int int.val3 5m && + test_cmp expect .git/config +' + +test_expect_success 'get --bool-or-int' ' + cat >.git/config <<-\EOF && + [bool] + true1 + true2 = true + false = false + [int] + int1 = 0 + int2 = 1 + int3 = -1 + EOF + cat >expect <<-\EOF && + true + true + false + 0 + 1 + -1 + EOF + { + git config --bool-or-int bool.true1 && + git config --bool-or-int bool.true2 && + git config --bool-or-int bool.false && + git config --bool-or-int int.int1 && + git config --bool-or-int int.int2 && + git config --bool-or-int int.int3 + } >actual && + test_cmp expect actual +' + +cat >expect <<\EOF +[bool] + true1 = true + false1 = false + true2 = true + false2 = false +[int] + int1 = 0 + int2 = 1 + int3 = -1 +EOF + +test_expect_success 'set --bool-or-int' ' + rm -f .git/config && + git config --bool-or-int bool.true1 true && + git config --bool-or-int bool.false1 false && + git config --bool-or-int bool.true2 yes && + git config --bool-or-int bool.false2 no && + git config --bool-or-int int.int1 0 && + git config --bool-or-int int.int2 1 && + git config --bool-or-int int.int3 -1 && + test_cmp expect .git/config +' + +cat >expect <<\EOF +[path] + home = ~/ + normal = /dev/null + trailingtilde = foo~ +EOF + +test_expect_success !MINGW 'set --path' ' + rm -f .git/config && + git config --path path.home "~/" && + git config --path path.normal "/dev/null" && + git config --path path.trailingtilde "foo~" && + test_cmp expect .git/config' + +if test_have_prereq !MINGW && test "${HOME+set}" +then + test_set_prereq HOMEVAR +fi + +cat >expect < result && + git config --get --path 
path.normal >> result &&
+	git config --get --path path.trailingtilde >> result &&
+	test_cmp expect result
+'
+
+cat >expect <<\EOF
+/dev/null
+foo~
+EOF
+
+test_expect_success !MINGW 'get --path copes with unset $HOME' '
+	(
+		sane_unset HOME &&
+		test_must_fail git config --get --path path.home \
+			>result 2>msg &&
+		git config --get --path path.normal >>result &&
+		git config --get --path path.trailingtilde >>result
+	) &&
+	test_i18ngrep "[Ff]ailed to expand.*~/" msg &&
+	test_cmp expect result
+'
+
+test_expect_success 'get --path barfs on boolean variable' '
+	echo "[path]bool" >.git/config &&
+	test_must_fail git config --get --path path.bool
+'
+
+test_expect_success 'get --expiry-date' '
+	rel="3.weeks.5.days.00:00" &&
+	rel_out="$rel ->" &&
+	cat >.git/config <<-\EOF &&
+	[date]
+	valid1 = "3.weeks.5.days 00:00"
+	valid2 = "Fri Jun 4 15:46:55 2010"
+	valid3 = "2017/11/11 11:11:11PM"
+	valid4 = "2017/11/10 09:08:07 PM"
+	valid5 = "never"
+	invalid1 = "abc"
+	EOF
+	cat >expect <<-EOF &&
+	$(test-tool date timestamp $rel)
+	1275666415
+	1510441871
+	1510348087
+	0
+	EOF
+	: "work around heredoc parsing bug fixed in dash 0.5.7 (in ec2c84d)" &&
+	{
+		echo "$rel_out $(git config --expiry-date date.valid1)"
+		git config --expiry-date date.valid2 &&
+		git config --expiry-date date.valid3 &&
+		git config --expiry-date date.valid4 &&
+		git config --expiry-date date.valid5
+	} >actual &&
+	test_cmp expect actual &&
+	test_must_fail git config --expiry-date date.invalid1
+'
+
+test_expect_success 'get --type=color' '
+	rm .git/config &&
+	git config foo.color "red" &&
+	git config --get --type=color foo.color >actual.raw &&
+	test_decode_color <actual.raw >actual &&
+	echo "<RED>" >expect &&
+	test_cmp expect actual
+'
+
+cat >expect << EOF
+[foo]
+	color = red
+EOF
+
+test_expect_success 'set --type=color' '
+	rm .git/config &&
+	git config --type=color foo.color "red" &&
+	test_cmp expect .git/config
+'
+
+test_expect_success 'get --type=color barfs on non-color' '
+	echo "[foo]bar=not-a-color" >.git/config &&
+	test_must_fail git config --get --type=color foo.bar
+'
+
+test_expect_success 'set --type=color barfs on non-color' '
+	test_must_fail git config --type=color foo.color "not-a-color" 2>error &&
+	test_i18ngrep "cannot parse color" error
+'
+
+cat > expect << EOF
+[quote]
+	leading = " test"
+	ending = "test "
+	semicolon = "test;test"
+	hash = "test#test"
+EOF
+test_expect_success 'quoting' '
+	rm -f .git/config &&
+	git config quote.leading " test" &&
+	git config quote.ending "test " &&
+	git config quote.semicolon "test;test" &&
+	git config quote.hash "test#test" &&
+	test_cmp expect .git/config
+'
+
+test_expect_success 'key with newline' '
+	test_must_fail git config "key.with
+newline" 123'
+
+test_expect_success 'value with newline' 'git config key.sub value.with\\\
+newline'
+
+cat > .git/config <<\EOF
+[section]
+	; comment \
+	continued = cont\
+inued
+	noncont = not continued ; \
+	quotecont = "cont;\
+inued"
+EOF
+
+cat > expect <<\EOF
+section.continued=continued
+section.noncont=not continued
+section.quotecont=cont;inued
+EOF
+
+test_expect_success 'value continued on next line' '
+	git config --list > result &&
+	test_cmp expect result
+'
+
+cat > .git/config <<\EOF
+[section "sub=section"]
+	val1 = foo=bar
+	val2 = foo\nbar
+	val3 = \n\n
+	val4 =
+	val5
+EOF
+
+cat > expect <<\EOF
+section.sub=section.val1
+foo=barQsection.sub=section.val2
+foo
+barQsection.sub=section.val3
+
+
+Qsection.sub=section.val4
+Qsection.sub=section.val5Q
+EOF
+test_expect_success '--null --list' '
+	
git config --null --list >result.raw &&
+	nul_to_q <result.raw >result &&
+	echo >>result &&
+	test_cmp expect result
+'
+
+test_expect_success '--null --get-regexp' '
+	git config --null --get-regexp "val[0-9]" >result.raw &&
+	nul_to_q <result.raw >result &&
+	echo >>result &&
+	test_cmp expect result
+'
+
+test_expect_success 'inner whitespace kept verbatim' '
+	git config section.val "foo bar" &&
+	test_cmp_config "foo bar" section.val
+'
+
+test_expect_success SYMLINKS 'symlinked configuration' '
+	ln -s notyet myconfig &&
+	git config --file=myconfig test.frotz nitfol &&
+	test -h myconfig &&
+	test -f notyet &&
+	test "z$(git config --file=notyet test.frotz)" = znitfol &&
+	git config --file=myconfig test.xyzzy rezrov &&
+	test -h myconfig &&
+	test -f notyet &&
+	cat >expect <<-\EOF &&
+	nitfol
+	rezrov
+	EOF
+	{
+		git config --file=notyet test.frotz &&
+		git config --file=notyet test.xyzzy
+	} >actual &&
+	test_cmp expect actual
+'
+
+test_expect_success SYMLINKS 'symlink to nonexistent configuration' '
+	ln -s doesnotexist linktonada &&
+	ln -s linktonada linktolinktonada &&
+	test_must_fail git config --file=linktonada --list &&
+	test_must_fail git config --file=linktolinktonada --list
+'
+
+test_expect_success 'check split_cmdline return' "
+	git config alias.split-cmdline-fix 'echo \"' &&
+	test_must_fail git split-cmdline-fix &&
+	echo foo > foo &&
+	git add foo &&
+	git commit -m 'initial commit' &&
+	git config branch.main.mergeoptions 'echo \"' &&
+	test_must_fail git merge main
+"
+
+test_expect_success 'git -c "key=value" support' '
+	cat >expect <<-\EOF &&
+	value
+	value
+	true
+	EOF
+	{
+		git -c section.name=value config section.name &&
+		git -c foo.CamelCase=value config foo.camelcase &&
+		git -c foo.flag config --bool foo.flag
+	} >actual &&
+	test_cmp expect actual &&
+	test_must_fail git -c name=value config section.name
+'
+
+# We just need a type-specifier here that cares about the
+# distinction internally between a NULL boolean and a real
+# string (because most of git's internal parsers do care).
+# Using "--path" works, but we do not otherwise care about
+# its semantics.
+test_expect_success 'git -c can represent empty string' '
+	echo >expect &&
+	git -c foo.empty= config --path foo.empty >actual &&
+	test_cmp expect actual
+'
+
+test_expect_success 'key sanity-checking' '
+	test_must_fail git config foo=bar &&
+	test_must_fail git config foo=.bar &&
+	test_must_fail git config foo.ba=r &&
+	test_must_fail git config foo.1bar &&
+	test_must_fail git config foo."ba
+	z".bar &&
+	test_must_fail git config . false &&
+	test_must_fail git config .foo false &&
+	test_must_fail git config foo. false &&
+	test_must_fail git config .foo. 
false && + git config foo.bar true && + git config foo."ba =z".bar false +' + +test_expect_success 'git -c works with aliases of builtins' ' + git config alias.checkconfig "-c foo.check=bar config foo.check" && + echo bar >expect && + git checkconfig >actual && + test_cmp expect actual +' + +test_expect_success 'aliases can be CamelCased' ' + test_config alias.CamelCased "rev-parse HEAD" && + git CamelCased >out && + git rev-parse HEAD >expect && + test_cmp expect out +' + +test_expect_success 'git -c does not split values on equals' ' + echo "value with = in it" >expect && + git -c section.foo="value with = in it" config section.foo >actual && + test_cmp expect actual +' + +test_expect_success 'git -c dies on bogus config' ' + test_must_fail git -c core.bare=foo rev-parse +' + +test_expect_success 'git -c complains about empty key' ' + test_must_fail git -c "=foo" rev-parse +' + +test_expect_success 'git -c complains about empty key and value' ' + test_must_fail git -c "" rev-parse +' + +test_expect_success 'multiple git -c appends config' ' + test_config alias.x "!git -c x.two=2 config --get-regexp ^x\.*" && + cat >expect <<-\EOF && + x.one 1 + x.two 2 + EOF + git -c x.one=1 x >actual && + test_cmp expect actual +' + +test_expect_success 'last one wins: two level vars' ' + + # sec.var and sec.VAR are the same variable, as the first + # and the last level of a configuration variable name is + # case insensitive. + + echo VAL >expect && + + git -c sec.var=val -c sec.VAR=VAL config --get sec.var >actual && + test_cmp expect actual && + git -c SEC.var=val -c sec.var=VAL config --get sec.var >actual && + test_cmp expect actual && + + git -c sec.var=val -c sec.VAR=VAL config --get SEC.var >actual && + test_cmp expect actual && + git -c SEC.var=val -c sec.var=VAL config --get sec.VAR >actual && + test_cmp expect actual +' + +test_expect_success 'last one wins: three level vars' ' + + # v.a.r and v.A.r are not the same variable, as the middle + # level of a three-level configuration variable name is + # case sensitive. + + echo val >expect && + git -c v.a.r=val -c v.A.r=VAL config --get v.a.r >actual && + test_cmp expect actual && + git -c v.a.r=val -c v.A.r=VAL config --get V.a.R >actual && + test_cmp expect actual && + + # v.a.r and V.a.R are the same variable, as the first + # and the last level of a configuration variable name is + # case insensitive. 
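+	#
+	# Illustrative sketch (not part of the original test): with nothing
+	# else configured, the assumed behaviour of this folding rule is
+	#
+	#	git -c v.a.r=val config --get V.a.R	# prints "val"
+	#	git -c v.a.r=val config --get v.A.r	# no match, exits 1
+	#
+	# because only the middle (subsection) level is compared
+	# case-sensitively.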
+ + echo VAL >expect && + git -c v.a.r=val -c v.a.R=VAL config --get v.a.r >actual && + test_cmp expect actual && + git -c v.a.r=val -c V.a.r=VAL config --get v.a.r >actual && + test_cmp expect actual && + git -c v.a.r=val -c v.a.R=VAL config --get V.a.R >actual && + test_cmp expect actual && + git -c v.a.r=val -c V.a.r=VAL config --get V.a.R >actual && + test_cmp expect actual +' + +test_expect_success 'old-fashioned settings are case insensitive' ' + test_when_finished "rm -f testConfig testConfig_expect testConfig_actual" && + + cat >testConfig_actual <<-EOF && + [V.A] + r = value1 + EOF + q_to_tab >testConfig_expect <<-EOF && + [V.A] + Qr = value2 + EOF + git config -f testConfig_actual "v.a.r" value2 && + test_cmp testConfig_expect testConfig_actual && + + cat >testConfig_actual <<-EOF && + [V.A] + r = value1 + EOF + q_to_tab >testConfig_expect <<-EOF && + [V.A] + QR = value2 + EOF + git config -f testConfig_actual "V.a.R" value2 && + test_cmp testConfig_expect testConfig_actual && + + cat >testConfig_actual <<-EOF && + [V.A] + r = value1 + EOF + q_to_tab >testConfig_expect <<-EOF && + [V.A] + r = value1 + Qr = value2 + EOF + git config -f testConfig_actual "V.A.r" value2 && + test_cmp testConfig_expect testConfig_actual && + + cat >testConfig_actual <<-EOF && + [V.A] + r = value1 + EOF + q_to_tab >testConfig_expect <<-EOF && + [V.A] + r = value1 + Qr = value2 + EOF + git config -f testConfig_actual "v.A.r" value2 && + test_cmp testConfig_expect testConfig_actual +' + +test_expect_success 'setting different case sensitive subsections ' ' + test_when_finished "rm -f testConfig testConfig_expect testConfig_actual" && + + cat >testConfig_actual <<-EOF && + [V "A"] + R = v1 + [K "E"] + Y = v1 + [a "b"] + c = v1 + [d "e"] + f = v1 + EOF + q_to_tab >testConfig_expect <<-EOF && + [V "A"] + Qr = v2 + [K "E"] + Qy = v2 + [a "b"] + Qc = v2 + [d "e"] + f = v1 + [d "E"] + Qf = v2 + EOF + # exact match + git config -f testConfig_actual a.b.c v2 && + # match section and subsection, key is cased differently. + git config -f testConfig_actual K.E.y v2 && + # section and key are matched case insensitive, but subsection needs + # to match; When writing out new values only the key is adjusted + git config -f testConfig_actual v.A.r v2 && + # subsection is not matched: + git config -f testConfig_actual d.E.f v2 && + test_cmp testConfig_expect testConfig_actual +' + +for VAR in a .a a. a.0b a."b c". 
a."b c".0d +do + test_expect_success "git -c $VAR=VAL rejects invalid '$VAR'" ' + test_must_fail git -c "$VAR=VAL" config -l + ' +done + +for VAR in a.b a."b c".d +do + test_expect_success "git -c $VAR=VAL works with valid '$VAR'" ' + echo VAL >expect && + git -c "$VAR=VAL" config --get "$VAR" >actual && + test_cmp expect actual + ' +done + +test_expect_success 'git -c is not confused by empty environment' ' + GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list +' + +test_expect_success 'GIT_CONFIG_PARAMETERS handles old-style entries' ' + v="${SQ}key.one=foo${SQ}" && + v="$v ${SQ}key.two=bar${SQ}" && + v="$v ${SQ}key.ambiguous=section.whatever=value${SQ}" && + GIT_CONFIG_PARAMETERS=$v git config --get-regexp "key.*" >actual && + cat >expect <<-EOF && + key.one foo + key.two bar + key.ambiguous section.whatever=value + EOF + test_cmp expect actual +' + +test_expect_success 'GIT_CONFIG_PARAMETERS handles new-style entries' ' + v="${SQ}key.one${SQ}=${SQ}foo${SQ}" && + v="$v ${SQ}key.two${SQ}=${SQ}bar${SQ}" && + v="$v ${SQ}key.ambiguous=section.whatever${SQ}=${SQ}value${SQ}" && + GIT_CONFIG_PARAMETERS=$v git config --get-regexp "key.*" >actual && + cat >expect <<-EOF && + key.one foo + key.two bar + key.ambiguous=section.whatever value + EOF + test_cmp expect actual +' + +test_expect_success 'old and new-style entries can mix' ' + v="${SQ}key.oldone=oldfoo${SQ}" && + v="$v ${SQ}key.newone${SQ}=${SQ}newfoo${SQ}" && + v="$v ${SQ}key.oldtwo=oldbar${SQ}" && + v="$v ${SQ}key.newtwo${SQ}=${SQ}newbar${SQ}" && + GIT_CONFIG_PARAMETERS=$v git config --get-regexp "key.*" >actual && + cat >expect <<-EOF && + key.oldone oldfoo + key.newone newfoo + key.oldtwo oldbar + key.newtwo newbar + EOF + test_cmp expect actual +' + +test_expect_success 'old and new bools with ambiguous subsection' ' + v="${SQ}key.with=equals.oldbool${SQ}" && + v="$v ${SQ}key.with=equals.newbool${SQ}=" && + GIT_CONFIG_PARAMETERS=$v git config --get-regexp "key.*" >actual && + cat >expect <<-EOF && + key.with equals.oldbool + key.with=equals.newbool + EOF + test_cmp expect actual +' + +test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' ' + cat >expect <<-\EOF && + env.one one + env.two two + EOF + GIT_CONFIG_PARAMETERS="${SQ}env.one=one${SQ} ${SQ}env.two=two${SQ}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + cat >expect <<-EOF && + env.one one${SQ} + env.two two + EOF + GIT_CONFIG_PARAMETERS="${SQ}env.one=one${SQ}\\$SQ$SQ$SQ ${SQ}env.two=two${SQ}" \ + git config --get-regexp "env.*" >actual && + test_cmp expect actual && + + test_must_fail env \ + GIT_CONFIG_PARAMETERS="${SQ}env.one=one${SQ}\\$SQ ${SQ}env.two=two${SQ}" \ + git config --get-regexp "env.*" +' + +test_expect_success 'git --config-env=key=envvar support' ' + cat >expect <<-\EOF && + value + value + value + value + false + false + EOF + { + ENVVAR=value git --config-env=core.name=ENVVAR config core.name && + ENVVAR=value git --config-env core.name=ENVVAR config core.name && + ENVVAR=value git --config-env=foo.CamelCase=ENVVAR config foo.camelcase && + ENVVAR=value git --config-env foo.CamelCase=ENVVAR config foo.camelcase && + ENVVAR= git --config-env=foo.flag=ENVVAR config --bool foo.flag && + ENVVAR= git --config-env foo.flag=ENVVAR config --bool foo.flag + } >actual && + test_cmp expect actual +' + +test_expect_success 'git --config-env with missing value' ' + test_must_fail env ENVVAR=value git --config-env 2>error && + grep "no config key given for --config-env" error && + test_must_fail env ENVVAR=value git --config-env 
config core.name 2>error && + grep "invalid config format: config" error +' + +test_expect_success 'git --config-env fails with invalid parameters' ' + test_must_fail git --config-env=foo.flag config --bool foo.flag 2>error && + test_i18ngrep "invalid config format: foo.flag" error && + test_must_fail git --config-env=foo.flag= config --bool foo.flag 2>error && + test_i18ngrep "missing environment variable name for configuration ${SQ}foo.flag${SQ}" error && + sane_unset NONEXISTENT && + test_must_fail git --config-env=foo.flag=NONEXISTENT config --bool foo.flag 2>error && + test_i18ngrep "missing environment variable ${SQ}NONEXISTENT${SQ} for configuration ${SQ}foo.flag${SQ}" error +' + +test_expect_success 'git -c and --config-env work together' ' + cat >expect <<-\EOF && + bar.cmd cmd-value + bar.env env-value + EOF + ENVVAR=env-value git \ + -c bar.cmd=cmd-value \ + --config-env=bar.env=ENVVAR \ + config --get-regexp "^bar.*" >actual && + test_cmp expect actual +' + +test_expect_success 'git -c and --config-env override each other' ' + cat >expect <<-\EOF && + env + cmd + EOF + { + ENVVAR=env git -c bar.bar=cmd --config-env=bar.bar=ENVVAR config bar.bar && + ENVVAR=env git --config-env=bar.bar=ENVVAR -c bar.bar=cmd config bar.bar + } >actual && + test_cmp expect actual +' + +test_expect_success '--config-env handles keys with equals' ' + echo value=with=equals >expect && + ENVVAR=value=with=equals git \ + --config-env=section.subsection=with=equals.key=ENVVAR \ + config section.subsection=with=equals.key >actual && + test_cmp expect actual +' + +test_expect_success 'git config handles environment config pairs' ' + GIT_CONFIG_COUNT=2 \ + GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="foo" \ + GIT_CONFIG_KEY_1="pair.two" GIT_CONFIG_VALUE_1="bar" \ + git config --get-regexp "pair.*" >actual && + cat >expect <<-EOF && + pair.one foo + pair.two bar + EOF + test_cmp expect actual +' + +test_expect_success 'git config ignores pairs without count' ' + test_must_fail env GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="value" \ + git config pair.one 2>error && + test_must_be_empty error +' + +test_expect_success 'git config ignores pairs with zero count' ' + test_must_fail env \ + GIT_CONFIG_COUNT=0 \ + GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="value" \ + git config pair.one +' + +test_expect_success 'git config ignores pairs exceeding count' ' + GIT_CONFIG_COUNT=1 \ + GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="value" \ + GIT_CONFIG_KEY_1="pair.two" GIT_CONFIG_VALUE_1="value" \ + git config --get-regexp "pair.*" >actual && + cat >expect <<-EOF && + pair.one value + EOF + test_cmp expect actual +' + +test_expect_success 'git config ignores pairs with zero count' ' + test_must_fail env \ + GIT_CONFIG_COUNT=0 GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="value" \ + git config pair.one >error && + test_must_be_empty error +' + +test_expect_success 'git config ignores pairs with empty count' ' + test_must_fail env \ + GIT_CONFIG_COUNT= GIT_CONFIG_KEY_0="pair.one" GIT_CONFIG_VALUE_0="value" \ + git config pair.one >error && + test_must_be_empty error +' + +test_expect_success 'git config fails with invalid count' ' + test_must_fail env GIT_CONFIG_COUNT=10a git config --list 2>error && + test_i18ngrep "bogus count" error && + test_must_fail env GIT_CONFIG_COUNT=9999999999999999 git config --list 2>error && + test_i18ngrep "too many entries" error +' + +test_expect_success 'git config fails with missing config key' ' + test_must_fail env GIT_CONFIG_COUNT=1 GIT_CONFIG_VALUE_0="value" \ + git 
config --list 2>error && + test_i18ngrep "missing config key" error +' + +test_expect_success 'git config fails with missing config value' ' + test_must_fail env GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0="pair.one" \ + git config --list 2>error && + test_i18ngrep "missing config value" error +' + +test_expect_success 'git config fails with invalid config pair key' ' + test_must_fail env GIT_CONFIG_COUNT=1 \ + GIT_CONFIG_KEY_0= GIT_CONFIG_VALUE_0=value \ + git config --list && + test_must_fail env GIT_CONFIG_COUNT=1 \ + GIT_CONFIG_KEY_0=missing-section GIT_CONFIG_VALUE_0=value \ + git config --list +' + +test_expect_success 'environment overrides config file' ' + test_when_finished "rm -f .git/config" && + cat >.git/config <<-EOF && + [pair] + one = value + EOF + GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=pair.one GIT_CONFIG_VALUE_0=override \ + git config pair.one >actual && + cat >expect <<-EOF && + override + EOF + test_cmp expect actual +' + +test_expect_success 'GIT_CONFIG_PARAMETERS overrides environment config' ' + GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=pair.one GIT_CONFIG_VALUE_0=value \ + GIT_CONFIG_PARAMETERS="${SQ}pair.one=override${SQ}" \ + git config pair.one >actual && + cat >expect <<-EOF && + override + EOF + test_cmp expect actual +' + +test_expect_success 'command line overrides environment config' ' + GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=pair.one GIT_CONFIG_VALUE_0=value \ + git -c pair.one=override config pair.one >actual && + cat >expect <<-EOF && + override + EOF + test_cmp expect actual +' + +test_expect_success 'git config --edit works' ' + git config -f tmp test.value no && + echo test.value=yes >expect && + GIT_EDITOR="echo [test]value=yes >" git config -f tmp --edit && + git config -f tmp --list >actual && + test_cmp expect actual +' + +test_expect_success 'git config --edit respects core.editor' ' + git config -f tmp test.value no && + echo test.value=yes >expect && + test_config core.editor "echo [test]value=yes >" && + git config -f tmp --edit && + git config -f tmp --list >actual && + test_cmp expect actual +' + +# malformed configuration files +test_expect_success 'barf on syntax error' ' + cat >.git/config <<-\EOF && + # broken section line + [section] + key garbage + EOF + test_must_fail git config --get section.key >actual 2>error && + test_i18ngrep " line 3 " error +' + +test_expect_success 'barf on incomplete section header' ' + cat >.git/config <<-\EOF && + # broken section line + [section + key = value + EOF + test_must_fail git config --get section.key >actual 2>error && + test_i18ngrep " line 2 " error +' + +test_expect_success 'barf on incomplete string' ' + cat >.git/config <<-\EOF && + # broken section line + [section] + key = "value string + EOF + test_must_fail git config --get section.key >actual 2>error && + test_i18ngrep " line 3 " error +' + +test_expect_success 'urlmatch' ' + cat >.git/config <<-\EOF && + [http] + sslVerify + [http "https://weak.example.com"] + sslVerify = false + cookieFile = /tmp/cookie.txt + EOF + + test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual && + test_must_be_empty actual && + + echo true >expect && + git config --bool --get-urlmatch http.SSLverify https://good.example.com >actual && + test_cmp expect actual && + + echo false >expect && + git config --bool --get-urlmatch http.sslverify https://weak.example.com >actual && + test_cmp expect actual && + + { + echo http.cookiefile /tmp/cookie.txt && + echo http.sslverify false + } >expect && + git config --get-urlmatch HTTP 
https://weak.example.com >actual && + test_cmp expect actual +' + +test_expect_success 'urlmatch favors more specific URLs' ' + cat >.git/config <<-\EOF && + [http "https://example.com/"] + cookieFile = /tmp/root.txt + [http "https://example.com/subdirectory"] + cookieFile = /tmp/subdirectory.txt + [http "https://user@example.com/"] + cookieFile = /tmp/user.txt + [http "https://averylonguser@example.com/"] + cookieFile = /tmp/averylonguser.txt + [http "https://preceding.example.com"] + cookieFile = /tmp/preceding.txt + [http "https://*.example.com"] + cookieFile = /tmp/wildcard.txt + [http "https://*.example.com/wildcardwithsubdomain"] + cookieFile = /tmp/wildcardwithsubdomain.txt + [http "https://*.example.*"] + cookieFile = /tmp/multiwildcard.txt + [http "https://trailing.example.com"] + cookieFile = /tmp/trailing.txt + [http "https://user@*.example.com/"] + cookieFile = /tmp/wildcardwithuser.txt + [http "https://sub.example.com/"] + cookieFile = /tmp/sub.txt + EOF + + echo http.cookiefile /tmp/root.txt >expect && + git config --get-urlmatch HTTP https://example.com >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/subdirectory.txt >expect && + git config --get-urlmatch HTTP https://example.com/subdirectory >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/subdirectory.txt >expect && + git config --get-urlmatch HTTP https://example.com/subdirectory/nested >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/user.txt >expect && + git config --get-urlmatch HTTP https://user@example.com/ >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/subdirectory.txt >expect && + git config --get-urlmatch HTTP https://averylonguser@example.com/subdirectory >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/preceding.txt >expect && + git config --get-urlmatch HTTP https://preceding.example.com >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/wildcard.txt >expect && + git config --get-urlmatch HTTP https://wildcard.example.com >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/sub.txt >expect && + git config --get-urlmatch HTTP https://sub.example.com/wildcardwithsubdomain >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/trailing.txt >expect && + git config --get-urlmatch HTTP https://trailing.example.com >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/sub.txt >expect && + git config --get-urlmatch HTTP https://user@sub.example.com >actual && + test_cmp expect actual && + + echo http.cookiefile /tmp/multiwildcard.txt >expect && + git config --get-urlmatch HTTP https://wildcard.example.org >actual && + test_cmp expect actual +' + +test_expect_success 'urlmatch with wildcard' ' + cat >.git/config <<-\EOF && + [http] + sslVerify + [http "https://*.example.com"] + sslVerify = false + cookieFile = /tmp/cookie.txt + EOF + + test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual && + test_must_be_empty actual && + + echo true >expect && + git config --bool --get-urlmatch http.SSLverify https://example.com >actual && + test_cmp expect actual && + + echo true >expect && + git config --bool --get-urlmatch http.SSLverify https://good-example.com >actual && + test_cmp expect actual && + + echo true >expect && + git config --bool --get-urlmatch http.sslverify https://deep.nested.example.com >actual && + test_cmp expect actual && + + echo false >expect && + git config --bool --get-urlmatch http.sslverify 
https://good.example.com >actual && + test_cmp expect actual && + + { + echo http.cookiefile /tmp/cookie.txt && + echo http.sslverify false + } >expect && + git config --get-urlmatch HTTP https://good.example.com >actual && + test_cmp expect actual && + + echo http.sslverify >expect && + git config --get-urlmatch HTTP https://more.example.com.au >actual && + test_cmp expect actual +' + +# good section hygiene +test_expect_success '--unset last key removes section (except if commented)' ' + cat >.git/config <<-\EOF && + # some generic comment on the configuration file itself + # a comment specific to this "section" section. + [section] + # some intervening lines + # that should also be dropped + + key = value + # please be careful when you update the above variable + EOF + + cat >expect <<-\EOF && + # some generic comment on the configuration file itself + # a comment specific to this "section" section. + [section] + # some intervening lines + # that should also be dropped + + # please be careful when you update the above variable + EOF + + git config --unset section.key && + test_cmp expect .git/config && + + cat >.git/config <<-\EOF && + [section] + key = value + [next-section] + EOF + + cat >expect <<-\EOF && + [next-section] + EOF + + git config --unset section.key && + test_cmp expect .git/config && + + q_to_tab >.git/config <<-\EOF && + [one] + Qkey = "multiline \ + QQ# with comment" + [two] + key = true + EOF + git config --unset two.key && + ! grep two .git/config && + + q_to_tab >.git/config <<-\EOF && + [one] + Qkey = "multiline \ + QQ# with comment" + [one] + key = true + EOF + git config --unset-all one.key && + test_line_count = 0 .git/config && + + q_to_tab >.git/config <<-\EOF && + [one] + Qkey = true + Q# a comment not at the start + [two] + Qkey = true + EOF + git config --unset two.key && + grep two .git/config && + + q_to_tab >.git/config <<-\EOF && + [one] + Qkey = not [two "subsection"] + [two "subsection"] + [two "subsection"] + Qkey = true + [TWO "subsection"] + [one] + EOF + git config --unset two.subsection.key && + test "not [two subsection]" = "$(git config one.key)" && + test_line_count = 3 .git/config +' + +test_expect_success '--unset-all removes section if empty & uncommented' ' + cat >.git/config <<-\EOF && + [section] + key = value1 + key = value2 + EOF + + git config --unset-all section.key && + test_line_count = 0 .git/config +' + +test_expect_success 'adding a key into an empty section reuses header' ' + cat >.git/config <<-\EOF && + [section] + EOF + + q_to_tab >expect <<-\EOF && + [section] + Qkey = value + EOF + + git config section.key value && + test_cmp expect .git/config +' + +test_expect_success POSIXPERM,PERL 'preserves existing permissions' ' + chmod 0600 .git/config && + git config imap.pass Hunter2 && + perl -e \ + "die q(badset) if ((stat(q(.git/config)))[2] & 07777) != 0600" && + git config --rename-section imap pop && + perl -e \ + "die q(badrename) if ((stat(q(.git/config)))[2] & 07777) != 0600" +' + +! 
test_have_prereq MINGW ||
+HOME="$(pwd)" # convert to Windows path
+
+test_expect_success 'set up --show-origin tests' '
+	INCLUDE_DIR="$HOME/include" &&
+	mkdir -p "$INCLUDE_DIR" &&
+	cat >"$INCLUDE_DIR"/absolute.include <<-\EOF &&
+	[user]
+	absolute = include
+	EOF
+	cat >"$INCLUDE_DIR"/relative.include <<-\EOF &&
+	[user]
+	relative = include
+	EOF
+	cat >"$HOME"/.gitconfig <<-EOF &&
+	[user]
+	global = true
+	override = global
+	[include]
+	path = "$INCLUDE_DIR/absolute.include"
+	EOF
+	cat >.git/config <<-\EOF
+	[user]
+	local = true
+	override = local
+	[include]
+	path = ../include/relative.include
+	EOF
+'
+
+test_expect_success '--show-origin with --list' '
+	cat >expect <<-EOF &&
+	file:$HOME/.gitconfig	user.global=true
+	file:$HOME/.gitconfig	user.override=global
+	file:$HOME/.gitconfig	include.path=$INCLUDE_DIR/absolute.include
+	file:$INCLUDE_DIR/absolute.include	user.absolute=include
+	file:.git/config	user.local=true
+	file:.git/config	user.override=local
+	file:.git/config	include.path=../include/relative.include
+	file:.git/../include/relative.include	user.relative=include
+	command line:	user.environ=true
+	command line:	user.cmdline=true
+	EOF
+	GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=user.environ GIT_CONFIG_VALUE_0=true\
+		git -c user.cmdline=true config --list --show-origin >output &&
+	test_cmp expect output
+'
+
+test_expect_success '--show-origin with --list --null' '
+	cat >expect <<-EOF &&
+	file:$HOME/.gitconfigQuser.global
+	trueQfile:$HOME/.gitconfigQuser.override
+	globalQfile:$HOME/.gitconfigQinclude.path
+	$INCLUDE_DIR/absolute.includeQfile:$INCLUDE_DIR/absolute.includeQuser.absolute
+	includeQfile:.git/configQuser.local
+	trueQfile:.git/configQuser.override
+	localQfile:.git/configQinclude.path
+	../include/relative.includeQfile:.git/../include/relative.includeQuser.relative
+	includeQcommand line:Quser.cmdline
+	trueQ
+	EOF
+	git -c user.cmdline=true config --null --list --show-origin >output.raw &&
+	nul_to_q <output.raw >output &&
+	# The here-doc above adds a newline that the --null output would not
+	# include. Add it here to make the two comparable.
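+	# Rough sketch of the record layout (not part of the original test,
+	# inferred from the expect block above): each entry is written as
+	#
+	#	<origin> NUL <key> LF <value> NUL
+	#
+	# so the user.local entry above comes out as
+	#	file:.git/config NUL user.local LF true NUL
+	# which nul_to_q renders as "file:.git/configQuser.local" and "trueQ".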
+ echo >>output && + test_cmp expect output +' + +test_expect_success '--show-origin with single file' ' + cat >expect <<-\EOF && + file:.git/config user.local=true + file:.git/config user.override=local + file:.git/config include.path=../include/relative.include + EOF + git config --local --list --show-origin >output && + test_cmp expect output +' + +test_expect_success '--show-origin with --get-regexp' ' + cat >expect <<-EOF && + file:$HOME/.gitconfig user.global true + file:.git/config user.local true + EOF + git config --show-origin --get-regexp "user\.[g|l].*" >output && + test_cmp expect output +' + +test_expect_success '--show-origin getting a single key' ' + cat >expect <<-\EOF && + file:.git/config local + EOF + git config --show-origin user.override >output && + test_cmp expect output +' + +test_expect_success 'set up custom config file' ' + CUSTOM_CONFIG_FILE="custom.conf" && + cat >"$CUSTOM_CONFIG_FILE" <<-\EOF + [user] + custom = true + EOF +' + +test_expect_success !MINGW 'set up custom config file with special name characters' ' + WEIRDLY_NAMED_FILE="file\" (dq) and spaces.conf" && + cp "$CUSTOM_CONFIG_FILE" "$WEIRDLY_NAMED_FILE" +' + +test_expect_success !MINGW '--show-origin escape special file name characters' ' + cat >expect <<-\EOF && + file:"file\" (dq) and spaces.conf" user.custom=true + EOF + git config --file "$WEIRDLY_NAMED_FILE" --show-origin --list >output && + test_cmp expect output +' + +test_expect_success '--show-origin stdin' ' + cat >expect <<-\EOF && + standard input: user.custom=true + EOF + git config --file - --show-origin --list <"$CUSTOM_CONFIG_FILE" >output && + test_cmp expect output +' + +test_expect_success '--show-origin stdin with file include' ' + cat >"$INCLUDE_DIR"/stdin.include <<-EOF && + [user] + stdin = include + EOF + cat >expect <<-EOF && + file:$INCLUDE_DIR/stdin.include include + EOF + echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" | + git config --show-origin --includes --file - user.stdin >output && + + test_cmp expect output +' + +test_expect_success '--show-origin blob' ' + blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") && + cat >expect <<-EOF && + blob:$blob user.custom=true + EOF + git config --blob=$blob --show-origin --list >output && + test_cmp expect output +' + +test_expect_success '--show-origin blob ref' ' + cat >expect <<-\EOF && + blob:main:custom.conf user.custom=true + EOF + git add "$CUSTOM_CONFIG_FILE" && + git commit -m "new config file" && + git config --blob=main:"$CUSTOM_CONFIG_FILE" --show-origin --list >output && + test_cmp expect output +' + +test_expect_success '--show-scope with --list' ' + cat >expect <<-EOF && + global user.global=true + global user.override=global + global include.path=$INCLUDE_DIR/absolute.include + global user.absolute=include + local user.local=true + local user.override=local + local include.path=../include/relative.include + local user.relative=include + command user.cmdline=true + EOF + git -c user.cmdline=true config --list --show-scope >output && + test_cmp expect output +' + +test_expect_success !MINGW '--show-scope with --blob' ' + blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") && + cat >expect <<-EOF && + command user.custom=true + EOF + git config --blob=$blob --show-scope --list >output && + test_cmp expect output +' + +test_expect_success '--show-scope with --local' ' + cat >expect <<-\EOF && + local user.local=true + local user.override=local + local include.path=../include/relative.include + EOF + git config --local --list --show-scope >output && + test_cmp 
expect output +' + +test_expect_success '--show-scope getting a single value' ' + cat >expect <<-\EOF && + local true + EOF + git config --show-scope --get user.local >output && + test_cmp expect output +' + +test_expect_success '--show-scope with --show-origin' ' + cat >expect <<-EOF && + global file:$HOME/.gitconfig user.global=true + global file:$HOME/.gitconfig user.override=global + global file:$HOME/.gitconfig include.path=$INCLUDE_DIR/absolute.include + global file:$INCLUDE_DIR/absolute.include user.absolute=include + local file:.git/config user.local=true + local file:.git/config user.override=local + local file:.git/config include.path=../include/relative.include + local file:.git/../include/relative.include user.relative=include + command command line: user.cmdline=true + EOF + git -c user.cmdline=true config --list --show-origin --show-scope >output && + test_cmp expect output +' + +test_expect_success 'override global and system config' ' + test_when_finished rm -f "$HOME"/.config/git && + + cat >"$HOME"/.gitconfig <<-EOF && + [home] + config = true + EOF + mkdir -p "$HOME"/.config/git && + cat >"$HOME"/.config/git/config <<-EOF && + [xdg] + config = true + EOF + cat >.git/config <<-EOF && + [local] + config = true + EOF + cat >custom-global-config <<-EOF && + [global] + config = true + EOF + cat >custom-system-config <<-EOF && + [system] + config = true + EOF + + cat >expect <<-EOF && + global xdg.config=true + global home.config=true + local local.config=true + EOF + git config --show-scope --list >output && + test_cmp expect output && + + cat >expect <<-EOF && + system system.config=true + global global.config=true + local local.config=true + EOF + GIT_CONFIG_NOSYSTEM=false GIT_CONFIG_SYSTEM=custom-system-config GIT_CONFIG_GLOBAL=custom-global-config \ + git config --show-scope --list >output && + test_cmp expect output && + + cat >expect <<-EOF && + local local.config=true + EOF + GIT_CONFIG_NOSYSTEM=false GIT_CONFIG_SYSTEM=/dev/null GIT_CONFIG_GLOBAL=/dev/null \ + git config --show-scope --list >output && + test_cmp expect output +' + +test_expect_success 'override global and system config with missing file' ' + test_must_fail env GIT_CONFIG_GLOBAL=does-not-exist GIT_CONFIG_SYSTEM=/dev/null git config --global --list && + test_must_fail env GIT_CONFIG_GLOBAL=/dev/null GIT_CONFIG_SYSTEM=does-not-exist git config --system --list && + GIT_CONFIG_GLOBAL=does-not-exist GIT_CONFIG_SYSTEM=does-not-exist git version +' + +test_expect_success 'system override has no effect with GIT_CONFIG_NOSYSTEM' ' + # `git config --system` has different semantics compared to other + # commands as it ignores GIT_CONFIG_NOSYSTEM. We thus test whether the + # variable has an effect via a different proxy. 
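+	# Sketch of that proxy (not part of the original test): an alias that
+	# exists only in $GIT_CONFIG_SYSTEM is visible to ordinary commands, so
+	#
+	#	GIT_CONFIG_NOSYSTEM=true  GIT_CONFIG_SYSTEM=alias-config git hello-world
+	#	GIT_CONFIG_NOSYSTEM=false GIT_CONFIG_SYSTEM=alias-config git hello-world
+	#
+	# are assumed to fail and to print hello world respectively, which is
+	# exactly what the assertions below check.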
+ cat >alias-config <<-EOF && + [alias] + hello-world = !echo "hello world" + EOF + test_must_fail env GIT_CONFIG_NOSYSTEM=true GIT_CONFIG_SYSTEM=alias-config \ + git hello-world && + GIT_CONFIG_NOSYSTEM=false GIT_CONFIG_SYSTEM=alias-config \ + git hello-world >actual && + echo "hello world" >expect && + test_cmp expect actual +' + +test_expect_success 'write to overridden global and system config' ' + cat >expect <.git/config <<-\EOF && +[section] +foo = true +number = 10 +big = 1M +EOF + +test_expect_success 'identical modern --type specifiers are allowed' ' + test_cmp_config 1048576 --type=int --type=int section.big +' + +test_expect_success 'identical legacy --type specifiers are allowed' ' + test_cmp_config 1048576 --int --int section.big +' + +test_expect_success 'identical mixed --type specifiers are allowed' ' + test_cmp_config 1048576 --int --type=int section.big +' + +test_expect_success 'non-identical modern --type specifiers are not allowed' ' + test_must_fail git config --type=int --type=bool section.big 2>error && + test_i18ngrep "only one type at a time" error +' + +test_expect_success 'non-identical legacy --type specifiers are not allowed' ' + test_must_fail git config --int --bool section.big 2>error && + test_i18ngrep "only one type at a time" error +' + +test_expect_success 'non-identical mixed --type specifiers are not allowed' ' + test_must_fail git config --type=int --bool section.big 2>error && + test_i18ngrep "only one type at a time" error +' + +test_expect_success '--type allows valid type specifiers' ' + test_cmp_config true --type=bool section.foo +' + +test_expect_success '--no-type unsets type specifiers' ' + test_cmp_config 10 --type=bool --no-type section.number +' + +test_expect_success 'unset type specifiers may be reset to conflicting ones' ' + test_cmp_config 1048576 --type=bool --no-type --type=int section.big +' + +test_expect_success '--type rejects unknown specifiers' ' + test_must_fail git config --type=nonsense section.foo 2>error && + test_i18ngrep "unrecognized --type argument" error +' + +test_expect_success '--replace-all does not invent newlines' ' + q_to_tab >.git/config <<-\EOF && + [abc]key + QkeepSection + [xyz] + Qkey = 1 + [abc] + Qkey = a + EOF + q_to_tab >expect <<-\EOF && + [abc] + QkeepSection + [xyz] + Qkey = 1 + [abc] + Qkey = b + EOF + git config --replace-all abc.key b && + test_cmp expect .git/config +' + +test_expect_success 'set all config with value-pattern' ' + test_when_finished rm -f config initial && + git config --file=initial abc.key one && + + # no match => add new entry + cp initial config && + git config --file=config abc.key two a+ && + git config --file=config --list >actual && + cat >expect <<-\EOF && + abc.key=one + abc.key=two + EOF + test_cmp expect actual && + + # multiple matches => failure + test_must_fail git config --file=config abc.key three o+ 2>err && + test_i18ngrep "has multiple values" err && + + # multiple values, no match => add + git config --file=config abc.key three a+ && + git config --file=config --list >actual && + cat >expect <<-\EOF && + abc.key=one + abc.key=two + abc.key=three + EOF + test_cmp expect actual && + + # single match => replace + git config --file=config abc.key four h+ && + git config --file=config --list >actual && + cat >expect <<-\EOF && + abc.key=one + abc.key=two + abc.key=four + EOF + test_cmp expect actual +' + +test_expect_success '--replace-all and value-pattern' ' + test_when_finished rm -f config && + git config --file=config --add abc.key one && + git config 
--file=config --add abc.key two && + git config --file=config --add abc.key three && + git config --file=config --replace-all abc.key four "o+" && + git config --file=config --list >actual && + cat >expect <<-\EOF && + abc.key=four + abc.key=three + EOF + test_cmp expect actual +' + +test_expect_success 'refuse --fixed-value for incompatible actions' ' + test_when_finished rm -f config && + git config --file=config dev.null bogus && + + # These modes do not allow --fixed-value at all + test_must_fail git config --file=config --fixed-value --add dev.null bogus && + test_must_fail git config --file=config --fixed-value --get-urlmatch dev.null bogus && + test_must_fail git config --file=config --fixed-value --get-urlmatch dev.null bogus && + test_must_fail git config --file=config --fixed-value --rename-section dev null && + test_must_fail git config --file=config --fixed-value --remove-section dev && + test_must_fail git config --file=config --fixed-value --list && + test_must_fail git config --file=config --fixed-value --get-color dev.null && + test_must_fail git config --file=config --fixed-value --get-colorbool dev.null && + + # These modes complain when --fixed-value has no value-pattern + test_must_fail git config --file=config --fixed-value dev.null bogus && + test_must_fail git config --file=config --fixed-value --replace-all dev.null bogus && + test_must_fail git config --file=config --fixed-value --get dev.null && + test_must_fail git config --file=config --fixed-value --get-all dev.null && + test_must_fail git config --file=config --fixed-value --get-regexp "dev.*" && + test_must_fail git config --file=config --fixed-value --unset dev.null && + test_must_fail git config --file=config --fixed-value --unset-all dev.null +' + +test_expect_success '--fixed-value uses exact string matching' ' + test_when_finished rm -f config initial && + META="a+b*c?d[e]f.g" && + git config --file=initial fixed.test "$META" && + + cp initial config && + git config --file=config fixed.test bogus "$META" && + git config --file=config --list >actual && + cat >expect <<-EOF && + fixed.test=$META + fixed.test=bogus + EOF + test_cmp expect actual && + + cp initial config && + git config --file=config --fixed-value fixed.test bogus "$META" && + git config --file=config --list >actual && + cat >expect <<-\EOF && + fixed.test=bogus + EOF + test_cmp expect actual && + + cp initial config && + test_must_fail git config --file=config --unset fixed.test "$META" && + git config --file=config --fixed-value --unset fixed.test "$META" && + test_must_fail git config --file=config fixed.test && + + cp initial config && + test_must_fail git config --file=config --unset-all fixed.test "$META" && + git config --file=config --fixed-value --unset-all fixed.test "$META" && + test_must_fail git config --file=config fixed.test && + + cp initial config && + git config --file=config --replace-all fixed.test bogus "$META" && + git config --file=config --list >actual && + cat >expect <<-EOF && + fixed.test=$META + fixed.test=bogus + EOF + test_cmp expect actual && + + git config --file=config --fixed-value --replace-all fixed.test bogus "$META" && + git config --file=config --list >actual && + cat >expect <<-EOF && + fixed.test=bogus + fixed.test=bogus + EOF + test_cmp expect actual +' + +test_expect_success '--get and --get-all with --fixed-value' ' + test_when_finished rm -f config && + META="a+b*c?d[e]f.g" && + git config --file=config fixed.test bogus && + git config --file=config --add fixed.test "$META" && + + git config 
--file=config --get fixed.test bogus && + test_must_fail git config --file=config --get fixed.test "$META" && + git config --file=config --get --fixed-value fixed.test "$META" && + test_must_fail git config --file=config --get --fixed-value fixed.test non-existent && + + git config --file=config --get-all fixed.test bogus && + test_must_fail git config --file=config --get-all fixed.test "$META" && + git config --file=config --get-all --fixed-value fixed.test "$META" && + test_must_fail git config --file=config --get-all --fixed-value fixed.test non-existent && + + git config --file=config --get-regexp fixed+ bogus && + test_must_fail git config --file=config --get-regexp fixed+ "$META" && + git config --file=config --get-regexp --fixed-value fixed+ "$META" && + test_must_fail git config --file=config --get-regexp --fixed-value fixed+ non-existent +' + +test_done diff --git a/t/t1500-rev-parse.sh b/t/t1500-rev-parse.sh index 1c2df08333bc60..2c429f9635cc23 100755 --- a/t/t1500-rev-parse.sh +++ b/t/t1500-rev-parse.sh @@ -225,7 +225,8 @@ test_expect_success 'showing the superproject correctly' ' test_commit -C super test_commit && test_create_repo sub && test_commit -C sub test_commit && - git -C super submodule add ../sub dir/sub && + git -c protocol.file.allow=always \ + -C super submodule add ../sub dir/sub && echo $(pwd)/super >expect && git -C super/dir/sub rev-parse --show-superproject-working-tree >out && test_cmp expect out && diff --git a/t/t1500-rev-parse.sh.orig b/t/t1500-rev-parse.sh.orig new file mode 100755 index 00000000000000..1c2df08333bc60 --- /dev/null +++ b/t/t1500-rev-parse.sh.orig @@ -0,0 +1,265 @@ +#!/bin/sh + +test_description='test git rev-parse' +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +test_one () { + dir="$1" && + expect="$2" && + shift && + shift && + echo "$expect" >expect && + git -C "$dir" rev-parse "$@" >actual && + test_cmp expect actual +} + +# usage: [options] label is-bare is-inside-git is-inside-work prefix git-dir absolute-git-dir +test_rev_parse () { + d= + bare= + gitdir= + while : + do + case "$1" in + -C) d="$2"; shift; shift ;; + -b) case "$2" in + [tfu]*) bare="$2"; shift; shift ;; + *) error "test_rev_parse: bogus core.bare value '$2'" ;; + esac ;; + -g) gitdir="$2"; shift; shift ;; + -*) error "test_rev_parse: unrecognized option '$1'" ;; + *) break ;; + esac + done + + name=$1 + shift + + for o in --is-bare-repository \ + --is-inside-git-dir \ + --is-inside-work-tree \ + --show-prefix \ + --git-dir \ + --absolute-git-dir + do + test $# -eq 0 && break + expect="$1" + test_expect_success "$name: $o" ' + if test -n "$gitdir" + then + test_when_finished "unset GIT_DIR" && + GIT_DIR="$gitdir" && + export GIT_DIR + fi && + + case "$bare" in + t*) test_config ${d:+-C} ${d:+"$d"} core.bare true ;; + f*) test_config ${d:+-C} ${d:+"$d"} core.bare false ;; + u*) test_unconfig ${d:+-C} ${d:+"$d"} core.bare ;; + esac && + + echo "$expect" >expect && + git ${d:+-C} ${d:+"$d"} rev-parse $o >actual && + test_cmp expect actual + ' + shift + done +} + +ROOT=$(pwd) + +test_expect_success 'setup' ' + mkdir -p sub/dir work && + cp -R .git repo.git && + git checkout -B main && + test_commit abc && + git checkout -b side && + test_commit def && + git checkout main && + git worktree add worktree side +' + +test_rev_parse toplevel false false true '' .git "$ROOT/.git" + +test_rev_parse -C .git .git/ false true false '' . 
"$ROOT/.git" +test_rev_parse -C .git/objects .git/objects/ false true false '' "$ROOT/.git" "$ROOT/.git" + +test_rev_parse -C sub/dir subdirectory false false true sub/dir/ "$ROOT/.git" "$ROOT/.git" + +test_rev_parse -b t 'core.bare = true' true false false + +test_rev_parse -b u 'core.bare undefined' false false true + + +test_rev_parse -C work -g ../.git -b f 'GIT_DIR=../.git, core.bare = false' false false true '' "../.git" "$ROOT/.git" + +test_rev_parse -C work -g ../.git -b t 'GIT_DIR=../.git, core.bare = true' true false false '' + +test_rev_parse -C work -g ../.git -b u 'GIT_DIR=../.git, core.bare undefined' false false true '' + + +test_rev_parse -C work -g ../repo.git -b f 'GIT_DIR=../repo.git, core.bare = false' false false true '' "../repo.git" "$ROOT/repo.git" + +test_rev_parse -C work -g ../repo.git -b t 'GIT_DIR=../repo.git, core.bare = true' true false false '' + +test_rev_parse -C work -g ../repo.git -b u 'GIT_DIR=../repo.git, core.bare undefined' false false true '' + +test_expect_success 'rev-parse --path-format=absolute' ' + test_one "." "$ROOT/.git" --path-format=absolute --git-dir && + test_one "." "$ROOT/.git" --path-format=absolute --git-common-dir && + test_one "sub/dir" "$ROOT/.git" --path-format=absolute --git-dir && + test_one "sub/dir" "$ROOT/.git" --path-format=absolute --git-common-dir && + test_one "worktree" "$ROOT/.git/worktrees/worktree" --path-format=absolute --git-dir && + test_one "worktree" "$ROOT/.git" --path-format=absolute --git-common-dir && + test_one "." "$ROOT" --path-format=absolute --show-toplevel && + test_one "." "$ROOT/.git/objects" --path-format=absolute --git-path objects && + test_one "." "$ROOT/.git/objects/foo/bar/baz" --path-format=absolute --git-path objects/foo/bar/baz +' + +test_expect_success 'rev-parse --path-format=relative' ' + test_one "." ".git" --path-format=relative --git-dir && + test_one "." ".git" --path-format=relative --git-common-dir && + test_one "sub/dir" "../../.git" --path-format=relative --git-dir && + test_one "sub/dir" "../../.git" --path-format=relative --git-common-dir && + test_one "worktree" "../.git/worktrees/worktree" --path-format=relative --git-dir && + test_one "worktree" "../.git" --path-format=relative --git-common-dir && + test_one "." "./" --path-format=relative --show-toplevel && + test_one "." ".git/objects" --path-format=relative --git-path objects && + test_one "." 
".git/objects/foo/bar/baz" --path-format=relative --git-path objects/foo/bar/baz +' + +test_expect_success '--path-format=relative does not affect --absolute-git-dir' ' + git rev-parse --path-format=relative --absolute-git-dir >actual && + echo "$ROOT/.git" >expect && + test_cmp expect actual +' + +test_expect_success '--path-format can change in the middle of the command line' ' + git rev-parse --path-format=absolute --git-dir --path-format=relative --git-path objects/foo/bar >actual && + cat >expect <<-EOF && + $ROOT/.git + .git/objects/foo/bar + EOF + test_cmp expect actual +' + +test_expect_success '--path-format does not segfault without an argument' ' + test_must_fail git rev-parse --path-format +' + +test_expect_success 'git-common-dir from worktree root' ' + echo .git >expect && + git rev-parse --git-common-dir >actual && + test_cmp expect actual +' + +test_expect_success 'git-common-dir inside sub-dir' ' + mkdir -p path/to/child && + test_when_finished "rm -rf path" && + echo "$(git -C path/to/child rev-parse --show-cdup).git" >expect && + git -C path/to/child rev-parse --git-common-dir >actual && + test_cmp expect actual +' + +test_expect_success 'git-path from worktree root' ' + echo .git/objects >expect && + git rev-parse --git-path objects >actual && + test_cmp expect actual +' + +test_expect_success 'git-path inside sub-dir' ' + mkdir -p path/to/child && + test_when_finished "rm -rf path" && + echo "$(git -C path/to/child rev-parse --show-cdup).git/objects" >expect && + git -C path/to/child rev-parse --git-path objects >actual && + test_cmp expect actual +' + +test_expect_success 'rev-parse --is-shallow-repository in shallow repo' ' + test_commit test_commit && + echo true >expect && + git clone --depth 1 --no-local . shallow && + test_when_finished "rm -rf shallow" && + git -C shallow rev-parse --is-shallow-repository >actual && + test_cmp expect actual +' + +test_expect_success 'rev-parse --is-shallow-repository in non-shallow repo' ' + echo false >expect && + git rev-parse --is-shallow-repository >actual && + test_cmp expect actual +' + +test_expect_success 'rev-parse --show-object-format in repo' ' + echo "$(test_oid algo)" >expect && + git rev-parse --show-object-format >actual && + test_cmp expect actual && + git rev-parse --show-object-format=storage >actual && + test_cmp expect actual && + git rev-parse --show-object-format=input >actual && + test_cmp expect actual && + git rev-parse --show-object-format=output >actual && + test_cmp expect actual && + test_must_fail git rev-parse --show-object-format=squeamish-ossifrage 2>err && + grep "unknown mode for --show-object-format: squeamish-ossifrage" err +' + +test_expect_success '--show-toplevel from subdir of working tree' ' + pwd >expect && + git -C sub/dir rev-parse --show-toplevel >actual && + test_cmp expect actual +' + +test_expect_success '--show-toplevel from inside .git' ' + test_must_fail git -C .git rev-parse --show-toplevel +' + +test_expect_success 'showing the superproject correctly' ' + git rev-parse --show-superproject-working-tree >out && + test_must_be_empty out && + + test_create_repo super && + test_commit -C super test_commit && + test_create_repo sub && + test_commit -C sub test_commit && + git -C super submodule add ../sub dir/sub && + echo $(pwd)/super >expect && + git -C super/dir/sub rev-parse --show-superproject-working-tree >out && + test_cmp expect out && + + test_commit -C super submodule_add && + git -C super checkout -b branch1 && + git -C super/dir/sub checkout -b branch1 && + test_commit 
-C super/dir/sub branch1_commit && + git -C super add dir/sub && + test_commit -C super branch1_commit && + git -C super checkout -b branch2 main && + git -C super/dir/sub checkout -b branch2 main && + test_commit -C super/dir/sub branch2_commit && + git -C super add dir/sub && + test_commit -C super branch2_commit && + test_must_fail git -C super merge branch1 && + + git -C super/dir/sub rev-parse --show-superproject-working-tree >out && + test_cmp expect out +' + +# at least one external project depends on this behavior: +test_expect_success 'rev-parse --since= unsqueezed ordering' ' + x1=--since=1970-01-01T00:00:01Z && + x2=--since=1970-01-01T00:00:02Z && + x3=--since=1970-01-01T00:00:03Z && + git rev-parse $x1 $x1 $x3 $x2 >actual && + cat >expect <<-EOF && + --max-age=1 + --max-age=1 + --max-age=3 + --max-age=2 + EOF + test_cmp expect actual +' + +test_done diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh index 37ad79470fb9e0..54b0eb6dce5a86 100755 --- a/t/t2400-worktree-add.sh +++ b/t/t2400-worktree-add.sh @@ -614,6 +614,7 @@ test_expect_success '"add" should not fail because of another bad worktree' ' ' test_expect_success '"add" with uninitialized submodule, with submodule.recurse unset' ' + test_config_global protocol.file.allow always && test_create_repo submodule && test_commit -C submodule first && test_create_repo project && @@ -629,6 +630,7 @@ test_expect_success '"add" with uninitialized submodule, with submodule.recurse ' test_expect_success '"add" with initialized submodule, with submodule.recurse unset' ' + test_config_global protocol.file.allow always && git -C project-clone submodule update --init && git -C project-clone worktree add ../project-4 ' diff --git a/t/t2400-worktree-add.sh.orig b/t/t2400-worktree-add.sh.orig new file mode 100755 index 00000000000000..37ad79470fb9e0 --- /dev/null +++ b/t/t2400-worktree-add.sh.orig @@ -0,0 +1,640 @@ +#!/bin/sh + +test_description='test git worktree add' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +. "$TEST_DIRECTORY"/lib-rebase.sh + +test_expect_success 'setup' ' + test_commit init +' + +test_expect_success '"add" an existing worktree' ' + mkdir -p existing/subtree && + test_must_fail git worktree add --detach existing main +' + +test_expect_success '"add" an existing empty worktree' ' + mkdir existing_empty && + git worktree add --detach existing_empty main +' + +test_expect_success '"add" using shorthand - fails when no previous branch' ' + test_must_fail git worktree add existing_short - +' + +test_expect_success '"add" using - shorthand' ' + git checkout -b newbranch && + echo hello >myworld && + git add myworld && + git commit -m myworld && + git checkout main && + git worktree add short-hand - && + echo refs/heads/newbranch >expect && + git -C short-hand rev-parse --symbolic-full-name HEAD >actual && + test_cmp expect actual +' + +test_expect_success '"add" refuses to checkout locked branch' ' + test_must_fail git worktree add zere main && + ! test -d zere && + ! 
test -d .git/worktrees/zere +' + +test_expect_success 'checking out paths not complaining about linked checkouts' ' + ( + cd existing_empty && + echo dirty >>init.t && + git checkout main -- init.t + ) +' + +test_expect_success '"add" worktree' ' + git rev-parse HEAD >expect && + git worktree add --detach here main && + ( + cd here && + test_cmp ../init.t init.t && + test_must_fail git symbolic-ref HEAD && + git rev-parse HEAD >actual && + test_cmp ../expect actual && + git fsck + ) +' + +test_expect_success '"add" worktree with lock' ' + git worktree add --detach --lock here-with-lock main && + test_when_finished "git worktree unlock here-with-lock || :" && + test -f .git/worktrees/here-with-lock/locked +' + +test_expect_success '"add" worktree with lock and reason' ' + lock_reason="why not" && + git worktree add --detach --lock --reason "$lock_reason" here-with-lock-reason main && + test_when_finished "git worktree unlock here-with-lock-reason || :" && + test -f .git/worktrees/here-with-lock-reason/locked && + echo "$lock_reason" >expect && + test_cmp expect .git/worktrees/here-with-lock-reason/locked +' + +test_expect_success '"add" worktree with reason but no lock' ' + test_must_fail git worktree add --detach --reason "why not" here-with-reason-only main && + test_path_is_missing .git/worktrees/here-with-reason-only/locked +' + +test_expect_success '"add" worktree from a subdir' ' + ( + mkdir sub && + cd sub && + git worktree add --detach here main && + cd here && + test_cmp ../../init.t init.t + ) +' + +test_expect_success '"add" from a linked checkout' ' + ( + cd here && + git worktree add --detach nested-here main && + cd nested-here && + git fsck + ) +' + +test_expect_success '"add" worktree creating new branch' ' + git worktree add -b newmain there main && + ( + cd there && + test_cmp ../init.t init.t && + git symbolic-ref HEAD >actual && + echo refs/heads/newmain >expect && + test_cmp expect actual && + git fsck + ) +' + +test_expect_success 'die the same branch is already checked out' ' + ( + cd here && + test_must_fail git checkout newmain + ) +' + +test_expect_success SYMLINKS 'die the same branch is already checked out (symlink)' ' + head=$(git -C there rev-parse --git-path HEAD) && + ref=$(git -C there symbolic-ref HEAD) && + rm "$head" && + ln -s "$ref" "$head" && + test_must_fail git -C here checkout newmain +' + +test_expect_success 'not die the same branch is already checked out' ' + ( + cd here && + git worktree add --force anothernewmain newmain + ) +' + +test_expect_success 'not die on re-checking out current branch' ' + ( + cd there && + git checkout newmain + ) +' + +test_expect_success '"add" from a bare repo' ' + ( + git clone --bare . bare && + cd bare && + git worktree add -b bare-main ../there2 main + ) +' + +test_expect_success 'checkout from a bare repo without "add"' ' + ( + cd bare && + test_must_fail git checkout main + ) +' + +test_expect_success '"add" default branch of a bare repo' ' + ( + git clone --bare . 
bare2 &&
+ cd bare2 &&
+ git worktree add ../there3 main
+ )
+'
+
+test_expect_success 'checkout with grafts' '
+ test_when_finished rm .git/info/grafts &&
+ test_commit abc &&
+ SHA1=$(git rev-parse HEAD) &&
+ test_commit def &&
+ test_commit xyz &&
+ echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
+ cat >expected <<-\EOF &&
+ xyz
+ abc
+ EOF
+ git log --format=%s -2 >actual &&
+ test_cmp expected actual &&
+ git worktree add --detach grafted main &&
+ git --git-dir=grafted/.git log --format=%s -2 >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add" from relative HEAD' '
+ test_commit a &&
+ test_commit b &&
+ test_commit c &&
+ git rev-parse HEAD~1 >expected &&
+ git worktree add relhead HEAD~1 &&
+ git -C relhead rev-parse HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add -b" with <branch> omitted' '
+ git worktree add -b burble flornk &&
+ test_cmp_rev HEAD burble
+'
+
+test_expect_success '"add --detach" with <branch> omitted' '
+ git worktree add --detach fishhook &&
+ git rev-parse HEAD >expected &&
+ git -C fishhook rev-parse HEAD >actual &&
+ test_cmp expected actual &&
+ test_must_fail git -C fishhook symbolic-ref HEAD
+'
+
+test_expect_success '"add" with <branch> omitted' '
+ git worktree add wiffle/bat &&
+ test_cmp_rev HEAD bat
+'
+
+test_expect_success '"add" checks out existing branch of dwimd name' '
+ git branch dwim HEAD~1 &&
+ git worktree add dwim &&
+ test_cmp_rev HEAD~1 dwim &&
+ (
+ cd dwim &&
+ test_cmp_rev HEAD dwim
+ )
+'
+
+test_expect_success '"add <path>" dwim fails with checked out branch' '
+ git checkout -b test-branch &&
+ test_must_fail git worktree add test-branch &&
+ test_path_is_missing test-branch
+'
+
+test_expect_success '"add --force" with existing dwimd name doesnt die' '
+ git checkout test-branch &&
+ git worktree add --force test-branch
+'
+
+test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
+ git worktree add --detach mish/mash &&
+ test_must_fail git rev-parse mash -- &&
+ test_must_fail git -C mish/mash symbolic-ref HEAD
+'
+
+test_expect_success '"add" -b/-B mutually exclusive' '
+ test_must_fail git worktree add -b poodle -B poodle bamboo main
+'
+
+test_expect_success '"add" -b/--detach mutually exclusive' '
+ test_must_fail git worktree add -b poodle --detach bamboo main
+'
+
+test_expect_success '"add" -B/--detach mutually exclusive' '
+ test_must_fail git worktree add -B poodle --detach bamboo main
+'
+
+test_expect_success '"add -B" fails if the branch is checked out' '
+ git rev-parse newmain >before &&
+ test_must_fail git worktree add -B newmain bamboo main &&
+ git rev-parse newmain >after &&
+ test_cmp before after
+'
+
+test_expect_success 'add -B' '
+ git worktree add -B poodle bamboo2 main^ &&
+ git -C bamboo2 symbolic-ref HEAD >actual &&
+ echo refs/heads/poodle >expected &&
+ test_cmp expected actual &&
+ test_cmp_rev main^ poodle
+'
+
+test_expect_success 'add --quiet' '
+ git worktree add --quiet another-worktree main 2>actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success 'local clone from linked checkout' '
+ git clone --local here here-clone &&
+ ( cd here-clone && git fsck )
+'
+
+test_expect_success 'local clone --shared from linked checkout' '
+ git -C bare worktree add --detach ../baretree &&
+ git clone --local --shared baretree bare-clone &&
+ grep /bare/ bare-clone/.git/objects/info/alternates
+'
+
+test_expect_success '"add" worktree with --no-checkout' '
+ git worktree add --no-checkout -b swamp swamp &&
+ !
test -e swamp/init.t && + git -C swamp reset --hard && + test_cmp init.t swamp/init.t +' + +test_expect_success '"add" worktree with --checkout' ' + git worktree add --checkout -b swmap2 swamp2 && + test_cmp init.t swamp2/init.t +' + +test_expect_success 'put a worktree under rebase' ' + git worktree add under-rebase && + ( + cd under-rebase && + set_fake_editor && + FAKE_LINES="edit 1" git rebase -i HEAD^ && + git worktree list | grep "under-rebase.*detached HEAD" + ) +' + +test_expect_success 'add a worktree, checking out a rebased branch' ' + test_must_fail git worktree add new-rebase under-rebase && + ! test -d new-rebase +' + +test_expect_success 'checking out a rebased branch from another worktree' ' + git worktree add new-place && + test_must_fail git -C new-place checkout under-rebase +' + +test_expect_success 'not allow to delete a branch under rebase' ' + ( + cd under-rebase && + test_must_fail git branch -D under-rebase + ) +' + +test_expect_success 'rename a branch under rebase not allowed' ' + test_must_fail git branch -M under-rebase rebase-with-new-name +' + +test_expect_success 'check out from current worktree branch ok' ' + ( + cd under-rebase && + git checkout under-rebase && + git checkout - && + git rebase --abort + ) +' + +test_expect_success 'checkout a branch under bisect' ' + git worktree add under-bisect && + ( + cd under-bisect && + git bisect start && + git bisect bad && + git bisect good HEAD~2 && + git worktree list | grep "under-bisect.*detached HEAD" && + test_must_fail git worktree add new-bisect under-bisect && + ! test -d new-bisect + ) +' + +test_expect_success 'rename a branch under bisect not allowed' ' + test_must_fail git branch -M under-bisect bisect-with-new-name +' +# Is branch "refs/heads/$1" set to pull from "$2/$3"? +test_branch_upstream () { + printf "%s\n" "$2" "refs/heads/$3" >expect.upstream && + { + git config "branch.$1.remote" && + git config "branch.$1.merge" + } >actual.upstream && + test_cmp expect.upstream actual.upstream +} + +test_expect_success '--track sets up tracking' ' + test_when_finished rm -rf track && + git worktree add --track -b track track main && + test_branch_upstream track . main +' + +# setup remote repository $1 and repository $2 with $1 set up as +# remote. The remote has two branches, main and foo. 
+setup_remote_repo () {
+ git init $1 &&
+ (
+ cd $1 &&
+ test_commit $1_main &&
+ git checkout -b foo &&
+ test_commit upstream_foo
+ ) &&
+ git init $2 &&
+ (
+ cd $2 &&
+ test_commit $2_main &&
+ git remote add $1 ../$1 &&
+ git config remote.$1.fetch \
+ "refs/heads/*:refs/remotes/$1/*" &&
+ git fetch --all
+ )
+}
+
+test_expect_success '--no-track avoids setting up tracking' '
+ test_when_finished rm -rf repo_upstream repo_local foo &&
+ setup_remote_repo repo_upstream repo_local &&
+ (
+ cd repo_local &&
+ git worktree add --no-track -b foo ../foo repo_upstream/foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add <path> <non-existent-branch>" fails' '
+ test_must_fail git worktree add foo non-existent
+'
+
+test_expect_success '"add <path>" dwims' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git worktree add ../foo foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add <path>" dwims with checkout.defaultRemote' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git remote add repo_upstream2 ../repo_upstream &&
+ git fetch repo_upstream2 &&
+ test_must_fail git worktree add ../foo foo &&
+ git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
+ git status -uno --porcelain >status.actual &&
+ test_must_be_empty status.actual
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add does not match remote' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ test_cmp_rev ! refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add --guess-remote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add --guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add with worktree.guessRemote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree --no-guess-remote option overrides config' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add --no-guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ test_cmp_rev !
refs/remotes/repo_a/foo refs/heads/foo + ) +' + +post_checkout_hook () { + gitdir=${1:-.git} + test_when_finished "rm -f $gitdir/hooks/post-checkout" && + mkdir -p $gitdir/hooks && + write_script $gitdir/hooks/post-checkout <<-\EOF + { + echo $* + git rev-parse --git-dir --show-toplevel + } >hook.actual + EOF +} + +test_expect_success '"add" invokes post-checkout hook (branch)' ' + post_checkout_hook && + { + echo $ZERO_OID $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/gumby && + echo $(pwd)/gumby + } >hook.expect && + git worktree add gumby && + test_cmp hook.expect gumby/hook.actual +' + +test_expect_success '"add" invokes post-checkout hook (detached)' ' + post_checkout_hook && + { + echo $ZERO_OID $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/grumpy && + echo $(pwd)/grumpy + } >hook.expect && + git worktree add --detach grumpy && + test_cmp hook.expect grumpy/hook.actual +' + +test_expect_success '"add --no-checkout" suppresses post-checkout hook' ' + post_checkout_hook && + rm -f hook.actual && + git worktree add --no-checkout gloopy && + test_path_is_missing gloopy/hook.actual +' + +test_expect_success '"add" in other worktree invokes post-checkout hook' ' + post_checkout_hook && + { + echo $ZERO_OID $(git rev-parse HEAD) 1 && + echo $(pwd)/.git/worktrees/guppy && + echo $(pwd)/guppy + } >hook.expect && + git -C gloopy worktree add --detach ../guppy && + test_cmp hook.expect guppy/hook.actual +' + +test_expect_success '"add" in bare repo invokes post-checkout hook' ' + rm -rf bare && + git clone --bare . bare && + { + echo $ZERO_OID $(git --git-dir=bare rev-parse HEAD) 1 && + echo $(pwd)/bare/worktrees/goozy && + echo $(pwd)/goozy + } >hook.expect && + post_checkout_hook bare && + git -C bare worktree add --detach ../goozy && + test_cmp hook.expect goozy/hook.actual +' + +test_expect_success '"add" an existing but missing worktree' ' + git worktree add --detach pneu && + test_must_fail git worktree add --detach pneu && + rm -fr pneu && + test_must_fail git worktree add --detach pneu && + git worktree add --force --detach pneu +' + +test_expect_success '"add" an existing locked but missing worktree' ' + git worktree add --detach gnoo && + git worktree lock gnoo && + test_when_finished "git worktree unlock gnoo || :" && + rm -fr gnoo && + test_must_fail git worktree add --detach gnoo && + test_must_fail git worktree add --force --detach gnoo && + git worktree add --force --force --detach gnoo +' + +test_expect_success '"add" not tripped up by magic worktree matching"' ' + # if worktree "sub1/bar" exists, "git worktree add bar" in distinct + # directory `sub2` should not mistakenly complain that `bar` is an + # already-registered worktree + mkdir sub1 sub2 && + git -C sub1 --git-dir=../.git worktree add --detach bozo && + git -C sub2 --git-dir=../.git worktree add --detach bozo +' + +test_expect_success FUNNYNAMES 'sanitize generated worktree name' ' + git worktree add --detach ". 
weird*..?.lock.lock" && + test -d .git/worktrees/---weird-.- +' + +test_expect_success '"add" should not fail because of another bad worktree' ' + git init add-fail && + ( + cd add-fail && + test_commit first && + mkdir sub && + git worktree add sub/to-be-deleted && + rm -rf sub && + git worktree add second + ) +' + +test_expect_success '"add" with uninitialized submodule, with submodule.recurse unset' ' + test_create_repo submodule && + test_commit -C submodule first && + test_create_repo project && + git -C project submodule add ../submodule && + git -C project add submodule && + test_tick && + git -C project commit -m add_sub && + git clone project project-clone && + git -C project-clone worktree add ../project-2 +' +test_expect_success '"add" with uninitialized submodule, with submodule.recurse set' ' + git -C project-clone -c submodule.recurse worktree add ../project-3 +' + +test_expect_success '"add" with initialized submodule, with submodule.recurse unset' ' + git -C project-clone submodule update --init && + git -C project-clone worktree add ../project-4 +' + +test_expect_success '"add" with initialized submodule, with submodule.recurse set' ' + git -C project-clone -c submodule.recurse worktree add ../project-5 +' + +test_done diff --git a/t/t2403-worktree-move.sh b/t/t2403-worktree-move.sh index a4e1a178e0a003..e8246eed9c6047 100755 --- a/t/t2403-worktree-move.sh +++ b/t/t2403-worktree-move.sh @@ -138,7 +138,8 @@ test_expect_success 'move a repo with uninitialized submodule' ' ( cd withsub && test_commit initial && - git submodule add "$PWD"/.git sub && + git -c protocol.file.allow=always \ + submodule add "$PWD"/.git sub && git commit -m withsub && git worktree add second HEAD && git worktree move second third @@ -148,7 +149,7 @@ test_expect_success 'move a repo with uninitialized submodule' ' test_expect_success 'not move a repo with initialized submodule' ' ( cd withsub && - git -C third submodule update && + git -c protocol.file.allow=always -C third submodule update && test_must_fail git worktree move third forth ) ' @@ -227,6 +228,7 @@ test_expect_success 'remove cleans up .git/worktrees when empty' ' ' test_expect_success 'remove a repo with uninitialized submodule' ' + test_config_global protocol.file.allow always && ( cd withsub && git worktree add to-remove HEAD && @@ -235,6 +237,7 @@ test_expect_success 'remove a repo with uninitialized submodule' ' ' test_expect_success 'not remove a repo with initialized submodule' ' + test_config_global protocol.file.allow always && ( cd withsub && git worktree add to-remove HEAD && diff --git a/t/t2405-worktree-submodule.sh b/t/t2405-worktree-submodule.sh index b172c26ca43291..11018f37c70c02 100755 --- a/t/t2405-worktree-submodule.sh +++ b/t/t2405-worktree-submodule.sh @@ -10,6 +10,7 @@ export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME base_path=$(pwd -P) test_expect_success 'setup: create origin repos' ' + git config --global protocol.file.allow always && git init origin/sub && test_commit -C origin/sub file1 && git init origin/main && diff --git a/t/t2405-worktree-submodule.sh.orig b/t/t2405-worktree-submodule.sh.orig new file mode 100755 index 00000000000000..b172c26ca43291 --- /dev/null +++ b/t/t2405-worktree-submodule.sh.orig @@ -0,0 +1,93 @@ +#!/bin/sh + +test_description='Combination of submodules and multiple worktrees' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. 
./test-lib.sh + +base_path=$(pwd -P) + +test_expect_success 'setup: create origin repos' ' + git init origin/sub && + test_commit -C origin/sub file1 && + git init origin/main && + test_commit -C origin/main first && + git -C origin/main submodule add ../sub && + git -C origin/main commit -m "add sub" && + test_commit -C origin/sub "file1 updated" file1 file1updated file1updated && + git -C origin/main/sub pull && + git -C origin/main add sub && + git -C origin/main commit -m "sub updated" +' + +test_expect_success 'setup: clone superproject to create main worktree' ' + git clone --recursive "$base_path/origin/main" main +' + +rev1_hash_main=$(git --git-dir=origin/main/.git show --pretty=format:%h -q "HEAD~1") +rev1_hash_sub=$(git --git-dir=origin/sub/.git show --pretty=format:%h -q "HEAD~1") + +test_expect_success 'add superproject worktree' ' + git -C main worktree add "$base_path/worktree" "$rev1_hash_main" +' + +test_expect_failure 'submodule is checked out just after worktree add' ' + git -C worktree diff --submodule main"^!" >out && + grep "file1 updated" out +' + +test_expect_success 'add superproject worktree and initialize submodules' ' + git -C main worktree add "$base_path/worktree-submodule-update" "$rev1_hash_main" && + git -C worktree-submodule-update submodule update +' + +test_expect_success 'submodule is checked out just after submodule update in linked worktree' ' + git -C worktree-submodule-update diff --submodule main"^!" >out && + grep "file1 updated" out +' + +test_expect_success 'add superproject worktree and manually add submodule worktree' ' + git -C main worktree add "$base_path/linked_submodule" "$rev1_hash_main" && + git -C main/sub worktree add "$base_path/linked_submodule/sub" "$rev1_hash_sub" +' + +test_expect_success 'submodule is checked out after manually adding submodule worktree' ' + git -C linked_submodule diff --submodule main"^!" 
>out && + grep "file1 updated" out +' + +test_expect_success 'checkout --recurse-submodules uses $GIT_DIR for submodules in a linked worktree' ' + git -C main worktree add "$base_path/checkout-recurse" --detach && + git -C checkout-recurse submodule update --init && + echo "gitdir: ../../main/.git/worktrees/checkout-recurse/modules/sub" >expect-gitfile && + cat checkout-recurse/sub/.git >actual-gitfile && + test_cmp expect-gitfile actual-gitfile && + git -C main/sub rev-parse HEAD >expect-head-main && + git -C checkout-recurse checkout --recurse-submodules HEAD~1 && + cat checkout-recurse/sub/.git >actual-gitfile && + git -C main/sub rev-parse HEAD >actual-head-main && + test_cmp expect-gitfile actual-gitfile && + test_cmp expect-head-main actual-head-main +' + +test_expect_success 'core.worktree is removed in $GIT_DIR/modules//config, not in $GIT_COMMON_DIR/modules//config' ' + echo "../../../sub" >expect-main && + git -C main/sub config --get core.worktree >actual-main && + test_cmp expect-main actual-main && + echo "../../../../../../checkout-recurse/sub" >expect-linked && + git -C checkout-recurse/sub config --get core.worktree >actual-linked && + test_cmp expect-linked actual-linked && + git -C checkout-recurse checkout --recurse-submodules first && + test_expect_code 1 git -C main/.git/worktrees/checkout-recurse/modules/sub config --get core.worktree >linked-config && + test_must_be_empty linked-config && + git -C main/sub config --get core.worktree >actual-main && + test_cmp expect-main actual-main +' + +test_expect_success 'unsetting core.worktree does not prevent running commands directly against the submodule repository' ' + git -C main/.git/worktrees/checkout-recurse/modules/sub log +' + +test_done diff --git a/t/t3200-branch.sh b/t/t3200-branch.sh index e575ffb4ffb4c2..354e625fbdbbdc 100755 --- a/t/t3200-branch.sh +++ b/t/t3200-branch.sh @@ -282,6 +282,7 @@ test_expect_success 'deleting checked-out branch from repo that is a submodule' git init repo1 && git init repo1/sub && test_commit -C repo1/sub x && + test_config_global protocol.file.allow always && git -C repo1 submodule add ./sub && git -C repo1 commit -m "adding sub" && diff --git a/t/t3200-branch.sh.orig b/t/t3200-branch.sh.orig new file mode 100755 index 00000000000000..e575ffb4ffb4c2 --- /dev/null +++ b/t/t3200-branch.sh.orig @@ -0,0 +1,1425 @@ +#!/bin/sh +# +# Copyright (c) 2005 Amos Waterland +# + +test_description='git branch assorted tests' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh +. "$TEST_DIRECTORY"/lib-rebase.sh + +test_expect_success 'prepare a trivial repository' ' + echo Hello >A && + git update-index --add A && + git commit -m "Initial commit." && + git branch -M main && + echo World >>A && + git update-index --add A && + git commit -m "Second commit." 
&&
+ HEAD=$(git rev-parse --verify HEAD)
+'
+
+test_expect_success 'git branch --help should not have created a bogus branch' '
+ test_might_fail git branch --man --help </dev/null >/dev/null 2>&1 &&
+ test_path_is_missing .git/refs/heads/--help
+'
+
+test_expect_success 'branch -h in broken repository' '
+ mkdir broken &&
+ (
+ cd broken &&
+ git init -b main &&
+ >.git/refs/heads/main &&
+ test_expect_code 129 git branch -h >usage 2>&1
+ ) &&
+ test_i18ngrep "[Uu]sage" broken/usage
+'
+
+test_expect_success 'git branch abc should create a branch' '
+ git branch abc && test_path_is_file .git/refs/heads/abc
+'
+
+test_expect_success 'git branch a/b/c should create a branch' '
+ git branch a/b/c && test_path_is_file .git/refs/heads/a/b/c
+'
+
+test_expect_success 'git branch mb main... should create a branch' '
+ git branch mb main... && test_path_is_file .git/refs/heads/mb
+'
+
+test_expect_success 'git branch HEAD should fail' '
+ test_must_fail git branch HEAD
+'
+
+cat >expect <<EOF
+$ZERO_OID $HEAD $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150200 +0000 branch: Created from main
+EOF
+test_expect_success 'git branch --create-reflog d/e/f should create a branch and a log' '
+ GIT_COMMITTER_DATE="2005-05-26 23:30" \
+ git -c core.logallrefupdates=false branch --create-reflog d/e/f &&
+ test_path_is_file .git/refs/heads/d/e/f &&
+ test_path_is_file .git/logs/refs/heads/d/e/f &&
+ test_cmp expect .git/logs/refs/heads/d/e/f
+'
+
+test_expect_success 'git branch -d d/e/f should delete a branch and a log' '
+ git branch -d d/e/f &&
+ test_path_is_missing .git/refs/heads/d/e/f &&
+ test_must_fail git reflog exists refs/heads/d/e/f
+'
+
+test_expect_success 'git branch j/k should work after branch j has been deleted' '
+ git branch j &&
+ git branch -d j &&
+ git branch j/k
+'
+
+test_expect_success 'git branch l should work after branch l/m has been deleted' '
+ git branch l/m &&
+ git branch -d l/m &&
+ git branch l
+'
+
+test_expect_success 'git branch -m dumps usage' '
+ test_expect_code 128 git branch -m 2>err &&
+ test_i18ngrep "branch name required" err
+'
+
+test_expect_success 'git branch -m m broken_symref should work' '
+ test_when_finished "git branch -D broken_symref" &&
+ git branch --create-reflog m &&
+ git symbolic-ref refs/heads/broken_symref refs/heads/i_am_broken &&
+ git branch -m m broken_symref &&
+ git reflog exists refs/heads/broken_symref &&
+ test_must_fail git reflog exists refs/heads/i_am_broken
+'
+
+test_expect_success 'git branch -m m m/m should work' '
+ git branch --create-reflog m &&
+ git branch -m m m/m &&
+ git reflog exists refs/heads/m/m
+'
+
+test_expect_success 'git branch -m n/n n should work' '
+ git branch --create-reflog n/n &&
+ git branch -m n/n n &&
+ git reflog exists refs/heads/n
+'
+
+# The topmost entry in reflog for branch bbb is about branch creation.
+# Hence, we compare bbb@{1} (instead of bbb@{0}) with aaa@{0}.
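To make the comment above concrete, a rough sketch of the reflog shape after "git checkout -b aaa", one empty commit, and "git branch -m bbb" (illustrative only; the exact entry messages are not asserted by the test):

    $ git reflog show bbb
    <oid> bbb@{0}: (entry written by the rename itself)
    <oid> bbb@{1}: commit: a new commit

so the test below compares bbb@{1}, rather than bbb@{0}, with what aaa@{0} pointed at before the rename.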
+ +test_expect_success 'git branch -m bbb should rename checked out branch' ' + test_when_finished git branch -D bbb && + test_when_finished git checkout main && + git checkout -b aaa && + git commit --allow-empty -m "a new commit" && + git rev-parse aaa@{0} >expect && + git branch -m bbb && + git rev-parse bbb@{1} >actual && + test_cmp expect actual && + git symbolic-ref HEAD >actual && + echo refs/heads/bbb >expect && + test_cmp expect actual +' + +test_expect_success 'renaming checked out branch works with d/f conflict' ' + test_when_finished "git branch -D foo/bar || git branch -D foo" && + test_when_finished git checkout main && + git checkout -b foo && + git branch -m foo/bar && + git symbolic-ref HEAD >actual && + echo refs/heads/foo/bar >expect && + test_cmp expect actual +' + +test_expect_success 'git branch -m o/o o should fail when o/p exists' ' + git branch o/o && + git branch o/p && + test_must_fail git branch -m o/o o +' + +test_expect_success 'git branch -m o/q o/p should fail when o/p exists' ' + git branch o/q && + test_must_fail git branch -m o/q o/p +' + +test_expect_success 'git branch -M o/q o/p should work when o/p exists' ' + git branch -M o/q o/p +' + +test_expect_success 'git branch -m -f o/q o/p should work when o/p exists' ' + git branch o/q && + git branch -m -f o/q o/p +' + +test_expect_success 'git branch -m q r/q should fail when r exists' ' + git branch q && + git branch r && + test_must_fail git branch -m q r/q +' + +test_expect_success 'git branch -M foo bar should fail when bar is checked out' ' + git branch bar && + git checkout -b foo && + test_must_fail git branch -M bar foo +' + +test_expect_success 'git branch -M baz bam should succeed when baz is checked out' ' + git checkout -b baz && + git branch bam && + git branch -M baz bam && + test $(git rev-parse --abbrev-ref HEAD) = bam +' + +test_expect_success 'git branch -M baz bam should add entries to .git/logs/HEAD' ' + msg="Branch: renamed refs/heads/baz to refs/heads/bam" && + grep " 0\{40\}.*$msg$" .git/logs/HEAD && + grep "^0\{40\}.*$msg$" .git/logs/HEAD +' + +test_expect_success 'git branch -M should leave orphaned HEAD alone' ' + git init -b main orphan && + ( + cd orphan && + test_commit initial && + git checkout --orphan lonely && + grep lonely .git/HEAD && + test_path_is_missing .git/refs/head/lonely && + git branch -M main mistress && + grep lonely .git/HEAD + ) +' + +test_expect_success 'resulting reflog can be shown by log -g' ' + oid=$(git rev-parse HEAD) && + cat >expect <<-EOF && + HEAD@{0} $oid $msg + HEAD@{2} $oid checkout: moving from foo to baz + EOF + git log -g --format="%gd %H %gs" -2 HEAD >actual && + test_cmp expect actual +' + +test_expect_success 'git branch -M baz bam should succeed when baz is checked out as linked working tree' ' + git checkout main && + git worktree add -b baz bazdir && + git worktree add -f bazdir2 baz && + git branch -M baz bam && + test $(git -C bazdir rev-parse --abbrev-ref HEAD) = bam && + test $(git -C bazdir2 rev-parse --abbrev-ref HEAD) = bam && + rm -r bazdir bazdir2 && + git worktree prune +' + +test_expect_success 'git branch -M baz bam should succeed within a worktree in which baz is checked out' ' + git checkout -b baz && + git worktree add -f bazdir baz && + ( + cd bazdir && + git branch -M baz bam && + test $(git rev-parse --abbrev-ref HEAD) = bam + ) && + test $(git rev-parse --abbrev-ref HEAD) = bam && + rm -r bazdir && + git worktree prune +' + +test_expect_success 'git branch -M main should work when main is checked out' ' + git 
checkout main && + git branch -M main +' + +test_expect_success 'git branch -M main main should work when main is checked out' ' + git checkout main && + git branch -M main main +' + +test_expect_success 'git branch -M topic topic should work when main is checked out' ' + git checkout main && + git branch topic && + git branch -M topic topic +' + +test_expect_success 'git branch -v -d t should work' ' + git branch t && + git rev-parse --verify refs/heads/t && + git branch -v -d t && + test_must_fail git rev-parse --verify refs/heads/t +' + +test_expect_success 'git branch -v -m t s should work' ' + git branch t && + git rev-parse --verify refs/heads/t && + git branch -v -m t s && + test_must_fail git rev-parse --verify refs/heads/t && + git rev-parse --verify refs/heads/s && + git branch -d s +' + +test_expect_success 'git branch -m -d t s should fail' ' + git branch t && + git rev-parse refs/heads/t && + test_must_fail git branch -m -d t s && + git branch -d t && + test_must_fail git rev-parse refs/heads/t +' + +test_expect_success 'git branch --list -d t should fail' ' + git branch t && + git rev-parse refs/heads/t && + test_must_fail git branch --list -d t && + git branch -d t && + test_must_fail git rev-parse refs/heads/t +' + +test_expect_success 'deleting checked-out branch from repo that is a submodule' ' + test_when_finished "rm -rf repo1 repo2" && + + git init repo1 && + git init repo1/sub && + test_commit -C repo1/sub x && + git -C repo1 submodule add ./sub && + git -C repo1 commit -m "adding sub" && + + git clone --recurse-submodules repo1 repo2 && + git -C repo2/sub checkout -b work && + test_must_fail git -C repo2/sub branch -D work +' + +test_expect_success 'bare main worktree has HEAD at branch deleted by secondary worktree' ' + test_when_finished "rm -rf nonbare base secondary" && + + git init -b main nonbare && + test_commit -C nonbare x && + git clone --bare nonbare bare && + git -C bare worktree add --detach ../secondary main && + git -C secondary branch -D main +' + +test_expect_success 'git branch --list -v with --abbrev' ' + test_when_finished "git branch -D t" && + git branch t && + git branch -v --list t >actual.default && + git branch -v --list --abbrev t >actual.abbrev && + test_cmp actual.default actual.abbrev && + + git branch -v --list --no-abbrev t >actual.noabbrev && + git branch -v --list --abbrev=0 t >actual.0abbrev && + git -c core.abbrev=no branch -v --list t >actual.noabbrev-conf && + test_cmp actual.noabbrev actual.0abbrev && + test_cmp actual.noabbrev actual.noabbrev-conf && + + git branch -v --list --abbrev=36 t >actual.36abbrev && + # how many hexdigits are used? 
+ read name objdefault rest /dev/null && + test "$objfull" != "$obj36" && + expr "$objfull" : "$obj36" >/dev/null + +' + +test_expect_success 'git branch --column' ' + COLUMNS=81 git branch --column=column >actual && + cat >expect <<\EOF && + a/b/c bam foo l * main n o/p r + abc bar j/k m/m mb o/o q topic +EOF + test_cmp expect actual +' + +test_expect_success 'git branch --column with an extremely long branch name' ' + long=this/is/a/part/of/long/branch/name && + long=z$long/$long/$long/$long && + test_when_finished "git branch -d $long" && + git branch $long && + COLUMNS=80 git branch --column=column >actual && + cat >expect <actual && + git config --unset column.branch && + git config --unset column.ui && + cat >expect <<\EOF && + a/b/c bam foo l * main n o/p r + abc bar j/k m/m mb o/o q topic +EOF + test_cmp expect actual +' + +test_expect_success 'git branch --column -v should fail' ' + test_must_fail git branch --column -v +' + +test_expect_success 'git branch -v with column.ui ignored' ' + git config column.ui column && + COLUMNS=80 git branch -v | cut -c -8 | sed "s/ *$//" >actual && + git config --unset column.ui && + cat >expect <<\EOF && + a/b/c + abc + bam + bar + foo + j/k + l + m/m +* main + mb + n + o/o + o/p + q + r + topic +EOF + test_cmp expect actual +' + +mv .git/config .git/config-saved + +test_expect_success SHA1 'git branch -m q q2 without config should succeed' ' + git branch -m q q2 && + git branch -m q2 q +' + +mv .git/config-saved .git/config + +git config branch.s/s.dummy Hello + +test_expect_success 'git branch -m s/s s should work when s/t is deleted' ' + git branch --create-reflog s/s && + git reflog exists refs/heads/s/s && + git branch --create-reflog s/t && + git reflog exists refs/heads/s/t && + git branch -d s/t && + git branch -m s/s s && + git reflog exists refs/heads/s +' + +test_expect_success 'config information was renamed, too' ' + test $(git config branch.s.dummy) = Hello && + test_must_fail git config branch.s/s.dummy +' + +test_expect_success 'git branch -m correctly renames multiple config sections' ' + test_when_finished "git checkout main" && + git checkout -b source main && + + # Assert that a config file with multiple config sections has + # those sections preserved... + cat >expect <<-\EOF && + branch.dest.key1=value1 + some.gar.b=age + branch.dest.key2=value2 + EOF + cat >config.branch <<\EOF && +;; Note the lack of -\EOF above & mixed indenting here. This is +;; intentional, we are also testing that the formatting of copied +;; sections is preserved. + +;; Comment for source. Tabs +[branch "source"] + ;; Comment for the source value + key1 = value1 +;; Comment for some.gar. Spaces +[some "gar"] + ;; Comment for the some.gar value + b = age +;; Comment for source, again. Mixed tabs/spaces. +[branch "source"] + ;; Comment for the source value, again + key2 = value2 +EOF + cat config.branch >>.git/config && + git branch -m source dest && + git config -f .git/config -l | grep -F -e source -e dest -e some.gar >actual && + test_cmp expect actual && + + # ...and that the comments for those sections are also + # preserved. 
+ cat config.branch | sed "s/\"source\"/\"dest\"/" >expect && + sed -n -e "/Note the lack/,\$p" .git/config >actual && + test_cmp expect actual +' + +test_expect_success 'git branch -c dumps usage' ' + test_expect_code 128 git branch -c 2>err && + test_i18ngrep "branch name required" err +' + +test_expect_success 'git branch --copy dumps usage' ' + test_expect_code 128 git branch --copy 2>err && + test_i18ngrep "branch name required" err +' + +test_expect_success 'git branch -c d e should work' ' + git branch --create-reflog d && + git reflog exists refs/heads/d && + git config branch.d.dummy Hello && + git branch -c d e && + git reflog exists refs/heads/d && + git reflog exists refs/heads/e && + echo Hello >expect && + git config branch.e.dummy >actual && + test_cmp expect actual && + echo Hello >expect && + git config branch.d.dummy >actual && + test_cmp expect actual +' + +test_expect_success 'git branch --copy is a synonym for -c' ' + git branch --create-reflog copy && + git reflog exists refs/heads/copy && + git config branch.copy.dummy Hello && + git branch --copy copy copy-to && + git reflog exists refs/heads/copy && + git reflog exists refs/heads/copy-to && + echo Hello >expect && + git config branch.copy.dummy >actual && + test_cmp expect actual && + echo Hello >expect && + git config branch.copy-to.dummy >actual && + test_cmp expect actual +' + +test_expect_success 'git branch -c ee ef should copy ee to create branch ef' ' + git checkout -b ee && + git reflog exists refs/heads/ee && + git config branch.ee.dummy Hello && + git branch -c ee ef && + git reflog exists refs/heads/ee && + git reflog exists refs/heads/ef && + test $(git config branch.ee.dummy) = Hello && + test $(git config branch.ef.dummy) = Hello && + test $(git rev-parse --abbrev-ref HEAD) = ee +' + +test_expect_success 'git branch -c f/f g/g should work' ' + git branch --create-reflog f/f && + git reflog exists refs/heads/f/f && + git config branch.f/f.dummy Hello && + git branch -c f/f g/g && + git reflog exists refs/heads/f/f && + git reflog exists refs/heads/g/g && + test $(git config branch.f/f.dummy) = Hello && + test $(git config branch.g/g.dummy) = Hello +' + +test_expect_success 'git branch -c m2 m2 should work' ' + git branch --create-reflog m2 && + git reflog exists refs/heads/m2 && + git config branch.m2.dummy Hello && + git branch -c m2 m2 && + git reflog exists refs/heads/m2 && + test $(git config branch.m2.dummy) = Hello +' + +test_expect_success 'git branch -c zz zz/zz should fail' ' + git branch --create-reflog zz && + git reflog exists refs/heads/zz && + test_must_fail git branch -c zz zz/zz +' + +test_expect_success 'git branch -c b/b b should fail' ' + git branch --create-reflog b/b && + test_must_fail git branch -c b/b b +' + +test_expect_success 'git branch -C o/q o/p should work when o/p exists' ' + git branch --create-reflog o/q && + git reflog exists refs/heads/o/q && + git reflog exists refs/heads/o/p && + git branch -C o/q o/p +' + +test_expect_success 'git branch -c -f o/q o/p should work when o/p exists' ' + git reflog exists refs/heads/o/q && + git reflog exists refs/heads/o/p && + git branch -c -f o/q o/p +' + +test_expect_success 'git branch -c qq rr/qq should fail when rr exists' ' + git branch qq && + git branch rr && + test_must_fail git branch -c qq rr/qq +' + +test_expect_success 'git branch -C b1 b2 should fail when b2 is checked out' ' + git branch b1 && + git checkout -b b2 && + test_must_fail git branch -C b1 b2 +' + +test_expect_success 'git branch -C c1 c2 should succeed 
when c1 is checked out' ' + git checkout -b c1 && + git branch c2 && + git branch -C c1 c2 && + test $(git rev-parse --abbrev-ref HEAD) = c1 +' + +test_expect_success 'git branch -C c1 c2 should never touch HEAD' ' + msg="Branch: copied refs/heads/c1 to refs/heads/c2" && + ! grep "$msg$" .git/logs/HEAD +' + +test_expect_success 'git branch -C main should work when main is checked out' ' + git checkout main && + git branch -C main +' + +test_expect_success 'git branch -C main main should work when main is checked out' ' + git checkout main && + git branch -C main main +' + +test_expect_success 'git branch -C main5 main5 should work when main is checked out' ' + git checkout main && + git branch main5 && + git branch -C main5 main5 +' + +test_expect_success 'git branch -C ab cd should overwrite existing config for cd' ' + git branch --create-reflog cd && + git reflog exists refs/heads/cd && + git config branch.cd.dummy CD && + git branch --create-reflog ab && + git reflog exists refs/heads/ab && + git config branch.ab.dummy AB && + git branch -C ab cd && + git reflog exists refs/heads/ab && + git reflog exists refs/heads/cd && + test $(git config branch.ab.dummy) = AB && + test $(git config branch.cd.dummy) = AB +' + +test_expect_success 'git branch -c correctly copies multiple config sections' ' + FOO=1 && + export FOO && + test_when_finished "git checkout main" && + git checkout -b source2 main && + + # Assert that a config file with multiple config sections has + # those sections preserved... + cat >expect <<-\EOF && + branch.source2.key1=value1 + branch.dest2.key1=value1 + more.gar.b=age + branch.source2.key2=value2 + branch.dest2.key2=value2 + EOF + cat >config.branch <<\EOF && +;; Note the lack of -\EOF above & mixed indenting here. This is +;; intentional, we are also testing that the formatting of copied +;; sections is preserved. + +;; Comment for source2. Tabs +[branch "source2"] + ;; Comment for the source2 value + key1 = value1 +;; Comment for more.gar. Spaces +[more "gar"] + ;; Comment for the more.gar value + b = age +;; Comment for source2, again. Mixed tabs/spaces. +[branch "source2"] + ;; Comment for the source2 value, again + key2 = value2 +EOF + cat config.branch >>.git/config && + git branch -c source2 dest2 && + git config -f .git/config -l | grep -F -e source2 -e dest2 -e more.gar >actual && + test_cmp expect actual && + + # ...and that the comments and formatting for those sections + # is also preserved. + cat >expect <<\EOF && +;; Comment for source2. Tabs +[branch "source2"] + ;; Comment for the source2 value + key1 = value1 +;; Comment for more.gar. Spaces +[branch "dest2"] + ;; Comment for the source2 value + key1 = value1 +;; Comment for more.gar. Spaces +[more "gar"] + ;; Comment for the more.gar value + b = age +;; Comment for source2, again. Mixed tabs/spaces. +[branch "source2"] + ;; Comment for the source2 value, again + key2 = value2 +[branch "dest2"] + ;; Comment for the source2 value, again + key2 = value2 +EOF + sed -n -e "/Comment for source2/,\$p" .git/config >actual && + test_cmp expect actual +' + +test_expect_success 'deleting a symref' ' + git branch target && + git symbolic-ref refs/heads/symref refs/heads/target && + echo "Deleted branch symref (was refs/heads/target)." 
>expect && + git branch -d symref >actual && + test_path_is_file .git/refs/heads/target && + test_path_is_missing .git/refs/heads/symref && + test_cmp expect actual +' + +test_expect_success 'deleting a dangling symref' ' + git symbolic-ref refs/heads/dangling-symref nowhere && + test_path_is_file .git/refs/heads/dangling-symref && + echo "Deleted branch dangling-symref (was nowhere)." >expect && + git branch -d dangling-symref >actual && + test_path_is_missing .git/refs/heads/dangling-symref && + test_cmp expect actual +' + +test_expect_success 'deleting a self-referential symref' ' + git symbolic-ref refs/heads/self-reference refs/heads/self-reference && + test_path_is_file .git/refs/heads/self-reference && + echo "Deleted branch self-reference (was refs/heads/self-reference)." >expect && + git branch -d self-reference >actual && + test_path_is_missing .git/refs/heads/self-reference && + test_cmp expect actual +' + +test_expect_success 'renaming a symref is not allowed' ' + git symbolic-ref refs/heads/topic refs/heads/main && + test_must_fail git branch -m topic new-topic && + git symbolic-ref refs/heads/topic && + test_path_is_file .git/refs/heads/main && + test_path_is_missing .git/refs/heads/new-topic +' + +test_expect_success SYMLINKS 'git branch -m u v should fail when the reflog for u is a symlink' ' + git branch --create-reflog u && + mv .git/logs/refs/heads/u real-u && + ln -s real-u .git/logs/refs/heads/u && + test_must_fail git branch -m u v +' + +test_expect_success 'test tracking setup via --track' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track my1 local/main && + test $(git config branch.my1.remote) = local && + test $(git config branch.my1.merge) = refs/heads/main +' + +test_expect_success 'test tracking setup (non-wildcard, matching)' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/main:refs/remotes/local/main && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track my4 local/main && + test $(git config branch.my4.remote) = local && + test $(git config branch.my4.merge) = refs/heads/main +' + +test_expect_success 'tracking setup fails on non-matching refspec' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git config remote.local.fetch refs/heads/s:refs/remotes/local/s && + test_must_fail git branch --track my5 local/main && + test_must_fail git config branch.my5.remote && + test_must_fail git config branch.my5.merge +' + +test_expect_success 'test tracking setup via config' ' + git config branch.autosetupmerge true && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch my3 local/main && + test $(git config branch.my3.remote) = local && + test $(git config branch.my3.merge) = refs/heads/main +' + +test_expect_success 'test overriding tracking setup via --no-track' ' + git config branch.autosetupmerge true && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track my2 local/main && + git config branch.autosetupmerge false && + ! 
test "$(git config branch.my2.remote)" = local && + ! test "$(git config branch.my2.merge)" = refs/heads/main +' + +test_expect_success 'no tracking without .fetch entries' ' + git config branch.autosetupmerge true && + git branch my6 s && + git config branch.autosetupmerge false && + test -z "$(git config branch.my6.remote)" && + test -z "$(git config branch.my6.merge)" +' + +test_expect_success 'test tracking setup via --track but deeper' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/o/o || git fetch local) && + git branch --track my7 local/o/o && + test "$(git config branch.my7.remote)" = local && + test "$(git config branch.my7.merge)" = refs/heads/o/o +' + +test_expect_success 'test deleting branch deletes branch config' ' + git branch -d my7 && + test -z "$(git config branch.my7.remote)" && + test -z "$(git config branch.my7.merge)" +' + +test_expect_success 'test deleting branch without config' ' + git branch my7 s && + sha1=$(git rev-parse my7 | cut -c 1-7) && + echo "Deleted branch my7 (was $sha1)." >expect && + git branch -d my7 >actual 2>&1 && + test_cmp expect actual +' + +test_expect_success 'deleting currently checked out branch fails' ' + git worktree add -b my7 my7 && + test_must_fail git -C my7 branch -d my7 && + test_must_fail git branch -d my7 && + rm -r my7 && + git worktree prune +' + +test_expect_success 'test --track without .fetch entries' ' + git branch --track my8 && + test "$(git config branch.my8.remote)" && + test "$(git config branch.my8.merge)" +' + +test_expect_success 'branch from non-branch HEAD w/autosetupmerge=always' ' + git config branch.autosetupmerge always && + git branch my9 HEAD^ && + git config branch.autosetupmerge false +' + +test_expect_success 'branch from non-branch HEAD w/--track causes failure' ' + test_must_fail git branch --track my10 HEAD^ +' + +test_expect_success 'branch from tag w/--track causes failure' ' + git tag foobar && + test_must_fail git branch --track my11 foobar +' + +test_expect_success '--set-upstream-to fails on multiple branches' ' + echo "fatal: too many arguments to set new upstream" >expect && + test_must_fail git branch --set-upstream-to main a b c 2>err && + test_cmp expect err +' + +test_expect_success '--set-upstream-to fails on detached HEAD' ' + git checkout HEAD^{} && + test_when_finished git checkout - && + echo "fatal: could not set upstream of HEAD to main when it does not point to any branch." >expect && + test_must_fail git branch --set-upstream-to main 2>err && + test_cmp expect err +' + +test_expect_success '--set-upstream-to fails on a missing dst branch' ' + echo "fatal: branch '"'"'does-not-exist'"'"' does not exist" >expect && + test_must_fail git branch --set-upstream-to main does-not-exist 2>err && + test_cmp expect err +' + +test_expect_success '--set-upstream-to fails on a missing src branch' ' + test_must_fail git branch --set-upstream-to does-not-exist main 2>err && + test_i18ngrep "the requested upstream branch '"'"'does-not-exist'"'"' does not exist" err +' + +test_expect_success '--set-upstream-to fails on a non-ref' ' + echo "fatal: Cannot setup tracking information; starting point '"'"'HEAD^{}'"'"' is not a branch." 
>expect && + test_must_fail git branch --set-upstream-to HEAD^{} 2>err && + test_cmp expect err +' + +test_expect_success '--set-upstream-to fails on locked config' ' + test_when_finished "rm -f .git/config.lock" && + >.git/config.lock && + git branch locked && + test_must_fail git branch --set-upstream-to locked 2>err && + test_i18ngrep "could not lock config file .git/config" err +' + +test_expect_success 'use --set-upstream-to modify HEAD' ' + test_config branch.main.remote foo && + test_config branch.main.merge foo && + git branch my12 && + git branch --set-upstream-to my12 && + test "$(git config branch.main.remote)" = "." && + test "$(git config branch.main.merge)" = "refs/heads/my12" +' + +test_expect_success 'use --set-upstream-to modify a particular branch' ' + git branch my13 && + git branch --set-upstream-to main my13 && + test_when_finished "git branch --unset-upstream my13" && + test "$(git config branch.my13.remote)" = "." && + test "$(git config branch.my13.merge)" = "refs/heads/main" +' + +test_expect_success '--unset-upstream should fail if given a non-existent branch' ' + echo "fatal: Branch '"'"'i-dont-exist'"'"' has no upstream information" >expect && + test_must_fail git branch --unset-upstream i-dont-exist 2>err && + test_cmp expect err +' + +test_expect_success '--unset-upstream should fail if config is locked' ' + test_when_finished "rm -f .git/config.lock" && + git branch --set-upstream-to locked && + >.git/config.lock && + test_must_fail git branch --unset-upstream 2>err && + test_i18ngrep "could not lock config file .git/config" err +' + +test_expect_success 'test --unset-upstream on HEAD' ' + git branch my14 && + test_config branch.main.remote foo && + test_config branch.main.merge foo && + git branch --set-upstream-to my14 && + git branch --unset-upstream && + test_must_fail git config branch.main.remote && + test_must_fail git config branch.main.merge && + # fail for a branch without upstream set + echo "fatal: Branch '"'"'main'"'"' has no upstream information" >expect && + test_must_fail git branch --unset-upstream 2>err && + test_cmp expect err +' + +test_expect_success '--unset-upstream should fail on multiple branches' ' + echo "fatal: too many arguments to unset upstream" >expect && + test_must_fail git branch --unset-upstream a b c 2>err && + test_cmp expect err +' + +test_expect_success '--unset-upstream should fail on detached HEAD' ' + git checkout HEAD^{} && + test_when_finished git checkout - && + echo "fatal: could not unset upstream of HEAD when it does not point to any branch." >expect && + test_must_fail git branch --unset-upstream 2>err && + test_cmp expect err +' + +test_expect_success 'test --unset-upstream on a particular branch' ' + git branch my15 && + git branch --set-upstream-to main my14 && + git branch --unset-upstream my14 && + test_must_fail git config branch.my14.remote && + test_must_fail git config branch.my14.merge +' + +test_expect_success 'disabled option --set-upstream fails' ' + test_must_fail git branch --set-upstream origin/main +' + +test_expect_success '--set-upstream-to notices an error to set branch as own upstream' ' + git branch --set-upstream-to refs/heads/my13 my13 2>actual && + cat >expect <<-\EOF && + warning: Not setting branch my13 as its own upstream. 
+ EOF + test_expect_code 1 git config branch.my13.remote && + test_expect_code 1 git config branch.my13.merge && + test_cmp expect actual +' + +# Keep this test last, as it changes the current branch +cat >expect < 1117150200 +0000 branch: Created from main +EOF +test_expect_success 'git checkout -b g/h/i -l should create a branch and a log' ' + GIT_COMMITTER_DATE="2005-05-26 23:30" \ + git checkout -b g/h/i -l main && + test_path_is_file .git/refs/heads/g/h/i && + test_path_is_file .git/logs/refs/heads/g/h/i && + test_cmp expect .git/logs/refs/heads/g/h/i +' + +test_expect_success 'checkout -b makes reflog by default' ' + git checkout main && + git config --unset core.logAllRefUpdates && + git checkout -b alpha && + git rev-parse --verify alpha@{0} +' + +test_expect_success 'checkout -b does not make reflog when core.logAllRefUpdates = false' ' + git checkout main && + git config core.logAllRefUpdates false && + git checkout -b beta && + test_must_fail git rev-parse --verify beta@{0} +' + +test_expect_success 'checkout -b with -l makes reflog when core.logAllRefUpdates = false' ' + git checkout main && + git checkout -lb gamma && + git config --unset core.logAllRefUpdates && + git rev-parse --verify gamma@{0} +' + +test_expect_success 'avoid ambiguous track' ' + git config branch.autosetupmerge true && + git config remote.ambi1.url lalala && + git config remote.ambi1.fetch refs/heads/lalala:refs/heads/main && + git config remote.ambi2.url lilili && + git config remote.ambi2.fetch refs/heads/lilili:refs/heads/main && + test_must_fail git branch all1 main && + test -z "$(git config branch.all1.merge)" +' + +test_expect_success 'autosetuprebase local on a tracked local branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase local && + (git show-ref -q refs/remotes/local/o || git fetch local) && + git branch mybase && + git branch --track myr1 mybase && + test "$(git config branch.myr1.remote)" = . && + test "$(git config branch.myr1.merge)" = refs/heads/mybase && + test "$(git config branch.myr1.rebase)" = true +' + +test_expect_success 'autosetuprebase always on a tracked local branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase always && + (git show-ref -q refs/remotes/local/o || git fetch local) && + git branch mybase2 && + git branch --track myr2 mybase && + test "$(git config branch.myr2.remote)" = . && + test "$(git config branch.myr2.merge)" = refs/heads/mybase && + test "$(git config branch.myr2.rebase)" = true +' + +test_expect_success 'autosetuprebase remote on a tracked local branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase remote && + (git show-ref -q refs/remotes/local/o || git fetch local) && + git branch mybase3 && + git branch --track myr3 mybase2 && + test "$(git config branch.myr3.remote)" = . && + test "$(git config branch.myr3.merge)" = refs/heads/mybase2 && + ! test "$(git config branch.myr3.rebase)" = true +' + +test_expect_success 'autosetuprebase never on a tracked local branch' ' + git config remote.local.url . 
&& + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase never && + (git show-ref -q refs/remotes/local/o || git fetch local) && + git branch mybase4 && + git branch --track myr4 mybase2 && + test "$(git config branch.myr4.remote)" = . && + test "$(git config branch.myr4.merge)" = refs/heads/mybase2 && + ! test "$(git config branch.myr4.rebase)" = true +' + +test_expect_success 'autosetuprebase local on a tracked remote branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase local && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track myr5 local/main && + test "$(git config branch.myr5.remote)" = local && + test "$(git config branch.myr5.merge)" = refs/heads/main && + ! test "$(git config branch.myr5.rebase)" = true +' + +test_expect_success 'autosetuprebase never on a tracked remote branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase never && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track myr6 local/main && + test "$(git config branch.myr6.remote)" = local && + test "$(git config branch.myr6.merge)" = refs/heads/main && + ! test "$(git config branch.myr6.rebase)" = true +' + +test_expect_success 'autosetuprebase remote on a tracked remote branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase remote && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track myr7 local/main && + test "$(git config branch.myr7.remote)" = local && + test "$(git config branch.myr7.merge)" = refs/heads/main && + test "$(git config branch.myr7.rebase)" = true +' + +test_expect_success 'autosetuprebase always on a tracked remote branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + git config branch.autosetuprebase remote && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track myr8 local/main && + test "$(git config branch.myr8.remote)" = local && + test "$(git config branch.myr8.merge)" = refs/heads/main && + test "$(git config branch.myr8.rebase)" = true +' + +test_expect_success 'autosetuprebase unconfigured on a tracked remote branch' ' + git config --unset branch.autosetuprebase && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --track myr9 local/main && + test "$(git config branch.myr9.remote)" = local && + test "$(git config branch.myr9.merge)" = refs/heads/main && + test "z$(git config branch.myr9.rebase)" = z +' + +test_expect_success 'autosetuprebase unconfigured on a tracked local branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/o || git fetch local) && + git branch mybase10 && + git branch --track myr10 mybase2 && + test "$(git config branch.myr10.remote)" = . && + test "$(git config branch.myr10.merge)" = refs/heads/mybase2 && + test "z$(git config branch.myr10.rebase)" = z +' + +test_expect_success 'autosetuprebase unconfigured on untracked local branch' ' + git config remote.local.url . 
&& + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr11 mybase2 && + test "z$(git config branch.myr11.remote)" = z && + test "z$(git config branch.myr11.merge)" = z && + test "z$(git config branch.myr11.rebase)" = z +' + +test_expect_success 'autosetuprebase unconfigured on untracked remote branch' ' + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr12 local/main && + test "z$(git config branch.myr12.remote)" = z && + test "z$(git config branch.myr12.merge)" = z && + test "z$(git config branch.myr12.rebase)" = z +' + +test_expect_success 'autosetuprebase never on an untracked local branch' ' + git config branch.autosetuprebase never && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr13 mybase2 && + test "z$(git config branch.myr13.remote)" = z && + test "z$(git config branch.myr13.merge)" = z && + test "z$(git config branch.myr13.rebase)" = z +' + +test_expect_success 'autosetuprebase local on an untracked local branch' ' + git config branch.autosetuprebase local && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr14 mybase2 && + test "z$(git config branch.myr14.remote)" = z && + test "z$(git config branch.myr14.merge)" = z && + test "z$(git config branch.myr14.rebase)" = z +' + +test_expect_success 'autosetuprebase remote on an untracked local branch' ' + git config branch.autosetuprebase remote && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr15 mybase2 && + test "z$(git config branch.myr15.remote)" = z && + test "z$(git config branch.myr15.merge)" = z && + test "z$(git config branch.myr15.rebase)" = z +' + +test_expect_success 'autosetuprebase always on an untracked local branch' ' + git config branch.autosetuprebase always && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr16 mybase2 && + test "z$(git config branch.myr16.remote)" = z && + test "z$(git config branch.myr16.merge)" = z && + test "z$(git config branch.myr16.rebase)" = z +' + +test_expect_success 'autosetuprebase never on an untracked remote branch' ' + git config branch.autosetuprebase never && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr17 local/main && + test "z$(git config branch.myr17.remote)" = z && + test "z$(git config branch.myr17.merge)" = z && + test "z$(git config branch.myr17.rebase)" = z +' + +test_expect_success 'autosetuprebase local on an untracked remote branch' ' + git config branch.autosetuprebase local && + git config remote.local.url . 
&& + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr18 local/main && + test "z$(git config branch.myr18.remote)" = z && + test "z$(git config branch.myr18.merge)" = z && + test "z$(git config branch.myr18.rebase)" = z +' + +test_expect_success 'autosetuprebase remote on an untracked remote branch' ' + git config branch.autosetuprebase remote && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr19 local/main && + test "z$(git config branch.myr19.remote)" = z && + test "z$(git config branch.myr19.merge)" = z && + test "z$(git config branch.myr19.rebase)" = z +' + +test_expect_success 'autosetuprebase always on an untracked remote branch' ' + git config branch.autosetuprebase always && + git config remote.local.url . && + git config remote.local.fetch refs/heads/*:refs/remotes/local/* && + (git show-ref -q refs/remotes/local/main || git fetch local) && + git branch --no-track myr20 local/main && + test "z$(git config branch.myr20.remote)" = z && + test "z$(git config branch.myr20.merge)" = z && + test "z$(git config branch.myr20.rebase)" = z +' + +test_expect_success 'autosetuprebase always on detached HEAD' ' + git config branch.autosetupmerge always && + test_when_finished git checkout main && + git checkout HEAD^0 && + git branch my11 && + test -z "$(git config branch.my11.remote)" && + test -z "$(git config branch.my11.merge)" +' + +test_expect_success 'detect misconfigured autosetuprebase (bad value)' ' + git config branch.autosetuprebase garbage && + test_must_fail git branch +' + +test_expect_success 'detect misconfigured autosetuprebase (no value)' ' + git config --unset branch.autosetuprebase && + echo "[branch] autosetuprebase" >>.git/config && + test_must_fail git branch && + git config --unset branch.autosetuprebase +' + +test_expect_success 'attempt to delete a branch without base and unmerged to HEAD' ' + git checkout my9 && + git config --unset branch.my8.merge && + test_must_fail git branch -d my8 +' + +test_expect_success 'attempt to delete a branch merged to its base' ' + # we are on my9 which is the initial commit; traditionally + # we would not have allowed deleting my8 that is not merged + # to my9, but it is set to track main that already has my8 + git config branch.my8.merge refs/heads/main && + git branch -d my8 +' + +test_expect_success 'attempt to delete a branch merged to its base' ' + git checkout main && + echo Third >>A && + git commit -m "Third commit" A && + git branch -t my10 my9 && + git branch -f my10 HEAD^ && + # we are on main which is at the third commit, and my10 + # is behind us, so traditionally we would have allowed deleting + # it; but my10 is set to track my9 that is further behind. 
+ test_must_fail git branch -d my10 +' + +test_expect_success 'branch --delete --force removes dangling branch' ' + git checkout main && + test_commit unstable && + hash=$(git rev-parse HEAD) && + objpath=$(echo $hash | sed -e "s|^..|.git/objects/&/|") && + git branch --no-track dangling && + mv $objpath $objpath.x && + test_when_finished "mv $objpath.x $objpath" && + git branch --delete --force dangling && + git for-each-ref refs/heads/dangling >actual && + test_must_be_empty actual +' + +test_expect_success 'use --edit-description' ' + write_script editor <<-\EOF && + echo "New contents" >"$1" + EOF + EDITOR=./editor git branch --edit-description && + write_script editor <<-\EOF && + git stripspace -s <"$1" >"EDITOR_OUTPUT" + EOF + EDITOR=./editor git branch --edit-description && + echo "New contents" >expect && + test_cmp expect EDITOR_OUTPUT +' + +test_expect_success 'detect typo in branch name when using --edit-description' ' + write_script editor <<-\EOF && + echo "New contents" >"$1" + EOF + test_must_fail env EDITOR=./editor git branch --edit-description no-such-branch +' + +test_expect_success 'refuse --edit-description on unborn branch for now' ' + test_when_finished "git checkout main" && + write_script editor <<-\EOF && + echo "New contents" >"$1" + EOF + git checkout --orphan unborn && + test_must_fail env EDITOR=./editor git branch --edit-description +' + +test_expect_success '--merged catches invalid object names' ' + test_must_fail git branch --merged 0000000000000000000000000000000000000000 +' + +test_expect_success '--list during rebase' ' + test_when_finished "reset_rebase" && + git checkout main && + FAKE_LINES="1 edit 2" && + export FAKE_LINES && + set_fake_editor && + git rebase -i HEAD~2 && + git branch --list >actual && + test_i18ngrep "rebasing main" actual +' + +test_expect_success '--list during rebase from detached HEAD' ' + test_when_finished "reset_rebase && git checkout main" && + git checkout main^0 && + oid=$(git rev-parse --short HEAD) && + FAKE_LINES="1 edit 2" && + export FAKE_LINES && + set_fake_editor && + git rebase -i HEAD~2 && + git branch --list >actual && + test_i18ngrep "rebasing detached HEAD $oid" actual +' + +test_expect_success 'tracking with unexpected .fetch refspec' ' + rm -rf a b c d && + git init -b main a && + ( + cd a && + test_commit a + ) && + git init -b main b && + ( + cd b && + test_commit b + ) && + git init -b main c && + ( + cd c && + test_commit c && + git remote add a ../a && + git remote add b ../b && + git fetch --all + ) && + git init -b main d && + ( + cd d && + git remote add c ../c && + git config remote.c.fetch "+refs/remotes/*:refs/remotes/*" && + git fetch c && + git branch --track local/a/main remotes/a/main && + test "$(git config branch.local/a/main.remote)" = "c" && + test "$(git config branch.local/a/main.merge)" = "refs/remotes/a/main" && + git rev-parse --verify a >expect && + git rev-parse --verify local/a/main >actual && + test_cmp expect actual + ) +' + +test_expect_success 'configured committerdate sort' ' + git init -b main sort && + ( + cd sort && + git config branch.sort committerdate && + test_commit initial && + git checkout -b a && + test_commit a && + git checkout -b c && + test_commit c && + git checkout -b b && + test_commit b && + git branch >actual && + cat >expect <<-\EOF && + main + a + c + * b + EOF + test_cmp expect actual + ) +' + +test_expect_success 'option override configured sort' ' + ( + cd sort && + git config branch.sort committerdate && + git branch --sort=refname >actual && + cat 
>expect <<-\EOF && + a + * b + c + main + EOF + test_cmp expect actual + ) +' + +test_expect_success 'invalid sort parameter in configuration' ' + ( + cd sort && + git config branch.sort "v:notvalid" && + test_must_fail git branch + ) +' + +test_done diff --git a/t/t3420-rebase-autostash.sh b/t/t3420-rebase-autostash.sh index 43fcb68f27e439..693934ee8be960 100755 --- a/t/t3420-rebase-autostash.sh +++ b/t/t3420-rebase-autostash.sh @@ -310,7 +310,7 @@ test_expect_success 'autostash is saved on editor failure with conflict' ' test_expect_success 'autostash with dirty submodules' ' test_when_finished "git reset --hard && git checkout main" && git checkout -b with-submodule && - git submodule add ./ sub && + git -c protocol.file.allow=always submodule add ./ sub && test_tick && git commit -m add-submodule && echo changed >sub/file0 && diff --git a/t/t3420-rebase-autostash.sh.orig b/t/t3420-rebase-autostash.sh.orig new file mode 100755 index 00000000000000..43fcb68f27e439 --- /dev/null +++ b/t/t3420-rebase-autostash.sh.orig @@ -0,0 +1,336 @@ +#!/bin/sh +# +# Copyright (c) 2013 Ramkumar Ramachandra +# + +test_description='git rebase --autostash tests' +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +test_expect_success setup ' + echo hello-world >file0 && + git add . && + test_tick && + git commit -m "initial commit" && + git checkout -b feature-branch && + echo another-hello >file1 && + echo goodbye >file2 && + git add . && + test_tick && + git commit -m "second commit" && + echo final-goodbye >file3 && + git add . && + test_tick && + git commit -m "third commit" && + git checkout -b unrelated-onto-branch main && + echo unrelated >file4 && + git add . && + test_tick && + git commit -m "unrelated commit" && + git checkout -b related-onto-branch main && + echo conflicting-change >file2 && + git add . && + test_tick && + git commit -m "related commit" && + remove_progress_re="$(printf "s/.*\\r//")" +' + +create_expected_success_apply () { + cat >expected <<-EOF + $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) + First, rewinding head to replay your work on top of it... + Applying: second commit + Applying: third commit + Applied autostash. + EOF +} + +create_expected_success_merge () { + q_to_cr >expected <<-EOF + $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) + Applied autostash. + Successfully rebased and updated refs/heads/rebased-feature-branch. + EOF +} + +create_expected_failure_apply () { + cat >expected <<-EOF + $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) + First, rewinding head to replay your work on top of it... + Applying: second commit + Applying: third commit + Applying autostash resulted in conflicts. + Your changes are safe in the stash. + You can run "git stash pop" or "git stash drop" at any time. + EOF +} + +create_expected_failure_merge () { + cat >expected <<-EOF + $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual) + Applying autostash resulted in conflicts. + Your changes are safe in the stash. + You can run "git stash pop" or "git stash drop" at any time. + Successfully rebased and updated refs/heads/rebased-feature-branch. 
+ EOF
+}
+
+testrebase () {
+ type=$1
+ dotest=$2
+
+ test_expect_success "rebase$type: dirty worktree, --no-autostash" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ test_when_finished git checkout feature-branch &&
+ echo dirty >>file3 &&
+ test_must_fail git rebase$type --no-autostash unrelated-onto-branch
+ '
+
+ test_expect_success "rebase$type: dirty worktree, non-conflicting rebase" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ echo dirty >>file3 &&
+ git rebase$type unrelated-onto-branch >actual 2>&1 &&
+ grep unrelated file4 &&
+ grep dirty file3 &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type --autostash: check output" '
+ test_when_finished git branch -D rebased-feature-branch &&
+ suffix=${type#\ --} && suffix=${suffix:-apply} &&
+ if test ${suffix} = "interactive"; then
+ suffix=merge
+ fi &&
+ create_expected_success_$suffix &&
+ sed "$remove_progress_re" <actual >actual2 &&
+ test_cmp expected actual2
+ '
+
+ test_expect_success "rebase$type: dirty index, non-conflicting rebase" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ git add file3 &&
+ git rebase$type unrelated-onto-branch &&
+ grep unrelated file4 &&
+ grep dirty file3 &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: conflicting rebase" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ rm -rf $dotest &&
+ git reset --hard &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: --continue" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ echo "conflicting-plus-goodbye" >file2 &&
+ git add file2 &&
+ git rebase --continue &&
+ test_path_is_missing $dotest/autostash &&
+ grep dirty file3 &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: --skip" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ git rebase --skip &&
+ test_path_is_missing $dotest/autostash &&
+ grep dirty file3 &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: --abort" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ git rebase --abort &&
+ 
test_path_is_missing $dotest/autostash &&
+ grep dirty file3 &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: --quit" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ test_when_finished git branch -D rebased-feature-branch &&
+ echo dirty >>file3 &&
+ git diff >expect &&
+ test_must_fail git rebase$type related-onto-branch &&
+ test_path_is_file $dotest/autostash &&
+ test_path_is_missing file3 &&
+ git rebase --quit &&
+ test_when_finished git stash drop &&
+ test_path_is_missing $dotest/autostash &&
+ ! grep dirty file3 &&
+ git stash show -p >actual &&
+ test_cmp expect actual &&
+ git reset --hard &&
+ git checkout feature-branch
+ '
+
+ test_expect_success "rebase$type: non-conflicting rebase, conflicting stash" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b rebased-feature-branch feature-branch &&
+ echo dirty >file4 &&
+ git add file4 &&
+ git rebase$type unrelated-onto-branch >actual 2>&1 &&
+ test_path_is_missing $dotest &&
+ git reset --hard &&
+ grep unrelated file4 &&
+ ! grep dirty file4 &&
+ git checkout feature-branch &&
+ git stash pop &&
+ grep dirty file4
+ '
+
+ test_expect_success "rebase$type: check output with conflicting stash" '
+ test_when_finished git branch -D rebased-feature-branch &&
+ suffix=${type#\ --} && suffix=${suffix:-apply} &&
+ if test ${suffix} = "interactive"; then
+ suffix=merge
+ fi &&
+ create_expected_failure_$suffix &&
+ sed "$remove_progress_re" <actual >actual2 &&
+ test_cmp expected actual2
+ '
+}
+
+test_expect_success "rebase: fast-forward rebase" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b behind-feature-branch feature-branch~1 &&
+ test_when_finished git branch -D behind-feature-branch &&
+ echo dirty >>file1 &&
+ git rebase feature-branch &&
+ grep dirty file1 &&
+ git checkout feature-branch
+'
+
+test_expect_success "rebase: noop rebase" '
+ test_config rebase.autostash true &&
+ git reset --hard &&
+ git checkout -b same-feature-branch feature-branch &&
+ test_when_finished git branch -D same-feature-branch &&
+ echo dirty >>file1 &&
+ git rebase feature-branch &&
+ grep dirty file1 &&
+ git checkout feature-branch
+'
+
+testrebase " --apply" .git/rebase-apply
+testrebase " --merge" .git/rebase-merge
+testrebase " --interactive" .git/rebase-merge
+
+test_expect_success 'abort rebase -i with --autostash' '
+ test_when_finished "git reset --hard" &&
+ echo uncommitted-content >file0 &&
+ (
+ write_script abort-editor.sh <<-\EOF &&
+ echo >"$1"
+ EOF
+ test_set_editor "$(pwd)/abort-editor.sh" &&
+ test_must_fail git rebase -i --autostash HEAD^ &&
+ rm -f abort-editor.sh
+ ) &&
+ echo uncommitted-content >expected &&
+ test_cmp expected file0
+'
+
+test_expect_success 'restore autostash on editor failure' '
+ test_when_finished "git reset --hard" &&
+ echo uncommitted-content >file0 &&
+ (
+ test_set_editor "false" &&
+ test_must_fail git rebase -i --autostash HEAD^
+ ) &&
+ echo uncommitted-content >expected &&
+ test_cmp expected file0
+'
+
+test_expect_success 'autostash is saved on editor failure with conflict' '
+ test_when_finished "git reset --hard" &&
+ echo uncommitted-content >file0 &&
+ (
+ write_script abort-editor.sh <<-\EOF &&
+ echo conflicting-content >file0
+ exit 1
+ EOF
+ test_set_editor "$(pwd)/abort-editor.sh" &&
+ test_must_fail git rebase -i --autostash HEAD^ &&
+ rm -f abort-editor.sh
+ ) &&
+ echo conflicting-content >expected &&
+ test_cmp expected 
file0 && + git checkout file0 && + git stash pop && + echo uncommitted-content >expected && + test_cmp expected file0 +' + +test_expect_success 'autostash with dirty submodules' ' + test_when_finished "git reset --hard && git checkout main" && + git checkout -b with-submodule && + git submodule add ./ sub && + test_tick && + git commit -m add-submodule && + echo changed >sub/file0 && + git rebase -i --autostash HEAD +' + +test_expect_success 'branch is left alone when possible' ' + git checkout -b unchanged-branch && + echo changed >file0 && + git rebase --autostash unchanged-branch && + test changed = "$(cat file0)" && + test unchanged-branch = "$(git rev-parse --abbrev-ref HEAD)" +' + +test_expect_success 'never change active branch' ' + git checkout -b not-the-feature-branch unrelated-onto-branch && + test_when_finished "git reset --hard && git checkout main" && + echo changed >file0 && + git rebase --autostash not-the-feature-branch feature-branch && + test_cmp_rev not-the-feature-branch unrelated-onto-branch +' + +test_done diff --git a/t/t3426-rebase-submodule.sh b/t/t3426-rebase-submodule.sh index 0ad3a07bf47093..fb21f675bb94c1 100755 --- a/t/t3426-rebase-submodule.sh +++ b/t/t3426-rebase-submodule.sh @@ -47,7 +47,8 @@ test_expect_success 'rebase interactive ignores modified submodules' ' git init sub && git -C sub commit --allow-empty -m "Initial commit" && git init super && - git -C super submodule add ../sub && + git -c protocol.file.allow=always \ + -C super submodule add ../sub && git -C super config submodule.sub.ignore dirty && >super/foo && git -C super add foo && diff --git a/t/t3512-cherry-pick-submodule.sh b/t/t3512-cherry-pick-submodule.sh index c657840db33b6e..f22d1ddead1ac9 100755 --- a/t/t3512-cherry-pick-submodule.sh +++ b/t/t3512-cherry-pick-submodule.sh @@ -16,6 +16,8 @@ fi test_submodule_switch "cherry-pick" test_expect_success 'unrelated submodule/file conflict is ignored' ' + test_config_global protocol.file.allow always && + test_create_repo sub && touch sub/file && diff --git a/t/t3512-cherry-pick-submodule.sh.orig b/t/t3512-cherry-pick-submodule.sh.orig new file mode 100755 index 00000000000000..c657840db33b6e --- /dev/null +++ b/t/t3512-cherry-pick-submodule.sh.orig @@ -0,0 +1,54 @@ +#!/bin/sh + +test_description='cherry-pick can handle submodules' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh +. 
"$TEST_DIRECTORY"/lib-submodule-update.sh + +if test "$GIT_TEST_MERGE_ALGORITHM" != ort +then + KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1 + KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1 +fi +test_submodule_switch "cherry-pick" + +test_expect_success 'unrelated submodule/file conflict is ignored' ' + test_create_repo sub && + + touch sub/file && + git -C sub add file && + git -C sub commit -m "add a file in a submodule" && + + test_create_repo a_repo && + ( + cd a_repo && + >a_file && + git add a_file && + git commit -m "add a file" && + + git branch test && + git checkout test && + + mkdir sub && + >sub/content && + git add sub/content && + git commit -m "add a regular folder with name sub" && + + echo "123" >a_file && + git add a_file && + git commit -m "modify a file" && + + git checkout main && + + git submodule add ../sub sub && + git submodule update sub && + git commit -m "add a submodule info folder with name sub" && + + git cherry-pick test + ) +' + +test_done diff --git a/t/t3600-rm.sh b/t/t3600-rm.sh index bb9ef35dac082e..a0eda54cde0632 100755 --- a/t/t3600-rm.sh +++ b/t/t3600-rm.sh @@ -336,7 +336,7 @@ test_expect_success 'rm removes empty submodules from work tree' ' test_expect_success 'rm removes removed submodule from index and .gitmodules' ' git reset --hard && - git submodule update && + git -c protocol.file.allow=always submodule update && rm -rf submod && git rm submod && git status -s -uno --ignore-submodules=none >actual && @@ -642,6 +642,7 @@ cat >expect.deepmodified <foo && + git add foo && + git rm --cached foo +' + +test_expect_success 'Test that git rm --cached foo succeeds if the index matches the file' ' + echo content >foo && + git add foo && + git commit -m foo && + echo "other content" >foo && + git rm --cached foo +' + +test_expect_success 'Test that git rm --cached foo fails if the index matches neither the file nor HEAD' ' + echo content >foo && + git add foo && + git commit -m foo --allow-empty && + echo "other content" >foo && + git add foo && + echo "yet another content" >foo && + test_must_fail git rm --cached foo +' + +test_expect_success 'Test that git rm --cached -f foo works in case where --cached only did not' ' + echo content >foo && + git add foo && + git commit -m foo --allow-empty && + echo "other content" >foo && + git add foo && + echo "yet another content" >foo && + git rm --cached -f foo +' + +test_expect_success 'Post-check that foo exists but is not in index after git rm foo' ' + test_path_is_file foo && + test_must_fail git ls-files --error-unmatch foo +' + +test_expect_success 'Pre-check that bar exists and is in index before "git rm bar"' ' + test_path_is_file bar && + git ls-files --error-unmatch bar +' + +test_expect_success 'Test that "git rm bar" succeeds' ' + git rm bar +' + +test_expect_success 'Post-check that bar does not exist and is not in index after "git rm -f bar"' ' + test_path_is_missing bar && + test_must_fail git ls-files --error-unmatch bar +' + +test_expect_success 'Test that "git rm -- -q" succeeds (remove a file that looks like an option)' ' + git rm -- -q +' + +test_expect_success FUNNYNAMES 'Test that "git rm -f" succeeds with embedded space, tab, or newline characters.' ' + git rm -f "space embedded" "tab embedded" "newline${LF}embedded" +' + +test_expect_success SANITY 'Test that "git rm -f" fails if its rm fails' ' + test_when_finished "chmod 775 ." && + chmod a-w . 
&& + test_must_fail git rm -f baz +' + +test_expect_success 'When the rm in "git rm -f" fails, it should not remove the file from the index' ' + git ls-files --error-unmatch baz +' + +test_expect_success 'Remove nonexistent file with --ignore-unmatch' ' + git rm --ignore-unmatch nonexistent +' + +test_expect_success '"rm" command printed' ' + echo frotz >test-file && + git add test-file && + git commit -m "add file for rm test" && + git rm test-file >rm-output.raw && + grep "^rm " rm-output.raw >rm-output && + test_line_count = 1 rm-output && + rm -f test-file rm-output.raw rm-output && + git commit -m "remove file from rm test" +' + +test_expect_success '"rm" command suppressed with --quiet' ' + echo frotz >test-file && + git add test-file && + git commit -m "add file for rm --quiet test" && + git rm --quiet test-file >rm-output && + test_must_be_empty rm-output && + rm -f test-file rm-output && + git commit -m "remove file from rm --quiet test" +' + +# Now, failure cases. +test_expect_success 'Re-add foo and baz' ' + git add foo baz && + git ls-files --error-unmatch foo baz +' + +test_expect_success 'Modify foo -- rm should refuse' ' + echo >>foo && + test_must_fail git rm foo baz && + test_path_is_file foo && + test_path_is_file baz && + git ls-files --error-unmatch foo baz +' + +test_expect_success 'Modified foo -- rm -f should work' ' + git rm -f foo baz && + test_path_is_missing foo && + test_path_is_missing baz && + test_must_fail git ls-files --error-unmatch foo && + test_must_fail git ls-files --error-unmatch bar +' + +test_expect_success 'Re-add foo and baz for HEAD tests' ' + echo frotz >foo && + git checkout HEAD -- baz && + git add foo baz && + git ls-files --error-unmatch foo baz +' + +test_expect_success 'foo is different in index from HEAD -- rm should refuse' ' + test_must_fail git rm foo baz && + test_path_is_file foo && + test_path_is_file baz && + git ls-files --error-unmatch foo baz +' + +test_expect_success 'but with -f it should work.' ' + git rm -f foo baz && + test_path_is_missing foo && + test_path_is_missing baz && + test_must_fail git ls-files --error-unmatch foo && + test_must_fail git ls-files --error-unmatch baz +' + +test_expect_success 'refuse to remove cached empty file with modifications' ' + >empty && + git add empty && + echo content >empty && + test_must_fail git rm --cached empty +' + +test_expect_success 'remove intent-to-add file without --force' ' + echo content >intent-to-add && + git add -N intent-to-add && + git rm --cached intent-to-add +' + +test_expect_success 'Recursive test setup' ' + mkdir -p frotz && + echo qfwfq >frotz/nitfol && + git add frotz && + git commit -m "subdir test" +' + +test_expect_success 'Recursive without -r fails' ' + test_must_fail git rm frotz && + test_path_is_dir frotz && + test_path_is_file frotz/nitfol +' + +test_expect_success 'Recursive with -r but dirty' ' + echo qfwfq >>frotz/nitfol && + test_must_fail git rm -r frotz && + test_path_is_dir frotz && + test_path_is_file frotz/nitfol +' + +test_expect_success 'Recursive with -r -f' ' + git rm -f -r frotz && + test_path_is_missing frotz/nitfol && + test_path_is_missing frotz +' + +test_expect_success 'Remove nonexistent file returns nonzero exit status' ' + test_must_fail git rm nonexistent +' + +test_expect_success 'Call "rm" from outside the work tree' ' + mkdir repo && + ( + cd repo && + git init && + echo something >somefile && + git add somefile && + git commit -m "add a file" && + ( + cd .. 
&& + git --git-dir=repo/.git --work-tree=repo rm somefile + ) && + test_must_fail git ls-files --error-unmatch somefile + ) +' + +test_expect_success 'refresh index before checking if it is up-to-date' ' + git reset --hard && + test-tool chmtime -86400 frotz/nitfol && + git rm frotz/nitfol && + test_path_is_missing frotz/nitfol +' + +choke_git_rm_setup() { + git reset -q --hard && + test_when_finished "rm -f .git/index.lock && git reset -q --hard" && + i=0 && + hash=$(test_oid deadbeef) && + while test $i -lt 12000 + do + echo "100644 $hash 0 some-file-$i" + i=$(( $i + 1 )) + done | git update-index --index-info +} + +test_expect_success 'choking "git rm" should not let it die with cruft (induce SIGPIPE)' ' + choke_git_rm_setup && + # git command is intentionally placed upstream of pipe to induce SIGPIPE + git rm -n "some-file-*" | : && + test_path_is_missing .git/index.lock +' + + +test_expect_success !MINGW 'choking "git rm" should not let it die with cruft (induce and check SIGPIPE)' ' + choke_git_rm_setup && + OUT=$( ((trap "" PIPE; git rm -n "some-file-*"; echo $? 1>&3) | :) 3>&1 ) && + test_match_signal 13 "$OUT" && + test_path_is_missing .git/index.lock +' + +test_expect_success 'Resolving by removal is not a warning-worthy event' ' + git reset -q --hard && + test_when_finished "rm -f .git/index.lock msg && git reset -q --hard" && + blob=$(echo blob | git hash-object -w --stdin) && + for stage in 1 2 3 + do + echo "100644 $blob $stage blob" + done | git update-index --index-info && + git rm blob >msg 2>&1 && + test_i18ngrep ! "needs merge" msg && + test_must_fail git ls-files -s --error-unmatch blob +' + +test_expect_success 'rm removes subdirectories recursively' ' + mkdir -p dir/subdir/subsubdir && + echo content >dir/subdir/subsubdir/file && + git add dir/subdir/subsubdir/file && + git rm -f dir/subdir/subsubdir/file && + test_path_is_missing dir +' + +cat >expect <expect.modified <expect.modified_inside <expect.modified_untracked <expect.cached <expect.both_deleted<actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm removes removed submodule from index and .gitmodules' ' + git reset --hard && + git submodule update && + rm -rf submod && + git rm submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm removes work tree of unmodified submodules' ' + git reset --hard && + git submodule update && + git rm submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm removes a submodule with a trailing /' ' + git reset --hard && + git submodule update && + git rm submod/ && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm fails when given a file with a trailing /' ' + test_must_fail git rm empty/ +' + +test_expect_success 'rm succeeds when given a directory with a trailing /' ' + git rm -r frotz/ +' + +test_expect_success 'rm of a populated submodule with different HEAD fails unless forced' ' + git reset --hard && + git submodule update && + 
git -C submod checkout HEAD^ && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm --cached leaves work tree of populated submodules and .gitmodules alone' ' + git reset --hard && + git submodule update && + git rm --cached submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno >actual && + test_cmp expect.cached actual && + git config -f .gitmodules submodule.sub.url && + git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm --dry-run does not touch the submodule or .gitmodules' ' + git reset --hard && + git submodule update && + git rm -n submod && + test_path_is_file submod/.git && + git diff-index --exit-code HEAD +' + +test_expect_success 'rm does not complain when no .gitmodules file is found' ' + git reset --hard && + git submodule update && + git rm .gitmodules && + git rm submod >actual 2>actual.err && + test_must_be_empty actual.err && + test_path_is_missing submod && + test_path_is_missing submod/.git && + git status -s -uno >actual && + test_cmp expect.both_deleted actual +' + +test_expect_success 'rm will error out on a modified .gitmodules file unless staged' ' + git reset --hard && + git submodule update && + git config -f .gitmodules foo.bar true && + test_must_fail git rm submod >actual 2>actual.err && + test_file_not_empty actual.err && + test_path_is_dir submod && + test_path_is_file submod/.git && + git diff-files --quiet -- submod && + git add .gitmodules && + git rm submod >actual 2>actual.err && + test_must_be_empty actual.err && + test_path_is_missing submod && + test_path_is_missing submod/.git && + git status -s -uno >actual && + test_cmp expect actual +' +test_expect_success 'rm will not error out on .gitmodules file with zero stat data' ' + git reset --hard && + git submodule update && + git read-tree HEAD && + git rm submod && + test_path_is_missing submod +' + +test_expect_success 'rm issues a warning when section is not found in .gitmodules' ' + git reset --hard && + git submodule update && + git config -f .gitmodules --remove-section submodule.sub && + git add .gitmodules && + echo "warning: Could not find section in .gitmodules where path=submod" >expect.err && + git rm submod >actual 2>actual.err && + test_cmp expect.err actual.err && + test_path_is_missing submod && + test_path_is_missing submod/.git && + git status -s -uno >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated submodule with modifications fails unless forced' ' + git reset --hard && + git submodule update && + echo X >submod/empty && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified_inside actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated submodule with untracked files fails unless forced' ' + git reset --hard && + git submodule update && + echo X >submod/untracked && + test_must_fail git rm submod && + 
test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified_untracked actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'setup submodule conflict' ' + git reset --hard && + git submodule update && + git checkout -b branch1 && + echo 1 >nitfol && + git add nitfol && + git commit -m "added nitfol 1" && + git checkout -b branch2 main && + echo 2 >nitfol && + git add nitfol && + git commit -m "added nitfol 2" && + git checkout -b conflict1 main && + git -C submod fetch && + git -C submod checkout branch1 && + git add submod && + git commit -m "submod 1" && + git checkout -b conflict2 main && + git -C submod checkout branch2 && + git add submod && + git commit -m "submod 2" +' + +cat >expect.conflict <actual && + test_cmp expect actual +' + +test_expect_success 'rm of a conflicted populated submodule with different HEAD fails unless forced' ' + git checkout conflict1 && + git reset --hard && + git submodule update && + git -C submod checkout HEAD^ && + test_must_fail git merge conflict2 && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.conflict actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm of a conflicted populated submodule with modifications fails unless forced' ' + git checkout conflict1 && + git reset --hard && + git submodule update && + echo X >submod/empty && + test_must_fail git merge conflict2 && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.conflict actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual && + test_must_fail git config -f .gitmodules submodule.sub.url && + test_must_fail git config -f .gitmodules submodule.sub.path +' + +test_expect_success 'rm of a conflicted populated submodule with untracked files fails unless forced' ' + git checkout conflict1 && + git reset --hard && + git submodule update && + echo X >submod/untracked && + test_must_fail git merge conflict2 && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.conflict actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a conflicted populated submodule with a .git directory fails even when forced' ' + git checkout conflict1 && + git reset --hard && + git submodule update && + ( + cd submod && + rm .git && + cp -R ../.git/modules/sub .git && + GIT_WORK_TREE=. 
git config --unset core.worktree + ) && + test_must_fail git merge conflict2 && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_dir submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.conflict actual && + test_must_fail git rm -f submod && + test_path_is_dir submod && + test_path_is_dir submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.conflict actual && + git merge --abort && + rm -rf submod +' + +test_expect_success 'rm of a conflicted unpopulated submodule succeeds' ' + git checkout conflict1 && + git reset --hard && + test_must_fail git merge conflict2 && + git rm submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated submodule with a .git directory migrates git dir' ' + git checkout -f main && + git reset --hard && + git submodule update && + ( + cd submod && + rm .git && + cp -R ../.git/modules/sub .git && + GIT_WORK_TREE=. git config --unset core.worktree && + rm -r ../.git/modules/sub + ) && + git rm submod 2>output.err && + test_path_is_missing submod && + test_path_is_missing submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_file_not_empty actual && + test_i18ngrep Migrating output.err +' + +cat >expect.deepmodified <actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated nested submodule with different nested HEAD fails unless forced' ' + git reset --hard && + git submodule update --recursive && + git -C submod/subsubmod checkout HEAD^ && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified_inside actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated nested submodule with nested modifications fails unless forced' ' + git reset --hard && + git submodule update --recursive && + echo X >submod/subsubmod/empty && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified_inside actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success 'rm of a populated nested submodule with nested untracked files fails unless forced' ' + git reset --hard && + git submodule update --recursive && + echo X >submod/subsubmod/untracked && + test_must_fail git rm submod && + test_path_is_dir submod && + test_path_is_file submod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect.modified_untracked actual && + git rm -f submod && + test_path_is_missing submod && + git status -s -uno --ignore-submodules=none >actual && + test_cmp expect actual +' + +test_expect_success "rm absorbs submodule's nested .git directory" ' + git reset --hard && + git submodule update --recursive && + ( + cd submod/subsubmod && + rm .git && + mv ../../.git/modules/sub/modules/sub .git && + GIT_WORK_TREE=. 
git config --unset core.worktree + ) && + git rm submod 2>output.err && + test_path_is_missing submod && + test_path_is_missing submod/subsubmod/.git && + git status -s -uno --ignore-submodules=none >actual && + test_file_not_empty actual && + test_i18ngrep Migrating output.err +' + +test_expect_success 'checking out a commit after submodule removal needs manual updates' ' + git commit -m "submodule removal" submod .gitmodules && + git checkout HEAD^ && + git submodule update && + git checkout -q HEAD^ && + git checkout -q main 2>actual && + test_i18ngrep "^warning: unable to rmdir '\''submod'\'':" actual && + git status -s submod >actual && + echo "?? submod/" >expected && + test_cmp expected actual && + rm -rf submod && + git status -s -uno --ignore-submodules=none >actual && + test_must_be_empty actual +' + +test_expect_success 'rm of d/f when d has become a non-directory' ' + rm -rf d && + mkdir d && + >d/f && + git add d && + rm -rf d && + >d && + git rm d/f && + test_must_fail git rev-parse --verify :d/f && + test_path_is_file d +' + +test_expect_success SYMLINKS 'rm of d/f when d has become a dangling symlink' ' + rm -rf d && + mkdir d && + >d/f && + git add d && + rm -rf d && + ln -s nonexistent d && + git rm d/f && + test_must_fail git rev-parse --verify :d/f && + test -h d && + test_path_is_missing d +' + +test_expect_success 'rm of file when it has become a directory' ' + rm -rf d && + >d && + git add d && + rm -f d && + mkdir d && + >d/f && + test_must_fail git rm d && + git rev-parse --verify :d && + test_path_is_file d/f +' + +test_expect_success SYMLINKS 'rm across a symlinked leading path (no index)' ' + rm -rf d e && + mkdir e && + echo content >e/f && + ln -s e d && + git add -A e d && + git commit -m "symlink d to e, e/f exists" && + test_must_fail git rm d/f && + git rev-parse --verify :d && + git rev-parse --verify :e/f && + test -h d && + test_path_is_file e/f +' + +test_expect_failure SYMLINKS 'rm across a symlinked leading path (w/ index)' ' + rm -rf d e && + mkdir d && + echo content >d/f && + git add -A e d && + git commit -m "d/f exists" && + mv d e && + ln -s e d && + test_must_fail git rm d/f && + git rev-parse --verify :d/f && + test -h d && + test_path_is_file e/f +' + +test_expect_success 'setup for testing rm messages' ' + >bar.txt && + >foo.txt && + git add bar.txt foo.txt +' + +test_expect_success 'rm files with different staged content' ' + cat >expect <<-\EOF && + error: the following files have staged content different from both the + file and the HEAD: + bar.txt + foo.txt + (use -f to force removal) + EOF + echo content1 >foo.txt && + echo content1 >bar.txt && + test_must_fail git rm foo.txt bar.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm files with different staged content without hints' ' + cat >expect <<-\EOF && + error: the following files have staged content different from both the + file and the HEAD: + bar.txt + foo.txt + EOF + echo content2 >foo.txt && + echo content2 >bar.txt && + test_must_fail git -c advice.rmhints=false rm foo.txt bar.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm file with local modification' ' + cat >expect <<-\EOF && + error: the following file has local modifications: + foo.txt + (use --cached to keep the file, or -f to force removal) + EOF + git commit -m "testing rm 3" && + echo content3 >foo.txt && + test_must_fail git rm foo.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm file with local modification without hints' ' + cat >expect <<-\EOF && 
+ error: the following file has local modifications: + bar.txt + EOF + echo content4 >bar.txt && + test_must_fail git -c advice.rmhints=false rm bar.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm file with changes in the index' ' + cat >expect <<-\EOF && + error: the following file has changes staged in the index: + foo.txt + (use --cached to keep the file, or -f to force removal) + EOF + git reset --hard && + echo content5 >foo.txt && + git add foo.txt && + test_must_fail git rm foo.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm file with changes in the index without hints' ' + cat >expect <<-\EOF && + error: the following file has changes staged in the index: + foo.txt + EOF + test_must_fail git -c advice.rmhints=false rm foo.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm files with two different errors' ' + cat >expect <<-\EOF && + error: the following file has staged content different from both the + file and the HEAD: + foo1.txt + (use -f to force removal) + error: the following file has changes staged in the index: + bar1.txt + (use --cached to keep the file, or -f to force removal) + EOF + echo content >foo1.txt && + git add foo1.txt && + echo content6 >foo1.txt && + echo content6 >bar1.txt && + git add bar1.txt && + test_must_fail git rm bar1.txt foo1.txt 2>actual && + test_cmp expect actual +' + +test_expect_success 'rm empty string should fail' ' + test_must_fail git rm -rf "" +' + +test_done diff --git a/t/t3906-stash-submodule.sh b/t/t3906-stash-submodule.sh index a52e53dd2da481..0f7348ec21b882 100755 --- a/t/t3906-stash-submodule.sh +++ b/t/t3906-stash-submodule.sh @@ -36,7 +36,7 @@ setup_basic () { git init main && ( cd main && - git submodule add ../sub && + git -c protocol.file.allow=always submodule add ../sub && test_commit main_file ) } diff --git a/t/t4059-diff-submodule-not-initialized.sh b/t/t4059-diff-submodule-not-initialized.sh index 49bca7b48d910f..d489230df89663 100755 --- a/t/t4059-diff-submodule-not-initialized.sh +++ b/t/t4059-diff-submodule-not-initialized.sh @@ -49,7 +49,7 @@ test_expect_success 'setup - submodules' ' ' test_expect_success 'setup - git submodule add' ' - git submodule add ./sm2 sm1 && + git -c protocol.file.allow=always submodule add ./sm2 sm1 && commit_file sm1 .gitmodules && git diff-tree -p --no-commit-id --submodule=log HEAD -- sm1 >actual && cat >expected <<-EOF && diff --git a/t/t4060-diff-submodule-option-diff-format.sh b/t/t4060-diff-submodule-option-diff-format.sh index dc7b242697d776..a90add57f7d647 100755 --- a/t/t4060-diff-submodule-option-diff-format.sh +++ b/t/t4060-diff-submodule-option-diff-format.sh @@ -759,9 +759,9 @@ test_expect_success 'diff --submodule=diff with .git file' ' ' test_expect_success 'setup nested submodule' ' - git submodule add -f ./sm2 && + git -c protocol.file.allow=always submodule add -f ./sm2 && git commit -a -m "add sm2" && - git -C sm2 submodule add ../sm2 nested && + git -c protocol.file.allow=always -C sm2 submodule add ../sm2 nested && git -C sm2 commit -a -m "nested sub" && head10=$(git -C sm2 rev-parse --short --verify HEAD) ' diff --git a/t/t4067-diff-partial-clone.sh b/t/t4067-diff-partial-clone.sh index 804f2a82e8315d..28f42a4046e08e 100755 --- a/t/t4067-diff-partial-clone.sh +++ b/t/t4067-diff-partial-clone.sh @@ -77,6 +77,7 @@ test_expect_success 'diff skips same-OID blobs' ' test_expect_success 'when fetching missing objects, diff skips GITLINKs' ' test_when_finished "rm -rf sub server client trace" && + 
test_config_global protocol.file.allow always && test_create_repo sub && test_commit -C sub first && diff --git a/t/t4115-apply-symlink.sh b/t/t4115-apply-symlink.sh index 872fcda6cb6dce..2b034ff771465f 100755 --- a/t/t4115-apply-symlink.sh +++ b/t/t4115-apply-symlink.sh @@ -44,4 +44,100 @@ test_expect_success 'apply --index symlink patch' ' ' +test_expect_success 'symlink setup' ' + ln -s .git symlink && + git add symlink && + git commit -m "add symlink" +' + +test_expect_success SYMLINKS 'symlink escape when creating new files' ' + test_when_finished "git reset --hard && git clean -dfx" && + + cat >patch <<-EOF && + diff --git a/symlink b/renamed-symlink + similarity index 100% + rename from symlink + rename to renamed-symlink + -- + diff --git /dev/null b/renamed-symlink/create-me + new file mode 100644 + index 0000000..039727e + --- /dev/null + +++ b/renamed-symlink/create-me + @@ -0,0 +1,1 @@ + +busted + EOF + + test_must_fail git apply patch 2>stderr && + cat >expected_stderr <<-EOF && + error: affected file ${SQ}renamed-symlink/create-me${SQ} is beyond a symbolic link + EOF + test_cmp expected_stderr stderr && + ! test_path_exists .git/create-me +' + +test_expect_success SYMLINKS 'symlink escape when modifying file' ' + test_when_finished "git reset --hard && git clean -dfx" && + touch .git/modify-me && + + cat >patch <<-EOF && + diff --git a/symlink b/renamed-symlink + similarity index 100% + rename from symlink + rename to renamed-symlink + -- + diff --git a/renamed-symlink/modify-me b/renamed-symlink/modify-me + index 1111111..2222222 100644 + --- a/renamed-symlink/modify-me + +++ b/renamed-symlink/modify-me + @@ -0,0 +1,1 @@ + +busted + EOF + + test_must_fail git apply patch 2>stderr && + cat >expected_stderr <<-EOF && + error: renamed-symlink/modify-me: No such file or directory + EOF + test_cmp expected_stderr stderr && + test_must_be_empty .git/modify-me +' + +test_expect_success SYMLINKS 'symlink escape when deleting file' ' + test_when_finished "git reset --hard && git clean -dfx && rm .git/delete-me" && + touch .git/delete-me && + + cat >patch <<-EOF && + diff --git a/symlink b/renamed-symlink + similarity index 100% + rename from symlink + rename to renamed-symlink + -- + diff --git a/renamed-symlink/delete-me b/renamed-symlink/delete-me + deleted file mode 100644 + index 1111111..0000000 100644 + EOF + + test_must_fail git apply patch 2>stderr && + cat >expected_stderr <<-EOF && + error: renamed-symlink/delete-me: No such file or directory + EOF + test_cmp expected_stderr stderr && + test_path_is_file .git/delete-me +' + +test_expect_success SYMLINKS '--reject removes .rej symlink if it exists' ' + test_when_finished "git reset --hard && git clean -dfx" && + + test_commit file && + echo modified >file.t && + git diff -- file.t >patch && + echo modified-again >file.t && + + ln -s foo file.t.rej && + test_must_fail git apply patch --reject 2>err && + test_i18ngrep "Rejected hunk" err && + test_path_is_missing foo && + test_path_is_file file.t.rej +' + test_done diff --git a/t/t4205-log-pretty-formats.sh b/t/t4205-log-pretty-formats.sh index 5865daa8f8d223..c66db6391f3a0a 100755 --- a/t/t4205-log-pretty-formats.sh +++ b/t/t4205-log-pretty-formats.sh @@ -1002,4 +1002,80 @@ test_expect_success '%(describe:exclude=...) vs git describe --exclude ...' 
' test_cmp expect actual ' +test_expect_success 'log --pretty with space stealing' ' + printf mm0 >expect && + git log -1 --pretty="format:mm%>>|(1)%x30" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with invalid padding format' ' + printf "%s%%<(20" "$(git rev-parse HEAD)" >expect && + git log -1 --pretty="format:%H%<(20" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with magical wrapping directives' ' + commit_id=$(git commit-tree HEAD^{tree} -m "describe me") && + git tag describe-me $commit_id && + printf "\n(tag:\ndescribe-me)%%+w(2)" >expect && + git log -1 --pretty="format:%w(1)%+d%+w(2)" $commit_id >actual && + test_cmp expect actual +' + +test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' ' + printf "%%w(2147483649,1,1)0" >expect && + git log -1 --pretty="format:%w(2147483649,1,1)%x30" >actual && + test_cmp expect actual && + printf "%%w(1,2147483649,1)0" >expect && + git log -1 --pretty="format:%w(1,2147483649,1)%x30" >actual && + test_cmp expect actual && + printf "%%w(1,1,2147483649)0" >expect && + git log -1 --pretty="format:%w(1,1,2147483649)%x30" >actual && + test_cmp expect actual +' + +test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing padding directive' ' + printf "%%<(2147483649)0" >expect && + git log -1 --pretty="format:%<(2147483649)%x30" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with padding and preceding control chars' ' + printf "\20\20 0" >expect && + git log -1 --pretty="format:%x10%x10%>|(4)%x30" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty truncation with control chars' ' + test_commit "$(printf "\20\20\20\20xxxx")" file contents commit-with-control-chars && + printf "\20\20\20\20x.." >expect && + git log -1 --pretty="format:%<(3,trunc)%s" commit-with-control-chars >actual && + test_cmp expect actual +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' ' + # We only assert that this command does not crash. This needs to be + # executed with the address sanitizer to demonstrate failure. + git log -1 --pretty="format:%>(2147483646)%x41%41%>(2147483646)%x41" >/dev/null +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'set up huge commit' ' + test-tool genzeros 2147483649 | tr "\000" "1" >expect && + huge_commit=$(git commit-tree -F expect HEAD^{tree}) +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' ' + git log -1 --format="%B%<(1)%x30" $huge_commit >actual && + echo 0 >>expect && + test_cmp expect actual +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message does not cause allocation failure' ' + test_must_fail git log -1 --format="%<(1)%B" $huge_commit 2>error && + cat >expect <<-EOF && + fatal: number too large to represent as int on this platform: 2147483649 + EOF + test_cmp expect error +' + test_done diff --git a/t/t4205-log-pretty-formats.sh.orig b/t/t4205-log-pretty-formats.sh.orig new file mode 100755 index 00000000000000..197b642a5d3742 --- /dev/null +++ b/t/t4205-log-pretty-formats.sh.orig @@ -0,0 +1,1075 @@ +#!/bin/sh +# +# Copyright (c) 2010, Will Palmer +# Copyright (c) 2011, Alexey Shumkin (+ non-UTF-8 commit encoding tests) +# + +test_description='Test pretty formats' +. ./test-lib.sh + +# Tested non-UTF-8 encoding +test_encoding="ISO8859-1" + +sample_utf8_part=$(printf "f\303\244ng") + +commit_msg () { + # String "initial. 
initial" partly in German + # (translated with Google Translate), + # encoded in UTF-8, used as a commit log message below. + msg="initial. an${sample_utf8_part}lich\n" + if test -n "$1" + then + printf "$msg" | iconv -f utf-8 -t "$1" + else + printf "$msg" + fi +} + +test_expect_success 'set up basic repos' ' + >foo && + >bar && + git add foo && + test_tick && + git config i18n.commitEncoding $test_encoding && + commit_msg $test_encoding | git commit -F - && + git add bar && + test_tick && + git commit -m "add bar" && + git config --unset i18n.commitEncoding +' + +test_expect_success 'alias builtin format' ' + git log --pretty=oneline >expected && + git config pretty.test-alias oneline && + git log --pretty=test-alias >actual && + test_cmp expected actual +' + +test_expect_success 'alias masking builtin format' ' + git log --pretty=oneline >expected && + git config pretty.oneline "%H" && + git log --pretty=oneline >actual && + test_cmp expected actual +' + +test_expect_success 'alias user-defined format' ' + git log --pretty="format:%h" >expected && + git config pretty.test-alias "format:%h" && + git log --pretty=test-alias >actual && + test_cmp expected actual +' + +test_expect_success 'alias user-defined tformat with %s (ISO8859-1 encoding)' ' + git config i18n.logOutputEncoding $test_encoding && + git log --oneline >expected-s && + git log --pretty="tformat:%h %s" >actual-s && + git config --unset i18n.logOutputEncoding && + test_cmp expected-s actual-s +' + +test_expect_success 'alias user-defined tformat with %s (utf-8 encoding)' ' + git log --oneline >expected-s && + git log --pretty="tformat:%h %s" >actual-s && + test_cmp expected-s actual-s +' + +test_expect_success 'alias user-defined tformat' ' + git log --pretty="tformat:%h" >expected && + git config pretty.test-alias "tformat:%h" && + git log --pretty=test-alias >actual && + test_cmp expected actual +' + +test_expect_success 'alias non-existent format' ' + git config pretty.test-alias format-that-will-never-exist && + test_must_fail git log --pretty=test-alias +' + +test_expect_success 'alias of an alias' ' + git log --pretty="tformat:%h" >expected && + git config pretty.test-foo "tformat:%h" && + git config pretty.test-bar test-foo && + git log --pretty=test-bar >actual && test_cmp expected actual +' + +test_expect_success 'alias masking an alias' ' + git log --pretty=format:"Two %H" >expected && + git config pretty.duplicate "format:One %H" && + git config --add pretty.duplicate "format:Two %H" && + git log --pretty=duplicate >actual && + test_cmp expected actual +' + +test_expect_success 'alias loop' ' + git config pretty.test-foo test-bar && + git config pretty.test-bar test-foo && + test_must_fail git log --pretty=test-foo +' + +test_expect_success 'NUL separation' ' + printf "add bar\0$(commit_msg)" >expected && + git log -z --pretty="format:%s" >actual && + test_cmp expected actual +' + +test_expect_success 'NUL termination' ' + printf "add bar\0$(commit_msg)\0" >expected && + git log -z --pretty="tformat:%s" >actual && + test_cmp expected actual +' + +test_expect_success 'NUL separation with --stat' ' + stat0_part=$(git diff --stat HEAD^ HEAD) && + stat1_part=$(git diff-tree --no-commit-id --stat --root HEAD^) && + printf "add bar\n$stat0_part\n\0$(commit_msg)\n$stat1_part\n" >expected && + git log -z --stat --pretty="format:%s" >actual && + test_cmp expected actual +' + +test_expect_failure 'NUL termination with --stat' ' + stat0_part=$(git diff --stat HEAD^ HEAD) && + stat1_part=$(git diff-tree --no-commit-id --stat 
--root HEAD^) && + printf "add bar\n$stat0_part\n\0$(commit_msg)\n$stat1_part\n0" >expected && + git log -z --stat --pretty="tformat:%s" >actual && + test_cmp expected actual +' + +for p in short medium full fuller email raw +do + test_expect_success "NUL termination with --reflog --pretty=$p" ' + revs="$(git rev-list --reflog)" && + for r in $revs + do + git show -s "$r" --pretty="$p" && + printf "\0" || return 1 + done >expect && + { + git log -z --reflog --pretty="$p" && + printf "\0" + } >actual && + test_cmp expect actual + ' +done + +test_expect_success 'NUL termination with --reflog --pretty=oneline' ' + revs="$(git rev-list --reflog)" && + for r in $revs + do + git show -s --pretty=oneline "$r" >raw && + cat raw | lf_to_nul || exit 1 + done >expect && + # the trailing NUL is already produced so we do not need to + # output another one + git log -z --pretty=oneline --reflog >actual && + test_cmp expect actual +' + +test_expect_success 'setup more commits' ' + test_commit "message one" one one message-one && + test_commit "message two" two two message-two && + head1=$(git rev-parse --verify --short HEAD~0) && + head2=$(git rev-parse --verify --short HEAD~1) && + head3=$(git rev-parse --verify --short HEAD~2) && + head4=$(git rev-parse --verify --short HEAD~3) +' + +test_expect_success 'left alignment formatting' ' + git log --pretty="tformat:%<(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + message two Z + message one Z + add bar Z + $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + message two Z + message one Z + add bar Z + $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting at the nth column' ' + git log --pretty="tformat:%h %<|(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting at the nth column' ' + COLUMNS=50 git log --pretty="tformat:%h %<|(-10)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting at the nth column. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%h %<|(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with no padding' ' + git log --pretty="tformat:%<(1)%s" >actual && + cat <<-EOF >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with no padding. 
i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(1)%s" >actual && + cat <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with trunc' ' + git log --pretty="tformat:%<(10,trunc)%s" >actual && + qz_to_tab_space <<-\EOF >expected && + message .. + message .. + add bar Z + initial... + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with trunc. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(10,trunc)%s" >actual && + qz_to_tab_space <<-\EOF | iconv -f utf-8 -t $test_encoding >expected && + message .. + message .. + add bar Z + initial... + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with ltrunc' ' + git log --pretty="tformat:%<(10,ltrunc)%s" >actual && + qz_to_tab_space <<-EOF >expected && + ..sage two + ..sage one + add bar Z + ..${sample_utf8_part}lich + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with ltrunc. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(10,ltrunc)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + ..sage two + ..sage one + add bar Z + ..${sample_utf8_part}lich + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with mtrunc' ' + git log --pretty="tformat:%<(10,mtrunc)%s" >actual && + qz_to_tab_space <<-\EOF >expected && + mess.. two + mess.. one + add bar Z + init..lich + EOF + test_cmp expected actual +' + +test_expect_success 'left alignment formatting with mtrunc. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(10,mtrunc)%s" >actual && + qz_to_tab_space <<-\EOF | iconv -f utf-8 -t $test_encoding >expected && + mess.. two + mess.. one + add bar Z + init..lich + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting' ' + git log --pretty="tformat:%>(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + Z message two + Z message one + Z add bar + Z $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%>(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + Z message two + Z message one + Z add bar + Z $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting at the nth column' ' + git log --pretty="tformat:%h %>|(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two + $head2 message one + $head3 add bar + $head4 $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting at the nth column' ' + COLUMNS=50 git log --pretty="tformat:%h %>|(-10)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two + $head2 message one + $head3 add bar + $head4 $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting at the nth column. 
i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%h %>|(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + $head1 message two + $head2 message one + $head3 add bar + $head4 $(commit_msg) + EOF + test_cmp expected actual +' + +# Note: Space between 'message' and 'two' should be in the same column +# as in previous test. +test_expect_success 'right alignment formatting at the nth column with --graph. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --graph --pretty="tformat:%h %>|(40)%s" >actual && + iconv -f utf-8 -t $test_encoding >expected <<-EOF && + * $head1 message two + * $head2 message one + * $head3 add bar + * $head4 $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting with no padding' ' + git log --pretty="tformat:%>(1)%s" >actual && + cat <<-EOF >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting with no padding and with --graph' ' + git log --graph --pretty="tformat:%>(1)%s" >actual && + cat <<-EOF >expected && + * message two + * message one + * add bar + * $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'right alignment formatting with no padding. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%>(1)%s" >actual && + cat <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'center alignment formatting' ' + git log --pretty="tformat:%><(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + Z message two Z + Z message one Z + Z add bar Z + Z $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'center alignment formatting. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%><(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + Z message two Z + Z message one Z + Z add bar Z + Z $(commit_msg) Z + EOF + test_cmp expected actual +' +test_expect_success 'center alignment formatting at the nth column' ' + git log --pretty="tformat:%h %><|(40)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'center alignment formatting at the nth column' ' + COLUMNS=70 git log --pretty="tformat:%h %><|(-30)%s" >actual && + qz_to_tab_space <<-EOF >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'center alignment formatting at the nth column. 
i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%h %><|(40)%s" >actual && + qz_to_tab_space <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + $head1 message two Z + $head2 message one Z + $head3 add bar Z + $head4 $(commit_msg) Z + EOF + test_cmp expected actual +' + +test_expect_success 'center alignment formatting with no padding' ' + git log --pretty="tformat:%><(1)%s" >actual && + cat <<-EOF >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +# save HEAD's SHA-1 digest (with no abbreviations) to use it below +# as far as the next test amends HEAD +old_head1=$(git rev-parse --verify HEAD~0) +test_expect_success 'center alignment formatting with no padding. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%><(1)%s" >actual && + cat <<-EOF | iconv -f utf-8 -t $test_encoding >expected && + message two + message one + add bar + $(commit_msg) + EOF + test_cmp expected actual +' + +test_expect_success 'left/right alignment formatting with stealing' ' + git commit --amend -m short --author "long long long " && + git log --pretty="tformat:%<(10,trunc)%s%>>(10,ltrunc)% an" >actual && + cat <<-\EOF >expected && + short long long long + message .. A U Thor + add bar A U Thor + initial... A U Thor + EOF + test_cmp expected actual +' +test_expect_success 'left/right alignment formatting with stealing. i18n.logOutputEncoding' ' + git -c i18n.logOutputEncoding=$test_encoding log --pretty="tformat:%<(10,trunc)%s%>>(10,ltrunc)% an" >actual && + cat <<-\EOF | iconv -f utf-8 -t $test_encoding >expected && + short long long long + message .. A U Thor + add bar A U Thor + initial... A U Thor + EOF + test_cmp expected actual +' + +test_expect_success 'strbuf_utf8_replace() not producing NUL' ' + git log --color --pretty="tformat:%<(10,trunc)%s%>>(10,ltrunc)%C(auto)%d" | + test_decode_color | + nul_to_q >actual && + ! 
grep Q actual +' + +# --date=[XXX] and corresponding %a[X] %c[X] format equivalency +test_expect_success '--date=iso-strict %ad%cd is the same as %aI%cI' ' + git log --format=%ad%n%cd --date=iso-strict >expected && + git log --format=%aI%n%cI >actual && + test_cmp expected actual +' + +test_expect_success '--date=short %ad%cd is the same as %as%cs' ' + git log --format=%ad%n%cd --date=short >expected && + git log --format=%as%n%cs >actual && + test_cmp expected actual +' + +test_expect_success '--date=human %ad%cd is the same as %ah%ch' ' + git log --format=%ad%n%cd --date=human >expected && + git log --format=%ah%n%ch >actual && + test_cmp expected actual +' + +# get new digests (with no abbreviations) +test_expect_success 'set up log decoration tests' ' + head1=$(git rev-parse --verify HEAD~0) && + head2=$(git rev-parse --verify HEAD~1) +' + +test_expect_success 'log decoration properly follows tag chain' ' + git tag -a tag1 -m tag1 && + git tag -a tag2 -m tag2 tag1 && + git tag -d tag1 && + git commit --amend -m shorter && + git log --no-walk --tags --pretty="%H %d" --decorate=full >actual && + cat <<-EOF >expected && + $head2 (tag: refs/tags/message-one) + $old_head1 (tag: refs/tags/message-two) + $head1 (tag: refs/tags/tag2) + EOF + sort -k3 actual >actual1 && + test_cmp expected actual1 +' + +test_expect_success 'clean log decoration' ' + git log --no-walk --tags --pretty="%H %D" --decorate=full >actual && + cat >expected <<-EOF && + $head2 tag: refs/tags/message-one + $old_head1 tag: refs/tags/message-two + $head1 tag: refs/tags/tag2 + EOF + sort -k3 actual >actual1 && + test_cmp expected actual1 +' + +cat >trailers < +Acked-by: A U Thor +[ v2 updated patch description ] +Signed-off-by: A U Thor + +EOF + +unfold () { + perl -0pe 's/\n\s+/ /g' +} + +test_expect_success 'set up trailer tests' ' + echo "Some contents" >trailerfile && + git add trailerfile && + git commit -F - <<-EOF + trailers: this commit message has trailers + + This commit is a test commit with trailers at the end. We parse this + message and display the trailers using %(trailers). 
+ + $(cat trailers) + EOF +' + +test_expect_success 'pretty format %(trailers) shows trailers' ' + git log --no-walk --pretty="%(trailers)" >actual && + { + cat trailers && + echo + } >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:) enables no options' ' + git log --no-walk --pretty="%(trailers:)" >actual && + # "expect" the same as the test above + test_cmp expect actual +' + +test_expect_success '%(trailers:only) shows only "key: value" trailers' ' + git log --no-walk --pretty="%(trailers:only)" >actual && + { + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:only=yes) shows only "key: value" trailers' ' + git log --no-walk --pretty=format:"%(trailers:only=yes)" >actual && + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:only=no) shows all trailers' ' + git log --no-walk --pretty=format:"%(trailers:only=no)" >actual && + cat trailers >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:only=no,only=true) shows only "key: value" trailers' ' + git log --no-walk --pretty=format:"%(trailers:only=yes)" >actual && + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:unfold) unfolds trailers' ' + git log --no-walk --pretty="%(trailers:unfold)" >actual && + { + unfold expect && + test_cmp expect actual +' + +test_expect_success ':only and :unfold work together' ' + git log --no-walk --pretty="%(trailers:only,unfold)" >actual && + git log --no-walk --pretty="%(trailers:unfold,only)" >reverse && + test_cmp actual reverse && + { + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key=foo) shows that trailer' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by)" >actual && + echo "Acked-by: A U Thor " >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key=foo) is case insensitive' ' + git log --no-walk --pretty="format:%(trailers:key=AcKed-bY)" >actual && + echo "Acked-by: A U Thor " >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key=foo:) trailing colon also works' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by:)" >actual && + echo "Acked-by: A U Thor " >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key=foo) multiple keys' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by:,key=Signed-off-By)" >actual && + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=nonexistent) becomes empty' ' + git log --no-walk --pretty="x%(trailers:key=Nacked-by)x" >actual && + echo "xx" >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=foo) handles multiple lines even if folded' ' + git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by)" >actual && + grep -v patch.description expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=foo,unfold) properly unfolds' ' + git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by,unfold)" >actual && + unfold expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key=foo,only=no) also includes nontrailer lines' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by,only=no)" >actual && + { + echo "Acked-by: A U Thor " && + grep patch.description expect && + test_cmp expect actual +' + 
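# (Annotation, not part of the patch: a rough sketch of how the
# %(trailers) options exercised in the surrounding tests compose in
# ad-hoc use. Assuming a commit whose message carries an "Acked-by:"
# trailer, printing only the value of each matching trailer looks like:
#
#   git log --no-walk --pretty="format:%(trailers:key=Acked-by,valueonly)"
#
# The separator and key_value_separator options take %xNN hex escapes,
# e.g. separator=%x2c%x20 for ", ", as the later tests demonstrate.)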
+test_expect_success '%(trailers:key) without value is error' ' + git log --no-walk --pretty="tformat:%(trailers:key)" >actual && + echo "%(trailers:key)" >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:keyonly) shows only keys' ' + git log --no-walk --pretty="format:%(trailers:keyonly)" >actual && + test_write_lines \ + "Signed-off-by" \ + "Acked-by" \ + "[ v2 updated patch description ]" \ + "Signed-off-by" >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=foo,keyonly) shows only key' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by,keyonly)" >actual && + echo "Acked-by" >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=foo,valueonly) shows only value' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by,valueonly)" >actual && + echo "A U Thor " >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:valueonly) shows only values' ' + git log --no-walk --pretty="format:%(trailers:valueonly)" >actual && + test_write_lines \ + "A U Thor " \ + "A U Thor " \ + "[ v2 updated patch description ]" \ + "A U Thor" \ + " " >expect && + test_cmp expect actual +' + +test_expect_success '%(trailers:key=foo,keyonly,valueonly) shows nothing' ' + git log --no-walk --pretty="format:%(trailers:key=Acked-by,keyonly,valueonly)" >actual && + echo >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:separator) changes separator' ' + git log --no-walk --pretty=format:"X%(trailers:separator=%x00)X" >actual && + ( + printf "XSigned-off-by: A U Thor \0" && + printf "Acked-by: A U Thor \0" && + printf "[ v2 updated patch description ]\0" && + printf "Signed-off-by: A U Thor\n X" + ) >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:separator=X,unfold) changes separator' ' + git log --no-walk --pretty=format:"X%(trailers:separator=%x00,unfold)X" >actual && + ( + printf "XSigned-off-by: A U Thor \0" && + printf "Acked-by: A U Thor \0" && + printf "[ v2 updated patch description ]\0" && + printf "Signed-off-by: A U Thor X" + ) >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key_value_separator) changes key-value separator' ' + git log --no-walk --pretty=format:"X%(trailers:key_value_separator=%x00)X" >actual && + ( + printf "XSigned-off-by\0A U Thor \n" && + printf "Acked-by\0A U Thor \n" && + printf "[ v2 updated patch description ]\n" && + printf "Signed-off-by\0A U Thor\n \nX" + ) >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:key_value_separator,unfold) changes key-value separator' ' + git log --no-walk --pretty=format:"X%(trailers:key_value_separator=%x00,unfold)X" >actual && + ( + printf "XSigned-off-by\0A U Thor \n" && + printf "Acked-by\0A U Thor \n" && + printf "[ v2 updated patch description ]\n" && + printf "Signed-off-by\0A U Thor \nX" + ) >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers:separator,key_value_separator) changes both separators' ' + git log --no-walk --pretty=format:"%(trailers:separator=%x00,key_value_separator=%x00%x00,unfold)" >actual && + ( + printf "Signed-off-by\0\0A U Thor \0" && + printf "Acked-by\0\0A U Thor \0" && + printf "[ v2 updated patch description ]\0" && + printf "Signed-off-by\0\0A U Thor " + ) >expect && + test_cmp expect actual +' + +test_expect_success 'pretty format %(trailers) combining separator/key/keyonly/valueonly' ' + git commit --allow-empty -F - 
<<-\EOF && + Important fix + + The fix is explained here + + Closes: #1234 + EOF + + git commit --allow-empty -F - <<-\EOF && + Another fix + + The fix is explained here + + Closes: #567 + Closes: #890 + EOF + + git commit --allow-empty -F - <<-\EOF && + Does not close any tickets + EOF + + git log --pretty="%s% (trailers:separator=%x2c%x20,key=Closes,valueonly)" HEAD~3.. >actual && + test_write_lines \ + "Does not close any tickets" \ + "Another fix #567, #890" \ + "Important fix #1234" >expect && + test_cmp expect actual && + + git log --pretty="%s% (trailers:separator=%x2c%x20,key=Closes,keyonly)" HEAD~3.. >actual && + test_write_lines \ + "Does not close any tickets" \ + "Another fix Closes, Closes" \ + "Important fix Closes" >expect && + test_cmp expect actual +' + +test_expect_success 'trailer parsing not fooled by --- line' ' + git commit --allow-empty -F - <<-\EOF && + this is the subject + + This is the body. The message has a "---" line which would confuse a + message+patch parser. But here we know we have only a commit message, + so we get it right. + + trailer: wrong + --- + This is more body. + + trailer: right + EOF + + { + echo "trailer: right" && + echo + } >expect && + git log --no-walk --format="%(trailers)" >actual && + test_cmp expect actual +' + +test_expect_success 'set up %S tests' ' + git checkout --orphan source-a && + test_commit one && + test_commit two && + git checkout -b source-b HEAD^ && + test_commit three +' + +test_expect_success 'log --format=%S paints branch names' ' + cat >expect <<-\EOF && + source-b + source-a + source-b + EOF + git log --format=%S source-a source-b >actual && + test_cmp expect actual +' + +test_expect_success 'log --format=%S paints tag names' ' + git tag -m tagged source-tag && + cat >expect <<-\EOF && + source-tag + source-a + source-tag + EOF + git log --format=%S source-tag source-a >actual && + test_cmp expect actual +' + +test_expect_success 'log --format=%S paints symmetric ranges' ' + cat >expect <<-\EOF && + source-b + source-a + EOF + git log --format=%S source-a...source-b >actual && + test_cmp expect actual +' + +test_expect_success '%S in git log --format works with other placeholders (part 1)' ' + git log --format="source-b %h" source-b >expect && + git log --format="%S %h" source-b >actual && + test_cmp expect actual +' + +test_expect_success '%S in git log --format works with other placeholders (part 2)' ' + git log --format="%h source-b" source-b >expect && + git log --format="%h %S" source-b >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference' ' + git log --pretty="tformat:%h (%s, %as)" >expect && + git log --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference with log.date is overridden by short date' ' + git log --pretty="tformat:%h (%s, %as)" >expect && + test_config log.date rfc && + git log --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference with explicit date overrides short date' ' + git log --date=rfc --pretty="tformat:%h (%s, %ad)" >expect && + git log --date=rfc --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference is never unabbreviated' ' + git log --pretty="tformat:%h (%s, %as)" >expect && + git log --no-abbrev-commit --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference is never decorated' ' + git log --pretty="tformat:%h (%s, %as)" >expect && + git log 
--decorate=short --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference does not output reflog info' ' + git log --walk-reflogs --pretty="tformat:%h (%s, %as)" >expect && + git log --walk-reflogs --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty=reference is colored appropriately' ' + git log --color=always --pretty="tformat:%C(auto)%h (%s, %as)" >expect && + git log --color=always --pretty=reference >actual && + test_cmp expect actual +' + +test_expect_success '%(describe) vs git describe' ' + git log --format="%H" | while read hash + do + if desc=$(git describe $hash) + then + : >expect-contains-good + else + : >expect-contains-bad + fi && + echo "$hash $desc" + done >expect && + test_path_exists expect-contains-good && + test_path_exists expect-contains-bad && + + git log --format="%H %(describe)" >actual 2>err && + test_cmp expect actual && + test_must_be_empty err +' + +test_expect_success '%(describe:match=...) vs git describe --match ...' ' + test_when_finished "git tag -d tag-match" && + git tag -a -m tagged tag-match && + git describe --match "*-match" >expect && + git log -1 --format="%(describe:match=*-match)" >actual && + test_cmp expect actual +' + +test_expect_success '%(describe:exclude=...) vs git describe --exclude ...' ' + test_when_finished "git tag -d tag-exclude" && + git tag -a -m tagged tag-exclude && + git describe --exclude "*-exclude" >expect && + git log -1 --format="%(describe:exclude=*-exclude)" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with space stealing' ' + printf mm0 >expect && + git log -1 --pretty="format:mm%>>|(1)%x30" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with invalid padding format' ' + printf "%s%%<(20" "$(git rev-parse HEAD)" >expect && + git log -1 --pretty="format:%H%<(20" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty with magical wrapping directives' ' + commit_id=$(git commit-tree HEAD^{tree} -m "describe me") && + git tag describe-me $commit_id && + printf "\n(tag:\ndescribe-me)%%+w(2)" >expect && + git log -1 --pretty="format:%w(1)%+d%+w(2)" $commit_id >actual && + test_cmp expect actual +' + +test_expect_success SIZE_T_IS_64BIT 'log --pretty with overflowing wrapping directive' ' + cat >expect <<-EOF && + fatal: number too large to represent as int on this platform: 2147483649 + EOF + test_must_fail git log -1 --pretty="format:%w(2147483649,1,1)%d" 2>error && + test_cmp expect error && + test_must_fail git log -1 --pretty="format:%w(1,2147483649,1)%d" 2>error && + test_cmp expect error && + test_must_fail git log -1 --pretty="format:%w(1,1,2147483649)%d" 2>error && + test_cmp expect error +' + +test_expect_success 'log --pretty with padding and preceding control chars' ' + printf "\20\20 0" >expect && + git log -1 --pretty="format:%x10%x10%>|(4)%x30" >actual && + test_cmp expect actual +' + +test_expect_success 'log --pretty truncation with control chars' ' + test_commit "$(printf "\20\20\20\20xxxx")" file contents commit-with-control-chars && + printf "\20\20\20\20x.." >expect && + git log -1 --pretty="format:%<(3,trunc)%s" commit-with-control-chars >actual && + test_cmp expect actual +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' ' + # We only assert that this command does not crash. This needs to be + # executed with the address sanitizer to demonstrate failure. 
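# (Annotation, not part of the patch: the crash guarded against here only
# surfaces in an instrumented build; with git's own Makefile that would
# typically mean building with something like "make SANITIZE=address"
# before running this script.)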
+ git log -1 --pretty="format:%>(2147483646)%x41%41%>(2147483646)%x41" >/dev/null +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'set up huge commit' ' + test-tool genzeros 2147483649 | tr "\000" "1" >expect && + huge_commit=$(git commit-tree -F expect HEAD^{tree}) +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message' ' + git log -1 --format="%B%<(1)%x30" $huge_commit >actual && + echo 0 >>expect && + test_cmp expect actual +' + +test_expect_success EXPENSIVE,SIZE_T_IS_64BIT 'log --pretty with huge commit message does not cause allocation failure' ' + test_must_fail git log -1 --format="%<(1)%B" $huge_commit 2>error && + cat >expect <<-EOF && + fatal: number too large to represent as int on this platform: 2147483649 + EOF + test_cmp expect error +' + +test_done diff --git a/t/t4208-log-magic-pathspec.sh b/t/t4208-log-magic-pathspec.sh index 7f0c1dcc0f0bfb..2e8f5ad7b822b2 100755 --- a/t/t4208-log-magic-pathspec.sh +++ b/t/t4208-log-magic-pathspec.sh @@ -124,6 +124,7 @@ test_expect_success 'command line pathspec parsing for "git log"' ' test_expect_success 'tree_entry_interesting does not match past submodule boundaries' ' test_when_finished "rm -rf repo submodule" && + test_config_global protocol.file.allow always && git init submodule && test_commit -C submodule initial && git init repo && diff --git a/t/t4208-log-magic-pathspec.sh.orig b/t/t4208-log-magic-pathspec.sh.orig new file mode 100755 index 00000000000000..7f0c1dcc0f0bfb --- /dev/null +++ b/t/t4208-log-magic-pathspec.sh.orig @@ -0,0 +1,144 @@ +#!/bin/sh + +test_description='magic pathspec tests using git-log' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +test_expect_success 'setup' ' + test_commit initial && + test_tick && + git commit --allow-empty -m empty && + mkdir sub +' + +test_expect_success '"git log :/" should not be ambiguous' ' + git log :/ +' + +test_expect_success '"git log :/a" should be ambiguous (applied both rev and worktree)' ' + : >a && + test_must_fail git log :/a 2>error && + test_i18ngrep ambiguous error +' + +test_expect_success '"git log :/a -- " should not be ambiguous' ' + git log :/a -- +' + +test_expect_success '"git log :/detached -- " should find a commit only in HEAD' ' + test_when_finished "git checkout main" && + git checkout --detach && + test_commit --no-tag detached && + test_commit --no-tag something-else && + git log :/detached -- +' + +test_expect_success '"git log :/detached -- " should not find an orphaned commit' ' + test_must_fail git log :/detached -- +' + +test_expect_success '"git log :/detached -- " should find HEAD only of own worktree' ' + git worktree add other-tree HEAD && + git -C other-tree checkout --detach && + test_tick && + git -C other-tree commit --allow-empty -m other-detached && + git -C other-tree log :/other-detached -- && + test_must_fail git log :/other-detached -- +' + +test_expect_success '"git log -- :/a" should not be ambiguous' ' + git log -- :/a +' + +test_expect_success '"git log :/any/path/" should not segfault' ' + test_must_fail git log :/any/path/ +' + +# This differs from the ":/a" check above in that :/in looks like a pathspec, +# but doesn't match an actual file. 
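# (Annotation, not part of the patch: since nothing in the worktree
# matches "in", ":/in" falls back to the ":/<text>" commit-message
# search. When a path does match, the caller must disambiguate
# explicitly, e.g. "git log :/a --" to force the revision reading or
# "git log -- :/a" to force the pathspec reading, as the neighbouring
# tests show.)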
+test_expect_success '"git log :/in" should not be ambiguous' ' + git log :/in +' + +test_expect_success '"git log :" should be ambiguous' ' + test_must_fail git log : 2>error && + test_i18ngrep ambiguous error +' + +test_expect_success 'git log -- :' ' + git log -- : +' + +test_expect_success 'git log HEAD -- :/' ' + initial=$(git rev-parse --short HEAD^) && + cat >expected <<-EOF && + $initial initial + EOF + (cd sub && git log --oneline HEAD -- :/ >../actual) && + test_cmp expected actual +' + +test_expect_success '"git log :^sub" is not ambiguous' ' + git log :^sub +' + +test_expect_success '"git log :^does-not-exist" does not match anything' ' + test_must_fail git log :^does-not-exist +' + +test_expect_success '"git log :!" behaves the same as :^' ' + git log :!sub && + test_must_fail git log :!does-not-exist +' + +test_expect_success '"git log :(exclude)sub" is not ambiguous' ' + git log ":(exclude)sub" +' + +test_expect_success '"git log :(exclude)sub --" must resolve as an object' ' + test_must_fail git log ":(exclude)sub" -- +' + +test_expect_success '"git log :(unknown-magic) complains of bogus magic' ' + test_must_fail git log ":(unknown-magic)" 2>error && + test_i18ngrep pathspec.magic error +' + +test_expect_success 'command line pathspec parsing for "git log"' ' + git reset --hard && + >a && + git add a && + git commit -m "add an empty a" --allow-empty && + echo 1 >a && + git commit -a -m "update a to 1" && + git checkout HEAD^ && + echo 2 >a && + git commit -a -m "update a to 2" && + test_must_fail git merge main && + git add a && + git log --merge -- a +' + +test_expect_success 'tree_entry_interesting does not match past submodule boundaries' ' + test_when_finished "rm -rf repo submodule" && + git init submodule && + test_commit -C submodule initial && + git init repo && + >"repo/[bracket]" && + git -C repo add "[bracket]" && + test_tick && + git -C repo commit -m bracket && + git -C repo rev-list HEAD -- "[bracket]" >expect && + + git -C repo submodule add ../submodule && + test_tick && + git -C repo commit -m submodule && + + git -C repo rev-list HEAD -- "[bracket]" >actual && + test_cmp expect actual +' + +test_done diff --git a/t/t5604-clone-reference.sh b/t/t5604-clone-reference.sh index 24340e6d56e18d..7ccebb40c33825 100755 --- a/t/t5604-clone-reference.sh +++ b/t/t5604-clone-reference.sh @@ -303,8 +303,6 @@ test_expect_success SYMLINKS 'setup repo with manually symlinked or unknown file ln -s ../an-object $obj && cd ../ && - find . -type f | sort >../../../T.objects-files.raw && - find . -type l | sort >../../../T.objects-symlinks.raw && echo unknown_content >unknown_file ) && git -C T fsck && @@ -313,19 +311,27 @@ test_expect_success SYMLINKS 'setup repo with manually symlinked or unknown file test_expect_success SYMLINKS 'clone repo with symlinked or unknown files at objects/' ' - for option in --local --no-hardlinks --shared --dissociate + # None of these options work when cloning locally, since T has + # symlinks in its `$GIT_DIR/objects` directory + for option in --local --no-hardlinks --dissociate do - git clone $option T T$option || return 1 && - git -C T$option fsck || return 1 && - git -C T$option rev-list --all --objects >T$option.objects && - test_cmp T.objects T$option.objects && - ( - cd T$option/.git/objects && - find . -type f | sort >../../../T$option.objects-files.raw && - find . 
-type l | sort >../../../T$option.objects-symlinks.raw - ) + test_must_fail git clone $option T T$option 2>err || return 1 && + test_i18ngrep "symlink.*exists" err || return 1 done && + # But `--shared` clones should still work, even when specifying + # a local path *and* that repository has symlinks present in its + # `$GIT_DIR/objects` directory. + git clone --shared T T--shared && + git -C T--shared fsck && + git -C T--shared rev-list --all --objects >T--shared.objects && + test_cmp T.objects T--shared.objects && + ( + cd T--shared/.git/objects && + find . -type f | sort >../../../T--shared.objects-files.raw && + find . -type l | sort >../../../T--shared.objects-symlinks.raw + ) && + for raw in $(ls T*.raw) do sed -e "s!/../!/Y/!; s![0-9a-f]\{38,\}!Z!" -e "/commit-graph/d" \ @@ -333,29 +339,25 @@ test_expect_success SYMLINKS 'clone repo with symlinked or unknown files at obje sort $raw.de-sha-1 >$raw.de-sha || return 1 done && - cat >expected-files <<-EOF && - ./Y/Z - ./Y/Z - ./Y/Z - ./a-loose-dir/Z - ./an-object - ./info/packs - ./pack/pack-Z.idx - ./pack/pack-Z.pack - ./packs/pack-Z.idx - ./packs/pack-Z.pack - ./unknown_file - EOF - - for option in --local --no-hardlinks --dissociate - do - test_cmp expected-files T$option.objects-files.raw.de-sha || return 1 && - test_must_be_empty T$option.objects-symlinks.raw.de-sha || return 1 - done && - echo ./info/alternates >expected-files && test_cmp expected-files T--shared.objects-files.raw && test_must_be_empty T--shared.objects-symlinks.raw ' +test_expect_success SYMLINKS 'clone repo with symlinked objects directory' ' + test_when_finished "rm -fr sensitive malicious" && + + mkdir -p sensitive && + echo "secret" >sensitive/file && + + git init malicious && + rm -fr malicious/.git/objects && + ln -s "$(pwd)/sensitive" ./malicious/.git/objects && + + test_must_fail git clone --local malicious clone 2>err && + + test_path_is_missing clone && + grep "failed to start iterator over" err +' + test_done diff --git a/t/t5604-clone-reference.sh.orig b/t/t5604-clone-reference.sh.orig new file mode 100755 index 00000000000000..2734e37e8804cd --- /dev/null +++ b/t/t5604-clone-reference.sh.orig @@ -0,0 +1,347 @@ +#!/bin/sh +# +# Copyright (C) 2006 Martin Waitz +# + +test_description='test clone --reference' +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. 
./test-lib.sh + +base_dir=$(pwd) + +U=$base_dir/UPLOAD_LOG + +# create a commit in repo $1 with name $2 +commit_in () { + ( + cd "$1" && + echo "$2" >"$2" && + git add "$2" && + git commit -m "$2" + ) +} + +# check that there are $2 loose objects in repo $1 +test_objcount () { + echo "$2" >expect && + git -C "$1" count-objects >actual.raw && + cut -d' ' -f1 actual && + test_cmp expect actual +} + +test_expect_success 'preparing first repository' ' + test_create_repo A && + commit_in A file1 +' + +test_expect_success 'preparing second repository' ' + git clone A B && + commit_in B file2 && + git -C B repack -ad && + git -C B prune +' + +test_expect_success 'cloning with reference (-l -s)' ' + git clone -l -s --reference B A C +' + +test_expect_success 'existence of info/alternates' ' + test_line_count = 2 C/.git/objects/info/alternates +' + +test_expect_success 'pulling from reference' ' + git -C C pull ../B main +' + +test_expect_success 'that reference gets used' ' + test_objcount C 0 +' + +test_expect_success 'cloning with reference (no -l -s)' ' + GIT_TRACE_PACKET=$U.D git clone --reference B "file://$(pwd)/A" D +' + +test_expect_success 'fetched no objects' ' + test -s "$U.D" && + ! grep " want" "$U.D" +' + +test_expect_success 'existence of info/alternates' ' + test_line_count = 1 D/.git/objects/info/alternates +' + +test_expect_success 'pulling from reference' ' + git -C D pull ../B main +' + +test_expect_success 'that reference gets used' ' + test_objcount D 0 +' + +test_expect_success 'updating origin' ' + commit_in A file3 && + git -C A repack -ad && + git -C A prune +' + +test_expect_success 'pulling changes from origin' ' + git -C C pull --no-rebase origin +' + +# the 2 local objects are commit and tree from the merge +test_expect_success 'that alternate to origin gets used' ' + test_objcount C 2 +' + +test_expect_success 'pulling changes from origin' ' + git -C D pull --no-rebase origin +' + +# the 5 local objects are expected; file3 blob, commit in A to add it +# and its tree, and 2 are our tree and the merge commit. +test_expect_success 'check objects expected to exist locally' ' + test_objcount D 5 +' + +test_expect_success 'preparing alternate repository #1' ' + test_create_repo F && + commit_in F file1 +' + +test_expect_success 'cloning alternate repo #2 and adding changes to repo #1' ' + git clone F G && + commit_in F file2 +' + +test_expect_success 'cloning alternate repo #1, using #2 as reference' ' + git clone --reference G F H +' + +test_expect_success 'cloning with reference being subset of source (-l -s)' ' + git clone -l -s --reference A B E +' + +test_expect_success 'cloning with multiple references drops duplicates' ' + git clone -s --reference B --reference A --reference B A dups && + test_line_count = 2 dups/.git/objects/info/alternates +' + +test_expect_success 'clone with reference from a tagged repository' ' + ( + cd A && git tag -a -m tagged HEAD + ) && + git clone --reference=A A I +' + +test_expect_success 'prepare branched repository' ' + git clone A J && + ( + cd J && + git checkout -b other main^ && + echo other >otherfile && + git add otherfile && + git commit -m other && + git checkout main + ) +' + +test_expect_success 'fetch with incomplete alternates' ' + git init K && + echo "$base_dir/A/.git/objects" >K/.git/objects/info/alternates && + ( + cd K && + git remote add J "file://$base_dir/J" && + GIT_TRACE_PACKET=$U.K git fetch J + ) && + main_object=$(cd A && git for-each-ref --format="%(objectname)" refs/heads/main) && + test -s "$U.K" && + ! 
grep " want $main_object" "$U.K" && + tag_object=$(cd A && git for-each-ref --format="%(objectname)" refs/tags/HEAD) && + ! grep " want $tag_object" "$U.K" +' + +test_expect_success 'clone using repo with gitfile as a reference' ' + git clone --separate-git-dir=L A M && + git clone --reference=M A N && + echo "$base_dir/L/objects" >expected && + test_cmp expected "$base_dir/N/.git/objects/info/alternates" +' + +test_expect_success 'clone using repo pointed at by gitfile as reference' ' + git clone --reference=M/.git A O && + echo "$base_dir/L/objects" >expected && + test_cmp expected "$base_dir/O/.git/objects/info/alternates" +' + +test_expect_success 'clone and dissociate from reference' ' + git init P && + ( + cd P && test_commit one + ) && + git clone P Q && + ( + cd Q && test_commit two + ) && + git clone --no-local --reference=P Q R && + git clone --no-local --reference=P --dissociate Q S && + # removing the reference P would corrupt R but not S + rm -fr P && + test_must_fail git -C R fsck && + git -C S fsck +' +test_expect_success 'clone, dissociate from partial reference and repack' ' + rm -fr P Q R && + git init P && + ( + cd P && + test_commit one && + git repack && + test_commit two && + git repack + ) && + git clone --bare P Q && + ( + cd P && + git checkout -b second && + test_commit three && + git repack + ) && + git clone --bare --dissociate --reference=P Q R && + ls R/objects/pack/*.pack >packs.txt && + test_line_count = 1 packs.txt +' + +test_expect_success 'clone, dissociate from alternates' ' + rm -fr A B C && + test_create_repo A && + commit_in A file1 && + git clone --reference=A A B && + test_line_count = 1 B/.git/objects/info/alternates && + git clone --local --dissociate B C && + ! test -f C/.git/objects/info/alternates && + ( cd C && git fsck ) +' + +test_expect_success 'setup repo with garbage in objects/*' ' + git init S && + ( + cd S && + test_commit A && + + cd .git/objects && + >.some-hidden-file && + >some-file && + mkdir .some-hidden-dir && + >.some-hidden-dir/some-file && + >.some-hidden-dir/.some-dot-file && + mkdir some-dir && + >some-dir/some-file && + >some-dir/.some-dot-file + ) +' + +test_expect_success 'clone a repo with garbage in objects/*' ' + for option in --local --no-hardlinks --shared --dissociate + do + git clone $option S S$option || return 1 && + git -C S$option fsck || return 1 + done && + find S-* -name "*some*" | sort >actual && + cat >expected <<-EOF && + S--dissociate/.git/objects/.some-hidden-dir + S--dissociate/.git/objects/.some-hidden-dir/.some-dot-file + S--dissociate/.git/objects/.some-hidden-dir/some-file + S--dissociate/.git/objects/.some-hidden-file + S--dissociate/.git/objects/some-dir + S--dissociate/.git/objects/some-dir/.some-dot-file + S--dissociate/.git/objects/some-dir/some-file + S--dissociate/.git/objects/some-file + S--local/.git/objects/.some-hidden-dir + S--local/.git/objects/.some-hidden-dir/.some-dot-file + S--local/.git/objects/.some-hidden-dir/some-file + S--local/.git/objects/.some-hidden-file + S--local/.git/objects/some-dir + S--local/.git/objects/some-dir/.some-dot-file + S--local/.git/objects/some-dir/some-file + S--local/.git/objects/some-file + S--no-hardlinks/.git/objects/.some-hidden-dir + S--no-hardlinks/.git/objects/.some-hidden-dir/.some-dot-file + S--no-hardlinks/.git/objects/.some-hidden-dir/some-file + S--no-hardlinks/.git/objects/.some-hidden-file + S--no-hardlinks/.git/objects/some-dir + S--no-hardlinks/.git/objects/some-dir/.some-dot-file + S--no-hardlinks/.git/objects/some-dir/some-file + 
S--no-hardlinks/.git/objects/some-file + EOF + test_cmp expected actual +' + +test_expect_success SYMLINKS 'setup repo with manually symlinked or unknown files at objects/' ' + git init T && + ( + cd T && + git config gc.auto 0 && + test_commit A && + git gc && + test_commit B && + + cd .git/objects && + mv pack packs && + ln -s packs pack && + find ?? -type d >loose-dirs && + last_loose=$(tail -n 1 loose-dirs) && + mv $last_loose a-loose-dir && + ln -s a-loose-dir $last_loose && + first_loose=$(head -n 1 loose-dirs) && + rm -f loose-dirs && + + cd $first_loose && + obj=$(ls *) && + mv $obj ../an-object && + ln -s ../an-object $obj && + + cd ../ && + echo unknown_content >unknown_file + ) && + git -C T fsck && + git -C T rev-list --all --objects >T.objects +' + + +test_expect_success SYMLINKS 'clone repo with symlinked or unknown files at objects/' ' + # None of these options work when cloning locally, since T has + # symlinks in its `$GIT_DIR/objects` directory + for option in --local --no-hardlinks --dissociate + do + test_must_fail git clone $option T T$option 2>err || return 1 && + test_i18ngrep "symlink.*exists" err || return 1 + done && + + # But `--shared` clones should still work, even when specifying + # a local path *and* that repository has symlinks present in its + # `$GIT_DIR/objects` directory. + git clone --shared T T--shared && + git -C T--shared fsck && + git -C T--shared rev-list --all --objects >T--shared.objects && + test_cmp T.objects T--shared.objects && + ( + cd T--shared/.git/objects && + find . -type f | sort >../../../T--shared.objects-files.raw && + find . -type l | sort >../../../T--shared.objects-symlinks.raw + ) && + + for raw in $(ls T*.raw) + do + sed -e "s!/../!/Y/!; s![0-9a-f]\{38,\}!Z!" -e "/commit-graph/d" \ + -e "/multi-pack-index/d" -e "/rev/d" <$raw >$raw.de-sha-1 && + sort $raw.de-sha-1 >$raw.de-sha || return 1 + done && + + echo ./info/alternates >expected-files && + test_cmp expected-files T--shared.objects-files.raw && + test_must_be_empty T--shared.objects-symlinks.raw +' + +test_done diff --git a/t/t5619-clone-local-ambiguous-transport.sh b/t/t5619-clone-local-ambiguous-transport.sh new file mode 100755 index 00000000000000..cce62bf78d3351 --- /dev/null +++ b/t/t5619-clone-local-ambiguous-transport.sh @@ -0,0 +1,70 @@ +#!/bin/sh + +test_description='test local clone with ambiguous transport' + +. ./test-lib.sh +. "$TEST_DIRECTORY/lib-httpd.sh" + +if ! test_have_prereq SYMLINKS +then + skip_all='skipping test, symlink support unavailable' + test_done +fi + +start_httpd + +REPO="$HTTPD_DOCUMENT_ROOT_PATH/sub.git" +URI="$HTTPD_URL/dumb/sub.git" + +test_expect_success 'setup' ' + mkdir -p sensitive && + echo "secret" >sensitive/secret && + + git init --bare "$REPO" && + test_commit_bulk -C "$REPO" --ref=main 1 && + + git -C "$REPO" update-ref HEAD main && + git -C "$REPO" update-server-info && + + git init malicious && + ( + cd malicious && + + git submodule add "$URI" && + + mkdir -p repo/refs && + touch repo/refs/.gitkeep && + printf "ref: refs/heads/a" >repo/HEAD && + ln -s "$(cd .. && pwd)/sensitive" repo/objects && + + mkdir -p "$HTTPD_URL/dumb" && + ln -s "../../../.git/modules/sub/../../../repo/" "$URI" && + + git add . && + git commit -m "initial commit" + ) && + + # Delete all of the references in our malicious submodule to + # avoid the client attempting to checkout any objects (which + # will be missing, and thus will cause the clone to fail before + # we can trigger the exploit). 
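# (Annotation, not part of the patch: the commands that follow rely on
# the batch interface of "git update-ref --stdin", where every input
# line of the form
#
#   delete <refname>
#
# removes one ref; feeding it the output of for-each-ref with the
# format "delete %(refname)" therefore drops every ref in $REPO in a
# single invocation.)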
+ git -C "$REPO" for-each-ref --format="delete %(refname)" >in && + git -C "$REPO" update-ref --stdin err && + + test_path_is_missing clone/.git/modules/sub/objects/secret && + # We would actually expect "transport .file. not allowed" here, + # but due to quirks of the URL detection in Git, we mis-parse + # the absolute path as a bogus URL and die before that step. + # + # This works for now, and if we ever fix the URL detection, it + # is OK to change this to detect the transport error. + grep "protocol .* is not supported" err +' + +test_done diff --git a/t/t6008-rev-list-submodule.sh b/t/t6008-rev-list-submodule.sh index 3153a0d8910464..a0a070b404293a 100755 --- a/t/t6008-rev-list-submodule.sh +++ b/t/t6008-rev-list-submodule.sh @@ -26,7 +26,7 @@ test_expect_success 'setup' ' : > super-file && git add super-file && - git submodule add "$(pwd)" sub && + git -c protocol.file.allow=always submodule add "$(pwd)" sub && git symbolic-ref HEAD refs/heads/super && test_tick && git commit -m super-initial && diff --git a/t/t6008-rev-list-submodule.sh.orig b/t/t6008-rev-list-submodule.sh.orig new file mode 100755 index 00000000000000..3153a0d8910464 --- /dev/null +++ b/t/t6008-rev-list-submodule.sh.orig @@ -0,0 +1,45 @@ +#!/bin/sh +# +# Copyright (c) 2007 Johannes E. Schindelin +# + +test_description='git rev-list involving submodules that this repo has' + +GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main +export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME + +. ./test-lib.sh + +test_expect_success 'setup' ' + : > file && + git add file && + test_tick && + git commit -m initial && + echo 1 > file && + test_tick && + git commit -m second file && + echo 2 > file && + test_tick && + git commit -m third file && + + rm .git/index && + + : > super-file && + git add super-file && + git submodule add "$(pwd)" sub && + git symbolic-ref HEAD refs/heads/super && + test_tick && + git commit -m super-initial && + echo 1 > super-file && + test_tick && + git commit -m super-first super-file && + echo 2 > super-file && + test_tick && + git commit -m super-second super-file +' + +test_expect_success "Ilari's test" ' + git rev-list --objects super main ^super^ +' + +test_done diff --git a/t/t6134-pathspec-in-submodule.sh b/t/t6134-pathspec-in-submodule.sh index 0f1cb49cedc645..7a8c9e32473b3f 100755 --- a/t/t6134-pathspec-in-submodule.sh +++ b/t/t6134-pathspec-in-submodule.sh @@ -9,7 +9,7 @@ test_expect_success 'setup a submodule' ' : >pretzel/a && git -C pretzel add a && git -C pretzel commit -m "add a file" -- a && - git submodule add ./pretzel sub && + git -c protocol.file.allow=always submodule add ./pretzel sub && git commit -a -m "add submodule" && git submodule deinit --all ' diff --git a/t/t9850-shell.sh b/t/t9850-shell.sh new file mode 100755 index 00000000000000..cfc71c3bd43187 --- /dev/null +++ b/t/t9850-shell.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +test_description='git shell tests' +. ./test-lib.sh + +test_expect_success 'shell allows upload-pack' ' + printf 0000 >input && + git upload-pack . 
expect && + git shell -c "git-upload-pack $SQ.$SQ" actual && + test_cmp expect actual +' + +test_expect_success 'shell forbids other commands' ' + test_must_fail git shell -c "git config foo.bar baz" +' + +test_expect_success 'shell forbids interactive use by default' ' + test_must_fail git shell +' + +test_expect_success 'shell allows interactive command' ' + mkdir git-shell-commands && + write_script git-shell-commands/ping <<-\EOF && + echo pong + EOF + echo pong >expect && + echo ping | git shell >actual && + test_cmp expect actual +' + +test_expect_success 'shell complains of overlong commands' ' + perl -e "print \"a\" x 2**12 for (0..2**19)" | + test_must_fail git shell 2>err && + grep "too long" err +' + +test_done diff --git a/t/test-lib.sh b/t/test-lib.sh index fc1e521519833c..7df8aa1477c553 100644 --- a/t/test-lib.sh +++ b/t/test-lib.sh @@ -1687,6 +1687,10 @@ build_option () { sed -ne "s/^$1: //p" } +test_lazy_prereq SIZE_T_IS_64BIT ' + test 8 -eq "$(build_option sizeof-size_t)" +' + test_lazy_prereq LONG_IS_64BIT ' test 8 -le "$(build_option sizeof-long)" ' diff --git a/t/test-lib.sh.orig b/t/test-lib.sh.orig new file mode 100644 index 00000000000000..fc1e521519833c --- /dev/null +++ b/t/test-lib.sh.orig @@ -0,0 +1,1721 @@ +# Test framework for git. See t/README for usage. +# +# Copyright (c) 2005 Junio C Hamano +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see http://www.gnu.org/licenses/ . + +# Test the binaries we have just built. The tests are kept in +# t/ subdirectory and are run in 'trash directory' subdirectory. +if test -z "$TEST_DIRECTORY" +then + # We allow tests to override this, in case they want to run tests + # outside of t/, e.g. for running tests on the test library + # itself. + TEST_DIRECTORY=$(pwd) +else + # ensure that TEST_DIRECTORY is an absolute path so that it + # is valid even if the current working directory is changed + TEST_DIRECTORY=$(cd "$TEST_DIRECTORY" && pwd) || exit 1 +fi +if test -z "$TEST_OUTPUT_DIRECTORY" +then + # Similarly, override this to store the test-results subdir + # elsewhere + TEST_OUTPUT_DIRECTORY=$TEST_DIRECTORY +fi +GIT_BUILD_DIR="$TEST_DIRECTORY"/.. + +# If we were built with ASAN, it may complain about leaks +# of program-lifetime variables. Disable it by default to lower +# the noise level. This needs to happen at the start of the script, +# before we even do our "did we build git yet" check (since we don't +# want that one to complain to stderr). +: ${ASAN_OPTIONS=detect_leaks=0:abort_on_error=1} +export ASAN_OPTIONS + +# If LSAN is in effect we _do_ want leak checking, but we still +# want to abort so that we notice the problems. +: ${LSAN_OPTIONS=abort_on_error=1} +export LSAN_OPTIONS + +if test ! -f "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS +then + echo >&2 'error: GIT-BUILD-OPTIONS missing (has Git been built?).' + exit 1 +fi +. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS +export PERL_PATH SHELL_PATH + +# In t0000, we need to override test directories of nested testcases. 
diff --git a/t/test-lib.sh b/t/test-lib.sh
index fc1e521519833c..7df8aa1477c553 100644
--- a/t/test-lib.sh
+++ b/t/test-lib.sh
@@ -1687,6 +1687,10 @@ build_option () {
 	sed -ne "s/^$1: //p"
 }
 
+test_lazy_prereq SIZE_T_IS_64BIT '
+	test 8 -eq "$(build_option sizeof-size_t)"
+'
+
 test_lazy_prereq LONG_IS_64BIT '
 	test 8 -le "$(build_option sizeof-long)"
 '
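The new `SIZE_T_IS_64BIT` lazy prerequisite mirrors the existing `LONG_IS_64BIT` check: it is evaluated on first use via `git version --build-options`, so tests that need 64-bit object offsets can skip themselves cleanly on 32-bit platforms. A consumer would look something like this (sketch only; the title and body are hypothetical):

	# hypothetical consumer: skipped automatically where size_t is 32-bit
	test_expect_success SIZE_T_IS_64BIT 'handle objects larger than 4GB' '
		# ... exercise code paths that need offsets above 2^32 ...
		true
	'
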
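The transport.c hunk below is the behavioral core of the series: `file` is removed from the set of transports that are always allowed, so it falls back to the default policy for unlisted protocols, which permits direct use by the user but refuses implicit use such as submodule recursion. The user-visible effect is roughly the following (illustrative commands; `super` stands for any repository whose .gitmodules records a file:// or local-path submodule URL):

	# the submodule step of a recursive clone is now refused by default ...
	git clone --recurse-submodules ./super dst

	# ... and has to be re-enabled explicitly when it is really wanted
	git -c protocol.file.allow=always \
		clone --recurse-submodules ./super dst
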
diff --git a/transport.c b/transport.c
index 17e9629710a2f8..eded823627951b 100644
--- a/transport.c
+++ b/transport.c
@@ -993,8 +993,7 @@ static enum protocol_allow_config get_protocol_config(const char *type)
 	if (!strcmp(type, "http") ||
 	    !strcmp(type, "https") ||
 	    !strcmp(type, "git") ||
-	    !strcmp(type, "ssh") ||
-	    !strcmp(type, "file"))
+	    !strcmp(type, "ssh"))
 		return PROTOCOL_ALLOW_ALWAYS;
 
 	/* known scary; err on the side of caution */
diff --git a/transport.c.orig b/transport.c.orig new file mode 100644 index 00000000000000..17e9629710a2f8 --- /dev/null +++ b/transport.c.orig @@ -0,0 +1,1535 @@ +#include "cache.h" +#include "config.h" +#include "transport.h" +#include "run-command.h" +#include "pkt-line.h" +#include "fetch-pack.h" +#include "remote.h" +#include "connect.h" +#include "send-pack.h" +#include "walker.h" +#include "bundle.h" +#include "dir.h" +#include "refs.h" +#include "refspec.h" +#include "branch.h" +#include "url.h" +#include "submodule.h" +#include "string-list.h" +#include "oid-array.h" +#include "sigchain.h" +#include "transport-internal.h" +#include "protocol.h" +#include "object-store.h" +#include "color.h" + +static int transport_use_color = -1; +static char transport_colors[][COLOR_MAXLEN] = { + GIT_COLOR_RESET, + GIT_COLOR_RED /* REJECTED */ +}; + +enum color_transport { + TRANSPORT_COLOR_RESET = 0, + TRANSPORT_COLOR_REJECTED = 1 +}; + +static int transport_color_config(void) +{ + const char *keys[] = { + "color.transport.reset", + "color.transport.rejected" + }, *key = "color.transport"; + char *value; + int i; + static int initialized; + + if (initialized) + return 0; + initialized = 1; + + if (!git_config_get_string(key, &value)) + transport_use_color = git_config_colorbool(key, value); + + if (!want_color_stderr(transport_use_color)) + return 0; + + for (i = 0; i < ARRAY_SIZE(keys); i++) + if (!git_config_get_string(keys[i], &value)) {
if (!value) + return config_error_nonbool(keys[i]); + if (color_parse(value, transport_colors[i]) < 0) + return -1; + } + + return 0; +} + +static const char *transport_get_color(enum color_transport ix) +{ + if (want_color_stderr(transport_use_color)) + return transport_colors[ix]; + return ""; +} + +static void set_upstreams(struct transport *transport, struct ref *refs, + int pretend) +{ + struct ref *ref; + for (ref = refs; ref; ref = ref->next) { + const char *localname; + const char *tmp; + const char *remotename; + int flag = 0; + /* + * Check suitability for tracking. Must be successful / + * already up-to-date ref create/modify (not delete). + */ + if (ref->status != REF_STATUS_OK && + ref->status != REF_STATUS_UPTODATE) + continue; + if (!ref->peer_ref) + continue; + if (is_null_oid(&ref->new_oid)) + continue; + + /* Follow symbolic refs (mainly for HEAD). */ + localname = ref->peer_ref->name; + remotename = ref->name; + tmp = resolve_ref_unsafe(localname, RESOLVE_REF_READING, + NULL, &flag); + if (tmp && flag & REF_ISSYMREF && + starts_with(tmp, "refs/heads/")) + localname = tmp; + + /* Both source and destination must be local branches. */ + if (!localname || !starts_with(localname, "refs/heads/")) + continue; + if (!remotename || !starts_with(remotename, "refs/heads/")) + continue; + + if (!pretend) { + int flag = transport->verbose < 0 ? 0 : BRANCH_CONFIG_VERBOSE; + install_branch_config(flag, localname + 11, + transport->remote->name, remotename); + } else if (transport->verbose >= 0) + printf(_("Would set upstream of '%s' to '%s' of '%s'\n"), + localname + 11, remotename + 11, + transport->remote->name); + } +} + +struct bundle_transport_data { + int fd; + struct bundle_header header; + unsigned get_refs_from_bundle_called : 1; +}; + +static struct ref *get_refs_from_bundle(struct transport *transport, + int for_push, + struct transport_ls_refs_options *transport_options) +{ + struct bundle_transport_data *data = transport->data; + struct ref *result = NULL; + int i; + + if (for_push) + return NULL; + + data->get_refs_from_bundle_called = 1; + + if (data->fd > 0) + close(data->fd); + data->fd = read_bundle_header(transport->url, &data->header); + if (data->fd < 0) + die(_("could not read bundle '%s'"), transport->url); + + transport->hash_algo = data->header.hash_algo; + + for (i = 0; i < data->header.references.nr; i++) { + struct string_list_item *e = data->header.references.items + i; + const char *name = e->string; + struct ref *ref = alloc_ref(name); + struct object_id *oid = e->util; + oidcpy(&ref->old_oid, oid); + ref->next = result; + result = ref; + } + return result; +} + +static int fetch_refs_from_bundle(struct transport *transport, + int nr_heads, struct ref **to_fetch) +{ + struct bundle_transport_data *data = transport->data; + int ret; + + if (!data->get_refs_from_bundle_called) + get_refs_from_bundle(transport, 0, NULL); + ret = unbundle(the_repository, &data->header, data->fd, + transport->progress ? 
BUNDLE_VERBOSE : 0); + transport->hash_algo = data->header.hash_algo; + return ret; +} + +static int close_bundle(struct transport *transport) +{ + struct bundle_transport_data *data = transport->data; + if (data->fd > 0) + close(data->fd); + bundle_header_release(&data->header); + free(data); + return 0; +} + +struct git_transport_data { + struct git_transport_options options; + struct child_process *conn; + int fd[2]; + unsigned got_remote_heads : 1; + enum protocol_version version; + struct oid_array extra_have; + struct oid_array shallow; +}; + +static int set_git_option(struct git_transport_options *opts, + const char *name, const char *value) +{ + if (!strcmp(name, TRANS_OPT_UPLOADPACK)) { + opts->uploadpack = value; + return 0; + } else if (!strcmp(name, TRANS_OPT_RECEIVEPACK)) { + opts->receivepack = value; + return 0; + } else if (!strcmp(name, TRANS_OPT_THIN)) { + opts->thin = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_FOLLOWTAGS)) { + opts->followtags = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_KEEP)) { + opts->keep = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_UPDATE_SHALLOW)) { + opts->update_shallow = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_DEPTH)) { + if (!value) + opts->depth = 0; + else { + char *end; + opts->depth = strtol(value, &end, 0); + if (*end) + die(_("transport: invalid depth option '%s'"), value); + } + return 0; + } else if (!strcmp(name, TRANS_OPT_DEEPEN_SINCE)) { + opts->deepen_since = value; + return 0; + } else if (!strcmp(name, TRANS_OPT_DEEPEN_NOT)) { + opts->deepen_not = (const struct string_list *)value; + return 0; + } else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) { + opts->deepen_relative = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) { + opts->from_promisor = !!value; + return 0; + } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) { + list_objects_filter_die_if_populated(&opts->filter_options); + parse_list_objects_filter(&opts->filter_options, value); + return 0; + } else if (!strcmp(name, TRANS_OPT_REJECT_SHALLOW)) { + opts->reject_shallow = !!value; + return 0; + } + return 1; +} + +static int connect_setup(struct transport *transport, int for_push) +{ + struct git_transport_data *data = transport->data; + int flags = transport->verbose > 0 ? CONNECT_VERBOSE : 0; + + if (data->conn) + return 0; + + switch (transport->family) { + case TRANSPORT_FAMILY_ALL: break; + case TRANSPORT_FAMILY_IPV4: flags |= CONNECT_IPV4; break; + case TRANSPORT_FAMILY_IPV6: flags |= CONNECT_IPV6; break; + } + + data->conn = git_connect(data->fd, transport->url, + for_push ? data->options.receivepack : + data->options.uploadpack, + flags); + + return 0; +} + +static void die_if_server_options(struct transport *transport) +{ + if (!transport->server_options || !transport->server_options->nr) + return; + advise(_("see protocol.version in 'git help config' for more details")); + die(_("server options require protocol version 2 or later")); +} + +/* + * Obtains the protocol version from the transport and writes it to + * transport->data->version, first connecting if not already connected. + * + * If the protocol version is one that allows skipping the listing of remote + * refs, and must_list_refs is 0, the listing of remote refs is skipped and + * this function returns NULL. Otherwise, this function returns the list of + * remote refs. 
+ */ +static struct ref *handshake(struct transport *transport, int for_push, + struct transport_ls_refs_options *options, + int must_list_refs) +{ + struct git_transport_data *data = transport->data; + struct ref *refs = NULL; + struct packet_reader reader; + int sid_len; + const char *server_sid; + + connect_setup(transport, for_push); + + packet_reader_init(&reader, data->fd[0], NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_GENTLE_ON_EOF | + PACKET_READ_DIE_ON_ERR_PACKET); + + data->version = discover_version(&reader); + switch (data->version) { + case protocol_v2: + if (server_feature_v2("session-id", &server_sid)) + trace2_data_string("transfer", NULL, "server-sid", server_sid); + if (must_list_refs) + get_remote_refs(data->fd[1], &reader, &refs, for_push, + options, + transport->server_options, + transport->stateless_rpc); + break; + case protocol_v1: + case protocol_v0: + die_if_server_options(transport); + get_remote_heads(&reader, &refs, + for_push ? REF_NORMAL : 0, + &data->extra_have, + &data->shallow); + server_sid = server_feature_value("session-id", &sid_len); + if (server_sid) { + char *sid = xstrndup(server_sid, sid_len); + trace2_data_string("transfer", NULL, "server-sid", sid); + free(sid); + } + break; + case protocol_unknown_version: + BUG("unknown protocol version"); + } + data->got_remote_heads = 1; + transport->hash_algo = reader.hash_algo; + + if (reader.line_peeked) + BUG("buffer must be empty at the end of handshake()"); + + return refs; +} + +static struct ref *get_refs_via_connect(struct transport *transport, int for_push, + struct transport_ls_refs_options *options) +{ + return handshake(transport, for_push, options, 1); +} + +static int fetch_refs_via_pack(struct transport *transport, + int nr_heads, struct ref **to_fetch) +{ + int ret = 0; + struct git_transport_data *data = transport->data; + struct ref *refs = NULL; + struct fetch_pack_args args; + struct ref *refs_tmp = NULL; + + memset(&args, 0, sizeof(args)); + args.uploadpack = data->options.uploadpack; + args.keep_pack = data->options.keep; + args.lock_pack = 1; + args.use_thin_pack = data->options.thin; + args.include_tag = data->options.followtags; + args.verbose = (transport->verbose > 1); + args.quiet = (transport->verbose < 0); + args.no_progress = !transport->progress; + args.depth = data->options.depth; + args.deepen_since = data->options.deepen_since; + args.deepen_not = data->options.deepen_not; + args.deepen_relative = data->options.deepen_relative; + args.check_self_contained_and_connected = + data->options.check_self_contained_and_connected; + args.cloning = transport->cloning; + args.update_shallow = data->options.update_shallow; + args.from_promisor = data->options.from_promisor; + args.filter_options = data->options.filter_options; + args.stateless_rpc = transport->stateless_rpc; + args.server_options = transport->server_options; + args.negotiation_tips = data->options.negotiation_tips; + args.reject_shallow_remote = transport->smart_options->reject_shallow; + + if (!data->got_remote_heads) { + int i; + int must_list_refs = 0; + for (i = 0; i < nr_heads; i++) { + if (!to_fetch[i]->exact_oid) { + must_list_refs = 1; + break; + } + } + refs_tmp = handshake(transport, 0, NULL, must_list_refs); + } + + if (data->version == protocol_unknown_version) + BUG("unknown protocol version"); + else if (data->version <= protocol_v1) + die_if_server_options(transport); + + if (data->options.acked_commits) { + if (data->version < protocol_v2) { + warning(_("--negotiate-only requires protocol 
v2")); + ret = -1; + } else if (!server_supports_feature("fetch", "wait-for-done", 0)) { + warning(_("server does not support wait-for-done")); + ret = -1; + } else { + negotiate_using_fetch(data->options.negotiation_tips, + transport->server_options, + transport->stateless_rpc, + data->fd, + data->options.acked_commits); + ret = 0; + } + goto cleanup; + } + + refs = fetch_pack(&args, data->fd, + refs_tmp ? refs_tmp : transport->remote_refs, + to_fetch, nr_heads, &data->shallow, + &transport->pack_lockfiles, data->version); + + data->got_remote_heads = 0; + data->options.self_contained_and_connected = + args.self_contained_and_connected; + data->options.connectivity_checked = args.connectivity_checked; + + if (refs == NULL) + ret = -1; + if (report_unmatched_refs(to_fetch, nr_heads)) + ret = -1; + +cleanup: + close(data->fd[0]); + if (data->fd[1] >= 0) + close(data->fd[1]); + if (finish_connect(data->conn)) + ret = -1; + data->conn = NULL; + + free_refs(refs_tmp); + free_refs(refs); + return ret; +} + +static int push_had_errors(struct ref *ref) +{ + for (; ref; ref = ref->next) { + switch (ref->status) { + case REF_STATUS_NONE: + case REF_STATUS_UPTODATE: + case REF_STATUS_OK: + break; + default: + return 1; + } + } + return 0; +} + +int transport_refs_pushed(struct ref *ref) +{ + for (; ref; ref = ref->next) { + switch(ref->status) { + case REF_STATUS_NONE: + case REF_STATUS_UPTODATE: + break; + default: + return 1; + } + } + return 0; +} + +static void update_one_tracking_ref(struct remote *remote, char *refname, + struct object_id *new_oid, int deletion, + int verbose) +{ + struct refspec_item rs; + + memset(&rs, 0, sizeof(rs)); + rs.src = refname; + rs.dst = NULL; + + if (!remote_find_tracking(remote, &rs)) { + if (verbose) + fprintf(stderr, "updating local tracking ref '%s'\n", rs.dst); + if (deletion) + delete_ref(NULL, rs.dst, NULL, 0); + else + update_ref("update by push", rs.dst, new_oid, + NULL, 0, 0); + free(rs.dst); + } +} + +void transport_update_tracking_ref(struct remote *remote, struct ref *ref, int verbose) +{ + char *refname; + struct object_id *new_oid; + struct ref_push_report *report; + + if (ref->status != REF_STATUS_OK && ref->status != REF_STATUS_UPTODATE) + return; + + report = ref->report; + if (!report) + update_one_tracking_ref(remote, ref->name, &ref->new_oid, + ref->deletion, verbose); + else + for (; report; report = report->next) { + refname = report->ref_name ? (char *)report->ref_name : ref->name; + new_oid = report->new_oid ? 
report->new_oid : &ref->new_oid; + update_one_tracking_ref(remote, refname, new_oid, + is_null_oid(new_oid), verbose); + } +} + +static void print_ref_status(char flag, const char *summary, + struct ref *to, struct ref *from, const char *msg, + struct ref_push_report *report, + int porcelain, int summary_width) +{ + const char *to_name; + + if (report && report->ref_name) + to_name = report->ref_name; + else + to_name = to->name; + + if (porcelain) { + if (from) + fprintf(stdout, "%c\t%s:%s\t", flag, from->name, to_name); + else + fprintf(stdout, "%c\t:%s\t", flag, to_name); + if (msg) + fprintf(stdout, "%s (%s)\n", summary, msg); + else + fprintf(stdout, "%s\n", summary); + } else { + const char *red = "", *reset = ""; + if (push_had_errors(to)) { + red = transport_get_color(TRANSPORT_COLOR_REJECTED); + reset = transport_get_color(TRANSPORT_COLOR_RESET); + } + fprintf(stderr, " %s%c %-*s%s ", red, flag, summary_width, + summary, reset); + if (from) + fprintf(stderr, "%s -> %s", + prettify_refname(from->name), + prettify_refname(to_name)); + else + fputs(prettify_refname(to_name), stderr); + if (msg) { + fputs(" (", stderr); + fputs(msg, stderr); + fputc(')', stderr); + } + fputc('\n', stderr); + } +} + +static void print_ok_ref_status(struct ref *ref, + struct ref_push_report *report, + int porcelain, int summary_width) +{ + struct object_id *old_oid; + struct object_id *new_oid; + const char *ref_name; + int forced_update; + + if (report && report->old_oid) + old_oid = report->old_oid; + else + old_oid = &ref->old_oid; + if (report && report->new_oid) + new_oid = report->new_oid; + else + new_oid = &ref->new_oid; + if (report && report->forced_update) + forced_update = report->forced_update; + else + forced_update = ref->forced_update; + if (report && report->ref_name) + ref_name = report->ref_name; + else + ref_name = ref->name; + + if (ref->deletion) + print_ref_status('-', "[deleted]", ref, NULL, NULL, + report, porcelain, summary_width); + else if (is_null_oid(old_oid)) + print_ref_status('*', + (starts_with(ref_name, "refs/tags/") + ? "[new tag]" + : (starts_with(ref_name, "refs/heads/") + ? "[new branch]" + : "[new reference]")), + ref, ref->peer_ref, NULL, + report, porcelain, summary_width); + else { + struct strbuf quickref = STRBUF_INIT; + char type; + const char *msg; + + strbuf_add_unique_abbrev(&quickref, old_oid, + DEFAULT_ABBREV); + if (forced_update) { + strbuf_addstr(&quickref, "..."); + type = '+'; + msg = "forced update"; + } else { + strbuf_addstr(&quickref, ".."); + type = ' '; + msg = NULL; + } + strbuf_add_unique_abbrev(&quickref, new_oid, + DEFAULT_ABBREV); + + print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg, + report, porcelain, summary_width); + strbuf_release(&quickref); + } +} + +static int print_one_push_report(struct ref *ref, const char *dest, int count, + struct ref_push_report *report, + int porcelain, int summary_width) +{ + if (!count) { + char *url = transport_anonymize_url(dest); + fprintf(porcelain ? 
stdout : stderr, "To %s\n", url); + free(url); + } + + switch(ref->status) { + case REF_STATUS_NONE: + print_ref_status('X', "[no match]", ref, NULL, NULL, + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_NODELETE: + print_ref_status('!', "[rejected]", ref, NULL, + "remote does not support deleting refs", + report, porcelain, summary_width); + break; + case REF_STATUS_UPTODATE: + print_ref_status('=', "[up to date]", ref, + ref->peer_ref, NULL, + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_NONFASTFORWARD: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "non-fast-forward", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_ALREADY_EXISTS: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "already exists", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_FETCH_FIRST: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "fetch first", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_NEEDS_FORCE: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "needs force", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_STALE: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "stale info", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_REMOTE_UPDATED: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "remote ref updated since checkout", + report, porcelain, summary_width); + break; + case REF_STATUS_REJECT_SHALLOW: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "new shallow roots not allowed", + report, porcelain, summary_width); + break; + case REF_STATUS_REMOTE_REJECT: + print_ref_status('!', "[remote rejected]", ref, + ref->deletion ? NULL : ref->peer_ref, + ref->remote_status, + report, porcelain, summary_width); + break; + case REF_STATUS_EXPECTING_REPORT: + print_ref_status('!', "[remote failure]", ref, + ref->deletion ? NULL : ref->peer_ref, + "remote failed to report status", + report, porcelain, summary_width); + break; + case REF_STATUS_ATOMIC_PUSH_FAILED: + print_ref_status('!', "[rejected]", ref, ref->peer_ref, + "atomic push failed", + report, porcelain, summary_width); + break; + case REF_STATUS_OK: + print_ok_ref_status(ref, report, porcelain, summary_width); + break; + } + + return 1; +} + +static int print_one_push_status(struct ref *ref, const char *dest, int count, + int porcelain, int summary_width) +{ + struct ref_push_report *report; + int n = 0; + + if (!ref->report) + return print_one_push_report(ref, dest, count, + NULL, porcelain, summary_width); + + for (report = ref->report; report; report = report->next) + print_one_push_report(ref, dest, count + n++, + report, porcelain, summary_width); + return n; +} + +static int measure_abbrev(const struct object_id *oid, int sofar) +{ + char hex[GIT_MAX_HEXSZ + 1]; + int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV); + + return (w < sofar) ? 
sofar : w; +} + +int transport_summary_width(const struct ref *refs) +{ + int maxw = -1; + + for (; refs; refs = refs->next) { + maxw = measure_abbrev(&refs->old_oid, maxw); + maxw = measure_abbrev(&refs->new_oid, maxw); + } + if (maxw < 0) + maxw = FALLBACK_DEFAULT_ABBREV; + return (2 * maxw + 3); +} + +void transport_print_push_status(const char *dest, struct ref *refs, + int verbose, int porcelain, unsigned int *reject_reasons) +{ + struct ref *ref; + int n = 0; + char *head; + int summary_width = transport_summary_width(refs); + + if (transport_color_config() < 0) + warning(_("could not parse transport.color.* config")); + + head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL); + + if (verbose) { + for (ref = refs; ref; ref = ref->next) + if (ref->status == REF_STATUS_UPTODATE) + n += print_one_push_status(ref, dest, n, + porcelain, summary_width); + } + + for (ref = refs; ref; ref = ref->next) + if (ref->status == REF_STATUS_OK) + n += print_one_push_status(ref, dest, n, + porcelain, summary_width); + + *reject_reasons = 0; + for (ref = refs; ref; ref = ref->next) { + if (ref->status != REF_STATUS_NONE && + ref->status != REF_STATUS_UPTODATE && + ref->status != REF_STATUS_OK) + n += print_one_push_status(ref, dest, n, + porcelain, summary_width); + if (ref->status == REF_STATUS_REJECT_NONFASTFORWARD) { + if (head != NULL && !strcmp(head, ref->name)) + *reject_reasons |= REJECT_NON_FF_HEAD; + else + *reject_reasons |= REJECT_NON_FF_OTHER; + } else if (ref->status == REF_STATUS_REJECT_ALREADY_EXISTS) { + *reject_reasons |= REJECT_ALREADY_EXISTS; + } else if (ref->status == REF_STATUS_REJECT_FETCH_FIRST) { + *reject_reasons |= REJECT_FETCH_FIRST; + } else if (ref->status == REF_STATUS_REJECT_NEEDS_FORCE) { + *reject_reasons |= REJECT_NEEDS_FORCE; + } else if (ref->status == REF_STATUS_REJECT_REMOTE_UPDATED) { + *reject_reasons |= REJECT_REF_NEEDS_UPDATE; + } + } + free(head); +} + +static int git_transport_push(struct transport *transport, struct ref *remote_refs, int flags) +{ + struct git_transport_data *data = transport->data; + struct send_pack_args args; + int ret = 0; + + if (transport_color_config() < 0) + return -1; + + if (!data->got_remote_heads) + get_refs_via_connect(transport, 1, NULL); + + memset(&args, 0, sizeof(args)); + args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR); + args.force_update = !!(flags & TRANSPORT_PUSH_FORCE); + args.use_thin_pack = data->options.thin; + args.verbose = (transport->verbose > 0); + args.quiet = (transport->verbose < 0); + args.progress = transport->progress; + args.dry_run = !!(flags & TRANSPORT_PUSH_DRY_RUN); + args.porcelain = !!(flags & TRANSPORT_PUSH_PORCELAIN); + args.atomic = !!(flags & TRANSPORT_PUSH_ATOMIC); + args.push_options = transport->push_options; + args.url = transport->url; + + if (flags & TRANSPORT_PUSH_CERT_ALWAYS) + args.push_cert = SEND_PACK_PUSH_CERT_ALWAYS; + else if (flags & TRANSPORT_PUSH_CERT_IF_ASKED) + args.push_cert = SEND_PACK_PUSH_CERT_IF_ASKED; + else + args.push_cert = SEND_PACK_PUSH_CERT_NEVER; + + switch (data->version) { + case protocol_v2: + die(_("support for protocol v2 not implemented yet")); + break; + case protocol_v1: + case protocol_v0: + ret = send_pack(&args, data->fd, data->conn, remote_refs, + &data->extra_have); + break; + case protocol_unknown_version: + BUG("unknown protocol version"); + } + + close(data->fd[1]); + close(data->fd[0]); + /* + * Atomic push may abort the connection early and close the pipe, + * which may cause an error for `finish_connect()`. 
Ignore this error + * for atomic git-push. + */ + if (ret || args.atomic) + finish_connect(data->conn); + else + ret = finish_connect(data->conn); + data->conn = NULL; + data->got_remote_heads = 0; + + return ret; +} + +static int connect_git(struct transport *transport, const char *name, + const char *executable, int fd[2]) +{ + struct git_transport_data *data = transport->data; + data->conn = git_connect(data->fd, transport->url, + executable, 0); + fd[0] = data->fd[0]; + fd[1] = data->fd[1]; + return 0; +} + +static int disconnect_git(struct transport *transport) +{ + struct git_transport_data *data = transport->data; + if (data->conn) { + if (data->got_remote_heads && !transport->stateless_rpc) + packet_flush(data->fd[1]); + close(data->fd[0]); + if (data->fd[1] >= 0) + close(data->fd[1]); + finish_connect(data->conn); + } + + free(data); + return 0; +} + +static struct transport_vtable taken_over_vtable = { + NULL, + get_refs_via_connect, + fetch_refs_via_pack, + git_transport_push, + NULL, + disconnect_git +}; + +void transport_take_over(struct transport *transport, + struct child_process *child) +{ + struct git_transport_data *data; + + if (!transport->smart_options) + BUG("taking over transport requires non-NULL " + "smart_options field."); + + CALLOC_ARRAY(data, 1); + data->options = *transport->smart_options; + data->conn = child; + data->fd[0] = data->conn->out; + data->fd[1] = data->conn->in; + data->got_remote_heads = 0; + transport->data = data; + + transport->vtable = &taken_over_vtable; + transport->smart_options = &(data->options); + + transport->cannot_reuse = 1; +} + +static int is_file(const char *url) +{ + struct stat buf; + if (stat(url, &buf)) + return 0; + return S_ISREG(buf.st_mode); +} + +static int external_specification_len(const char *url) +{ + return strchr(url, ':') - url; +} + +static const struct string_list *protocol_whitelist(void) +{ + static int enabled = -1; + static struct string_list allowed = STRING_LIST_INIT_DUP; + + if (enabled < 0) { + const char *v = getenv("GIT_ALLOW_PROTOCOL"); + if (v) { + string_list_split(&allowed, v, ':', -1); + string_list_sort(&allowed); + enabled = 1; + } else { + enabled = 0; + } + } + + return enabled ? 
&allowed : NULL; +} + +enum protocol_allow_config { + PROTOCOL_ALLOW_NEVER = 0, + PROTOCOL_ALLOW_USER_ONLY, + PROTOCOL_ALLOW_ALWAYS +}; + +static enum protocol_allow_config parse_protocol_config(const char *key, + const char *value) +{ + if (!strcasecmp(value, "always")) + return PROTOCOL_ALLOW_ALWAYS; + else if (!strcasecmp(value, "never")) + return PROTOCOL_ALLOW_NEVER; + else if (!strcasecmp(value, "user")) + return PROTOCOL_ALLOW_USER_ONLY; + + die(_("unknown value for config '%s': %s"), key, value); +} + +static enum protocol_allow_config get_protocol_config(const char *type) +{ + char *key = xstrfmt("protocol.%s.allow", type); + char *value; + + /* first check the per-protocol config */ + if (!git_config_get_string(key, &value)) { + enum protocol_allow_config ret = + parse_protocol_config(key, value); + free(key); + free(value); + return ret; + } + free(key); + + /* if defined, fallback to user-defined default for unknown protocols */ + if (!git_config_get_string("protocol.allow", &value)) { + enum protocol_allow_config ret = + parse_protocol_config("protocol.allow", value); + free(value); + return ret; + } + + /* fallback to built-in defaults */ + /* known safe */ + if (!strcmp(type, "http") || + !strcmp(type, "https") || + !strcmp(type, "git") || + !strcmp(type, "ssh") || + !strcmp(type, "file")) + return PROTOCOL_ALLOW_ALWAYS; + + /* known scary; err on the side of caution */ + if (!strcmp(type, "ext")) + return PROTOCOL_ALLOW_NEVER; + + /* unknown; by default let them be used only directly by the user */ + return PROTOCOL_ALLOW_USER_ONLY; +} + +int is_transport_allowed(const char *type, int from_user) +{ + const struct string_list *whitelist = protocol_whitelist(); + if (whitelist) + return string_list_has_string(whitelist, type); + + switch (get_protocol_config(type)) { + case PROTOCOL_ALLOW_ALWAYS: + return 1; + case PROTOCOL_ALLOW_NEVER: + return 0; + case PROTOCOL_ALLOW_USER_ONLY: + if (from_user < 0) + from_user = git_env_bool("GIT_PROTOCOL_FROM_USER", 1); + return from_user; + } + + BUG("invalid protocol_allow_config type"); +} + +void transport_check_allowed(const char *type) +{ + if (!is_transport_allowed(type, -1)) + die(_("transport '%s' not allowed"), type); +} + +static struct transport_vtable bundle_vtable = { + NULL, + get_refs_from_bundle, + fetch_refs_from_bundle, + NULL, + NULL, + close_bundle +}; + +static struct transport_vtable builtin_smart_vtable = { + NULL, + get_refs_via_connect, + fetch_refs_via_pack, + git_transport_push, + connect_git, + disconnect_git +}; + +struct transport *transport_get(struct remote *remote, const char *url) +{ + const char *helper; + struct transport *ret = xcalloc(1, sizeof(*ret)); + + ret->progress = isatty(2); + string_list_init_dup(&ret->pack_lockfiles); + + if (!remote) + BUG("No remote provided to transport_get()"); + + ret->got_remote_refs = 0; + ret->remote = remote; + helper = remote->foreign_vcs; + + if (!url && remote->url) + url = remote->url[0]; + ret->url = url; + + /* maybe it is a foreign URL? 
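+ * (a "<vcs>::<address>" URL selects the external "git-remote-<vcs>" helper)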
*/ + if (url) { + const char *p = url; + + while (is_urlschemechar(p == url, *p)) + p++; + if (starts_with(p, "::")) + helper = xstrndup(url, p - url); + } + + if (helper) { + transport_helper_init(ret, helper); + } else if (starts_with(url, "rsync:")) { + die(_("git-over-rsync is no longer supported")); + } else if (url_is_local_not_ssh(url) && is_file(url) && is_bundle(url, 1)) { + struct bundle_transport_data *data = xcalloc(1, sizeof(*data)); + bundle_header_init(&data->header); + transport_check_allowed("file"); + ret->data = data; + ret->vtable = &bundle_vtable; + ret->smart_options = NULL; + } else if (!is_url(url) + || starts_with(url, "file://") + || starts_with(url, "git://") + || starts_with(url, "ssh://") + || starts_with(url, "git+ssh://") /* deprecated - do not use */ + || starts_with(url, "ssh+git://") /* deprecated - do not use */ + ) { + /* + * These are builtin smart transports; "allowed" transports + * will be checked individually in git_connect. + */ + struct git_transport_data *data = xcalloc(1, sizeof(*data)); + ret->data = data; + ret->vtable = &builtin_smart_vtable; + ret->smart_options = &(data->options); + + data->conn = NULL; + data->got_remote_heads = 0; + } else { + /* Unknown protocol in URL. Pass to external handler. */ + int len = external_specification_len(url); + char *handler = xmemdupz(url, len); + transport_helper_init(ret, handler); + } + + if (ret->smart_options) { + ret->smart_options->thin = 1; + ret->smart_options->uploadpack = "git-upload-pack"; + if (remote->uploadpack) + ret->smart_options->uploadpack = remote->uploadpack; + ret->smart_options->receivepack = "git-receive-pack"; + if (remote->receivepack) + ret->smart_options->receivepack = remote->receivepack; + } + + ret->hash_algo = &hash_algos[GIT_HASH_SHA1]; + + return ret; +} + +const struct git_hash_algo *transport_get_hash_algo(struct transport *transport) +{ + return transport->hash_algo; +} + +int transport_set_option(struct transport *transport, + const char *name, const char *value) +{ + int git_reports = 1, protocol_reports = 1; + + if (transport->smart_options) + git_reports = set_git_option(transport->smart_options, + name, value); + + if (transport->vtable->set_option) + protocol_reports = transport->vtable->set_option(transport, + name, value); + + /* If either report is 0, report 0 (success). */ + if (!git_reports || !protocol_reports) + return 0; + /* If either reports -1 (invalid value), report -1. */ + if ((git_reports == -1) || (protocol_reports == -1)) + return -1; + /* Otherwise if both report unknown, report unknown. */ + return 1; +} + +void transport_set_verbosity(struct transport *transport, int verbosity, + int force_progress) +{ + if (verbosity >= 1) + transport->verbose = verbosity <= 3 ? verbosity : 3; + if (verbosity < 0) + transport->verbose = -1; + + /** + * Rules used to determine whether to report progress (processing aborts + * when a rule is satisfied): + * + * . Report progress, if force_progress is 1 (ie. --progress). + * . Don't report progress, if force_progress is 0 (ie. --no-progress). + * . Don't report progress, if verbosity < 0 (ie. -q/--quiet ). + * . Report progress if isatty(2) is 1. 
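+ * . Don't report progress in any other case (ie. stderr is not a terminal).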
+ **/ + if (force_progress >= 0) + transport->progress = !!force_progress; + else + transport->progress = verbosity >= 0 && isatty(2); +} + +static void die_with_unpushed_submodules(struct string_list *needs_pushing) +{ + int i; + + fprintf(stderr, _("The following submodule paths contain changes that can\n" + "not be found on any remote:\n")); + for (i = 0; i < needs_pushing->nr; i++) + fprintf(stderr, " %s\n", needs_pushing->items[i].string); + fprintf(stderr, _("\nPlease try\n\n" + " git push --recurse-submodules=on-demand\n\n" + "or cd to the path and use\n\n" + " git push\n\n" + "to push them to a remote.\n\n")); + + string_list_clear(needs_pushing, 0); + + die(_("Aborting.")); +} + +static int run_pre_push_hook(struct transport *transport, + struct ref *remote_refs) +{ + int ret = 0, x; + struct ref *r; + struct child_process proc = CHILD_PROCESS_INIT; + struct strbuf buf; + const char *argv[4]; + + if (!(argv[0] = find_hook("pre-push"))) + return 0; + + argv[1] = transport->remote->name; + argv[2] = transport->url; + argv[3] = NULL; + + proc.argv = argv; + proc.in = -1; + proc.trace2_hook_name = "pre-push"; + + if (start_command(&proc)) { + finish_command(&proc); + return -1; + } + + sigchain_push(SIGPIPE, SIG_IGN); + + strbuf_init(&buf, 256); + + for (r = remote_refs; r; r = r->next) { + if (!r->peer_ref) continue; + if (r->status == REF_STATUS_REJECT_NONFASTFORWARD) continue; + if (r->status == REF_STATUS_REJECT_STALE) continue; + if (r->status == REF_STATUS_REJECT_REMOTE_UPDATED) continue; + if (r->status == REF_STATUS_UPTODATE) continue; + + strbuf_reset(&buf); + strbuf_addf( &buf, "%s %s %s %s\n", + r->peer_ref->name, oid_to_hex(&r->new_oid), + r->name, oid_to_hex(&r->old_oid)); + + if (write_in_full(proc.in, buf.buf, buf.len) < 0) { + /* We do not mind if a hook does not read all refs. 
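+ * (with SIGPIPE ignored, a hook that stops reading early surfaces here as EPIPE)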
*/ + if (errno != EPIPE) + ret = -1; + break; + } + } + + strbuf_release(&buf); + + x = close(proc.in); + if (!ret) + ret = x; + + sigchain_pop(SIGPIPE); + + x = finish_command(&proc); + if (!ret) + ret = x; + + return ret; +} + +int transport_push(struct repository *r, + struct transport *transport, + struct refspec *rs, int flags, + unsigned int *reject_reasons) +{ + *reject_reasons = 0; + + if (transport_color_config() < 0) + return -1; + + if (transport->vtable->push_refs) { + struct ref *remote_refs; + struct ref *local_refs = get_local_heads(); + int match_flags = MATCH_REFS_NONE; + int verbose = (transport->verbose > 0); + int quiet = (transport->verbose < 0); + int porcelain = flags & TRANSPORT_PUSH_PORCELAIN; + int pretend = flags & TRANSPORT_PUSH_DRY_RUN; + int push_ret, ret, err; + struct transport_ls_refs_options transport_options = + TRANSPORT_LS_REFS_OPTIONS_INIT; + + if (check_push_refs(local_refs, rs) < 0) + return -1; + + refspec_ref_prefixes(rs, &transport_options.ref_prefixes); + + trace2_region_enter("transport_push", "get_refs_list", r); + remote_refs = transport->vtable->get_refs_list(transport, 1, + &transport_options); + trace2_region_leave("transport_push", "get_refs_list", r); + + strvec_clear(&transport_options.ref_prefixes); + + if (flags & TRANSPORT_PUSH_ALL) + match_flags |= MATCH_REFS_ALL; + if (flags & TRANSPORT_PUSH_MIRROR) + match_flags |= MATCH_REFS_MIRROR; + if (flags & TRANSPORT_PUSH_PRUNE) + match_flags |= MATCH_REFS_PRUNE; + if (flags & TRANSPORT_PUSH_FOLLOW_TAGS) + match_flags |= MATCH_REFS_FOLLOW_TAGS; + + if (match_push_refs(local_refs, &remote_refs, rs, match_flags)) + return -1; + + if (transport->smart_options && + transport->smart_options->cas && + !is_empty_cas(transport->smart_options->cas)) + apply_push_cas(transport->smart_options->cas, + transport->remote, remote_refs); + + set_ref_status_for_push(remote_refs, + flags & TRANSPORT_PUSH_MIRROR, + flags & TRANSPORT_PUSH_FORCE); + + if (!(flags & TRANSPORT_PUSH_NO_HOOK)) + if (run_pre_push_hook(transport, remote_refs)) + return -1; + + if ((flags & (TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND | + TRANSPORT_RECURSE_SUBMODULES_ONLY)) && + !is_bare_repository()) { + struct ref *ref = remote_refs; + struct oid_array commits = OID_ARRAY_INIT; + + trace2_region_enter("transport_push", "push_submodules", r); + for (; ref; ref = ref->next) + if (!is_null_oid(&ref->new_oid)) + oid_array_append(&commits, + &ref->new_oid); + + if (!push_unpushed_submodules(r, + &commits, + transport->remote, + rs, + transport->push_options, + pretend)) { + oid_array_clear(&commits); + trace2_region_leave("transport_push", "push_submodules", r); + die(_("failed to push all needed submodules")); + } + oid_array_clear(&commits); + trace2_region_leave("transport_push", "push_submodules", r); + } + + if (((flags & TRANSPORT_RECURSE_SUBMODULES_CHECK) || + ((flags & (TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND | + TRANSPORT_RECURSE_SUBMODULES_ONLY)) && + !pretend)) && !is_bare_repository()) { + struct ref *ref = remote_refs; + struct string_list needs_pushing = STRING_LIST_INIT_DUP; + struct oid_array commits = OID_ARRAY_INIT; + + trace2_region_enter("transport_push", "check_submodules", r); + for (; ref; ref = ref->next) + if (!is_null_oid(&ref->new_oid)) + oid_array_append(&commits, + &ref->new_oid); + + if (find_unpushed_submodules(r, + &commits, + transport->remote->name, + &needs_pushing)) { + oid_array_clear(&commits); + trace2_region_leave("transport_push", "check_submodules", r); + die_with_unpushed_submodules(&needs_pushing); + 
} + string_list_clear(&needs_pushing, 0); + oid_array_clear(&commits); + trace2_region_leave("transport_push", "check_submodules", r); + } + + if (!(flags & TRANSPORT_RECURSE_SUBMODULES_ONLY)) { + trace2_region_enter("transport_push", "push_refs", r); + push_ret = transport->vtable->push_refs(transport, remote_refs, flags); + trace2_region_leave("transport_push", "push_refs", r); + } else + push_ret = 0; + err = push_had_errors(remote_refs); + ret = push_ret | err; + + if (!quiet || err) + transport_print_push_status(transport->url, remote_refs, + verbose | porcelain, porcelain, + reject_reasons); + + if (flags & TRANSPORT_PUSH_SET_UPSTREAM) + set_upstreams(transport, remote_refs, pretend); + + if (!(flags & (TRANSPORT_PUSH_DRY_RUN | + TRANSPORT_RECURSE_SUBMODULES_ONLY))) { + struct ref *ref; + for (ref = remote_refs; ref; ref = ref->next) + transport_update_tracking_ref(transport->remote, ref, verbose); + } + + if (porcelain && !push_ret) + puts("Done"); + else if (!quiet && !ret && !transport_refs_pushed(remote_refs)) + fprintf(stderr, "Everything up-to-date\n"); + + return ret; + } + return 1; +} + +const struct ref *transport_get_remote_refs(struct transport *transport, + struct transport_ls_refs_options *transport_options) +{ + if (!transport->got_remote_refs) { + transport->remote_refs = + transport->vtable->get_refs_list(transport, 0, + transport_options); + transport->got_remote_refs = 1; + } + + return transport->remote_refs; +} + +int transport_fetch_refs(struct transport *transport, struct ref *refs) +{ + int rc; + int nr_heads = 0, nr_alloc = 0, nr_refs = 0; + struct ref **heads = NULL; + struct ref *rm; + + for (rm = refs; rm; rm = rm->next) { + nr_refs++; + if (rm->peer_ref && + !is_null_oid(&rm->old_oid) && + oideq(&rm->peer_ref->old_oid, &rm->old_oid)) + continue; + ALLOC_GROW(heads, nr_heads + 1, nr_alloc); + heads[nr_heads++] = rm; + } + + if (!nr_heads) { + /* + * When deepening of a shallow repository is requested, + * then local and remote refs are likely to still be equal. + * Just feed them all to the fetch method in that case. + * This condition shouldn't be met in a non-deepening fetch + * (see builtin/fetch.c:quickfetch()). + */ + ALLOC_ARRAY(heads, nr_refs); + for (rm = refs; rm; rm = rm->next) + heads[nr_heads++] = rm; + } + + rc = transport->vtable->fetch(transport, nr_heads, heads); + + free(heads); + return rc; +} + +void transport_unlock_pack(struct transport *transport) +{ + int i; + + for (i = 0; i < transport->pack_lockfiles.nr; i++) + unlink_or_warn(transport->pack_lockfiles.items[i].string); + string_list_clear(&transport->pack_lockfiles, 0); +} + +int transport_connect(struct transport *transport, const char *name, + const char *exec, int fd[2]) +{ + if (transport->vtable->connect) + return transport->vtable->connect(transport, name, exec, fd); + else + die(_("operation not supported by protocol")); +} + +int transport_disconnect(struct transport *transport) +{ + int ret = 0; + if (transport->vtable->disconnect) + ret = transport->vtable->disconnect(transport); + if (transport->got_remote_refs) + free_refs((void *)transport->remote_refs); + free(transport); + return ret; +} + +/* + * Strip username (and password) from a URL and return + * it in a newly allocated string. 
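+ * For example, "https://user:pass@example.com/repo.git" yields
+ * "https://example.com/repo.git", and scp-style "user@host:path" yields "host:path".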
+ */ +char *transport_anonymize_url(const char *url) +{ + char *scheme_prefix, *anon_part; + size_t anon_len, prefix_len = 0; + + anon_part = strchr(url, '@'); + if (url_is_local_not_ssh(url) || !anon_part) + goto literal_copy; + + anon_len = strlen(++anon_part); + scheme_prefix = strstr(url, "://"); + if (!scheme_prefix) { + if (!strchr(anon_part, ':')) + /* cannot be "me@there:/path/name" */ + goto literal_copy; + } else { + const char *cp; + /* make sure scheme is reasonable */ + for (cp = url; cp < scheme_prefix; cp++) { + switch (*cp) { + /* RFC 1738 2.1 */ + case '+': case '.': case '-': + break; /* ok */ + default: + if (isalnum(*cp)) + break; + /* it isn't */ + goto literal_copy; + } + } + /* @ past the first slash does not count */ + cp = strchr(scheme_prefix + 3, '/'); + if (cp && cp < anon_part) + goto literal_copy; + prefix_len = scheme_prefix - url + 3; + } + return xstrfmt("%.*s%.*s", (int)prefix_len, url, + (int)anon_len, anon_part); +literal_copy: + return xstrdup(url); +} diff --git a/utf8.c b/utf8.c index de4ce5c0e68adc..6a0dd25b0fe414 100644 --- a/utf8.c +++ b/utf8.c @@ -206,26 +206,34 @@ int utf8_width(const char **start, size_t *remainder_p) * string, assuming that the string is utf8. Returns strlen() instead * if the string does not look like a valid utf8 string. */ -int utf8_strnwidth(const char *string, int len, int skip_ansi) +int utf8_strnwidth(const char *string, size_t len, int skip_ansi) { - int width = 0; const char *orig = string; + size_t width = 0; - if (len == -1) - len = strlen(string); while (string && string < orig + len) { - int skip; + int glyph_width; + size_t skip; + while (skip_ansi && (skip = display_mode_esc_sequence_len(string)) != 0) string += skip; - width += utf8_width(&string, NULL); + + glyph_width = utf8_width(&string, NULL); + if (glyph_width > 0) + width += glyph_width; } - return string ? width : len; + + /* + * TODO: fix the interface of this function and `utf8_strwidth()` to + * return `size_t` instead of `int`. + */ + return cast_size_t_to_int(string ? width : len); } int utf8_strwidth(const char *string) { - return utf8_strnwidth(string, -1, 0); + return utf8_strnwidth(string, strlen(string), 0); } int is_utf8(const char *text) @@ -357,51 +365,52 @@ void strbuf_add_wrapped_bytes(struct strbuf *buf, const char *data, int len, void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width, const char *subst) { - struct strbuf sb_dst = STRBUF_INIT; - char *src = sb_src->buf; - char *end = src + sb_src->len; - char *dst; - int w = 0, subst_len = 0; + const char *src = sb_src->buf, *end = sb_src->buf + sb_src->len; + struct strbuf dst; + int w = 0; - if (subst) - subst_len = strlen(subst); - strbuf_grow(&sb_dst, sb_src->len + subst_len); - dst = sb_dst.buf; + strbuf_init(&dst, sb_src->len); while (src < end) { - char *old; + const char *old; + int glyph_width; size_t n; while ((n = display_mode_esc_sequence_len(src))) { - memcpy(dst, src, n); + strbuf_add(&dst, src, n); src += n; - dst += n; } if (src >= end) break; old = src; - n = utf8_width((const char**)&src, NULL); - if (!src) /* broken utf-8, do nothing */ + glyph_width = utf8_width((const char**)&src, NULL); + if (!src) /* broken utf-8, do nothing */ goto out; - if (n && w >= pos && w < pos + width) { + + /* + * In case we see a control character we copy it into the + * buffer, but don't add it to the width. 
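+ * (utf8_width() reports such control characters with a negative width)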
+ */ + if (glyph_width < 0) + glyph_width = 0; + + if (glyph_width && w >= pos && w < pos + width) { if (subst) { - memcpy(dst, subst, subst_len); - dst += subst_len; + strbuf_addstr(&dst, subst); subst = NULL; } - w += n; - continue; + } else { + strbuf_add(&dst, old, src - old); } - memcpy(dst, old, src - old); - dst += src - old; - w += n; + + w += glyph_width; } - strbuf_setlen(&sb_dst, dst - sb_dst.buf); - strbuf_swap(sb_src, &sb_dst); + + strbuf_swap(sb_src, &dst); out: - strbuf_release(&sb_dst); + strbuf_release(&dst); } /* @@ -796,7 +805,7 @@ int skip_utf8_bom(char **text, size_t len) void strbuf_utf8_align(struct strbuf *buf, align_type position, unsigned int width, const char *s) { - int slen = strlen(s); + size_t slen = strlen(s); int display_len = utf8_strnwidth(s, slen, 0); int utf8_compensation = slen - display_len; diff --git a/utf8.c.orig b/utf8.c.orig new file mode 100644 index 00000000000000..de4ce5c0e68adc --- /dev/null +++ b/utf8.c.orig @@ -0,0 +1,815 @@ +#include "git-compat-util.h" +#include "strbuf.h" +#include "utf8.h" + +/* This code is originally from http://www.cl.cam.ac.uk/~mgk25/ucs/ */ + +static const char utf16_be_bom[] = {'\xFE', '\xFF'}; +static const char utf16_le_bom[] = {'\xFF', '\xFE'}; +static const char utf32_be_bom[] = {'\0', '\0', '\xFE', '\xFF'}; +static const char utf32_le_bom[] = {'\xFF', '\xFE', '\0', '\0'}; + +struct interval { + ucs_char_t first; + ucs_char_t last; +}; + +size_t display_mode_esc_sequence_len(const char *s) +{ + const char *p = s; + if (*p++ != '\033') + return 0; + if (*p++ != '[') + return 0; + while (isdigit(*p) || *p == ';') + p++; + if (*p++ != 'm') + return 0; + return p - s; +} + +/* auxiliary function for binary search in interval table */ +static int bisearch(ucs_char_t ucs, const struct interval *table, int max) +{ + int min = 0; + int mid; + + if (ucs < table[0].first || ucs > table[max].last) + return 0; + while (max >= min) { + mid = min + (max - min) / 2; + if (ucs > table[mid].last) + min = mid + 1; + else if (ucs < table[mid].first) + max = mid - 1; + else + return 1; + } + + return 0; +} + +/* The following two functions define the column width of an ISO 10646 + * character as follows: + * + * - The null character (U+0000) has a column width of 0. + * + * - Other C0/C1 control characters and DEL will lead to a return + * value of -1. + * + * - Non-spacing and enclosing combining characters (general + * category code Mn or Me in the Unicode database) have a + * column width of 0. + * + * - SOFT HYPHEN (U+00AD) has a column width of 1. + * + * - Other format characters (general category code Cf in the Unicode + * database) and ZERO WIDTH SPACE (U+200B) have a column width of 0. + * + * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) + * have a column width of 0. + * + * - Spacing characters in the East Asian Wide (W) or East Asian + * Full-width (F) category as defined in Unicode Technical + * Report #11 have a column width of 2. + * + * - All remaining characters (including all printable + * ISO 8859-1 and WGL4 characters, Unicode control characters, + * etc.) have a column width of 1. + * + * This implementation assumes that ucs_char_t characters are encoded + * in ISO 10646. 
+ */ + +static int git_wcwidth(ucs_char_t ch) +{ + /* + * Sorted list of non-overlapping intervals of non-spacing characters, + */ +#include "unicode-width.h" + + /* test for 8-bit control characters */ + if (ch == 0) + return 0; + if (ch < 32 || (ch >= 0x7f && ch < 0xa0)) + return -1; + + /* binary search in table of non-spacing characters */ + if (bisearch(ch, zero_width, ARRAY_SIZE(zero_width) - 1)) + return 0; + + /* binary search in table of double width characters */ + if (bisearch(ch, double_width, ARRAY_SIZE(double_width) - 1)) + return 2; + + return 1; +} + +/* + * Pick one ucs character starting from the location *start points at, + * and return it, while updating the *start pointer to point at the + * end of that character. When remainder_p is not NULL, the location + * holds the number of bytes remaining in the string that we are allowed + * to pick from. Otherwise we are allowed to pick up to the NUL that + * would eventually appear in the string. *remainder_p is also reduced + * by the number of bytes we have consumed. + * + * If the string was not a valid UTF-8, *start pointer is set to NULL + * and the return value is undefined. + */ +static ucs_char_t pick_one_utf8_char(const char **start, size_t *remainder_p) +{ + unsigned char *s = (unsigned char *)*start; + ucs_char_t ch; + size_t remainder, incr; + + /* + * A caller that assumes NUL terminated text can choose + * not to bother with the remainder length. We will + * stop at the first NUL. + */ + remainder = (remainder_p ? *remainder_p : 999); + + if (remainder < 1) { + goto invalid; + } else if (*s < 0x80) { + /* 0xxxxxxx */ + ch = *s; + incr = 1; + } else if ((s[0] & 0xe0) == 0xc0) { + /* 110XXXXx 10xxxxxx */ + if (remainder < 2 || + (s[1] & 0xc0) != 0x80 || + (s[0] & 0xfe) == 0xc0) + goto invalid; + ch = ((s[0] & 0x1f) << 6) | (s[1] & 0x3f); + incr = 2; + } else if ((s[0] & 0xf0) == 0xe0) { + /* 1110XXXX 10Xxxxxx 10xxxxxx */ + if (remainder < 3 || + (s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + /* overlong? */ + (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || + /* surrogate? */ + (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || + /* U+FFFE or U+FFFF? */ + (s[0] == 0xef && s[1] == 0xbf && + (s[2] & 0xfe) == 0xbe)) + goto invalid; + ch = ((s[0] & 0x0f) << 12) | + ((s[1] & 0x3f) << 6) | (s[2] & 0x3f); + incr = 3; + } else if ((s[0] & 0xf8) == 0xf0) { + /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */ + if (remainder < 4 || + (s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[3] & 0xc0) != 0x80 || + /* overlong? */ + (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || + /* > U+10FFFF? */ + (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) + goto invalid; + ch = ((s[0] & 0x07) << 18) | ((s[1] & 0x3f) << 12) | + ((s[2] & 0x3f) << 6) | (s[3] & 0x3f); + incr = 4; + } else { +invalid: + *start = NULL; + return 0; + } + + *start += incr; + if (remainder_p) + *remainder_p = remainder - incr; + return ch; +} + +/* + * This function returns the number of columns occupied by the character + * pointed to by the variable start. The pointer is updated to point at + * the next character. When remainder_p is not NULL, it points at the + * location that stores the number of remaining bytes we can use to pick + * a character (see pick_one_utf8_char() above). + */ +int utf8_width(const char **start, size_t *remainder_p) +{ + ucs_char_t ch = pick_one_utf8_char(start, remainder_p); + if (!*start) + return 0; + return git_wcwidth(ch); +} + +/* + * Returns the total number of columns required by a null-terminated + * string, assuming that the string is utf8. 
Returns strlen() instead + * if the string does not look like a valid utf8 string. + */ +int utf8_strnwidth(const char *string, int len, int skip_ansi) +{ + int width = 0; + const char *orig = string; + + if (len == -1) + len = strlen(string); + while (string && string < orig + len) { + int skip; + while (skip_ansi && + (skip = display_mode_esc_sequence_len(string)) != 0) + string += skip; + width += utf8_width(&string, NULL); + } + return string ? width : len; +} + +int utf8_strwidth(const char *string) +{ + return utf8_strnwidth(string, -1, 0); +} + +int is_utf8(const char *text) +{ + while (*text) { + if (*text == '\n' || *text == '\t' || *text == '\r') { + text++; + continue; + } + utf8_width(&text, NULL); + if (!text) + return 0; + } + return 1; +} + +static void strbuf_add_indented_text(struct strbuf *buf, const char *text, + int indent, int indent2) +{ + if (indent < 0) + indent = 0; + while (*text) { + const char *eol = strchrnul(text, '\n'); + if (*eol == '\n') + eol++; + strbuf_addchars(buf, ' ', indent); + strbuf_add(buf, text, eol - text); + text = eol; + indent = indent2; + } +} + +/* + * Wrap the text, if necessary. The variable indent is the indent for the + * first line, indent2 is the indent for all other lines. + * If indent is negative, assume that already -indent columns have been + * consumed (and no extra indent is necessary for the first line). + */ +void strbuf_add_wrapped_text(struct strbuf *buf, + const char *text, int indent1, int indent2, int width) +{ + int indent, w, assume_utf8 = 1; + const char *bol, *space, *start = text; + size_t orig_len = buf->len; + + if (width <= 0) { + strbuf_add_indented_text(buf, text, indent1, indent2); + return; + } + +retry: + bol = text; + w = indent = indent1; + space = NULL; + if (indent < 0) { + w = -indent; + space = text; + } + + for (;;) { + char c; + size_t skip; + + while ((skip = display_mode_esc_sequence_len(text))) + text += skip; + + c = *text; + if (!c || isspace(c)) { + if (w <= width || !space) { + const char *start = bol; + if (!c && text == start) + return; + if (space) + start = space; + else + strbuf_addchars(buf, ' ', indent); + strbuf_add(buf, start, text - start); + if (!c) + return; + space = text; + if (c == '\t') + w |= 0x07; + else if (c == '\n') { + space++; + if (*space == '\n') { + strbuf_addch(buf, '\n'); + goto new_line; + } + else if (!isalnum(*space)) + goto new_line; + else + strbuf_addch(buf, ' '); + } + w++; + text++; + } + else { +new_line: + strbuf_addch(buf, '\n'); + text = bol = space + isspace(*space); + space = NULL; + w = indent = indent2; + } + continue; + } + if (assume_utf8) { + w += utf8_width(&text, NULL); + if (!text) { + assume_utf8 = 0; + text = start; + strbuf_setlen(buf, orig_len); + goto retry; + } + } else { + w++; + text++; + } + } +} + +void strbuf_add_wrapped_bytes(struct strbuf *buf, const char *data, int len, + int indent, int indent2, int width) +{ + char *tmp = xstrndup(data, len); + strbuf_add_wrapped_text(buf, tmp, indent, indent2, width); + free(tmp); +} + +void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width, + const char *subst) +{ + struct strbuf sb_dst = STRBUF_INIT; + char *src = sb_src->buf; + char *end = src + sb_src->len; + char *dst; + int w = 0, subst_len = 0; + + if (subst) + subst_len = strlen(subst); + strbuf_grow(&sb_dst, sb_src->len + subst_len); + dst = sb_dst.buf; + + while (src < end) { + char *old; + size_t n; + + while ((n = display_mode_esc_sequence_len(src))) { + memcpy(dst, src, n); + src += n; + dst += n; + } + + if (src >= 
end) + break; + + old = src; + n = utf8_width((const char**)&src, NULL); + if (!src) /* broken utf-8, do nothing */ + goto out; + if (n && w >= pos && w < pos + width) { + if (subst) { + memcpy(dst, subst, subst_len); + dst += subst_len; + subst = NULL; + } + w += n; + continue; + } + memcpy(dst, old, src - old); + dst += src - old; + w += n; + } + strbuf_setlen(&sb_dst, dst - sb_dst.buf); + strbuf_swap(sb_src, &sb_dst); +out: + strbuf_release(&sb_dst); +} + +/* + * Returns true (1) if the src encoding name matches the dst encoding + * name directly or one of its alternative names. E.g. UTF-16BE is the + * same as UTF16BE. + */ +static int same_utf_encoding(const char *src, const char *dst) +{ + if (skip_iprefix(src, "utf", &src) && skip_iprefix(dst, "utf", &dst)) { + skip_prefix(src, "-", &src); + skip_prefix(dst, "-", &dst); + return !strcasecmp(src, dst); + } + return 0; +} + +int is_encoding_utf8(const char *name) +{ + if (!name) + return 1; + if (same_utf_encoding("utf-8", name)) + return 1; + return 0; +} + +int same_encoding(const char *src, const char *dst) +{ + static const char utf8[] = "UTF-8"; + + if (!src) + src = utf8; + if (!dst) + dst = utf8; + if (same_utf_encoding(src, dst)) + return 1; + return !strcasecmp(src, dst); +} + +/* + * Wrapper for fprintf and returns the total number of columns required + * for the printed string, assuming that the string is utf8. + */ +int utf8_fprintf(FILE *stream, const char *format, ...) +{ + struct strbuf buf = STRBUF_INIT; + va_list arg; + int columns; + + va_start(arg, format); + strbuf_vaddf(&buf, format, arg); + va_end(arg); + + columns = fputs(buf.buf, stream); + if (0 <= columns) /* keep the error from the I/O */ + columns = utf8_strwidth(buf.buf); + strbuf_release(&buf); + return columns; +} + +/* + * Given a buffer and its encoding, return it re-encoded + * with iconv. If the conversion fails, returns NULL. + */ +#ifndef NO_ICONV +#if defined(OLD_ICONV) || (defined(__sun__) && !defined(_XPG6)) + typedef const char * iconv_ibp; +#else + typedef char * iconv_ibp; +#endif +char *reencode_string_iconv(const char *in, size_t insz, iconv_t conv, + size_t bom_len, size_t *outsz_p) +{ + size_t outsz, outalloc; + char *out, *outpos; + iconv_ibp cp; + + outsz = insz; + outalloc = st_add(outsz, 1 + bom_len); /* for terminating NUL */ + out = xmalloc(outalloc); + outpos = out + bom_len; + cp = (iconv_ibp)in; + + while (1) { + size_t cnt = iconv(conv, &cp, &insz, &outpos, &outsz); + + if (cnt == (size_t) -1) { + size_t sofar; + if (errno != E2BIG) { + free(out); + return NULL; + } + /* insz has remaining number of bytes. + * since we started outsz the same as insz, + * it is likely that insz is not enough for + * converting the rest. + */ + sofar = outpos - out; + outalloc = st_add3(sofar, st_mult(insz, 2), 32); + out = xrealloc(out, outalloc); + outpos = out + sofar; + outsz = outalloc - sofar - 1; + } + else { + *outpos = '\0'; + if (outsz_p) + *outsz_p = outpos - out; + break; + } + } + return out; +} + +static const char *fallback_encoding(const char *name) +{ + /* + * Some platforms do not have the variously spelled variants of + * UTF-8, so let's fall back to trying the most official + * spelling. We do so only as a fallback in case the platform + * does understand the user's spelling, but not our official + * one. + */ + if (is_encoding_utf8(name)) + return "UTF-8"; + + /* + * Even though latin-1 is still seen in e-mail + * headers, some platforms only install ISO-8859-1. 
+ */ + if (!strcasecmp(name, "latin-1")) + return "ISO-8859-1"; + + return name; +} + +char *reencode_string_len(const char *in, size_t insz, + const char *out_encoding, const char *in_encoding, + size_t *outsz) +{ + iconv_t conv; + char *out; + const char *bom_str = NULL; + size_t bom_len = 0; + + if (!in_encoding) + return NULL; + + /* UTF-16LE-BOM is the same as UTF-16 for reading */ + if (same_utf_encoding("UTF-16LE-BOM", in_encoding)) + in_encoding = "UTF-16"; + + /* + * For writing, UTF-16 iconv typically creates "UTF-16BE-BOM" + * Some users under Windows want the little endian version + * + * We handle UTF-16 and UTF-32 ourselves only if the platform does not + * provide a BOM (which we require), since we want to match the behavior + * of the system tools and libc as much as possible. + */ + if (same_utf_encoding("UTF-16LE-BOM", out_encoding)) { + bom_str = utf16_le_bom; + bom_len = sizeof(utf16_le_bom); + out_encoding = "UTF-16LE"; + } else if (same_utf_encoding("UTF-16BE-BOM", out_encoding)) { + bom_str = utf16_be_bom; + bom_len = sizeof(utf16_be_bom); + out_encoding = "UTF-16BE"; +#ifdef ICONV_OMITS_BOM + } else if (same_utf_encoding("UTF-16", out_encoding)) { + bom_str = utf16_be_bom; + bom_len = sizeof(utf16_be_bom); + out_encoding = "UTF-16BE"; + } else if (same_utf_encoding("UTF-32", out_encoding)) { + bom_str = utf32_be_bom; + bom_len = sizeof(utf32_be_bom); + out_encoding = "UTF-32BE"; +#endif + } + + conv = iconv_open(out_encoding, in_encoding); + if (conv == (iconv_t) -1) { + in_encoding = fallback_encoding(in_encoding); + out_encoding = fallback_encoding(out_encoding); + + conv = iconv_open(out_encoding, in_encoding); + if (conv == (iconv_t) -1) + return NULL; + } + out = reencode_string_iconv(in, insz, conv, bom_len, outsz); + iconv_close(conv); + if (out && bom_str && bom_len) + memcpy(out, bom_str, bom_len); + return out; +} +#endif + +static int has_bom_prefix(const char *data, size_t len, + const char *bom, size_t bom_len) +{ + return data && bom && (len >= bom_len) && !memcmp(data, bom, bom_len); +} + +int has_prohibited_utf_bom(const char *enc, const char *data, size_t len) +{ + return ( + (same_utf_encoding("UTF-16BE", enc) || + same_utf_encoding("UTF-16LE", enc)) && + (has_bom_prefix(data, len, utf16_be_bom, sizeof(utf16_be_bom)) || + has_bom_prefix(data, len, utf16_le_bom, sizeof(utf16_le_bom))) + ) || ( + (same_utf_encoding("UTF-32BE", enc) || + same_utf_encoding("UTF-32LE", enc)) && + (has_bom_prefix(data, len, utf32_be_bom, sizeof(utf32_be_bom)) || + has_bom_prefix(data, len, utf32_le_bom, sizeof(utf32_le_bom))) + ); +} + +int is_missing_required_utf_bom(const char *enc, const char *data, size_t len) +{ + return ( + (same_utf_encoding(enc, "UTF-16")) && + !(has_bom_prefix(data, len, utf16_be_bom, sizeof(utf16_be_bom)) || + has_bom_prefix(data, len, utf16_le_bom, sizeof(utf16_le_bom))) + ) || ( + (same_utf_encoding(enc, "UTF-32")) && + !(has_bom_prefix(data, len, utf32_be_bom, sizeof(utf32_be_bom)) || + has_bom_prefix(data, len, utf32_le_bom, sizeof(utf32_le_bom))) + ); +} + +/* + * Returns first character length in bytes for multi-byte `text` according to + * `encoding`. + * + * - The `text` pointer is updated to point at the next character. + * - When `remainder_p` is not NULL, on entry `*remainder_p` is how much bytes + * we can consume from text, and on exit `*remainder_p` is reduced by returned + * character length. Otherwise `text` is treated as limited by NUL. 
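+ * A zero `*remainder_p` yields a return value of 0 without advancing `*text`.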
+ */ +int mbs_chrlen(const char **text, size_t *remainder_p, const char *encoding) +{ + int chrlen; + const char *p = *text; + size_t r = (remainder_p ? *remainder_p : SIZE_MAX); + + if (r < 1) + return 0; + + if (is_encoding_utf8(encoding)) { + pick_one_utf8_char(&p, &r); + + chrlen = p ? (p - *text) + : 1 /* not valid UTF-8 -> raw byte sequence */; + } + else { + /* + * TODO use iconv to decode one char and obtain its chrlen + * for now, let's treat encodings != UTF-8 as one-byte + */ + chrlen = 1; + } + + *text += chrlen; + if (remainder_p) + *remainder_p -= chrlen; + + return chrlen; +} + +/* + * Pick the next char from the stream, ignoring codepoints an HFS+ would. + * Note that this is _not_ complete by any means. It's just enough + * to make is_hfs_dotgit() work, and should not be used otherwise. + */ +static ucs_char_t next_hfs_char(const char **in) +{ + while (1) { + ucs_char_t out = pick_one_utf8_char(in, NULL); + /* + * check for malformed utf8. Technically this + * gets converted to a percent-sequence, but + * returning 0 is good enough for is_hfs_dotgit + * to realize it cannot be .git + */ + if (!*in) + return 0; + + /* these code points are ignored completely */ + switch (out) { + case 0x200c: /* ZERO WIDTH NON-JOINER */ + case 0x200d: /* ZERO WIDTH JOINER */ + case 0x200e: /* LEFT-TO-RIGHT MARK */ + case 0x200f: /* RIGHT-TO-LEFT MARK */ + case 0x202a: /* LEFT-TO-RIGHT EMBEDDING */ + case 0x202b: /* RIGHT-TO-LEFT EMBEDDING */ + case 0x202c: /* POP DIRECTIONAL FORMATTING */ + case 0x202d: /* LEFT-TO-RIGHT OVERRIDE */ + case 0x202e: /* RIGHT-TO-LEFT OVERRIDE */ + case 0x206a: /* INHIBIT SYMMETRIC SWAPPING */ + case 0x206b: /* ACTIVATE SYMMETRIC SWAPPING */ + case 0x206c: /* INHIBIT ARABIC FORM SHAPING */ + case 0x206d: /* ACTIVATE ARABIC FORM SHAPING */ + case 0x206e: /* NATIONAL DIGIT SHAPES */ + case 0x206f: /* NOMINAL DIGIT SHAPES */ + case 0xfeff: /* ZERO WIDTH NO-BREAK SPACE */ + continue; + } + + return out; + } +} + +static int is_hfs_dot_generic(const char *path, + const char *needle, size_t needle_len) +{ + ucs_char_t c; + + c = next_hfs_char(&path); + if (c != '.') + return 0; + + /* + * there's a great deal of other case-folding that occurs + * in HFS+, but this is enough to catch our fairly vanilla + * hard-coded needles. + */ + for (; needle_len > 0; needle++, needle_len--) { + c = next_hfs_char(&path); + + /* + * We know our needles contain only ASCII, so we clamp here to + * make the results of tolower() sane. + */ + if (c > 127) + return 0; + if (tolower(c) != *needle) + return 0; + } + + c = next_hfs_char(&path); + if (c && !is_dir_sep(c)) + return 0; + + return 1; +} + +/* + * Inline wrapper to make sure the compiler resolves strlen() on literals at + * compile time. 
+ */ +static inline int is_hfs_dot_str(const char *path, const char *needle) +{ + return is_hfs_dot_generic(path, needle, strlen(needle)); +} + +int is_hfs_dotgit(const char *path) +{ + return is_hfs_dot_str(path, "git"); +} + +int is_hfs_dotgitmodules(const char *path) +{ + return is_hfs_dot_str(path, "gitmodules"); +} + +int is_hfs_dotgitignore(const char *path) +{ + return is_hfs_dot_str(path, "gitignore"); +} + +int is_hfs_dotgitattributes(const char *path) +{ + return is_hfs_dot_str(path, "gitattributes"); +} + +int is_hfs_dotmailmap(const char *path) +{ + return is_hfs_dot_str(path, "mailmap"); +} + +const char utf8_bom[] = "\357\273\277"; + +int skip_utf8_bom(char **text, size_t len) +{ + if (len < strlen(utf8_bom) || + memcmp(*text, utf8_bom, strlen(utf8_bom))) + return 0; + *text += strlen(utf8_bom); + return 1; +} + +void strbuf_utf8_align(struct strbuf *buf, align_type position, unsigned int width, + const char *s) +{ + int slen = strlen(s); + int display_len = utf8_strnwidth(s, slen, 0); + int utf8_compensation = slen - display_len; + + if (display_len >= width) { + strbuf_addstr(buf, s); + return; + } + + if (position == ALIGN_LEFT) + strbuf_addf(buf, "%-*s", width + utf8_compensation, s); + else if (position == ALIGN_MIDDLE) { + int left = (width - display_len) / 2; + strbuf_addf(buf, "%*s%-*s", left, "", width - left + utf8_compensation, s); + } else if (position == ALIGN_RIGHT) + strbuf_addf(buf, "%*s", width + utf8_compensation, s); +} diff --git a/utf8.h b/utf8.h index 9a16c8679dcd08..b68efef6f4398d 100644 --- a/utf8.h +++ b/utf8.h @@ -7,7 +7,7 @@ typedef unsigned int ucs_char_t; /* assuming 32bit int */ size_t display_mode_esc_sequence_len(const char *s); int utf8_width(const char **start, size_t *remainder_p); -int utf8_strnwidth(const char *string, int len, int skip_ansi); +int utf8_strnwidth(const char *string, size_t len, int skip_ansi); int utf8_strwidth(const char *string); int is_utf8(const char *text); int is_encoding_utf8(const char *name);
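The utf8.c hunk above changes utf8_strnwidth() to take an explicit size_t length instead of treating -1 as "measure up to the NUL", only adds positive glyph widths returned by utf8_width(), and converts the result with cast_size_t_to_int(); utf8_strwidth() now simply passes strlen(string). The caller-side sketch below is illustrative only and assumes it is compiled and linked inside the Git source tree (so that git-compat-util.h and utf8.h are available); the strings and the main() harness are not part of the patch.

#include "git-compat-util.h"	/* assumed in-tree build; pulls in printf() and strlen() */
#include "utf8.h"

int main(void)
{
	const char *plain = "héllo";		/* 6 bytes of UTF-8, 5 display columns */
	const char *ansi = "\033[31mred\033[m";	/* SGR escapes around a 3-column word */

	/*
	 * The old interface accepted len == -1 to mean "whole string";
	 * callers now pass the length explicitly, as utf8_strwidth() does.
	 */
	printf("%d\n", utf8_strnwidth(plain, strlen(plain), 0));	/* 5 */
	printf("%d\n", utf8_strwidth(plain));				/* same as above */

	/* skip_ansi = 1 skips display-mode escape sequences when counting columns. */
	printf("%d\n", utf8_strnwidth(ansi, strlen(ansi), 1));		/* 3 */

	return 0;
}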