Revision 9cf85473209ea8ae2b56c13145c4704d12ee1374 authored by Filip Hejsek on 28 January 2024, 04:09:17 UTC, committed by Johannes Schindelin on 17 April 2024, 20:30:01 UTC
While it is expected to have several git dirs within the `.git/modules/`
tree, it is important that they do not interfere with each other. For
example, if one submodule were called "captain" and another
"captain/hooks", their git dirs would be located in
`.git/modules/captain/` and `.git/modules/captain/hooks/`, respectively,
i.e. the latter's files could clash with the actual Git hooks of the
former.

To prevent these clashes, and in particular to prevent hooks from being
written and then executed as part of a recursive clone, we introduced
checks as part of the fix for CVE-2019-1387 in a8dee3ca61 (Disallow
dubiously-nested submodule git directories, 2019-10-01).

It is currently possible to bypass the check for clashing submodule
git dirs in two ways:

1. parallel cloning
2. checkout --recurse-submodules

Let's check not only before, but also after parallel cloning (and before
checking out the submodule), that the git dir is not clashing with
another one, otherwise fail. This addresses the parallel cloning issue.
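
To illustrate what such a clash check boils down to, here is a minimal,
self-contained sketch; the helper name and the plain string comparison
are assumptions made for the example and do not reflect Git's actual
implementation:

    #include <stdbool.h>
    #include <string.h>

    /*
     * Illustrative only: do two candidate git dirs clash, i.e. is one
     * of them equal to, or a leading directory of, the other (as with
     * ".git/modules/captain" and ".git/modules/captain/hooks")?
     */
    static bool git_dirs_clash(const char *a, const char *b)
    {
            size_t la = strlen(a), lb = strlen(b);
            const char *shorter = la <= lb ? a : b;
            const char *longer = la <= lb ? b : a;
            size_t len = la <= lb ? la : lb;

            /* ignore a trailing '/' on the shorter path */
            if (len && shorter[len - 1] == '/')
                    len--;

            return !strncmp(shorter, longer, len) &&
                   (longer[len] == '\0' || longer[len] == '/');
    }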

As to the recursive checkout issue: it requires quite a few manual steps
to create clashing git dirs because Git itself would refuse to
initialize the inner one, as demonstrated by the test case.

Nevertheless, let's teach the recursive checkout (namely, the
`submodule_move_head()` function it uses) to verify that it does not use
a clashing git dir and, if it does, to disable it (by deleting the
`HEAD` file so that subsequent Git calls won't recognize it as a git dir
anymore).
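
A minimal sketch of that "disable" step, assuming a plain POSIX
environment; the helper name is hypothetical, and the real
`submodule_move_head()` performs additional validation before touching
anything:

    #include <stdio.h>
    #include <unistd.h>

    /*
     * Illustrative only: make a directory unrecognizable as a git dir
     * by removing its HEAD file.
     */
    static int disable_git_dir(const char *git_dir)
    {
            char head[4096];

            if (snprintf(head, sizeof(head), "%s/HEAD", git_dir) >=
                (int)sizeof(head))
                    return -1;      /* path too long for this sketch */

            return unlink(head);    /* without HEAD, no longer a git dir */
    }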

Note: The parallel cloning test case contains a `cat err` that proved to
be highly useful when analyzing the racy nature of the operation (the
operation can fail with three different error messages, depending on
timing), and was left on purpose to ease future debugging should the
need arise.

Signed-off-by: Filip Hejsek <filip.hejsek@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
oidtree.c
/*
 * A wrapper around cbtree which stores oids
 * May be used to replace oid-array for prefix (abbreviation) matches
 */
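/*
 * Example usage (an illustrative sketch; "oid" stands in for a
 * populated "struct object_id" whose "algo" field is set, as
 * oidtree_insert() requires):
 *
 *	struct oidtree ot;
 *
 *	oidtree_init(&ot);
 *	oidtree_insert(&ot, &oid);
 *	if (oidtree_contains(&ot, &oid))
 *		do_something();
 *	oidtree_clear(&ot);
 *
 * oidtree_clear() discards the backing mem_pool, releasing every stored
 * entry at once; oidtree_each() walks the entries matching the first
 * "oidhexsz" hex digits of a given oid.
 */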
#include "oidtree.h"
#include "alloc.h"
#include "hash.h"

struct oidtree_iter_data {
	oidtree_iter fn;
	void *arg;
	size_t *last_nibble_at;
	int algo;
	uint8_t last_byte;
};

void oidtree_init(struct oidtree *ot)
{
	cb_init(&ot->tree);
	mem_pool_init(&ot->mem_pool, 0);
}

void oidtree_clear(struct oidtree *ot)
{
	if (ot) {
		mem_pool_discard(&ot->mem_pool, 0);
		oidtree_init(ot);
	}
}

void oidtree_insert(struct oidtree *ot, const struct object_id *oid)
{
	struct cb_node *on;
	struct object_id k;

	if (!oid->algo)
		BUG("oidtree_insert requires oid->algo");

	on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid));

	/*
	 * Clear the padding and copy the result in separate steps to
	 * respect the 4-byte alignment needed by struct object_id.
	 */
	oidcpy_with_padding(&k, oid);
	memcpy(on->k, &k, sizeof(k));

	/*
	 * n.b. Current callers won't get us duplicates, here.  If a
	 * future caller causes duplicates, there'll be a small leak
	 * that won't be freed until oidtree_clear.  Currently it's not
	 * worth maintaining a free list
	 */
	cb_insert(&ot->tree, on, sizeof(*oid));
}


int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
{
	struct object_id k;
	size_t klen = sizeof(k);

	oidcpy_with_padding(&k, oid);

	if (oid->algo == GIT_HASH_UNKNOWN)
		klen -= sizeof(oid->algo);

	/*
	 * cb_lookup relies on memcmp on the struct, so order matters:
	 * the build assert (which adds zero) ensures hash precedes algo.
	 */
	klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) <
				offsetof(struct object_id, algo));

	return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0;
}

static enum cb_next iter(struct cb_node *n, void *arg)
{
	struct oidtree_iter_data *x = arg;
	struct object_id k;

	/* Copy to provide 4-byte alignment needed by struct object_id. */
	memcpy(&k, n->k, sizeof(k));

	if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo)
		return CB_CONTINUE;

	if (x->last_nibble_at) {
		/* an odd-length hex prefix only constrains the high nibble */
		if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0)
			return CB_CONTINUE;
	}

	return x->fn(&k, x->arg);
}

void oidtree_each(struct oidtree *ot, const struct object_id *oid,
			size_t oidhexsz, oidtree_iter fn, void *arg)
{
	size_t klen = oidhexsz / 2;
	struct oidtree_iter_data x = { 0 };
	assert(oidhexsz <= GIT_MAX_HEXSZ);

	x.fn = fn;
	x.arg = arg;
	x.algo = oid->algo;
	if (oidhexsz & 1) {
		/* remember the byte holding the trailing, odd hex digit */
		x.last_byte = oid->hash[klen];
		x.last_nibble_at = &klen;
	}
	cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x);
}