> -----Original Message-----
> From: HuiHuang [mailto:yellow.flying_at_yahoo.com.cn]
> Sent: Wednesday, 1 July 2009 8:00
> To: Stefan Sperling
> Cc: dev
> Subject: [PATCH]commit_from_multi_wc
>
> Hey Stefan,
>
> Following the new plan, I have made a patch. I have compiled it but not
> tested it yet.
> Could you check whether there are any logical problems? I am hoping for
> your suggestions.
> Thank you :)
>
> log message:
>
> [[[
>
> * subversion/include/svn_client.h
>   (svn_client_commit5): New API. Commits from multiple working copies,
>    performing one separate commit per working copy.
> * subversion/libsvn_client/commit.c
>   (commit_packet_t): New struct. Stores the information needed for one
>    commit.
>   (create_commit_packet): New function. Creates a commit_packet_t.
>   (do_commit): New function. Performs one single commit from the
>    collected commit packet.
>   (collect_commit_packets): New function. Collects commit_packet_t
>    structures when locking the common root failed.
>   (svn_client_commit5): New function. Implements the API declared in
>    subversion/include/svn_client.h.
> ]]]
>
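To make the intended behaviour concrete, here is a rough caller sketch.
It is not part of the patch: the paths, the helper function name and the
ctx/pool setup are invented for illustration, and the prototype is simply
the one proposed in the diff below.

   /* Commit targets that may live in different working copies and
      report one resulting revision per working copy. */
   static svn_error_t *
   commit_example(svn_client_ctx_t *ctx, apr_pool_t *pool)
   {
     apr_array_header_t *commit_info;
     apr_array_header_t *targets = apr_array_make(pool, 2,
                                                  sizeof(const char *));
     int i;

     APR_ARRAY_PUSH(targets, const char *) = "/path/to/wc1/foo.c";
     APR_ARRAY_PUSH(targets, const char *) = "/path/to/wc2/bar.c";

     SVN_ERR(svn_client_commit5(&commit_info, targets, svn_depth_infinity,
                                FALSE /* keep_locks */,
                                FALSE /* keep_changelists */,
                                NULL /* changelists */,
                                NULL /* revprop_table */,
                                ctx, pool));

     /* One svn_commit_info_t per working copy that was committed. */
     for (i = 0; i < commit_info->nelts; i++)
       {
         svn_commit_info_t *info = APR_ARRAY_IDX(commit_info, i,
                                                 svn_commit_info_t *);
         if (SVN_IS_VALID_REVNUM(info->revision))
           SVN_ERR(svn_cmdline_printf(pool, "Committed revision %ld.\n",
                                      info->revision));
       }

     return SVN_NO_ERROR;
   }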
> Index: subversion/include/svn_client.h
> ===================================================================
> --- subversion/include/svn_client.h (revision 38280)
> +++ subversion/include/svn_client.h (working copy)
> @@ -1699,6 +1699,17 @@
> * @{
> */
>
> +svn_error_t *
> +svn_client_commit5(apr_array_header_t **commit_info_p,
> + const apr_array_header_t *targets,
> + svn_depth_t depth,
> + svn_boolean_t keep_locks,
> + svn_boolean_t keep_changelists,
> + const apr_array_header_t *changelists,
> + const apr_hash_t *revprop_table,
> + svn_client_ctx_t *ctx,
> + apr_pool_t *pool);
> +
This function needs a docstring.
And can you add an argument that determines whether performing multiple commits over the same set of targets is allowed? That would make it possible to implement svn_client_commit4() as a wrapper around svn_client_commit5(), and it would also let library users who want to manage this behavior explicitly disable the new functionality.
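Something along these lines, perhaps (the docstring wording and the
allow_multiple_wcs parameter name below are only placeholders for
illustration, not an agreed-upon API):

   /** Commit files or directories from one or more working copies into
    * the repository, performing one separate commit per working copy
    * involved, and returning one svn_commit_info_t per such commit in
    * @a *commit_info_p, allocated in @a pool.
    *
    * If @a allow_multiple_wcs is FALSE, return an error instead of
    * splitting the targets into several commits.  The remaining
    * parameters behave as in svn_client_commit4().
    */
   svn_error_t *
   svn_client_commit5(apr_array_header_t **commit_info_p,
                      const apr_array_header_t *targets,
                      svn_depth_t depth,
                      svn_boolean_t keep_locks,
                      svn_boolean_t keep_changelists,
                      svn_boolean_t allow_multiple_wcs,
                      const apr_array_header_t *changelists,
                      const apr_hash_t *revprop_table,
                      svn_client_ctx_t *ctx,
                      apr_pool_t *pool);

svn_client_commit4() could then be implemented as a thin wrapper that
passes allow_multiple_wcs = FALSE and returns the single
svn_commit_info_t from the resulting array.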
Bert
> /**
> * Commit files or directories into repository, authenticating with
> * the authentication baton cached in @a ctx, and using
> Index: subversion/libsvn_client/commit.c
> ===================================================================
> --- subversion/libsvn_client/commit.c (revision 38280)
> +++ subversion/libsvn_client/commit.c (working copy)
> @@ -67,7 +67,45 @@
>
> } import_ctx_t;
>
> +typedef struct commit_packet_t
> +{
> + /* Working copy root of a wc */
> + const char *base_dir;
>
> + /* Targets under base_dir */
> + apr_array_header_t *targets;
> +
> + apr_array_header_t *rel_targets;
> +
> + svn_wc_adm_access_t *base_dir_access;
> +
> +} commit_packet_t;
> +
> +static commit_packet_t *
> +create_commit_packet(const char *base_dir,
> + const char *target,
> + const char * rel_target,
> + svn_wc_adm_access_t *base_dir_access,
> + apr_pool_t *pool)
> +{
> + apr_array_header_t *targets;
> + apr_array_header_t *rel_targets;
> + commit_packet_t * commit_packet_p;
> +
> + targets = apr_array_make(pool, 1, sizeof(char *));
> + rel_targets = apr_array_make(pool, 1, sizeof(char *));
> + APR_ARRAY_PUSH(targets, char *) = apr_pstrdup(pool, target);
> + APR_ARRAY_PUSH(rel_targets, char *) = apr_pstrdup(pool, rel_target);
> +
> + commit_packet_p = apr_pcalloc(pool, sizeof(commit_packet_t));
> + commit_packet_p->base_dir = apr_pstrdup(pool, base_dir);
> + commit_packet_p->base_dir_access = base_dir_access;
> + commit_packet_p->rel_targets = rel_targets;
> + commit_packet_p->targets = targets;
> +
> + return commit_packet_p;
> +}
> +
> /* Apply PATH's contents (as a delta against the empty string) to
> FILE_BATON in EDITOR. Use POOL for any temporary allocation.
> PROPERTIES is the set of node properties set on this file.
> @@ -1722,3 +1760,611 @@
>
> return reconcile_errors(cmt_err, unlock_err, bump_err, cleanup_err, pool);
> }
> +
> +static svn_error_t *
> +do_commit(svn_commit_info_t **commit_info_p,
> + svn_wc_adm_access_t *base_dir_access,
> + const char *base_dir,
> + const apr_array_header_t *targets,
> + const apr_array_header_t *rel_targets,
> + svn_depth_t depth,
> + svn_boolean_t keep_locks,
> + svn_boolean_t keep_changelists,
> + const apr_array_header_t *changelists,
> + const apr_hash_t *revprop_table,
> + svn_client_ctx_t *ctx,
> + apr_pool_t *pool)
> +{
> + const svn_delta_editor_t *editor;
> + void *edit_baton;
> + svn_ra_session_t *ra_session;
> + const char *log_msg;
> + const char *base_url;
> + apr_hash_t *committables;
> + apr_hash_t *lock_tokens;
> + apr_hash_t *tempfiles = NULL;
> + apr_hash_t *checksums;
> + apr_array_header_t *commit_items;
> + svn_error_t *cmt_err = SVN_NO_ERROR, *unlock_err = SVN_NO_ERROR;
> + svn_error_t *bump_err = SVN_NO_ERROR, *cleanup_err = SVN_NO_ERROR;
> + svn_boolean_t commit_in_progress = FALSE;
> + const char *current_dir = "";
> + const char *notify_prefix;
> +
> + /* One day we might support committing from multiple working copies, but
> + we don't yet. This check ensures that we don't silently commit a
> + subset of the targets.
> +
> + At the same time, if a non-recursive commit is desired, do not
> + allow a deleted directory as one of the targets. */
> + {
> + struct check_dir_delete_baton btn;
> +
> + btn.base_dir_access = base_dir_access;
> + btn.depth = depth;
> + SVN_ERR(svn_iter_apr_array(NULL, targets,
> + check_nonrecursive_dir_delete, &btn,
> + pool));
> + }
> +
> + /* Crawl the working copy for commit items. */
> + if ((cmt_err = svn_client__harvest_committables(&committables,
> + &lock_tokens,
> + base_dir_access,
> + rel_targets,
> + depth,
> + ! keep_locks,
> + changelists,
> + ctx,
> + pool)))
> + goto cleanup;
> +
> + /* ### todo: Currently there should be only one hash entry, which
> + has a hacked name until we have the entries files storing
> + canonical repository URLs. Then, the hacked name can go away
> + and be replaced with a canonical repos URL, and from there we
> + are poised to start handling nested working copies. See
> + http://subversion.tigris.org/issues/show_bug.cgi?id=960. */
> + if (! ((commit_items = apr_hash_get(committables,
> + SVN_CLIENT__SINGLE_REPOS_NAME,
> + APR_HASH_KEY_STRING))))
> + goto cleanup;
> +
> + /* If our array of targets contains only locks (and no actual file
> + or prop modifications), then we return here to avoid committing a
> + revision with no changes. */
> + {
> + svn_boolean_t not_found_changed_path = TRUE;
> +
> +
> + cmt_err = svn_iter_apr_array(&not_found_changed_path,
> + commit_items,
> + commit_item_is_changed, NULL, pool);
> + if (not_found_changed_path || cmt_err)
> + goto cleanup;
> + }
> +
> + /* Go get a log message. If an error occurs, or no log message is
> + specified, abort the operation. */
> + if (SVN_CLIENT__HAS_LOG_MSG_FUNC(ctx))
> + {
> + const char *tmp_file;
> + cmt_err = svn_client__get_log_msg(&log_msg, &tmp_file, commit_items,
> + ctx, pool);
> +
> + if (cmt_err || (! log_msg))
> + goto cleanup;
> + }
> + else
> + log_msg = "";
> +
> + /* Sort and condense our COMMIT_ITEMS. */
> + if ((cmt_err = svn_client__condense_commit_items(&base_url,
> + commit_items,
> + pool)))
> + goto cleanup;
> +
> + /* Collect our lock tokens with paths relative to base_url. */
> + if ((cmt_err = collect_lock_tokens(&lock_tokens, lock_tokens, base_url,
> + pool)))
> + goto cleanup;
> +
> + if ((cmt_err = get_ra_editor(&ra_session,
> + &editor, &edit_baton, ctx,
> + base_url, base_dir, base_dir_access, log_msg,
> + commit_items, revprop_table, commit_info_p,
> + TRUE, lock_tokens, keep_locks, pool)))
> + goto cleanup;
> +
> + /* Make a note that we have a commit-in-progress. */
> + commit_in_progress = TRUE;
> +
> + if ((cmt_err = svn_dirent_get_absolute(&current_dir,
> + current_dir, pool)))
> + goto cleanup;
> +
> + /* Determine prefix to strip from the commit notify messages */
> + notify_prefix = svn_dirent_get_longest_ancestor(current_dir, base_dir, pool);
> +
> + /* Perform the commit. */
> + cmt_err = svn_client__do_commit(base_url, commit_items, base_dir_access,
> + editor, edit_baton,
> + notify_prefix,
> + &tempfiles, &checksums, ctx, pool);
> +
> + /* Handle a successful commit. */
> + if ((! cmt_err)
> + || (cmt_err->apr_err == SVN_ERR_REPOS_POST_COMMIT_HOOK_FAILED))
> + {
> + svn_wc_committed_queue_t *queue = svn_wc_committed_queue_create(pool);
> + struct post_commit_baton btn;
> +
> + btn.queue = queue;
> + btn.qpool = pool;
> + btn.base_dir_access = base_dir_access;
> + btn.keep_changelists = keep_changelists;
> + btn.keep_locks = keep_locks;
> + btn.checksums = checksums;
> +
> + /* Make a note that our commit is finished. */
> + commit_in_progress = FALSE;
> +
> + bump_err = svn_iter_apr_array(NULL, commit_items,
> + post_process_commit_item, &btn,
> + pool);
> + if (bump_err)
> + goto cleanup;
> +
> + SVN_ERR_ASSERT(*commit_info_p);
> + bump_err
> + = svn_wc_process_committed_queue(queue, base_dir_access,
> + (*commit_info_p)->revision,
> + (*commit_info_p)->date,
> + (*commit_info_p)->author,
> + pool);
> + }
> +
> + /* Sleep to ensure timestamp integrity. */
> + svn_io_sleep_for_timestamps(base_dir, pool);
> +
> + cleanup:
> + /* Abort the commit if it is still in progress. */
> + if (commit_in_progress)
> + svn_error_clear(editor->abort_edit(edit_baton, pool));
> +
> + /* A bump error is likely to occur while running a working copy log file,
> + explicitly unlocking and removing temporary files would be wrong in
> + that case. A commit error (cmt_err) should only occur before any
> + attempt to modify the working copy, so it doesn't prevent explicit
> + if (! bump_err)
> + {
> + unlock_err = svn_wc_adm_close2(base_dir_access, pool);
> +
> + if (! unlock_err)
> + cleanup_err = remove_tmpfiles(tempfiles, pool);
> + }
> +
> + /* As per our promise, if *commit_info_p isn't set, provide a default where
> + rev = SVN_INVALID_REVNUM. */
> + if (! *commit_info_p)
> + *commit_info_p = svn_create_commit_info(pool);
> +
> + return reconcile_errors(cmt_err, unlock_err, bump_err, cleanup_err, pool);
> +}
> +
> +static svn_error_t *
> +collect_commit_packets(apr_array_header_t **commit_packets,
> + const apr_array_header_t *targets,
> + const char *base_dir,
> + svn_client_ctx_t *ctx,
> + apr_pool_t *pool)
> +{
> + commit_packet_t *commit_packet;
> + svn_boolean_t find_parent;
> + apr_pool_t *scratch_pool;
> + apr_pool_t *iter_pool;
> + svn_wc_adm_access_t *base_dir_access;
> + svn_error_t *lock_err = SVN_NO_ERROR;
> + const char *rel_target;
> + const char *target;
> + const char *new_base_dir;
> + char *dirname;
> + int i, j;
> +
> + *commit_packets = apr_array_make(pool, 1, sizeof(commit_packet));
> + scratch_pool = svn_pool_create(pool);
> +
> + for (i = 0; i < targets->nelts; i++)
> + {
> + svn_pool_clear(scratch_pool);
> + iter_pool = svn_pool_create(scratch_pool);
> + target = APR_ARRAY_IDX(targets, i, const char *);
> + find_parent = FALSE;
> +
> + /* Try to find target's locked parent. */
> + for (j = 0; j < (*commit_packets)->nelts; j++)
> + {
> + svn_pool_clear(iter_pool);
> + commit_packet = APR_ARRAY_IDX(*commit_packets, j, commit_packet_t *);
> + rel_target = svn_dirent_is_child(commit_packet->base_dir,
> + target, iter_pool);
> + if (rel_target)
> + {
> + APR_ARRAY_PUSH(commit_packet->targets, const char *) = target;
> + APR_ARRAY_PUSH(commit_packet->rel_targets, const char *)
> + = apr_pstrdup(pool, rel_target);
> + find_parent = TRUE;
> + break;
> + }
> + }
> + svn_pool_destroy(iter_pool);
> +
> + /* No suitable working copy root was found for the current target path.
> + * Walk the current target path downwards, starting from the common
> + * root (the root which we could not lock, in the code this is often
> + * called the "base_dir").
> + * Try to lock the current directory at each step.
> + * If locking succeeds, we have found a new WC root!
> + * Store its access baton in the set of known working copy roots.
> + * Put the current target path into the group of the root we just found.
> + */
> + if (! find_parent)
> + {
> + new_base_dir = apr_pstrdup(scratch_pool, base_dir);
> + rel_target = svn_dirent_is_child(new_base_dir, target,
> + scratch_pool);
> + iter_pool = svn_pool_create(scratch_pool);
> + while (1)
> + {
> + svn_pool_clear(iter_pool);
> + dirname = svn_dirent_dirname(rel_target, iter_pool);
> + new_base_dir = svn_dirent_join(new_base_dir, dirname,
> + iter_pool);
> + lock_err = svn_wc_adm_open3(&base_dir_access, NULL,
> + new_base_dir,
> + TRUE, /* Write lock */
> + -1, /* lock levels */
> + ctx->cancel_func,
> + ctx->cancel_baton,
> + iter_pool);
> + if (! lock_err)
> + {
> + commit_packet = create_commit_packet(new_base_dir, target,
> + rel_target,
> + base_dir_access, pool);
> + APR_ARRAY_PUSH(*commit_packets, commit_packet_t *)
> + = commit_packet;
> + }
> + else
> + if (lock_err->apr_err == SVN_ERR_WC_NOT_DIRECTORY)
> + continue;
> + else
> + {
> + svn_pool_destroy(iter_pool);
> + svn_pool_destroy(scratch_pool);
> + return lock_err;
> + }
> + }
> + svn_pool_destroy(iter_pool);
> + }
> + }
> + svn_pool_destroy(scratch_pool);
> + return SVN_NO_ERROR;
> +}
> +
> +svn_error_t *
> +svn_client_commit5(apr_array_header_t **commit_info,
> + const apr_array_header_t *targets,
> + svn_depth_t depth,
> + svn_boolean_t keep_locks,
> + svn_boolean_t keep_changelists,
> + const apr_array_header_t *changelists,
> + const apr_hash_t *revprop_table,
> + svn_client_ctx_t *ctx,
> + apr_pool_t *pool)
> +{
> + const char *base_dir;
> + const char *target;
> + apr_array_header_t *rel_targets;
> + apr_array_header_t *dirs_to_lock;
> + apr_array_header_t *dirs_to_lock_recursive;
> + apr_array_header_t *commit_packets;
> + svn_boolean_t lock_base_dir_recursive = FALSE;
> + apr_pool_t *iter_pool;
> + svn_wc_adm_access_t *base_dir_access;
> + svn_commit_info_t *commit_info_p;
> + commit_packet_t *commit_packet;
> + svn_error_t *lock_err = SVN_NO_ERROR, *cmt_err = SVN_NO_ERROR;
> + int i;
> +
> + *commit_info = apr_array_make(pool, 1, sizeof(commit_info_p));
> +
> + /* Committing URLs doesn't make sense, so error if it's tried. */
> + for (i = 0; i < targets->nelts; i++)
> + {
> + target = APR_ARRAY_IDX(targets, i, const char *);
> + if (svn_path_is_url(target))
> + return svn_error_createf
> + (SVN_ERR_ILLEGAL_TARGET, NULL,
> + _("'%s' is a URL, but URLs cannot be commit targets"),
> target);
> + }
> +
> + /* Condense the target list. */
> + SVN_ERR(svn_dirent_condense_targets(&base_dir, &rel_targets, targets,
> + depth == svn_depth_infinity,
> + pool, pool));
> +
> + /* No targets means nothing to commit, so just return. */
> + if (! base_dir)
> + {
> + /* As per our promise, if *commit_info isn't set, provide a
> + * default where rev = SVN_INVALID_REVNUM. */
> + commit_info_p = svn_create_commit_info(pool);
> + APR_ARRAY_PUSH(*commit_info, svn_commit_info_t *) = commit_info_p;
> +
> + return SVN_NO_ERROR;
> + }
> +
> + /* When svn_path_condense_targets() was written, we didn't have real
> + * depths, we just had recursive / nonrecursive.
> + *
> + * Nowadays things are more complex. If depth == svn_depth_files,
> + * for example, and two targets are "foo" and "foo/bar", then
> + * ideally we should condense out "foo/bar" if it's a file and not
> + * if it's a directory. And, of course, later when we get adm
> + * access batons for the commit, we'd ideally lock directories to
> + * precisely the depth required and no deeper.
> + *
> + * But for now we don't do that. Instead, we lock recursively from
> + * base_dir, if depth indicates that we might need anything below
> + * there (but note that above, we don't condense away targets that
> + * need to be named explicitly when depth != svn_depth_infinity).
> + *
> + * Here's a case where this all matters:
> + *
> + * $ svn st -q
> + * M A/D/G/rho
> + * M iota
> + * $ svn ci -m "log msg" --depth=immediates . A/D/G
> + *
> + * If we don't lock base_dir recursively, then it will get an error...
> + *
> + * subversion/libsvn_wc/lock.c:570: (apr_err=155004)
> + * svn: Working copy '/blah/blah/blah/wc' locked
> + * svn: run 'svn cleanup' to remove locks \
> + * (type 'svn help cleanup' for details)
> + *
> + * ...because later (see dirs_to_lock_recursively and dirs_to_lock)
> + * we'd call svn_wc_adm_open3() to get access objects for "" and
> + * "A/D/G", but the request for "" would fail because
> base_dir_access
> + * would already be open for that directory. (In that circumstance,
> + * you're supposed to use svn_wc_adm_retrieve() instead; but it
> + * would be clumsy to have a conditional path just to decide between
> + * open3() and retrieve().)
> + *
> + * (Note that the results would be the same if even the working copy
> + * were an explicit argument, e.g.:
> + * 'svn ci -m "log msg" --depth=immediates wc wc/A/D/G'.)
> + *
> + * So we set lock_base_dir_recursive=TRUE now, and end up locking
> + * more than we need to, but this keeps the code simple and correct.
> + *
> + * In an inspired bit of foresight, the adm locking code anticipated
> + * the eventual addition of svn_depth_immediates, and allows us to
> + * set the exact number of lock levels. So optimizing the code here
> + * at least shouldn't require any changes to the adm locking system.
> + */
> + if (depth == svn_depth_files || depth == svn_depth_immediates)
> + {
> + for (i = 0; i < rel_targets->nelts; ++i)
> + {
> + const char *rel_target = APR_ARRAY_IDX(rel_targets, i, const char *);
> +
> + if (rel_target[0] == '\0')
> + {
> + lock_base_dir_recursive = TRUE;
> + break;
> + }
> + }
> + }
> +
> + /* Prepare an array to accumulate dirs to lock */
> + dirs_to_lock = apr_array_make(pool, 1, sizeof(target));
> + dirs_to_lock_recursive = apr_array_make(pool, 1, sizeof(target));
> +
> + /* If we calculated only a base_dir and no relative targets, this
> + must mean that we are being asked to commit (effectively) a
> + single path. */
> + if ((! rel_targets) || (! rel_targets->nelts))
> + {
> + const char *parent_dir, *name;
> +
> + SVN_ERR(svn_wc_get_actual_target(base_dir, &parent_dir, &name, pool));
> + if (*name)
> + {
> + svn_node_kind_t kind;
> +
> + /* Our new "grandfather directory" is the parent directory
> + of the former one. */
> + base_dir = apr_pstrdup(pool, parent_dir);
> +
> + /* Make the array if it wasn't already created. */
> + if (! rel_targets)
> + rel_targets = apr_array_make(pool, targets->nelts, sizeof(name));
> +
> + /* Now, push this name as a relative path to our new
> + base directory. */
> + APR_ARRAY_PUSH(rel_targets, const char *) = name;
> +
> + target = svn_dirent_join(base_dir, name, pool);
> + SVN_ERR(svn_io_check_path(target, &kind, pool));
> +
> + /* If the final target is a dir, we want to recursively lock it */
> + if (kind == svn_node_dir)
> + {
> + if (depth == svn_depth_infinity || depth == svn_depth_immediates)
> + APR_ARRAY_PUSH(dirs_to_lock_recursive, const char *) = target;
> + else
> + APR_ARRAY_PUSH(dirs_to_lock, const char *) = target;
> + }
> + }
> + else
> + {
> + /* Unconditionally lock recursively down from base_dir. */
> + lock_base_dir_recursive = TRUE;
> + }
> + }
> + else if (! lock_base_dir_recursive)
> + {
> + apr_pool_t *subpool = svn_pool_create(pool);
> +
> + SVN_ERR(adjust_rel_targets(&base_dir, &rel_targets,
> + base_dir, rel_targets,
> + pool));
> +
> + for (i = 0; i < rel_targets->nelts; i++)
> + {
> + svn_node_kind_t kind;
> +
> + svn_pool_clear(subpool);
> +
> + target = svn_dirent_join(base_dir,
> + APR_ARRAY_IDX(rel_targets, i, const char *),
> + subpool);
> +
> + SVN_ERR(svn_io_check_path(target, &kind, subpool));
> +
> + /* If the final target is a dir, we want to lock it */
> + if (kind == svn_node_dir)
> + {
> + /* Notice how here we test infinity||immediates, but up
> + in the call to svn_path_condense_targets(), we only
> + tested depth==infinity. That's because condensation
> + and adm lock acquisition serve different purposes. */
> + if (depth == svn_depth_infinity || depth == svn_depth_immediates)
> + APR_ARRAY_PUSH(dirs_to_lock_recursive,
> + const char *) = apr_pstrdup(pool, target);
> + else
> + /* Don't lock if target is the base_dir, base_dir will be
> + locked anyway and we can't lock it twice */
> + if (strcmp(target, base_dir) != 0)
> + APR_ARRAY_PUSH(dirs_to_lock,
> + const char *) = apr_pstrdup(pool, target);
> + }
> +
> + /* Now we need to iterate over the parent paths of this path
> + adding them to the set of directories we want to lock.
> + Do nothing if target is already the base_dir. */
> + if (strcmp(target, base_dir) != 0)
> + {
> + target = svn_dirent_dirname(target, subpool);
> +
> + while (strcmp(target, base_dir) != 0)
> + {
> + const char *parent_dir;
> +
> + APR_ARRAY_PUSH(dirs_to_lock,
> + const char *) = apr_pstrdup(pool, target);
> +
> + parent_dir = svn_dirent_dirname(target, subpool);
> +
> + if (strcmp(parent_dir, target) == 0)
> + break; /* Reached root directory */
> +
> + target = parent_dir;
> + }
> + }
> + }
> +
> + svn_pool_destroy(subpool);
> + }
> +
> + lock_err = svn_wc_adm_open3(&base_dir_access, NULL, base_dir,
> + TRUE, /* Write lock */
> + lock_base_dir_recursive ? -1 : 0, /* lock levels */
> + ctx->cancel_func, ctx->cancel_baton,
> + pool);
> +
> + if (!lock_err && !lock_base_dir_recursive)
> + {
> + apr_array_header_t *unique_dirs_to_lock;
> + struct lock_dirs_baton btn;
> +
> + /* Sort the paths in a depth-last directory-ish order. */
> + qsort(dirs_to_lock->elts, dirs_to_lock->nelts,
> + dirs_to_lock->elt_size, svn_sort_compare_paths);
> + qsort(dirs_to_lock_recursive->elts, dirs_to_lock_recursive->nelts,
> + dirs_to_lock_recursive->elt_size, svn_sort_compare_paths);
> +
> + /* Remove any duplicates */
> + SVN_ERR(svn_path_remove_redundancies(&unique_dirs_to_lock,
> + dirs_to_lock_recursive,
> + pool));
> + dirs_to_lock_recursive = unique_dirs_to_lock;
> +
> + /* Remove dirs and descendants from dirs_to_lock if there is
> + any ancestor in dirs_to_lock_recursive */
> + SVN_ERR(remove_redundancies(&unique_dirs_to_lock,
> + dirs_to_lock,
> + dirs_to_lock_recursive,
> + pool));
> + dirs_to_lock = unique_dirs_to_lock;
> +
> + btn.base_dir_access = base_dir_access;
> + btn.ctx = ctx;
> + btn.levels_to_lock = 0;
> + /* First lock all the dirs to be locked non-recursively */
> + if (dirs_to_lock)
> + SVN_ERR(svn_iter_apr_array(NULL, dirs_to_lock,
> + lock_dirs_for_commit, &btn, pool));
> +
> + /* Lock the rest of the targets (recursively) */
> + btn.levels_to_lock = -1;
> + if (dirs_to_lock_recursive)
> + SVN_ERR(svn_iter_apr_array(NULL, dirs_to_lock_recursive,
> + lock_dirs_for_commit, &btn, pool));
> + }
> +
> + /* do commit */
> + if (!lock_err)
> + {
> + commit_info_p = NULL;
> + cmt_err = do_commit(&commit_info_p, base_dir_access, base_dir, targets,
> + rel_targets, depth, keep_locks, keep_changelists,
> + changelists, revprop_table, ctx, pool);
> + if (commit_info_p)
> + APR_ARRAY_PUSH(*commit_info, svn_commit_info_t *) = commit_info_p;
> + return cmt_err;
> + }
> + else
> + if (lock_err->apr_err == SVN_ERR_WC_NOT_DIRECTORY)
> + {
> + SVN_ERR(collect_commit_packets(&commit_packets, targets, base_dir,
> + ctx, pool));
> +
> + /* Commit in turn */
> + iter_pool = svn_pool_create(pool);
> + for (i = 0; i < commit_packets->nelts; i++)
> + {
> + svn_pool_clear(iter_pool);
> + commit_info_p = NULL;
> + commit_packet = APR_ARRAY_IDX(commit_packets, i, commit_packet_t *);
> +
> + cmt_err = do_commit(&commit_info_p,
> + commit_packet->base_dir_access,
> + commit_packet->base_dir,
> + commit_packet->targets,
> + commit_packet->rel_targets,
> + depth, keep_locks, keep_changelists,
> + changelists, revprop_table, ctx,
> + iter_pool);
> + if (commit_info_p)
> + APR_ARRAY_PUSH(*commit_info, svn_commit_info_t *) = commit_info_p;
> + SVN_ERR(cmt_err);
> + }
> + svn_pool_destroy(iter_pool);
> + }
> +
> + return lock_err;
> +}
>
> Best Wishes!
> Huihuang
> --------------
> yellow.flying
> 2009-07-01
>