diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index c3124a643bb..890d819a6ae 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -402,6 +402,10 @@ authentication_db = _users ; max_iterations, password_scheme, password_regexp, proxy_use_secret, ; public_fields, secret, users_db_public, cookie_domain, same_site +; Per document access settings +[per_doc_access] +;enable = false + ; CSP (Content Security Policy) Support [csp] ;utils_enable = true diff --git a/src/chttpd/src/chttpd.erl b/src/chttpd/src/chttpd.erl index c8e6fdc9741..f3cc52e4995 100644 --- a/src/chttpd/src/chttpd.erl +++ b/src/chttpd/src/chttpd.erl @@ -1034,6 +1034,8 @@ error_info({bad_request, Error, Reason}) -> {400, couch_util:to_binary(Error), couch_util:to_binary(Reason)}; error_info({query_parse_error, Reason}) -> {400, <<"query_parse_error">>, Reason}; +error_info(access) -> + {403, <<"forbidden">>, <<"access">>}; error_info(database_does_not_exist) -> {404, <<"not_found">>, <<"Database does not exist.">>}; error_info(not_found) -> diff --git a/src/chttpd/src/chttpd_db.erl b/src/chttpd/src/chttpd_db.erl index e2de301b287..6911b5ecc32 100644 --- a/src/chttpd/src/chttpd_db.erl +++ b/src/chttpd/src/chttpd_db.erl @@ -958,16 +958,18 @@ view_cb(Msg, Acc) -> couch_mrview_http:view_cb(Msg, Acc). db_doc_req(#httpd{method = 'DELETE'} = Req, Db, DocId) -> - % check for the existence of the doc to handle the 404 case. - couch_doc_open(Db, DocId, nil, []), - case chttpd:qs_value(Req, "rev") of + % fetch the old doc revision, so we can compare access control + % in send_update_doc() later. 
+ Doc0 = couch_doc_open(Db, DocId, nil, [{user_ctx, Req#httpd.user_ctx}]), + Rev = chttpd:qs_value(Req, "rev"), + case Rev of undefined -> Body = {[{<<"_deleted">>, true}]}; Rev -> Body = {[{<<"_rev">>, ?l2b(Rev)}, {<<"_deleted">>, true}]} end, - Doc = couch_doc_from_req(Req, Db, DocId, Body), - send_updated_doc(Req, Db, DocId, Doc); + Doc = #doc{revs = Rev, body = Body, deleted = true, access = Doc0#doc.access}, + send_updated_doc(Req, Db, DocId, couch_doc_from_req(Req, Db, DocId, Doc)); db_doc_req(#httpd{method = 'GET', mochi_req = MochiReq} = Req, Db, DocId) -> #doc_query_args{ rev = Rev0, @@ -1417,6 +1419,8 @@ receive_request_data(Req, LenLeft) when LenLeft > 0 -> receive_request_data(_Req, _) -> throw(<<"expected more data">>). +update_doc_result_to_json({#doc{id = Id, revs = Rev}, access}) -> + update_doc_result_to_json({{Id, Rev}, access}); update_doc_result_to_json({error, _} = Error) -> {_Code, Err, Msg} = chttpd:error_info(Error), {[ @@ -1936,9 +1940,11 @@ parse_doc_query(Req) -> lists:foldl(fun parse_doc_query/2, #doc_query_args{}, chttpd:qs(Req)). 
parse_shards_opt(Req) -> + AccessValue = list_to_existing_atom(chttpd:qs_value(Req, "access", "false")), [ {n, parse_shards_opt("n", Req, config:get_integer("cluster", "n", 3))}, {q, parse_shards_opt("q", Req, config:get_integer("cluster", "q", 2))}, + {access, parse_shards_opt("access", Req, AccessValue)}, {placement, parse_shards_opt( "placement", Req, config:get("cluster", "placement") @@ -1967,12 +1973,26 @@ parse_shards_opt("placement", Req, Default) -> throw({bad_request, Err}) end end; +parse_shards_opt("access", _Req, true) -> + case config:get_boolean("per_doc_access", "enable", false) of + true -> + true; + false -> + Err = <<"The `access` option is not available on this CouchDB installation.">>, + throw({bad_request, Err}) + end; +parse_shards_opt("access", _Req, false) -> + false; +parse_shards_opt("access", _Req, _Value) -> + Err = <<"The `access` value should be a boolean.">>, + throw({bad_request, Err}); parse_shards_opt(Param, Req, Default) -> Val = chttpd:qs_value(Req, Param, Default), - Err = ?l2b(["The `", Param, "` value should be a positive integer."]), case couch_util:validate_positive_int(Val) of true -> Val; - false -> throw({bad_request, Err}) + false -> + Err = ?l2b(["The `", Param, "` value should be a positive integer."]), + throw({bad_request, Err}) end. parse_engine_opt(Req) -> diff --git a/src/couch/include/couch_db.hrl b/src/couch/include/couch_db.hrl index 9c1df21b690..3ce2c78d08e 100644 --- a/src/couch/include/couch_db.hrl +++ b/src/couch/include/couch_db.hrl @@ -67,7 +67,8 @@ -record(doc_info, { id = <<"">>, high_seq = 0, - revs = [] % rev_info + revs = [], % rev_info + access = [] }). -record(size_info, { @@ -80,7 +81,8 @@ update_seq = 0, deleted = false, rev_tree = [], - sizes = #size_info{} + sizes = #size_info{}, + access = [] }). -record(httpd, { @@ -124,7 +126,8 @@ % key/value tuple of meta information, provided when using special options: % couch_db:open_doc(Db, Id, Options). - meta = [] + meta = [], + access = [] }). 
@@ -210,7 +213,8 @@ ptr, seq, sizes = #size_info{}, - atts = [] + atts = [], + access = [] }). -record (fabric_changes_acc, { diff --git a/src/couch/src/couch_access_native_proc.erl b/src/couch/src/couch_access_native_proc.erl new file mode 100644 index 00000000000..494221a5e9b --- /dev/null +++ b/src/couch/src/couch_access_native_proc.erl @@ -0,0 +1,137 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_access_native_proc). +-behavior(gen_server). + +-export([ + start_link/0, + set_timeout/2, + prompt/2 +]). + +-export([ + init/1, + terminate/2, + handle_call/3, + handle_cast/2, + handle_info/2, + code_change/3 +]). + +-record(st, { + indexes = [], + % TODO: make configurable + timeout = 5000 +}). + +start_link() -> + gen_server:start_link(?MODULE, [], []). + +set_timeout(Pid, TimeOut) when is_integer(TimeOut), TimeOut > 0 -> + gen_server:call(Pid, {set_timeout, TimeOut}). + +prompt(Pid, Data) -> + gen_server:call(Pid, {prompt, Data}). + +init(_) -> + {ok, #st{}}. + +terminate(_Reason, _St) -> + ok. 
+ +handle_call({set_timeout, TimeOut}, _From, St) -> + {reply, ok, St#st{timeout = TimeOut}}; +handle_call({prompt, [<<"reset">>]}, _From, St) -> + {reply, true, St#st{indexes = []}}; +handle_call({prompt, [<<"reset">>, _QueryConfig]}, _From, St) -> + {reply, true, St#st{indexes = []}}; +handle_call({prompt, [<<"add_fun">>, _IndexInfo]}, _From, St) -> + {reply, true, St}; +handle_call({prompt, [<<"map_doc">>, Doc]}, _From, St) -> + {reply, map_doc(St, mango_json:to_binary(Doc)), St}; +handle_call({prompt, [<<"reduce">>, _, _]}, _From, St) -> + {reply, null, St}; +handle_call({prompt, [<<"rereduce">>, _, _]}, _From, St) -> + {reply, null, St}; +handle_call({prompt, [<<"index_doc">>, _Doc]}, _From, St) -> + {reply, [[]], St}; +handle_call(Msg, _From, St) -> + {stop, {invalid_call, Msg}, {invalid_call, Msg}, St}. + +handle_cast(garbage_collect, St) -> + erlang:garbage_collect(), + {noreply, St}; +handle_cast(Msg, St) -> + {stop, {invalid_cast, Msg}, St}. + +handle_info(Msg, St) -> + {stop, {invalid_info, Msg}, St}. + +code_change(_OldVsn, St, _Extra) -> + {ok, St}. + +% Return value is an array of arrays, first dimension is the different indexes +% [0] will be by-access-id // for this test, later we should make this by-access +% -seq, since that one we will always need, and by-access-id can be opt-in. +% the second dimension is the number of emit kv pairs: +% [ // the return value +% [ // the first view +% ['k1', 'v1'], // the first k/v pair for the first view +% ['k2', 'v2'] // second, etc. 
+% ], +% [ // second view +% ['l1', 'w1'] // first k/v par in second view +% ] +% ] +% {"id":"account/bongel","key":"account/bongel","value":{"rev":"1-967a00dff5e02add41819138abb3284d"}}, + +map_doc(_St, {Doc}) -> + case couch_util:get_value(<<"_access">>, Doc) of + undefined -> + % do not index this doc + [[], []]; + Access when is_list(Access) -> + Id = couch_util:get_value(<<"_id">>, Doc), + Rev = couch_util:get_value(<<"_rev">>, Doc), + Seq = couch_util:get_value(<<"_seq">>, Doc), + Deleted = couch_util:get_value(<<"_deleted">>, Doc, false), + BodySp = couch_util:get_value(<<"_body_sp">>, Doc), + % by-access-id + ById = + case Deleted of + false -> + lists:map( + fun(UserOrRole) -> + [ + [[UserOrRole, Id], Rev] + ] + end, + Access + ); + _True -> + [[]] + end, + + % by-access-seq + BySeq = lists:map( + fun(UserOrRole) -> + [ + [[UserOrRole, Seq], [{rev, Rev}, {deleted, Deleted}, {body_sp, BodySp}]] + ] + end, + Access + ), + ById ++ BySeq; + _Else -> + [[], []] + end. diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl index 7bc02146e9f..dfe7abafd2b 100644 --- a/src/couch/src/couch_bt_engine.erl +++ b/src/couch/src/couch_bt_engine.erl @@ -664,20 +664,24 @@ id_tree_split(#full_doc_info{} = Info) -> update_seq = Seq, deleted = Deleted, sizes = SizeInfo, - rev_tree = Tree + rev_tree = Tree, + access = Access } = Info, - {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree)}}. + {Id, {Seq, ?b2i(Deleted), split_sizes(SizeInfo), disk_tree(Tree), split_access(Access)}}. 
id_tree_join(Id, {HighSeq, Deleted, DiskTree}) -> % Handle old formats before data_size was added id_tree_join(Id, {HighSeq, Deleted, #size_info{}, DiskTree}); id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree}) -> + id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree, []}); +id_tree_join(Id, {HighSeq, Deleted, Sizes, DiskTree, Access}) -> #full_doc_info{ id = Id, update_seq = HighSeq, deleted = ?i2b(Deleted), sizes = couch_db_updater:upgrade_sizes(Sizes), - rev_tree = rev_tree(DiskTree) + rev_tree = rev_tree(DiskTree), + access = join_access(Access) }. id_tree_reduce(reduce, FullDocInfos) -> @@ -714,21 +718,27 @@ seq_tree_split(#full_doc_info{} = Info) -> update_seq = Seq, deleted = Del, sizes = SizeInfo, - rev_tree = Tree + rev_tree = Tree, + access = Access } = Info, - {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree)}}. + {Seq, {Id, ?b2i(Del), split_sizes(SizeInfo), disk_tree(Tree), split_access(Access)}}. seq_tree_join(Seq, {Id, Del, DiskTree}) when is_integer(Del) -> seq_tree_join(Seq, {Id, Del, {0, 0}, DiskTree}); seq_tree_join(Seq, {Id, Del, Sizes, DiskTree}) when is_integer(Del) -> + seq_tree_join(Seq, {Id, Del, Sizes, DiskTree, []}); +seq_tree_join(Seq, {Id, Del, Sizes, DiskTree, Access}) when is_integer(Del) -> #full_doc_info{ id = Id, update_seq = Seq, deleted = ?i2b(Del), sizes = join_sizes(Sizes), - rev_tree = rev_tree(DiskTree) + rev_tree = rev_tree(DiskTree), + access = join_access(Access) }; seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) -> + seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos, []}); +seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos, Access}) -> % Older versions stored #doc_info records in the seq_tree. % Compact to upgrade. Revs = lists:map( @@ -746,7 +756,8 @@ seq_tree_join(KeySeq, {Id, RevInfos, DeletedRevInfos}) -> #doc_info{ id = Id, high_seq = KeySeq, - revs = Revs ++ DeletedRevs + revs = Revs ++ DeletedRevs, + access = Access }. 
seq_tree_reduce(reduce, DocInfos) -> @@ -755,6 +766,9 @@ seq_tree_reduce(reduce, DocInfos) -> seq_tree_reduce(rereduce, Reds) -> lists:sum(Reds). +join_access(Access) -> Access. +split_access(Access) -> Access. + local_tree_split(#doc{revs = {0, [Rev]}} = Doc) when is_binary(Rev) -> #doc{ id = Id, diff --git a/src/couch/src/couch_btree.erl b/src/couch/src/couch_btree.erl index b974a22eeca..7a8c47d71a8 100644 --- a/src/couch/src/couch_btree.erl +++ b/src/couch/src/couch_btree.erl @@ -16,6 +16,7 @@ -export([fold/4, full_reduce/1, final_reduce/2, size/1, foldl/3, foldl/4]). -export([fold_reduce/4, lookup/2, get_state/1, set_options/2]). -export([extract/2, assemble/3, less/3]). +-export([full_reduce_with_options/2]). -include_lib("couch/include/couch_db.hrl"). @@ -109,6 +110,12 @@ full_reduce(#btree{root = nil, reduce = Reduce}) -> full_reduce(#btree{root = Root}) -> {ok, element(2, Root)}. +full_reduce_with_options(Bt, Options) -> + CountFun = fun(_SeqStart, PartialReds, 0) -> + {ok, couch_btree:final_reduce(Bt, PartialReds)} + end, + fold_reduce(Bt, CountFun, 0, Options). + size(#btree{root = nil}) -> 0; size(#btree{root = {_P, _Red}}) -> diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 2ef89ced3a6..5b603072f07 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -31,6 +31,9 @@ is_admin/1, check_is_admin/1, check_is_member/1, + validate_access/2, + check_access/2, + has_access_enabled/1, name/1, get_after_doc_read_fun/1, @@ -288,6 +291,12 @@ wait_for_compaction(#db{main_pid = Pid} = Db, Timeout) -> is_compacting(DbName) -> couch_server:is_compacting(DbName). +has_access_enabled(#db{access = true}) -> true; +has_access_enabled(_) -> false. + +is_read_from_ddoc_cache(Options) -> + lists:member(ddoc_cache, Options). 
+ delete_doc(Db, Id, Revisions) -> DeletedDocs = [#doc{id = Id, revs = [Rev], deleted = true} || Rev <- Revisions], {ok, [Result]} = update_docs(Db, DeletedDocs, []), @@ -302,17 +311,23 @@ open_doc(Db, Id, Options) -> {ok, #doc{deleted = true} = Doc} -> case lists:member(deleted, Options) of true -> - apply_open_options({ok, Doc}, Options); + {ok, Doc}; false -> {not_found, deleted} end; Else -> - apply_open_options(Else, Options) + Else end. -apply_open_options({ok, Doc}, Options) -> +apply_open_options(Db, {ok, Doc}, Options) -> + ok = validate_access(Db, Doc, Options), + apply_open_options1({ok, Doc}, Options); +apply_open_options(_Db, Else, _Options) -> + Else. + +apply_open_options1({ok, Doc}, Options) -> apply_open_options2(Doc, Options); -apply_open_options(Else, _Options) -> +apply_open_options1(Else, _Options) -> Else. apply_open_options2(Doc, []) -> @@ -358,14 +373,14 @@ open_doc_revs(Db, IdRevsOpts, Options) when is_list(IdRevsOpts) -> AllResults = open_doc_revs_int(Db, IdRevs, Options), % Apply document open options like {atts_since, ...} etc ResultsZipFun = fun(DocOpts, {ok, Results}) -> - [apply_open_options(R, DocOpts) || R <- Results] + [apply_open_options(Db, R, DocOpts) || R <- Results] end, lists:zipwith(ResultsZipFun, DocOptsOnly, AllResults). open_doc_revs(Db, Id, Revs, Options) -> increment_stat(Db, [couchdb, database_reads]), [{ok, Results}] = open_doc_revs_int(Db, [{Id, Revs}], Options), - {ok, [apply_open_options(Result, Options) || Result <- Results]}. + {ok, [apply_open_options(Db, Result, Options) || Result <- Results]}. 
% Each returned result is a list of tuples: % {Id, MissingRevs, PossibleAncestors} @@ -609,7 +624,8 @@ get_db_info(Db) -> name = Name, compactor_pid = Compactor, instance_start_time = StartTime, - committed_update_seq = CommittedUpdateSeq + committed_update_seq = CommittedUpdateSeq, + access = Access } = Db, {ok, DocCount} = get_doc_count(Db), {ok, DelDocCount} = get_del_doc_count(Db), @@ -644,7 +660,8 @@ get_db_info(Db) -> {committed_update_seq, CommittedUpdateSeq}, {compacted_seq, CompactedSeq}, {props, Props}, - {uuid, Uuid} + {uuid, Uuid}, + {access, Access} ], {ok, InfoList}. @@ -769,6 +786,55 @@ security_error_type(#user_ctx{name = null}) -> security_error_type(#user_ctx{name = _}) -> forbidden. +is_per_user_ddoc(#doc{access = []}) -> false; +is_per_user_ddoc(#doc{access = [<<"_users">>]}) -> false; +is_per_user_ddoc(_) -> true. + +validate_access(Db, Doc) -> + validate_access(Db, Doc, []). + +validate_access(Db, Doc, Options) -> + validate_access1(has_access_enabled(Db), Db, Doc, Options). + +validate_access1(false, _Db, _Doc, _Options) -> + ok; +validate_access1(true, Db, #doc{id = <<"_design", _/binary>>} = Doc, Options) -> + case is_read_from_ddoc_cache(Options) andalso is_per_user_ddoc(Doc) of + true -> throw({not_found, missing}); + _False -> validate_access2(Db, Doc) + end; +validate_access1(true, Db, #doc{} = Doc, _Options) -> + validate_access2(Db, Doc). +validate_access2(Db, Doc) -> + validate_access3(check_access(Db, Doc)). + +validate_access3(true) -> ok; +validate_access3(_) -> throw({forbidden, <<"access denied">>}). 
+ +check_access(Db, #doc{access = Access}) -> + check_access(Db, Access); +check_access(Db, Access) -> + #user_ctx{ + name = UserName, + roles = UserRoles + } = Db#db.user_ctx, + case Access of + [] -> + % if doc has no _access, userCtx must be admin + is_admin(Db); + Access -> + % if doc has _access, userCtx must be admin OR matching user or role + is_admin(Db) or (check_name(UserName, Access) or check_roles(UserRoles, Access)) + end. + +check_name(null, _Access) -> false; +check_name(UserName, Access) -> lists:member(UserName, Access). + +check_roles(Roles, Access) -> + UserRolesSet = ordsets:from_list(Roles), + RolesSet = ordsets:from_list(Access ++ ["_users"]), + not ordsets:is_disjoint(UserRolesSet, RolesSet). + get_admins(#db{security = SecProps}) -> couch_util:get_value(<<"admins">>, SecProps, {[]}). @@ -910,9 +976,14 @@ group_alike_docs([Doc | Rest], [Bucket | RestBuckets]) -> end. validate_doc_update(#db{} = Db, #doc{id = <<"_design/", _/binary>>} = Doc, _GetDiskDocFun) -> - case catch check_is_admin(Db) of - ok -> validate_ddoc(Db, Doc); - Error -> Error + case couch_doc:has_access(Doc) of + true -> + validate_ddoc(Db, Doc); + false -> + case catch check_is_admin(Db) of + ok -> validate_ddoc(Db, Doc); + Error -> Error + end end; validate_doc_update(#db{validate_doc_funs = undefined} = Db, Doc, Fun) -> ValidationFuns = load_validation_funs(Db), @@ -1330,13 +1401,41 @@ update_docs(Db, Docs0, Options, ?REPLICATED_CHANGES) -> ] || Bucket <- DocBuckets ], - {ok, _} = write_and_commit( + {ok, Results} = write_and_commit( Db, DocBuckets2, LocalDocs, [?REPLICATED_CHANGES | Options] ), - {ok, DocErrors}; + case couch_db:has_access_enabled(Db) of + false -> + % we’re done here + {ok, DocErrors}; + true -> + AccessViolations = lists:filter(fun({_Ref, Tag}) -> Tag == access end, Results), + case length(AccessViolations) of + 0 -> + % we’re done here + {ok, DocErrors}; + N when N > 0 -> + % dig out FDIs from Docs matching our tags/refs + DocsDict = lists:foldl( + 
fun(Doc, Dict) -> + Tag = doc_tag(Doc), + dict:store(Tag, Doc, Dict) + end, + dict:new(), + Docs + ), + AccessResults = lists:map( + fun({Ref, Access}) -> + {dict:fetch(Ref, DocsDict), Access} + end, + AccessViolations + ), + {ok, AccessResults} + end + end; update_docs(Db, Docs0, Options, ?INTERACTIVE_EDIT) -> BlockInteractiveDatabaseWrites = couch_disk_monitor:block_interactive_database_writes(), if @@ -1446,6 +1545,7 @@ collect_results_with_metrics(Pid, MRef, []) -> end. collect_results(Pid, MRef, ResultsAcc) -> + % TODO: do we need to receive access results here? receive {result, Pid, Result} -> collect_results(Pid, MRef, [Result | ResultsAcc]); @@ -1458,7 +1558,7 @@ collect_results(Pid, MRef, ResultsAcc) -> end. write_and_commit( - #db{main_pid = Pid, user_ctx = Ctx} = Db, + #db{main_pid = Pid, user_ctx = UserCtx0} = Db, DocBuckets1, LocalDocs, Options @@ -1466,15 +1566,21 @@ write_and_commit( DocBuckets = prepare_doc_summaries(Db, DocBuckets1), ReplicatedChanges = lists:member(?REPLICATED_CHANGES, Options), MRef = erlang:monitor(process, Pid), + UserCtx = + case has_access_enabled(Db) of + true -> UserCtx0; + false -> [] + end, + try - Pid ! {update_docs, self(), DocBuckets, LocalDocs, ReplicatedChanges}, + Pid ! {update_docs, self(), DocBuckets, LocalDocs, ReplicatedChanges, UserCtx}, case collect_results_with_metrics(Pid, MRef, []) of {ok, Results} -> {ok, Results}; retry -> % This can happen if the db file we wrote to was swapped out by % compaction. Retry by reopening the db and writing to the current file - {ok, Db2} = open(Db#db.name, [{user_ctx, Ctx}]), + {ok, Db2} = open(Db#db.name, [{user_ctx, UserCtx0}]), DocBuckets2 = [ [doc_flush_atts(Db2, Doc) || Doc <- Bucket] || Bucket <- DocBuckets1 @@ -1803,7 +1909,10 @@ fold_changes(Db, StartSeq, UserFun, UserAcc) -> fold_changes(Db, StartSeq, UserFun, UserAcc, []). fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) -> - couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts). 
+ case couch_db:has_access_enabled(Db) and not couch_db:is_admin(Db) of + true -> couch_mrview:query_changes_access(Db, StartSeq, UserFun, Opts, UserAcc); + false -> couch_db_engine:fold_changes(Db, StartSeq, UserFun, UserAcc, Opts) + end. fold_purge_infos(Db, StartPurgeSeq, Fun, Acc) -> fold_purge_infos(Db, StartPurgeSeq, Fun, Acc, []). @@ -1821,7 +1930,7 @@ open_doc_revs_int(Db, IdRevs, Options) -> lists:zipwith( fun({Id, Revs}, Lookup) -> case Lookup of - #full_doc_info{rev_tree = RevTree} -> + #full_doc_info{rev_tree = RevTree, access = Access} -> {FoundRevs, MissingRevs} = case Revs of all -> @@ -1842,7 +1951,10 @@ open_doc_revs_int(Db, IdRevs, Options) -> % we have the rev in our list but know nothing about it {{not_found, missing}, {Pos, Rev}}; #leaf{deleted = IsDeleted, ptr = SummaryPtr} -> - {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)} + {ok, + make_doc( + Db, Id, IsDeleted, SummaryPtr, FoundRevPath, Access + )} end end, FoundRevs @@ -1864,23 +1976,35 @@ open_doc_revs_int(Db, IdRevs, Options) -> open_doc_int(Db, <> = Id, Options) -> case couch_db_engine:open_local_docs(Db, [Id]) of [#doc{} = Doc] -> - apply_open_options({ok, Doc}, Options); + case Doc#doc.body of + {Body} -> + Access = couch_util:get_value(<<"_access">>, Body), + apply_open_options(Db, {ok, Doc#doc{access = Access}}, Options); + _Else -> + apply_open_options(Db, {ok, Doc}, Options) + end; [not_found] -> {not_found, missing} end; -open_doc_int(Db, #doc_info{id = Id, revs = [RevInfo | _]} = DocInfo, Options) -> +open_doc_int(Db, #doc_info{id = Id, revs = [RevInfo | _], access = Access} = DocInfo, Options) -> #rev_info{deleted = IsDeleted, rev = {Pos, RevId}, body_sp = Bp} = RevInfo, - Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos, [RevId]}), + Doc = make_doc(Db, Id, IsDeleted, Bp, {Pos, [RevId]}, Access), apply_open_options( - {ok, Doc#doc{meta = doc_meta_info(DocInfo, [], Options)}}, Options + Db, + {ok, Doc#doc{meta = doc_meta_info(DocInfo, [], Options)}}, + Options ); 
-open_doc_int(Db, #full_doc_info{id = Id, rev_tree = RevTree} = FullDocInfo, Options) -> +open_doc_int( + Db, #full_doc_info{id = Id, rev_tree = RevTree, access = Access} = FullDocInfo, Options +) -> #doc_info{revs = [#rev_info{deleted = IsDeleted, rev = Rev, body_sp = Bp} | _]} = DocInfo = couch_doc:to_doc_info(FullDocInfo), {[{_, RevPath}], []} = couch_key_tree:get(RevTree, [Rev]), - Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath), + Doc = make_doc(Db, Id, IsDeleted, Bp, RevPath, Access), apply_open_options( - {ok, Doc#doc{meta = doc_meta_info(DocInfo, RevTree, Options)}}, Options + Db, + {ok, Doc#doc{meta = doc_meta_info(DocInfo, RevTree, Options)}}, + Options ); open_doc_int(Db, Id, Options) -> case get_full_doc_info(Db, Id) of @@ -1941,21 +2065,26 @@ doc_meta_info( true -> [{local_seq, Seq}] end. -make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath) -> +make_doc(Db, Id, Deleted, Bp, {Pos, Revs}) -> + make_doc(Db, Id, Deleted, Bp, {Pos, Revs}, []). + +make_doc(_Db, Id, Deleted, nil = _Bp, RevisionPath, Access) -> #doc{ id = Id, revs = RevisionPath, body = [], atts = [], - deleted = Deleted + deleted = Deleted, + access = Access }; -make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}) -> +make_doc(#db{} = Db, Id, Deleted, Bp, {Pos, Revs}, Access) -> RevsLimit = get_revs_limit(Db), Doc0 = couch_db_engine:read_doc_body(Db, #doc{ id = Id, revs = {Pos, lists:sublist(Revs, 1, RevsLimit)}, body = Bp, - deleted = Deleted + deleted = Deleted, + access = Access }), Doc1 = case Doc0#doc.atts of diff --git a/src/couch/src/couch_db_int.hrl b/src/couch/src/couch_db_int.hrl index 7da0ce5dfe2..b67686fab88 100644 --- a/src/couch/src/couch_db_int.hrl +++ b/src/couch/src/couch_db_int.hrl @@ -37,7 +37,8 @@ waiting_delayed_commit_deprecated, options = [], - compression + compression, + access = false }). 
diff --git a/src/couch/src/couch_db_updater.erl b/src/couch/src/couch_db_updater.erl index 767a3190a6f..21ae6e9314c 100644 --- a/src/couch/src/couch_db_updater.erl +++ b/src/couch/src/couch_db_updater.erl @@ -24,6 +24,11 @@ % 10 GiB -define(DEFAULT_MAX_PARTITION_SIZE, 16#280000000). +-define(DEFAULT_SECURITY_OBJECT, [ + {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}}, + {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} +]). + -record(merge_acc, { revs_limit, replicated_changes, @@ -36,7 +41,7 @@ init({Engine, DbName, FilePath, Options0}) -> erlang:put(io_priority, {db_update, DbName}), update_idle_limit_from_config(), - DefaultSecObj = default_security_object(DbName), + DefaultSecObj = default_security_object(DbName, Options0), Options = [{default_security_object, DefaultSecObj} | Options0], try {ok, EngineState} = couch_db_engine:init(Engine, FilePath, Options), @@ -165,10 +170,10 @@ handle_cast(Msg, #db{name = Name} = Db) -> {stop, Msg, Db}. handle_info( - {update_docs, Client, GroupedDocs, LocalDocs, ReplicatedChanges}, + {update_docs, Client, GroupedDocs, LocalDocs, ReplicatedChanges, UserCtx}, Db ) -> - GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), + GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx), if LocalDocs == [] -> {GroupedDocs3, Clients} = collect_updates( @@ -255,7 +260,7 @@ handle_info(Msg, Db) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -sort_and_tag_grouped_docs(Client, GroupedDocs) -> +sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx) -> % These groups should already be sorted but sometimes clients misbehave. 
% The merge_updates function will fail and the database can end up with % duplicate documents if the incoming groups are not sorted, so as a sanity @@ -263,7 +268,7 @@ sort_and_tag_grouped_docs(Client, GroupedDocs) -> Cmp = fun([#doc{id = A} | _], [#doc{id = B} | _]) -> A < B end, lists:map( fun(DocGroup) -> - [{Client, maybe_tag_doc(D)} || D <- DocGroup] + [{Client, maybe_tag_doc(D), UserCtx} || D <- DocGroup] end, lists:sort(Cmp, GroupedDocs) ). @@ -277,11 +282,15 @@ maybe_tag_doc(#doc{id = Id, revs = {Pos, [_Rev | PrevRevs]}, meta = Meta0} = Doc Doc#doc{meta = [{ref, Key} | Meta0]} end. -merge_updates([[{_, #doc{id = X}} | _] = A | RestA], [[{_, #doc{id = X}} | _] = B | RestB]) -> +merge_updates([[{_, #doc{id = X}, _} | _] = A | RestA], [[{_, #doc{id = X}, _} | _] = B | RestB]) -> [A ++ B | merge_updates(RestA, RestB)]; -merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X < Y -> +merge_updates([[{_, #doc{id = X}, _} | _] | _] = A, [[{_, #doc{id = Y}, _} | _] | _] = B) when + X < Y +-> [hd(A) | merge_updates(tl(A), B)]; -merge_updates([[{_, #doc{id = X}} | _] | _] = A, [[{_, #doc{id = Y}} | _] | _] = B) when X > Y -> +merge_updates([[{_, #doc{id = X}, _} | _] | _] = A, [[{_, #doc{id = Y}, _} | _] | _] = B) when + X > Y +-> [hd(B) | merge_updates(A, tl(B))]; merge_updates([], RestB) -> RestB; @@ -294,12 +303,12 @@ collect_updates(GroupedDocsAcc, ClientsAcc, ReplicatedChanges) -> % local docs. It's easier to just avoid multiple _local doc % updaters than deal with their possible conflicts, and local docs % writes are relatively rare. Can be optmized later if really needed. 
- {update_docs, Client, GroupedDocs, [], ReplicatedChanges} -> + {update_docs, Client, GroupedDocs, [], ReplicatedChanges, UserCtx} -> case ReplicatedChanges of true -> couch_stats:increment_counter([couchdb, coalesced_updates, replicated]); false -> couch_stats:increment_counter([couchdb, coalesced_updates, interactive]) end, - GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs), + GroupedDocs2 = sort_and_tag_grouped_docs(Client, GroupedDocs, UserCtx), GroupedDocsAcc2 = merge_updates(GroupedDocsAcc, GroupedDocs2), collect_updates( @@ -324,6 +333,7 @@ init_db(DbName, FilePath, EngineState, Options) -> BDU = couch_util:get_value(before_doc_update, Options, nil), ADR = couch_util:get_value(after_doc_read, Options, nil), + Access = couch_util:get_value(access, Options, false), NonCreateOpts = [Opt || Opt <- Options, Opt /= create], InitDb = #db{ @@ -333,7 +343,8 @@ init_db(DbName, FilePath, EngineState, Options) -> instance_start_time = StartTime, options = NonCreateOpts, before_doc_update = BDU, - after_doc_read = ADR + after_doc_read = ADR, + access = Access }, DbProps = couch_db_engine:get_props(InitDb), @@ -394,7 +405,8 @@ flush_trees( active = WrittenSize, external = ExternalSize }, - atts = AttSizeInfo + atts = AttSizeInfo, + access = NewDoc#doc.access }, {Leaf, add_sizes(Type, Leaf, SizesAcc)}; #leaf{} -> @@ -478,6 +490,9 @@ doc_tag(#doc{meta = Meta}) -> Else -> throw({invalid_doc_tag, Else}) end. 
+merge_rev_trees([[]], [], Acc) -> + % validate_docs_access left us with no docs to merge + {ok, Acc}; merge_rev_trees([], [], Acc) -> {ok, Acc#merge_acc{ add_infos = lists:reverse(Acc#merge_acc.add_infos) @@ -492,7 +507,7 @@ merge_rev_trees([NewDocs | RestDocsList], [OldDocInfo | RestOldInfo], Acc) -> % Track doc ids so we can debug large revision trees erlang:put(last_id_merged, OldDocInfo#full_doc_info.id), NewDocInfo0 = lists:foldl( - fun({Client, NewDoc}, OldInfoAcc) -> + fun({Client, NewDoc, _UserCtx}, OldInfoAcc) -> NewInfo = merge_rev_tree(OldInfoAcc, NewDoc, Client, ReplicatedChanges), case is_overflowed(NewInfo, OldInfoAcc, FullPartitions) of true when not ReplicatedChanges -> @@ -589,7 +604,8 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) when send_result(Client, NewDoc, {ok, {OldPos + 1, NewRevId}}), OldInfo#full_doc_info{ rev_tree = NewTree1, - deleted = false + deleted = false, + access = NewDoc#doc.access }; _ -> throw(doc_recreation_failed) @@ -610,7 +626,8 @@ merge_rev_tree(OldInfo, NewDoc, Client, false) -> {NewTree, new_leaf} when not NewDeleted -> OldInfo#full_doc_info{ rev_tree = NewTree, - deleted = false + deleted = false, + access = NewDoc#doc.access }; {NewTree, new_leaf} when NewDeleted -> % We have to check if we just deleted this % resolution. OldInfo#full_doc_info{ rev_tree = NewTree, - deleted = couch_doc:is_deleted(NewTree) + deleted = couch_doc:is_deleted(NewTree), + access = NewDoc#doc.access }; _ -> send_result(Client, NewDoc, conflict), @@ -663,7 +681,8 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges) -> UpdateSeq = couch_db_engine:get_update_seq(Db), RevsLimit = couch_db_engine:get_revs_limit(Db), - Ids = [Id || [{_Client, #doc{id = Id}} | _] <- DocsList], + Ids = [Id || [{_Client, #doc{id = Id}, _} | _] <- DocsList], + % look up the old documents, if they exist. 
OldDocLookups = couch_db_engine:open_docs(Db, Ids), OldDocInfos = lists:zipwith( @@ -711,7 +730,16 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges) -> cur_seq = UpdateSeq, full_partitions = FullPartitions }, - {ok, AccOut} = merge_rev_trees(DocsList, OldDocInfos, AccIn), + % Loop over DocsList, validate_access for each OldDocInfo on Db, + %. if no OldDocInfo, then send to DocsListValidated, keep OldDocsInfo + % if valid, then send to DocsListValidated, OldDocsInfo + %. if invalid, then send_result tagged `access`(c.f. `conflict) + %. and don’t add to DLV, nor ODI + + {DocsListValidated, OldDocInfosValidated} = validate_docs_access( + Db, DocsList, OldDocInfos + ), + {ok, AccOut} = merge_rev_trees(DocsListValidated, OldDocInfosValidated, AccIn), #merge_acc{ add_infos = NewFullDocInfos, rem_seqs = RemSeqs @@ -721,7 +749,9 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges) -> % the trees, the attachments are already written to disk) {ok, IndexFDIs} = flush_trees(Db, NewFullDocInfos, []), Pairs = pair_write_info(OldDocLookups, IndexFDIs), - LocalDocs2 = update_local_doc_revs(LocalDocs), + % TODO: local docs access needs validating + LocalDocs1 = apply_local_docs_access(Db, LocalDocs), + LocalDocs2 = update_local_doc_revs(LocalDocs1), {ok, Db1} = couch_db_engine:write_doc_infos(Db, Pairs, LocalDocs2), @@ -736,17 +766,89 @@ update_docs_int(Db, DocsList, LocalDocs, ReplicatedChanges) -> length(LocalDocs2) ), - % Check if we just updated any design documents, and update the validation - % funs if we did. - UpdatedDDocIds = lists:flatmap( - fun - (<<"_design/", _/binary>> = Id) -> [Id]; - (_) -> [] + % Check if we just updated any non-access design documents, + % and update the validation funs if we did. + UpdatedDDocIds = [ + Id + || [{_Client, #doc{id = <<"_design/", _/binary>> = Id, access = []}, _} | _] <- DocsList + ], + {ok, commit_data(Db1), UpdatedDDocIds}. 
+ +% at this point, we already validated this Db is access enabled, so do the checks right away. +check_access(Db, UserCtx, Access) -> + couch_db:check_access(Db#db{user_ctx = UserCtx}, Access). + +validate_docs_access(Db, DocsList, OldDocInfos) -> + case couch_db:has_access_enabled(Db) of + true -> validate_docs_access_int(Db, DocsList, OldDocInfos); + false -> {DocsList, OldDocInfos} + end. + +validate_docs_access_int(Db, DocsList, OldDocInfos) -> + validate_docs_access(Db, DocsList, OldDocInfos, [], []). + +validate_docs_access(_Db, [], [], DocsListValidated, OldDocInfosValidated) -> + {lists:reverse(DocsListValidated), lists:reverse(OldDocInfosValidated)}; +validate_docs_access( + Db, [Docs | DocRest], [OldInfo | OldInfoRest], DocsListValidated, OldDocInfosValidated +) -> + % loop over Docs as {Client, NewDoc} + % validate Doc + % if valid, then put back in Docs + % if not, then send_result and skip + NewDocs = lists:foldl( + fun({Client, Doc, UserCtx}, Acc) -> + % check if we are allowed to update the doc, skip when new doc + OldDocMatchesAccess = + case OldInfo#full_doc_info.rev_tree of + [] -> true; + _ -> check_access(Db, UserCtx, OldInfo#full_doc_info.access) + end, + + NewDocMatchesAccess = check_access(Db, UserCtx, Doc#doc.access), + + case OldDocMatchesAccess andalso NewDocMatchesAccess of + % if valid, then send to DocsListValidated, OldDocsInfo + true -> + % and store the access context on the new doc + [{Client, Doc, UserCtx} | Acc]; + % if invalid, then send_result tagged `access`(c.f. `conflict) + false -> + % and don’t add to DLV, nor ODI + send_result(Client, Doc, access), + Acc + end end, - Ids + [], + Docs ), - {ok, commit_data(Db1), UpdatedDDocIds}. + {NewDocsListValidated, NewOldDocInfosValidated} = + %TODO: what if only 2/3? 
+ case length(NewDocs) of + % we sent out all docs as invalid access, drop the old doc info associated with it + 0 -> + {DocsListValidated, OldDocInfosValidated}; + N when N > 0 -> + {[NewDocs | DocsListValidated], [OldInfo | OldDocInfosValidated]} + end, + validate_docs_access( + Db, DocRest, OldInfoRest, NewDocsListValidated, NewOldDocInfosValidated + ). + +apply_local_docs_access(Db, Docs) -> + apply_local_docs_access1(couch_db:has_access_enabled(Db), Docs). + +apply_local_docs_access1(false, Docs) -> + Docs; +apply_local_docs_access1(true, Docs) -> + lists:map( + fun({Client, #doc{access = Access, body = {Body}} = Doc}) -> + Doc1 = Doc#doc{body = {[{<<"_access">>, Access} | Body]}}, + {Client, Doc1} + end, + Docs + ). update_local_doc_revs(Docs) -> lists:foldl( @@ -764,6 +866,14 @@ update_local_doc_revs(Docs) -> Docs ). +default_security_object(DbName, []) -> + default_security_object(DbName); +default_security_object(DbName, Options) -> + case lists:member({access, true}, Options) of + false -> default_security_object(DbName); + true -> ?DEFAULT_SECURITY_OBJECT + end. + increment_local_doc_revs(#doc{deleted = true} = Doc) -> {ok, Doc#doc{revs = {0, [0]}}}; increment_local_doc_revs(#doc{revs = {0, []}} = Doc) -> @@ -929,20 +1039,14 @@ get_meta_body_size(Meta) -> default_security_object(<<"shards/", _/binary>>) -> case config:get("couchdb", "default_security", "admin_only") of "admin_only" -> - [ - {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}}, - {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} - ]; + ?DEFAULT_SECURITY_OBJECT; Everyone when Everyone == "everyone"; Everyone == "admin_local" -> [] end; default_security_object(_DbName) -> case config:get("couchdb", "default_security", "admin_only") of Admin when Admin == "admin_only"; Admin == "admin_local" -> - [ - {<<"members">>, {[{<<"roles">>, [<<"_admin">>]}]}}, - {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} - ]; + ?DEFAULT_SECURITY_OBJECT; "everyone" -> [] end. 
diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index 7b867f08d13..6e6cca9e4d6 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -26,6 +26,8 @@ -export([with_ejson_body/1]). -export([is_deleted/1]). +-export([has_access/1, has_no_access/1]). + -include_lib("couch/include/couch_db.hrl"). -spec to_path(#doc{}) -> path(). @@ -40,15 +42,24 @@ to_branch(Doc, [RevId | Rest]) -> [{RevId, ?REV_MISSING, to_branch(Doc, Rest)}]. % helpers used by to_json_obj +reduce_access({Access}) -> Access; +reduce_access(Access) -> Access. + to_json_rev(0, []) -> []; to_json_rev(Start, [FirstRevId | _]) -> [{<<"_rev">>, ?l2b([integer_to_list(Start), "-", revid_to_str(FirstRevId)])}]. -to_json_body(true, {Body}) -> +to_json_body(true, {Body}, []) -> Body ++ [{<<"_deleted">>, true}]; -to_json_body(false, {Body}) -> - Body. +to_json_body(false, {Body}, []) -> + Body; +to_json_body(true, {Body}, Access0) -> + Access = reduce_access(Access0), + Body ++ [{<<"_deleted">>, true}] ++ [{<<"_access">>, {Access}}]; +to_json_body(false, {Body}, Access0) -> + Access = reduce_access(Access0), + Body ++ [{<<"_access">>, Access}]. 
to_json_revisions(Options, Start, RevIds0) -> RevIds = @@ -138,14 +149,15 @@ doc_to_json_obj( deleted = Del, body = Body, revs = {Start, RevIds}, - meta = Meta + meta = Meta, + access = Access } = Doc, Options ) -> { [{<<"_id">>, Id}] ++ to_json_rev(Start, RevIds) ++ - to_json_body(Del, Body) ++ + to_json_body(Del, Body, Access) ++ to_json_revisions(Options, Start, RevIds) ++ to_json_meta(Meta) ++ to_json_attachments(Doc#doc.atts, Options) @@ -335,13 +347,8 @@ transfer_fields([{<<"_conflicts">>, _} | Rest], Doc, DbName) -> transfer_fields(Rest, Doc, DbName); transfer_fields([{<<"_deleted_conflicts">>, _} | Rest], Doc, DbName) -> transfer_fields(Rest, Doc, DbName); -% special field for per doc access control, for future compatibility -transfer_fields( - [{<<"_access">>, _} = Field | Rest], - #doc{body = Fields} = Doc, - DbName -) -> - transfer_fields(Rest, Doc#doc{body = [Field | Fields]}, DbName); +transfer_fields([{<<"_access">>, Access} | Rest], Doc, DbName) -> + transfer_fields(Rest, Doc#doc{access = Access}, DbName); % special fields for replication documents transfer_fields( [{<<"_replication_state">>, _} = Field | Rest], @@ -401,7 +408,7 @@ max_seq(Tree, UpdateSeq) -> end, couch_key_tree:fold(FoldFun, UpdateSeq, Tree). -to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) -> +to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq, access = Access}) -> RevInfosAndPath = [ {rev_info(Node), Path} || {_Leaf, Path} = Node <- @@ -419,7 +426,10 @@ to_doc_info_path(#full_doc_info{id = Id, rev_tree = Tree, update_seq = FDISeq}) ), [{_RevInfo, WinPath} | _] = SortedRevInfosAndPath, RevInfos = [RevInfo || {RevInfo, _Path} <- SortedRevInfosAndPath], - {#doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos}, WinPath}. + { + #doc_info{id = Id, high_seq = max_seq(Tree, FDISeq), revs = RevInfos, access = Access}, + WinPath + }. 
rev_info({#leaf{} = Leaf, {Pos, [RevId | _]}}) -> #rev_info{ @@ -459,6 +469,20 @@ is_deleted(Tree) -> false end. +get_access({Props}) -> + get_access(couch_doc:from_json_obj({Props})); +get_access(#doc{access = Access}) -> + Access. + +has_access(Doc) -> + has_access1(get_access(Doc)). + +has_no_access(Doc) -> + not has_access1(get_access(Doc)). + +has_access1([]) -> false; +has_access1(_) -> true. + get_validate_doc_fun(Db, {Props}) -> get_validate_doc_fun(Db, couch_doc:from_json_obj({Props})); get_validate_doc_fun(Db, #doc{body = {Props}} = DDoc) -> diff --git a/src/couch/src/couch_httpd_auth.erl b/src/couch/src/couch_httpd_auth.erl index c1e4c8e42d6..0b42760c81a 100644 --- a/src/couch/src/couch_httpd_auth.erl +++ b/src/couch/src/couch_httpd_auth.erl @@ -98,6 +98,13 @@ basic_name_pw(Req) -> nil end. +extract_roles(UserProps) -> + Roles = couch_util:get_value(<<"roles">>, UserProps, []), + case lists:member(<<"_admin">>, Roles) of + true -> Roles; + _ -> Roles ++ [<<"_users">>] + end. + default_authentication_handler(Req) -> default_authentication_handler(Req, couch_auth_cache). 
@@ -116,7 +123,7 @@ default_authentication_handler(Req, AuthModule) -> Req0 = Req#httpd{ user_ctx = #user_ctx{ name = UserName, - roles = couch_util:get_value(<<"roles">>, UserProps, []) + roles = extract_roles(UserProps) } }, case chttpd_util:get_chttpd_auth_config("secret") of @@ -199,7 +206,7 @@ proxy_auth_user(Req) -> Roles = case header_value(Req, XHeaderRoles) of undefined -> []; - Else -> re:split(Else, "\\s*,\\s*", [trim, {return, binary}]) + Else -> re:split(Else, "\\s*,\\s*", [trim, {return, binary}]) ++ [<<"_users">>] end, case chttpd_util:get_chttpd_auth_config_boolean( @@ -248,7 +255,7 @@ jwt_authentication_handler(Req) -> Req#httpd{ user_ctx = #user_ctx{ name = User, - roles = Roles + roles = Roles ++ [<<"_users">>] } } end; @@ -415,9 +422,7 @@ cookie_authentication_handler(#httpd{mochi_req = MochiReq} = Req, AuthModule) -> Req#httpd{ user_ctx = #user_ctx{ name = ?l2b(User), - roles = couch_util:get_value( - <<"roles">>, UserProps, [] - ) + roles = extract_roles(UserProps) }, auth = {FullSecret, TimeLeft < Timeout * 0.9, @@ -559,7 +564,7 @@ handle_session_req(#httpd{method = 'POST', mochi_req = MochiReq} = Req, AuthModu {[ {ok, true}, {name, UserName}, - {roles, couch_util:get_value(<<"roles">>, UserProps, [])} + {roles, extract_roles(UserProps)} ]} ); false -> diff --git a/src/couch/src/couch_proc_manager.erl b/src/couch/src/couch_proc_manager.erl index 623734e6eb1..e3396ab15ed 100644 --- a/src/couch/src/couch_proc_manager.erl +++ b/src/couch/src/couch_proc_manager.erl @@ -144,6 +144,7 @@ init([]) -> ets:insert(?SERVERS, get_servers_from_env("COUCHDB_QUERY_SERVER_")), ets:insert(?SERVERS, get_servers_from_env("COUCHDB_NATIVE_QUERY_SERVER_")), ets:insert(?SERVERS, [{"QUERY", {mango_native_proc, start_link, []}}]), + ets:insert(?SERVERS, [{"_ACCESS", {couch_access_native_proc, start_link, []}}]), maybe_configure_erlang_native_servers(), {ok, #state{ diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl index 739df28e59d..1379ce36f37 
100644 --- a/src/couch/src/couch_util.erl +++ b/src/couch/src/couch_util.erl @@ -46,6 +46,7 @@ -export([verify_hash_names/2]). -export([get_config_hash_algorithms/0]). -export([remove_sensitive_data/1]). +-export([validate_design_access/1, validate_design_access/2]). -include_lib("couch/include/couch_db.hrl"). @@ -870,3 +871,15 @@ remove_sensitive_data(KVList) -> KVList1 = lists:keyreplace(<<"password">>, 1, KVList, {<<"password">>, <<"****">>}), % some KVList entries are atoms, so test fo this too lists:keyreplace(password, 1, KVList1, {password, <<"****">>}). + +validate_design_access(DDoc) -> + validate_design_access1(DDoc, true). + +validate_design_access(Db, DDoc) -> + validate_design_access1(DDoc, couch_db:has_access_enabled(Db)). + +validate_design_access1(_DDoc, false) -> ok; +validate_design_access1(DDoc, true) -> is_users_ddoc(DDoc). + +is_users_ddoc(#doc{access = [<<"_users">>]}) -> ok; +is_users_ddoc(_) -> throw({forbidden, <<"per-user ddoc access">>}). diff --git a/src/couch/test/eunit/couchdb_access_tests.erl b/src/couch/test/eunit/couchdb_access_tests.erl new file mode 100644 index 00000000000..bd19c9a51af --- /dev/null +++ b/src/couch/test/eunit/couchdb_access_tests.erl @@ -0,0 +1,1439 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couchdb_access_tests). + +-include_lib("couch/include/couch_eunit.hrl"). + +-define(CONTENT_JSON, {"Content-Type", "application/json"}). +-define(ADMIN_REQ_HEADERS, [?CONTENT_JSON, {basic_auth, {"a", "a"}}]). 
+-define(USERX_REQ_HEADERS, [?CONTENT_JSON, {basic_auth, {"x", "x"}}]). +-define(USERY_REQ_HEADERS, [?CONTENT_JSON, {basic_auth, {"y", "y"}}]). +-define(SECURITY_OBJECT, + {[ + {<<"members">>, {[{<<"roles">>, [<<"_admin">>, <<"_users">>]}]}}, + {<<"admins">>, {[{<<"roles">>, [<<"_admin">>]}]}} + ]} +). + +url() -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + lists:concat(["http://", Addr, ":", port()]). + +before_each(_) -> + R = test_request:put(url() ++ "/db?q=1&n=1&access=true", ?ADMIN_REQ_HEADERS, ""), + %?debugFmt("~nRequest: ~p~n", [R]), + {ok, 201, _, _} = R, + {ok, _, _, _} = test_request:put( + url() ++ "/db/_security", ?ADMIN_REQ_HEADERS, jiffy:encode(?SECURITY_OBJECT) + ), + url(). + +after_each(_, Url) -> + {ok, 200, _, _} = test_request:delete(Url ++ "/db", ?ADMIN_REQ_HEADERS), + {_, _, _, _} = test_request:delete(Url ++ "/db2", ?ADMIN_REQ_HEADERS), + {_, _, _, _} = test_request:delete(Url ++ "/db3", ?ADMIN_REQ_HEADERS), + ok. + +before_all() -> + Couch = test_util:start_couch([chttpd, couch_replicator]), + Hashed = couch_passwords:hash_admin_password("a"), + ok = config:set("admins", "a", binary_to_list(Hashed), false), + ok = config:set("couchdb", "uuid", "21ac467c1bc05e9d9e9d2d850bb1108f", false), + ok = config:set("log", "level", "debug", false), + ok = config:set("per_doc_access", "enable", "true", false), + + % cleanup and setup + {ok, _, _, _} = test_request:delete(url() ++ "/db", ?ADMIN_REQ_HEADERS), + % {ok, _, _, _} = test_request:put(url() ++ "/db?q=1&n=1&access=true", ?ADMIN_REQ_HEADERS, ""), + + % create users + UserDbUrl = url() ++ "/_users?q=1&n=1", + {ok, _, _, _} = test_request:delete(UserDbUrl, ?ADMIN_REQ_HEADERS, ""), + {ok, 201, _, _} = test_request:put(UserDbUrl, ?ADMIN_REQ_HEADERS, ""), + + UserXDocUrl = url() ++ "/_users/org.couchdb.user:x", + UserXDocBody = "{ \"name\":\"x\", \"roles\": [], \"password\":\"x\", \"type\": \"user\" }", + {ok, 201, _, _} = test_request:put(UserXDocUrl, ?ADMIN_REQ_HEADERS, 
UserXDocBody), + + UserYDocUrl = url() ++ "/_users/org.couchdb.user:y", + UserYDocBody = "{ \"name\":\"y\", \"roles\": [], \"password\":\"y\", \"type\": \"user\" }", + {ok, 201, _, _} = test_request:put(UserYDocUrl, ?ADMIN_REQ_HEADERS, UserYDocBody), + Couch. + +after_all(_) -> + UserDbUrl = url() ++ "/_users", + {ok, _, _, _} = test_request:delete(UserDbUrl, ?ADMIN_REQ_HEADERS, ""), + ok = test_util:stop_couch(done). + +access_test_() -> + Tests = [ + % Server config + fun should_not_let_create_access_db_if_disabled/2, + + % Doc creation + fun should_not_let_anonymous_user_create_doc/2, + fun should_let_admin_create_doc_with_access/2, + fun should_let_admin_create_doc_without_access/2, + fun should_let_user_create_doc_for_themselves/2, + fun should_not_let_user_create_doc_for_someone_else/2, + fun should_let_user_create_access_ddoc/2, + fun access_ddoc_should_have_no_effects/2, + + % Doc updates + fun users_with_access_can_update_doc/2, + fun users_without_access_can_not_update_doc/2, + fun users_with_access_can_not_change_access/2, + fun users_with_access_can_not_remove_access/2, + + % Doc reads + fun should_let_admin_read_doc_with_access/2, + fun user_with_access_can_read_doc/2, + fun user_without_access_can_not_read_doc/2, + fun user_can_not_read_doc_without_access/2, + + % Doc deletes + fun should_let_admin_delete_doc_with_access/2, + fun should_let_user_delete_doc_for_themselves/2, + fun should_not_let_user_delete_doc_for_someone_else/2, + + % _all_docs with include_docs + fun should_let_admin_fetch_all_docs/2, + fun should_let_user_fetch_their_own_all_docs/2, + + % _changes + fun should_let_admin_fetch_changes/2, + fun should_let_user_fetch_their_own_changes/2, + + % views + fun should_not_allow_admin_access_ddoc_view_request/2, + fun should_not_allow_user_access_ddoc_view_request/2, + fun should_allow_admin_users_access_ddoc_view_request/2, + fun should_allow_user_users_access_ddoc_view_request/2, + + % replication + fun 
should_allow_admin_to_replicate_from_access_to_access/2, + fun should_allow_admin_to_replicate_from_no_access_to_access/2, + fun should_allow_admin_to_replicate_from_access_to_no_access/2, + fun should_allow_admin_to_replicate_from_no_access_to_no_access/2, + + fun should_allow_user_to_replicate_from_access_to_access/2, + fun should_allow_user_to_replicate_from_access_to_no_access/2, + fun should_allow_user_to_replicate_from_no_access_to_access/2, + fun should_allow_user_to_replicate_from_no_access_to_no_access/2, + + % _revs_diff for docs you don’t have access to + fun should_not_allow_user_to_revs_diff_other_docs/2 + + % TODO: create test db with role and not _users in _security.members + % and make sure a user in that group can access while a user not + % in that group cant + % % potential future feature + % % fun should_let_user_fetch_their_own_all_docs_plus_users_ddocs/2%, + ], + { + "Access tests", + { + setup, + fun before_all/0, + fun after_all/1, + [ + make_test_cases(basic, Tests) + ] + } + }. + +make_test_cases(Mod, Funs) -> + { + lists:flatten(io_lib:format("~s", [Mod])), + {foreachx, fun before_each/1, fun after_each/2, [{Mod, Fun} || Fun <- Funs]} + }. + +% Doc creation +% http://127.0.0.1:64903/db/a?revs=true&open_revs=%5B%221-23202479633c2b380f79507a776743d5%22%5D&latest=true + +% should_do_the_thing(_PortType, Url) -> +% ?_test(begin +% {ok, _, _, _} = test_request:put(Url ++ "/db/a", +% ?ADMIN_REQ_HEADERS, "{\"a\":1,\"_access\":[\"x\"]}"), +% {ok, Code, _, _} = test_request:get(Url ++ "/db/a?revs=true&open_revs=%5B%221-23202479633c2b380f79507a776743d5%22%5D&latest=true", +% ?USERX_REQ_HEADERS), +% ?assertEqual(200, Code) +% end). +% + +should_not_let_create_access_db_if_disabled(_PortType, Url) -> + ok = config:set("per_doc_access", "enable", "false", false), + {ok, Code, _, _} = test_request:put(url() ++ "/db?q=1&n=1&access=true", ?ADMIN_REQ_HEADERS, ""), + ok = config:set("per_doc_access", "enable", "true", false), + ?_assertEqual(400, Code). 
+ +should_not_let_anonymous_user_create_doc(_PortType, Url) -> + % TODO: debugging leftover + % BulkDocsBody = {[ + % {<<"docs">>, [ + % {[{<<"_id">>, <<"a">>}]}, + % {[{<<"_id">>, <<"a">>}]}, + % {[{<<"_id">>, <<"b">>}]}, + % {[{<<"_id">>, <<"c">>}]} + % ]} + % ]}, + % Resp = test_request:post(Url ++ "/db/_bulk_docs", ?ADMIN_REQ_HEADERS, jiffy:encode(BulkDocsBody)), + % ?debugFmt("~nResp: ~p~n", [Resp]), + {ok, Code, _, _} = test_request:put(Url ++ "/db/a", "{\"a\":1,\"_access\":[\"x\"]}"), + ?_assertEqual(401, Code). + +should_let_admin_create_doc_with_access(_PortType, Url) -> + {ok, Code, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + ?_assertEqual(201, Code). + +should_let_admin_create_doc_without_access(_PortType, Url) -> + {ok, Code, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1}" + ), + ?_assertEqual(201, Code). + +should_let_user_create_doc_for_themselves(_PortType, Url) -> + {ok, Code, _, _} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + ?_assertEqual(201, Code). + +should_not_let_user_create_doc_for_someone_else(_PortType, Url) -> + {ok, Code, _, _} = test_request:put( + Url ++ "/db/c", + ?USERY_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + ?_assertEqual(403, Code). + +should_let_user_create_access_ddoc(_PortType, Url) -> + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/dx", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + ?_assertEqual(201, Code). 
+ +access_ddoc_should_have_no_effects(_PortType, Url) -> + ?_test(begin + Ddoc = + "{ \"_access\":[\"x\"], \"validate_doc_update\": \"function(newDoc, oldDoc, userCtx) { throw({unauthorized: 'throw error'})}\", \"views\": { \"foo\": { \"map\": \"function(doc) { emit(doc._id) }\" } }, \"shows\": { \"boo\": \"function() {}\" }, \"lists\": { \"hoo\": \"function() {}\" }, \"update\": { \"goo\": \"function() {}\" }, \"filters\": { \"loo\": \"function() {}\" } }", + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/dx", + ?USERX_REQ_HEADERS, + Ddoc + ), + ?assertEqual(201, Code), + {ok, Code1, _, B} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + ?assertEqual(201, Code1), + {ok, Code2, _, _} = test_request:get( + Url ++ "/db/_design/dx/_view/foo", + ?USERX_REQ_HEADERS + ), + ?assertEqual(404, Code2), + {ok, Code3, _, _} = test_request:get( + Url ++ "/db/_design/dx/_show/boo/b", + ?USERX_REQ_HEADERS + ), + ?assertEqual(404, Code3), + {ok, Code4, _, _} = test_request:get( + Url ++ "/db/_design/dx/_list/hoo/foo", + ?USERX_REQ_HEADERS + ), + ?assertEqual(404, Code4), + {ok, Code5, _, _} = test_request:post( + Url ++ "/db/_design/dx/_update/goo", + ?USERX_REQ_HEADERS, + "" + ), + ?assertEqual(404, Code5), + {ok, Code6, _, _} = test_request:get( + Url ++ "/db/_changes?filter=dx/loo", + ?USERX_REQ_HEADERS + ), + ?assertEqual(404, Code6), + {ok, Code7, _, _} = test_request:get( + Url ++ "/db/_changes?filter=_view&view=dx/foo", + ?USERX_REQ_HEADERS + ), + ?assertEqual(404, Code7) + end). 
+ +% Doc updates + +users_with_access_can_update_doc(_PortType, Url) -> + {ok, _, _, Body} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {Json} = jiffy:decode(Body), + Rev = couch_util:get_value(<<"rev">>, Json), + {ok, Code, _, _} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":2,\"_access\":[\"x\"],\"_rev\":\"" ++ binary_to_list(Rev) ++ "\"}" + ), + ?_assertEqual(201, Code). + +users_without_access_can_not_update_doc(_PortType, Url) -> + {ok, _, _, Body} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {Json} = jiffy:decode(Body), + Rev = couch_util:get_value(<<"rev">>, Json), + {ok, Code, _, _} = test_request:put( + Url ++ "/db/b", + ?USERY_REQ_HEADERS, + "{\"a\":2,\"_access\":[\"y\"],\"_rev\":\"" ++ binary_to_list(Rev) ++ "\"}" + ), + ?_assertEqual(403, Code). + +users_with_access_can_not_change_access(_PortType, Url) -> + {ok, _, _, Body} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {Json} = jiffy:decode(Body), + Rev = couch_util:get_value(<<"rev">>, Json), + {ok, Code, _, _} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":2,\"_access\":[\"y\"],\"_rev\":\"" ++ binary_to_list(Rev) ++ "\"}" + ), + ?_assertEqual(403, Code). + +users_with_access_can_not_remove_access(_PortType, Url) -> + {ok, _, _, Body} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {Json} = jiffy:decode(Body), + Rev = couch_util:get_value(<<"rev">>, Json), + {ok, Code, _, _} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"a\":2,\"_rev\":\"" ++ binary_to_list(Rev) ++ "\"}" + ), + ?_assertEqual(403, Code). 
+ +% Doc reads + +should_let_admin_read_doc_with_access(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, Code, _, _} = test_request:get( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS + ), + ?_assertEqual(200, Code). + +user_with_access_can_read_doc(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, Code, _, _} = test_request:get( + Url ++ "/db/a", + ?USERX_REQ_HEADERS + ), + ?_assertEqual(200, Code). + +user_without_access_can_not_read_doc(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, Code, _, _} = test_request:get( + Url ++ "/db/a", + ?USERY_REQ_HEADERS + ), + ?_assertEqual(403, Code). + +user_can_not_read_doc_without_access(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1}" + ), + {ok, Code, _, _} = test_request:get( + Url ++ "/db/a", + ?USERX_REQ_HEADERS + ), + ?_assertEqual(403, Code). + +% Doc deletes + +should_let_admin_delete_doc_with_access(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, Code, _, _} = test_request:delete( + Url ++ "/db/a?rev=1-23202479633c2b380f79507a776743d5", + ?ADMIN_REQ_HEADERS + ), + ?_assertEqual(200, Code). + +should_let_user_delete_doc_for_themselves(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:get( + Url ++ "/db/a", + ?USERX_REQ_HEADERS + ), + {ok, Code, _, _} = test_request:delete( + Url ++ "/db/a?rev=1-23202479633c2b380f79507a776743d5", + ?USERX_REQ_HEADERS + ), + ?_assertEqual(200, Code). 
+ +should_not_let_user_delete_doc_for_someone_else(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?USERX_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, Code, _, _} = test_request:delete( + Url ++ "/db/a?rev=1-23202479633c2b380f79507a776743d5", + ?USERY_REQ_HEADERS + ), + ?_assertEqual(403, Code). + +% _all_docs with include_docs + +should_let_admin_fetch_all_docs(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/d", + ?ADMIN_REQ_HEADERS, + "{\"d\":4,\"_access\":[\"y\"]}" + ), + {ok, 200, _, Body} = test_request:get( + Url ++ "/db/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(Body), + ?_assertEqual(4, proplists:get_value(<<"total_rows">>, Json)). 
+ +should_let_user_fetch_their_own_all_docs(_PortType, Url) -> + ?_test(begin + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/b", + ?USERX_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/d", + ?USERY_REQ_HEADERS, + "{\"d\":4,\"_access\":[\"y\"]}" + ), + {ok, 200, _, Body} = test_request:get( + Url ++ "/db/_all_docs?include_docs=true", + ?USERX_REQ_HEADERS + ), + {Json} = jiffy:decode(Body), + Rows = proplists:get_value(<<"rows">>, Json), + ?assertEqual( + [ + {[ + {<<"id">>, <<"a">>}, + {<<"key">>, <<"a">>}, + {<<"value">>, <<"1-23202479633c2b380f79507a776743d5">>}, + {<<"doc">>, + {[ + {<<"_id">>, <<"a">>}, + {<<"_rev">>, <<"1-23202479633c2b380f79507a776743d5">>}, + {<<"a">>, 1}, + {<<"_access">>, [<<"x">>]} + ]}} + ]}, + {[ + {<<"id">>, <<"b">>}, + {<<"key">>, <<"b">>}, + {<<"value">>, <<"1-d33fb05384fa65a8081da2046595de0f">>}, + {<<"doc">>, + {[ + {<<"_id">>, <<"b">>}, + {<<"_rev">>, <<"1-d33fb05384fa65a8081da2046595de0f">>}, + {<<"b">>, 2}, + {<<"_access">>, [<<"x">>]} + ]}} + ]} + ], + Rows + ), + ?assertEqual(2, length(Rows)), + ?assertEqual(4, proplists:get_value(<<"total_rows">>, Json)), + + {ok, 200, _, Body1} = test_request:get( + Url ++ "/db/_all_docs?include_docs=true", + ?USERY_REQ_HEADERS + ), + {Json1} = jiffy:decode(Body1), + ?assertEqual( + [ + {<<"total_rows">>, 4}, + {<<"offset">>, 2}, + {<<"rows">>, [ + {[ + {<<"id">>, <<"c">>}, + {<<"key">>, <<"c">>}, + {<<"value">>, <<"1-92aef5b0e4a3f4db0aba1320869bc95d">>}, + {<<"doc">>, + {[ + {<<"_id">>, <<"c">>}, + {<<"_rev">>, <<"1-92aef5b0e4a3f4db0aba1320869bc95d">>}, + {<<"c">>, 3}, + {<<"_access">>, [<<"y">>]} + ]}} + ]}, + {[ + {<<"id">>, <<"d">>}, + {<<"key">>, <<"d">>}, + {<<"value">>, 
<<"1-ae984f6550038b1ed1565ac4b6cd8c5d">>}, + {<<"doc">>, + {[ + {<<"_id">>, <<"d">>}, + {<<"_rev">>, <<"1-ae984f6550038b1ed1565ac4b6cd8c5d">>}, + {<<"d">>, 4}, + {<<"_access">>, [<<"y">>]} + ]}} + ]} + ]} + ], + Json1 + ) + end). + +% _changes + +should_let_admin_fetch_changes(_PortType, Url) -> + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/d", + ?ADMIN_REQ_HEADERS, + "{\"d\":4,\"_access\":[\"y\"]}" + ), + {ok, 200, _, Body} = test_request:get( + Url ++ "/db/_changes", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(Body), + AmountOfDocs = length(proplists:get_value(<<"results">>, Json)), + ?_assertEqual(4, AmountOfDocs). + +should_let_user_fetch_their_own_changes(_PortType, Url) -> + ?_test(begin + {ok, 201, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + {ok, 201, _, _} = test_request:put( + Url ++ "/db/d", + ?ADMIN_REQ_HEADERS, + "{\"d\":4,\"_access\":[\"y\"]}" + ), + {ok, 200, _, Body} = test_request:get( + Url ++ "/db/_changes", + ?USERX_REQ_HEADERS + ), + {Json} = jiffy:decode(Body), + ?assertMatch( + [ + {<<"results">>, [ + {[ + {<<"seq">>, <<"2-", _/binary>>}, + {<<"id">>, <<"a">>}, + {<<"changes">>, [{[{<<"rev">>, <<"1-23202479633c2b380f79507a776743d5">>}]}]} + ]}, + {[ + {<<"seq">>, <<"3-", _/binary>>}, + {<<"id">>, <<"b">>}, + {<<"changes">>, [{[{<<"rev">>, <<"1-d33fb05384fa65a8081da2046595de0f">>}]}]} + ]} + ]}, + 
{<<"last_seq">>, <<"3-", _/binary>>}, + {<<"pending">>, 2} + ], + Json + ), + AmountOfDocs = length(proplists:get_value(<<"results">>, Json)), + ?assertEqual(2, AmountOfDocs) + end). + +% views + +should_not_allow_admin_access_ddoc_view_request(_PortType, Url) -> + DDoc = "{\"a\":1,\"_access\":[\"x\"],\"views\":{\"foo\":{\"map\":\"function() {}\"}}}", + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/a", + ?ADMIN_REQ_HEADERS, + DDoc + ), + ?assertEqual(201, Code), + {ok, Code1, _, _} = test_request:get( + Url ++ "/db/_design/a/_view/foo", + ?ADMIN_REQ_HEADERS + ), + ?_assertEqual(404, Code1). + +should_not_allow_user_access_ddoc_view_request(_PortType, Url) -> + DDoc = "{\"a\":1,\"_access\":[\"x\"],\"views\":{\"foo\":{\"map\":\"function() {}\"}}}", + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/a", + ?ADMIN_REQ_HEADERS, + DDoc + ), + ?assertEqual(201, Code), + {ok, Code1, _, _} = test_request:get( + Url ++ "/db/_design/a/_view/foo", + ?USERX_REQ_HEADERS + ), + ?_assertEqual(404, Code1). + +should_allow_admin_users_access_ddoc_view_request(_PortType, Url) -> + DDoc = "{\"a\":1,\"_access\":[\"_users\"],\"views\":{\"foo\":{\"map\":\"function() {}\"}}}", + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/a", + ?ADMIN_REQ_HEADERS, + DDoc + ), + ?assertEqual(201, Code), + {ok, Code1, _, _} = test_request:get( + Url ++ "/db/_design/a/_view/foo", + ?ADMIN_REQ_HEADERS + ), + ?_assertEqual(200, Code1). + +should_allow_user_users_access_ddoc_view_request(_PortType, Url) -> + DDoc = "{\"a\":1,\"_access\":[\"_users\"],\"views\":{\"foo\":{\"map\":\"function() {}\"}}}", + {ok, Code, _, _} = test_request:put( + Url ++ "/db/_design/a", + ?ADMIN_REQ_HEADERS, + DDoc + ), + ?assertEqual(201, Code), + {ok, Code1, _, _} = test_request:get( + Url ++ "/db/_design/a/_view/foo", + ?USERX_REQ_HEADERS + ), + ?_assertEqual(200, Code1). 
+ +% replication + +should_allow_admin_to_replicate_from_access_to_access(_PortType, Url) -> + ?_test(begin + % create target db + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1&access=true", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"x\"]}" + ), + + % replicate + AdminUrl = string:replace(Url, "http://", "http://a:a@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(AdminUrl ++ "/db")}, + {<<"target">>, list_to_binary(AdminUrl ++ "/db2")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?ADMIN_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(3, MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsReard), + ?assertEqual(3, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ 
"/db2/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(3, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_admin_to_replicate_from_no_access_to_access(_PortType, Url) -> + ?_test(begin + % create target db + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db2/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"x\"]}" + ), + + % replicate + AdminUrl = string:replace(Url, "http://", "http://a:a@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(AdminUrl ++ "/db2")}, + {<<"target">>, list_to_binary(AdminUrl ++ "/db")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?ADMIN_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(3, MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsReard), + ?assertEqual(3, DocsWritten), + 
?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(3, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_admin_to_replicate_from_access_to_no_access(_PortType, Url) -> + ?_test(begin + % create target db + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"x\"]}" + ), + + % replicate + AdminUrl = string:replace(Url, "http://", "http://a:a@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(AdminUrl ++ "/db")}, + {<<"target">>, list_to_binary(AdminUrl ++ "/db2")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?ADMIN_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(3, 
MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsReard), + ?assertEqual(3, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db2/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(3, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_admin_to_replicate_from_no_access_to_no_access(_PortType, Url) -> + ?_test(begin + % create source and target dbs + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + {ok, 201, _, _} = test_request:put( + url() ++ "/db3?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db3/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db2/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"x\"]}" + ), + + % replicate + AdminUrl = string:replace(Url, "http://", "http://a:a@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(AdminUrl ++ "/db2")}, + {<<"target">>, list_to_binary(AdminUrl ++ "/db3")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?ADMIN_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, 
EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(3, MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsReard), + ?assertEqual(3, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db3/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(3, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_user_to_replicate_from_access_to_access(_PortType, Url) -> + ?_test(begin + % create source and target dbs + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1&access=true", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + + % replicate + UserXUrl = string:replace(Url, "http://", "http://x:x@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(UserXUrl ++ "/db")}, + {<<"target">>, list_to_binary(UserXUrl ++ "/db2")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?USERX_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + % ?debugFmt("~nResponseBody: ~p~n", [ResponseBody]), + + % assert replication status + {EJResponseBody} = 
jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(2, MissingChecked), + ?assertEqual(2, MissingFound), + ?assertEqual(2, DocsReard), + ?assertEqual(2, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert access in local doc + ReplicationId = couch_util:get_value(<<"replication_id">>, EJResponseBody), + {ok, 200, _, CheckPoint} = test_request:get( + Url ++ "/db/_local/" ++ ReplicationId, + ?USERX_REQ_HEADERS + ), + {EJCheckPoint} = jiffy:decode(CheckPoint), + Access = couch_util:get_value(<<"_access">>, EJCheckPoint), + ?assertEqual([<<"x">>], Access), + + % make sure others can’t read our local docs + {ok, 403, _, _} = test_request:get( + Url ++ "/db/_local/" ++ ReplicationId, + ?USERY_REQ_HEADERS + ), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db2/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(2, proplists:get_value(<<"total_rows">>, Json)) + end). 
+ +should_allow_user_to_replicate_from_access_to_no_access(_PortType, Url) -> + ?_test(begin + % create source and target dbs + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + + % replicate + UserXUrl = string:replace(Url, "http://", "http://x:x@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(UserXUrl ++ "/db")}, + {<<"target">>, list_to_binary(UserXUrl ++ "/db2")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?USERX_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(2, MissingChecked), + ?assertEqual(2, MissingFound), + ?assertEqual(2, DocsReard), + ?assertEqual(2, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db2/_all_docs?include_docs=true", + 
?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(2, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_user_to_replicate_from_no_access_to_access(_PortType, Url) -> + ?_test(begin + % create source and target dbs + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + % leave for easier debugging + % VduFun = <<"function(newdoc, olddoc, userctx) {if(newdoc._id == \"b\") throw({'forbidden':'fail'})}">>, + % DDoc = {[ + % {<<"_id">>, <<"_design/vdu">>}, + % {<<"validate_doc_update">>, VduFun} + % ]}, + % {ok, _, _, _} = test_request:put(Url ++ "/db/_design/vdu", + % ?ADMIN_REQ_HEADERS, jiffy:encode(DDoc)), + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db2/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + + % replicate + UserXUrl = string:replace(Url, "http://", "http://x:x@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(UserXUrl ++ "/db2")}, + {<<"target">>, list_to_binary(UserXUrl ++ "/db")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?USERX_REQ_HEADERS, + jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsRead = 
couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + % ?debugFmt("~n History: ~p ~n", [History]), + ?assertEqual(3, MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsRead), + ?assertEqual(2, DocsWritten), + ?assertEqual(1, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(2, proplists:get_value(<<"total_rows">>, Json)) + end). + +should_allow_user_to_replicate_from_no_access_to_no_access(_PortType, Url) -> + ?_test(begin + % create source and target dbs + {ok, 201, _, _} = test_request:put( + url() ++ "/db2?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db2/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + + {ok, 201, _, _} = test_request:put( + url() ++ "/db3?q=1&n=1", + ?ADMIN_REQ_HEADERS, + "" + ), + % set target db security + {ok, _, _, _} = test_request:put( + url() ++ "/db3/_security", + ?ADMIN_REQ_HEADERS, + jiffy:encode(?SECURITY_OBJECT) + ), + % create source docs + {ok, _, _, _} = test_request:put( + Url ++ "/db2/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db2/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + + % replicate + UserXUrl = string:replace(Url, "http://", "http://x:x@"), + EJRequestBody = + {[ + {<<"source">>, list_to_binary(UserXUrl ++ "/db2")}, + {<<"target">>, list_to_binary(UserXUrl ++ "/db3")} + ]}, + {ok, ResponseCode, _, ResponseBody} = test_request:post( + Url ++ "/_replicate", + ?USERX_REQ_HEADERS, + 
jiffy:encode(EJRequestBody) + ), + + % assert replication status + {EJResponseBody} = jiffy:decode(ResponseBody), + ?assertEqual(ResponseCode, 200), + ?assertEqual(true, couch_util:get_value(<<"ok">>, EJResponseBody)), + [{History}] = couch_util:get_value(<<"history">>, EJResponseBody), + + MissingChecked = couch_util:get_value(<<"missing_checked">>, History), + MissingFound = couch_util:get_value(<<"missing_found">>, History), + DocsReard = couch_util:get_value(<<"docs_read">>, History), + DocsWritten = couch_util:get_value(<<"docs_written">>, History), + DocWriteFailures = couch_util:get_value(<<"doc_write_failures">>, History), + + ?assertEqual(3, MissingChecked), + ?assertEqual(3, MissingFound), + ?assertEqual(3, DocsReard), + ?assertEqual(3, DocsWritten), + ?assertEqual(0, DocWriteFailures), + + % assert docs in target db + {ok, 200, _, ADBody} = test_request:get( + Url ++ "/db3/_all_docs?include_docs=true", + ?ADMIN_REQ_HEADERS + ), + {Json} = jiffy:decode(ADBody), + ?assertEqual(3, proplists:get_value(<<"total_rows">>, Json)) + end). 
+ +% revs_diff +should_not_allow_user_to_revs_diff_other_docs(_PortType, Url) -> + ?_test(begin + % create test docs + {ok, _, _, _} = test_request:put( + Url ++ "/db/a", + ?ADMIN_REQ_HEADERS, + "{\"a\":1,\"_access\":[\"x\"]}" + ), + {ok, _, _, _} = test_request:put( + Url ++ "/db/b", + ?ADMIN_REQ_HEADERS, + "{\"b\":2,\"_access\":[\"x\"]}" + ), + {ok, _, _, V} = test_request:put( + Url ++ "/db/c", + ?ADMIN_REQ_HEADERS, + "{\"c\":3,\"_access\":[\"y\"]}" + ), + + % nothing missing + RevsDiff = + {[ + {<<"a">>, [ + <<"1-23202479633c2b380f79507a776743d5">> + ]} + ]}, + {ok, GoodCode, _, GoodBody} = test_request:post( + Url ++ "/db/_revs_diff", + ?USERX_REQ_HEADERS, + jiffy:encode(RevsDiff) + ), + EJGoodBody = jiffy:decode(GoodBody), + ?assertEqual(200, GoodCode), + ?assertEqual({[]}, EJGoodBody), + + % something missing + MissingRevsDiff = + {[ + {<<"a">>, [ + <<"1-missing">> + ]} + ]}, + {ok, MissingCode, _, MissingBody} = test_request:post( + Url ++ "/db/_revs_diff", + ?USERX_REQ_HEADERS, + jiffy:encode(MissingRevsDiff) + ), + EJMissingBody = jiffy:decode(MissingBody), + ?assertEqual(200, MissingCode), + MissingExpect = + {[ + {<<"a">>, + {[ + {<<"missing">>, [<<"1-missing">>]} + ]}} + ]}, + ?assertEqual(MissingExpect, EJMissingBody), + + % other doc + OtherRevsDiff = + {[ + {<<"c">>, [ + <<"1-92aef5b0e4a3f4db0aba1320869bc95d">> + ]} + ]}, + {ok, OtherCode, _, OtherBody} = test_request:post( + Url ++ "/db/_revs_diff", + ?USERX_REQ_HEADERS, + jiffy:encode(OtherRevsDiff) + ), + EJOtherBody = jiffy:decode(OtherBody), + ?assertEqual(200, OtherCode), + ?assertEqual({[]}, EJOtherBody) + end). +%% ------------------------------------------------------------------ +%% Internal Function Definitions +%% ------------------------------------------------------------------ + +port() -> + integer_to_list(mochiweb_socket_server:get(chttpd, port)). 
+ +% Potential future feature: +% should_let_user_fetch_their_own_all_docs_plus_users_ddocs(_PortType, Url) -> +% {ok, 201, _, _} = test_request:put(Url ++ "/db/a", +% ?ADMIN_REQ_HEADERS, "{\"a\":1,\"_access\":[\"x\"]}"), +% {ok, 201, _, _} = test_request:put(Url ++ "/db/_design/foo", +% ?ADMIN_REQ_HEADERS, "{\"a\":1,\"_access\":[\"_users\"]}"), +% {ok, 201, _, _} = test_request:put(Url ++ "/db/_design/bar", +% ?ADMIN_REQ_HEADERS, "{\"a\":1,\"_access\":[\"houdini\"]}"), +% {ok, 201, _, _} = test_request:put(Url ++ "/db/b", +% ?USERX_REQ_HEADERS, "{\"b\":2,\"_access\":[\"x\"]}"), +% +% % % TODO: add allowing non-admin users adding non-admin ddocs +% {ok, 201, _, _} = test_request:put(Url ++ "/db/_design/x", +% ?ADMIN_REQ_HEADERS, "{\"b\":2,\"_access\":[\"x\"]}"), +% +% {ok, 201, _, _} = test_request:put(Url ++ "/db/c", +% ?ADMIN_REQ_HEADERS, "{\"c\":3,\"_access\":[\"y\"]}"), +% {ok, 201, _, _} = test_request:put(Url ++ "/db/d", +% ?USERY_REQ_HEADERS, "{\"d\":4,\"_access\":[\"y\"]}"), +% {ok, 200, _, Body} = test_request:get(Url ++ "/db/_all_docs?include_docs=true", +% ?USERX_REQ_HEADERS), +% {Json} = jiffy:decode(Body), +% ?_assertEqual(3, length(proplists:get_value(<<"rows">>, Json))). +% ?debugFmt("~nHSOIN: ~p~n", [Json]), diff --git a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl index 9822542f3f7..5fa547d627a 100644 --- a/src/couch/test/eunit/couchdb_mrview_cors_tests.erl +++ b/src/couch/test/eunit/couchdb_mrview_cors_tests.erl @@ -18,6 +18,7 @@ -define(DDOC, {[ {<<"_id">>, <<"_design/foo">>}, + {<<"_access">>, [<<"user_a">>]}, {<<"shows">>, {[ {<<"bar">>, <<"function(doc, req) {return '

wosh

';}">>} @@ -97,7 +98,7 @@ should_make_shows_request(_, {Host, DbName}) -> end). create_db(backdoor, DbName) -> - {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX]), + {ok, Db} = couch_db:create(DbName, [?ADMIN_CTX, {access, true}]), couch_db:close(Db); create_db(clustered, DbName) -> {ok, Status, _, _} = test_request:put(db_url(DbName), [?AUTH], ""), diff --git a/src/couch_index/src/couch_index_updater.erl b/src/couch_index/src/couch_index_updater.erl index fe2150505ae..2ffd954b483 100644 --- a/src/couch_index/src/couch_index_updater.erl +++ b/src/couch_index/src/couch_index_updater.erl @@ -123,8 +123,8 @@ update(Idx, Mod, IdxState) -> IncludeDesign = lists:member(include_design, UpdateOpts), DocOpts = case lists:member(local_seq, UpdateOpts) of - true -> [conflicts, deleted_conflicts, local_seq]; - _ -> [conflicts, deleted_conflicts] + true -> [conflicts, deleted_conflicts, local_seq, deleted]; + _ -> [conflicts, deleted_conflicts, local_seq, deleted] end, couch_util:with_db(DbName, fun(Db) -> @@ -142,23 +142,35 @@ update(Idx, Mod, IdxState) -> end, GetInfo = fun - (#full_doc_info{id = Id, update_seq = Seq, deleted = Del} = FDI) -> - {Id, Seq, Del, couch_doc:to_doc_info(FDI)}; - (#doc_info{id = Id, high_seq = Seq, revs = [RI | _]} = DI) -> - {Id, Seq, RI#rev_info.deleted, DI} + (#full_doc_info{id = Id, update_seq = Seq, deleted = Del, access = Access} = FDI) -> + {Id, Seq, Del, couch_doc:to_doc_info(FDI), Access}; + (#doc_info{id = Id, high_seq = Seq, revs = [RI | _], access = Access} = DI) -> + {Id, Seq, RI#rev_info.deleted, DI, Access} end, LoadDoc = fun(DI) -> - {DocId, Seq, Deleted, DocInfo} = GetInfo(DI), + {DocId, Seq, Deleted, DocInfo, Access} = GetInfo(DI), case {IncludeDesign, DocId} of {false, <<"_design/", _/binary>>} -> {nil, Seq}; - _ when Deleted -> - {#doc{id = DocId, deleted = true}, Seq}; _ -> - {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts), - {Doc, Seq} + % TODO: move into outer case statement + case IndexName of + <<"_design/_access">> 
-> + {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts), + [RevInfo | _] = DocInfo#doc_info.revs, + Doc1 = Doc#doc{ + meta = [{body_sp, RevInfo#rev_info.body_sp}], + access = Access + }, + {Doc1, Seq}; + _ when Deleted -> + {#doc{id = DocId, deleted = true}, Seq}; + _ -> + {ok, Doc} = couch_db:open_doc_int(Db, DocInfo, DocOpts), + {Doc, Seq} + end end end, diff --git a/src/couch_index/src/couch_index_util.erl b/src/couch_index/src/couch_index_util.erl index db8aad470e1..bb9d0277888 100644 --- a/src/couch_index/src/couch_index_util.erl +++ b/src/couch_index/src/couch_index_util.erl @@ -31,7 +31,7 @@ index_file(Module, DbName, FileName) -> load_doc(Db, #doc_info{} = DI, Opts) -> Deleted = lists:member(deleted, Opts), - case (catch couch_db:open_doc(Db, DI, Opts)) of + case (catch (couch_db:open_doc(Db, DI, Opts))) of {ok, #doc{deleted = false} = Doc} -> Doc; {ok, #doc{deleted = true} = Doc} when Deleted -> Doc; _Else -> null diff --git a/src/couch_mrview/include/couch_mrview.hrl b/src/couch_mrview/include/couch_mrview.hrl index b31463c53fe..ef987595d91 100644 --- a/src/couch_mrview/include/couch_mrview.hrl +++ b/src/couch_mrview/include/couch_mrview.hrl @@ -83,7 +83,8 @@ conflicts, callback, sorted = true, - extra = [] + extra = [], + deleted = false }). -record(vacc, { diff --git a/src/couch_mrview/src/couch_mrview.erl b/src/couch_mrview/src/couch_mrview.erl index a50fcd6700f..a7ed843fe05 100644 --- a/src/couch_mrview/src/couch_mrview.erl +++ b/src/couch_mrview/src/couch_mrview.erl @@ -13,7 +13,7 @@ -module(couch_mrview). -export([validate/2]). --export([query_all_docs/2, query_all_docs/4]). +-export([query_all_docs/2, query_all_docs/4, query_changes_access/5]). -export([query_view/3, query_view/4, query_view/6, get_view_index_pid/4]). -export([get_info/2]). -export([trigger_update/2, trigger_update/3]). 
@@ -259,6 +259,130 @@ query_all_docs(Db, Args) -> query_all_docs(Db, Args, Callback, Acc) when is_list(Args) -> query_all_docs(Db, to_mrargs(Args), Callback, Acc); query_all_docs(Db, Args0, Callback, Acc) -> + case couch_db:has_access_enabled(Db) and not couch_db:is_admin(Db) of + true -> query_all_docs_access(Db, Args0, Callback, Acc); + false -> query_all_docs_admin(Db, Args0, Callback, Acc) + end. +access_ddoc() -> + #doc{ + id = <<"_design/_access">>, + body = + {[ + {<<"language">>, <<"_access">>}, + {<<"options">>, + {[ + {<<"include_design">>, true} + ]}}, + {<<"views">>, + {[ + {<<"_access_by_id">>, + {[ + {<<"map">>, <<"_access/by-id-map">>}, + {<<"reduce">>, <<"_count">>} + ]}}, + {<<"_access_by_seq">>, + {[ + {<<"map">>, <<"_access/by-seq-map">>}, + {<<"reduce">>, <<"_count">>} + ]}} + ]}} + ]} + }. +query_changes_access(Db, StartSeq, Fun, Options, Acc) -> + DDoc = access_ddoc(), + UserCtx = couch_db:get_user_ctx(Db), + UserName = UserCtx#user_ctx.name, + % Future work: this is where we’d do a multi-key-query with a user’s + % roles + Args1 = prefix_startkey_endkey(UserName, #mrargs{}, fwd), + Args2 = Args1#mrargs{deleted = true}, + Args = Args2#mrargs{reduce = false}, + % filter out the user-prefix from the key, so _all_docs looks normal + Callback = fun + ({meta, _}, Acc0) -> + {ok, Acc0}; + ({row, Props}, Acc0) -> + % turn row into FDI + Value = couch_util:get_value(value, Props), + [Owner, Seq] = couch_util:get_value(key, Props), + Rev = couch_util:get_value(rev, Value), + Deleted = couch_util:get_value(deleted, Value, false), + BodySp = couch_util:get_value(body_sp, Value), + [Pos, RevId] = string:split(?b2l(Rev), "-"), + FDI = #full_doc_info{ + id = proplists:get_value(id, Props), + rev_tree = [ + { + list_to_integer(Pos), + { + ?l2b(RevId), + #leaf{deleted = Deleted, ptr = BodySp, seq = Seq, sizes = #size_info{}}, + [] + } + } + ], + deleted = Deleted, + update_seq = 0, + sizes = #size_info{}, + access = [Owner] + }, + Fun(FDI, Acc0); + (_Else, 
Acc0) -> + {ok, Acc0} + end, + VName = <<"_access_by_seq">>, + query_view(Db, DDoc, VName, Args, Callback, Acc). + +query_all_docs_access(Db, Args0, Callback0, Acc) -> + % query our not yest existing, home-grown _access view. + % use query_view for this. + DDoc = access_ddoc(), + UserCtx = couch_db:get_user_ctx(Db), + UserName = UserCtx#user_ctx.name, + Args1 = prefix_startkey_endkey(UserName, Args0, Args0#mrargs.direction), + Args = Args1#mrargs{reduce = false, extra = Args1#mrargs.extra ++ [{all_docs_access, true}]}, + Callback = fun + ({row, Props}, Acc0) -> + % filter out the user-prefix from the key, so _all_docs looks normal + [_User, Key] = proplists:get_value(key, Props), + Row0 = proplists:delete(key, Props), + Row = [{key, Key} | Row0], + Callback0({row, Row}, Acc0); + (Row, Acc0) -> + Callback0(Row, Acc0) + end, + VName = <<"_access_by_id">>, + query_view(Db, DDoc, VName, Args, Callback, Acc). + +prefix_startkey_endkey(UserName, Args, fwd) -> + #mrargs{start_key = StartKey, end_key = EndKey} = Args, + Args#mrargs{ + start_key = + case StartKey of + undefined -> [UserName]; + StartKey -> [UserName, StartKey] + end, + end_key = + case EndKey of + undefined -> [UserName, {}]; + EndKey -> [UserName, EndKey, {}] + end + }; +prefix_startkey_endkey(UserName, Args, rev) -> + #mrargs{start_key = StartKey, end_key = EndKey} = Args, + Args#mrargs{ + end_key = + case StartKey of + undefined -> [UserName]; + StartKey -> [UserName, StartKey] + end, + start_key = + case EndKey of + undefined -> [UserName, {}]; + EndKey -> [UserName, EndKey, {}] + end + }. 
+query_all_docs_admin(Db, Args0, Callback, Acc) -> Sig = couch_util:with_db(Db, fun(WDb) -> {ok, Info} = couch_db:get_db_info(WDb), couch_index_util:hexsig(couch_hash:md5_hash(?term_to_bin(Info))) diff --git a/src/couch_mrview/src/couch_mrview_updater.erl b/src/couch_mrview/src/couch_mrview_updater.erl index 969a8202800..f147c563f72 100644 --- a/src/couch_mrview/src/couch_mrview_updater.erl +++ b/src/couch_mrview/src/couch_mrview_updater.erl @@ -124,8 +124,6 @@ process_doc(Doc, Seq, #mrst{doc_acc = Acc} = State) when length(Acc) > 100 -> process_doc(Doc, Seq, State#mrst{doc_acc = []}); process_doc(nil, Seq, #mrst{doc_acc = Acc} = State) -> {ok, State#mrst{doc_acc = [{nil, Seq, nil} | Acc]}}; -process_doc(#doc{id = Id, deleted = true}, Seq, #mrst{doc_acc = Acc} = State) -> - {ok, State#mrst{doc_acc = [{Id, Seq, deleted} | Acc]}}; process_doc(#doc{id = Id} = Doc, Seq, #mrst{doc_acc = Acc} = State) -> {ok, State#mrst{doc_acc = [{Id, Seq, Doc} | Acc]}}. @@ -149,6 +147,14 @@ finish_update(#mrst{doc_acc = Acc} = State) -> }} end. +make_deleted_body({Props}, Meta, Seq) -> + BodySp = couch_util:get_value(body_sp, Meta), + Result = [{<<"_seq">>, Seq}, {<<"_body_sp">>, BodySp}], + case couch_util:get_value(<<"_access">>, Props) of + undefined -> Result; + Access -> [{<<"_access">>, Access} | Result] + end. + map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) -> erlang:put(io_priority, {view_update, DbName, IdxName}), case couch_work_queue:dequeue(State0#mrst.doc_queue) of @@ -156,8 +162,9 @@ map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) -> couch_query_servers:stop_doc_map(State0#mrst.qserver), couch_work_queue:close(State0#mrst.write_queue); {ok, Dequeued} -> - % Run all the non deleted docs through the view engine and + % Run all the non deleted* docs through the view engine and % then pass the results on to the writer process. 
+ % *except when the ddoc name is _access State1 = case State0#mrst.qserver of nil -> start_query_server(State0); @@ -167,11 +174,41 @@ map_docs(Parent, #mrst{db_name = DbName, idx_name = IdxName} = State0) -> DocFun = fun ({nil, Seq, _}, {SeqAcc, Results}) -> {erlang:max(Seq, SeqAcc), Results}; - ({Id, Seq, deleted}, {SeqAcc, Results}) -> - {erlang:max(Seq, SeqAcc), [{Id, []} | Results]}; + ( + {Id, Seq, #doc{deleted = true, revs = Rev, body = Body, meta = Meta}}, + {SeqAcc, Results} + ) -> + % _access needs deleted docs + case IdxName of + <<"_design/_access">> -> + % splice in seq + {Start, Rev1} = Rev, + Doc = #doc{ + id = Id, + revs = {Start, [Rev1]}, + body = {make_deleted_body(Body, Meta, Seq)}, + deleted = true + }, + {ok, Res} = couch_query_servers:map_doc_raw(QServer, Doc), + {erlang:max(Seq, SeqAcc), [{Id, Seq, Rev, Res} | Results]}; + _Else -> + {erlang:max(Seq, SeqAcc), [{Id, []} | Results]} + end; ({Id, Seq, Doc}, {SeqAcc, Results}) -> couch_stats:increment_counter([couchdb, mrview, map_doc]), - {ok, Res} = couch_query_servers:map_doc_raw(QServer, Doc), + Doc0 = + case IdxName of + <<"_design/_access">> -> + % splice in seq + {Props} = Doc#doc.body, + BodySp = couch_util:get_value(body_sp, Doc#doc.meta), + Doc#doc{ + body = {Props ++ [{<<"_seq">>, Seq}, {<<"_body_sp">>, BodySp}]} + }; + _Else -> + Doc + end, + {ok, Res} = couch_query_servers:map_doc_raw(QServer, Doc0), {erlang:max(Seq, SeqAcc), [{Id, Res} | Results]} end, FoldFun = fun(Docs, Acc) -> diff --git a/src/couch_mrview/src/couch_mrview_util.erl b/src/couch_mrview/src/couch_mrview_util.erl index a478685daf5..03becddaad1 100644 --- a/src/couch_mrview/src/couch_mrview_util.erl +++ b/src/couch_mrview/src/couch_mrview_util.erl @@ -21,6 +21,7 @@ -export([index_file/2, compaction_file/2, open_file/1]). -export([delete_files/2, delete_index_file/2, delete_compaction_file/2]). -export([get_row_count/1, all_docs_reduce_to_count/1, reduce_to_count/1]). +-export([get_access_row_count/2]). 
-export([all_docs_key_opts/1, all_docs_key_opts/2, key_opts/1, key_opts/2]). -export([fold/4, fold_reduce/4]). -export([temp_view_to_ddoc/1]). @@ -450,6 +451,13 @@ reduce_to_count(Reductions) -> FinalReduction = couch_btree:final_reduce(CountReduceFun, Reductions), get_count(FinalReduction). +get_access_row_count(#mrview{btree = Bt}, UserName) -> + couch_btree:full_reduce_with_options(Bt, [ + {start_key, UserName}, + % is this correct? should this not be \ufff0? + {end_key, {[UserName, {[]}]}} + ]). + fold(#mrview{btree = Bt}, Fun, Acc, Opts) -> WrapperFun = fun(KV, Reds, Acc2) -> fold_fun(Fun, expand_dups([KV], []), Reds, Acc2) @@ -492,8 +500,9 @@ validate_args(#mrst{} = State, Args0) -> ViewPartitioned = State#mrst.partitioned, Partition = get_extra(Args, partition), + AllDocsAccess = get_extra(Args, all_docs_access, false), - case {ViewPartitioned, Partition} of + case {ViewPartitioned and not AllDocsAccess, Partition} of {true, undefined} -> Msg1 = << "`partition` parameter is mandatory " diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl index 34c745c5d03..6c2e99e9995 100644 --- a/src/couch_replicator/src/couch_replicator.erl +++ b/src/couch_replicator/src/couch_replicator.erl @@ -78,7 +78,16 @@ replicate(PostBody, Ctx) -> false -> check_authorization(RepId, UserCtx), {ok, Listener} = rep_result_listener(RepId), - Result = do_replication_loop(Rep), + % fudge replication id + Result = + case do_replication_loop(Rep) of + {ok, {ResultJson}} -> + % TODO: check with options + {PublicRepId, _} = couch_replicator_ids:replication_id(Rep), + {ok, {[{<<"replication_id">>, ?l2b(PublicRepId)} | ResultJson]}}; + Else -> + Else + end, couch_replicator_notifier:stop(Listener), Result end. 
diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl index b211da85b09..3f37738855b 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl @@ -66,6 +66,8 @@ rep_starttime, src_starttime, tgt_starttime, + src_access, + tgt_access, % checkpoint timer timer, changes_queue, @@ -682,6 +684,8 @@ init_state(Rep) -> rep_starttime = StartTime, src_starttime = get_value(<<"instance_start_time">>, SourceInfo), tgt_starttime = get_value(<<"instance_start_time">>, TargetInfo), + src_access = get_value(<<"access">>, SourceInfo), + tgt_access = get_value(<<"access">>, TargetInfo), session_id = couch_uuids:random(), source_seq = SourceSeq, use_checkpoints = get_value(use_checkpoints, Options, true), @@ -794,8 +798,10 @@ do_checkpoint(State) -> rep_starttime = ReplicationStartTime, src_starttime = SrcInstanceStartTime, tgt_starttime = TgtInstanceStartTime, + src_access = SrcAccess, + tgt_access = TgtAccess, stats = Stats, - rep_details = #rep{options = Options}, + rep_details = #rep{options = Options, user_ctx = UserCtx}, session_id = SessionId } = State, case commit_to_both(Source, Target) of @@ -867,10 +873,10 @@ do_checkpoint(State) -> try {SrcRevPos, SrcRevId} = update_checkpoint( - Source, SourceLog#doc{body = NewRepHistory}, source + Source, SourceLog#doc{body = NewRepHistory}, SrcAccess, UserCtx, source ), {TgtRevPos, TgtRevId} = update_checkpoint( - Target, TargetLog#doc{body = NewRepHistory}, target + Target, TargetLog#doc{body = NewRepHistory}, TgtAccess, UserCtx, target ), NewState = State#rep_state{ checkpoint_history = NewRepHistory, @@ -899,8 +905,12 @@ do_checkpoint(State) -> end. update_checkpoint(Db, Doc, DbType) -> + update_checkpoint(Db, Doc, false, #user_ctx{}, DbType). +update_checkpoint(Db, Doc) -> + update_checkpoint(Db, Doc, false, #user_ctx{}). 
+update_checkpoint(Db, Doc, Access, UserCtx, DbType) -> try - update_checkpoint(Db, Doc) + update_checkpoint(Db, Doc, Access, UserCtx) catch throw:{checkpoint_commit_failure, Reason} -> throw( @@ -910,7 +920,15 @@ update_checkpoint(Db, Doc, DbType) -> ) end. -update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) -> +update_checkpoint(Db, #doc{id = LogId} = Doc0, Access, UserCtx) -> + % if db has _access, then: + % get userCtx from replication and splice into doc _access + Doc = + case Access of + true -> Doc0#doc{access = [UserCtx#user_ctx.name]}; + _False -> Doc0 + end, + try case couch_replicator_api_wrap:update_doc(Db, Doc, [delay_commit]) of {ok, PosRevId} -> @@ -920,6 +938,8 @@ update_checkpoint(Db, #doc{id = LogId, body = LogBody} = Doc) -> end catch throw:conflict -> + % TODO: An admin could have changed the access on the checkpoint doc. + % However unlikely, we can handle this gracefully here. case (catch couch_replicator_api_wrap:open_doc(Db, LogId, [ejson_body])) of {ok, #doc{body = LogBody, revs = {Pos, [RevId | _]}}} -> % This means that we were able to update successfully the diff --git a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl b/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl index 30bc12c29af..685e5ace0db 100644 --- a/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl +++ b/src/couch_replicator/test/eunit/couch_replicator_error_reporting_tests.erl @@ -263,7 +263,7 @@ t_fail_changes_queue({_Ctx, {Source, Target}}) -> RepPid = couch_replicator_test_helper:get_pid(RepId), State = sys:get_state(RepPid), - ChangesQueue = element(20, State), + ChangesQueue = element(22, State), ?assert(is_process_alive(ChangesQueue)), {ok, Listener} = rep_result_listener(RepId), @@ -280,7 +280,7 @@ t_fail_changes_manager({_Ctx, {Source, Target}}) -> RepPid = couch_replicator_test_helper:get_pid(RepId), State = sys:get_state(RepPid), - ChangesManager = element(21, State), + 
ChangesManager = element(23, State), ?assert(is_process_alive(ChangesManager)), {ok, Listener} = rep_result_listener(RepId), @@ -297,7 +297,7 @@ t_fail_changes_reader_proc({_Ctx, {Source, Target}}) -> RepPid = couch_replicator_test_helper:get_pid(RepId), State = sys:get_state(RepPid), - ChangesReader = element(22, State), + ChangesReader = element(24, State), ?assert(is_process_alive(ChangesReader)), {ok, Listener} = rep_result_listener(RepId), diff --git a/src/custodian/src/custodian_util.erl b/src/custodian/src/custodian_util.erl index 41f51507dad..2579691b7e4 100644 --- a/src/custodian/src/custodian_util.erl +++ b/src/custodian/src/custodian_util.erl @@ -183,7 +183,8 @@ maintenance_nodes(Nodes) -> [N || {N, Mode} <- lists:zip(Nodes, Modes), Mode =:= "true"]. load_shards(Db, #full_doc_info{id = Id} = FDI) -> - case couch_db:open_doc(Db, FDI, [ejson_body]) of + Doc = couch_db:open_doc(Db, FDI, [ejson_body]), + case Doc of {ok, #doc{body = {Props}}} -> mem3_util:build_shards(Id, Props); {not_found, _} -> diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl index cf40725e4a2..1b2c3db96c8 100644 --- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl +++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid.erl @@ -28,7 +28,7 @@ ddocid({_, DDocId}) -> DDocId. recover({DbName, DDocId}) -> - fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX]). + fabric:open_doc(DbName, DDocId, [ejson_body, ?ADMIN_CTX, ddoc_cache]). insert({DbName, DDocId}, {ok, #doc{revs = Revs} = DDoc}) -> {Depth, [RevId | _]} = Revs, diff --git a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl index 5126f52107b..ce95dfc8236 100644 --- a/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl +++ b/src/ddoc_cache/src/ddoc_cache_entry_ddocid_rev.erl @@ -28,7 +28,7 @@ ddocid({_, DDocId, _}) -> DDocId. 
recover({DbName, DDocId, Rev}) -> - Opts = [ejson_body, ?ADMIN_CTX], + Opts = [ejson_body, ?ADMIN_CTX, ddoc_cache], {ok, [Resp]} = fabric:open_revs(DbName, DDocId, [Rev], Opts), Resp. diff --git a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl index 54f5c673f58..11f42ed08f1 100644 --- a/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl +++ b/src/ddoc_cache/src/ddoc_cache_entry_validation_funs.erl @@ -26,7 +26,8 @@ ddocid(_) -> no_ddocid. recover(DbName) -> - {ok, DDocs} = fabric:design_docs(mem3:dbname(DbName)), + {ok, DDocs0} = fabric:design_docs(mem3:dbname(DbName)), + DDocs = lists:filter(fun couch_doc:has_no_access/1, DDocs0), Funs = lists:flatmap( fun(DDoc) -> case couch_doc:get_validate_doc_fun(DbName, DDoc) of diff --git a/src/fabric/src/fabric_db_info.erl b/src/fabric/src/fabric_db_info.erl index 5461404c508..cdd2e36c2df 100644 --- a/src/fabric/src/fabric_db_info.erl +++ b/src/fabric/src/fabric_db_info.erl @@ -113,6 +113,8 @@ merge_results(Info) -> [{disk_format_version, lists:max(X)} | Acc]; (cluster, [X], Acc) -> [{cluster, {X}} | Acc]; + (access, [X], Acc) -> + [{access, X} | Acc]; (props, Xs, Acc) -> [{props, {merge_object(Xs)}} | Acc]; (_K, _V, Acc) -> diff --git a/src/fabric/src/fabric_doc_update.erl b/src/fabric/src/fabric_doc_update.erl index 77b424911a6..029e47876b7 100644 --- a/src/fabric/src/fabric_doc_update.erl +++ b/src/fabric/src/fabric_doc_update.erl @@ -423,8 +423,9 @@ doc_update1() -> {ok, StW5_2} = handle_message({rexi_EXIT, nil}, SB1, StW5_1), {ok, StW5_3} = handle_message({rexi_EXIT, nil}, SA2, StW5_2), {stop, ReplyW5} = handle_message({rexi_EXIT, nil}, SB2, StW5_3), + ?assertEqual( - {error, [{Doc1, {accepted, "A"}}, {Doc2, {error, internal_server_error}}]}, + {error, [{Doc2, {error, internal_server_error}}, {Doc1, {accepted, "A"}}]}, ReplyW5 ). 
@@ -455,7 +456,7 @@ doc_update2() -> handle_message({rexi_EXIT, 1}, lists:nth(3, Shards), Acc2), ?assertEqual( - {accepted, [{Doc1, {accepted, Doc1}}, {Doc2, {accepted, Doc2}}]}, + {accepted, [{Doc2, {accepted, Doc2}}, {Doc1, {accepted, Doc1}}]}, Reply ). @@ -484,8 +485,7 @@ doc_update3() -> {stop, Reply} = handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2), - - ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply). + ?assertEqual({ok, [{Doc2, {ok, Doc2}}, {Doc1, {ok, Doc1}}]}, Reply). handle_all_dbs_active() -> Doc1 = #doc{revs = {1, [<<"foo">>]}}, @@ -513,7 +513,7 @@ handle_all_dbs_active() -> {stop, Reply} = handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2), - ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply). + ?assertEqual({ok, [{Doc2, {ok, Doc2}}, {Doc1, {ok, Doc1}}]}, Reply). handle_two_all_dbs_actives() -> Doc1 = #doc{revs = {1, [<<"foo">>]}}, @@ -542,7 +542,7 @@ handle_two_all_dbs_actives() -> handle_message({error, all_dbs_active}, lists:nth(3, Shards), Acc2), ?assertEqual( - {accepted, [{Doc1, {accepted, Doc1}}, {Doc2, {accepted, Doc2}}]}, + {accepted, [{Doc2, {accepted, Doc2}}, {Doc1, {accepted, Doc1}}]}, Reply ). @@ -577,8 +577,8 @@ one_forbid() -> ?assertEqual( {ok, [ - {Doc1, {ok, Doc1}}, - {Doc2, {Doc2, {forbidden, <<"not allowed">>}}} + {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}, + {Doc1, {ok, Doc1}} ]}, Reply ). @@ -616,8 +616,8 @@ two_forbid() -> ?assertEqual( {ok, [ - {Doc1, {ok, Doc1}}, - {Doc2, {Doc2, {forbidden, <<"not allowed">>}}} + {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}, + {Doc1, {ok, Doc1}} ]}, Reply ). @@ -654,7 +654,7 @@ extend_tree_forbid() -> {stop, Reply} = handle_message({ok, [{ok, Doc1}, {ok, Doc2}]}, lists:nth(3, Shards), Acc2), - ?assertEqual({ok, [{Doc1, {ok, Doc1}}, {Doc2, {ok, Doc2}}]}, Reply). + ?assertEqual({ok, [{Doc2, {ok, Doc2}}, {Doc1, {ok, Doc1}}]}, Reply). 
other_errors_one_forbid() -> Doc1 = #doc{revs = {1, [<<"foo">>]}}, @@ -684,7 +684,7 @@ other_errors_one_forbid() -> handle_message( {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2 ), - ?assertEqual({error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {error, <<"foo">>}}}]}, Reply). + ?assertEqual({error, [{Doc2, {Doc2, {error, <<"foo">>}}}, {Doc1, {ok, Doc1}}]}, Reply). one_error_two_forbid() -> Doc1 = #doc{revs = {1, [<<"foo">>]}}, @@ -717,7 +717,7 @@ one_error_two_forbid() -> {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2 ), ?assertEqual( - {error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}]}, Reply + {error, [{Doc2, {Doc2, {forbidden, <<"not allowed">>}}}, {Doc1, {ok, Doc1}}]}, Reply ). one_success_two_forbid() -> @@ -751,7 +751,7 @@ one_success_two_forbid() -> {ok, [{ok, Doc1}, {Doc2, {forbidden, <<"not allowed">>}}]}, lists:nth(3, Shards), Acc2 ), ?assertEqual( - {error, [{Doc1, {ok, Doc1}}, {Doc2, {Doc2, {forbidden, <<"not allowed">>}}}]}, Reply + {error, [{Doc2, {Doc2, {forbidden, <<"not allowed">>}}}, {Doc1, {ok, Doc1}}]}, Reply ). 
worker_before_doc_update_forbidden() -> diff --git a/test/elixir/test/cookie_auth_test.exs b/test/elixir/test/cookie_auth_test.exs index 000b1d623ee..4bfc003f9a0 100644 --- a/test/elixir/test/cookie_auth_test.exs +++ b/test/elixir/test/cookie_auth_test.exs @@ -318,7 +318,7 @@ defmodule CookieAuthTest do session = login("jchris", "funnybone") info = Couch.Session.info(session) assert info["userCtx"]["name"] == "jchris" - assert Enum.empty?(info["userCtx"]["roles"]) + assert info["userCtx"]["roles"] == ["_users"] jason_user_doc = jason_user_doc diff --git a/test/elixir/test/jwtauth_test.exs b/test/elixir/test/jwtauth_test.exs index e4f21f261c4..fe07cbabfb5 100644 --- a/test/elixir/test/jwtauth_test.exs +++ b/test/elixir/test/jwtauth_test.exs @@ -186,7 +186,7 @@ defmodule JwtAuthTest do ) assert resp.body["userCtx"]["name"] == "couch@apache.org" - assert resp.body["userCtx"]["roles"] == ["testing"] + assert resp.body["userCtx"]["roles"] == ["testing", "_users"] assert resp.body["info"]["authenticated"] == "jwt" end diff --git a/test/elixir/test/proxyauth_test.exs b/test/elixir/test/proxyauth_test.exs index ea57c1a0e54..0c77abff5eb 100644 --- a/test/elixir/test/proxyauth_test.exs +++ b/test/elixir/test/proxyauth_test.exs @@ -5,7 +5,6 @@ defmodule ProxyAuthTest do @tag :with_db test "proxy auth with secret" do - users_db_name = random_db_name() create_db(users_db_name) @@ -71,7 +70,7 @@ defmodule ProxyAuthTest do ) assert resp2.body["userCtx"]["name"] == "couch@apache.org" - assert resp2.body["userCtx"]["roles"] == ["test_role"] + assert resp2.body["userCtx"]["roles"] == ["test_role", "_users"] assert resp2.body["info"]["authenticated"] == "proxy" assert resp2.body["ok"] == true @@ -79,7 +78,6 @@ defmodule ProxyAuthTest do @tag :with_db test "proxy auth without secret" do - users_db_name = random_db_name() create_db(users_db_name) @@ -126,7 +124,7 @@ defmodule ProxyAuthTest do ) assert resp2.body["userCtx"]["name"] == "couch@apache.org" - assert 
resp2.body["userCtx"]["roles"] == ["test_role_1", "test_role_2"] + assert resp2.body["userCtx"]["roles"] == ["test_role_1", "test_role_2", "_users"] assert resp2.body["info"]["authenticated"] == "proxy" assert resp2.body["ok"] == true diff --git a/test/elixir/test/security_validation_test.exs b/test/elixir/test/security_validation_test.exs index 5c8db1b45da..d6090a80c10 100644 --- a/test/elixir/test/security_validation_test.exs +++ b/test/elixir/test/security_validation_test.exs @@ -149,7 +149,7 @@ defmodule SecurityValidationTest do headers = @auth_headers[:jerry] resp = Couch.get("/_session", headers: headers) assert resp.body["userCtx"]["name"] == "jerry" - assert resp.body["userCtx"]["roles"] == [] + assert resp.body["userCtx"]["roles"] == ["_users"] end @tag :with_db diff --git a/test/elixir/test/users_db_security_test.exs b/test/elixir/test/users_db_security_test.exs index 65674904057..3945d2bfa9d 100644 --- a/test/elixir/test/users_db_security_test.exs +++ b/test/elixir/test/users_db_security_test.exs @@ -400,11 +400,11 @@ defmodule UsersDbSecurityTest do # admin should be able to read from any view resp = view_as(@users_db, "user_db_auth/test", user: "jerry") - assert resp.body["total_rows"] == 3 + assert resp.body["total_rows"] == 4 # db admin should be able to read from any view resp = view_as(@users_db, "user_db_auth/test", user: "speedy") - assert resp.body["total_rows"] == 3 + assert resp.body["total_rows"] == 4 # non-admins can't read design docs open_as(@users_db, "_design/user_db_auth", @@ -419,7 +419,7 @@ defmodule UsersDbSecurityTest do request_raw_as(@users_db, "_design/user_db_auth/_list/names/test", user: "jerry") assert result.status_code == 200 - assert length(String.split(result.body, "\n")) == 4 + assert length(String.split(result.body, "\n")) == 5 # non-admins can't read _list request_raw_as(@users_db, "_design/user_db_auth/_list/names/test", diff --git a/test/elixir/test/view_include_docs_test.exs 
b/test/elixir/test/view_include_docs_test.exs index a7775305840..89e35aa372a 100644 --- a/test/elixir/test/view_include_docs_test.exs +++ b/test/elixir/test/view_include_docs_test.exs @@ -238,7 +238,7 @@ defmodule ViewIncludeDocsTest do doc2 = %{_id: "bar", value: 2, str: "2"} {:ok, _} = create_doc(db_name_a, doc2) - replicate(db_name_a, db_name_b) + replicate("http://127.0.0.1:15984/#{db_name_a}", "http://127.0.0.1:15984/#{db_name_b}") resp = Couch.get("/#{db_name_b}/foo", query: [conflicts: true]) assert resp.status_code == 200