timestamper/src/ts_api.erl

-module(ts_api).
-compile(export_all).
-include("ts_db_records.hrl").
-include("yaws_api.hrl").
out(YArg) ->
% retrieve the session data
Session = ts_api_session:get_session(YArg),
%get the app mod data
PathString = YArg#arg.appmoddata,
% split the path
PathElements = case PathString of
undefined -> []; %handle no end slash: /ts_api
_Any -> string:tokens(PathString, "/")
end,
% process the request
case catch dispatch_request(YArg, Session, PathElements) of
{'EXIT', Err} ->
% TODO: log error internally
error_logger:error_msg("TimeStamper: ~p~n", [Err]),
io:format("Error: ~n~p", [Err]),
make_json_500(YArg, Err);
Other -> Other
end.
% ================================== %
% ======== DISPATCH METHODS ======== %
% ================================== %
%% Entry point to the TimeStamper API dispatch system
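%% Routes on the first path element under /ts_api:
%%   /login, /logout              - session management (no existing session needed)
%%   /app/user_summary/<Username> - aggregate user + timeline summary
%%   /users/[<Username>]          - user records (defaults to the session's user)
%%   /timelines/<Username>/...    - timeline records
%%   /entries/<Username>/...      - entry records
%% Anything else returns a JSON 404 pointing at the API docs.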
dispatch_request(YArg, _Session, []) -> make_json_404(YArg, [{"see_docs", "/ts_api_doc"}]);
dispatch_request(YArg, Session, [H|T]) ->
case {Session, H} of
{_, "login"} -> do_login(YArg);
{_, "logout"} -> do_logout(YArg);
{not_logged_in, _} -> make_json_401(YArg);
{session_expired, _} -> make_json_401(YArg, [{"error", "session expired"}]);
{_S, "app"} -> dispatch_app(YArg, Session, T);
{_S, "users"} -> dispatch_user(YArg, Session, T);
{_S, "timelines"} -> dispatch_timeline(YArg, Session, T);
{_S, "entries"} -> dispatch_entry(YArg, Session, T);
{_S, _Other} -> make_json_404(YArg, [{"see_docs", "/ts_api_doc/"}])
end.
% -------- Dispatch for /app -------- %
dispatch_app(YArg, Session, Params) ->
HTTPMethod = (YArg#arg.req)#http_request.method,
case {HTTPMethod, Params} of
{'OPTIONS', ["user_summary", _]} -> make_CORS_options(YArg, "GET");
{'GET', ["user_summary", UsernameStr]} ->
case {Session#ts_api_session.username,
UsernameStr} of
{Username, Username} -> get_user_summary(YArg, Username);
_ -> make_json_401(YArg)
end;
{_BadMethod, ["user_summary", _UsernameStr]} ->
make_json_405(YArg, [{"see_docs", "/ts_api_doc/app.html"}]);
_Other -> make_json_404(YArg, [{"see_docs", "/ts_api_doc/app.html"}])
end.
% -------- Dispatch for /user -------- %
dispatch_user(YArg, Session, []) ->
dispatch_user(YArg, Session, [Session#ts_api_session.username]);
dispatch_user(YArg, Session, [Username]) ->
HTTPMethod = (YArg#arg.req)#http_request.method,
% compare to the logged-in user
case {HTTPMethod, Session#ts_api_session.username} of
{'OPTIONS', Username} -> make_CORS_options(YArg, "GET, PUT");
{'GET', Username} -> get_user(YArg, Username);
{'PUT', Username} -> put_user(YArg, Username);
{_BadMethod, Username} ->
make_json_405(YArg, [{"see_docs", "/ts_api_doc/users.html"}]);
_Other -> make_json_401(YArg, [{"see_docs", "/ts_api_doc/users.html"}])
end.
% -------- Dispatch for /timeline -------- %
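% Timeline and entry paths embed the owning username; a request that names a
% user other than the one in the session falls through to the 404 clause below.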
dispatch_timeline(YArg, _Session, []) ->
make_json_404(YArg, [{"see_docs", "/ts_api_doc/timelines.html"}]);
dispatch_timeline(YArg, Session, [Username|_T] = PathElements) ->
case Session#ts_api_session.username of
Username -> dispatch_timeline(YArg, PathElements);
_Other -> make_json_404(YArg, [{"see_docs", "/ts_api_doc/users.html"}])
end.
% just username, list timelines
dispatch_timeline(YArg, [Username]) ->
HTTPMethod = (YArg#arg.req)#http_request.method,
case HTTPMethod of
'OPTIONS' -> make_CORS_options(YArg, "GET");
'GET' -> list_timelines(YArg, Username);
_Other -> make_json_405(YArg, [{"see_docs", "/ts_api_doc/timelines.html"}])
end;
dispatch_timeline(YArg, [Username, TimelineId]) ->
HTTPMethod = (YArg#arg.req)#http_request.method,
case HTTPMethod of
'OPTIONS'-> make_CORS_options(YArg, "GET, PUT, DELETE");
'GET' -> get_timeline(YArg, Username, TimelineId);
'PUT' -> put_timeline(YArg, Username, TimelineId);
'DELETE' -> delete_timeline(YArg, Username, TimelineId);
_Other -> make_json_405(YArg, [{"see_docs", "/ts_api_doc/timelines.html"}])
end;
dispatch_timeline(YArg, _Other) ->
make_json_404(YArg, [{"see_docs", "/ts_api_doc/timelines.html"}]).
% -------- Dispatch for /entry -------- %
dispatch_entry(YArg, _Session, []) ->
make_json_404(YArg, [{"see_docs", "/ts_api_doc/entries.html"}]);
dispatch_entry(YArg, Session, [Username|_T] = PathElements) ->
case Session#ts_api_session.username of
Username -> dispatch_entry(YArg, PathElements);
_Other -> make_json_404(YArg, [{"see_docs", "/ts_api_doc/entries.html"}])
end.
dispatch_entry(YArg, [Username, TimelineId]) ->
HTTPMethod = (YArg#arg.req)#http_request.method,
case HTTPMethod of
'OPTIONS' -> make_CORS_options(YArg, "GET, POST");
'GET' -> list_entries(YArg, Username, TimelineId);
'POST' -> post_entry(YArg, Username, TimelineId);
_Other -> make_json_405(YArg, [{"see_docs", "/ts_api_doc/entries.html"}])
end;
dispatch_entry(YArg, [Username, TimelineId, UrlEntryId]) ->
EntryId = list_to_integer(UrlEntryId), % TODO: catch non-numbers (see the parse_entry_id/2 sketch below)
HTTPMethod = (YArg#arg.req)#http_request.method,
case HTTPMethod of
'OPTIONS'-> make_CORS_options(YArg, "GET, PUT, DELETE");
'GET' -> get_entry(YArg, Username, TimelineId, EntryId);
'PUT' -> put_entry(YArg, Username, TimelineId, EntryId);
'DELETE' -> delete_entry(YArg, Username, TimelineId, EntryId);
_Other -> make_json_405(YArg, [{"see_docs", "/ts_api_doc/entries.html"}])
end;
dispatch_entry(YArg, _Other) ->
make_json_404(YArg, [{"see_docs", "/ts_api_doc/entries.html"}]).
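% A minimal sketch of the "catch non-numbers" TODO in dispatch_entry/2 above:
% turn a URL path segment into an integer entry id, answering with a JSON 400
% instead of crashing through to a 500 on bad input. Hypothetical helper, not
% wired into the dispatch code yet.
parse_entry_id(YArg, UrlEntryId) ->
case string:to_integer(UrlEntryId) of
{EntryId, []} -> EntryId;
_Other -> throw(make_json_400(YArg, [{"error", "entry id must be an integer"}]))
end.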
% ============================== %
% ======== IMPLEMENTATION ====== %
% ============================== %
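% do_login/1 expects a JSON body with "username" and "password" fields, e.g.
% (illustrative values): {"username": "alice", "password": "hunter2"}.
% On success it sets the ts_api_session cookie for the whole domain (Path=/)
% and returns {"status": "ok"}; a bad combination returns a JSON 401.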
do_login(YArg) ->
EJSON = parse_json_body(YArg),
{struct, Fields} = EJSON,
case {lists:keyfind(username, 1, Fields),
lists:keyfind(password, 1, Fields)} of
% username and password found
{{username, Username}, {password, Password}} ->
% check the uname, password
case ts_user:check_credentials(Username, Password) of
% they are good
true ->
{CookieVal, _Session} = ts_api_session:new(Username),
[{header, {set_cookie, io_lib:format(
"ts_api_session=~s; Path=/",
[CookieVal])}},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]},
{content, "application/json",
json:encode({struct, [{"status", "ok"}]})}];
% they are not good
false -> make_json_401(YArg, [{"error",
"bad username/password combination"}])
end;
_Other -> make_json_400(YArg, [{"see_docs", "/ts_api_doc/login.html"}])
end.
do_logout(YArg) ->
Cookie = (YArg#arg.headers)#headers.cookie,
CookieVal = yaws_api:find_cookie_val("ts_api_session", Cookie),
ts_api_session:logout(CookieVal),
[{status, 200},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
get_user_summary(YArg, Username) ->
% find user record
case ts_user:lookup(Username) of
% no user record, barf
no_record -> make_json_404(YArg);
% found user record, let us build the return
User ->
% get user extended data properties
UserExtData = ts_ext_data:get_properties(User),
% convert to intermediate JSON form
EJSONUser = ts_json:record_to_ejson(User, UserExtData),
% get the user's timelines
Timelines = ts_timeline:list(Username, 0, 100),
% get each timeline's extended data and convert to EJSON
EJSONTimelines = {array,
lists:map(
fun(Timeline) ->
ts_json:record_to_ejson(Timeline,
ts_ext_data:get_properties(Timeline))
end,
Timelines)},
% write response out
make_json_200(YArg, {struct,
[{user, EJSONUser},
{timelines, EJSONTimelines}
]})
end.
get_user(YArg, Username) ->
% find the user record
case ts_user:lookup(Username) of
% no such user, barf
no_record -> make_json_404(YArg);
% found, return a 200 with the record
User -> make_json_200_record(YArg, User)
end.
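% put_user/2 (and put_timeline/3 below) parse the request body with
% ts_json:ejson_to_record_strict/2, which yields {Record, ExtData}: recognized
% fields populate the record, while any unrecognized fields are collected as
% extended-data key/value pairs and written alongside the record.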
put_user(YArg, Username) ->
% parse the request body
EJSON = parse_json_body(YArg),
{UR, ExtData} =
try ts_json:ejson_to_record_strict(#ts_user{username=Username}, EJSON)
catch throw:{InputError, _StackTrace} ->
error_logger:error_msg("Bad input in put_user/2: ~p~n",
[InputError]),
throw(make_json_400(YArg, [{request_error, InputError}]))
end,
% update the record (we do not support creating users via the API right now)
{ok, UpdatedRec} = ts_user:update(UR, ExtData),
% return a 200
make_json_200_record(YArg, UpdatedRec).
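% list_timelines/2 honors two optional query parameters:
%   start  - offset into the user's timelines, default 0
%   length - page size, capped at 50, default 50
% e.g. GET /ts_api/timelines/<Username>?start=0&length=25 (illustrative)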
list_timelines(YArg, Username) ->
% pull out the query string parameters
QueryData = yaws_api:parse_query(YArg),
% read or default the Start
Start = case lists:keyfind("start", 1, QueryData) of
{"start", StartVal} -> list_to_integer(StartVal);
false -> 0
end,
% read or default the Length
Length = case lists:keyfind("length", 1, QueryData) of
{"length", LengthVal} ->
erlang:min(list_to_integer(LengthVal), 50);
false -> 50
end,
% list the timelines from the database
Timelines = ts_timeline:list(Username, Start, Length),
% convert them all to their EJSON form, adding in extended data for each
EJSONTimelines = {array, lists:map(
fun (Timeline) ->
ts_json:record_to_ejson(Timeline,
ts_ext_data:get_properties(Timeline))
end,
Timelines)},
% return response
make_json_200(YArg, EJSONTimelines).
get_timeline(YArg, Username, TimelineId) ->
% look for timeline
case ts_timeline:lookup(Username, TimelineId) of
% no such timeline, return 404
no_record -> make_json_404(YArg, [{"error", "no such timeline"}]);
% return the timeline data
Timeline -> make_json_200_record(YArg, Timeline)
end.
put_timeline(YArg, Username, TimelineId) ->
% parse the request body
EJSON = parse_json_body(YArg),
%{struct, Fields} = EJSON,
% parse into a timeline record
{TR, ExtData} =
try ts_json:ejson_to_record_strict(
#ts_timeline{ref={Username, TimelineId}}, EJSON)
% we could not parse it; tell the user
catch throw:{InputError, _StackTrace} ->
error_logger:error_msg("Bad input: ~p", [InputError]),
throw(make_json_400(YArg, [{request_error, InputError}]))
end,
% write the changes.
ts_timeline:write(TR, ExtData),
% return a 200
make_json_200_record(YArg, TR).
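
%% Illustrative sketch (not part of the module): ejson_to_record_strict/2 is
%% expected to return the populated record together with any fields it did not
%% recognize, the latter as an extended-data key-value list, so a PUT body for
%% this handler might round-trip roughly as below. The "entry_exclusions"
%% property, the datetime format, and the exact shape of the extended-data
%% list are assumptions, not verified against ts_json.
%%
%%   > ts_json:ejson_to_record_strict(
%%         #ts_timeline{ref={"alice", "work"}},
%%         {struct, [{"created", "2011-06-14T15:24:57Z"},
%%                   {"entry_exclusions", {array, ["lunch"]}}]}).
%%   {#ts_timeline{ref={"alice", "work"}, created={{2011,6,14},{15,24,57}}},
%%    [{"entry_exclusions", ["lunch"]}]}
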
delete_timeline(_YArg, _Username, _TimelineId) -> {status, 405}.
list_entries(YArg, Username, TimelineId) ->
% pull out the query string parameters
QueryData = yaws_api:parse_query(YArg),
% first determine if we are listing by date
case {ts_timeline:lookup(Username, TimelineId),
lists:keyfind("byDate", 1, QueryData)} of
{no_record, _ByDateField} -> make_json_404(YArg,
    [{"error", "no such timeline"},
     {"see_docs", "/ts_api_doc/entries.html#LIST"}]);
% listing by date range
{Timeline, {"byDate", "true"}} ->
% look for the start date; default to the beginning of the timeline
StartDate = case lists:keyfind("startDate", 1, QueryData) of
% TODO: error handling if the date is badly formatted
{"startDate", StartDateVal} -> ts_json:decode_datetime(StartDateVal);
false -> Timeline#ts_timeline.created
end,
% look for end date; default to right now
EndDate = case lists:keyfind("endDate", 1, QueryData) of
% TODO: error handling if the date is badly formatted
{"endDate", EndDateVal} -> ts_json:decode_datetime(EndDateVal);
false -> calendar:now_to_universal_time(erlang:now())
end,
% read sort order and list entries
Entries = case lists:keyfind("order", 1, QueryData) of
% descending sort order
{"order", "desc"} -> ts_entry:list_desc(
{Username, TimelineId}, StartDate, EndDate);
% ascending order ({"order", "asc"}) and the default
_Other -> ts_entry:list_asc(
{Username, TimelineId}, StartDate, EndDate)
end,
EJSONEntries = {array, lists:map(
fun (Entry) ->
ts_json:record_to_ejson(Entry,
ts_ext_data:get_properties(Entry))
end,
Entries)},
make_json_200(YArg, EJSONEntries);
% listing by table position
_Other ->
% read or default the Start
Start = case lists:keyfind("start", 1, QueryData) of
{"start", StartVal} -> list_to_integer(StartVal);
false -> 0
end,
% read or default the Length
Length = case lists:keyfind("length", 1, QueryData) of
{"length", LengthVal} ->
erlang:min(list_to_integer(LengthVal), 500);
false -> 50
end,
% read sort order and list entries
Entries = case lists:keyfind("order", 1, QueryData) of
{"order", "desc"} -> ts_entry:list_desc(
{Username, TimelineId}, Start, Length);
_UnknownOrder -> ts_entry:list_asc(
{Username, TimelineId}, Start, Length)
end,
EJSONEntries = {array, lists:map(
fun (Entry) ->
ts_json:record_to_ejson(Entry,
ts_ext_data:get_properties(Entry))
end,
Entries)},
make_json_200(YArg, EJSONEntries)
end.
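
%% Example queries handled by list_entries/3 above (URLs are illustrative; the
%% appmod mount point and the datetime wire format are assumptions):
%%
%%   GET /ts_api/entries/alice/work?byDate=true&startDate=2011-02-01T00:00:00Z&order=desc
%%       -> entries from startDate up to now, newest first
%%   GET /ts_api/entries/alice/work?start=0&length=50
%%       -> the first 50 entries by table position (length is capped at 500)
%%
%% Both branches repeat the same entry-to-EJSON mapping; a possible shared
%% helper (a sketch, not currently used by this module) would be:
%%
%%   entries_to_ejson(Entries) ->
%%       {array, [ts_json:record_to_ejson(E, ts_ext_data:get_properties(E))
%%                || E <- Entries]}.
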
get_entry(YArg, Username, TimelineId, EntryId) ->
case ts_entry:lookup(Username, TimelineId, EntryId) of
% no such entry
no_record -> make_json_404(YArg, [{"error", "no such entry"}]);
% return the entry data
Entry -> make_json_200_record(YArg, Entry)
end.
post_entry(YArg, Username, TimelineId) ->
% parse the request body
EJSON = parse_json_body(YArg),
% parse into ts_entry record
{ER, ExtData} = try ts_json:ejson_to_record_strict(
#ts_entry{ref = {Username, TimelineId, undefined}}, EJSON)
catch _:InputError ->
error_logger:error_report("Bad input: ~p", [InputError]),
throw(make_json_400(YArg, [{request_error, InputError}]))
end,
case ts_entry:new(ER, ExtData) of
% record created
{ok, CreatedRecord} ->
[{status, 201}, make_json_200_record(YArg, CreatedRecord)];
OtherError ->
error_logger:error_report("Could not create entry: ~p", [OtherError]),
make_json_500(YArg, OtherError)
end.
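%% Update an existing entry. The JSON body is decoded into a #ts_entry{} whose
%% ref is forced to {Username, TimelineId, EntryId}, written together with its
%% extended data, and echoed back as the 200 response body.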
put_entry(YArg, Username, TimelineId, EntryId) ->
% parse the request body
EJSON = parse_json_body(YArg),
% parse into ts_entry record
{ER, ExtData} = try ts_json:ejson_to_record_strict(
#ts_entry{ref={Username, TimelineId, EntryId}}, EJSON)
catch _:InputError ->
error_logger:error_report("Bad input: ~p", [InputError]),
throw(make_json_400(YArg, [{request_error, InputError}]))
end,
ts_entry:write(ER, ExtData),
make_json_200_record(YArg, ER).
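%% Delete an entry: 404 if it does not exist, a bare 200 on success,
%% 500 if ts_entry:delete/1 reports an error.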
delete_entry(YArg, Username, TimelineId, EntryId) ->
% find the record to delete
case ts_entry:lookup(Username, TimelineId, EntryId) of
no_record -> make_json_404(YArg);
Record ->
% try to delete
case ts_entry:delete(Record) of
ok -> {status, 200};
Error ->
error_logger:error_report("Error occurred deleting entry record: ~p", [Error]),
make_json_500(YArg, Error)
end
end.
% ============================== %
% ======== UTIL METHODS ======== %
% ============================== %
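%% Decode the request body into an EJSON term ({struct, Props} and friends).
%% On success json:decode/2 yields {done, {ok, EJSON}, Trailing}; anything
%% else throws a ready-made 400 response, which out/1 hands back to the
%% client unchanged.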
parse_json_body(YArg) ->
case catch json:decode([], binary_to_list(YArg#arg.clidata)) of
{done, {ok, EJSON}, _} -> EJSON;
Error ->
% TODO: log error internally
error_logger:error_report("Error parsing JSON request body: ~p", [Error]),
throw(make_json_400(YArg))
end.
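%% Fetch the value of the request's Origin header for echoing back in
%% Access-Control-Allow-Origin; falls back to "*" when no Origin was sent.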
get_origin_header(YArg) ->
Headers = (YArg#arg.headers)#headers.other,
% echo back the caller's Origin if one was sent; otherwise allow any origin
case lists:keyfind("Origin", 3, Headers) of
{http_header, _, "Origin", _, Origin} -> Origin;
_NoOrigin -> "*"
end.
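%% Build the reply to a CORS preflight (OPTIONS) request. AllowedMethods is
%% used verbatim as the header value, e.g. "GET, PUT". Illustrative result,
%% assuming the client sent an Origin of http://example.org:
%%   [{status, 200},
%%    {header, ["Access-Control-Allow-Origin: ", "http://example.org"]},
%%    {header, ["Access-Control-Allow-Methods: ", "GET, PUT"]},
%%    {header, ["Access-Control-Allow-Credentials: ", "true"]}]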
make_CORS_options(YArg, AllowedMethods) ->
[{status, 200},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Methods: ", AllowedMethods]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_CORS_options(_YArg, AllowedOrigins, AllowedMethods) ->
[{status, 200},
{header, ["Access-Control-Allow-Origin: ", AllowedOrigins]},
{header, ["Access-Control-Allow-Methods: ", AllowedMethods]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
%% Create a JSON 200 response from an already-built EJSON term.
make_json_200(YArg, EJSONResponse) ->
JSONResponse = json:encode(EJSONResponse),
[{content, "application/json", JSONResponse},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_200_record(YArg, Record) ->
RecordExtData = ts_ext_data:get_properties(Record),
EJSON = ts_json:record_to_ejson(Record, RecordExtData),
JSONResponse = json:encode(EJSON),
[{content, "application/json", JSONResponse},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_400(YArg) -> make_json_400(YArg, []).
make_json_400(YArg, Fields) ->
F1 = case lists:keyfind("status", 1, Fields) of
false -> Fields ++ [{"status", "bad request"}];
_Else -> Fields
end,
[{status, 400}, {content, "application/json", json:encode({struct, F1})},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_401(YArg) -> make_json_401(YArg, []).
make_json_401(YArg, Fields) ->
% add default status if not provided
F1 = case lists:keyfind("status", 1, Fields) of
false -> Fields ++ [{"status", "unauthorized"}];
_Else -> Fields
end,
[{status, 401},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]},
{content, "application/json", json:encode({struct, F1})}].
%% Create a JSON 404 response.
make_json_404(YArg) -> make_json_404(YArg, []).
make_json_404(YArg, Fields) ->
% add default status if not provided
F1 = case lists:keyfind("status", 1, Fields) of
false -> Fields ++ [{"status", "not found"}];
_Else -> Fields
end,
% add the path they requested
F2 = F1 ++ [{path, element(2, (YArg#arg.req)#http_request.path)}],
[{status, 404}, {content, "application/json", json:encode({struct, F2})},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_405(YArg) -> make_json_405(YArg, []).
make_json_405(YArg, Fields) ->
% add default status if not provided
F1 = case lists:keyfind("status", 1, Fields) of
false -> Fields ++ [{"status", "method not allowed"}];
_Else -> Fields
end,
% add the path they requested
% F2 = F1 ++ [{path, io_lib:format("~p", [(YArg#arg.req)#http_request.path])}],
[{status, 405}, {content, "application/json", json:encode({struct, F1})},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_500(YArg, Error) ->
io:format("Error: ~n~p", [Error]),
EJSON = {struct, [
{"status", "internal server error"},
{"error", lists:flatten(io_lib:format("~p", [Error]))}]},
[{status, 500}, {content, "application/json", json:encode(EJSON)},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].
make_json_500(YArg) ->
EJSON = {struct, [
{"status", "internal server error"}]},
[{status, 500}, {content, "application/json", json:encode(EJSON)},
{header, ["Access-Control-Allow-Origin: ", get_origin_header(YArg)]},
{header, ["Access-Control-Allow-Credentials: ", "true"]}].