diff --git a/.gitignore b/.gitignore
index eecaf211..8de2732a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@ ebin/*
test-unchanged.escript
.idea
*.iml
+*.dump
diff --git a/.gitmodules b/.gitmodules
index b15c906c..02d16128 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,6 @@
path = doc
url = https://github.com/basho/riak-erlang-client.git
branch = gh-pages
+[submodule "tools"]
+ path = tools
+ url = https://github.com/basho/riak-client-tools.git
diff --git a/.travis.yml b/.travis.yml
index 5fb351f9..1e0f9ce1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,18 @@
+sudo: required
+dist: trusty
language: erlang
-notifications:
- webhooks: http://basho-engbot.herokuapp.com/travis?key=8f07584549e458d4c83728f3397ecbd4368e60a8
- email: eng@basho.com
otp_release:
+ - 19.1
- 18.3
- 17.5
- R16B03
+ - R15B03
+env:
+ - RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.0/2.0.7/ubuntu/trusty/riak_2.0.7-1_amd64.deb
+ - RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.1/2.1.4/ubuntu/trusty/riak_2.1.4-1_amd64.deb
+before_script:
+ - sudo ./tools/travis-ci/riak-install -d "$RIAK_DOWNLOAD_URL"
+ - sudo ./tools/setup-riak
+notifications:
+ slack:
+ secure: JVsrhRuWRTQauP7OjSc1XO6+P3eiOZtkjYhU2R53Hn9dK1KmJRBR5MzO1nq6BUs+bViXiAyW0YOoDTWF0eUw5gdd6sqnvx0+mYJVfYDTfbjp46yqj03Nj+J5HZ1KWPM78NSZ8jpZvdwk35ZpHqhsh/zWOY2RYmIVQKLB9EthHLU=
diff --git a/Makefile b/Makefile
index b1844d3f..1f86d7a2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,9 @@
-.PHONY: all clean compile deps distclean release docs
+.PHONY: all lint clean compile deps distclean release docs
all: deps compile
+lint: xref dialyzer
+
compile: deps
./rebar compile
@@ -24,10 +26,10 @@ endif
@echo "==> Tagging version $(VERSION)"
# NB: Erlang client version strings do NOT start with 'v'. Le Sigh.
# validate VERSION and allow pre-releases
- @bash ./build/publish $(VERSION) validate
+ @./tools/build/publish $(VERSION) master validate
@git tag --sign -a "$(VERSION)" -m "riak-erlang-client $(VERSION)" --local-user "$(RELEASE_GPG_KEYNAME)"
@git push --tags
- @bash ./build/publish $(VERSION)
+ @./tools/build/publish $(VERSION) master 'Riak Erlang Client' 'riak-erlang-client'
DIALYZER_APPS = kernel stdlib sasl erts eunit ssl tools crypto \
diff --git a/README.md b/README.md
index bd47b013..84da1713 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,7 @@ Build Status
============
* Master: [![Build Status](https://travis-ci.org/basho/riak-erlang-client.svg?branch=master)](https://travis-ci.org/basho/riak-erlang-client)
+* Develop: [![Build Status](https://travis-ci.org/basho/riak-erlang-client.svg?branch=develop)](https://travis-ci.org/basho/riak-erlang-client)
This document assumes that you have already started your Riak cluster. For instructions on that prerequisite, refer to [Installation and Setup](https://wiki.basho.com/Installation-and-Setup.html) in the [Riak Wiki](https://wiki.basho.com). You can also view the Riak Erlang Client EDocs [here](http://basho.github.com/riak-erlang-client/).
@@ -260,7 +261,7 @@ Note that keylist updates are asynchronous to the object storage primitives, and
4> receive Msg2 -> Msg2 end.
{87009603,done}
-See [`riakc_pb_socket:wait_for_listkeys`](https://github.com/basho/riak-erlang-client/blob/master/src/riakc_pb_socket.erl#L1087) for an example of receiving.
+See [`riakc_utils:wait_for_list`](https://github.com/basho/riak-erlang-client/blob/develop/src/riakc_utils.erl) for a function to receive data.
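For illustration, a minimal sketch (assuming a connected `Pid` and a hypothetical `<<"groceries">>` bucket) of collecting a streamed key list with that helper:

```erlang
%% Stream the keys of a bucket, then gather the flattened result.
{ok, ReqId} = riakc_pb_socket:stream_list_keys(Pid, <<"groceries">>),
{ok, Keys} = riakc_utils:wait_for_list(ReqId).
```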
Bucket Properties
=================
@@ -664,12 +665,12 @@ CREATE TABLE GeoCheckin
### Store TS Data
-To write data to your table, put the data in a list, and use the `riakc_ts:put/3` function. Please ensure the the order of the data is the same as the table definition.
+To write data to your table, put the data in a list, and use the `riakc_ts:put/3` function. Please ensure the order of the data is the same as the table definition, and note that each row is a tuple of values corresponding to the columns in the table.
```erlang
{ok, Pid} = riakc_pb_socket:start_link("myriakdb.host", 10017).
-riakc_ts:put(Pid, "GeoCheckin", [[<<"family1">>, <<"series1">>, 1234567, <<"hot">>, 23.5], [<<"family2">>, <<"series99">>, 1234567, <<"windy">>, 19.8]]).
+riakc_ts:put(Pid, "GeoCheckin", [{<<"family1">>, <<"series1">>, 1234567, <<"hot">>, 23.5}, {<<"family2">>, <<"series99">>, 1234567, <<"windy">>, 19.8}]).
```
### Query TS Data
@@ -736,6 +737,7 @@ This is not a comprehensive list, please see the commit history.
* [Kelly McLaughlin](https://github.com/kellymclaughlin)
* [Kevin Smith](https://github.com/kevsmith)
* [Luc Perkins](https://github.com/lucperkins)
+* [Luca Favatella](https://github.com/lucafavatella)
* [Lukasz Milewski](https://github.com/milek)
* [Luke Bakken](https://github.com/lukebakken)
* [Mark Phillips](https://github.com/phips)
diff --git a/RELNOTES.md b/RELNOTES.md
index 5837303a..5c3a7d96 100644
--- a/RELNOTES.md
+++ b/RELNOTES.md
@@ -1,10 +1,12 @@
Release Notes
=============
-* [2.4.1](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.4.1)
+* [`2.5.0`](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.5.0)
+ * Adds HyperLogLog CRDT data type support
+* [`2.4.1`](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.4.1)
* OTP 19 support via updated dependencies and a few platform defines
-* [2.4.0](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.4.0)
+* [`2.4.0`](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.4.0)
* This release merges in the Riak timeseries code base from the `2.3.1-timeseries` and `2.3.2-timeseries` releases.
-* [2.1.2](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.1.2)
+* [`2.1.2`](https://github.com/basho/riak-erlang-client/issues?q=milestone%3Ariak-erlang-client-2.1.2)
* OTP 18 support
* Fix processing of response from Riak when Riak generates an object's key [PR](https://github.com/basho/riak-erlang-client/pull/289)
diff --git a/build/publish b/build/publish
deleted file mode 100755
index 4f2a91fc..00000000
--- a/build/publish
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-
-declare -r debug='false'
-declare -r tmpfile_file="/tmp/publish.$$.tmpfiles"
-
-function make_temp_file
-{
- local template="${1:-publish.$$.XXXXXX}"
- if [[ $template != *XXXXXX ]]
- then
- template="$template.XXXXXX"
- fi
- local tmp=$(mktemp -t "$template")
- echo "$tmp" >> "$tmpfile_file"
- echo "$tmp"
-}
-
-function now
-{
- date '+%Y-%m-%d %H:%M:%S'
-}
-
-function pwarn
-{
- echo "$(now) [warning]: $@" 1>&2
-}
-
-function perr
-{
- echo "$(now) [error]: $@" 1>&2
-}
-
-function pinfo
-{
- echo "$(now) [info]: $@"
-}
-
-function pdebug
-{
- if [[ $debug == 'true' ]]
- then
- echo "$(now) [debug]: $@"
- fi
-}
-
-function errexit
-{
- perr "$@"
- exit 1
-}
-
-function onexit
-{
- if [[ -f $tmpfile_file ]]
- then
- for tmpfile in $(< $tmpfile_file)
- do
- pdebug "removing temp file $tmpfile"
- rm -f $tmpfile
- done
- rm -f $tmpfile_file
- fi
-}
-
-function gh_publish {
- if [[ -z $version_string ]]
- then
- errexit 'gh_publish: version_string required'
- fi
-
- # NB: we use a X.Y.Z tag
- local -r release_json="{
- \"tag_name\" : \"$version_string\",
- \"name\" : \"Riak Erlang Client $version_string\",
- \"body\" : \"riak-erlang-client $version_string\nhttps://github.com/basho/riak-erlang-client/blob/master/RELNOTES.md\",
- \"draft\" : false,
- \"prerelease\" : $is_prerelease
- }"
-
- pdebug "Release JSON: $release_json"
-
- local curl_content_file="$(make_temp_file)"
- local curl_stdout_file="$(make_temp_file)"
- local curl_stderr_file="$(make_temp_file)"
-
- curl -4so $curl_content_file -w '%{http_code}' -XPOST \
- -H "Authorization: token $(< $github_api_key_file)" -H 'Content-type: application/json' \
- 'https://api.github.com/repos/basho/riak-erlang-client/releases' -d "$release_json" 1> "$curl_stdout_file" 2> "$curl_stderr_file"
- if [[ $? != 0 ]]
- then
- errexit "curl error exited with code: '$?' see '$curl_stderr_file'"
- fi
-
- local -i curl_rslt="$(< $curl_stdout_file)"
- if (( curl_rslt == 422 ))
- then
- pwarn "Release in GitHub already exists! (http code: '$curl_rslt')"
- curl -4so $curl_content_file -w '%{http_code}' -XGET \
- -H "Authorization: token $(< $github_api_key_file)" -H 'Content-type: application/json' \
- "https://api.github.com/repos/basho/riak-erlang-client/releases/tags/$version_string" 1> "$curl_stdout_file" 2> "$curl_stderr_file"
- if [[ $? != 0 ]]
- then
- errexit "curl error exited with code: '$?' see '$curl_stderr_file'"
- fi
- elif (( curl_rslt != 201 ))
- then
- errexit "Creating release in GitHub failed with http code '$curl_rslt'"
- fi
-}
-
-trap onexit EXIT
-
-declare -r version_string="${1:-unknown}"
-
-if [[ ! $version_string =~ ^[0-9].[0-9].[0-9](-[a-z]+[0-9]+)?$ ]]
-then
- errexit 'first argument must be valid version string in X.Y.Z format'
-fi
-
-is_prerelease='false'
-if [[ $version_string =~ ^[0-9].[0-9].[0-9]-[a-z]+[0-9]+$ ]]
-then
- pinfo "publishing pre-release version: $version_string"
- is_prerelease='true'
-else
- pinfo "publishing version $version_string"
-fi
-
-declare -r current_branch="$(git rev-parse --abbrev-ref HEAD)"
-
-if [[ $debug == 'false' && $is_prerelease == 'false' && $current_branch != 'master' ]]
-then
- errexit 'publish must be run on master branch'
-fi
-
-declare -r github_api_key_file="$HOME/.ghapi"
-if [[ ! -s $github_api_key_file ]]
-then
- errexit "please save your GitHub API token in $github_api_key_file"
-fi
-
-# Validate commands
-if ! hash curl 2>/dev/null
-then
- errexit "'curl' must be in your PATH"
-fi
-
-validate=${2:-''}
-if [[ $validate == 'validate' ]]
-then
- exit 0
-fi
-
-gh_publish
diff --git a/buildbot/Makefile b/buildbot/Makefile
deleted file mode 100644
index da28d708..00000000
--- a/buildbot/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-RIAK_CONF = ${RIAK_DIR}/etc/riak.conf
-ADVANCED_CONF = ${RIAK_DIR}/etc/advanced.config
-# RIAK = ${RIAK_DIR}/bin/riak
-RIAK_ADMIN = ${RIAK_DIR}/bin/riak-admin
-# CERTS_DIR = $(shell pwd)/../src/test/resources
-
-preconfigure:
- echo "storage_backend = memory" >> ${RIAK_CONF}
- echo "anti_entropy = passive" >> ${RIAK_CONF}
- ./add_to_advanced_config ${ADVANCED_CONF} '{riak_kv,[{test,true}]}.'
- echo "search = on" >> ${RIAK_CONF}
- echo "listener.protobuf.internal = 127.0.0.1:8087" >> ${RIAK_CONF}
- echo "distributed_cookie = riak" >> ${RIAK_CONF}
- echo "nodename = riak@127.0.0.1" >> ${RIAK_CONF}
-
-configure:
- ./crdt-setup ${RIAK_ADMIN}
-
-compile:
- @${MAKE} -C .. compile
-
-lint:
- @${MAKE} -C .. xref dialyzer
-
-test:
- @${MAKE} -C .. test
diff --git a/buildbot/add_to_advanced_config b/buildbot/add_to_advanced_config
deleted file mode 100755
index 4b07b2e1..00000000
--- a/buildbot/add_to_advanced_config
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-main([File|Additions]) ->
- Settings = case file:consult(File) of
- {ok, S} -> S;
- _ -> []
- end,
- Terms = [ begin
- {ok, Tokens, _} = erl_scan:string(String),
- {ok, Term} = erl_parse:parse_term(Tokens),
- Term
- end || String <- Additions ],
- Merged = overlay(lists:flatten(Settings), Terms),
- file:write_file(File, io_lib:format("~p.",[Merged])).
-
-overlay(InputConfig, NewConfig) ->
- lists:foldl(
- fun({ApplicationName, ApplicationConfig}, OuterAcc) ->
- GeneratedApplicationConfig = proplists:get_value(ApplicationName, InputConfig, []),
- Updated = lists:foldl(
- fun({ConfigElementName, ConfigElement}, Acc) ->
- replace_proplist_value(ConfigElementName, ConfigElement, Acc)
- end,
- GeneratedApplicationConfig,
- ApplicationConfig),
- replace_proplist_value(ApplicationName, Updated, OuterAcc)
- end,
- InputConfig,
- NewConfig).
-
-replace_proplist_value(Key, Value, Proplist) ->
- lists:keystore(Key, 1, Proplist, {Key, Value}).
diff --git a/buildbot/crdt-setup b/buildbot/crdt-setup
deleted file mode 100755
index 155b18fd..00000000
--- a/buildbot/crdt-setup
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-# Activate CRDT bucket types for testing.
-# Takes full path to riak-admin as argument.
-
-TIMEOUT=30
-
-test -x $1 || exit 1
-ADMIN=$1
-
-$ADMIN bucket-type create map_bucket '{"props":{"datatype":"map"}}' || exit 1
-$ADMIN bucket-type create set_bucket '{"props":{"datatype":"set"}}' || exit 1
-$ADMIN bucket-type create hll_bucket '{"props":{"datatype":"hll"}}' || exit 1
-$ADMIN bucket-type create counter_bucket '{"props":{"datatype":"counter"}}' || exit 1
-
-loop_counter=0
-
-until
- $ADMIN bucket-type activate map_bucket && \
- $ADMIN bucket-type activate set_bucket && \
- $ADMIN bucket-type activate hll_bucket && \
- $ADMIN bucket-type activate counter_bucket
-do
- echo "Waiting until activation"
- loop_counter=`expr $loop_counter + 1`
-
- test $loop_counter -gt $TIMEOUT && exit 1
- sleep 1
-done
diff --git a/rebar.config b/rebar.config
index 364ed479..2577cc37 100644
--- a/rebar.config
+++ b/rebar.config
@@ -13,7 +13,7 @@
]}.
{deps, [
- {riak_pb, ".*", {git, "git://github.com/basho/riak_pb", {branch, "develop-2.2"}}}
+ {riak_pb, ".*", {git, "https://github.com/basho/riak_pb", {tag, "2.2.0.0"}}}
]}.
{edoc_opts, [
diff --git a/src/riakc_map.erl b/src/riakc_map.erl
index 5ed8fdc2..5944f9a6 100644
--- a/src/riakc_map.erl
+++ b/src/riakc_map.erl
@@ -155,6 +155,8 @@ erase(Key, #map{removes=R}=M) ->
%% it will be initialized to the empty value for its type before being
%% passed to the function.
-spec update(key(), update_fun(), crdt_map()) -> crdt_map().
+update({_Name, hll}, _Fun, _M) ->
+ erlang:error(badarg, ["maps may not contain hll datatype"]);
update(Key, Fun, #map{updates=U}=M) ->
%% In order, search for key in 1) batched updates, then 2) values
%% taken from Riak, and otherwise 3) create a new, empty data type
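As a rough sketch of the guard added above (the map key names here are hypothetical): nested counters, sets, registers, flags and maps can still be updated in place, while an `hll` entry is rejected with `badarg`:

```erlang
%% Updating a nested counter works; an hll entry inside a map does not.
M0 = riakc_map:new(),
M1 = riakc_map:update({<<"likes">>, counter},
                      fun(C) -> riakc_counter:increment(1, C) end,
                      M0),
%% riakc_map:update({<<"uniques">>, hll}, fun(H) -> H end, M1) would raise badarg.
```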
diff --git a/src/riakc_pb_socket.erl b/src/riakc_pb_socket.erl
index 857c06f9..ad0127ee 100644
--- a/src/riakc_pb_socket.erl
+++ b/src/riakc_pb_socket.erl
@@ -2,7 +2,7 @@
%%
%% riakc_pb_socket: protocol buffer client
%%
-%% Copyright (c) 2007-2013 Basho Technologies, Inc. All Rights Reserved.
+%% Copyright (c) 2007-2016 Basho Technologies, Inc. All Rights Reserved.
%%
%% This file is provided to you under the Apache License,
%% Version 2.0 (the "License"); you may not use this file
@@ -77,10 +77,6 @@
get_coverage/2, get_coverage/3,
replace_coverage/3, replace_coverage/4]).
-%% Counter API
--export([counter_incr/4, counter_val/3]).
-%% with options
--export([counter_incr/5, counter_val/4]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
@@ -93,6 +89,10 @@
get_search_schema/2, get_search_schema/3,
create_search_schema/3, create_search_schema/4]).
+%% Pre-Riak 2.0 Counter API - NOT for CRDT counters
+-export([counter_incr/4, counter_val/3]).
+-export([counter_incr/5, counter_val/4]).
+
%% Datatypes API
-export([fetch_type/3, fetch_type/4,
update_type/4, update_type/5,
@@ -438,14 +438,14 @@ delete_obj(Pid, Obj, Options, Timeout) ->
delete_vclock(Pid, riakc_obj:bucket(Obj), riakc_obj:key(Obj),
riakc_obj:vclock(Obj), Options, Timeout).
-%% @doc List all buckets on the server.
+%% @doc List all buckets on the server in the "default" bucket type.
%% This is a potentially expensive operation and should not be used in production.
%% @equiv list_buckets(Pid, default_timeout(list_buckets_timeout))
-spec list_buckets(pid()) -> {ok, [bucket()]} | {error, term()}.
list_buckets(Pid) ->
list_buckets(Pid, <<"default">>, []).
-%% @doc List all buckets on the server specifying server-side timeout.
+%% @doc List all buckets in a bucket type, specifying server-side timeout.
%% This is a potentially expensive operation and should not be used in production.
-spec list_buckets(pid(), timeout()|list()|binary()) -> {ok, [bucket()]} |
{error, term()}.
@@ -459,7 +459,7 @@ list_buckets(Pid, Options) ->
list_buckets(Pid, Type, Options) when is_binary(Type), is_list(Options) ->
case stream_list_buckets(Pid, Type, Options) of
{ok, ReqId} ->
- wait_for_list(ReqId);
+ riakc_utils:wait_for_list(ReqId);
Error ->
Error
end.
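As a rough usage sketch of the bucket-type aware listing described above (the `<<"my_type">>` bucket type is a hypothetical example; listing buckets remains expensive and unsuited to production):

```erlang
%% List every bucket under a named bucket type.
{ok, Buckets} = riakc_pb_socket:list_buckets(Pid, <<"my_type">>, []).
```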
@@ -516,7 +516,7 @@ list_keys(Pid, Bucket, Timeout) when is_integer(Timeout) ->
list_keys(Pid, Bucket, Options) ->
case stream_list_keys(Pid, Bucket, Options) of
{ok, ReqId} ->
- wait_for_list(ReqId);
+ riakc_utils:wait_for_list(ReqId);
Error ->
Error
end.
@@ -1181,12 +1181,12 @@ tunnel(Pid, MsgId, Pkt, Timeout) ->
Req = {tunneled, MsgId, Pkt},
call_infinity(Pid, {req, Req, Timeout}).
-%% @doc increment the counter at `bucket', `key' by `amount'
+%% @doc increment the pre-Riak 2 counter at `bucket', `key' by `amount'
-spec counter_incr(pid(), bucket() | bucket_and_type(), key(), integer()) -> ok.
counter_incr(Pid, Bucket, Key, Amount) ->
counter_incr(Pid, Bucket, Key, Amount, []).
-%% @doc increment the counter at `Bucket', `Key' by `Amount'.
+%% @doc increment the pre-Riak 2 counter at `Bucket', `Key' by `Amount'.
%% use the provided `write_quorum()' `Options' for the operation.
%% A counter increment is a lot like a riak `put' so the semantics
%% are the same for the given options.
@@ -1197,13 +1197,13 @@ counter_incr(Pid, Bucket, Key, Amount, Options) ->
Req = counter_incr_options(Options, #rpbcounterupdatereq{bucket=B, key=Key, amount=Amount}),
call_infinity(Pid, {req, Req, default_timeout(put_timeout)}).
-%% @doc get the current value of the counter at `Bucket', `Key'.
+%% @doc get the current value of the pre-Riak 2 counter at `Bucket', `Key'.
-spec counter_val(pid(), bucket() | bucket_and_type(), key()) ->
{ok, integer()} | {error, notfound}.
counter_val(Pid, Bucket, Key) ->
counter_val(Pid, Bucket, Key, []).
-%% @doc get the current value of the counter at `Bucket', `Key' using
+%% @doc get the current value of the pre-Riak 2 counter at `Bucket', `Key' using
%% the `read_qurom()' `Options' provided.
-spec counter_val(pid(), bucket() | bucket_and_type(), key(), [read_quorum()]) ->
{ok, integer()} | {error, term()}.
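For context, a minimal usage sketch of this legacy counter API (bucket and key names are hypothetical; the bucket must have `allow_mult` enabled, as the old counter tests also required):

```erlang
%% Legacy (pre-Riak 2.0) counter: enable allow_mult, increment, then read back.
ok = riakc_pb_socket:set_bucket(Pid, <<"counter_bucket">>, [{allow_mult, true}]),
ok = riakc_pb_socket:counter_incr(Pid, <<"counter_bucket">>, <<"hits">>, 1),
{ok, N} = riakc_pb_socket:counter_val(Pid, <<"counter_bucket">>, <<"hits">>).
```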
@@ -2386,18 +2386,6 @@ mk_reqid() -> erlang:phash2(crypto:strong_rand_bytes(10)). % only has to be uniq
mk_reqid() -> erlang:phash2(crypto:rand_bytes(10)). % only has to be unique per-pid
-endif.
-%% @private
-wait_for_list(ReqId) ->
- wait_for_list(ReqId, []).
-%% @private
-wait_for_list(ReqId, Acc) ->
- receive
- {ReqId, done} -> {ok, lists:flatten(Acc)};
- {ReqId, {error, Reason}} -> {error, Reason};
- {ReqId, {_, Res}} -> wait_for_list(ReqId, [Res|Acc])
- end.
-
-
%% @private
wait_for_mapred(ReqId, Timeout) ->
wait_for_mapred_first(ReqId, Timeout).
@@ -2543,326 +2531,9 @@ set_index_create_req_timeout(Timeout, Req) when Timeout =:= infinity ->
set_index_create_req_timeout(Timeout, _Req) when not is_integer(Timeout) ->
erlang:error(badarg).
-
-%% ====================================================================
-%% unit tests
-%% ====================================================================
-
-%% Tests disabled until they can be prevented from running when included
-%% as a dependency.
-%%
-ifdef(TEST).
--compile(export_all).
--include_lib("eunit/include/eunit.hrl").
-
-%% Get the test host - check env RIAK_TEST_PB_HOST then env 'RIAK_TEST_HOST_1'
-%% falling back to 127.0.0.1
-test_ip() ->
- case os:getenv("RIAK_TEST_PB_HOST") of
- false ->
- case os:getenv("RIAK_TEST_HOST_1") of
- false ->
- "127.0.0.1";
- Host ->
- Host
- end;
- Host ->
- Host
- end.
-
-%% Test port - check env RIAK_TEST_PBC_1
-test_port() ->
- case os:getenv("RIAK_TEST_PBC_1") of
- false ->
- 8087;
- PortStr ->
- list_to_integer(PortStr)
- end.
-
-%% Riak node under test - used to setup/configure/tweak it for tests
-test_riak_node() ->
- case os:getenv("RIAK_TEST_NODE_1") of
- false ->
- 'riak@127.0.0.1';
- NodeStr ->
- list_to_atom(NodeStr)
- end.
-
-%% Node for the eunit node for distributed erlang
-test_eunit_node() ->
- case os:getenv("RIAK_EUNIT_NODE") of
- false ->
- 'eunit@127.0.0.1';
- EunitNodeStr ->
- list_to_atom(EunitNodeStr)
- end.
-
-%% Cookie for distributed erlang
-test_cookie() ->
- case os:getenv("RIAK_TEST_COOKIE") of
- false ->
- 'riak';
- CookieStr ->
- list_to_atom(CookieStr)
- end.
-
-%% Get the riak version from the init boot script, turn it into a list
-%% of integers.
-riak_version() ->
- StrVersion = element(2, rpc:call(test_riak_node(), init, script_id, [])),
- {match, [Major, Minor, Patch|_]} = re:run(StrVersion, "\\d+", [global, {capture, first, list}]),
- [ list_to_integer(V) || [V] <- [Major, Minor, Patch]].
-
-%% Compare the first three part version array with the second.
-%% returns `greater', `less', or `equal'.
-compare_versions([M1,N1,P1], [M2,N2,P2]) ->
- V1 = (M1*1000000)+(N1*1000)+(P1),
- V2 = (M2*1000000)+(N2*1000)+(P2),
- case {V1 > V2, V1 == V2} of
- {true,_} ->
- greater;
- {false,false} ->
- less;
- {false,true} ->
- equal
- end.
-
-%% Resets the riak node
-reset_riak() ->
- %% sleep because otherwise we're going to kill the vnodes too fast
- %% for the supervisor's maximum restart frequency, which will bring
- %% down the entire node
- ?assertEqual(ok, maybe_start_network()),
- case compare_versions(riak_version(), [1,2,0]) of
- less ->
- reset_riak_legacy();
- _ ->
- reset_riak_12()
- end.
-
-reset_solr(Pid) ->
- %% clear indexes
- {ok, Indexes} = ?MODULE:list_search_indexes(Pid),
- [ ?MODULE:delete_search_index(Pid, proplists:get_value(index,Index)) || Index <- Indexes ],
- wait_until( fun() ->
- {ok, []} == ?MODULE:list_search_indexes(Pid)
- end, 20, 1000),
- ok.
-
-%% Resets a Riak 1.2+ node, which can run the memory backend in 'test'
-%% mode.
-reset_riak_12() ->
- set_test_backend(),
- ok = rpc:call(test_riak_node(), riak_kv_memory_backend, reset, []),
- reset_ring().
-
-%% Sets up the memory/test backend, leaving it alone if already set properly.
-set_test_backend() ->
- Env = rpc:call(test_riak_node(), application, get_all_env, [riak_kv]),
- Backend = proplists:get_value(storage_backend, Env),
- Test = proplists:get_value(test, Env),
- case {Backend, Test} of
- {riak_kv_memory_backend, true} ->
- ok;
- _ ->
- ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, storage_backend, riak_kv_memory_backend]),
- ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, test, true]),
- Vnodes = rpc:call(test_riak_node(), riak_core_vnode_manager, all_vnodes, [riak_kv_vnode]),
- [ ok = rpc:call(test_riak_node(), supervisor, terminate_child, [riak_core_vnode_sup, Pid]) ||
- {_, _, Pid} <- Vnodes ]
- end.
-
-%% Resets a Riak 1.1 and earlier node.
-reset_riak_legacy() ->
- timer:sleep(500),
- %% Until there is a good way to empty the vnodes, require the
- %% test to run with ETS and kill the vnode master/sup to empty all the ETS tables
- %% and the ring manager to remove any bucket properties
- ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, storage_backend, riak_kv_memory_backend]),
-
- %% Restart the vnodes so they come up with ETS
- ok = supervisor:terminate_child({riak_kv_sup, test_riak_node()}, riak_kv_vnode_master),
- ok = supervisor:terminate_child({riak_core_sup, test_riak_node()}, riak_core_vnode_sup),
- {ok, _} = supervisor:restart_child({riak_core_sup, test_riak_node()}, riak_core_vnode_sup),
- {ok, _} = supervisor:restart_child({riak_kv_sup, test_riak_node()}, riak_kv_vnode_master),
-
- %% Clear the MapReduce cache
- ok = rpc:call(test_riak_node(), riak_kv_mapred_cache, clear, []),
-
- %% Now reset the ring so bucket properties are default
- reset_ring().
-
-%% Resets the ring to a fresh one, effectively deleting any bucket properties.
-reset_ring() ->
- Ring = rpc:call(test_riak_node(), riak_core_ring, fresh, []),
- ok = rpc:call(test_riak_node(), riak_core_ring_manager, set_my_ring, [Ring]).
-
-
-%% Finds the pid of the PB listener process
-riak_pb_listener_pid() ->
- {Children, Proc} = case compare_versions(riak_version(), [1,2,0]) of
- less ->
- {supervisor:which_children({riak_kv_sup, test_riak_node()}),
- riak_kv_pb_listener};
- _ ->
- {supervisor:which_children({riak_api_sup, test_riak_node()}),
- riak_api_pb_listener}
- end,
- hd([Pid || {_,Pid,_,[Mod]} <- Children, Mod == Proc]).
-
-pause_riak_pb_listener() ->
- Pid = riak_pb_listener_pid(),
- rpc:call(test_riak_node(), sys, suspend, [Pid]).
-
-resume_riak_pb_listener() ->
- Pid = riak_pb_listener_pid(),
- rpc:call(test_riak_node(), sys, resume, [Pid]).
-
-kill_riak_pb_sockets() ->
- Children = case compare_versions(riak_version(), [1,2,0]) of
- less ->
- supervisor:which_children({riak_kv_pb_socket_sup, test_riak_node()});
- _ ->
- supervisor:which_children({riak_api_pb_sup, test_riak_node()})
- end,
- case Children of
- [] ->
- ok;
- [_|_] ->
- Pids = [Pid || {_,Pid,_,_} <- Children],
- [rpc:call(test_riak_node(), erlang, exit, [Pid, kill]) || Pid <- Pids],
- erlang:yield(),
- kill_riak_pb_sockets()
- end.
-
-maybe_start_network() ->
- %% Try to spin up net_kernel
- os:cmd("epmd -daemon"),
- case net_kernel:start([test_eunit_node(), longnames]) of
- {ok, _} ->
- erlang:set_cookie(test_riak_node(), test_cookie()),
- ok;
- {error, {already_started, _}} ->
- ok;
- X ->
- X
- end.
-
-bad_connect_test() ->
- %% Start with an unlikely port number
- ?assertEqual({error, {tcp, econnrefused}}, start({127,0,0,1}, 65535)).
-
-queue_disconnected_test() ->
- %% Start with an unlikely port number
- {ok, Pid} = start({127,0,0,1}, 65535, [queue_if_disconnected]),
- ?assertEqual({error, timeout}, ping(Pid, 10)),
- ?assertEqual({error, timeout}, list_keys(Pid, <<"b">>, 10)),
- stop(Pid).
-
-auto_reconnect_bad_connect_test() ->
- %% Start with an unlikely port number
- {ok, Pid} = start({127,0,0,1}, 65535, [auto_reconnect]),
- ?assertEqual({false, []}, is_connected(Pid)),
- ?assertEqual({error, disconnected}, ping(Pid)),
- ?assertEqual({error, disconnected}, list_keys(Pid, <<"b">>)),
- stop(Pid).
-
-server_closes_socket_test() ->
- %% Silence SASL junk when socket closes.
- error_logger:tty(false),
- %% Set up a dummy socket to send requests on
- {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
- {ok, Port} = inet:port(Listen),
- {ok, Pid} = start("127.0.0.1", Port),
- {ok, Sock} = gen_tcp:accept(Listen),
- ?assertMatch(true, is_connected(Pid)),
-
- %% Send a ping request in another process so the test doesn't block
- Self = self(),
- spawn(fun() -> Self ! ping(Pid, infinity) end),
-
- %% Make sure request received then close the socket
- {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
- ok = gen_tcp:close(Sock),
- ok = gen_tcp:close(Listen),
- receive
- Msg1 -> % result of ping from spawned process above
- ?assertEqual({error, disconnected}, Msg1)
- end,
- %% Wait for spawned process to exit
- Mref = erlang:monitor(process, Pid),
- receive
- Msg2 ->
- ?assertMatch({'DOWN', Mref, process, _, _}, Msg2)
- end.
-
-auto_reconnect_server_closes_socket_test() ->
- %% Set up a dummy socket to send requests on
- {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
- {ok, Port} = inet:port(Listen),
- {ok, Pid} = start_link("127.0.0.1", Port, [auto_reconnect]),
- {ok, Sock} = gen_tcp:accept(Listen),
- ?assertMatch(true, is_connected(Pid)),
-
- %% Send a ping request in another process so the test doesn't block
- Self = self(),
- spawn(fun() -> Self ! ping(Pid, infinity) end),
-
- %% Make sure request received then close the socket
- {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
- ok = gen_tcp:close(Sock),
- ok = gen_tcp:close(Listen),
- receive
- Msg ->
- ?assertEqual({error, disconnected}, Msg)
- end,
- %% Server will not have had a chance to reconnect yet, reason counters empty.
- ?assertMatch({false, []}, is_connected(Pid)),
- stop(Pid).
-
-dead_socket_pid_returns_to_caller_test() ->
- %% Set up a dummy socket to send requests on
- {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
- {ok, Port} = inet:port(Listen),
- {ok, Pid} = start("127.0.0.1", Port),
- {ok, Sock} = gen_tcp:accept(Listen),
- ?assertMatch(true, is_connected(Pid)),
-
- %% Send a ping request in another process so the test doesn't block
- Self = self(),
- spawn(fun() -> Self ! (catch ping(Pid, infinity)) end),
-
- %% Make sure request received then kill the process
- {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
- exit(Pid, kill),
- receive
- Msg ->
- ?assertMatch({'EXIT', {killed, _}}, Msg)
- end,
- %% Cleanup
- ok = gen_tcp:close(Sock),
- ok = gen_tcp:close(Listen).
-
-pb_socket_test_() ->
- {setup,
- fun() ->
- %% Grab the riakclient_pb.proto file
- code:add_pathz("../ebin"),
- ok = maybe_start_network()
- end,
- fun(_) ->
- net_kernel:stop()
- end,
- {generator,
- fun() ->
- case catch net_adm:ping(test_riak_node()) of
- pong ->
- live_node_tests();
- _ ->
- [] %% {skipped, need_live_server};
- end
- end}}.
+-include_lib("eunit/include/eunit.hrl").
%% Check the reconnect interval increases up to the max and sticks there
increase_reconnect_interval_test() ->
@@ -2880,1262 +2551,4 @@ increase_reconnect_interval_test(State) ->
increase_reconnect_interval_test(NextState)
end.
-%% Retry `Fun' until it returns `Retry' times, waiting `Delay'
-%% milliseconds between retries. This is our eventual consistency bread
-%% and butter
-wait_until(Fun) when is_function(Fun) ->
- wait_until(Fun, 20, 500).
-wait_until(_, 0, _) ->
- fail;
-wait_until(Fun, Retry, Delay) when Retry > 0 ->
- Pass = Fun(),
- case Pass of
- true ->
- ok;
- _ ->
- timer:sleep(Delay),
- wait_until(Fun, Retry-1, Delay)
- end.
-
-%%
-%% Tests to run against a live node - NB the node gets reconfigured and generally messed with
-%%
-live_node_tests() ->
- [{"ping",
- ?_test( begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- ?assertEqual(pong, ?MODULE:ping(Pid)),
- ?assertEqual(true, is_connected(Pid)),
- stop(Pid)
- end)},
- {"reconnect test",
- ?_test( begin
- %% Make sure originally there
- {ok, Pid} = start_link(test_ip(), test_port()),
-
- %% Change the options to allow reconnection/queueing
- set_options(Pid, [queue_if_disconnected]),
-
- %% Kill the socket
- kill_riak_pb_sockets(),
- ?assertEqual(pong, ?MODULE:ping(Pid)),
- stop(Pid)
- end)},
-
- {"set client id",
- ?_test(
- begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, <<OrigId:32>>} = ?MODULE:get_client_id(Pid),
-
- NewId = <<(OrigId+1):32>>,
- ok = ?MODULE:set_client_id(Pid, NewId),
- {ok, NewId} = ?MODULE:get_client_id(Pid)
- end)},
-
- {"version",
- ?_test(
- begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, ServerInfo} = ?MODULE:get_server_info(Pid),
- [{node, _}, {server_version, _}] = lists:sort(ServerInfo)
- end)},
-
- {"get_should_read_put_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, PO} = ?MODULE:put(Pid, O, [return_body]),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"k">>),
- ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
- end)},
-
- {"get should read put with timeout",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, PO} = ?MODULE:put(Pid, O, [{w, 1}, {dw, 1}, return_body]),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"k">>, 500),
- ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
- end)},
-
- {"get should read put with options",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, PO} = ?MODULE:put(Pid, O, [{w, 1}, {dw, 1}, return_body]),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"k">>, [{r, 1}]),
- ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
- end)},
-
- {"get should read put with non integer options",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, PO} = ?MODULE:put(Pid, O, [{w, all}, {dw, quorum}, return_body]),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"k">>, [{r, one}]),
- ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
- end)},
-
- {"put and delete with timeout",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"puttimeouttest">>, <<"value">>),
- ok = ?MODULE:put(Pid, PO, 500),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"puttimeouttest">>, 500),
- ?assertEqual(<<"value">>, riakc_obj:get_value(GO)),
- ok = ?MODULE:delete(Pid, <<"b">>, <<"puttimeouttest">>, 500),
- {error, notfound} = ?MODULE:get(Pid, <<"b">>, <<"puttimeouttest">>)
- end)},
-
- {"update_should_change_value_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, PO} = ?MODULE:put(Pid, O, [return_body]),
- PO2 = riakc_obj:update_value(PO, <<"v2">>),
- ok = ?MODULE:put(Pid, PO2),
- {ok, GO} = ?MODULE:get(Pid, <<"b">>, <<"k">>),
- ?assertEqual(<<"v2">>, riakc_obj:get_value(GO))
- end)},
-
- {"key_should_be_missing_after_delete_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- %% Put key/value
- O0 = riakc_obj:new(<<"b">>, <<"k">>),
- O = riakc_obj:update_value(O0, <<"v">>),
- {ok, _PO} = ?MODULE:put(Pid, O, [return_body]),
- %% Prove it really got stored
- {ok, GO1} = ?MODULE:get(Pid, <<"b">>, <<"k">>),
- ?assertEqual(<<"v">>, riakc_obj:get_value(GO1)),
- %% Delete and check no longer found
- ok = ?MODULE:delete(Pid, <<"b">>, <<"k">>),
- {error, notfound} = ?MODULE:get(Pid, <<"b">>, <<"k">>)
- end)},
-
- {"delete missing key test",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- %% Delete and check no longer found
- ok = ?MODULE:delete(Pid, <<"notabucket">>, <<"k">>, [{rw, 1}]),
- {error, notfound} = ?MODULE:get(Pid, <<"notabucket">>, <<"k">>)
- end)},
-
- {"empty_list_buckets_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ?assertEqual({ok, []}, ?MODULE:list_buckets(Pid))
- end)},
-
- {"list_buckets_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Bs = lists:sort([list_to_binary(["b"] ++ integer_to_list(N)) || N <- lists:seq(1, 10)]),
- F = fun(B) ->
- O=riakc_obj:new(B, <<"key">>),
- ?MODULE:put(Pid, riakc_obj:update_value(O, <<"val">>))
- end,
- [F(B) || B <- Bs],
- {ok, LBs} = ?MODULE:list_buckets(Pid),
- ?assertEqual(Bs, lists:sort(LBs))
- end)},
-
- {"list_keys_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Bucket = <<"listkeys">>,
- Ks = lists:sort([list_to_binary(integer_to_list(N)) || N <- lists:seq(1, 10)]),
- F = fun(K) ->
- O=riakc_obj:new(Bucket, K),
- ?MODULE:put(Pid, riakc_obj:update_value(O, <<"val">>))
- end,
- [F(K) || K <- Ks],
- {ok, LKs} = ?MODULE:list_keys(Pid, Bucket),
- ?assertEqual(Ks, lists:sort(LKs)),
-
- %% Make sure it works with an infinite timeout (will reset the timeout
- %% timer after each packet)
- {ok, LKs2} = ?MODULE:list_keys(Pid, Bucket, infinity),
- ?assertEqual(Ks, lists:sort(LKs2))
- end)},
-
- {"get bucket properties test",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, Props} = get_bucket(Pid, <<"b">>),
- ?assertEqual(3, proplists:get_value(n_val, Props)),
- ?assertEqual(false, proplists:get_value(allow_mult, Props))
- end)},
-
- {"set bucket properties test",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = set_bucket(Pid, <<"b">>, [{n_val, 2}, {allow_mult, false}]),
- {ok, Props} = get_bucket(Pid, <<"b">>),
- ?assertEqual(2, proplists:get_value(n_val, Props)),
- ?assertEqual(false, proplists:get_value(allow_mult, Props))
- end)},
-
- {"allow_mult should allow dupes",
- ?_test(begin
- reset_riak(),
- {ok, Pid1} = start_link(test_ip(), test_port()),
- {ok, Pid2} = start_link(test_ip(), test_port()),
- ok = set_bucket(Pid1, <<"multibucket">>, [{allow_mult, true}]),
- ?MODULE:delete(Pid1, <<"multibucket">>, <<"foo">>),
- {error, notfound} = ?MODULE:get(Pid1, <<"multibucket">>, <<"foo">>),
- O = riakc_obj:new(<<"multibucket">>, <<"foo">>),
- O1 = riakc_obj:update_value(O, <<"pid1">>),
- O2 = riakc_obj:update_value(O, <<"pid2">>),
- ok = ?MODULE:put(Pid1, O1),
-
- ok = ?MODULE:put(Pid2, O2),
- {ok, O3} = ?MODULE:get(Pid1, <<"multibucket">>, <<"foo">>),
- ?assertEqual([<<"pid1">>, <<"pid2">>], lists:sort(riakc_obj:get_values(O3))),
- O4 = riakc_obj:update_value(riakc_obj:select_sibling(1, O3), <<"resolved">>),
- ok = ?MODULE:put(Pid1, O4),
- {ok, GO} = ?MODULE:get(Pid1, <<"multibucket">>, <<"foo">>),
- ?assertEqual([<<"resolved">>], lists:sort(riakc_obj:get_values(GO))),
- ?MODULE:delete(Pid1, <<"multibucket">>, <<"foo">>)
- end)},
-
- {"update object test",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"k">>, <<"d">>),
- io:format("O0: ~p\n", [O0]),
- {ok, O1} = riakc_pb_socket:put(Pid, O0, [return_body]),
- io:format("O1: ~p\n", [O1]),
- M1 = riakc_obj:get_metadata(O1),
- M2 = dict:store(?MD_LINKS, [{{<<"b">>, <<"k1">>}, <<"t1">>}], M1),
- O2 = riakc_obj:update_metadata(O1, M2),
- riakc_pb_socket:put(Pid, O2)
- end)},
-
- {"queue test",
- ?_test(begin
- %% Would really like this in a nested {setup, blah} structure
- %% but eunit does not allow
- {ok, Pid} = start_link(test_ip(), test_port()),
- pause_riak_pb_listener(),
- Me = self(),
- %% this request will block as
- spawn(fun() -> Me ! {1, ping(Pid)} end),
- %% this request should be queued as socket will not be created
- spawn(fun() -> Me ! {2, ping(Pid)} end),
- resume_riak_pb_listener(),
- receive {1,Ping1} -> ?assertEqual(Ping1, pong) end,
- receive {2,Ping2} -> ?assertEqual(Ping2, pong) end
- end)},
-
- {"timeout queue test",
- ?_test(begin
- %% Would really like this in a nested {setup, blah} structure
- %% but eunit does not allow
- pause_riak_pb_listener(),
- {ok, Pid} = start_link(test_ip(), test_port(), [queue_if_disconnected]),
- Me = self(),
- %% this request will block as
- spawn(fun() -> Me ! {1, ping(Pid, 0)} end),
- %% this request should be queued as socket will not be created
- spawn(fun() -> Me ! {2, ping(Pid, 0)}, Me ! running end),
- receive running -> ok end,
- resume_riak_pb_listener(),
- receive {1,Ping1} -> ?assertEqual({error, timeout}, Ping1) end,
- receive {2,Ping2} -> ?assertEqual({error, timeout}, Ping2) end
- end)},
-
- {"ignore stale tref test",
- ?_test(begin
- %% Would really like this in a nested {setup, blah} structure
- %% but eunit does not allow
- {ok, Pid} = start_link(test_ip(), test_port()),
- Pid ! {req_timeout, make_ref()},
- ?assertEqual(pong, ping(Pid))
- end)},
-
- {"infinite timeout ping test",
- ?_test(begin
- %% Would really like this in a nested {setup, blah} structure
- %% but eunit does not allow
- {ok, Pid} = start_link(test_ip(), test_port()),
- ?assertEqual(pong, ping(Pid, infinity)),
- ?assertEqual(pong, ping(Pid, undefined))
- end)},
-
- {"javascript_source_map_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- B = <<"bucket">>,
- K = <<"foo">>,
- O=riakc_obj:new(B, K),
- ?MODULE:put(Pid, riakc_obj:update_value(O, <<"2">>, "application/json")),
-
- ?assertEqual({ok, [{0, [2]}]},
- ?MODULE:mapred(Pid,
- [{B, K}],
- [{map, {jsanon, <<"function (v) { return [JSON.parse(v.values[0].data)]; }">>},
- undefined, true}]))
- end)},
-
- {"javascript_named_map_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- B = <<"bucket">>,
- K = <<"foo">>,
- O=riakc_obj:new(B, K),
- ?MODULE:put(Pid, riakc_obj:update_value(O, <<"99">>, "application/json")),
-
- ?assertEqual({ok, [{0, [99]}]},
- ?MODULE:mapred(Pid,
- [{B, K}],
- [{map, {jsfun, <<"Riak.mapValuesJson">>},
- undefined, true}]))
- end)},
-
- {"javascript_source_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- ?MODULE:put(Pid,riakc_obj:update_value(O, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
- {<<"bar">>, <<"3">>},
- {<<"baz">>, <<"4">>}]],
-
- ?assertEqual({ok, [{1, [3]}]},
- ?MODULE:mapred(Pid,
- [{<<"bucket">>, <<"foo">>},
- {<<"bucket">>, <<"bar">>},
- {<<"bucket">>, <<"baz">>}],
- [{map, {jsanon, <<"function (v) { return [1]; }">>},
- undefined, false},
- {reduce, {jsanon,
- <<"function(v) {
- total = v.reduce(
- function(prev,curr,idx,array) {
- return prev+curr;
- }, 0);
- return [total];
- }">>},
- undefined, true}]))
- end)},
-
- {"javascript_named_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- ?MODULE:put(Pid,riakc_obj:update_value(O, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
- {<<"bar">>, <<"3">>},
- {<<"baz">>, <<"4">>}]],
-
- ?assertEqual({ok, [{1, [9]}]},
- ?MODULE:mapred(Pid,
- [{<<"bucket">>, <<"foo">>},
- {<<"bucket">>, <<"bar">>},
- {<<"bucket">>, <<"baz">>}],
- [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
- end)},
-
- {"javascript_bucket_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- ?MODULE:put(Pid,riakc_obj:update_value(O, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
- {<<"bar">>, <<"3">>},
- {<<"baz">>, <<"4">>}]],
-
- ?assertEqual({ok, [{1, [9]}]},
- ?MODULE:mapred_bucket(Pid, <<"bucket">>,
- [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
- end)},
-
- {"javascript_arg_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O=riakc_obj:new(<<"bucket">>, <<"foo">>),
- ?MODULE:put(Pid, riakc_obj:update_value(O, <<"2">>, "application/json")),
- ?assertEqual({ok, [{1, [10]}]},
- ?MODULE:mapred(Pid,
- [{{<<"bucket">>, <<"foo">>}, 5},
- {{<<"bucket">>, <<"foo">>}, 10},
- {{<<"bucket">>, <<"foo">>}, 15},
- {{<<"bucket">>, <<"foo">>}, -15},
- {{<<"bucket">>, <<"foo">>}, -5}],
- [{map, {jsanon, <<"function(v, arg) { return [arg]; }">>},
- undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
- end)},
- {"erlang_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- ?MODULE:put(Pid,riakc_obj:update_value(O, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
- {<<"bar">>, <<"3">>},
- {<<"baz">>, <<"4">>}]],
-
- {ok, [{1, Results}]} = ?MODULE:mapred(Pid,
- [{<<"bucket">>, <<"foo">>},
- {<<"bucket">>, <<"bar">>},
- {<<"bucket">>, <<"baz">>}],
- [{map, {modfun, riak_kv_mapreduce,
- map_object_value},
- undefined, false},
- {reduce, {modfun, riak_kv_mapreduce,
- reduce_set_union},
- undefined, true}]),
- ?assertEqual([<<"2">>, <<"3">>, <<"4">>], lists:sort(Results))
- end)},
- {"erlang_map_reduce_binary_2i_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V,I}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- MD=riakc_obj:add_secondary_index(dict:new(), I),
- O2=riakc_obj:update_metadata(O,MD),
- ?MODULE:put(Pid,riakc_obj:update_value(O2, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>, {{binary_index, "idx"}, [<<"a">>]}},
- {<<"bar">>, <<"3">>, {{binary_index, "idx"}, [<<"b">>]}},
- {<<"baz">>, <<"4">>, {{binary_index, "idx"}, [<<"a">>]}}]],
-
- {ok, [{1, Results}]} = ?MODULE:mapred(Pid,
- {index,<<"bucket">>,{binary_index, "idx"}, <<"a">>},
- [{map, {modfun, riak_kv_mapreduce,
- map_object_value},
- undefined, false},
- {reduce, {modfun, riak_kv_mapreduce,
- reduce_set_union},
- undefined, true}]),
- ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results))
- end)},
- {"erlang_map_reduce_integer_2i_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Store = fun({K,V,I}) ->
- O=riakc_obj:new(<<"bucket">>, K),
- MD=riakc_obj:add_secondary_index(dict:new(), I),
- O2=riakc_obj:update_metadata(O,MD),
- ?MODULE:put(Pid,riakc_obj:update_value(O2, V, "application/json"))
- end,
- [Store(KV) || KV <- [{<<"foo">>, <<"2">>, {{integer_index, "idx"}, [4]}},
- {<<"bar">>, <<"3">>, {{integer_index, "idx"}, [7]}},
- {<<"baz">>, <<"4">>, {{integer_index, "idx"}, [4]}}]],
-
- {ok, [{1, Results}]} = ?MODULE:mapred(Pid,
- {index,<<"bucket">>,{integer_index, "idx"},3,5},
- [{map, {modfun, riak_kv_mapreduce,
- map_object_value},
- undefined, false},
- {reduce, {modfun, riak_kv_mapreduce,
- reduce_set_union},
- undefined, true}]),
- ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results))
- end)},
- {"missing_key_erlang_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, Results} = ?MODULE:mapred(Pid, [{<<"bucket">>, <<"foo">>},
- {<<"bucket">>, <<"bar">>},
- {<<"bucket">>, <<"baz">>}],
- [{map, {modfun, riak_kv_mapreduce,
- map_object_value},
- <<"include_notfound">>, false},
- {reduce, {modfun, riak_kv_mapreduce,
- reduce_set_union},
- undefined, true}]),
- [{1, [{error, notfound}|_]}] = Results end)},
- {"missing_key_javascript_map_reduce_test()",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, Results} = ?MODULE:mapred(Pid, [{<<"bucket">>, <<"foo">>},
- {<<"bucket">>, <<"bar">>},
- {<<"bucket">>, <<"baz">>}],
- [{map, {jsfun, <<"Riak.mapValuesJson">>},
- undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSort">>},
- undefined, true}]),
- [{1, [{not_found, {_, _},<<"undefined">>}|_]}] = Results end)},
- {"map reduce bad inputs",
- ?_test(begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- Res = ?MODULE:mapred(Pid, undefined,
- [{map, {jsfun, <<"Riak.mapValuesJson">>},
- undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSum">>},
- undefined, true}]),
- ?assertEqual({error, <<"{inputs,{\"Inputs must be a binary bucket, a tuple of bucket and key-filters, a list of target tuples, or a search, index, or modfun tuple:\",\n undefined}}">>},
- Res )
- end)},
- {"map reduce bad input keys",
- ?_test(begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- Res = ?MODULE:mapred(Pid, [<<"b">>], % no {B,K} tuple
- [{map, {jsfun, <<"Riak.mapValuesJson">>},
- undefined, false},
- {reduce, {jsfun, <<"Riak.reduceSum">>},
- undefined, true}]),
- ?assertEqual({error,<<"{inputs,{\"Inputs target tuples must be {B,K} or {{B,K},KeyData}:\",[<<\"b\">>]}}">>},
- Res)
- end)},
- {"map reduce bad query",
- ?_test(begin
- {ok, Pid} = start_link(test_ip(), test_port()),
- Res = ?MODULE:mapred(Pid, [{<<"b">>,<<"k">>}], % no {B,K} tuple
- undefined),
- ?assertEqual({error,<<"{query,{\"Query takes a list of step tuples\",undefined}}">>},
- Res)
- end)},
- {"get should convert erlang terms",
- ?_test(begin
- reset_riak(),
- TestNode = test_riak_node(),
- MyBin = <<"some binary">>,
- MyTerm = [<<"b">>,<<"a_term">>,{some_term, ['full', "of", 123, 654.321]}],
- BinObj = rpc:call(TestNode, riak_object, new,
- [<<"b">>, <<"a_bin">>, MyBin]),
- TermObj = rpc:call(TestNode, riak_object, new,
- [<<"b">>, <<"a_term">>, MyTerm]),
- {ok, C} = rpc:call(TestNode, riak, local_client, []),
- %% parameterized module trickery - stick it as the last argument
- ok = rpc:call(TestNode, riak_client, put, [BinObj, 1, C]),
- ok = rpc:call(TestNode, riak_client, put, [TermObj, 1, C]),
-
- {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, GotBinObj} = ?MODULE:get(Pid, <<"b">>, <<"a_bin">>),
- {ok, GotTermObj} = ?MODULE:get(Pid, <<"b">>, <<"a_term">>),
-
- ?assertEqual(riakc_obj:get_value(GotBinObj), MyBin),
- ?assertEqual(riakc_obj:get_content_type(GotTermObj),
- "application/x-erlang-binary"),
- ?assertEqual(binary_to_term(riakc_obj:get_value(GotTermObj)), MyTerm)
- end)},
- {"putting without a key should generate one",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, undefined, <<"value">>),
- Res1 = ?MODULE:put(Pid, PO),
- Res2 = ?MODULE:put(Pid, PO),
- ?assertMatch({ok, _K}, Res1),
- ?assertMatch({ok, _K}, Res2),
- {ok, K1} = Res1,
- {ok, K2} = Res2,
- ?assertMatch(true, is_binary(K1)),
- ?assertMatch(true, is_binary(K2)),
- % Make sure the same key isn't generated twice
- ?assert(Res1 =/= Res2)
- end)},
- {"putting without a key should generate one with return_body",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, undefined, <<"value">>),
- {ok, Obj1} = ?MODULE:put(Pid, PO, [return_body]),
- {ok, Obj2} = ?MODULE:put(Pid, PO, [return_body]),
- %% Make sure the same key isn't generated twice
- ?assertEqual(riakc_obj, element(1, Obj1)),
- ?assertEqual(riakc_obj, element(1, Obj2)),
- ?assert(riakc_obj:key(Obj1) /= riakc_obj:key(Obj2))
- end)},
- {"conditional gets should return unchanged if the vclock matches",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- ?MODULE:put(Pid, PO),
- {ok, Obj} = ?MODULE:get(Pid, <<"b">>, <<"key">>),
- VClock = riakc_obj:vclock(Obj),
- %% object hasn't changed
- ?assertEqual(unchanged, ?MODULE:get(Pid, <<"b">>, <<"key">>,
- [{if_modified, VClock}])),
- %% change the object and make sure unchanged isn't returned
- P1 = riakc_obj:update_value(Obj, <<"newvalue">>),
- ?MODULE:put(Pid, P1),
- ?assertMatch({ok, _}, ?MODULE:get(Pid, <<"b">>, <<"key">>,
- [{if_modified, VClock}]))
- end)},
- {"the head get option should return the object metadata without the value",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- ?MODULE:put(Pid, PO),
- {ok, Obj} = ?MODULE:get(Pid, <<"b">>, <<"key">>, [head]),
- ?assertEqual(<<>>, riakc_obj:get_value(Obj)),
- {ok, Obj2} = ?MODULE:get(Pid, <<"b">>, <<"key">>, []),
- ?assertEqual(<<"value">>, riakc_obj:get_value(Obj2))
- end)},
- {"conditional put should allow you to avoid overwriting a value if it already exists",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- ?assertEqual(ok, ?MODULE:put(Pid, PO, [if_none_match])),
- ?assertEqual({error, <<"match_found">>}, ?MODULE:put(Pid, PO, [if_none_match]))
- end)},
- {"conditional put should allow you to avoid overwriting a value if its been updated",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- {ok, Obj} = ?MODULE:put(Pid, PO, [return_body]),
- Obj2 = riakc_obj:update_value(Obj, <<"newvalue">>),
- ?assertEqual(ok, ?MODULE:put(Pid, Obj2, [if_not_modified])),
- ?assertEqual({error, <<"modified">>}, ?MODULE:put(Pid, Obj2, [if_not_modified]))
- end)},
- {"if_not_modified should fail if the object is not found",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- ?assertEqual({error, <<"notfound">>}, ?MODULE:put(Pid, PO, [if_not_modified]))
- end)},
- {"return_head should empty out the value in the riak object",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- {ok, Obj} = ?MODULE:put(Pid, PO, [return_head]),
- ?assertEqual(<<>>, riakc_obj:get_value(Obj))
- end)},
- {"return_head should empty out all values when there's siblings",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = set_bucket(Pid, <<"b">>, [{allow_mult, true}]),
- PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
- {ok, Obj} = ?MODULE:put(Pid, PO, [return_head]),
- ?assertEqual(<<>>, riakc_obj:get_value(Obj)),
- {ok, Obj2} = ?MODULE:put(Pid, PO, [return_head]),
- ?assertEqual([<<>>, <<>>], riakc_obj:get_values(Obj2))
- end)},
-
- {"user metadata manipulation",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"key0">>, <<"value0">>),
- MD0 = riakc_obj:get_update_metadata(O0),
- MD1 = riakc_obj:set_user_metadata_entry(MD0, {<<"Key1">>,<<"Val1">>}),
- O1 = riakc_obj:update_metadata(O0, MD1),
- ?assertEqual(ok, ?MODULE:put(Pid, O1)),
- {ok, O2} = ?MODULE:get(Pid, <<"b">>, <<"key0">>),
- MD2 = riakc_obj:get_update_metadata(O2),
- ?assertEqual([{<<"Key1">>,<<"Val1">>}], riakc_obj:get_user_metadata_entries(MD2)),
- MD3 = riakc_obj:set_user_metadata_entry(MD2, {<<"Key2">>,<<"Val2">>}),
- O3 = riakc_obj:update_metadata(O2, MD3),
- ?assertEqual(ok, ?MODULE:put(Pid, O3)),
- {ok, O4} = ?MODULE:get(Pid, <<"b">>, <<"key0">>),
- ?assertEqual(2, length(riakc_obj:get_user_metadata_entries(riakc_obj:get_update_metadata(O4))))
- end)},
- {"binary secondary index manipulation",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"key1">>, <<"value1">>),
- MD0 = riakc_obj:get_update_metadata(O0),
- MD1 = riakc_obj:set_secondary_index(MD0, [{{binary_index, "idx"},[<<"aaa">>]}]),
- O1 = riakc_obj:update_metadata(O0, MD1),
- ?assertEqual(ok, ?MODULE:put(Pid, O1)),
- {ok, O2} = ?MODULE:get(Pid, <<"b">>, <<"key1">>),
- MD2 = riakc_obj:get_update_metadata(O2),
- ?assertEqual([<<"aaa">>], lists:sort(riakc_obj:get_secondary_index(MD2,{binary_index,"idx"}))),
- MD3 = riakc_obj:add_secondary_index(MD2, [{{binary_index, "idx"},[<<"bbb">>,<<"aaa">>,<<"ccc">>]}]),
- O3 = riakc_obj:update_metadata(O2, MD3),
- ?assertEqual(ok, ?MODULE:put(Pid, O3)),
- ?assertEqual({ok,?INDEX_RESULTS{keys=[<<"key1">>]}},
- ?MODULE:get_index(Pid, <<"b">>, {binary_index, "idx"}, <<"bbb">>)),
- {ok, O4} = ?MODULE:get(Pid, <<"b">>, <<"key1">>),
- MD4 = riakc_obj:get_update_metadata(O4),
- ?assertEqual([<<"aaa">>,<<"bbb">>,<<"ccc">>], lists:sort(riakc_obj:get_secondary_index(MD4, {binary_index, "idx"}))),
- MD5 = riakc_obj:delete_secondary_index(MD4,{binary_index,"idx"}),
- O5 = riakc_obj:update_metadata(O4, MD5),
- ?assertEqual(ok, ?MODULE:put(Pid, O5))
- end)},
- {"integer secondary index manipulation",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- O0 = riakc_obj:new(<<"b">>, <<"key2">>, <<"value2">>),
- MD0 = riakc_obj:get_update_metadata(O0),
- MD1 = riakc_obj:set_secondary_index(MD0, [{{integer_index, "idx"},[67]}]),
- O1 = riakc_obj:update_metadata(O0, MD1),
- ?assertEqual(ok, ?MODULE:put(Pid, O1)),
- {ok, O2} = ?MODULE:get(Pid, <<"b">>, <<"key2">>),
- MD2 = riakc_obj:get_update_metadata(O2),
- ?assertEqual([67], lists:sort(riakc_obj:get_secondary_index(MD2,{integer_index,"idx"}))),
- MD3 = riakc_obj:add_secondary_index(MD2, [{{integer_index, "idx"},[56,10000,100]}]),
- O3 = riakc_obj:update_metadata(O2, MD3),
- ?assertEqual(ok, ?MODULE:put(Pid, O3)),
- ?assertEqual({ok,?INDEX_RESULTS{keys=[<<"key2">>]}},
- ?MODULE:get_index(Pid, <<"b">>, {integer_index, "idx"}, 50, 60)),
- {ok, O4} = ?MODULE:get(Pid, <<"b">>, <<"key2">>),
- MD4 = riakc_obj:get_update_metadata(O4),
- ?assertEqual([56,67,100,10000], lists:sort(riakc_obj:get_secondary_index(MD4, {integer_index, "idx"}))),
- MD5 = riakc_obj:delete_secondary_index(MD4,{integer_index,"idx"}),
- O5 = riakc_obj:update_metadata(O4, MD5),
- ?assertEqual(ok, ?MODULE:put(Pid, O5))
- end)},
- {"counter increment / decrement / get value",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- unlink(Pid),
- Bucket = <<"counter_test_bucket">>,
- Key = <<"test_counter">>,
- %% counters require allow_mult to be true
- ok = set_bucket(Pid, Bucket, [{allow_mult, true}]),
- ok = ?MODULE:counter_incr(Pid, Bucket, Key, 10),
- ?assertEqual({ok, 10}, ?MODULE:counter_val(Pid, Bucket, Key)),
- ok = ?MODULE:counter_incr(Pid, Bucket, Key, -5, [{w, quorum}, {pw, one}, {dw, all}]),
- ?assertEqual({ok, 5}, ?MODULE:counter_val(Pid, Bucket, Key, [{pr, one}]))
- end)},
- {"create a search index / get / list / delete with default timeout",
- {timeout, 30, ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- reset_solr(Pid),
- Index = <<"indextest">>,
- SchemaName = <<"_yz_default">>,
- ?assertEqual(ok,
- ?MODULE:create_search_index(Pid,
- Index,
- SchemaName,
- [{n_val,2}])),
- case ?MODULE:get_search_index(Pid, Index) of
- {ok, IndexData} ->
- ?assertEqual(proplists:get_value(
- index, IndexData), Index),
- ?assertEqual(proplists:get_value(
- schema, IndexData), SchemaName),
- ?assertEqual(proplists:get_value(
- n_val, IndexData), 2);
- {error, <<"notfound">>} ->
- false
- end,
- ?assertEqual({ok, [[{index,Index},
- {schema,SchemaName},
- {n_val,2}]]},
- ?MODULE:list_search_indexes(Pid)),
- ?assertEqual(ok, ?MODULE:delete_search_index(Pid, Index))
- end)}},
- {"create a search index / get with user-set timeout",
- {timeout, 30, ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- reset_solr(Pid),
- Index = <<"indexwithintimeouttest">>,
- SchemaName = <<"_yz_default">>,
- ?assertEqual(ok,
- ?MODULE:create_search_index(Pid,
- Index,
- SchemaName,
- 20000)),
- case ?MODULE:get_search_index(Pid, Index) of
- {ok, IndexData} ->
- ?assertEqual(proplists:get_value(
- index, IndexData), Index),
- ?assertEqual(proplists:get_value(
- schema, IndexData), SchemaName);
- {error, <<"notfound">>} ->
- false
- end
- end)}},
- {"create a search schema / get",
- {timeout, 30, ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- reset_solr(Pid),
- Schema = <<"<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
- <schema name=\"myschema\" version=\"1.5\">
- <fields>
-    <field name=\"_yz_id\" type=\"_yz_str\" indexed=\"true\" stored=\"true\" multiValued=\"false\" required=\"true\"/>
-    <field name=\"_yz_ed\" type=\"_yz_str\" indexed=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_pn\" type=\"_yz_str\" indexed=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_fpn\" type=\"_yz_str\" indexed=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_vtag\" type=\"_yz_str\" indexed=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_rk\" type=\"_yz_str\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_rt\" type=\"_yz_str\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_rb\" type=\"_yz_str\" indexed=\"true\" stored=\"true\" multiValued=\"false\"/>
-    <field name=\"_yz_err\" type=\"_yz_str\" indexed=\"true\" multiValued=\"false\"/>
-    <dynamicField name=\"*\" type=\"ignored\"/>
- </fields>
- <uniqueKey>_yz_id</uniqueKey>
- <types>
-    <fieldType name=\"_yz_str\" class=\"solr.StrField\" sortMissingLast=\"true\"/>
-    <fieldType name=\"ignored\" indexed=\"false\" stored=\"false\" multiValued=\"true\" class=\"solr.StrField\"/>
- </types>
- </schema>">>,
- Index = <<"schemaindex">>,
- SchemaName = <<"myschema">>,
- ?assertEqual(ok, ?MODULE:create_search_schema(Pid, SchemaName, Schema)),
- ?assertEqual(ok, ?MODULE:create_search_index(Pid, Index, SchemaName, [])),
- wait_until( fun() ->
- case ?MODULE:list_search_indexes(Pid) of
- {ok, []} ->
- false;
- {ok, [IndexData|_]} ->
- proplists:get_value(index, IndexData) == Index andalso
- proplists:get_value(schema, IndexData) == SchemaName andalso
- proplists:get_value(n_val, IndexData) == 3
- end
- end, 20, 1000 ),
- wait_until( fun() ->
- case ?MODULE:get_search_schema(Pid, SchemaName) of
- {ok, SchemaData} ->
- proplists:get_value(name, SchemaData) == SchemaName andalso
- proplists:get_value(content, SchemaData) == Schema;
- {error, <<"notefound">>} ->
- false
- end
- end, 20, 1000 )
- end)}},
- {"create a search index and tie to a bucket",
- {timeout, 30, ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- Index = <<"myindex">>,
- Bucket = <<"mybucket">>,
- ?assertEqual(ok, ?MODULE:create_search_index(Pid, Index)),
- ok = ?MODULE:set_search_index(Pid, Bucket, Index),
- PO = riakc_obj:new(Bucket, <<"fred">>, <<"{\"name_s\":\"Freddy\"}">>, "application/json"),
- {ok, _Obj} = ?MODULE:put(Pid, PO, [return_head]),
- wait_until( fun() ->
- {ok, Result} = search(Pid, Index, <<"*:*">>),
- 1 == Result#search_results.num_found
- end, 20, 1000 )
- end)}},
- {"search utf8",
- {timeout, 30, ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- reset_solr(Pid),
- Index = <<"myindex">>,
- Bucket = <<"mybucket">>,
- ?assertEqual(ok, ?MODULE:create_search_index(Pid, Index)),
- ok = ?MODULE:set_search_index(Pid, Bucket, Index),
- PO = riakc_obj:new(Bucket, <<"fred">>, <<"{\"name_s\":\"בָּרָא\"}"/utf8>>, "application/json"),
- {ok, _Obj} = ?MODULE:put(Pid, PO, [return_head]),
- wait_until( fun() ->
- {ok, Result} = search(Pid, Index, <<"name_s:בָּרָא"/utf8>>),
- 1 == Result#search_results.num_found
- end )
- end)}},
- {"trivial set delete",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assertEqual(riakc_set:size(S0), 1),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:del_element(<<"X">>, S0))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assertNot(riakc_set:is_element(<<"X">>, S1)),
- ?assertEqual(riakc_set:size(S1), 0)
- end)},
- {"add and remove items in nested set in map",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(riakc_map:update({<<"set">>, set},
- fun(S) ->
- riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, S))
- end, riakc_map:new()))),
- {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L0 = riakc_map:fetch({<<"set">>, set}, M0),
- ?assert(lists:member(<<"X">>, L0)),
- ?assert(lists:member(<<"Y">>, L0)),
- ?assertEqual(length(L0), 2),
-
- M1 = riakc_map:update({<<"set">>, set},
- fun(S) -> riakc_set:del_element(<<"X">>,
- riakc_set:add_element(<<"Z">>, S)) end,
- M0),
-
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M1)),
- {ok, M2} = riakc_pb_socket:fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L1 = riakc_map:fetch({<<"set">>, set}, M2),
-
- ?assert(lists:member(<<"Y">>, L1)),
- ?assert(lists:member(<<"Z">>, L1)),
- ?assertEqual(length(L1), 2)
- end)},
- {"increment nested counter",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(riakc_map:update({<<"counter">>, counter},
- fun(C) ->
- riakc_counter:increment(5, C)
- end, riakc_map:new()))),
- {ok, M0} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- C0 = riakc_map:fetch({<<"counter">>, counter}, M0),
- ?assertEqual(C0, 5),
-
- M1 = riakc_map:update({<<"counter">>, counter},
- fun(C) -> riakc_counter:increment(200, C) end,
- M0),
- M2 = riakc_map:update({<<"counter">>, counter},
- fun(C) -> riakc_counter:decrement(117, C) end,
- M1),
- M3 = riakc_map:update({<<"counter">>, counter},
- fun(C) -> riakc_counter:increment(256, C) end,
- M2),
-
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M3)),
- {ok, M4} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- C1 = riakc_map:fetch({<<"counter">>, counter}, M4),
- ?assertEqual(C1, 344)
- end)},
- {"updated nested lww register",
- ?_test(begin
- reset_riak(),
- %% The word "stone" translated into Russian and Thai
- StoneInRussian = [1051,1102,1082,32,1082,1072,1084,1085,1077,1091,1083,1086,
- 1074,1080,1090,1077,1083,1103],
- StoneInThai = [3627,3636,3609],
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>},
- <<"key">>,
- riakc_map:to_op(
- riakc_map:update(
- {<<"register">>, register},
- fun(R) ->
- riakc_register:set(
- term_to_binary({"barney", "rubble", StoneInRussian}),
- R)
- end, riakc_map:new()))),
- {ok, M0} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- R0 = riakc_map:fetch({<<"register">>, register}, M0),
- ?assertEqual(binary_to_term(R0), {"barney", "rubble", StoneInRussian}),
-
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>},
- <<"key">>,
- riakc_map:to_op(
- riakc_map:update(
- {<<"register">>, register},
- fun(R) ->
- riakc_register:set(
- term_to_binary({"barney", "rubble", StoneInThai}),
- R)
- end, M0))),
-
- {ok, M1} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- R1 = riakc_map:fetch({<<"register">>, register}, M1),
- ?assertEqual(binary_to_term(R1), {"barney", "rubble", StoneInThai})
- end)},
- {"throw exception for undefined context for delete",
- ?_test(begin
- reset_riak(),
- ?assertThrow(context_required, riakc_set:del_element(<<"X">>,
- riakc_set:add_element(<<"X">>,
- riakc_set:new()))),
- ?assertThrow(context_required, riakc_map:erase({<<"counter">>, counter}, riakc_map:new())),
- ?assertThrow(context_required, riakc_map:erase({<<"set">>, set}, riakc_map:new())),
- ?assertThrow(context_required, riakc_map:erase({<<"map">>, map}, riakc_map:new())),
- ?assertThrow(context_required, riakc_map:update({<<"set">>, set}, fun(S) -> riakc_set:del_element(<<"Y">>, S) end, riakc_map:new())),
- ?assertThrow(context_required, riakc_flag:disable(riakc_flag:new()))
- end)},
- {"delete bogus item from set",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assertEqual(riakc_set:size(S0), 1),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:del_element(<<"Y">>, S0))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S1)),
- ?assertEqual(riakc_set:size(S1), 1)
- end)},
- {"add redundant item to set",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assertEqual(riakc_set:size(S0), 1),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>, S0))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S1)),
- ?assertEqual(riakc_set:size(S1), 1)
- end)},
- {"add and remove redundant item to/from set",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, riakc_set:new())))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assert(riakc_set:is_element(<<"Y">>, S0)),
- ?assertEqual(riakc_set:size(S0), 2),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:del_element(<<"X">>, riakc_set:add_element(<<"X">>, S0)))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S1)),
- ?assert(riakc_set:is_element(<<"Y">>, S1)),
- ?assertEqual(riakc_set:size(S1), 2)
- end)},
- {"remove then add redundant item from/to set",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, riakc_set:new())))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assert(riakc_set:is_element(<<"Y">>, S0)),
- ?assertEqual(riakc_set:size(S0), 2),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:del_element(<<"X">>, S0)))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S1)),
- ?assert(riakc_set:is_element(<<"Y">>, S1)),
- ?assertEqual(riakc_set:size(S1), 2)
- end)},
- {"remove item from set with outdated context",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, riakc_set:new())))),
- {ok, S0} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S0)),
- ?assert(riakc_set:is_element(<<"Y">>, S0)),
- ?assertEqual(riakc_set:size(S0), 2),
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:add_element(<<"Z">>, riakc_set:new()))),
-
- ok = update_type(Pid,
- {<<"set_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_set:to_op(riakc_set:del_element(<<"Z">>, S0))),
- {ok, S1} = fetch_type(Pid, {<<"set_bucket">>, <<"bucket">>}, <<"key">>),
- ?assert(riakc_set:is_element(<<"X">>, S1)),
- ?assert(riakc_set:is_element(<<"Y">>, S1)),
- ?assert(riakc_set:is_element(<<"Z">>, S1)),
- ?assertEqual(riakc_set:size(S1), 3)
- end)},
- {"add item to nested set in map while also removing set",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(riakc_map:update({<<"set">>, set},
- fun(S) ->
- riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, S))
- end, riakc_map:new()))),
- {ok, M0} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L0 = riakc_map:fetch({<<"set">>, set}, M0),
- ?assert(lists:member(<<"X">>, L0)),
- ?assert(lists:member(<<"Y">>, L0)),
- ?assertEqual(length(L0), 2),
-
- M1 = riakc_map:update({<<"set">>, set},
- fun(S) -> riakc_set:add_element(<<"Z">>, S) end,
- M0),
- M2 = riakc_map:erase({<<"set">>, set}, M1),
-
- ok = update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M2)),
- {ok, M3} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L1 = riakc_map:fetch({<<"set">>, set}, M3),
-
- ?assert(lists:member(<<"Z">>, L1)),
- ?assertEqual(length(L1), 1)
- end)},
- {"increment nested counter in map while also removing counter",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(riakc_map:update({<<"counter">>, counter},
- fun(C) ->
- riakc_counter:increment(5, C)
- end, riakc_map:new()))),
- {ok, M0} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- C0 = riakc_map:fetch({<<"counter">>, counter}, M0),
- ?assertEqual(C0, 5),
-
- M1 = riakc_map:update({<<"counter">>, counter},
- fun(C) -> riakc_counter:increment(2, C) end,
- M0),
- M2 = riakc_map:erase({<<"counter">>, counter}, M1),
-
- ok = update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M2)),
- {ok, M3} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- C1 = riakc_map:fetch({<<"counter">>, counter}, M3),
-
- %% Expected result depends on combination of vnodes involved, so accept either answer
- ?assert(C1 =:= 2 orelse C1 =:= 7)
- end)},
- {"add item to nested set in nested map in map while also removing nested map",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- M0 = riakc_map:update({<<"map">>, map},
- fun(M) ->
- riakc_map:update({<<"set">>, set},
- fun(S) ->
- riakc_set:add_element(<<"X">>,
- riakc_set:add_element(<<"Y">>, S))
- end,
- M)
- end,
- riakc_map:new()),
- ok = riakc_pb_socket:update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M0)),
-
- {ok, M1} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L0 = orddict:fetch({<<"set">>, set}, riakc_map:fetch({<<"map">>, map}, M1)),
-
- ?assert(lists:member(<<"X">>, L0)),
- ?assert(lists:member(<<"Y">>, L0)),
- ?assertEqual(length(L0), 2),
-
- M2 = riakc_map:update({<<"map">>, map},
- fun(M) -> riakc_map:update({<<"set">>, set},
- fun(S) -> riakc_set:add_element(<<"Z">>, S) end,
- M)
- end,
- M1),
- M3 = riakc_map:erase({<<"map">>, map}, M2),
-
- ok = update_type(Pid,
- {<<"map_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_map:to_op(M3)),
- {ok, M4} = fetch_type(Pid, {<<"map_bucket">>, <<"bucket">>}, <<"key">>),
- L1 = orddict:fetch({<<"set">>, set}, riakc_map:fetch({<<"map">>, map}, M4)),
-
- ?assert(lists:member(<<"Z">>, L1)),
- ?assertEqual(length(L1), 1)
- end)},
- {"get preflist test",
- ?_test(begin
- reset_riak(),
- Node = atom_to_binary(test_riak_node(), latin1),
- {ok, Pid} = start_link(test_ip(), test_port()),
- {ok, Preflist} = get_preflist(Pid, <<"b">>, <<"f">>),
- ?assertEqual([#preflist_item{partition = 52,
- node = Node,
- primary = true},
- #preflist_item{partition = 53,
- node = Node,
- primary = true},
- #preflist_item{partition = 54,
- node = Node,
- primary = true}],
- Preflist)
- end)},
- {"add redundant and multiple items to hll(set)",
- ?_test(begin
- reset_riak(),
- {ok, Pid} = start_link(test_ip(), test_port()),
- ok = update_type(Pid,
- {<<"hll_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_hll:to_op(
- riakc_hll:add_elements([<<"X">>, <<"Y">>],
- riakc_hll:new()))),
- {ok, Hll0} = fetch_type(Pid, {<<"hll_bucket">>, <<"bucket">>},
- <<"key">>),
- ?assertEqual(riakc_hll:value(Hll0), 2),
- ok = update_type(Pid,
- {<<"hll_bucket">>, <<"bucket">>}, <<"key">>,
- riakc_hll:to_op(
- riakc_hll:add_element(<<"X">>, Hll0))),
- {ok, Hll1} = fetch_type(Pid, {<<"hll_bucket">>, <<"bucket">>},
- <<"key">>),
- ?assert(riakc_hll:is_type(Hll1)),
- Value = riakc_hll:value(Hll1),
- ?assertEqual(Value, 2),
-
- %% Make sure card and value are the same
- ?assertEqual(riakc_hll:card(Hll1), Value)
- end)}
- ].
-
-endif.
diff --git a/src/riakc_ts.erl b/src/riakc_ts.erl
index 1247f023..c9fe32c9 100644
--- a/src/riakc_ts.erl
+++ b/src/riakc_ts.erl
@@ -24,13 +24,13 @@
-module(riakc_ts).
--export([query/2, query/3, query/4, query/5,
+-export(['query'/2, 'query'/3, 'query'/4, 'query'/5,
get_coverage/3,
replace_coverage/4, replace_coverage/5,
put/3, put/4,
get/4,
delete/4,
- stream_list_keys/3]).
+ stream_list_keys/2, stream_list_keys/3]).
-include_lib("riak_pb/include/riak_pb.hrl").
-include_lib("riak_pb/include/riak_kv_pb.hrl").
@@ -43,29 +43,29 @@
-type ts_columnname() :: riak_pb_ts_codec:tscolumnname().
--spec query(pid(), Query::string()|binary()) ->
+-spec 'query'(pid(), Query::string()|binary()) ->
{ok, {ColumnNames::[ts_columnname()], Rows::[tuple()]}} | {error, Reason::term()}.
-%% @equiv query/5
-query(Pid, Query) ->
- query(Pid, Query, [], undefined, []).
+%% @equiv 'query'/5
+'query'(Pid, Query) ->
+ 'query'(Pid, Query, [], undefined, []).
--spec query(pid(), Query::string()|binary(), Interpolations::[{binary(), binary()}]) ->
+-spec 'query'(pid(), Query::string()|binary(), Interpolations::[{binary(), binary()}]) ->
{ok, {ColumnNames::[binary()], Rows::[tuple()]}} | {error, term()}.
-%% @equiv query/5
-query(Pid, Query, Interpolations) ->
- query(Pid, Query, Interpolations, undefined, []).
+%% @equiv 'query'/5
+'query'(Pid, Query, Interpolations) ->
+ 'query'(Pid, Query, Interpolations, undefined, []).
--spec query(Pid::pid(),
- Query::string(),
+-spec 'query'(Pid::pid(),
+ Query::string()|binary(),
Interpolations::[{binary(), binary()}],
Cover::term()) ->
{ok, {ColumnNames::[binary()], Rows::[tuple()]}} | {error, term()}.
-%% @equiv query/5
-query(Pid, Query, Interpolations, Cover) ->
- query(Pid, Query, Interpolations, Cover, []).
+%% @equiv 'query'/5
+'query'(Pid, Query, Interpolations, Cover) ->
+ 'query'(Pid, Query, Interpolations, Cover, []).
--spec query(Pid::pid(),
- Query::string(),
+-spec 'query'(Pid::pid(),
+ Query::string()|binary(),
Interpolations::[{binary(), binary()}],
Cover::term(),
Options::proplists:proplist()) ->
@@ -75,29 +75,31 @@ query(Pid, Query, Interpolations, Cover) ->
%% first element, and a list of records, each represented as a
%% list of values, in the second element, or an @{error, Reason@}
%% tuple.
-query(Pid, Query, Interpolations, undefined, Options) ->
+'query'(Pid, Query, Interpolations, undefined, Options) ->
query_common(Pid, Query, Interpolations, undefined, Options);
-query(Pid, Query, Interpolations, Cover, Options) when is_binary(Cover) ->
+'query'(Pid, Query, Interpolations, Cover, Options) when is_binary(Cover) ->
query_common(Pid, Query, Interpolations, Cover, Options).
query_common(Pid, Query, Interpolations, Cover, Options)
- when is_pid(Pid), is_list(Query) ->
- Msg0 = riakc_ts_query_operator:serialize(
- iolist_to_binary(Query), Interpolations),
+ when is_pid(Pid) ->
+ Msg0 = riakc_ts_query_operator:serialize(Query, Interpolations),
Msg1 = Msg0#tsqueryreq{cover_context = Cover},
Msg = {Msg1, {msgopts, Options}},
Response = server_call(Pid, Msg),
- riakc_ts_query_operator:deserialize(Response).
+ riakc_ts_query_operator:deserialize(Response,
+ proplists:get_value(datatypes, Options, false)).
-%% @doc Generate a parallel coverage plan for the specified query
+%% @doc Generate a parallel coverage plan for the specified 'query'
-spec get_coverage(pid(), table_name(), QueryText::iolist()) ->
{ok, Entries::[term()]} | {error, term()}.
get_coverage(Pid, Table, Query) ->
+ T = riakc_utils:characters_to_unicode_binary(Table),
+ Q = riakc_utils:characters_to_unicode_binary(Query),
Message =
- #tscoveragereq{query = #tsinterpolation{base = iolist_to_binary(Query)},
+ #tscoveragereq{'query' = #tsinterpolation{base = Q},
replace_cover = undefined,
- table = iolist_to_binary(Table)},
+ table = T},
case server_call(Pid, Message) of
{ok, Entries} ->
{ok, riak_pb_ts_codec:decode_cover_list(Entries)};
@@ -115,11 +117,13 @@ replace_coverage(Pid, Table, Query, Cover) ->
OtherCover::list(binary())) ->
{ok, Entries::[term()]} | {error, term()}.
replace_coverage(Pid, Table, Query, Cover, Other) ->
+ T = riakc_utils:characters_to_unicode_binary(Table),
+ Q = riakc_utils:characters_to_unicode_binary(Query),
Message =
- #tscoveragereq{query = #tsinterpolation{base = iolist_to_binary(Query)},
+ #tscoveragereq{'query' = #tsinterpolation{base = Q},
replace_cover = Cover,
unavailable_cover = Other,
- table = iolist_to_binary(Table)},
+ table = T},
case server_call(Pid, Message) of
{ok, Entries} ->
{ok, riak_pb_ts_codec:decode_cover_list(Entries)};
@@ -170,7 +174,8 @@ put(Pid, Table, Measurements, Options)
delete(Pid, Table, Key, Options)
when is_pid(Pid), (is_binary(Table) orelse is_list(Table)),
is_list(Key), is_list(Options) ->
- Message = #tsdelreq{table = iolist_to_binary(Table),
+ T = riakc_utils:characters_to_unicode_binary(Table),
+ Message = #tsdelreq{table = T,
key = riak_pb_ts_codec:encode_cells_non_strict(Key),
vclock = proplists:get_value(vclock, Options),
timeout = proplists:get_value(timeout, Options)},
@@ -201,7 +206,14 @@ get(Pid, Table, Key, Options)
riakc_ts_get_operator:deserialize(Response).
--spec stream_list_keys(pid(), table_name(), proplists:proplist()) ->
+-spec stream_list_keys(pid(), table_name()) ->
+ {ok, req_id()} | {error, term()}.
+%% @doc Streaming lists keys in Table, using client Pid, with no timeout.
+%% Returns @{ok, ReqId@} or @{error, Reason@}.
+stream_list_keys(Pid, Table) ->
+ stream_list_keys(Pid, Table, infinity).
+
+-spec stream_list_keys(pid(), table_name(), proplists:proplist()|infinity) ->
{ok, req_id()} | {error, term()}.
%% @doc Streaming lists keys in Table, using client Pid. Parameter
%% Options is a proplist that can include a value for
@@ -212,9 +224,9 @@ stream_list_keys(Pid, Table, Timeout) when is_integer(Timeout) ->
stream_list_keys(Pid, Table, [{timeout, Timeout}]);
stream_list_keys(Pid, Table, Options)
when is_pid(Pid), (is_binary(Table) orelse is_list(Table)), is_list(Options) ->
+ T = riakc_utils:characters_to_unicode_binary(Table),
ReqTimeout = proplists:get_value(timeout, Options),
- Req = #tslistkeysreq{table = iolist_to_binary(Table),
- timeout = ReqTimeout},
+ Req = #tslistkeysreq{table = T, timeout = ReqTimeout},
ReqId = riakc_pb_socket:mk_reqid(),
gen_server:call(Pid, {req, Req, ?DEFAULT_PB_TIMEOUT, {ReqId, self()}}, infinity).
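
The riakc_ts changes rename query/2..5 to the quoted atom 'query' and thread a new datatypes option through to riakc_ts_query_operator:deserialize/2. Below is a minimal usage sketch, not part of the patch; the host, port, table name, and query text are placeholders:

    %% Assumes a reachable Riak node with a suitable TS table.
    {ok, Pid} = riakc_pb_socket:start_link("127.0.0.1", 8087),

    %% Default behaviour: column names only, as before.
    {ok, {ColumnNames, Rows}} =
        riakc_ts:'query'(Pid, <<"SELECT * FROM mytable WHERE ts > 0">>),

    %% With {datatypes, true}, columns come back as {Name, Type} pairs.
    {ok, {ColumnsWithTypes, _MoreRows}} =
        riakc_ts:'query'(Pid, <<"SELECT * FROM mytable WHERE ts > 0">>,
                         [], undefined, [{datatypes, true}]).
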
diff --git a/src/riakc_ts_get_operator.erl b/src/riakc_ts_get_operator.erl
index 9de9ae82..de39bd37 100644
--- a/src/riakc_ts_get_operator.erl
+++ b/src/riakc_ts_get_operator.erl
@@ -33,12 +33,12 @@
serialize(Table, Key, true) ->
- #tsgetreq{table = iolist_to_binary(Table),
- key = Key};
+ T = riakc_utils:characters_to_unicode_binary(Table),
+ #tsgetreq{table = T, key = Key};
serialize(Table, Key, false) ->
+ T = riakc_utils:characters_to_unicode_binary(Table),
SerializedKey = riak_pb_ts_codec:encode_cells_non_strict(Key),
- #tsgetreq{table = iolist_to_binary(Table),
- key = SerializedKey}.
+ #tsgetreq{table = T, key = SerializedKey}.
deserialize({error, {Code, Message}}) when is_integer(Code), is_list(Message) ->
{error, {Code, iolist_to_binary(Message)}};
diff --git a/src/riakc_ts_put_operator.erl b/src/riakc_ts_put_operator.erl
index 5710ce1e..e87e0271 100644
--- a/src/riakc_ts_put_operator.erl
+++ b/src/riakc_ts_put_operator.erl
@@ -35,12 +35,14 @@
%% As of 2015-11-05, columns parameter is ignored, Riak TS
%% expects the full set of fields in each element of Data.
serialize(TableName, Measurements, true) ->
- #tsputreq{table = iolist_to_binary(TableName),
+ T = riakc_utils:characters_to_unicode_binary(TableName),
+ #tsputreq{table = T,
columns = [],
rows = Measurements};
serialize(TableName, Measurements, false) ->
+ T = riakc_utils:characters_to_unicode_binary(TableName),
SerializedRows = riak_pb_ts_codec:encode_rows_non_strict(Measurements),
- #tsputreq{table = TableName,
+ #tsputreq{table = T,
columns = [],
rows = SerializedRows}.
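
In both the get and put operators the table name now goes through riakc_utils:characters_to_unicode_binary/1; the old non-TTB put clause sent TableName unconverted, and the other clauses used iolist_to_binary/1, which raises badarg for codepoints above 255. A hedged sketch of what the conversion permits; the table name and row below are made up, and the row shape must match the table's DDL:

    %% A unicode table name (given here as an Erlang string) is only
    %% encodable via the new conversion; iolist_to_binary/1 would crash.
    _Result = riakc_ts:put(Pid, "таблица", [{<<"region">>, <<"site">>, 1, 20.5}]).
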
diff --git a/src/riakc_ts_query_operator.erl b/src/riakc_ts_query_operator.erl
index 9e8b8efa..e28cd247 100644
--- a/src/riakc_ts_query_operator.erl
+++ b/src/riakc_ts_query_operator.erl
@@ -29,13 +29,16 @@
-include_lib("riak_pb/include/riak_ts_ttb.hrl").
-export([serialize/2,
- deserialize/1]).
+ deserialize/1, deserialize/2]).
-serialize(QueryText, Interpolations) ->
+
+serialize(QueryText, Interpolations)
+ when is_binary(QueryText) orelse is_list(QueryText) ->
+ Q = riakc_utils:characters_to_unicode_binary(QueryText),
Content = #tsinterpolation{
- base = iolist_to_binary(QueryText),
+ base = Q,
interpolations = serialize_interpolations(Interpolations)},
- #tsqueryreq{query = Content}.
+ #tsqueryreq{'query' = Content}.
serialize_interpolations(Interpolations) ->
serialize_interpolations(Interpolations, []).
@@ -47,17 +50,32 @@ serialize_interpolations([{Key, Value} | RemainingInterps],
UpdatedInterps = [#rpbpair{key=Key, value=Value} | SerializedInterps],
serialize_interpolations(RemainingInterps, UpdatedInterps).
-deserialize({error, {Code, Message}}) when is_integer(Code), is_list(Message) ->
+deserialize(Response) ->
+ deserialize(Response, false).
+
+%% 2nd (boolean) argument indicates whether column types should be
+%% included in the response. It's a bit silly that they aren't by
+%% default, but that's an old oversight/decision that can't be
+%% trivially changed without risking backwards compatibility.
+deserialize({error, {Code, Message}}, _IncludeColumnTypes)
+ when is_integer(Code), is_list(Message) ->
{error, {Code, iolist_to_binary(Message)}};
-deserialize({error, {Code, Message}}) when is_integer(Code), is_atom(Message) ->
+deserialize({error, {Code, Message}}, _IncludeColumnTypes)
+ when is_integer(Code), is_atom(Message) ->
{error, {Code, iolist_to_binary(atom_to_list(Message))}};
-deserialize({error, Message}) ->
+deserialize({error, Message}, _IncludeColumnTypes) ->
{error, Message};
-deserialize(tsqueryresp) ->
+deserialize(tsqueryresp, _Types) ->
{ok, {[], []}};
-deserialize({tsqueryresp, {ColumnNames, _ColumnTypes, Rows}}) ->
+deserialize({tsqueryresp, {ColumnNames, _ColumnTypes, Rows}}, false) ->
{ok, {ColumnNames, Rows}};
-deserialize(#tsqueryresp{columns = C, rows = R}) ->
+deserialize({tsqueryresp, {ColumnNames, ColumnTypes, Rows}}, true) ->
+ {ok, {lists:zip(ColumnNames, ColumnTypes), Rows}};
+deserialize(#tsqueryresp{columns = C, rows = R}, false) ->
ColumnNames = [ColName || #tscolumndescription{name = ColName} <- C],
Rows = riak_pb_ts_codec:decode_rows(R),
- {ok, {ColumnNames, Rows}}.
+ {ok, {ColumnNames, Rows}};
+deserialize(#tsqueryresp{columns = C, rows = R}, true) ->
+ Columns = [{ColName, ColType} || #tscolumndescription{name = ColName, type = ColType} <- C],
+ Rows = riak_pb_ts_codec:decode_rows(R),
+ {ok, {Columns, Rows}}.
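
The second argument to deserialize/2 controls whether column types are paired with the names. A small sketch exercising the tuple-response clauses added above; the column names, types, and row values are made-up example data:

    Resp = {tsqueryresp, {[<<"time">>, <<"weather">>],
                          [timestamp, varchar],
                          [{1, <<"cloudy">>}]}},
    {ok, {[<<"time">>, <<"weather">>], [{1, <<"cloudy">>}]}} =
        riakc_ts_query_operator:deserialize(Resp, false),
    {ok, {[{<<"time">>, timestamp}, {<<"weather">>, varchar}], _SameRows}} =
        riakc_ts_query_operator:deserialize(Resp, true).
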
diff --git a/src/riakc_utils.erl b/src/riakc_utils.erl
new file mode 100644
index 00000000..fba298c2
--- /dev/null
+++ b/src/riakc_utils.erl
@@ -0,0 +1,51 @@
+%% -------------------------------------------------------------------
+%%
+%% riakc_utils: erlang client utils
+%%
+%% Copyright (c) 2016 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+
+-module(riakc_utils).
+
+-export([wait_for_list/1, characters_to_unicode_binary/1]).
+
+-spec wait_for_list(non_neg_integer()) -> {ok, list()} | {error, any()}.
+%% @doc Wait for the results of a listing operation
+wait_for_list(ReqId) ->
+ wait_for_list(ReqId, []).
+wait_for_list(ReqId, Acc) ->
+ receive
+ {ReqId, done} -> {ok, lists:flatten(Acc)};
+ {ReqId, {error, Reason}} -> {error, Reason};
+ {ReqId, {_, Res}} -> wait_for_list(ReqId, [Res|Acc])
+ end.
+
+-spec characters_to_unicode_binary(string()|binary()) -> binary().
+%% @doc Convert to unicode binary with informative errors
+%% @throws {unicode_error, ErrMsg}
+characters_to_unicode_binary(String) ->
+ case unicode:characters_to_binary(String) of
+ {incomplete, Encoded, Rest} ->
+ ErrMsg = lists:flatten(io_lib:format("Incomplete unicode data provided. Encoded: ~p Rest: ~p", [Encoded, Rest])),
+ throw({unicode_error, ErrMsg});
+ {error, Encoded, Rest} ->
+ ErrMsg = lists:flatten(io_lib:format("Unicode encoding error. Encoded: ~p Rest: ~p", [Encoded, Rest])),
+ throw({unicode_error, ErrMsg});
+ Binary ->
+ Binary
+ end.
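
riakc_utils is new in this patch; here is a short usage sketch combining it with the streaming list-keys call added above (the connection details and table name are placeholders):

    {ok, Pid} = riakc_pb_socket:start_link("127.0.0.1", 8087),
    {ok, ReqId} = riakc_ts:stream_list_keys(Pid, <<"mytable">>),
    %% wait_for_list/1 collects {ReqId, {_, Res}} messages until {ReqId, done}.
    {ok, Keys} = riakc_utils:wait_for_list(ReqId),

    %% characters_to_unicode_binary/1 accepts strings or binaries and throws
    %% {unicode_error, Msg} on bad input rather than mis-encoding it.
    <<"mytable">> = riakc_utils:characters_to_unicode_binary("mytable").
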
diff --git a/test/riakc_pb_socket_tests.erl b/test/riakc_pb_socket_tests.erl
new file mode 100644
index 00000000..88e95d09
--- /dev/null
+++ b/test/riakc_pb_socket_tests.erl
@@ -0,0 +1,1400 @@
+%% -------------------------------------------------------------------
+%%
+%% riakc_pb_socket_tests: protocol buffer client tests
+%%
+%% Copyright (c) 2007-2016 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+-ifdef(TEST).
+
+-module(riakc_pb_socket_tests).
+
+-compile(export_all).
+
+-include("riakc.hrl").
+
+-include_lib("riak_pb/include/riak_pb_kv_codec.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+bad_connect_test() ->
+ %% Start with an unlikely port number
+ ?assertEqual({error, {tcp, econnrefused}}, riakc_pb_socket:start({127,0,0,1}, 65535)).
+
+queue_disconnected_test() ->
+ %% Start with an unlikely port number
+ {ok, Pid} = riakc_pb_socket:start({127,0,0,1}, 65535, [queue_if_disconnected]),
+ ?assertEqual({error, timeout}, riakc_pb_socket:ping(Pid, 10)),
+ ?assertEqual({error, timeout}, riakc_pb_socket:list_keys(Pid, <<"b">>, 10)),
+ riakc_pb_socket:stop(Pid).
+
+auto_reconnect_bad_connect_test() ->
+ %% Start with an unlikely port number
+ {ok, Pid} = riakc_pb_socket:start({127,0,0,1}, 65535, [auto_reconnect]),
+ ?assertEqual({false, []}, riakc_pb_socket:is_connected(Pid)),
+ ?assertEqual({error, disconnected}, riakc_pb_socket:ping(Pid)),
+ ?assertEqual({error, disconnected}, riakc_pb_socket:list_keys(Pid, <<"b">>)),
+ riakc_pb_socket:stop(Pid).
+
+server_closes_socket_test() ->
+ %% Silence SASL junk when socket closes.
+ error_logger:tty(false),
+ %% Set up a dummy socket to send requests on
+ {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
+ {ok, Port} = inet:port(Listen),
+ {ok, Pid} = riakc_pb_socket:start("127.0.0.1", Port),
+ {ok, Sock} = gen_tcp:accept(Listen),
+ ?assertMatch(true, riakc_pb_socket:is_connected(Pid)),
+
+ %% Send a ping request in another process so the test doesn't block
+ Self = self(),
+ spawn(fun() -> Self ! riakc_pb_socket:ping(Pid, infinity) end),
+
+ %% Make sure request received then close the socket
+ {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
+ ok = gen_tcp:close(Sock),
+ ok = gen_tcp:close(Listen),
+ receive
+ Msg1 -> % result of ping from spawned process above
+ ?assertEqual({error, disconnected}, Msg1)
+ end,
+ %% Wait for spawned process to exit
+ Mref = erlang:monitor(process, Pid),
+ receive
+ Msg2 ->
+ ?assertMatch({'DOWN', Mref, process, _, _}, Msg2)
+ end.
+
+auto_reconnect_server_closes_socket_test() ->
+ %% Set up a dummy socket to send requests on
+ {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
+ {ok, Port} = inet:port(Listen),
+ {ok, Pid} = riakc_pb_socket:start_link("127.0.0.1", Port, [auto_reconnect]),
+ {ok, Sock} = gen_tcp:accept(Listen),
+ ?assertMatch(true, riakc_pb_socket:is_connected(Pid)),
+
+ %% Send a ping request in another process so the test doesn't block
+ Self = self(),
+ spawn(fun() -> Self ! riakc_pb_socket:ping(Pid, infinity) end),
+
+ %% Make sure request received then close the socket
+ {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
+ ok = gen_tcp:close(Sock),
+ ok = gen_tcp:close(Listen),
+ receive
+ Msg ->
+ ?assertEqual({error, disconnected}, Msg)
+ end,
+ %% Server will not have had a chance to reconnect yet, reason counters empty.
+ ?assertMatch({false, []}, riakc_pb_socket:is_connected(Pid)),
+ riakc_pb_socket:stop(Pid).
+
+dead_socket_pid_returns_to_caller_test() ->
+ %% Set up a dummy socket to send requests on
+ {ok, Listen} = gen_tcp:listen(0, [binary, {packet, 4}, {active, false}]),
+ {ok, Port} = inet:port(Listen),
+ {ok, Pid} = riakc_pb_socket:start("127.0.0.1", Port),
+ {ok, Sock} = gen_tcp:accept(Listen),
+ ?assertMatch(true, riakc_pb_socket:is_connected(Pid)),
+
+ %% Send a ping request in another process so the test doesn't block
+ Self = self(),
+ spawn(fun() -> Self ! (catch riakc_pb_socket:ping(Pid, infinity)) end),
+
+ %% Make sure request received then kill the process
+ {ok, _ReqMsg} = gen_tcp:recv(Sock, 0),
+ exit(Pid, kill),
+ receive
+ Msg ->
+ ?assertMatch({'EXIT', {killed, _}}, Msg)
+ end,
+ %% Cleanup
+ ok = gen_tcp:close(Sock),
+ ok = gen_tcp:close(Listen).
+
+adding_hll_to_map_throws_error_test() ->
+ UpdateFun = fun(H) ->
+ riakc_hll:add_elements([<<"X">>, <<"Y">>], H)
+ end,
+ HllKey = {<<"hll">>, hll},
+ ?assertError(badarg, riakc_map:update(HllKey, UpdateFun, riakc_map:new())).
+
+%%
+%% Tests to run against a live node - NB the node gets reconfigured and generally messed with
+%%
+integration_tests() ->
+ [{"ping",
+ ?_test( begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid)),
+ ?assertEqual(true, riakc_pb_socket:is_connected(Pid)),
+ riakc_pb_socket:stop(Pid)
+ end)},
+
+ {"reconnect test",
+ ?_test( begin
+ %% Make sure originally there
+ {ok, Pid} = riakc_test_utils:start_link(),
+
+ %% Change the options to allow reconnection/queueing
+ riakc_pb_socket:set_options(Pid, [queue_if_disconnected]),
+
+ %% Kill the socket
+ riakc_test_utils:kill_riak_pb_sockets(),
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid)),
+ riakc_pb_socket:stop(Pid)
+ end)},
+
+ {"set client id",
+ ?_test(
+ begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, <<OrigId:32>>} = riakc_pb_socket:get_client_id(Pid),
+
+ NewId = <<(OrigId+1):32>>,
+ ok = riakc_pb_socket:set_client_id(Pid, NewId),
+ {ok, NewId} = riakc_pb_socket:get_client_id(Pid)
+ end)},
+
+ {"version",
+ ?_test(
+ begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, ServerInfo} = riakc_pb_socket:get_server_info(Pid),
+ [{node, _}, {server_version, _}] = lists:sort(ServerInfo)
+ end)},
+
+ {"get_should_read_put_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, PO} = riakc_pb_socket:put(Pid, O, [return_body]),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>),
+ ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
+ end)},
+
+ {"get should read put with timeout",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, PO} = riakc_pb_socket:put(Pid, O, [{w, 1}, {dw, 1}, return_body]),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>, 500),
+ ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
+ end)},
+
+ {"get should read put with options",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, PO} = riakc_pb_socket:put(Pid, O, [{w, 1}, {dw, 1}, return_body]),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>, [{r, 1}]),
+ ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
+ end)},
+
+ {"get should read put with non integer options",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, PO} = riakc_pb_socket:put(Pid, O, [{w, all}, {dw, quorum}, return_body]),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>, [{r, one}]),
+ ?assertEqual(riakc_obj:get_contents(PO), riakc_obj:get_contents(GO))
+ end)},
+
+ {"put and delete with timeout",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"puttimeouttest">>, <<"value">>),
+ ok = riakc_pb_socket:put(Pid, PO, 500),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"puttimeouttest">>, 500),
+ ?assertEqual(<<"value">>, riakc_obj:get_value(GO)),
+ ok = riakc_pb_socket:delete(Pid, <<"b">>, <<"puttimeouttest">>, 500),
+ {error, notfound} = riakc_pb_socket:get(Pid, <<"b">>, <<"puttimeouttest">>)
+ end)},
+
+ {"update_should_change_value_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, PO} = riakc_pb_socket:put(Pid, O, [return_body]),
+ PO2 = riakc_obj:update_value(PO, <<"v2">>),
+ ok = riakc_pb_socket:put(Pid, PO2),
+ {ok, GO} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>),
+ ?assertEqual(<<"v2">>, riakc_obj:get_value(GO))
+ end)},
+
+ {"key_should_be_missing_after_delete_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ %% Put key/value
+ O0 = riakc_obj:new(<<"b">>, <<"k">>),
+ O = riakc_obj:update_value(O0, <<"v">>),
+ {ok, _PO} = riakc_pb_socket:put(Pid, O, [return_body]),
+ %% Prove it really got stored
+ {ok, GO1} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>),
+ ?assertEqual(<<"v">>, riakc_obj:get_value(GO1)),
+ %% Delete and check no longer found
+ ok = riakc_pb_socket:delete(Pid, <<"b">>, <<"k">>),
+ {error, notfound} = riakc_pb_socket:get(Pid, <<"b">>, <<"k">>)
+ end)},
+
+ {"delete missing key test",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ %% Delete and check no longer found
+ ok = riakc_pb_socket:delete(Pid, <<"notabucket">>, <<"k">>, [{rw, 1}]),
+ {error, notfound} = riakc_pb_socket:get(Pid, <<"notabucket">>, <<"k">>)
+ end)},
+
+ {"empty_list_buckets_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ?assertEqual({ok, []}, riakc_pb_socket:list_buckets(Pid))
+ end)},
+
+ {"list_buckets_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Bs = lists:sort([list_to_binary(["b"] ++ integer_to_list(N)) || N <- lists:seq(1, 10)]),
+ F = fun(B) ->
+ O=riakc_obj:new(B, <<"key">>),
+ riakc_pb_socket:put(Pid, riakc_obj:update_value(O, <<"val">>))
+ end,
+ [F(B) || B <- Bs],
+ {ok, LBs} = riakc_pb_socket:list_buckets(Pid),
+ ?assertEqual(Bs, lists:sort(LBs))
+ end)},
+
+ {"list_keys_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Bucket = <<"listkeys">>,
+ Ks = lists:sort([list_to_binary(integer_to_list(N)) || N <- lists:seq(1, 10)]),
+ F = fun(K) ->
+ O=riakc_obj:new(Bucket, K),
+ riakc_pb_socket:put(Pid, riakc_obj:update_value(O, <<"val">>))
+ end,
+ [F(K) || K <- Ks],
+ {ok, LKs} = riakc_pb_socket:list_keys(Pid, Bucket),
+ ?assertEqual(Ks, lists:sort(LKs)),
+
+ %% Make sure it works with an infinite timeout (will reset the timeout
+ %% timer after each packet)
+ {ok, LKs2} = riakc_pb_socket:list_keys(Pid, Bucket, infinity),
+ ?assertEqual(Ks, lists:sort(LKs2))
+ end)},
+
+ {"get bucket properties test",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, Props} = riakc_pb_socket:get_bucket(Pid, <<"b">>),
+ ?assertEqual(3, proplists:get_value(n_val, Props)),
+ ?assertEqual(false, proplists:get_value(allow_mult, Props))
+ end)},
+
+ {"set bucket properties test",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:set_bucket(Pid, <<"b">>, [{n_val, 2}, {allow_mult, false}]),
+ {ok, Props} = riakc_pb_socket:get_bucket(Pid, <<"b">>),
+ ?assertEqual(2, proplists:get_value(n_val, Props)),
+ ?assertEqual(false, proplists:get_value(allow_mult, Props))
+ end)},
+
+ {"allow_mult should allow dupes",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid1} = riakc_test_utils:start_link(),
+ {ok, Pid2} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:set_bucket(Pid1, <<"multibucket">>, [{allow_mult, true}]),
+ riakc_pb_socket:delete(Pid1, <<"multibucket">>, <<"foo">>),
+ {error, notfound} = riakc_pb_socket:get(Pid1, <<"multibucket">>, <<"foo">>),
+ O = riakc_obj:new(<<"multibucket">>, <<"foo">>),
+ O1 = riakc_obj:update_value(O, <<"pid1">>),
+ O2 = riakc_obj:update_value(O, <<"pid2">>),
+ ok = riakc_pb_socket:put(Pid1, O1),
+
+ ok = riakc_pb_socket:put(Pid2, O2),
+ {ok, O3} = riakc_pb_socket:get(Pid1, <<"multibucket">>, <<"foo">>),
+ ?assertEqual([<<"pid1">>, <<"pid2">>], lists:sort(riakc_obj:get_values(O3))),
+ O4 = riakc_obj:update_value(riakc_obj:select_sibling(1, O3), <<"resolved">>),
+ ok = riakc_pb_socket:put(Pid1, O4),
+ {ok, GO} = riakc_pb_socket:get(Pid1, <<"multibucket">>, <<"foo">>),
+ ?assertEqual([<<"resolved">>], lists:sort(riakc_obj:get_values(GO))),
+ riakc_pb_socket:delete(Pid1, <<"multibucket">>, <<"foo">>)
+ end)},
+
+ {"update object test",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"k">>, <<"d">>),
+ io:format("O0: ~p\n", [O0]),
+ {ok, O1} = riakc_pb_socket:put(Pid, O0, [return_body]),
+ io:format("O1: ~p\n", [O1]),
+ M1 = riakc_obj:get_metadata(O1),
+ M2 = dict:store(?MD_LINKS, [{{<<"b">>, <<"k1">>}, <<"t1">>}], M1),
+ O2 = riakc_obj:update_metadata(O1, M2),
+ riakc_pb_socket:put(Pid, O2)
+ end)},
+
+ {"queue test",
+ ?_test(begin
+ %% Would really like this in a nested {setup, blah} structure
+ %% but eunit does not allow
+ {ok, Pid} = riakc_test_utils:start_link(),
+ riakc_test_utils:pause_riak_pb_listener(),
+ Me = self(),
+ %% this request will block as
+ spawn(fun() -> Me ! {1, riakc_pb_socket:ping(Pid)} end),
+ %% this request should be queued as socket will not be created
+ spawn(fun() -> Me ! {2, riakc_pb_socket:ping(Pid)} end),
+ riakc_test_utils:resume_riak_pb_listener(),
+ receive {1,Ping1} -> ?assertEqual(Ping1, pong) end,
+ receive {2,Ping2} -> ?assertEqual(Ping2, pong) end
+ end)},
+
+ {"timeout queue test",
+ ?_test(begin
+ %% Would really like this in a nested {setup, blah} structure
+ %% but eunit does not allow
+ riakc_test_utils:pause_riak_pb_listener(),
+ {ok, Pid} = riakc_test_utils:start_link([queue_if_disconnected]),
+ Me = self(),
+ %% this request will block as
+ spawn(fun() -> Me ! {1, riakc_pb_socket:ping(Pid, 0)} end),
+ %% this request should be queued as socket will not be created
+ spawn(fun() -> Me ! {2, riakc_pb_socket:ping(Pid, 0)}, Me ! running end),
+ receive running -> ok end,
+ riakc_test_utils:resume_riak_pb_listener(),
+ receive {1,Ping1} -> ?assertEqual({error, timeout}, Ping1) end,
+ receive {2,Ping2} -> ?assertEqual({error, timeout}, Ping2) end
+ end)},
+
+ {"ignore stale tref test",
+ ?_test(begin
+ %% Would really like this in a nested {setup, blah} structure
+ %% but eunit does not allow
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Pid ! {req_timeout, make_ref()},
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid))
+ end)},
+
+ {"infinite timeout ping test",
+ ?_test(begin
+ %% Would really like this in a nested {setup, blah} structure
+ %% but eunit does not allow
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid, infinity)),
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid, undefined))
+ end)},
+
+ {"javascript_source_map_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ B = <<"bucket">>,
+ K = <<"foo">>,
+ O=riakc_obj:new(B, K),
+ riakc_pb_socket:put(Pid, riakc_obj:update_value(O, <<"2">>, "application/json")),
+
+ ?assertEqual({ok, [{0, [2]}]},
+ riakc_pb_socket:mapred(Pid,
+ [{B, K}],
+ [{map, {jsanon, <<"function (v) { return [JSON.parse(v.values[0].data)]; }">>},
+ undefined, true}]))
+ end)},
+
+ {"javascript_named_map_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ B = <<"bucket">>,
+ K = <<"foo">>,
+ O=riakc_obj:new(B, K),
+ riakc_pb_socket:put(Pid, riakc_obj:update_value(O, <<"99">>, "application/json")),
+
+ ?assertEqual({ok, [{0, [99]}]},
+ riakc_pb_socket:mapred(Pid,
+ [{B, K}],
+ [{map, {jsfun, <<"Riak.mapValuesJson">>},
+ undefined, true}]))
+ end)},
+
+ {"javascript_source_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
+ {<<"bar">>, <<"3">>},
+ {<<"baz">>, <<"4">>}]],
+
+ ?assertEqual({ok, [{1, [3]}]},
+ riakc_pb_socket:mapred(Pid,
+ [{<<"bucket">>, <<"foo">>},
+ {<<"bucket">>, <<"bar">>},
+ {<<"bucket">>, <<"baz">>}],
+ [{map, {jsanon, <<"function (v) { return [1]; }">>},
+ undefined, false},
+ {reduce, {jsanon,
+ <<"function(v) {
+ total = v.reduce(
+ function(prev,curr,idx,array) {
+ return prev+curr;
+ }, 0);
+ return [total];
+ }">>},
+ undefined, true}]))
+ end)},
+
+ {"javascript_named_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
+ {<<"bar">>, <<"3">>},
+ {<<"baz">>, <<"4">>}]],
+
+ ?assertEqual({ok, [{1, [9]}]},
+ riakc_pb_socket:mapred(Pid,
+ [{<<"bucket">>, <<"foo">>},
+ {<<"bucket">>, <<"bar">>},
+ {<<"bucket">>, <<"baz">>}],
+ [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
+ end)},
+
+ {"javascript_bucket_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
+ {<<"bar">>, <<"3">>},
+ {<<"baz">>, <<"4">>}]],
+
+ ?assertEqual({ok, [{1, [9]}]},
+ riakc_pb_socket:mapred_bucket(Pid, <<"bucket">>,
+ [{map, {jsfun, <<"Riak.mapValuesJson">>}, undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
+ end)},
+
+ {"javascript_arg_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O=riakc_obj:new(<<"bucket">>, <<"foo">>),
+ riakc_pb_socket:put(Pid, riakc_obj:update_value(O, <<"2">>, "application/json")),
+ ?assertEqual({ok, [{1, [10]}]},
+ riakc_pb_socket:mapred(Pid,
+ [{{<<"bucket">>, <<"foo">>}, 5},
+ {{<<"bucket">>, <<"foo">>}, 10},
+ {{<<"bucket">>, <<"foo">>}, 15},
+ {{<<"bucket">>, <<"foo">>}, -15},
+ {{<<"bucket">>, <<"foo">>}, -5}],
+ [{map, {jsanon, <<"function(v, arg) { return [arg]; }">>},
+ undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSum">>}, undefined, true}]))
+ end)},
+ {"erlang_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>},
+ {<<"bar">>, <<"3">>},
+ {<<"baz">>, <<"4">>}]],
+
+ {ok, [{1, Results}]} = riakc_pb_socket:mapred(Pid,
+ [{<<"bucket">>, <<"foo">>},
+ {<<"bucket">>, <<"bar">>},
+ {<<"bucket">>, <<"baz">>}],
+ [{map, {modfun, riak_kv_mapreduce,
+ map_object_value},
+ undefined, false},
+ {reduce, {modfun, riak_kv_mapreduce,
+ reduce_set_union},
+ undefined, true}]),
+ ?assertEqual([<<"2">>, <<"3">>, <<"4">>], lists:sort(Results))
+ end)},
+ {"erlang_map_reduce_binary_2i_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V,I}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ MD=riakc_obj:add_secondary_index(dict:new(), I),
+ O2=riakc_obj:update_metadata(O,MD),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O2, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>, {{binary_index, "idx"}, [<<"a">>]}},
+ {<<"bar">>, <<"3">>, {{binary_index, "idx"}, [<<"b">>]}},
+ {<<"baz">>, <<"4">>, {{binary_index, "idx"}, [<<"a">>]}}]],
+
+ {ok, [{1, Results}]} = riakc_pb_socket:mapred(Pid,
+ {index,<<"bucket">>,{binary_index, "idx"}, <<"a">>},
+ [{map, {modfun, riak_kv_mapreduce,
+ map_object_value},
+ undefined, false},
+ {reduce, {modfun, riak_kv_mapreduce,
+ reduce_set_union},
+ undefined, true}]),
+ ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results))
+ end)},
+ {"erlang_map_reduce_integer_2i_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Store = fun({K,V,I}) ->
+ O=riakc_obj:new(<<"bucket">>, K),
+ MD=riakc_obj:add_secondary_index(dict:new(), I),
+ O2=riakc_obj:update_metadata(O,MD),
+ riakc_pb_socket:put(Pid,riakc_obj:update_value(O2, V, "application/json"))
+ end,
+ [Store(KV) || KV <- [{<<"foo">>, <<"2">>, {{integer_index, "idx"}, [4]}},
+ {<<"bar">>, <<"3">>, {{integer_index, "idx"}, [7]}},
+ {<<"baz">>, <<"4">>, {{integer_index, "idx"}, [4]}}]],
+
+ {ok, [{1, Results}]} = riakc_pb_socket:mapred(Pid,
+ {index,<<"bucket">>,{integer_index, "idx"},3,5},
+ [{map, {modfun, riak_kv_mapreduce,
+ map_object_value},
+ undefined, false},
+ {reduce, {modfun, riak_kv_mapreduce,
+ reduce_set_union},
+ undefined, true}]),
+ ?assertEqual([<<"2">>, <<"4">>], lists:sort(Results))
+ end)},
+ {"missing_key_erlang_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, Results} = riakc_pb_socket:mapred(Pid, [{<<"bucket">>, <<"foo">>},
+ {<<"bucket">>, <<"bar">>},
+ {<<"bucket">>, <<"baz">>}],
+ [{map, {modfun, riak_kv_mapreduce,
+ map_object_value},
+ <<"include_notfound">>, false},
+ {reduce, {modfun, riak_kv_mapreduce,
+ reduce_set_union},
+ undefined, true}]),
+ [{1, [{error, notfound}|_]}] = Results end)},
+ {"missing_key_javascript_map_reduce_test()",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, Results} = riakc_pb_socket:mapred(Pid, [{<<"bucket">>, <<"foo">>},
+ {<<"bucket">>, <<"bar">>},
+ {<<"bucket">>, <<"baz">>}],
+ [{map, {jsfun, <<"Riak.mapValuesJson">>},
+ undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSort">>},
+ undefined, true}]),
+ [{1, [{not_found, {_, _},<<"undefined">>}|_]}] = Results end)},
+ {"map reduce bad inputs",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Res = riakc_pb_socket:mapred(Pid, undefined,
+ [{map, {jsfun, <<"Riak.mapValuesJson">>},
+ undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSum">>},
+ undefined, true}]),
+ ?assertEqual({error, <<"{inputs,{\"Inputs must be a binary bucket, a tuple of bucket and key-filters, a list of target tuples, or a search, index, or modfun tuple:\",\n undefined}}">>},
+ Res )
+ end)},
+ {"map reduce bad input keys",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Res = riakc_pb_socket:mapred(Pid, [<<"b">>], % no {B,K} tuple
+ [{map, {jsfun, <<"Riak.mapValuesJson">>},
+ undefined, false},
+ {reduce, {jsfun, <<"Riak.reduceSum">>},
+ undefined, true}]),
+ ?assertEqual({error,<<"{inputs,{\"Inputs target tuples must be {B,K} or {{B,K},KeyData}:\",[<<\"b\">>]}}">>},
+ Res)
+ end)},
+ {"map reduce bad query",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Res = riakc_pb_socket:mapred(Pid, [{<<"b">>,<<"k">>}], % no {B,K} tuple
+ undefined),
+ ?assertEqual({error,<<"{query,{\"Query takes a list of step tuples\",undefined}}">>},
+ Res)
+ end)},
+ {"get should convert erlang terms",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ TestNode = riakc_test_utils:test_riak_node(),
+ MyBin = <<"some binary">>,
+ MyTerm = [<<"b">>,<<"a_term">>,{some_term, ['full', "of", 123, 654.321]}],
+ BinObj = rpc:call(TestNode, riak_object, new,
+ [<<"b">>, <<"a_bin">>, MyBin]),
+ TermObj = rpc:call(TestNode, riak_object, new,
+ [<<"b">>, <<"a_term">>, MyTerm]),
+ {ok, C} = rpc:call(TestNode, riak, local_client, []),
+ %% parameterized module trickery - stick it as the last argument
+ ok = rpc:call(TestNode, riak_client, put, [BinObj, 1, C]),
+ ok = rpc:call(TestNode, riak_client, put, [TermObj, 1, C]),
+
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, GotBinObj} = riakc_pb_socket:get(Pid, <<"b">>, <<"a_bin">>),
+ {ok, GotTermObj} = riakc_pb_socket:get(Pid, <<"b">>, <<"a_term">>),
+
+ ?assertEqual(riakc_obj:get_value(GotBinObj), MyBin),
+ ?assertEqual(riakc_obj:get_content_type(GotTermObj),
+ "application/x-erlang-binary"),
+ ?assertEqual(binary_to_term(riakc_obj:get_value(GotTermObj)), MyTerm)
+ end)},
+ {"putting without a key should generate one",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, undefined, <<"value">>),
+ Res1 = riakc_pb_socket:put(Pid, PO),
+ Res2 = riakc_pb_socket:put(Pid, PO),
+ ?assertMatch({ok, _K}, Res1),
+ ?assertMatch({ok, _K}, Res2),
+ {ok, K1} = Res1,
+ {ok, K2} = Res2,
+ ?assertMatch(true, is_binary(K1)),
+ ?assertMatch(true, is_binary(K2)),
+ % Make sure the same key isn't generated twice
+ ?assert(Res1 =/= Res2)
+ end)},
+ {"putting without a key should generate one with return_body",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, undefined, <<"value">>),
+ {ok, Obj1} = riakc_pb_socket:put(Pid, PO, [return_body]),
+ {ok, Obj2} = riakc_pb_socket:put(Pid, PO, [return_body]),
+ %% Make sure the same key isn't generated twice
+ ?assertEqual(riakc_obj, element(1, Obj1)),
+ ?assertEqual(riakc_obj, element(1, Obj2)),
+ ?assert(riakc_obj:key(Obj1) /= riakc_obj:key(Obj2))
+ end)},
+ {"conditional gets should return unchanged if the vclock matches",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ riakc_pb_socket:put(Pid, PO),
+ {ok, Obj} = riakc_pb_socket:get(Pid, <<"b">>, <<"key">>),
+ VClock = riakc_obj:vclock(Obj),
+ %% object hasn't changed
+ ?assertEqual(unchanged, riakc_pb_socket:get(Pid, <<"b">>, <<"key">>,
+ [{if_modified, VClock}])),
+ %% change the object and make sure unchanged isn't returned
+ P1 = riakc_obj:update_value(Obj, <<"newvalue">>),
+ riakc_pb_socket:put(Pid, P1),
+ ?assertMatch({ok, _}, riakc_pb_socket:get(Pid, <<"b">>, <<"key">>,
+ [{if_modified, VClock}]))
+ end)},
+ {"the head get option should return the object metadata without the value",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ riakc_pb_socket:put(Pid, PO),
+ {ok, Obj} = riakc_pb_socket:get(Pid, <<"b">>, <<"key">>, [head]),
+ ?assertEqual(<<>>, riakc_obj:get_value(Obj)),
+ {ok, Obj2} = riakc_pb_socket:get(Pid, <<"b">>, <<"key">>, []),
+ ?assertEqual(<<"value">>, riakc_obj:get_value(Obj2))
+ end)},
+ {"conditional put should allow you to avoid overwriting a value if it already exists",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, PO, [if_none_match])),
+ ?assertEqual({error, <<"match_found">>}, riakc_pb_socket:put(Pid, PO, [if_none_match]))
+ end)},
+ {"conditional put should allow you to avoid overwriting a value if its been updated",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ {ok, Obj} = riakc_pb_socket:put(Pid, PO, [return_body]),
+ Obj2 = riakc_obj:update_value(Obj, <<"newvalue">>),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, Obj2, [if_not_modified])),
+ ?assertEqual({error, <<"modified">>}, riakc_pb_socket:put(Pid, Obj2, [if_not_modified]))
+ end)},
+ {"if_not_modified should fail if the object is not found",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ ?assertEqual({error, <<"notfound">>}, riakc_pb_socket:put(Pid, PO, [if_not_modified]))
+ end)},
+ {"return_head should empty out the value in the riak object",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ {ok, Obj} = riakc_pb_socket:put(Pid, PO, [return_head]),
+ ?assertEqual(<<>>, riakc_obj:get_value(Obj))
+ end)},
+ {"return_head should empty out all values when there's siblings",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:set_bucket(Pid, <<"b">>, [{allow_mult, true}]),
+ PO = riakc_obj:new(<<"b">>, <<"key">>, <<"value">>),
+ {ok, Obj} = riakc_pb_socket:put(Pid, PO, [return_head]),
+ ?assertEqual(<<>>, riakc_obj:get_value(Obj)),
+ {ok, Obj2} = riakc_pb_socket:put(Pid, PO, [return_head]),
+ ?assertEqual([<<>>, <<>>], riakc_obj:get_values(Obj2))
+ end)},
+
+ {"user metadata manipulation",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"key0">>, <<"value0">>),
+ MD0 = riakc_obj:get_update_metadata(O0),
+ MD1 = riakc_obj:set_user_metadata_entry(MD0, {<<"Key1">>,<<"Val1">>}),
+ O1 = riakc_obj:update_metadata(O0, MD1),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O1)),
+ {ok, O2} = riakc_pb_socket:get(Pid, <<"b">>, <<"key0">>),
+ MD2 = riakc_obj:get_update_metadata(O2),
+ ?assertEqual([{<<"Key1">>,<<"Val1">>}], riakc_obj:get_user_metadata_entries(MD2)),
+ MD3 = riakc_obj:set_user_metadata_entry(MD2, {<<"Key2">>,<<"Val2">>}),
+ O3 = riakc_obj:update_metadata(O2, MD3),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O3)),
+ {ok, O4} = riakc_pb_socket:get(Pid, <<"b">>, <<"key0">>),
+ ?assertEqual(2, length(riakc_obj:get_user_metadata_entries(riakc_obj:get_update_metadata(O4))))
+ end)},
+ {"binary secondary index manipulation",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"key1">>, <<"value1">>),
+ MD0 = riakc_obj:get_update_metadata(O0),
+ MD1 = riakc_obj:set_secondary_index(MD0, [{{binary_index, "idx"},[<<"aaa">>]}]),
+ O1 = riakc_obj:update_metadata(O0, MD1),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O1)),
+ {ok, O2} = riakc_pb_socket:get(Pid, <<"b">>, <<"key1">>),
+ MD2 = riakc_obj:get_update_metadata(O2),
+ ?assertEqual([<<"aaa">>], lists:sort(riakc_obj:get_secondary_index(MD2,{binary_index,"idx"}))),
+ MD3 = riakc_obj:add_secondary_index(MD2, [{{binary_index, "idx"},[<<"bbb">>,<<"aaa">>,<<"ccc">>]}]),
+ O3 = riakc_obj:update_metadata(O2, MD3),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O3)),
+ ?assertEqual({ok,?INDEX_RESULTS{keys=[<<"key1">>]}},
+ riakc_pb_socket:get_index(Pid, <<"b">>, {binary_index, "idx"}, <<"bbb">>)),
+ {ok, O4} = riakc_pb_socket:get(Pid, <<"b">>, <<"key1">>),
+ MD4 = riakc_obj:get_update_metadata(O4),
+ ?assertEqual([<<"aaa">>,<<"bbb">>,<<"ccc">>], lists:sort(riakc_obj:get_secondary_index(MD4, {binary_index, "idx"}))),
+ MD5 = riakc_obj:delete_secondary_index(MD4,{binary_index,"idx"}),
+ O5 = riakc_obj:update_metadata(O4, MD5),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O5))
+ end)},
+ {"integer secondary index manipulation",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ O0 = riakc_obj:new(<<"b">>, <<"key2">>, <<"value2">>),
+ MD0 = riakc_obj:get_update_metadata(O0),
+ MD1 = riakc_obj:set_secondary_index(MD0, [{{integer_index, "idx"},[67]}]),
+ O1 = riakc_obj:update_metadata(O0, MD1),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O1)),
+ {ok, O2} = riakc_pb_socket:get(Pid, <<"b">>, <<"key2">>),
+ MD2 = riakc_obj:get_update_metadata(O2),
+ ?assertEqual([67], lists:sort(riakc_obj:get_secondary_index(MD2,{integer_index,"idx"}))),
+ MD3 = riakc_obj:add_secondary_index(MD2, [{{integer_index, "idx"},[56,10000,100]}]),
+ O3 = riakc_obj:update_metadata(O2, MD3),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O3)),
+ ?assertEqual({ok,?INDEX_RESULTS{keys=[<<"key2">>]}},
+ riakc_pb_socket:get_index(Pid, <<"b">>, {integer_index, "idx"}, 50, 60)),
+ {ok, O4} = riakc_pb_socket:get(Pid, <<"b">>, <<"key2">>),
+ MD4 = riakc_obj:get_update_metadata(O4),
+ ?assertEqual([56,67,100,10000], lists:sort(riakc_obj:get_secondary_index(MD4, {integer_index, "idx"}))),
+ MD5 = riakc_obj:delete_secondary_index(MD4,{integer_index,"idx"}),
+ O5 = riakc_obj:update_metadata(O4, MD5),
+ ?assertEqual(ok, riakc_pb_socket:put(Pid, O5))
+ end)},
+ {"counter increment / decrement / get value",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ unlink(Pid),
+ Bucket = <<"counter_test_bucket">>,
+ Key = <<"test_counter">>,
+ %% counters require allow_mult to be true
+ ok = riakc_pb_socket:set_bucket(Pid, Bucket, [{allow_mult, true}]),
+ ok = riakc_pb_socket:counter_incr(Pid, Bucket, Key, 10),
+ ?assertEqual({ok, 10}, riakc_pb_socket:counter_val(Pid, Bucket, Key)),
+ ok = riakc_pb_socket:counter_incr(Pid, Bucket, Key, -5, [{w, quorum}, {pw, one}, {dw, all}]),
+ ?assertEqual({ok, 5}, riakc_pb_socket:counter_val(Pid, Bucket, Key, [{pr, one}]))
+ end)},
+ {"create a search index / get / list / delete with default timeout",
+ {timeout, 30, ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ riakc_test_utils:reset_solr(Pid),
+ Index = <<"indextest">>,
+ SchemaName = <<"_yz_default">>,
+ ?assertEqual(ok,
+ riakc_pb_socket:create_search_index(Pid,
+ Index,
+ SchemaName,
+ [{n_val,2}])),
+ case riakc_pb_socket:get_search_index(Pid, Index) of
+ {ok, IndexData} ->
+ ?assertEqual(proplists:get_value(
+ index, IndexData), Index),
+ ?assertEqual(proplists:get_value(
+ schema, IndexData), SchemaName),
+ ?assertEqual(proplists:get_value(
+ n_val, IndexData), 2);
+ {error, <<"notfound">>} ->
+ false
+ end,
+ ?assertEqual({ok, [[{index,Index},
+ {schema,SchemaName},
+ {n_val,2}]]},
+ riakc_pb_socket:list_search_indexes(Pid)),
+ ?assertEqual(ok, riakc_pb_socket:delete_search_index(Pid, Index))
+ end)}},
+ {"create a search index / get with user-set timeout",
+ {timeout, 30, ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ riakc_test_utils:reset_solr(Pid),
+ Index = <<"indexwithintimeouttest">>,
+ SchemaName = <<"_yz_default">>,
+ ?assertEqual(ok,
+ riakc_pb_socket:create_search_index(Pid,
+ Index,
+ SchemaName,
+ 20000)),
+ case riakc_pb_socket:get_search_index(Pid, Index) of
+ {ok, IndexData} ->
+ ?assertEqual(proplists:get_value(
+ index, IndexData), Index),
+ ?assertEqual(proplists:get_value(
+ schema, IndexData), SchemaName);
+ {error, <<"notfound">>} ->
+ false
+ end
+ end)}},
+ {"create a search schema / get",
+ {timeout, 30, ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ riakc_test_utils:reset_solr(Pid),
+ Schema = <<"
+
+
+
+
+
+
+
+
+
+
+
+
+_yz_id
+
+
+
+">>,
+ Index = <<"schemaindex">>,
+ SchemaName = <<"myschema">>,
+ ?assertEqual(ok, riakc_pb_socket:create_search_schema(Pid, SchemaName, Schema)),
+ ?assertEqual(ok, riakc_pb_socket:create_search_index(Pid, Index, SchemaName, [])),
+ riakc_test_utils:wait_until( fun() ->
+ case riakc_pb_socket:list_search_indexes(Pid) of
+ {ok, []} ->
+ false;
+ {ok, [IndexData|_]} ->
+ proplists:get_value(index, IndexData) == Index andalso
+ proplists:get_value(schema, IndexData) == SchemaName andalso
+ proplists:get_value(n_val, IndexData) == 3
+ end
+ end, 20, 1000 ),
+ riakc_test_utils:wait_until( fun() ->
+ case riakc_pb_socket:get_search_schema(Pid, SchemaName) of
+ {ok, SchemaData} ->
+ proplists:get_value(name, SchemaData) == SchemaName andalso
+ proplists:get_value(content, SchemaData) == Schema;
+ {error, <<"notefound">>} ->
+ false
+ end
+ end, 20, 1000 )
+ end)}},
+ {"create a search index and tie to a bucket",
+ {timeout, 30, ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Index = <<"myindex">>,
+ Bucket = <<"mybucket">>,
+ ?assertEqual(ok, riakc_pb_socket:create_search_index(Pid, Index)),
+ ok = riakc_pb_socket:set_search_index(Pid, Bucket, Index),
+ PO = riakc_obj:new(Bucket, <<"fred">>, <<"{\"name_s\":\"Freddy\"}">>, "application/json"),
+ {ok, _Obj} = riakc_pb_socket:put(Pid, PO, [return_head]),
+ riakc_test_utils:wait_until( fun() ->
+ {ok, Result} = riakc_pb_socket:search(Pid, Index, <<"*:*">>),
+ 1 == Result#search_results.num_found
+ end, 20, 1000 )
+ end)}},
+ {"search utf8",
+ {timeout, 30, ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ riakc_test_utils:reset_solr(Pid),
+ Index = <<"myindex">>,
+ Bucket = <<"mybucket">>,
+ ?assertEqual(ok, riakc_pb_socket:create_search_index(Pid, Index)),
+ ok = riakc_pb_socket:set_search_index(Pid, Bucket, Index),
+ PO = riakc_obj:new(Bucket, <<"fred">>, <<"{\"name_s\":\"×ָּרָ×\"}"/utf8>>, "application/json"),
+ {ok, _Obj} = riakc_pb_socket:put(Pid, PO, [return_head]),
+ riakc_test_utils:wait_until( fun() ->
+ {ok, Result} = riakc_pb_socket:search(Pid, Index, <<"name_s:בָּרָא"/utf8>>),
+ 1 == Result#search_results.num_found
+ end )
+ end)}},
+ {"trivial set delete",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 1),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:del_element(<<"X">>, S0))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assertNot(riakc_set:is_element(<<"X">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 0)
+ end)},
+ {"add and remove items in nested set in map",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(riakc_map:update({<<"set">>, set},
+ fun(S) ->
+ riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, S))
+ end, riakc_map:new()))),
+ {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L0 = riakc_map:fetch({<<"set">>, set}, M0),
+ ?assert(lists:member(<<"X">>, L0)),
+ ?assert(lists:member(<<"Y">>, L0)),
+ ?assertEqual(length(L0), 2),
+
+ M1 = riakc_map:update({<<"set">>, set},
+ fun(S) -> riakc_set:del_element(<<"X">>,
+ riakc_set:add_element(<<"Z">>, S)) end,
+ M0),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M1)),
+ {ok, M2} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L1 = riakc_map:fetch({<<"set">>, set}, M2),
+
+ ?assert(lists:member(<<"Y">>, L1)),
+ ?assert(lists:member(<<"Z">>, L1)),
+ ?assertEqual(length(L1), 2)
+ end)},
+ {"increment nested counter",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(riakc_map:update({<<"counter">>, counter},
+ fun(C) ->
+ riakc_counter:increment(5, C)
+ end, riakc_map:new()))),
+ {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ C0 = riakc_map:fetch({<<"counter">>, counter}, M0),
+ ?assertEqual(C0, 5),
+
+ M1 = riakc_map:update({<<"counter">>, counter},
+ fun(C) -> riakc_counter:increment(200, C) end,
+ M0),
+ M2 = riakc_map:update({<<"counter">>, counter},
+ fun(C) -> riakc_counter:decrement(117, C) end,
+ M1),
+ M3 = riakc_map:update({<<"counter">>, counter},
+ fun(C) -> riakc_counter:increment(256, C) end,
+ M2),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M3)),
+ {ok, M4} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ C1 = riakc_map:fetch({<<"counter">>, counter}, M4),
+ ?assertEqual(C1, 344)
+ end)},
+ {"updated nested lww register",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ %% The word "stone" translated into Russian and Thai
+ StoneInRussian = [1051,1102,1082,32,1082,1072,1084,1085,1077,1091,1083,1086,
+ 1074,1080,1090,1077,1083,1103],
+ StoneInThai = [3627,3636,3609],
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>},
+ <<"key">>,
+ riakc_map:to_op(
+ riakc_map:update(
+ {<<"register">>, register},
+ fun(R) ->
+ riakc_register:set(
+ term_to_binary({"barney", "rubble", StoneInRussian}),
+ R)
+ end, riakc_map:new()))),
+ {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ R0 = riakc_map:fetch({<<"register">>, register}, M0),
+ ?assertEqual(binary_to_term(R0), {"barney", "rubble", StoneInRussian}),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>},
+ <<"key">>,
+ riakc_map:to_op(
+ riakc_map:update(
+ {<<"register">>, register},
+ fun(R) ->
+ riakc_register:set(
+ term_to_binary({"barney", "rubble", StoneInThai}),
+ R)
+ end, M0))),
+
+ {ok, M1} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ R1 = riakc_map:fetch({<<"register">>, register}, M1),
+ ?assertEqual(binary_to_term(R1), {"barney", "rubble", StoneInThai})
+ end)},
+ {"throw exception for undefined context for delete",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ ?assertThrow(context_required, riakc_set:del_element(<<"X">>,
+ riakc_set:add_element(<<"X">>,
+ riakc_set:new()))),
+ ?assertThrow(context_required, riakc_map:erase({<<"counter">>, counter}, riakc_map:new())),
+ ?assertThrow(context_required, riakc_map:erase({<<"set">>, set}, riakc_map:new())),
+ ?assertThrow(context_required, riakc_map:erase({<<"map">>, map}, riakc_map:new())),
+ ?assertThrow(context_required, riakc_map:update({<<"set">>, set}, fun(S) -> riakc_set:del_element(<<"Y">>, S) end, riakc_map:new())),
+ ?assertThrow(context_required, riakc_flag:disable(riakc_flag:new()))
+ end)},
+ {"delete bogus item from set",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 1),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:del_element(<<"Y">>, S0))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 1)
+ end)},
+ {"add redundant item to set",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:new()))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 1),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>, S0))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 1)
+ end)},
+ {"add and remove redundant item to/from set",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, riakc_set:new())))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assert(riakc_set:is_element(<<"Y">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 2),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:del_element(<<"X">>, riakc_set:add_element(<<"X">>, S0)))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S1)),
+ ?assert(riakc_set:is_element(<<"Y">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 2)
+ end)},
+ {"remove then add redundant item from/to set",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, riakc_set:new())))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assert(riakc_set:is_element(<<"Y">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 2),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>, riakc_set:del_element(<<"X">>, S0)))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S1)),
+ ?assert(riakc_set:is_element(<<"Y">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 2)
+ end)},
+ {"remove item from set with outdated context",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, riakc_set:new())))),
+ {ok, S0} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S0)),
+ ?assert(riakc_set:is_element(<<"Y">>, S0)),
+ ?assertEqual(riakc_set:size(S0), 2),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:add_element(<<"Z">>, riakc_set:new()))),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"sets">>, <<"bucket">>}, <<"key">>,
+ riakc_set:to_op(riakc_set:del_element(<<"Z">>, S0))),
+ {ok, S1} = riakc_pb_socket:fetch_type(Pid, {<<"sets">>, <<"bucket">>}, <<"key">>),
+ ?assert(riakc_set:is_element(<<"X">>, S1)),
+ ?assert(riakc_set:is_element(<<"Y">>, S1)),
+ ?assert(riakc_set:is_element(<<"Z">>, S1)),
+ ?assertEqual(riakc_set:size(S1), 3)
+ end)},
+ {"add item to nested set in map while also removing set",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(riakc_map:update({<<"set">>, set},
+ fun(S) ->
+ riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, S))
+ end, riakc_map:new()))),
+ {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L0 = riakc_map:fetch({<<"set">>, set}, M0),
+ ?assert(lists:member(<<"X">>, L0)),
+ ?assert(lists:member(<<"Y">>, L0)),
+ ?assertEqual(length(L0), 2),
+
+ M1 = riakc_map:update({<<"set">>, set},
+ fun(S) -> riakc_set:add_element(<<"Z">>, S) end,
+ M0),
+ M2 = riakc_map:erase({<<"set">>, set}, M1),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M2)),
+ {ok, M3} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L1 = riakc_map:fetch({<<"set">>, set}, M3),
+
+ ?assert(lists:member(<<"Z">>, L1)),
+ ?assertEqual(length(L1), 1)
+ end)},
+ {"increment nested counter in map while also removing counter",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(riakc_map:update({<<"counter">>, counter},
+ fun(C) ->
+ riakc_counter:increment(5, C)
+ end, riakc_map:new()))),
+ {ok, M0} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ C0 = riakc_map:fetch({<<"counter">>, counter}, M0),
+ ?assertEqual(C0, 5),
+
+ M1 = riakc_map:update({<<"counter">>, counter},
+ fun(C) -> riakc_counter:increment(2, C) end,
+ M0),
+ M2 = riakc_map:erase({<<"counter">>, counter}, M1),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M2)),
+ {ok, M3} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ C1 = riakc_map:fetch({<<"counter">>, counter}, M3),
+
+ %% Expected result depends on combination of vnodes involved, so accept either answer
+ ?assert(C1 =:= 2 orelse C1 =:= 7)
+ end)},
+ {"add item to nested set in nested map in map while also removing nested map",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ M0 = riakc_map:update({<<"map">>, map},
+ fun(M) ->
+ riakc_map:update({<<"set">>, set},
+ fun(S) ->
+ riakc_set:add_element(<<"X">>,
+ riakc_set:add_element(<<"Y">>, S))
+ end,
+ M)
+ end,
+ riakc_map:new()),
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M0)),
+
+ {ok, M1} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L0 = orddict:fetch({<<"set">>, set}, riakc_map:fetch({<<"map">>, map}, M1)),
+
+ ?assert(lists:member(<<"X">>, L0)),
+ ?assert(lists:member(<<"Y">>, L0)),
+ ?assertEqual(length(L0), 2),
+
+ M2 = riakc_map:update({<<"map">>, map},
+ fun(M) -> riakc_map:update({<<"set">>, set},
+ fun(S) -> riakc_set:add_element(<<"Z">>, S) end,
+ M)
+ end,
+ M1),
+ M3 = riakc_map:erase({<<"map">>, map}, M2),
+
+ ok = riakc_pb_socket:update_type(Pid,
+ {<<"maps">>, <<"bucket">>}, <<"key">>,
+ riakc_map:to_op(M3)),
+ {ok, M4} = riakc_pb_socket:fetch_type(Pid, {<<"maps">>, <<"bucket">>}, <<"key">>),
+ L1 = orddict:fetch({<<"set">>, set}, riakc_map:fetch({<<"map">>, map}, M4)),
+
+ ?assert(lists:member(<<"Z">>, L1)),
+ ?assertEqual(length(L1), 1)
+ end)},
+ {"get-preflist",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ Node = atom_to_binary(riakc_test_utils:test_riak_node(), latin1),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, ServerInfo} = riakc_pb_socket:get_server_info(Pid),
+ [{node, _}, {server_version, SV}] = lists:sort(ServerInfo),
+ Ver = binary_to_list(SV),
+ if Ver < "2.1" ->
+ ?debugFmt("preflists are not supported in version ~p", [Ver]);
+ true ->
+ {ok, Preflist} = riakc_pb_socket:get_preflist(Pid, <<"b">>, <<"f">>),
+ ?assertEqual([#preflist_item{partition = 52,
+ node = Node,
+ primary = true},
+ #preflist_item{partition = 53,
+ node = Node,
+ primary = true},
+ #preflist_item{partition = 54,
+ node = Node,
+ primary = true}],
+ Preflist)
+ end
+ end)},
+ {"add redundant and multiple items to hll(set)",
+ ?_test(begin
+ riakc_test_utils:reset_riak(),
+ {ok, Pid} = riakc_test_utils:start_link(),
+ HB = {<<"hlls">>, <<"bucket">>},
+ HK = <<"key">>,
+ case riakc_pb_socket:get_bucket(Pid, HB) of
+ {ok, _} ->
+ Hll0 = riakc_hll:new(),
+ HllOp0 = riakc_hll:to_op(riakc_hll:add_elements([<<"X">>, <<"Y">>], Hll0)),
+ ok = riakc_pb_socket:update_type(Pid, HB, HK, HllOp0),
+ {ok, Hll1} = riakc_pb_socket:fetch_type(Pid, HB, HK),
+ HllOp1 = riakc_hll:to_op(riakc_hll:add_element(<<"X">>, Hll1)),
+ ok = riakc_pb_socket:update_type(Pid, HB, HK, HllOp1),
+ {ok, Hll2} = riakc_pb_socket:fetch_type(Pid, HB, HK),
+ ?assertEqual(riakc_hll:value(Hll1), 2),
+ ?assert(riakc_hll:is_type(Hll2)),
+ Value = riakc_hll:value(Hll2),
+ ?assertEqual(Value, 2),
+ %% Make sure card and value are the same
+ ?assertEqual(riakc_hll:card(Hll2), Value);
+ Rsp ->
+ ?debugFmt("hlls bucket is not present, skipping (~p)", [Rsp])
+ end
+ end)}
+ ].
+
+integration_test_() ->
+ SetupFun = fun() ->
+ %% Make sure the compiled client modules are on the code path
+ code:add_pathz("../ebin"),
+ ok = riakc_test_utils:maybe_start_network()
+ end,
+ CleanupFun = fun(_) -> net_kernel:stop() end,
+ GenFun = fun() ->
+ case catch net_adm:ping(riakc_test_utils:test_riak_node()) of
+ pong -> integration_tests();
+ _ ->
+ ?debugMsg("Skipped - needs live server"),
+ []
+ end
+ end,
+ {setup, SetupFun, CleanupFun, {generator, GenFun}}.
+-endif.
diff --git a/test/riakc_test_utils.erl b/test/riakc_test_utils.erl
new file mode 100644
index 00000000..a356a6e6
--- /dev/null
+++ b/test/riakc_test_utils.erl
@@ -0,0 +1,246 @@
+%% -------------------------------------------------------------------
+%%
+%% riakc_test_utils: shared helpers for the protocol buffer client tests
+%%
+%% Copyright (c) 2007-2016 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+-ifdef(TEST).
+
+-module(riakc_test_utils).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+
+%% Get the test host - check env RIAK_TEST_PB_HOST then env 'RIAK_TEST_HOST_1'
+%% falling back to 127.0.0.1
+test_ip() ->
+ case os:getenv("RIAK_TEST_PB_HOST") of
+ false ->
+ case os:getenv("RIAK_TEST_HOST_1") of
+ false ->
+ "127.0.0.1";
+ Host ->
+ Host
+ end;
+ Host ->
+ Host
+ end.
+
+%% Test port - check env RIAK_TEST_PBC_1
+test_port() ->
+ case os:getenv("RIAK_TEST_PBC_1") of
+ false ->
+ 8087;
+ PortStr ->
+ list_to_integer(PortStr)
+ end.
+
+%% Riak node under test - used to setup/configure/tweak it for tests
+test_riak_node() ->
+ case os:getenv("RIAK_TEST_NODE_1") of
+ false ->
+ 'riak@127.0.0.1';
+ NodeStr ->
+ list_to_atom(NodeStr)
+ end.
+
+%% Name of the eunit node for distributed erlang
+test_eunit_node() ->
+ case os:getenv("RIAK_EUNIT_NODE") of
+ false ->
+ 'eunit@127.0.0.1';
+ EunitNodeStr ->
+ list_to_atom(EunitNodeStr)
+ end.
+
+%% Cookie for distributed erlang
+test_cookie() ->
+ case os:getenv("RIAK_TEST_COOKIE") of
+ false ->
+ 'riak';
+ CookieStr ->
+ list_to_atom(CookieStr)
+ end.
+
+start_link() ->
+ riakc_pb_socket:start_link(test_ip(), test_port()).
+
+start_link(Opts) ->
+ riakc_pb_socket:start_link(test_ip(), test_port(), Opts).
+
+%% Get the riak version from the init boot script, turn it into a list
+%% of integers.
+riak_version() ->
+ StrVersion = element(2, rpc:call(test_riak_node(), init, script_id, [])),
+ {match, [Major, Minor, Patch|_]} = re:run(StrVersion, "\\d+", [global, {capture, first, list}]),
+ [ list_to_integer(V) || [V] <- [Major, Minor, Patch]].
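+
+%% Minimal sanity check of the version-parsing regex above. The sample
+%% script id string "riak-2.1.4" is an assumed format, not read from a
+%% live node, so this runs without an rpc connection.
+riak_version_regex_test() ->
+    {match, [Major, Minor, Patch|_]} =
+        re:run("riak-2.1.4", "\\d+", [global, {capture, first, list}]),
+    ?assertEqual([2, 1, 4], [list_to_integer(V) || [V] <- [Major, Minor, Patch]]).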
+
+%% Compare two three-part version lists.
+%% Returns `greater', `less', or `equal'.
+compare_versions([M1,N1,P1], [M2,N2,P2]) ->
+ V1 = (M1*1000000)+(N1*1000)+(P1),
+ V2 = (M2*1000000)+(N2*1000)+(P2),
+ case {V1 > V2, V1 == V2} of
+ {true,_} ->
+ greater;
+ {false,false} ->
+ less;
+ {false,true} ->
+ equal
+ end.
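+
+%% Illustrative check of the ordering semantics of compare_versions/2;
+%% the version triples are arbitrary examples.
+compare_versions_test() ->
+    ?assertEqual(greater, compare_versions([2, 1, 4], [1, 2, 0])),
+    ?assertEqual(less, compare_versions([1, 1, 9], [1, 2, 0])),
+    ?assertEqual(equal, compare_versions([1, 2, 0], [1, 2, 0])).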
+
+%% Retry `Fun' until it returns true, making at most `Retry' attempts and
+%% waiting `Delay' milliseconds between attempts. This is our
+%% eventual-consistency bread and butter.
+wait_until(Fun) when is_function(Fun) ->
+ wait_until(Fun, 20, 500).
+wait_until(_, 0, _) ->
+ fail;
+wait_until(Fun, Retry, Delay) when Retry > 0 ->
+ Pass = Fun(),
+ case Pass of
+ true ->
+ ok;
+ _ ->
+ timer:sleep(Delay),
+ wait_until(Fun, Retry-1, Delay)
+ end.
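+
+%% Hypothetical helper showing how wait_until/3 is typically used with the
+%% client: poll until a key becomes readable. Bucket and Key are
+%% placeholders; nothing in the suite calls this function.
+wait_until_readable(Pid, Bucket, Key) ->
+    wait_until(fun() ->
+                       case riakc_pb_socket:get(Pid, Bucket, Key) of
+                           {ok, _Obj} -> true;
+                           _ -> false
+                       end
+               end, 20, 500).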
+
+%% Resets the riak node
+reset_riak() ->
+ %% sleep because otherwise we're going to kill the vnodes too fast
+ %% for the supervisor's maximum restart frequency, which will bring
+ %% down the entire node
+ ?assertEqual(ok, maybe_start_network()),
+ case compare_versions(riak_version(), [1,2,0]) of
+ less ->
+ reset_riak_legacy();
+ _ ->
+ reset_riak_12()
+ end.
+
+reset_solr(Pid) ->
+ %% clear indexes
+ {ok, Indexes} = riakc_pb_socket:list_search_indexes(Pid),
+ [ riakc_pb_socket:delete_search_index(Pid, proplists:get_value(index,Index)) || Index <- Indexes ],
+ wait_until( fun() ->
+ {ok, []} == riakc_pb_socket:list_search_indexes(Pid)
+ end, 20, 1000),
+ ok.
+
+%% Resets a Riak 1.2+ node, which can run the memory backend in 'test'
+%% mode.
+reset_riak_12() ->
+ set_test_backend(),
+ ok = rpc:call(test_riak_node(), riak_kv_memory_backend, reset, []),
+ reset_ring().
+
+%% Sets up the memory/test backend, leaving it alone if already set properly.
+set_test_backend() ->
+ Env = rpc:call(test_riak_node(), application, get_all_env, [riak_kv]),
+ Backend = proplists:get_value(storage_backend, Env),
+ Test = proplists:get_value(test, Env),
+ case {Backend, Test} of
+ {riak_kv_memory_backend, true} ->
+ ok;
+ _ ->
+ ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, storage_backend, riak_kv_memory_backend]),
+ ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, test, true]),
+ Vnodes = rpc:call(test_riak_node(), riak_core_vnode_manager, all_vnodes, [riak_kv_vnode]),
+ [ ok = rpc:call(test_riak_node(), supervisor, terminate_child, [riak_core_vnode_sup, Pid]) ||
+ {_, _, Pid} <- Vnodes ]
+ end.
+
+%% Resets a Riak 1.1 and earlier node.
+reset_riak_legacy() ->
+ timer:sleep(500),
+ %% Until there is a good way to empty the vnodes, require the
+ %% test to run with ETS and kill the vnode master/sup to empty all the ETS tables
+ %% and the ring manager to remove any bucket properties
+ ok = rpc:call(test_riak_node(), application, set_env, [riak_kv, storage_backend, riak_kv_memory_backend]),
+
+ %% Restart the vnodes so they come up with ETS
+ ok = supervisor:terminate_child({riak_kv_sup, test_riak_node()}, riak_kv_vnode_master),
+ ok = supervisor:terminate_child({riak_core_sup, test_riak_node()}, riak_core_vnode_sup),
+ {ok, _} = supervisor:restart_child({riak_core_sup, test_riak_node()}, riak_core_vnode_sup),
+ {ok, _} = supervisor:restart_child({riak_kv_sup, test_riak_node()}, riak_kv_vnode_master),
+
+ %% Clear the MapReduce cache
+ ok = rpc:call(test_riak_node(), riak_kv_mapred_cache, clear, []),
+
+ %% Now reset the ring so bucket properties are default
+ reset_ring().
+
+%% Resets the ring to a fresh one, effectively deleting any bucket properties.
+reset_ring() ->
+ Ring = rpc:call(test_riak_node(), riak_core_ring, fresh, []),
+ ok = rpc:call(test_riak_node(), riak_core_ring_manager, set_my_ring, [Ring]).
+
+
+%% Finds the pid of the PB listener process
+riak_pb_listener_pid() ->
+ {Children, Proc} = case compare_versions(riak_version(), [1,2,0]) of
+ less ->
+ {supervisor:which_children({riak_kv_sup, test_riak_node()}),
+ riak_kv_pb_listener};
+ _ ->
+ {supervisor:which_children({riak_api_sup, test_riak_node()}),
+ riak_api_pb_listener}
+ end,
+ hd([Pid || {_,Pid,_,[Mod]} <- Children, Mod == Proc]).
+
+pause_riak_pb_listener() ->
+ Pid = riak_pb_listener_pid(),
+ rpc:call(test_riak_node(), sys, suspend, [Pid]).
+
+resume_riak_pb_listener() ->
+ Pid = riak_pb_listener_pid(),
+ rpc:call(test_riak_node(), sys, resume, [Pid]).
+
+kill_riak_pb_sockets() ->
+ Children = case compare_versions(riak_version(), [1,2,0]) of
+ less ->
+ supervisor:which_children({riak_kv_pb_socket_sup, test_riak_node()});
+ _ ->
+ supervisor:which_children({riak_api_pb_sup, test_riak_node()})
+ end,
+ case Children of
+ [] ->
+ ok;
+ [_|_] ->
+ Pids = [Pid || {_,Pid,_,_} <- Children],
+ [rpc:call(test_riak_node(), erlang, exit, [Pid, kill]) || Pid <- Pids],
+ erlang:yield(),
+ kill_riak_pb_sockets()
+ end.
+
+maybe_start_network() ->
+ %% Try to spin up net_kernel
+ os:cmd("epmd -daemon"),
+ case net_kernel:start([test_eunit_node(), longnames]) of
+ {ok, _} ->
+ erlang:set_cookie(test_riak_node(), test_cookie()),
+ ok;
+ {error, {already_started, _}} ->
+ ok;
+ X ->
+ X
+ end.
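+
+%% Sketch of how the distribution helpers fit together: bring up
+%% net_kernel, then ping the Riak node under test. Purely illustrative;
+%% the integration fixtures perform the equivalent steps themselves.
+ping_test_node() ->
+    ok = maybe_start_network(),
+    net_adm:ping(test_riak_node()).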
+
+-endif.
diff --git a/test/riakc_ts_tests.erl b/test/riakc_ts_tests.erl
new file mode 100644
index 00000000..af8d875d
--- /dev/null
+++ b/test/riakc_ts_tests.erl
@@ -0,0 +1,99 @@
+%% -------------------------------------------------------------------
+%%
+%% riakc_ts_tests: timeseries client tests
+%%
+%% Copyright (c) 2016 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+-ifdef(TEST).
+
+-module(riakc_ts_tests).
+
+-compile(export_all).
+
+-include("riakc.hrl").
+
+-include_lib("eunit/include/eunit.hrl").
+
+-define(TABLE, <<"GeoCheckin">>).
+-define(TWENTY_MINS_AGO, 1443795700987).
+-define(FIFTEEN_MINS_AGO, 1443796000987).
+-define(TEN_MINS_AGO, 1443796300987).
+-define(FIVE_MINS_AGO, 1443796600987).
+-define(NOW, 1443796900987).
+
+integration_tests({ok, _Props}) ->
+ [{"ping",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ ?assertEqual(pong, riakc_pb_socket:ping(Pid)),
+ ?assertEqual(true, riakc_pb_socket:is_connected(Pid)),
+ riakc_pb_socket:stop(Pid)
+ end)},
+ {"put-get",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Data = [{<<"hash1">>, <<"user2">>, ?TWENTY_MINS_AGO, <<"hurricane">>, 82.3},
+ {<<"hash1">>, <<"user2">>, ?FIFTEEN_MINS_AGO, <<"rain">>, 79.0},
+ {<<"hash1">>, <<"user2">>, ?FIVE_MINS_AGO, <<"wind">>, []},
+ {<<"hash1">>, <<"user2">>, ?NOW, <<"snow">>, 20.1}],
+ ok = riakc_ts:put(Pid, ?TABLE, Data),
+ Key = [<<"hash1">>, <<"user2">>, ?FIVE_MINS_AGO],
+ {ok, {_C, _R}} = riakc_ts:get(Pid, ?TABLE, Key, []),
+ riakc_pb_socket:stop(Pid)
+ end)},
+ {"query-describe",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, {ColumnNames, _Rows}} = riakc_ts:'query'(Pid, <<"DESCRIBE GeoCheckin">>),
+ ?assert(length(ColumnNames) >= 5),
+ riakc_pb_socket:stop(Pid)
+ end)},
+ {"list-keys",
+ ?_test(begin
+ {ok, Pid} = riakc_test_utils:start_link(),
+ {ok, ReqId} = riakc_ts:stream_list_keys(Pid, ?TABLE),
+ {ok, Keys} = riakc_utils:wait_for_list(ReqId),
+ ?assert(length(Keys) > 0),
+ riakc_pb_socket:stop(Pid)
+ end)}
+ ];
+integration_tests(Error) ->
+ ?debugFmt("~s table does not exist: ~p", [?TABLE, Error]),
+ [].
+
+generate_integration_tests() ->
+ {ok, Pid} = riakc_test_utils:start_link(),
+ Tests = integration_tests(riakc_pb_socket:get_bucket_type(Pid, ?TABLE)),
+ riakc_pb_socket:stop(Pid),
+ Tests.
+
+integration_test_() ->
+ SetupFun = fun() ->
+ %% Make sure the compiled client modules are on the code path
+ code:add_pathz("../ebin"),
+ ok = riakc_test_utils:maybe_start_network()
+ end,
+ CleanupFun = fun(_) -> net_kernel:stop() end,
+ GenFun = fun() ->
+ case catch net_adm:ping(riakc_test_utils:test_riak_node()) of
+ pong -> generate_integration_tests();
+ _ -> []
+ end
+ end,
+ {setup, SetupFun, CleanupFun, {generator, GenFun}}.
+-endif.
diff --git a/test/riakc_utils_tests.erl b/test/riakc_utils_tests.erl
new file mode 100644
index 00000000..b5d36ce2
--- /dev/null
+++ b/test/riakc_utils_tests.erl
@@ -0,0 +1,34 @@
+%% -------------------------------------------------------------------
+%%
+%% riakc_utils_tests
+%%
+%% Copyright (c) 2016 Basho Technologies, Inc. All Rights Reserved.
+%%
+%% This file is provided to you under the Apache License,
+%% Version 2.0 (the "License"); you may not use this file
+%% except in compliance with the License. You may obtain
+%% a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing,
+%% software distributed under the License is distributed on an
+%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+%% KIND, either express or implied. See the License for the
+%% specific language governing permissions and limitations
+%% under the License.
+%%
+%% -------------------------------------------------------------------
+-ifdef(TEST).
+
+-module(riakc_utils_tests).
+
+-compile(export_all).
+
+-include_lib("eunit/include/eunit.hrl").
+
+bad_unicode_binary_test() ->
+ S = <<"\xa0\xa1">>,
+ ?assertThrow({unicode_error, _Msg}, riakc_utils:characters_to_unicode_binary(S)).
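+
+%% Companion check for the success path. The exact return value is an
+%% assumption: characters_to_unicode_binary/1 is expected to hand back a
+%% UTF-8 binary for valid input.
+good_unicode_binary_test() ->
+    ?assertEqual(<<"abc">>, riakc_utils:characters_to_unicode_binary("abc")).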
+
+-endif.
diff --git a/tools b/tools
new file mode 160000
index 00000000..419dbc5b
--- /dev/null
+++ b/tools
@@ -0,0 +1 @@
+Subproject commit 419dbc5b8a16d56692ef5385e23ed675e289c5b2
diff --git a/tools.mk b/tools.mk
index 98a3a2ce..a7c11a07 100644
--- a/tools.mk
+++ b/tools.mk
@@ -41,7 +41,7 @@ test: compile
@echo " Tests will _not_ run correctly if allow_mult=true."
@echo " Also, it would be great to document here what other"
@echo " Riak configuration bits are prerequisites for running"
- @echo " the riak_pb_socket:live_node_tests suite..........."
+ @echo " the integration test suite..........."
@echo ""
${REBAR} eunit skip_deps=true