explicit_hash_key() = binary() | undefined
get_records_limit() = 1..10000
ordering() = binary() | undefined
partition_key() = binary()
payload() = binary() | string()
put_records_item() = {Data::string(), PartitionKey::string()} | {Data::string(), ExplicitHashKey::string(), PartitionKey::string()}
put_records_items() = [put_records_item()]
configure(AccessKeyID::string(), SecretAccessKey::string()) -> ok
configure(AccessKeyID::string(), SecretAccessKey::string(), Host::string()) -> ok
configure(AccessKeyID::string(), SecretAccessKey::string(), Host::string(), Port::non_neg_integer()) -> ok
create_stream(StreamName::string(), ShardCount::1..100000) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_CreateStream.html
This operation adds a new Amazon Kinesis stream to your AWS account.
erlcloud_kinesis:create_stream(<<"test">>, 2).
{ok,{incomplete,#Fun<jsx_decoder.1.688044>}}
create_stream(StreamName::string(), ShardCount::1..100000, Config::aws_config()) -> proplist()
delete_stream(StreamName::string()) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_DeleteStream.html
This operation deletes a stream and all of its shards and data.
erlcloud_kinesis:delete_stream(<<"test">>).
{ok,{incomplete,#Fun<jsx_decoder.1.688044>}}
delete_stream(StreamName::string(), Config::aws_config()) -> proplist()
describe_stream(StreamName::string()) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html
This operation returns the following information about the stream: the current status of the stream, the stream Amazon Resource Name (ARN), and an array of shard objects that comprise the stream.
erlcloud_kinesis:describe_stream(<<"staging">>).
{ok,[{<<"StreamDescription">>,
[{<<"HasMoreShards">>,false},
{<<"Shards">>,
[[{<<"HashKeyRange">>,
[{<<"EndingHashKey">>,
<<"170141183460469231731687303715884105727">>},
{<<"StartingHashKey">>,<<"0">>}]},
{<<"SequenceNumberRange">>,
[{<<"StartingSequenceNumber">>,
<<"495372647485535624187345081927970814089871018992"...>>}]},
{<<"ShardId">>,<<"shardId-000000000000">>}],
[{<<"HashKeyRange">>,
[{<<"EndingHashKey">>,
<<"340282366920938463463374607431768211455">>},
{<<"StartingHashKey">>,
<<"170141183460469231731687303715884105728">>}]},
{<<"SequenceNumberRange">>,
[{<<"StartingSequenceNumber">>,
<<"49537264748575863163933038815938617127259750"...>>}]},
{<<"ShardId">>,<<"shardId-000000000001">>}]]},
{<<"StreamARN">>,
<<"arn:aws:kinesis:us-east-1:821148768124:stream/staging">>},
{<<"StreamName">>,<<"staging">>},
{<<"StreamStatus">>,<<"ACTIVE">>}]}]}
describe_stream(StreamName::string(), Config::get_records_limit() | aws_config()) -> proplist()
describe_stream(StreamName::string(), Limit::get_records_limit(), Config::string() | aws_config()) -> proplist()
describe_stream(StreamName::string(), Limit::get_records_limit(), ExcludeShard::string(), Config::aws_config()) -> proplist()
get_records(ShardIterator::string()) -> {ok, [proplist()]} | {error, any()}
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html
This operation returns one or more data records from a shard. A GetRecords operation request can retrieve up to 10 MB of data.
{ok, [{_, A2}]} = erlcloud_kinesis:get_shard_iterator(<<"test">>, <<"shardId-000000000000">>, <<"TRIM_HORIZON">>).
{ok,[{<<"ShardIterator">>,
<<"AAAAAAAAAAEuncwaAk+GTC2TIdmdg5w6dIuZ4Scu6vaMGPtaPUfopvw9cBm2NM3Rlj9WyI5JFJr2ahuSh3Z187AdW4Lug86E"...>>}]}
erlcloud_kinesis:get_records(A2).
{ok,[{<<"NextShardIterator">>,
<<"AAAAAAAAAAEkuCmrC+QDW1gUywyu7G8GxvRyM6GSMkcHQ9wrvCJBW87mjn9C8YEckkipaoJySwgKXMmn1BwSPjnjiUCsu6pc"...>>},
{<<"Records">>,
[[{<<"Data">>,<<"asdasd">>},
{<<"PartitionKey">>,<<"key">>},
{<<"SequenceNumber">>,
<<"49537292605574028653758531131893428543501381406818304001">>}],
[{<<"Data">>,<<"asdasd 213123123">>},
{<<"PartitionKey">>,<<"key">>},
{<<"SequenceNumber">>,
<<"49537292605574028653758541428570459745183078607853977601">>}]]}]}
get_records(ShardIterator::string(), Config::get_records_limit() | aws_config()) -> {ok, [proplist()]} | {error, any()}
get_records(ShardIterator::binary(), Limit::get_records_limit(), Config::aws_config()) -> {ok, [proplist()]} | {error, any()}
get_records(ShardIterator::binary(), Limit::get_records_limit(), Options::proplist(), Config::aws_config()) -> {ok, [proplist()] | binary()} | {error, any()}
get_shard_iterator(StreamName::string(), ShardId::string(), ShardIteratorType::string()) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html
This operation returns a shard iterator in ShardIterator. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially.
erlcloud_kinesis:get_shard_iterator(<<"test">>, <<"shardId-000000000001">>, <<"TRIM_HORIZON">>).
{ok,[{<<"ShardIterator">>,
<<"AAAAAAAAAAFHJejL6/AjDShV3pIXsxYZT7Xj2G6EHxokHqT2D1stIOVYUEyprlUGWUepKqUDaR0+hB6qTlKvZa+fsBRqgHi4"...>>}]}
get_shard_iterator(StreamName::string(), ShardId::string(), ShardIteratorType::string(), Config::string() | aws_config()) -> proplist()
get_shard_iterator(StreamName::string(), ShardId::string(), ShardIteratorType::string(), StartingSequenceNumber::string(), Config::aws_config()) -> proplist()
list_streams() -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListStreams.html
This operation returns an array of the names of all the streams that are associated with the AWS account making the ListStreams request.
erlcloud_kinesis:list_streams().
{ok,[{<<"HasMoreStreams">>,false},
{<<"StreamNames">>,[<<"staging">>]}]}
list_streams(Config::string() | aws_config()) -> proplist()
list_streams(ExclusiveStartStreamName::string(), Config::1..100 | aws_config()) -> proplist()
list_streams(ExclusiveStartStreamName::string(), Limit::1..100, Config::aws_config()) -> proplist()
merge_shards(StreamName::string(), AdjacentShardToMerge::string(), ShardToMerge::string()) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_MergeShards.html
This operation merges two adjacent shards in a stream and combines them into a single shard to reduce the stream's capacity to ingest and transport data. Two shards are considered adjacent if the union of the hash key ranges for the two shards form a contiguous set with no gaps.
erlcloud_kinesis:merge_shards(<<"test">>, <<"shardId-000000000001">>, <<"shardId-000000000003">>).
{ok,{incomplete,#Fun<jsx_decoder.1.688044>}}
merge_shards(StreamName::string(), AdjacentShardToMerge::string(), ShardToMerge::string(), Config::aws_config()) -> proplist()
new(AccessKeyID::string(), SecretAccessKey::string()) -> aws_config()
new(AccessKeyID::string(), SecretAccessKey::string(), Host::string()) -> aws_config()
put_record(StreamName::binary(), PartitionKey::partition_key(), Data::payload()) -> {ok, proplist()} | {error, any()}
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html
This operation puts a data record into an Amazon Kinesis stream from a producer.
erlcloud_kinesis:put_record(<<"test">>, <<"key">>, <<"asdasd">>).
{ok,[{<<"SequenceNumber">>,
<<"49537292605574028653758531131893428543501381406818304001">>},
{<<"ShardId">>,<<"shardId-000000000000">>}]}
erlcloud_kinesis:put_record(<<"test">>, <<"key">>, <<"asdasd 213123123">>).
{ok,[{<<"SequenceNumber">>,
<<"49537292605574028653758541428570459745183078607853977601">>},
{<<"ShardId">>,<<"shardId-000000000000">>}]}
put_record(StreamName::binary(), PartitionKey::partition_key(), Data::payload(), Config::explicit_hash_key() | aws_config()) -> {ok, proplist()} | {error, any()}
put_record(StreamName::binary(), PartitionKey::partition_key(), Data::payload(), ExplicitHashKey::explicit_hash_key(), Config::ordering() | aws_config()) -> {ok, proplist()} | {error, any()}
put_record(StreamName::binary(), PartitionKey::partition_key(), Data::payload(), ExplicitHashKey::explicit_hash_key(), Ordering::ordering(), Config::proplist() | aws_config()) -> {ok, proplist()} | {error, any()}
put_record(StreamName::binary(), PartitionKey::partition_key(), Data::payload(), ExplicitHashKey::explicit_hash_key(), Ordering::ordering(), Options::proplist(), Config::aws_config()) -> {ok, proplist()} | {error, any()}
put_records(StreamName::string(), Items::put_records_items()) -> proplist()
put_records(StreamName::string(), Items::put_records_items(), Config) -> proplist()
split_shards(StreamName::string(), ShardToSplit::string(), NewStartingHashKey::string()) -> proplist()
Kinesis API: http://docs.aws.amazon.com/kinesis/latest/APIReference/API_SplitShard.html
This operation splits a shard into two new shards in the stream, to increase the stream's capacity to ingest and transport data.
erlcloud_kinesis:split_shards(<<"test">>, <<"shardId-000000000000">>, <<"10">>).
{ok,{incomplete,#Fun<jsx_decoder.1.688044>}}
split_shards(StreamName::string(), ShardToSplit::string(), NewStartingHashKey::string(), Config::aws_config()) -> proplist()
Generated by EDoc, Dec 23 2016, 12:25:53.