Commit 92761964 authored by Paras Garg

1. Added JNI
2. Added YCSB basic code
3. Added future for client
4. Added properties in client

parent b9e98c52
@@ -45,9 +45,10 @@ count:
 Jclient: $(OBJS)
-	$(CXX) -o libHelloImpl.so -shared .build/Hello.o $(LIBS)
+	$(CXX) -o libhpdosclient.so -L/usr/local/lib -shared $^ $(LIBS)
 	@echo "jclient "$<" successfully!"
+	sudo cp libhpdosclient.so /usr/lib
+	@echo "Copied libhpdosclient.so to /usr/lib"
 #jcompile: javac $(JSRCS) -d JBUILD
 JniHeader:
...
Steps to build the JNI client:
> make JniHeader <br>
> make Jclient <br>
> java -cp jsrc JClient
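
For orientation, the snippet below is a minimal sketch of how a JNI wrapper typically loads the shared library built above; the class and native method names are hypothetical, not the actual jsrc sources.

```java
public class JniLoadExample {
  static {
    // Resolves libhpdosclient.so from java.library.path; on most
    // Linux setups /usr/lib is searched, which is why the Makefile
    // copies the library there.
    System.loadLibrary("hpdosclient");
  }

  // Implemented in the C++ library via the generated JNI header.
  private native String get(String key);

  public static void main(String[] args) {
    System.out.println(new JniLoadExample().get("hello"));
  }
}
```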
Running YCSB:
> mvn compile <br>
> ./bin/ycsb load hpdos -P workloads/workloadb -threads 1

To build only the hpdos binding:
> mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
To do:
- Delete client endpoint on close
- Threading in client and hashing in client
- Resolve double delete for get and put
- Add cache and support for invalidation
- Interface the client API through EndpointGroup
- EndpointGroup to manage the list of servers and caches, and invalidation (see the sketch below)
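
Since several items above center on the endpoint group, here is a purely hypothetical sketch of the responsibilities it might carry; every name below is illustrative, not the actual hpdos client API.

```java
import java.util.List;

// Hypothetical interface: illustrates the to-do items above
// (managing servers/caches and propagating invalidations).
public interface EndpointGroup {
  /** Servers this client can route requests to. */
  List<String> servers();

  /** Register a cache so it receives invalidation callbacks. */
  void registerCache(CacheListener cache);

  /** Propagate a server-side invalidation to registered caches. */
  void invalidate(byte[] key);

  interface CacheListener {
    void onInvalidate(byte[] key);
  }
}
```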
Other YCSB commands:
> ./bin/ycsb shell hpdos <br>
> ./bin/ycsb run hpdos -P workloads/workloada <br>
> ./bin/ycsb load hpdos -P workloads/workloada

Options:
  -P file         Specify workload file
  -cp path        Additional Java classpath entries
  -jvm-args args  Additional arguments to the JVM
  -p key=value    Override workload property
  -s              Print status to stderr
  -target n       Target ops/sec (default: unthrottled)
  -threads n      Number of client threads (default: 1)
# Copyright (c) 2015, 2017 YCSB contributors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# For more info, see: http://EditorConfig.org
root = true
[*.java]
indent_style = space
indent_size = 2
continuation_indent_size = 4
[*.md]
indent_style = space
indent_size = 2
continuation_indent_size = 4
[*.xml]
indent_style = space
indent_size = 2
continuation_indent_size = 4
# ignore compiled byte code
target
# ignore output files from testing
output*
# ignore standard Eclipse files
.project
.classpath
.settings
.checkstyle
# ignore standard IntelliJ files
.idea/
*.iml
*.ipr
*.iws
# ignore standard Vim and Emacs temp files
*.swp
*~
# ignore standard Mac OS X files/dirs
.DS_Store
/differentbin/
# Copyright (c) 2010 Yahoo! Inc., 2012 - 2015 YCSB contributors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# more info here about TravisCI and Java projects
# http://docs.travis-ci.com/user/languages/java/
language: java
jdk:
- openjdk8
- openjdk11
- oraclejdk11
addons:
hosts:
- myshorthost
hostname: myshorthost
postgresql: "9.5"
install:
- mvn -N io.takari:maven:0.7.7:wrapper -Dmaven=3.6.3
- ./mvnw install -q -DskipTests=true
script: ./mvnw test -q
before_script:
- psql -c 'CREATE database test;' -U postgres
- psql -c 'CREATE TABLE usertable (YCSB_KEY VARCHAR(255) PRIMARY KEY not NULL, YCSB_VALUE JSONB not NULL);' -U postgres -d test
- psql -c 'GRANT ALL PRIVILEGES ON DATABASE test to postgres;' -U postgres
# Services to start for tests.
services:
- ignite
- mongodb
- postgresql
# temporarily disable riak. failing, docs offline.
# - riak
# Can't use container based infra because of hosts/hostname
sudo: true
<!--
Copyright (c) 2017 YCSB contributors.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
## How To Contribute
As more and more databases are created to handle distributed or "cloud" workloads, YCSB needs contributors to write clients to test them. And of course we always need bug fixes, updates for existing databases and new features to keep YCSB going. Here are some guidelines to follow when digging into the code.
## Project Source
YCSB is located in a Git repository hosted on GitHub at [https://github.com/brianfrankcooper/YCSB](https://github.com/brianfrankcooper/YCSB). To modify the code, fork the main repo into your own GitHub account or organization and commit changes there.
YCSB is written in Java (as most of the new cloud data stores at the beginning of the project were written in Java) and is laid out as a multi-module Maven project. You should be able to import the project into your favorite IDE or environment easily. For more details about the Maven layout see the [Guide to Working with Multiple Modules](https://maven.apache.org/guides/mini/guide-multiple-modules.html).
## Licensing
YCSB is licensed under the Apache License, Version 2.0 (APL2). Every file included in the project must include the APL header. For example, each Java source file must have a header similar to the following:
```java
/**
* Copyright (c) 2015-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
```
When modifying files that already have a license header, please update the year when you made your edits. E.g. change ``Copyright (c) 2010 Yahoo! Inc., 2012 - 2016 YCSB contributors.`` to ``Copyright (c) 2010 Yahoo! Inc., 2012 - 2017 YCSB contributors.`` If the file only has ``Copyright (c) 2010 Yahoo! Inc.``, append the current year as in ``Copyright (c) 2010 Yahoo! Inc., 2017 YCSB contributors.``.
**WARNING**: It should go without saying, but don't copy and paste code from outside authors or sources. If you are a database author and want to copy some example code, it must be APL2 compatible.
Client bindings to non-APL databases are perfectly acceptable, as data stores are meant to be used from all kinds of projects. Just make sure not to copy any code or commit libraries or binaries into the YCSB code base. Link to them in the Maven pom file.
## Issues and Support
To track bugs, feature requests and releases we use GitHub's integrated [Issues](https://github.com/brianfrankcooper/YCSB/issues). If you find a bug or problem, open an issue with a descriptive title and as many details as you can give us in the body (stack traces, log files, etc). Then if you can create a fix, follow the PR guidelines below.
**Note**: Before embarking on a code change or new DB binding, search through the existing issues and pull requests to see if anyone is already working on it. Reach out to them if so.
For general support, please use the mailing list hosted (of course) with Yahoo groups at [http://groups.yahoo.com/group/ycsb-users](http://groups.yahoo.com/group/ycsb-users).
## Code Style
A Java coding style guide is enforced via the Maven CheckStyle plugin. We try not to be too draconian with enforcement but the biggies include:
* Whitespaces instead of tabs.
* Proper Javadocs for methods and classes.
* Camel case member names.
* Upper camel case classes and method names.
* Line length.
CheckStyle will run for pull requests or if you create a package locally, so if you just compile and push a commit, you may be surprised when the build fails with a style issue. Just execute ``mvn checkstyle:checkstyle`` before you open a PR and you should avoid any surprises.
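
As an illustration, here is a snippet in the expected style (two-space indentation as in the ``.editorconfig`` above, Javadoc present, camel-case names); the class itself is a made-up example:

```java
/**
 * A tiny example that passes the style checks: documented class and
 * method, two-space indentation, and camel-case member names.
 */
public class StyleExample {
  private int retryCount;

  /**
   * Returns the number of retries performed so far.
   */
  public int getRetryCount() {
    return retryCount;
  }
}
```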
## Platforms
Since most databases aim to support multiple platforms, YCSB aims to run on as many as possible as well. Besides **Linux** and **macOS**, YCSB must compile and run for **Windows**. While not all DBs will run under every platform, the YCSB tool itself must be able to execute on all of these systems and hopefully be able to communicate with remote data stores.
Additionally, YCSB is targeting Java 7 (1.7.0) as its build version as some users are glacially slow moving to Java 8. So please avoid those Lambdas and Streams for now.
## Pull Requests
You've written some amazing code and are excited to share it with the community! It's time to open a PR! Here's what you should do.
* Checkout YCSB's ``master`` branch in your own fork and create a new branch based off of it with a name that is reflective of your work. E.g. ``i123`` for fixing an issue or ``db_xyz`` when working on a binding.
* Add your changes to the branch.
* Commit the code and start the commit message with the component you are working on in square braces. E.g. ``[core] Add another format for exporting histograms.`` or ``[hbase12] Fix interrupted exception bug.``.
* Push to your fork and click the ``Create Pull Request`` button.
* Wait for the build to complete in the CI pipeline. If it fails with a red X, click through the logs for details and fix any issues and commit your changes.
* If you have made changes, please flatten the commits so that the commit logs are nice and clean. Just run a ``git rebase -i <hash before your first commit>``.
After you have opened your PR, a YCSB maintainer will review it and offer constructive feedback via the GitHub review feature. If no one has responded to your PR, please bump the thread by adding comments.
**NOTE**: For maintainers, please get another maintainer to sign off on your changes before merging a PR. And if you're writing code, please do create a PR from your fork, don't just push code directly to the master branch.
## Core, Bindings and Workloads
The main components of the code base include the core library and benchmarking utility, various database client bindings and workload classes and definitions.
### Core
When working on the core classes, keep in mind the following:
* Do not change the core behavior or operation of the main benchmarking classes (particularly the Client and Workload classes). YCSB is used all over the place because it's a consistent standard that allows different users to compare results with the same workloads. If you find a way to drastically improve throughput, that's great! But please check with the rest of the maintainers to see if we can add the tweaks without invalidating years of benchmarks.
* Do not remove or modify measurements. Users may have tooling to parse the outputs so if you take something out, they'll be a wee bit unhappy. Extending or adding measurements is fine (so if you do have tooling, expect additions.)
* Do not modify existing generators. Again we don't want to invalidate years of benchmarks. Instead, create a new generator or option that can be enabled explicitly (not implicitly!) for users to try out.
* Utility classes and methods are welcome. But if they're only ever used by a specific database binding, co-locate the code with that binding.
* Don't change the DB interface if at all possible. Implementations can squeeze all kinds of workloads through the existing interface and while it may be easy to change the bindings included with the source code, some users may have private clients they can't share with the community.
### Bindings and Clients
When a new database is released, a *binding* can be created: a client that communicates with the given data store and executes YCSB workloads against it. Details about writing a DB binding can be found on our [GitHub Wiki page](https://github.com/brianfrankcooper/YCSB/wiki/Adding-a-Database). Some development guidelines to follow include (a skeleton sketch follows this list):
* Create a new Maven module for your binding. Follow the existing bindings as examples.
* The module *must* include a README.md file with details such as:
* Database setup with links to documentation so that the YCSB benchmarks will execute properly.
* Example command line executions (workload selection, etc).
* Required and optional properties (e.g. connection strings, behavior settings, etc) along with the default values.
* Versions of the database the binding supports.
* Javadoc the binding and all of the methods. Tell us what it does and how it works.
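
To make the shape concrete, here is a minimal stubbed binding against the `site.ycsb.DB` interface, using the same method signatures as the Cassandra client later in this commit; the class name and package placement are placeholders:

```java
package site.ycsb.db;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;

/**
 * Skeleton of a YCSB binding. Every operation is stubbed; a real
 * binding would issue the corresponding request to the data store.
 */
public class SkeletonClient extends DB {
  @Override
  public void init() throws DBException {
    // Open connections here; called once per DB instance/client thread.
  }

  @Override
  public Status read(String table, String key, Set<String> fields,
      Map<String, ByteIterator> result) {
    return Status.NOT_IMPLEMENTED;
  }

  @Override
  public Status scan(String table, String startkey, int recordcount,
      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
    return Status.NOT_IMPLEMENTED;
  }

  @Override
  public Status update(String table, String key, Map<String, ByteIterator> values) {
    return Status.NOT_IMPLEMENTED;
  }

  @Override
  public Status insert(String table, String key, Map<String, ByteIterator> values) {
    return Status.NOT_IMPLEMENTED;
  }

  @Override
  public Status delete(String table, String key) {
    return Status.NOT_IMPLEMENTED;
  }
}
```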
Because YCSB is a utility to compare multiple data stores, we need each binding to behave similarly by default. That means each data store should enforce the strictest consistency guarantees available and avoid client side buffering or optimizations. This allows users to evaluate different DBs with a common baseline and tough standards.
However you *should* include parameters to tune and improve performance as much as possible to reach those flashy marketing numbers. Just be honest and document what the settings do and what trade-offs are made. (e.g. client side buffering reduces I/O but a crash can lead to data loss).
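
For instance, an explicit opt-in flag for client-side buffering might look like the following; the property name ``mydb.batchwrites`` and the helper class are invented for illustration.

```java
import java.util.Properties;

/** Hypothetical helper showing an explicit opt-in tuning flag. */
public final class BufferingConfig {
  private BufferingConfig() {
  }

  /**
   * Client-side buffering trades durability for throughput, so it
   * defaults to off and users must enable it explicitly.
   */
  public static boolean batchWritesEnabled(Properties props) {
    return Boolean.parseBoolean(props.getProperty("mydb.batchwrites", "false"));
  }
}
```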
### Workloads
YCSB began comparing various key/value data stores with simple CRUD operations. However as DBs have become more specialized we've added more workloads for various tasks and would love to have more in the future. Keep the following in mind:
* Make sure more than one publicly available database can handle your workload. It's no fun if only one player is in the game.
* Use the existing DB interface to pass your data around. If you really need another API, discuss with the maintainers to see if there isn't a workaround.
* Provide real-world use cases for the workload, not just theoretical idealizations.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=========================================================================
NOTICE file for use with, and corresponding to Section 4 of,
the Apache License, Version 2.0,
in this case for the YCSB project.
=========================================================================
This product includes software developed by
Yahoo! Inc. (www.yahoo.com)
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
This product includes software developed by
Google Inc. (www.google.com)
Copyright (c) 2015 Google Inc. All rights reserved.
<!--
Copyright (c) 2010 Yahoo! Inc., 2012 - 2016 YCSB contributors.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
YCSB
====================================
[![Build Status](https://travis-ci.org/brianfrankcooper/YCSB.png?branch=master)](https://travis-ci.org/brianfrankcooper/YCSB)
Links
-----
* To get here, use https://ycsb.site
* [Our project docs](https://github.com/brianfrankcooper/YCSB/wiki)
* [The original announcement from Yahoo!](https://labs.yahoo.com/news/yahoo-cloud-serving-benchmark/)
Getting Started
---------------
1. Download the [latest release of YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest):
```sh
curl -O --location https://github.com/brianfrankcooper/YCSB/releases/download/0.17.0/ycsb-0.17.0.tar.gz
tar xfvz ycsb-0.17.0.tar.gz
cd ycsb-0.17.0
```
2. Set up a database to benchmark. There is a README file under each binding
directory.
3. Run YCSB command.
On Linux:
```sh
bin/ycsb.sh load basic -P workloads/workloada
bin/ycsb.sh run basic -P workloads/workloada
```
On Windows:
```bat
bin/ycsb.bat load basic -P workloads\workloada
bin/ycsb.bat run basic -P workloads\workloada
```
Running the `ycsb` command without any argument will print the usage.
See https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload
for detailed documentation on how to run a workload.
See https://github.com/brianfrankcooper/YCSB/wiki/Core-Properties for
the list of available workload properties.
Building from source
--------------------
YCSB requires the use of Maven 3; if you use Maven 2, you may see [errors
such as these](https://github.com/brianfrankcooper/YCSB/issues/406).
To build the full distribution, with all database bindings:
mvn clean package
To build a single database binding:
mvn -pl site.ycsb:mongodb-binding -am clean package
<!--
Copyright (c) 2015 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>root</artifactId>
<version>0.18.0-SNAPSHOT</version>
<relativePath>../../</relativePath>
</parent>
<artifactId>datastore-specific-descriptor</artifactId>
<name>Per Datastore Binding descriptor</name>
<packaging>jar</packaging>
<description>
This module contains the assembly descriptor used by the individual components
to build binding-specific distributions.
</description>
<dependencies>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>
<!--
Copyright (c) 2015 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
<id>dist</id>
<includeBaseDirectory>true</includeBaseDirectory>
<baseDirectory>ycsb-${artifactId}-${version}</baseDirectory>
<files>
<file>
<source>README.md</source>
<outputDirectory></outputDirectory>
</file>
</files>
<fileSets>
<fileSet>
<directory>..</directory>
<outputDirectory></outputDirectory>
<fileMode>0644</fileMode>
<includes>
<include>LICENSE.txt</include>
<include>NOTICE.txt</include>
</includes>
</fileSet>
<fileSet>
<directory>../bin</directory>
<outputDirectory>bin</outputDirectory>
<fileMode>0755</fileMode>
<includes>
<include>ycsb*</include>
</includes>
</fileSet>
<fileSet>
<directory>../bin</directory>
<outputDirectory>bin</outputDirectory>
<fileMode>0644</fileMode>
<includes>
<include>bindings.properties</include>
</includes>
</fileSet>
<fileSet>
<directory>../workloads</directory>
<outputDirectory>workloads</outputDirectory>
<fileMode>0644</fileMode>
</fileSet>
<fileSet>
<directory>src/main/conf</directory>
<outputDirectory>conf</outputDirectory>
<fileMode>0644</fileMode>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<outputDirectory>lib</outputDirectory>
<includes>
<include>site.ycsb:core</include>
</includes>
<scope>provided</scope>
<useTransitiveFiltering>true</useTransitiveFiltering>
</dependencySet>
<dependencySet>
<outputDirectory>lib</outputDirectory>
<includes>
<include>*:jar:*</include>
</includes>
<excludes>
<exclude>*:sources</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>
<!--
Copyright (c) 2015-2016 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>root</artifactId>
<version>0.18.0-SNAPSHOT</version>
</parent>
<artifactId>binding-parent</artifactId>
<name>YCSB Datastore Binding Parent</name>
<packaging>pom</packaging>
<description>
This module acts as the parent for new datastore bindings.
It creates a datastore specific binary artifact.
</description>
<modules>
<module>datastore-specific-descriptor</module>
</modules>
<properties>
<!-- See the test-on-jdk9 profile below. Default to 'jdk9 works' -->
<skipJDK9Tests>false</skipJDK9Tests>
<skipJDK10Tests>false</skipJDK10Tests>
<skipJDK11Tests>false</skipJDK11Tests>
</properties>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven.assembly.version}</version>
<dependencies>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>datastore-specific-descriptor</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<configuration>
<descriptorRefs>
<descriptorRef>datastore-specific-assembly</descriptorRef>
</descriptorRefs>
<finalName>ycsb-${project.artifactId}-${project.version}</finalName>
<formats>
<format>tar.gz</format>
</formats>
<appendAssemblyId>false</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<executions>
<execution>
<id>validate</id>
<configuration>
<configLocation>../checkstyle.xml</configLocation>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven.dependency.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>stage-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<!-- If the binding defines a README, presume we should make an assembly. -->
<profile>
<id>datastore-binding</id>
<activation>
<file>
<exists>README.md</exists>
</file>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
<!-- If the binding doesn't work with jdk9, it should redefine the
skipJDK9 property
-->
<profile>
<id>tests-on-jdk9</id>
<activation>
<jdk>9</jdk>
</activation>
<properties>
<skipTests>${skipJDK9Tests}</skipTests>
</properties>
</profile>
<!-- If the binding doesn't work with jdk10, it should redefine the
skipJDK10 property
-->
<profile>
<id>tests-on-jdk10</id>
<activation>
<jdk>10</jdk>
</activation>
<properties>
<skipTests>${skipJDK10Tests}</skipTests>
</properties>
</profile>
<!-- If the binding doesn't work with jdk11, it should redefine the
skipJDK11 property
-->
<profile>
<id>tests-on-jdk11</id>
<activation>
<jdk>11</jdk>
</activation>
<properties>
<skipTests>${skipJDK11Tests}</skipTests>
</properties>
</profile>
<!-- When doing a YCSB release, we want to make sure specific bindings aren't included in the maven repo -->
<profile>
<id>ycsb-release</id>
<properties>
<!-- Set the deploy plugin to skip wherever this property is inherited -->
<maven.deploy.skip>true</maven.deploy.skip>
</properties>
<build>
<plugins>
<!-- We still want to make sure that *this* module gets deployed. -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<executions>
<execution>
<id>but-still-deploy-the-binding-parent</id>
<goals>
<goal>deploy</goal>
</goals>
<phase>deploy</phase>
<inherited>false</inherited>
<configuration>
<skip>false</skip>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
<!--
Copyright (c) 2015 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
# Apache Cassandra 2.x CQL binding
Binding for [Apache Cassandra](http://cassandra.apache.org), using the CQL API
via the [DataStax
driver](http://docs.datastax.com/en/developer/java-driver/2.1/java-driver/whatsNew2.html).
To run against the (deprecated) Cassandra Thrift API, use the `cassandra-10` binding.
## Creating a table for use with YCSB
For keyspace `ycsb`, table `usertable`:
    cqlsh> create keyspace ycsb
        WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor': 3 };
    cqlsh> USE ycsb;
    cqlsh> create table usertable (
        y_id varchar primary key,
        field0 varchar,
        field1 varchar,
        field2 varchar,
        field3 varchar,
        field4 varchar,
        field5 varchar,
        field6 varchar,
        field7 varchar,
        field8 varchar,
        field9 varchar);
**Note that `replication_factor` and consistency levels (below) will affect performance.**
## Cassandra Configuration Parameters
- `hosts` (**required**)
  - Cassandra nodes to connect to.
  - No default.
- `port`
  - CQL port for communicating with the Cassandra cluster.
  - Default is `9042`.
- `cassandra.keyspace`
  - Keyspace name - must match the keyspace for the table created (see above).
    See http://docs.datastax.com/en/cql/3.1/cql/cql_reference/create_keyspace_r.html for details.
  - Default value is `ycsb`.
- `cassandra.username`
- `cassandra.password`
  - Optional user name and password for authentication. See http://docs.datastax.com/en/cassandra/2.0/cassandra/security/security_config_native_authenticate_t.html for details.
- `cassandra.readconsistencylevel`
- `cassandra.writeconsistencylevel`
  - Consistency level for reads and writes, respectively. See the [DataStax documentation](http://docs.datastax.com/en/cassandra/2.0/cassandra/dml/dml_config_consistency_c.html) for details.
  - Default value is `QUORUM`.
- `cassandra.maxconnections`
- `cassandra.coreconnections`
  - Defaults for max and core connections can be found here: https://datastax.github.io/java-driver/2.1.8/features/pooling/#pool-size. Cassandra 2.0.X falls under protocol V2, Cassandra 2.1+ falls under protocol V3.
- `cassandra.connecttimeoutmillis`
- `cassandra.readtimeoutmillis`
  - Defaults for connect and read timeouts can be found here: https://docs.datastax.com/en/drivers/java/2.0/com/datastax/driver/core/SocketOptions.html.
- `cassandra.useSSL`
  - Default value is `false`.
  - Set this value to `true` to connect with SSL.
- `cassandra.tracing`
  - Default is `false`.
  - See https://docs.datastax.com/en/cql/3.3/cql/cql_reference/tracing_r.html for details.
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012-2016 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>binding-parent</artifactId>
<version>0.18.0-SNAPSHOT</version>
<relativePath>../binding-parent</relativePath>
</parent>
<artifactId>cassandra-binding</artifactId>
<name>Cassandra 2.1+ DB Binding</name>
<packaging>jar</packaging>
<properties>
<!-- Skip tests by default. will be activated by jdk8 profile -->
<skipTests>true</skipTests>
</properties>
<dependencies>
<!-- CQL driver -->
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>${cassandra.cql.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.cassandraunit</groupId>
<artifactId>cassandra-unit</artifactId>
<version>3.0.0.1</version>
<classifier>shaded</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.21</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
<!-- only for Cassandra test (Cassandra 2.2+ uses Sigar for collecting system information, and Sigar requires some native lib files) -->
<dependency>
<groupId>org.hyperic</groupId>
<artifactId>sigar-dist</artifactId>
<version>1.6.4.129</version>
<type>zip</type>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<!-- Cassandra 2.2+ requires JDK8 to run, so none of our tests
will work unless we're using jdk8.
-->
<profile>
<id>jdk8-tests</id>
<activation>
<jdk>1.8</jdk>
</activation>
<properties>
<skipTests>false</skipTests>
</properties>
</profile>
</profiles>
<!-- sigar-dist can be downloaded from jboss repository -->
<repositories>
<repository>
<id>central2</id>
<name>sigar Repository</name>
<url>https://repository.jboss.org/nexus/content/groups/public-jboss/</url>
<layout>default</layout>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
<!-- unzip sigar-dist/lib files.
References:
http://stackoverflow.com/questions/5388661/unzip-dependency-in-maven
https://arviarya.wordpress.com/2013/09/22/sigar-access-operating-system-and-hardware-level-information/
-->
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>unpack-sigar</id>
<phase>process-test-resources<!-- or any other valid maven phase --></phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
<configuration>
<includeGroupIds>org.hyperic</includeGroupIds>
<includeArtifactIds>sigar-dist</includeArtifactIds>
<includes>**/sigar-bin/lib/*</includes>
<excludes>**/sigar-bin/lib/*jar</excludes>
<outputDirectory>
${project.build.directory}/cassandra-dependency
</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.8</version>
<configuration>
<argLine>-Djava.library.path=${project.build.directory}/cassandra-dependency/hyperic-sigar-1.6.4/sigar-bin/lib</argLine>
</configuration>
</plugin>
</plugins>
</build>
</project>
/**
* Copyright (c) 2013-2015 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License. See accompanying LICENSE file.
*
* Submitted by Chrisjan Matser on 10/11/2010.
*/
package site.ycsb.db;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ColumnDefinitions;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Host;
import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import com.datastax.driver.core.querybuilder.Update;
import site.ycsb.ByteArrayByteIterator;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.helpers.MessageFormatter;
/**
* Cassandra 2.x CQL client.
*
* See {@code cassandra2/README.md} for details.
*
* @author cmatser
*/
public class CassandraCQLClient extends DB {
private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class);
private static Cluster cluster = null;
private static Session session = null;
private static ConcurrentMap<Set<String>, PreparedStatement> readStmts =
new ConcurrentHashMap<Set<String>, PreparedStatement>();
private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts =
new ConcurrentHashMap<Set<String>, PreparedStatement>();
private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts =
new ConcurrentHashMap<Set<String>, PreparedStatement>();
private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts =
new ConcurrentHashMap<Set<String>, PreparedStatement>();
private static AtomicReference<PreparedStatement> readAllStmt =
new AtomicReference<PreparedStatement>();
private static AtomicReference<PreparedStatement> scanAllStmt =
new AtomicReference<PreparedStatement>();
private static AtomicReference<PreparedStatement> deleteStmt =
new AtomicReference<PreparedStatement>();
private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM;
private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM;
public static final String YCSB_KEY = "y_id";
public static final String KEYSPACE_PROPERTY = "cassandra.keyspace";
public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb";
public static final String USERNAME_PROPERTY = "cassandra.username";
public static final String PASSWORD_PROPERTY = "cassandra.password";
public static final String HOSTS_PROPERTY = "hosts";
public static final String PORT_PROPERTY = "port";
public static final String PORT_PROPERTY_DEFAULT = "9042";
public static final String READ_CONSISTENCY_LEVEL_PROPERTY =
"cassandra.readconsistencylevel";
public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name();
public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY =
"cassandra.writeconsistencylevel";
public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name();
public static final String MAX_CONNECTIONS_PROPERTY =
"cassandra.maxconnections";
public static final String CORE_CONNECTIONS_PROPERTY =
"cassandra.coreconnections";
public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY =
"cassandra.connecttimeoutmillis";
public static final String READ_TIMEOUT_MILLIS_PROPERTY =
"cassandra.readtimeoutmillis";
public static final String TRACING_PROPERTY = "cassandra.tracing";
public static final String TRACING_PROPERTY_DEFAULT = "false";
public static final String USE_SSL_CONNECTION = "cassandra.useSSL";
private static final String DEFAULT_USE_SSL_CONNECTION = "false";
/**
* Count the number of times initialized to teardown on the last
* {@link #cleanup()}.
*/
private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
private static boolean debug = false;
private static boolean trace = false;
/**
* Initialize any state for this DB. Called once per DB instance; there is one
* DB instance per client thread.
*/
@Override
public void init() throws DBException {
// Keep track of number of calls to init (for later cleanup)
INIT_COUNT.incrementAndGet();
// Synchronized so that we only have a single
// cluster/session instance for all the threads.
synchronized (INIT_COUNT) {
// Check if the cluster has already been initialized
if (cluster != null) {
return;
}
try {
debug =
Boolean.parseBoolean(getProperties().getProperty("debug", "false"));
trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT));
String host = getProperties().getProperty(HOSTS_PROPERTY);
if (host == null) {
throw new DBException(String.format(
"Required property \"%s\" missing for CassandraCQLClient",
HOSTS_PROPERTY));
}
String[] hosts = host.split(",");
String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT);
String username = getProperties().getProperty(USERNAME_PROPERTY);
String password = getProperties().getProperty(PASSWORD_PROPERTY);
String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY,
KEYSPACE_PROPERTY_DEFAULT);
readConsistencyLevel = ConsistencyLevel.valueOf(
getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY,
READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
writeConsistencyLevel = ConsistencyLevel.valueOf(
getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY,
WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT));
Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION,
DEFAULT_USE_SSL_CONNECTION));
if ((username != null) && !username.isEmpty()) {
Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password)
.withPort(Integer.valueOf(port)).addContactPoints(hosts);
if (useSSL) {
clusterBuilder = clusterBuilder.withSSL();
}
cluster = clusterBuilder.build();
} else {
cluster = Cluster.builder().withPort(Integer.valueOf(port))
.addContactPoints(hosts).build();
}
String maxConnections = getProperties().getProperty(
MAX_CONNECTIONS_PROPERTY);
if (maxConnections != null) {
cluster.getConfiguration().getPoolingOptions()
.setMaxConnectionsPerHost(HostDistance.LOCAL,
Integer.valueOf(maxConnections));
}
String coreConnections = getProperties().getProperty(
CORE_CONNECTIONS_PROPERTY);
if (coreConnections != null) {
cluster.getConfiguration().getPoolingOptions()
.setCoreConnectionsPerHost(HostDistance.LOCAL,
Integer.valueOf(coreConnections));
}
String connectTimoutMillis = getProperties().getProperty(
CONNECT_TIMEOUT_MILLIS_PROPERTY);
if (connectTimoutMillis != null) {
cluster.getConfiguration().getSocketOptions()
.setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis));
}
String readTimoutMillis = getProperties().getProperty(
READ_TIMEOUT_MILLIS_PROPERTY);
if (readTimoutMillis != null) {
cluster.getConfiguration().getSocketOptions()
.setReadTimeoutMillis(Integer.valueOf(readTimoutMillis));
}
Metadata metadata = cluster.getMetadata();
logger.info("Connected to cluster: {}\n",
metadata.getClusterName());
for (Host discoveredHost : metadata.getAllHosts()) {
logger.info("Datacenter: {}; Host: {}; Rack: {}\n",
discoveredHost.getDatacenter(), discoveredHost.getAddress(),
discoveredHost.getRack());
}
session = cluster.connect(keyspace);
} catch (Exception e) {
throw new DBException(e);
}
} // synchronized
}
/**
* Cleanup any state for this DB. Called once per DB instance; there is one DB
* instance per client thread.
*/
@Override
public void cleanup() throws DBException {
synchronized (INIT_COUNT) {
final int curInitCount = INIT_COUNT.decrementAndGet();
if (curInitCount <= 0) {
readStmts.clear();
scanStmts.clear();
insertStmts.clear();
updateStmts.clear();
readAllStmt.set(null);
scanAllStmt.set(null);
deleteStmt.set(null);
session.close();
cluster.close();
cluster = null;
session = null;
}
if (curInitCount < 0) {
// This should never happen.
throw new DBException(
String.format("initCount is negative: %d", curInitCount));
}
}
}
/**
* Read a record from the database. Each field/value pair from the result will
* be stored in a HashMap.
*
* @param table
* The name of the table
* @param key
* The record key of the record to read.
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try {
PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
stmt = session.prepare(selectBuilder.from(table)
.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))
.limit(1));
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
readAllStmt.getAndSet(stmt) :
readStmts.putIfAbsent(new HashSet<String>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
ResultSet rs = session.execute(stmt.bind(key));
if (rs.isExhausted()) {
return Status.NOT_FOUND;
}
// Should be only 1 row
Row row = rs.one();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
result.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
result.put(def.getName(), null);
}
}
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e);
return Status.ERROR;
}
}
/**
* Perform a range scan for a set of records in the database. Each field/value
* pair from the result will be stored in a HashMap.
*
* Cassandra CQL uses "token" method for range scan which doesn't always yield
* intuitive results.
*
* @param table
* The name of the table
* @param startkey
* The record key of the first record to read.
* @param recordcount
* The number of records to read
* @param fields
* The list of fields to read, or null for all of them
* @param result
* A Vector of HashMaps, where each HashMap is a set field/value
* pairs for one record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try {
PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Select.Builder selectBuilder;
if (fields == null) {
selectBuilder = QueryBuilder.select().all();
} else {
selectBuilder = QueryBuilder.select();
for (String col : fields) {
((Select.Selection) selectBuilder).column(col);
}
}
Select selectStmt = selectBuilder.from(table);
// The statement builder is not setup right for tokens.
// So, we need to build it manually.
String initialStmt = selectStmt.toString();
StringBuilder scanStmt = new StringBuilder();
scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1));
scanStmt.append(" WHERE ");
scanStmt.append(QueryBuilder.token(YCSB_KEY));
scanStmt.append(" >= ");
scanStmt.append("token(");
scanStmt.append(QueryBuilder.bindMarker());
scanStmt.append(")");
scanStmt.append(" LIMIT ");
scanStmt.append(QueryBuilder.bindMarker());
stmt = session.prepare(scanStmt.toString());
stmt.setConsistencyLevel(readConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = (fields == null) ?
scanAllStmt.getAndSet(stmt) :
scanStmts.putIfAbsent(new HashSet<String>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("startKey = {}, recordcount = {}", startkey, recordcount);
ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount)));
HashMap<String, ByteIterator> tuple;
while (!rs.isExhausted()) {
Row row = rs.one();
tuple = new HashMap<String, ByteIterator>();
ColumnDefinitions cd = row.getColumnDefinitions();
for (ColumnDefinitions.Definition def : cd) {
ByteBuffer val = row.getBytesUnsafe(def.getName());
if (val != null) {
tuple.put(def.getName(), new ByteArrayByteIterator(val.array()));
} else {
tuple.put(def.getName(), null);
}
}
result.add(tuple);
}
return Status.OK;
} catch (Exception e) {
logger.error(
MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e);
return Status.ERROR;
}
}
/**
* Update a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key, overwriting any existing values with the same field name.
*
* @param table
* The name of the table
* @param key
* The record key of the record to write.
* @param values
* A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
try {
Set<String> fields = values.keySet();
PreparedStatement stmt = updateStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Update updateStmt = QueryBuilder.update(table);
// Add fields
for (String field : fields) {
updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker()));
}
// Add key
updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()));
stmt = session.prepare(updateStmt);
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet<String>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
if (logger.isDebugEnabled()) {
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
logger.debug("{} = {}", entry.getKey(), entry.getValue());
}
}
// Add fields
ColumnDefinitions vars = stmt.getVariables();
BoundStatement boundStmt = stmt.bind();
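// Bind markers were added fields-first and key-last when the statement was
// prepared, so bind values in the same order.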
for (int i = 0; i < vars.size() - 1; i++) {
boundStmt.setString(i, values.get(vars.getName(i)).toString());
}
// Add key
boundStmt.setString(vars.size() - 1, key);
session.execute(boundStmt);
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified record
* key.
*
* @param table
* The name of the table
* @param key
* The record key of the record to insert.
* @param values
* A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
try {
Set<String> fields = values.keySet();
PreparedStatement stmt = insertStmts.get(fields);
// Prepare statement on demand
if (stmt == null) {
Insert insertStmt = QueryBuilder.insertInto(table);
// Add key
insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker());
// Add fields
for (String field : fields) {
insertStmt.value(field, QueryBuilder.bindMarker());
}
stmt = session.prepare(insertStmt);
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet<String>(fields), stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
if (logger.isDebugEnabled()) {
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
logger.debug("{} = {}", entry.getKey(), entry.getValue());
}
}
// Add key
BoundStatement boundStmt = stmt.bind().setString(0, key);
// Add fields
ColumnDefinitions vars = stmt.getVariables();
for (int i = 1; i < vars.size(); i++) {
boundStmt.setString(i, values.get(vars.getName(i)).toString());
}
session.execute(boundStmt);
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
/**
* Delete a record from the database.
*
* @param table
* The name of the table
* @param key
* The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
@Override
public Status delete(String table, String key) {
try {
PreparedStatement stmt = deleteStmt.get();
// Prepare statement on demand
if (stmt == null) {
stmt = session.prepare(QueryBuilder.delete().from(table)
.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())));
stmt.setConsistencyLevel(writeConsistencyLevel);
if (trace) {
stmt.enableTracing();
}
PreparedStatement prevStmt = deleteStmt.getAndSet(stmt);
if (prevStmt != null) {
stmt = prevStmt;
}
}
logger.debug(stmt.getQueryString());
logger.debug("key = {}", key);
session.execute(stmt.bind(key));
return Status.OK;
} catch (Exception e) {
logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e);
}
return Status.ERROR;
}
}
/*
* Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="http://cassandra.apache.org/">Cassandra</a>
* 2.1+ via CQL.
*/
package site.ycsb.db;
/**
* Copyright (c) 2015 YCSB contributors All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import com.google.common.collect.Sets;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.Statement;
import com.datastax.driver.core.querybuilder.Insert;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.datastax.driver.core.querybuilder.Select;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import org.cassandraunit.CassandraCQLUnit;
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* Integration tests for the Cassandra client.
*/
public class CassandraCQLClientTest {
// Change the default Cassandra timeout from 10s to 120s for slow CI machines
private final static long timeout = 120000L;
private final static String TABLE = "usertable";
private final static String HOST = "localhost";
private final static int PORT = 9142;
private final static String DEFAULT_ROW_KEY = "user1";
private CassandraCQLClient client;
private Session session;
@ClassRule
public static CassandraCQLUnit cassandraUnit = new CassandraCQLUnit(
new ClassPathCQLDataSet("ycsb.cql", "ycsb"), null, timeout);
@Before
public void setUp() throws Exception {
session = cassandraUnit.getSession();
Properties p = new Properties();
p.setProperty("hosts", HOST);
p.setProperty("port", Integer.toString(PORT));
p.setProperty("table", TABLE);
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
client = new CassandraCQLClient();
client.setProperties(p);
client.init();
}
@After
public void tearDownClient() throws Exception {
if (client != null) {
client.cleanup();
}
client = null;
}
@After
public void clearTable() throws Exception {
// Clear the table so that each test starts fresh.
final Statement truncate = QueryBuilder.truncate(TABLE);
if (cassandraUnit != null) {
cassandraUnit.getSession().execute(truncate);
}
}
@Test
public void testReadMissingRow() throws Exception {
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(TABLE, "Missing row", null, result);
assertThat(result.size(), is(0));
assertThat(status, is(Status.NOT_FOUND));
}
private void insertRow() {
final String rowKey = DEFAULT_ROW_KEY;
Insert insertStmt = QueryBuilder.insertInto(TABLE);
insertStmt.value(CassandraCQLClient.YCSB_KEY, rowKey);
insertStmt.value("field0", "value1");
insertStmt.value("field1", "value2");
session.execute(insertStmt);
}
@Test
public void testRead() throws Exception {
insertRow();
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Status status = client.read(TABLE, DEFAULT_ROW_KEY, null, result);
assertThat(status, is(Status.OK));
assertThat(result.entrySet(), hasSize(11));
assertThat(result, hasEntry("field2", null));
final HashMap<String, String> strResult = new HashMap<String, String>();
for (final Map.Entry<String, ByteIterator> e : result.entrySet()) {
if (e.getValue() != null) {
strResult.put(e.getKey(), e.getValue().toString());
}
}
assertThat(strResult, hasEntry(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY));
assertThat(strResult, hasEntry("field0", "value1"));
assertThat(strResult, hasEntry("field1", "value2"));
}
@Test
public void testReadSingleColumn() throws Exception {
insertRow();
final HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
final Set<String> fields = Sets.newHashSet("field1");
final Status status = client.read(TABLE, DEFAULT_ROW_KEY, fields, result);
assertThat(status, is(Status.OK));
assertThat(result.entrySet(), hasSize(1));
final Map<String, String> strResult = StringByteIterator.getStringMap(result);
assertThat(strResult, hasEntry("field1", "value2"));
}
@Test
public void testInsert() throws Exception {
final String key = "key";
final Map<String, String> input = new HashMap<String, String>();
input.put("field0", "value1");
input.put("field1", "value2");
final Status status = client.insert(TABLE, key, StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, key))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, notNullValue());
assertThat(rs.isExhausted(), is(true));
assertThat(row.getString("field0"), is("value1"));
assertThat(row.getString("field1"), is("value2"));
}
@Test
public void testUpdate() throws Exception {
insertRow();
final Map<String, String> input = new HashMap<String, String>();
input.put("field0", "new-value1");
input.put("field1", "new-value2");
final Status status = client.update(TABLE,
DEFAULT_ROW_KEY,
StringByteIterator.getByteIteratorMap(input));
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, notNullValue());
assertThat(rs.isExhausted(), is(true));
assertThat(row.getString("field0"), is("new-value1"));
assertThat(row.getString("field1"), is("new-value2"));
}
@Test
public void testDelete() throws Exception {
insertRow();
final Status status = client.delete(TABLE, DEFAULT_ROW_KEY);
assertThat(status, is(Status.OK));
// Verify result
final Select selectStmt =
QueryBuilder.select("field0", "field1")
.from(TABLE)
.where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY))
.limit(1);
final ResultSet rs = session.execute(selectStmt);
final Row row = rs.one();
assertThat(row, nullValue());
}
@Test
public void testPreparedStatements() throws Exception {
final int LOOP_COUNT = 3;
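// Re-running the same operations exercises the prepared-statement caches
// built up on the first pass.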
for (int i = 0; i < LOOP_COUNT; i++) {
testInsert();
testUpdate();
testRead();
testReadSingleColumn();
testReadMissingRow();
testDelete();
}
}
}
/**
* Copyright (c) 2015 YCSB Contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
CREATE TABLE usertable (
y_id varchar primary key,
field0 varchar,
field1 varchar,
field2 varchar,
field3 varchar,
field4 varchar,
field5 varchar,
field6 varchar,
field7 varchar,
field8 varchar,
field9 varchar);
<?xml version="1.0"?>
<!--
Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.2//EN"
"http://www.puppycrawl.com/dtds/configuration_1_2.dtd">
<!--
Checkstyle configuration for Hadoop that is based on the sun_checks.xml file
that is bundled with Checkstyle and includes checks for:
- the Java Language Specification at
http://java.sun.com/docs/books/jls/second_edition/html/index.html
- the Sun Code Conventions at http://java.sun.com/docs/codeconv/
- the Javadoc guidelines at
http://java.sun.com/j2se/javadoc/writingdoccomments/index.html
- the JDK Api documentation http://java.sun.com/j2se/docs/api/index.html
- some best practices
Checkstyle is very configurable. Be sure to read the documentation at
http://checkstyle.sf.net (or in your downloaded distribution).
Most Checks are configurable, be sure to consult the documentation.
To completely disable a check, just comment it out or delete it from the file.
Finally, it is worth reading the documentation.
-->
<module name="Checker">
<!-- Checks that a package.html file exists for each package. -->
<!-- See http://checkstyle.sf.net/config_javadoc.html#PackageHtml -->
<module name="JavadocPackage"/>
<!-- Checks whether files end with a new line. -->
<!-- See http://checkstyle.sf.net/config_misc.html#NewlineAtEndOfFile -->
<!-- module name="NewlineAtEndOfFile"/-->
<!-- Checks that property files contain the same keys. -->
<!-- See http://checkstyle.sf.net/config_misc.html#Translation -->
<module name="Translation"/>
<module name="FileLength"/>
<module name="FileTabCharacter"/>
<module name="TreeWalker">
<!-- Checks for Javadoc comments. -->
<!-- See http://checkstyle.sf.net/config_javadoc.html -->
<module name="JavadocType">
<property name="scope" value="public"/>
<property name="allowMissingParamTags" value="true"/>
<!-- unfortunately we cannot add implNote, implSpec, apiNote and apiSpec to checkstyle -->
<property name="allowUnknownTags" value="true"/>
</module>
<module name="JavadocStyle"/>
<!-- Checks for Naming Conventions. -->
<!-- See http://checkstyle.sf.net/config_naming.html -->
<module name="ConstantName"/>
<module name="LocalFinalVariableName"/>
<module name="LocalVariableName"/>
<module name="MemberName"/>
<module name="MethodName"/>
<module name="PackageName"/>
<module name="ParameterName"/>
<module name="StaticVariableName"/>
<module name="TypeName"/>
<!-- Checks for Headers -->
<!-- See http://checkstyle.sf.net/config_header.html -->
<!-- <module name="Header"> -->
<!-- The follow property value demonstrates the ability -->
<!-- to have access to ANT properties. In this case it uses -->
<!-- the ${basedir} property to allow Checkstyle to be run -->
<!-- from any directory within a project. See property -->
<!-- expansion, -->
<!-- http://checkstyle.sf.net/config.html#properties -->
<!-- <property -->
<!-- name="headerFile" -->
<!-- value="${basedir}/java.header"/> -->
<!-- </module> -->
<!-- Following interprets the header file as regular expressions. -->
<!-- <module name="RegexpHeader"/> -->
<!-- Checks for imports -->
<!-- See http://checkstyle.sf.net/config_import.html -->
<module name="IllegalImport"/> <!-- defaults to sun.* packages -->
<module name="RedundantImport"/>
<module name="UnusedImports"/>
<!-- Checks for Size Violations. -->
<!-- See http://checkstyle.sf.net/config_sizes.html -->
<module name="LineLength">
<property name="max" value="120"/>
</module>
<module name="MethodLength"/>
<module name="ParameterNumber"/>
<!-- Checks for whitespace -->
<!-- See http://checkstyle.sf.net/config_whitespace.html -->
<module name="EmptyForIteratorPad"/>
<module name="MethodParamPad"/>
<module name="NoWhitespaceAfter"/>
<module name="NoWhitespaceBefore"/>
<module name="ParenPad"/>
<module name="TypecastParenPad"/>
<module name="WhitespaceAfter">
<property name="tokens" value="COMMA, SEMI"/>
</module>
<!-- Modifier Checks -->
<!-- See http://checkstyle.sf.net/config_modifiers.html -->
<module name="ModifierOrder"/>
<module name="RedundantModifier"/>
<!-- Checks for blocks. You know, those {}'s -->
<!-- See http://checkstyle.sf.net/config_blocks.html -->
<module name="AvoidNestedBlocks"/>
<module name="EmptyBlock">
<property name="option" value="text"/>
</module>
<module name="LeftCurly"/>
<module name="NeedBraces"/>
<module name="RightCurly"/>
<!-- Checks for common coding problems -->
<!-- See http://checkstyle.sf.net/config_coding.html -->
<!-- module name="AvoidInlineConditionals"/-->
<module name="EmptyStatement"/>
<module name="EqualsHashCode"/>
<module name="HiddenField">
<property name="ignoreConstructorParameter" value="true"/>
</module>
<module name="IllegalInstantiation"/>
<module name="InnerAssignment"/>
<module name="MissingSwitchDefault"/>
<module name="SimplifyBooleanExpression"/>
<module name="SimplifyBooleanReturn"/>
<!-- Checks for class design -->
<!-- See http://checkstyle.sf.net/config_design.html -->
<module name="FinalClass"/>
<module name="HideUtilityClassConstructor"/>
<module name="InterfaceIsType"/>
<module name="VisibilityModifier">
<property name="protectedAllowed" value="true"/>
</module>
<!-- Miscellaneous other checks. -->
<!-- See http://checkstyle.sf.net/config_misc.html -->
<module name="ArrayTypeStyle"/>
<module name="Indentation">
<property name="basicOffset" value="2" />
<property name="caseIndent" value="0" />
</module>
<!-- <module name="TodoComment"/> -->
<module name="UpperEll"/>
</module>
</module>
<!--
Copyright (c) 2015 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
When used as a latency-under-load benchmark, YCSB in its original form suffers from
Coordinated Omission[1] and related measurement issues:
* Load is controlled by response time
* Measurement does not account for missing time
* Measurement starts at the beginning of the request rather than at the intended beginning
* Measurement is limited in scope, as the histogram does not provide data on overflow values
To provide a minimal correction patch, the following were implemented:
1. Replace the internal histogram implementation with HdrHistogram[2]:
HdrHistogram offers a dynamic range of measurement at a given precision and will
improve the fidelity of reporting. It allows capturing a much wider range of latencies.
HdrHistogram also supports compressed loss-less serialization, which enables capturing
snapshot histograms from which lower-resolution histograms can be constructed for plotting
latency over time. Snapshot interval histograms are serialized on status reporting, which
must be enabled using the '-s' option (see the recording sketch after this list).
2. Track the intended operation start and report latencies from that point in time:
Assuming the benchmark sets a target schedule of execution in which every operation
is supposed to happen at a given time, the benchmark should measure the latency between
the intended start time and operation completion.
This required the introduction of a new measurement point and inevitably
includes measuring some of the internal preparation steps of the load generator.
This overhead should be negligible in the context of a network hop, but could
be corrected for by estimating the load-generator overheads (e.g. by measuring a
no-op DB or by measuring the setup time for an operation and deducting that from the total).
This intended measurement point is only used when there is a target load (specified by
the -target parameter).
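As a minimal sketch of the recording pattern point 1 refers to (illustrative
only, assuming just that the org.HdrHistogram artifact is on the classpath;
this is not the YCSB integration itself):

    import org.HdrHistogram.Histogram;
    import org.HdrHistogram.Recorder;

    public final class HdrRecordingSketch {
      public static void main(String[] args) {
        Recorder recorder = new Recorder(3); // 3 significant value digits
        for (int i = 0; i < 1000; i++) {
          long start = System.nanoTime();
          // ... run one operation against the DB ...
          recorder.recordValue(System.nanoTime() - start);
        }
        // On each status report ('-s'), an interval histogram snapshot like
        // this can be taken and serialized loss-lessly for later plotting.
        Histogram interval = recorder.getIntervalHistogram();
        System.out.println("p99(ns) = " + interval.getValueAtPercentile(99.0));
      }
    }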
This branch supports the following new options:
* -p measurementtype=[histogram|hdrhistogram|hdrhistogram+histogram|timeseries] (default=histogram)
The new measurement types are hdrhistogram and hdrhistogram+histogram. The default is still
histogram, the old implementation. Ultimately we would remove the old measurement types
and use only HdrHistogram, but the old measurement is left in for comparison's sake.
* -p measurement.interval=[op|intended|both] (default=op)
This new option differentiates between measured intervals: it adds the intended interval (as
described above) and the option to record both the op and intended intervals for comparison
(see the sketch after this list).
* -p hdrhistogram.fileoutput=[true|false] (default=false)
This new option enables periodic writes of the interval histogram to an output file. The path can be set using '-p hdrhistogram.output.path=<PATH>'.
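To make the op/intended distinction concrete, a rough sketch (illustrative
names, not the actual YCSB measurement code):

    // Latency from the scheduled start exposes coordinated omission;
    // latency from the actual start does not.
    static void measureOne(long intendedStartNs, Runnable op) {
      long actualStartNs = System.nanoTime();
      op.run();
      long endNs = System.nanoTime();
      long opLatencyNs = endNs - actualStartNs;         // measurement.interval=op
      long intendedLatencyNs = endNs - intendedStartNs; // measurement.interval=intended
      // Under load, intendedLatencyNs >= opLatencyNs; the difference is the
      // time the operation spent queued past its scheduled start.
    }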
Example parameters:
-target 1000 -s -p workload=site.ycsb.workloads.CoreWorkload -p basicdb.verbose=false -p basicdb.simulatedelay=4 -p measurement.interval=both -p measurementtype=hdrhistogram -p hdrhistogram.fileoutput=true -p maxexecutiontime=60
Further changes made:
* -p status.interval=<number of seconds> (default=10)
Controls the number of seconds between status reports and therefore between HdrHistogram snapshots reported.
* -p basicdb.randomizedelay=[true|false] (default=true)
Controls whether the delay simulated by the mock DB is uniformly random or not.
Further suggestions:
1. Corrected load control: currently, after a pause the load generator will do
operations back to back to catch up; this leads to a flat-out throughput mode
of testing as opposed to controlled load.
2. Move to async model: Scenarios where Ops have no dependency could delegate the
Op execution to a threadpool and thus separate the request rate control from the
synchronous execution of Ops. Measurement would start on queuing for execution.
1. https://groups.google.com/forum/#!msg/mechanical-sympathy/icNZJejUHfE/BfDekfBEs_sJ
2. https://github.com/HdrHistogram/HdrHistogram
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>root</artifactId>
<version>0.18.0-SNAPSHOT</version>
</parent>
<artifactId>core</artifactId>
<name>Core YCSB</name>
<packaging>jar</packaging>
<properties>
<jackson.api.version>1.9.4</jackson.api.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core4</artifactId>
<version>4.1.0-incubating</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<version>${jackson.api.version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
<version>${jackson.api.version}</version>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.1.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hdrhistogram</groupId>
<artifactId>HdrHistogram</artifactId>
<version>2.1.4</version>
</dependency>
</dependencies>
<build>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
</resource>
</resources>
</build>
<profiles>
<profile>
<!-- Build profile when running via ycsb.sh or ycsb.bat -->
<id>source-run</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>stage-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.*;
import java.util.Map.Entry;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
/**
* Basic DB that just prints out the requested operations, instead of doing them against a database.
*/
public class BasicDB extends DB {
public static final String COUNT = "basicdb.count";
public static final String COUNT_DEFAULT = "false";
public static final String VERBOSE = "basicdb.verbose";
public static final String VERBOSE_DEFAULT = "true";
public static final String SIMULATE_DELAY = "basicdb.simulatedelay";
public static final String SIMULATE_DELAY_DEFAULT = "0";
public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay";
public static final String RANDOMIZE_DELAY_DEFAULT = "true";
protected static final Object MUTEX = new Object();
protected static int counter = 0;
protected static Map<Integer, Integer> reads;
protected static Map<Integer, Integer> scans;
protected static Map<Integer, Integer> updates;
protected static Map<Integer, Integer> inserts;
protected static Map<Integer, Integer> deletes;
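// These maps are shared across all BasicDB instances (one per client
// thread); init()/cleanup() guard their lifecycle with MUTEX and
// incCounter() synchronizes on each map when counting.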
protected boolean verbose;
protected boolean randomizedelay;
protected int todelay;
protected boolean count;
public BasicDB() {
todelay = 0;
}
protected void delay() {
if (todelay > 0) {
long delayNs;
if (randomizedelay) {
delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay));
if (delayNs == 0) {
return;
}
} else {
delayNs = TimeUnit.MILLISECONDS.toNanos(todelay);
}
final long deadline = System.nanoTime() + delayNs;
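// parkNanos may return early (spurious wakeup), so keep parking until
// the deadline passes or the thread is interrupted.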
do {
LockSupport.parkNanos(deadline - System.nanoTime());
} while (System.nanoTime() < deadline && !Thread.interrupted());
}
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() {
verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT));
todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT));
randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT));
count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT));
if (verbose) {
synchronized (System.out) {
System.out.println("***************** properties *****************");
Properties p = getProperties();
if (p != null) {
for (Enumeration e = p.propertyNames(); e.hasMoreElements();) {
String k = (String) e.nextElement();
System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\"");
}
}
System.out.println("**********************************************");
}
}
synchronized (MUTEX) {
if (counter == 0 && count) {
reads = new HashMap<Integer, Integer>();
scans = new HashMap<Integer, Integer>();
updates = new HashMap<Integer, Integer>();
inserts = new HashMap<Integer, Integer>();
deletes = new HashMap<Integer, Integer>();
}
counter++;
}
}
protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() {
@Override
protected StringBuilder initialValue() {
return new StringBuilder();
}
};
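// Reuses a per-thread builder so verbose logging does not allocate a new
// StringBuilder on every operation.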
protected static StringBuilder getStringBuilder() {
StringBuilder sb = TL_STRING_BUILDER.get();
sb.setLength(0);
return sb;
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return Zero on success, a non-zero error code on error
*/
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("READ ").append(table).append(" ").append(key).append(" [ ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(reads, hash(table, key, fields));
}
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return Zero on success, a non-zero error code on error
*/
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(scans, hash(table, startkey, fields));
}
return Status.OK;
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return Zero on success, a non-zero error code on error
*/
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(updates, hash(table, key, values));
}
return Status.OK;
}
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return Zero on success, a non-zero error code on error
*/
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("INSERT ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
incCounter(inserts, hash(table, key, values));
}
return Status.OK;
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return Zero on success, a non-zero error code on error
*/
public Status delete(String table, String key) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("DELETE ").append(table).append(" ").append(key);
System.out.println(sb);
}
if (count) {
incCounter(deletes, (table + key).hashCode());
}
return Status.OK;
}
@Override
public void cleanup() {
synchronized (MUTEX) {
int countDown = --counter;
if (count && countDown < 1) {
// TODO - would be nice to call something like:
// Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size());
System.out.println("[READS], Uniques, " + reads.size());
System.out.println("[SCANS], Uniques, " + scans.size());
System.out.println("[UPDATES], Uniques, " + updates.size());
System.out.println("[INSERTS], Uniques, " + inserts.size());
System.out.println("[DELETES], Uniques, " + deletes.size());
}
}
}
/**
* Increments the count on the hash in the map.
* @param map A non-null map to sync and use for incrementing.
* @param hash A hash code to increment.
*/
protected void incCounter(final Map<Integer, Integer> map, final int hash) {
synchronized (map) {
Integer ctr = map.get(hash);
if (ctr == null) {
map.put(hash, 1);
} else {
map.put(hash, ctr + 1);
}
}
}
/**
* Hashes the table, key and fields, sorting the fields first for a consistent
* hash.
* Note that this is expensive as we generate a copy of the fields and a string
* buffer to hash on. Hashing on the objects is problematic.
* @param table The user table.
* @param key The key read or scanned.
* @param fields The fields read or scanned.
* @return The hash code.
*/
protected int hash(final String table, final String key, final Set<String> fields) {
if (fields == null) {
return (table + key).hashCode();
}
StringBuilder buf = getStringBuilder().append(table).append(key);
List<String> sorted = new ArrayList<String>(fields);
Collections.sort(sorted);
for (final String field : sorted) {
buf.append(field);
}
return buf.toString().hashCode();
}
/**
* Hashes the table, key and fields, sorting the fields first for a consistent
* hash.
* Note that this is expensive as we generate a copy of the fields and a string
* buffer to hash on. Hashing on the objects is problematic.
* @param table The user table.
* @param key The key read or scanned.
* @param values The values to hash on.
* @return The hash code.
*/
protected int hash(final String table, final String key, final Map<String, ByteIterator> values) {
if (values == null) {
return (table + key).hashCode();
}
final TreeMap<String, ByteIterator> sorted =
new TreeMap<String, ByteIterator>(values);
StringBuilder buf = getStringBuilder().append(table).append(key);
for (final Entry<String, ByteIterator> entry : sorted.entrySet()) {
entry.getValue().reset();
buf.append(entry.getKey())
.append(entry.getValue().toString());
}
return buf.toString().hashCode();
}
/**
* Short test of BasicDB
*/
/*
public static void main(String[] args) {
BasicDB bdb = new BasicDB();
Properties p = new Properties();
p.setProperty("Sky", "Blue");
p.setProperty("Ocean", "Wet");
bdb.setProperties(p);
bdb.init();
HashMap<String, ByteIterator> fields = new HashMap<String, ByteIterator>();
fields.put("A", new StringByteIterator("X"));
fields.put("B", new StringByteIterator("Y"));
bdb.read("table", "key", null, null);
bdb.insert("table", "key", fields);
fields = new HashMap<String, ByteIterator>();
fields.put("C", new StringByteIterator("Z"));
bdb.update("table", "key", fields);
bdb.delete("table", "key");
}
*/
}
/**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import site.ycsb.workloads.TimeSeriesWorkload;
/**
* Basic DB for printing out time series workloads and/or tracking the distribution
* of keys and fields.
*/
public class BasicTSDB extends BasicDB {
/** Time series workload specific counters. */
protected static Map<Long, Integer> timestamps;
protected static Map<Integer, Integer> floats;
protected static Map<Integer, Integer> integers;
private String timestampKey;
private String valueKey;
private String tagPairDelimiter;
private String queryTimeSpanDelimiter;
private long lastTimestamp;
@Override
public void init() {
super.init();
synchronized (MUTEX) {
if (timestamps == null) {
timestamps = new HashMap<Long, Integer>();
floats = new HashMap<Integer, Integer>();
integers = new HashMap<Integer, Integer>();
}
}
timestampKey = getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = getProperties().getProperty(
TimeSeriesWorkload.VALUE_KEY_PROPERTY,
TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
tagPairDelimiter = getProperties().getProperty(
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY,
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY_DEFAULT);
queryTimeSpanDelimiter = getProperties().getProperty(
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY,
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
}
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("READ ").append(table).append(" ").append(key).append(" [ ");
if (fields != null) {
for (String f : fields) {
sb.append(f).append(" ");
}
} else {
sb.append("<all fields>");
}
sb.append("]");
System.out.println(sb);
}
if (count) {
Set<String> filtered = null;
if (fields != null) {
filtered = new HashSet<String>();
for (final String field : fields) {
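// A timestamp field arrives either as '<timestampKey><delim><ts>' or, for
// a query over a time span, '<timestampKey><delim><start><spanDelim><end>'.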
if (field.startsWith(timestampKey)) {
String[] parts = field.split(tagPairDelimiter);
if (parts[1].contains(queryTimeSpanDelimiter)) {
parts = parts[1].split(queryTimeSpanDelimiter);
lastTimestamp = Long.parseLong(parts[0]);
} else {
lastTimestamp = Long.parseLong(parts[1]);
}
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
} else {
filtered.add(field);
}
}
}
incCounter(reads, hash(table, key, filtered));
}
return Status.OK;
}
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
boolean isFloat = false;
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
final TreeMap<String, ByteIterator> tree = new TreeMap<String, ByteIterator>(values);
for (Map.Entry<String, ByteIterator> entry : tree.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
sb.append(entry.getKey()).append("=")
.append(Utils.bytesToLong(entry.getValue().toArray())).append(" ");
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
isFloat = it.isFloatingPoint();
sb.append(entry.getKey()).append("=")
.append(isFloat ? it.getDouble() : it.getLong()).append(" ");
} else {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
if (!verbose) {
isFloat = ((NumericByteIterator) values.get(valueKey)).isFloatingPoint();
}
int hash = hash(table, key, values);
incCounter(updates, hash);
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
if (isFloat) {
incCounter(floats, hash);
} else {
incCounter(integers, hash);
}
}
return Status.OK;
}
@Override
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
boolean isFloat = false;
if (verbose) {
StringBuilder sb = getStringBuilder();
sb.append("INSERT ").append(table).append(" ").append(key).append(" [ ");
if (values != null) {
final TreeMap<String, ByteIterator> tree = new TreeMap<String, ByteIterator>(values);
for (Map.Entry<String, ByteIterator> entry : tree.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
sb.append(entry.getKey()).append("=")
.append(Utils.bytesToLong(entry.getValue().toArray())).append(" ");
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
isFloat = it.isFloatingPoint();
sb.append(entry.getKey()).append("=")
.append(isFloat ? it.getDouble() : it.getLong()).append(" ");
} else {
sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
}
}
}
sb.append("]");
System.out.println(sb);
}
if (count) {
if (!verbose) {
isFloat = ((NumericByteIterator) values.get(valueKey)).isFloatingPoint();
}
int hash = hash(table, key, values);
incCounter(inserts, hash);
synchronized(timestamps) {
Integer ctr = timestamps.get(lastTimestamp);
if (ctr == null) {
timestamps.put(lastTimestamp, 1);
} else {
timestamps.put(lastTimestamp, ctr + 1);
}
}
if (isFloat) {
incCounter(floats, hash);
} else {
incCounter(integers, hash);
}
}
return Status.OK;
}
@Override
public void cleanup() {
super.cleanup();
if (count && counter < 1) {
System.out.println("[TIMESTAMPS], Unique, " + timestamps.size());
System.out.println("[FLOATS], Unique series, " + floats.size());
System.out.println("[INTEGERS], Unique series, " + integers.size());
long minTs = Long.MAX_VALUE;
long maxTs = Long.MIN_VALUE;
for (final long ts : timestamps.keySet()) {
if (ts > maxTs) {
maxTs = ts;
}
if (ts < minTs) {
minTs = ts;
}
}
System.out.println("[TIMESTAMPS], Min, " + minTs);
System.out.println("[TIMESTAMPS], Max, " + maxTs);
}
}
@Override
protected int hash(final String table, final String key, final Map<String, ByteIterator> values) {
final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>();
for (final Entry<String, ByteIterator> entry : values.entrySet()) {
if (entry.getKey().equals(valueKey)) {
continue;
} else if (entry.getKey().equals(timestampKey)) {
lastTimestamp = ((NumericByteIterator) entry.getValue()).getLong();
entry.getValue().reset();
continue;
}
sorted.put(entry.getKey(), entry.getValue());
}
// yeah it's ugly but gives us a unique hash without having to add hashers
// to all of the ByteIterators.
StringBuilder buf = new StringBuilder().append(table).append(key);
for (final Entry<String, ByteIterator> entry : sorted.entrySet()) {
entry.getValue().reset();
buf.append(entry.getKey())
.append(entry.getValue().toString());
}
return buf.toString().hashCode();
}
}
\ No newline at end of file
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* A ByteIterator that iterates through a byte array.
*/
public class ByteArrayByteIterator extends ByteIterator {
private final int originalOffset;
private final byte[] str;
private int off;
private final int len;
public ByteArrayByteIterator(byte[] s) {
this.str = s;
this.off = 0;
this.len = s.length;
originalOffset = 0;
}
public ByteArrayByteIterator(byte[] s, int off, int len) {
this.str = s;
this.off = off;
this.len = off + len;
originalOffset = off;
}
@Override
public boolean hasNext() {
return off < len;
}
@Override
public byte nextByte() {
byte ret = str[off];
off++;
return ret;
}
@Override
public long bytesLeft() {
return len - off;
}
@Override
public void reset() {
off = originalOffset;
}
@Override
public byte[] toArray() {
int size = (int) bytesLeft();
byte[] bytes = new byte[size];
System.arraycopy(str, off, bytes, 0, size);
off = len;
return bytes;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.Iterator;
/**
* YCSB-specific buffer class. ByteIterators are designed to support
* efficient field generation, and to allow backend drivers that can stream
* fields (instead of materializing them in RAM) to do so.
* <p>
* YCSB originally used String objects to represent field values. This led to
* two performance issues.
* </p><p>
* First, it leads to unnecessary conversions between UTF-16 and UTF-8, both
* during field generation, and when passing data to byte-based backend
* drivers.
* </p><p>
* Second, Java strings are represented internally using UTF-16, and are
* built by appending to a growable array type (StringBuilder or
* StringBuffer), then calling a toString() method. This leads to a 4x memory
* overhead as field values are being built, which prevented YCSB from
* driving large object stores.
* </p>
* The StringByteIterator class contains a number of convenience methods for
* backend drivers that convert between Map&lt;String,String&gt; and
* Map&lt;String,ByteBuffer&gt;.
*
*/
public abstract class ByteIterator implements Iterator<Byte> {
@Override
public abstract boolean hasNext();
@Override
public Byte next() {
throw new UnsupportedOperationException();
}
public abstract byte nextByte();
/** @return byte offset immediately after the last valid byte */
public int nextBuf(byte[] buf, int bufOff) {
int sz = bufOff;
while (sz < buf.length && hasNext()) {
buf[sz] = nextByte();
sz++;
}
return sz;
}
public abstract long bytesLeft();
@Override
public void remove() {
throw new UnsupportedOperationException();
}
/** Resets the iterator so that it can be consumed again. Not all
* implementations support this call.
* @throws UnsupportedOperationException if the implementation hasn't implemented
* the method.
*/
public void reset() {
throw new UnsupportedOperationException();
}
/** Consumes remaining contents of this object, and returns them as a string. */
public String toString() {
Charset cset = Charset.forName("UTF-8");
CharBuffer cb = cset.decode(ByteBuffer.wrap(this.toArray()));
return cb.toString();
}
/** Consumes remaining contents of this object, and returns them as a byte array. */
public byte[] toArray() {
long left = bytesLeft();
if (left != (int) left) {
throw new ArrayIndexOutOfBoundsException("Too much data to fit in one array!");
}
byte[] ret = new byte[(int) left];
for (int i = 0; i < ret.length; i++) {
ret[i] = nextByte();
}
return ret;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import site.ycsb.measurements.exporter.TextMeasurementsExporter;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
* Turn seconds remaining into more useful units.
* i.e., if there are hours' or days' worth of seconds, use them.
*/
final class RemainingFormatter {
private RemainingFormatter() {
// not used
}
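// e.g. format(90061) yields "1 day 1 hour "; minute and second detail is
// dropped once larger units are present.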
public static StringBuilder format(long seconds) {
StringBuilder time = new StringBuilder();
long days = TimeUnit.SECONDS.toDays(seconds);
if (days > 0) {
time.append(days).append(days == 1 ? " day " : " days ");
seconds -= TimeUnit.DAYS.toSeconds(days);
}
long hours = TimeUnit.SECONDS.toHours(seconds);
if (hours > 0) {
time.append(hours).append(hours == 1 ? " hour " : " hours ");
seconds -= TimeUnit.HOURS.toSeconds(hours);
}
/* Only include minute granularity if we're < 1 day. */
if (days < 1) {
long minutes = TimeUnit.SECONDS.toMinutes(seconds);
if (minutes > 0) {
time.append(minutes).append(minutes == 1 ? " minute " : " minutes ");
seconds -= TimeUnit.MINUTES.toSeconds(minutes);
}
}
/* Only bother to include seconds if we're < 1 minute */
if (time.length() == 0) {
time.append(seconds).append(seconds == 1 ? " second " : " seconds ");
}
return time;
}
}
/**
* Main class for executing YCSB.
*/
public final class Client {
private Client() {
//not used
}
public static final String DEFAULT_RECORD_COUNT = "0";
/**
* The target number of operations to perform.
*/
public static final String OPERATION_COUNT_PROPERTY = "operationcount";
/**
* The number of records to load into the database initially.
*/
public static final String RECORD_COUNT_PROPERTY = "recordcount";
/**
* The workload class to be loaded.
*/
public static final String WORKLOAD_PROPERTY = "workload";
/**
* The database class to be used.
*/
public static final String DB_PROPERTY = "db";
/**
* The exporter class to be used. The default is
* site.ycsb.measurements.exporter.TextMeasurementsExporter.
*/
public static final String EXPORTER_PROPERTY = "exporter";
/**
* If set to the path of a file, YCSB will write all output to this file
* instead of STDOUT.
*/
public static final String EXPORT_FILE_PROPERTY = "exportfile";
/**
* The number of YCSB client threads to run.
*/
public static final String THREAD_COUNT_PROPERTY = "threadcount";
/**
* Indicates how many inserts to do if less than recordcount.
* Useful for partitioning the load among multiple servers if the client is the bottleneck.
* Additionally workloads should support the "insertstart" property which tells them which record to start at.
*/
public static final String INSERT_COUNT_PROPERTY = "insertcount";
/**
* Target number of operations per second.
*/
public static final String TARGET_PROPERTY = "target";
/**
* The maximum amount of time (in seconds) for which the benchmark will be run.
*/
public static final String MAX_EXECUTION_TIME = "maxexecutiontime";
/**
* Whether or not this is the transaction phase (run) or not (load).
*/
public static final String DO_TRANSACTIONS_PROPERTY = "dotransactions";
/**
* Whether or not to show status during run.
*/
public static final String STATUS_PROPERTY = "status";
/**
* Use label for status (e.g. to label one experiment out of a whole batch).
*/
public static final String LABEL_PROPERTY = "label";
/**
* An optional thread used to track progress and measure JVM stats.
*/
private static StatusThread statusthread = null;
// HTrace integration related constants.
/**
* All keys for configuring the tracing system start with this prefix.
*/
private static final String HTRACE_KEY_PREFIX = "htrace.";
private static final String CLIENT_WORKLOAD_INIT_SPAN = "Client#workload_init";
private static final String CLIENT_INIT_SPAN = "Client#init";
private static final String CLIENT_WORKLOAD_SPAN = "Client#workload";
private static final String CLIENT_CLEANUP_SPAN = "Client#cleanup";
private static final String CLIENT_EXPORT_MEASUREMENTS_SPAN = "Client#export_measurements";
public static void usageMessage() {
System.out.println("Usage: java site.ycsb.Client [options]");
System.out.println("Options:");
System.out.println(" -threads n: execute using n threads (default: 1) - can also be specified as the \n" +
" \"threadcount\" property using -p");
System.out.println(" -target n: attempt to do n operations per second (default: unlimited) - can also\n" +
" be specified as the \"target\" property using -p");
System.out.println(" -load: run the loading phase of the workload");
System.out.println(" -t: run the transactions phase of the workload (default)");
System.out.println(" -db dbname: specify the name of the DB to use (default: site.ycsb.BasicDB) - \n" +
" can also be specified as the \"db\" property using -p");
System.out.println(" -P propertyfile: load properties from the given file. Multiple files can");
System.out.println(" be specified, and will be processed in the order specified");
System.out.println(" -p name=value: specify a property to be passed to the DB and workloads;");
System.out.println(" multiple properties can be specified, and override any");
System.out.println(" values in the propertyfile");
System.out.println(" -s: show status during run (default: no status)");
System.out.println(" -l label: use label for status (e.g. to label one experiment out of a whole batch)");
System.out.println("");
System.out.println("Required properties:");
System.out.println(" " + WORKLOAD_PROPERTY + ": the name of the workload class to use (e.g. " +
"site.ycsb.workloads.CoreWorkload)");
System.out.println("");
System.out.println("To run the transaction phase from multiple servers, start a separate client on each.");
System.out.println("To run the load phase from multiple servers, start a separate client on each; additionally,");
System.out.println("use the \"insertcount\" and \"insertstart\" properties to divide up the records " +
"to be inserted");
}
public static boolean checkRequiredProperties(Properties props) {
if (props.getProperty(WORKLOAD_PROPERTY) == null) {
System.out.println("Missing property: " + WORKLOAD_PROPERTY);
return false;
}
return true;
}
/**
* Exports the measurements to either sysout or a file using the exporter
* loaded from conf.
*
* @throws IOException Either failed to write to output stream or failed to close it.
*/
private static void exportMeasurements(Properties props, int opcount, long runtime)
throws IOException {
MeasurementsExporter exporter = null;
try {
// if no destination file is provided the results will be written to stdout
OutputStream out;
String exportFile = props.getProperty(EXPORT_FILE_PROPERTY);
if (exportFile == null) {
out = System.out;
} else {
out = new FileOutputStream(exportFile);
}
// if no exporter is provided the default text one will be used
String exporterStr = props.getProperty(EXPORTER_PROPERTY,
"site.ycsb.measurements.exporter.TextMeasurementsExporter");
try {
exporter = (MeasurementsExporter) Class.forName(exporterStr).getConstructor(OutputStream.class)
.newInstance(out);
} catch (Exception e) {
System.err.println("Could not find exporter " + exporterStr
+ ", will use default text reporter.");
e.printStackTrace();
exporter = new TextMeasurementsExporter(out);
}
exporter.write("OVERALL", "RunTime(ms)", runtime);
double throughput = 1000.0 * (opcount) / (runtime);
exporter.write("OVERALL", "Throughput(ops/sec)", throughput);
final Map<String, Long[]> gcs = Utils.getGCStatst();
long totalGCCount = 0;
long totalGCTime = 0;
for (final Entry<String, Long[]> entry : gcs.entrySet()) {
exporter.write("TOTAL_GCS_" + entry.getKey(), "Count", entry.getValue()[0]);
exporter.write("TOTAL_GC_TIME_" + entry.getKey(), "Time(ms)", entry.getValue()[1]);
exporter.write("TOTAL_GC_TIME_%_" + entry.getKey(), "Time(%)",
((double) entry.getValue()[1] / runtime) * (double) 100);
totalGCCount += entry.getValue()[0];
totalGCTime += entry.getValue()[1];
}
exporter.write("TOTAL_GCs", "Count", totalGCCount);
exporter.write("TOTAL_GC_TIME", "Time(ms)", totalGCTime);
exporter.write("TOTAL_GC_TIME_%", "Time(%)", ((double) totalGCTime / runtime) * (double) 100);
if (statusthread != null && statusthread.trackJVMStats()) {
exporter.write("MAX_MEM_USED", "MBs", statusthread.getMaxUsedMem());
exporter.write("MIN_MEM_USED", "MBs", statusthread.getMinUsedMem());
exporter.write("MAX_THREADS", "Count", statusthread.getMaxThreads());
exporter.write("MIN_THREADS", "Count", statusthread.getMinThreads());
exporter.write("MAX_SYS_LOAD_AVG", "Load", statusthread.getMaxLoadAvg());
exporter.write("MIN_SYS_LOAD_AVG", "Load", statusthread.getMinLoadAvg());
}
Measurements.getMeasurements().exportMeasurements(exporter);
} finally {
if (exporter != null) {
exporter.close();
}
}
}
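// A property-driven export sketch (the exporter class below is the shipped
// default; the file path is illustrative): writing results to a file instead
// of stdout only requires the two properties read above.
//
//   java site.ycsb.Client -t -P workloads/workloada \
//       -p exportfile=/tmp/ycsb-results.txt \
//       -p exporter=site.ycsb.measurements.exporter.TextMeasurementsExporter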
@SuppressWarnings("unchecked")
public static void main(String[] args) {
Properties props = parseArguments(args);
boolean status = Boolean.valueOf(props.getProperty(STATUS_PROPERTY, String.valueOf(false)));
String label = props.getProperty(LABEL_PROPERTY, "");
long maxExecutionTime = Long.parseLong(props.getProperty(MAX_EXECUTION_TIME, "0"));
//get number of threads, target and db
int threadcount = Integer.parseInt(props.getProperty(THREAD_COUNT_PROPERTY, "1"));
String dbname = props.getProperty(DB_PROPERTY, "site.ycsb.BasicDB");
int target = Integer.parseInt(props.getProperty(TARGET_PROPERTY, "0"));
//compute the target throughput
double targetperthreadperms = -1;
if (target > 0) {
double targetperthread = ((double) target) / ((double) threadcount);
targetperthreadperms = targetperthread / 1000.0;
}
Thread warningthread = setupWarningThread();
warningthread.start();
Measurements.setProperties(props);
Workload workload = getWorkload(props);
final Tracer tracer = getTracer(props, workload);
initWorkload(props, warningthread, workload, tracer);
System.err.println("Starting test.");
final CountDownLatch completeLatch = new CountDownLatch(threadcount);
final List<ClientThread> clients = initDb(dbname, props, threadcount, targetperthreadperms,
workload, tracer, completeLatch);
if (status) {
boolean standardstatus = false;
if (props.getProperty(Measurements.MEASUREMENT_TYPE_PROPERTY, "").compareTo("timeseries") == 0) {
standardstatus = true;
}
int statusIntervalSeconds = Integer.parseInt(props.getProperty("status.interval", "10"));
boolean trackJVMStats = props.getProperty(Measurements.MEASUREMENT_TRACK_JVM_PROPERTY,
Measurements.MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT).equals("true");
statusthread = new StatusThread(completeLatch, clients, label, standardstatus, statusIntervalSeconds,
trackJVMStats);
statusthread.start();
}
Thread terminator = null;
long st;
long en;
int opsDone;
try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_SPAN)) {
final Map<Thread, ClientThread> threads = new HashMap<>(threadcount);
for (ClientThread client : clients) {
threads.put(new Thread(tracer.wrap(client, "ClientThread")), client);
}
st = System.currentTimeMillis();
for (Thread t : threads.keySet()) {
t.start();
}
if (maxExecutionTime > 0) {
terminator = new TerminatorThread(maxExecutionTime, threads.keySet(), workload);
terminator.start();
}
opsDone = 0;
for (Map.Entry<Thread, ClientThread> entry : threads.entrySet()) {
try {
entry.getKey().join();
opsDone += entry.getValue().getOpsDone();
} catch (InterruptedException ignored) {
// ignored
}
}
en = System.currentTimeMillis();
}
try {
try (final TraceScope span = tracer.newScope(CLIENT_CLEANUP_SPAN)) {
if (terminator != null && !terminator.isInterrupted()) {
terminator.interrupt();
}
if (status) {
// wake up status thread if it's asleep
statusthread.interrupt();
// at this point we assume all the monitored threads are already gone as per above join loop.
try {
statusthread.join();
} catch (InterruptedException ignored) {
// ignored
}
}
workload.cleanup();
}
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
try {
try (final TraceScope span = tracer.newScope(CLIENT_EXPORT_MEASUREMENTS_SPAN)) {
exportMeasurements(props, opsDone, en - st);
}
} catch (IOException e) {
System.err.println("Could not export measurements, error: " + e.getMessage());
e.printStackTrace();
System.exit(-1);
}
System.exit(0);
}
private static List<ClientThread> initDb(String dbname, Properties props, int threadcount,
double targetperthreadperms, Workload workload, Tracer tracer,
CountDownLatch completeLatch) {
boolean initFailed = false;
boolean dotransactions = Boolean.valueOf(props.getProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(true)));
final List<ClientThread> clients = new ArrayList<>(threadcount);
try (final TraceScope span = tracer.newScope(CLIENT_INIT_SPAN)) {
int opcount;
if (dotransactions) {
opcount = Integer.parseInt(props.getProperty(OPERATION_COUNT_PROPERTY, "0"));
} else {
if (props.containsKey(INSERT_COUNT_PROPERTY)) {
opcount = Integer.parseInt(props.getProperty(INSERT_COUNT_PROPERTY, "0"));
} else {
opcount = Integer.parseInt(props.getProperty(RECORD_COUNT_PROPERTY, DEFAULT_RECORD_COUNT));
}
}
if (threadcount > opcount && opcount > 0) {
threadcount = opcount;
System.out.println("Warning: the threadcount is bigger than the operation count; reducing threadcount to " + opcount + ".");
}
for (int threadid = 0; threadid < threadcount; threadid++) {
DB db;
try {
db = DBFactory.newDB(dbname, props, tracer);
} catch (UnknownDBException e) {
System.out.println("Unknown DB " + dbname);
initFailed = true;
break;
}
if (db == null) {
// DBFactory.newDB returns null when the class fails to load or instantiate.
System.out.println("Unable to instantiate DB " + dbname);
initFailed = true;
break;
}
int threadopcount = opcount / threadcount;
// ensure correct number of operations, in case opcount is not a multiple of threadcount
if (threadid < opcount % threadcount) {
++threadopcount;
}
ClientThread t = new ClientThread(db, dotransactions, workload, props, threadopcount, targetperthreadperms,
completeLatch);
t.setThreadId(threadid);
t.setThreadCount(threadcount);
clients.add(t);
}
if (initFailed) {
System.err.println("Error initializing datastore bindings.");
System.exit(0);
}
}
return clients;
}
private static Tracer getTracer(Properties props, Workload workload) {
return new Tracer.Builder("YCSB " + workload.getClass().getSimpleName())
.conf(getHTraceConfiguration(props))
.build();
}
private static void initWorkload(Properties props, Thread warningthread, Workload workload, Tracer tracer) {
try {
try (final TraceScope span = tracer.newScope(CLIENT_WORKLOAD_INIT_SPAN)) {
workload.init(props);
warningthread.interrupt();
}
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
}
private static HTraceConfiguration getHTraceConfiguration(Properties props) {
final Map<String, String> filteredProperties = new HashMap<>();
for (String key : props.stringPropertyNames()) {
if (key.startsWith(HTRACE_KEY_PREFIX)) {
filteredProperties.put(key.substring(HTRACE_KEY_PREFIX.length()), props.getProperty(key));
}
}
return HTraceConfiguration.fromMap(filteredProperties);
}
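// For example, an HTrace property supplied on the command line as
//   -p htrace.span.receiver.classes=org.apache.htrace.core.LocalFileSpanReceiver
// reaches the tracer with the "htrace." prefix stripped, i.e. as
//   span.receiver.classes=org.apache.htrace.core.LocalFileSpanReceiver
// (the receiver class here is illustrative; any htrace.* key is forwarded the same way).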
private static Thread setupWarningThread() {
//show a warning message that creating the workload is taking a while
//but only do so if it is taking longer than 2 seconds
//(showing the message right away if the setup wasn't taking very long was confusing people)
return new Thread() {
@Override
public void run() {
try {
sleep(2000);
} catch (InterruptedException e) {
return;
}
System.err.println(" (might take a few minutes for large data sets)");
}
};
}
private static Workload getWorkload(Properties props) {
ClassLoader classLoader = Client.class.getClassLoader();
try {
Properties projectProp = new Properties();
projectProp.load(classLoader.getResourceAsStream("project.properties"));
System.err.println("YCSB Client " + projectProp.getProperty("version"));
} catch (IOException e) {
System.err.println("Unable to retrieve client version.");
}
System.err.println();
System.err.println("Loading workload...");
try {
Class workloadclass = classLoader.loadClass(props.getProperty(WORKLOAD_PROPERTY));
return (Workload) workloadclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
return null;
}
private static Properties parseArguments(String[] args) {
Properties props = new Properties();
System.err.print("Command line:");
for (String arg : args) {
System.err.print(" " + arg);
}
System.err.println();
Properties fileprops = new Properties();
int argindex = 0;
if (args.length == 0) {
usageMessage();
System.out.println("At least one argument specifying a workload is required.");
System.exit(0);
}
while (args[argindex].startsWith("-")) {
if (args[argindex].compareTo("-threads") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -threads.");
System.exit(0);
}
int tcount = Integer.parseInt(args[argindex]);
props.setProperty(THREAD_COUNT_PROPERTY, String.valueOf(tcount));
argindex++;
} else if (args[argindex].compareTo("-target") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -target.");
System.exit(0);
}
int ttarget = Integer.parseInt(args[argindex]);
props.setProperty(TARGET_PROPERTY, String.valueOf(ttarget));
argindex++;
} else if (args[argindex].compareTo("-load") == 0) {
props.setProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(false));
argindex++;
} else if (args[argindex].compareTo("-t") == 0) {
props.setProperty(DO_TRANSACTIONS_PROPERTY, String.valueOf(true));
argindex++;
} else if (args[argindex].compareTo("-s") == 0) {
props.setProperty(STATUS_PROPERTY, String.valueOf(true));
argindex++;
} else if (args[argindex].compareTo("-db") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -db.");
System.exit(0);
}
props.setProperty(DB_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-l") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -l.");
System.exit(0);
}
props.setProperty(LABEL_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-P") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -P.");
System.exit(0);
}
String propfile = args[argindex];
argindex++;
Properties myfileprops = new Properties();
try {
myfileprops.load(new FileInputStream(propfile));
} catch (IOException e) {
System.out.println("Unable to open the properties file " + propfile);
System.out.println(e.getMessage());
System.exit(0);
}
//Issue #5 - remove call to stringPropertyNames to make compilable under Java 1.5
for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, myfileprops.getProperty(prop));
}
} else if (args[argindex].compareTo("-p") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.out.println("Missing argument value for -p");
System.exit(0);
}
int eq = args[argindex].indexOf('=');
if (eq < 0) {
usageMessage();
System.out.println("Argument '-p' expected to be in key=value format (e.g., -p operationcount=99999)");
System.exit(0);
}
String name = args[argindex].substring(0, eq);
String value = args[argindex].substring(eq + 1);
props.put(name, value);
argindex++;
} else {
usageMessage();
System.out.println("Unknown option " + args[argindex]);
System.exit(0);
}
if (argindex >= args.length) {
break;
}
}
if (argindex != args.length) {
usageMessage();
if (argindex < args.length) {
System.out.println("An argument value without corresponding argument specifier (e.g., -p, -s) was found. "
+ "We expected an argument specifier and instead found " + args[argindex]);
} else {
System.out.println("An argument specifier without corresponding value was found at the end of the supplied " +
"command line arguments.");
}
System.exit(0);
}
//overwrite file properties with properties from the command line
//Issue #5 - remove call to stringPropertyNames to make compilable under Java 1.5
for (Enumeration e = props.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, props.getProperty(prop));
}
props = fileprops;
if (!checkRequiredProperties(props)) {
System.out.println("Failed check required properties.");
System.exit(0);
}
return props;
}
}
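// Example invocations assembled from the usage message above (workload file and
// counts are illustrative):
//
//   # load phase, split across two machines via insertstart/insertcount
//   java site.ycsb.Client -load -db site.ycsb.BasicDB -P workloads/workloada \
//       -p recordcount=1000000 -p insertstart=0      -p insertcount=500000
//   java site.ycsb.Client -load -db site.ycsb.BasicDB -P workloads/workloada \
//       -p recordcount=1000000 -p insertstart=500000 -p insertcount=500000
//
//   # transaction phase: 4 threads, throttled to 1000 ops/sec, with status output
//   java site.ycsb.Client -t -db site.ycsb.BasicDB -P workloads/workloada \
//       -threads 4 -target 1000 -s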
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.LockSupport;
/**
* A thread for executing transactions or data inserts to the database.
*/
public class ClientThread implements Runnable {
// Counts down each of the clients completing.
private final CountDownLatch completeLatch;
private static boolean spinSleep;
private DB db;
private boolean dotransactions;
private Workload workload;
private int opcount;
private double targetOpsPerMs;
private int opsdone;
private int threadid;
private int threadcount;
private Object workloadstate;
private Properties props;
private long targetOpsTickNs;
private final Measurements measurements;
/**
* Constructor.
*
* @param db the DB implementation to use
* @param dotransactions true to do transactions, false to insert data
* @param workload the workload to use
* @param props the properties defining the experiment
* @param opcount the number of operations (transactions or inserts) to do
* @param targetperthreadperms target number of operations per thread per ms
* @param completeLatch The latch tracking the completion of all clients.
*/
public ClientThread(DB db, boolean dotransactions, Workload workload, Properties props, int opcount,
double targetperthreadperms, CountDownLatch completeLatch) {
this.db = db;
this.dotransactions = dotransactions;
this.workload = workload;
this.opcount = opcount;
opsdone = 0;
if (targetperthreadperms > 0) {
targetOpsPerMs = targetperthreadperms;
targetOpsTickNs = (long) (1000000 / targetOpsPerMs);
}
this.props = props;
measurements = Measurements.getMeasurements();
spinSleep = Boolean.valueOf(this.props.getProperty("spin.sleep", "false"));
this.completeLatch = completeLatch;
}
public void setThreadId(final int threadId) {
threadid = threadId;
}
public void setThreadCount(final int threadCount) {
threadcount = threadCount;
}
public int getOpsDone() {
return opsdone;
}
@Override
public void run() {
try {
db.init();
} catch (DBException e) {
e.printStackTrace();
e.printStackTrace(System.out);
return;
}
try {
workloadstate = workload.initThread(props, threadid, threadcount);
} catch (WorkloadException e) {
e.printStackTrace();
e.printStackTrace(System.out);
return;
}
//NOTE: Switching to using nanoTime and parkNanos for time management here such that the measurements
// and the client thread have the same view on time.
//spread the thread operations out so they don't all hit the DB at the same time
// GH issue 4 - throws exception if _target>1 because random.nextInt argument must be >0
// and the sleep() doesn't make sense for granularities < 1 ms anyway
if ((targetOpsPerMs > 0) && (targetOpsPerMs <= 1.0)) {
long randomMinorDelay = ThreadLocalRandom.current().nextInt((int) targetOpsTickNs);
sleepUntil(System.nanoTime() + randomMinorDelay);
}
try {
if (dotransactions) {
long startTimeNanos = System.nanoTime();
while (((opcount == 0) || (opsdone < opcount)) && !workload.isStopRequested()) {
if (!workload.doTransaction(db, workloadstate)) {
break;
}
opsdone++;
throttleNanos(startTimeNanos);
}
} else {
long startTimeNanos = System.nanoTime();
while (((opcount == 0) || (opsdone < opcount)) && !workload.isStopRequested()) {
if (!workload.doInsert(db, workloadstate)) {
break;
}
opsdone++;
throttleNanos(startTimeNanos);
}
}
} catch (Exception e) {
e.printStackTrace();
e.printStackTrace(System.out);
System.exit(0);
}
try {
measurements.setIntendedStartTimeNs(0);
db.cleanup();
} catch (DBException e) {
e.printStackTrace();
e.printStackTrace(System.out);
} finally {
completeLatch.countDown();
}
}
private static void sleepUntil(long deadline) {
while (System.nanoTime() < deadline) {
if (!spinSleep) {
LockSupport.parkNanos(deadline - System.nanoTime());
}
}
}
private void throttleNanos(long startTimeNanos) {
//throttle the operations
if (targetOpsPerMs > 0) {
// delay until next tick
long deadline = startTimeNanos + opsdone * targetOpsTickNs;
sleepUntil(deadline);
measurements.setIntendedStartTimeNs(deadline);
}
}
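// Worked example of the schedule above: with -target 1000 and -threads 4,
// targetperthreadperms = (1000 / 4) / 1000 = 0.25 ops/ms, so
// targetOpsTickNs = 1,000,000 / 0.25 = 4,000,000 ns. After the n-th operation
// the thread parks until startTimeNanos + n * 4 ms, holding each thread to a
// steady 250 ops/sec regardless of per-operation latency jitter.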
/**
* The total amount of work this thread is still expected to do.
*/
int getOpsTodo() {
int todo = opcount - opsdone;
return todo < 0 ? 0 : todo;
}
}
/**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.workloads.CoreWorkload;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.*;
/**
* A simple command line client to a database, using the appropriate site.ycsb.DB implementation.
*/
public final class CommandLine {
private CommandLine() {
//not used
}
public static final String DEFAULT_DB = "site.ycsb.BasicDB";
public static void usageMessage() {
System.out.println("YCSB Command Line Client");
System.out.println("Usage: java site.ycsb.CommandLine [options]");
System.out.println("Options:");
System.out.println(" -P filename: Specify a property file");
System.out.println(" -p name=value: Specify a property value");
System.out.println(" -db classname: Use a specified DB class (can also set the \"db\" property)");
System.out.println(" -table tablename: Use the table name instead of the default \"" +
CoreWorkload.TABLENAME_PROPERTY_DEFAULT + "\"");
System.out.println();
}
public static void help() {
System.out.println("Commands:");
System.out.println(" read key [field1 field2 ...] - Read a record");
System.out.println(" scan key recordcount [field1 field2 ...] - Scan starting at key");
System.out.println(" insert key name1=value1 [name2=value2 ...] - Insert a new record");
System.out.println(" update key name1=value1 [name2=value2 ...] - Update a record");
System.out.println(" delete key - Delete a record");
System.out.println(" table [tablename] - Get or [set] the name of the table");
System.out.println(" quit - Quit");
}
public static void main(String[] args) {
Properties props = new Properties();
Properties fileprops = new Properties();
parseArguments(args, props, fileprops);
for (Enumeration e = props.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, props.getProperty(prop));
}
props = fileprops;
System.out.println("YCSB Command Line client");
System.out.println("Type \"help\" for command line help");
System.out.println("Start with \"-help\" for usage info");
String table = props.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
//create a DB
String dbname = props.getProperty(Client.DB_PROPERTY, DEFAULT_DB);
ClassLoader classLoader = CommandLine.class.getClassLoader();
DB db = null;
try {
Class dbclass = classLoader.loadClass(dbname);
db = (DB) dbclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
System.exit(0);
}
db.setProperties(props);
try {
db.init();
} catch (DBException e) {
e.printStackTrace();
System.exit(0);
}
System.out.println("Connected.");
//main loop
BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
for (;;) {
//get user input
System.out.print("> ");
String input = null;
try {
input = br.readLine();
} catch (IOException e) {
e.printStackTrace();
System.exit(1);
}
if (input == null) {
// EOF on stdin (e.g., piped input exhausted): exit like "quit".
break;
}
if (input.compareTo("") == 0) {
continue;
}
if (input.compareTo("help") == 0) {
help();
continue;
}
if (input.compareTo("quit") == 0) {
break;
}
String[] tokens = input.split(" ");
long st = System.currentTimeMillis();
//handle commands
if (tokens[0].compareTo("table") == 0) {
handleTable(tokens, table);
} else if (tokens[0].compareTo("read") == 0) {
handleRead(tokens, table, db);
} else if (tokens[0].compareTo("scan") == 0) {
handleScan(tokens, table, db);
} else if (tokens[0].compareTo("update") == 0) {
handleUpdate(tokens, table, db);
} else if (tokens[0].compareTo("insert") == 0) {
handleInsert(tokens, table, db);
} else if (tokens[0].compareTo("delete") == 0) {
handleDelete(tokens, table, db);
} else {
System.out.println("Error: unknown command \"" + tokens[0] + "\"");
}
System.out.println((System.currentTimeMillis() - st) + " ms");
}
}
private static void parseArguments(String[] args, Properties props, Properties fileprops) {
int argindex = 0;
while ((argindex < args.length) && (args[argindex].startsWith("-"))) {
if ((args[argindex].compareTo("-help") == 0) ||
(args[argindex].compareTo("--help") == 0) ||
(args[argindex].compareTo("-?") == 0) ||
(args[argindex].compareTo("--?") == 0)) {
usageMessage();
System.exit(0);
}
if (args[argindex].compareTo("-db") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
props.setProperty(Client.DB_PROPERTY, args[argindex]);
argindex++;
} else if (args[argindex].compareTo("-P") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
String propfile = args[argindex];
argindex++;
Properties myfileprops = new Properties();
try {
myfileprops.load(new FileInputStream(propfile));
} catch (IOException e) {
System.out.println(e.getMessage());
System.exit(0);
}
for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) {
String prop = (String) e.nextElement();
fileprops.setProperty(prop, myfileprops.getProperty(prop));
}
} else if (args[argindex].compareTo("-p") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
int eq = args[argindex].indexOf('=');
if (eq < 0) {
usageMessage();
System.exit(0);
}
String name = args[argindex].substring(0, eq);
String value = args[argindex].substring(eq + 1);
props.put(name, value);
argindex++;
} else if (args[argindex].compareTo("-table") == 0) {
argindex++;
if (argindex >= args.length) {
usageMessage();
System.exit(0);
}
props.put(CoreWorkload.TABLENAME_PROPERTY, args[argindex]);
argindex++;
} else {
System.out.println("Unknown option " + args[argindex]);
usageMessage();
System.exit(0);
}
if (argindex >= args.length) {
break;
}
}
if (argindex != args.length) {
usageMessage();
System.exit(0);
}
}
private static void handleDelete(String[] tokens, String table, DB db) {
if (tokens.length != 2) {
System.out.println("Error: syntax is \"delete keyname\"");
} else {
Status ret = db.delete(table, tokens[1]);
System.out.println("Return result: " + ret.getName());
}
}
private static void handleInsert(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"insert keyname name1=value1 [name2=value2 ...]\"");
} else {
HashMap<String, ByteIterator> values = new HashMap<>();
for (int i = 2; i < tokens.length; i++) {
String[] nv = tokens[i].split("=");
values.put(nv[0], new StringByteIterator(nv[1]));
}
Status ret = db.insert(table, tokens[1], values);
System.out.println("Result: " + ret.getName());
}
}
private static void handleUpdate(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"update keyname name1=value1 [name2=value2 ...]\"");
} else {
HashMap<String, ByteIterator> values = new HashMap<>();
for (int i = 2; i < tokens.length; i++) {
String[] nv = tokens[i].split("=");
values.put(nv[0], new StringByteIterator(nv[1]));
}
Status ret = db.update(table, tokens[1], values);
System.out.println("Result: " + ret.getName());
}
}
private static void handleScan(String[] tokens, String table, DB db) {
if (tokens.length < 3) {
System.out.println("Error: syntax is \"scan keyname scanlength [field1 field2 ...]\"");
} else {
Set<String> fields = null;
if (tokens.length > 3) {
fields = new HashSet<>();
fields.addAll(Arrays.asList(tokens).subList(3, tokens.length));
}
Vector<HashMap<String, ByteIterator>> results = new Vector<>();
Status ret = db.scan(table, tokens[1], Integer.parseInt(tokens[2]), fields, results);
System.out.println("Result: " + ret.getName());
int record = 0;
if (results.isEmpty()) {
System.out.println("0 records");
} else {
System.out.println("--------------------------------");
}
for (Map<String, ByteIterator> result : results) {
System.out.println("Record " + (record++));
for (Map.Entry<String, ByteIterator> ent : result.entrySet()) {
System.out.println(ent.getKey() + "=" + ent.getValue());
}
System.out.println("--------------------------------");
}
}
}
private static void handleRead(String[] tokens, String table, DB db) {
if (tokens.length == 1) {
System.out.println("Error: syntax is \"read keyname [field1 field2 ...]\"");
} else {
Set<String> fields = null;
if (tokens.length > 2) {
fields = new HashSet<>();
fields.addAll(Arrays.asList(tokens).subList(2, tokens.length));
}
HashMap<String, ByteIterator> result = new HashMap<>();
Status ret = db.read(table, tokens[1], fields, result);
System.out.println("Return code: " + ret.getName());
for (Map.Entry<String, ByteIterator> ent : result.entrySet()) {
System.out.println(ent.getKey() + "=" + ent.getValue());
}
}
}
private static String handleTable(String[] tokens, String table) {
if (tokens.length == 1) {
System.out.println("Using table \"" + table + "\"");
} else if (tokens.length == 2) {
// Return the new name: assigning to the parameter alone is lost when the
// method returns, so the caller must capture the result.
table = tokens[1];
System.out.println("Using table \"" + table + "\"");
} else {
System.out.println("Error: syntax is \"table tablename\"");
}
return table;
}
}
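// A sample interactive session (timings illustrative). With the default
// BasicDB, operations are acknowledged without storing data, so reads return
// no fields:
//
//   > table usertable
//   Using table "usertable"
//   0 ms
//   > insert user1 field0=hello
//   Result: OK
//   1 ms
//   > read user1
//   Return code: OK
//   0 ms
//   > quit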
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
/**
* A layer for accessing a database to be benchmarked. Each thread in the client
* will be given its own instance of whatever DB class is to be used in the test.
* This class should be constructed using a no-argument constructor, so we can
* load it dynamically. Any argument-based initialization should be
* done by init().
*
* Note that YCSB does not make any use of the return codes returned by this class.
* Instead, it keeps a count of the return values and presents them to the user.
*
* The semantics of methods such as insert, update and delete vary from database
* to database. In particular, operations may or may not be durable once these
* methods commit, and some systems may return 'success' regardless of whether
* or not a tuple with a matching key existed before the call. Rather than dictate
* the exact semantics of these methods, we recommend you either implement them
* to match the database's default semantics, or the semantics of your
* target application. For the sake of comparison between experiments we also
* recommend you explain the semantics you chose when presenting performance results.
*/
public abstract class DB {
/**
* Properties for configuring this DB.
*/
private Properties properties = new Properties();
/**
* Set the properties for this DB.
*/
public void setProperties(Properties p) {
properties = p;
}
/**
* Get the set of properties for this DB.
*/
public Properties getProperties() {
return properties;
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() throws DBException {
}
/**
* Cleanup any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void cleanup() throws DBException {
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public abstract Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result);
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
public abstract Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result);
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return The result of the operation.
*/
public abstract Status update(String table, String key, Map<String, ByteIterator> values);
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public abstract Status insert(String table, String key, Map<String, ByteIterator> values);
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return The result of the operation.
*/
public abstract Status delete(String table, String key);
}
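// A minimal sketch of a custom binding (hypothetical class, not part of this
// tree). It honors the contract above: a no-argument constructor, init() for
// property-based setup, and a Status returned from every operation.
//
//   public class NoopDB extends DB {
//     @Override
//     public Status read(String table, String key, Set<String> fields,
//                        Map<String, ByteIterator> result) {
//       return Status.OK;
//     }
//     @Override
//     public Status scan(String table, String startkey, int recordcount,
//                        Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
//       return Status.OK;
//     }
//     @Override
//     public Status update(String table, String key, Map<String, ByteIterator> values) {
//       return Status.OK;
//     }
//     @Override
//     public Status insert(String table, String key, Map<String, ByteIterator> values) {
//       return Status.OK;
//     }
//     @Override
//     public Status delete(String table, String key) {
//       return Status.OK;
//     }
//   }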
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* Something bad happened while interacting with the database.
*/
public class DBException extends Exception {
private static final long serialVersionUID = 6646883591588721475L;
public DBException(String message) {
super(message);
}
public DBException() {
super();
}
public DBException(String message, Throwable cause) {
super(message, cause);
}
public DBException(Throwable cause) {
super(cause);
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.apache.htrace.core.Tracer;
import java.util.Properties;
/**
* Creates a DB layer by dynamically classloading the specified DB class.
*/
public final class DBFactory {
private DBFactory() {
// not used
}
public static DB newDB(String dbname, Properties properties, final Tracer tracer) throws UnknownDBException {
ClassLoader classLoader = DBFactory.class.getClassLoader();
DB ret;
try {
Class dbclass = classLoader.loadClass(dbname);
ret = (DB) dbclass.newInstance();
} catch (Exception e) {
e.printStackTrace();
return null;
}
ret.setProperties(properties);
return new DBWrapper(ret, tracer);
}
}
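// Usage sketch (values illustrative): this is how the client turns the "db"
// property into a ready-to-measure instance. Note that the object handed back
// is a DBWrapper, so every call through it is timed and counted.
//
//   DB db = DBFactory.newDB("site.ycsb.BasicDB", props, tracer);
//   db.init();
//   Status s = db.insert("usertable", "user1", values);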
/**
* Copyright (c) 2010 Yahoo! Inc., 2016-2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.Map;
import site.ycsb.measurements.Measurements;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Wrapper around a "real" DB that measures latencies and counts return codes.
* Also reports latency separately between OK and failed operations.
*/
public class DBWrapper extends DB {
private final DB db;
private final Measurements measurements;
private final Tracer tracer;
private boolean reportLatencyForEachError = false;
private Set<String> latencyTrackedErrors = new HashSet<String>();
private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY = "reportlatencyforeacherror";
private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT = "false";
private static final String LATENCY_TRACKED_ERRORS_PROPERTY = "latencytrackederrors";
private static final AtomicBoolean LOG_REPORT_CONFIG = new AtomicBoolean(false);
private final String scopeStringCleanup;
private final String scopeStringDelete;
private final String scopeStringInit;
private final String scopeStringInsert;
private final String scopeStringRead;
private final String scopeStringScan;
private final String scopeStringUpdate;
public DBWrapper(final DB db, final Tracer tracer) {
this.db = db;
measurements = Measurements.getMeasurements();
this.tracer = tracer;
final String simple = db.getClass().getSimpleName();
scopeStringCleanup = simple + "#cleanup";
scopeStringDelete = simple + "#delete";
scopeStringInit = simple + "#init";
scopeStringInsert = simple + "#insert";
scopeStringRead = simple + "#read";
scopeStringScan = simple + "#scan";
scopeStringUpdate = simple + "#update";
}
/**
* Set the properties for this DB.
*/
public void setProperties(Properties p) {
db.setProperties(p);
}
/**
* Get the set of properties for this DB.
*/
public Properties getProperties() {
return db.getProperties();
}
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void init() throws DBException {
try (final TraceScope span = tracer.newScope(scopeStringInit)) {
db.init();
this.reportLatencyForEachError = Boolean.parseBoolean(getProperties().
getProperty(REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY,
REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT));
if (!reportLatencyForEachError) {
String latencyTrackedErrorsProperty = getProperties().getProperty(LATENCY_TRACKED_ERRORS_PROPERTY, null);
if (latencyTrackedErrorsProperty != null) {
this.latencyTrackedErrors = new HashSet<String>(Arrays.asList(
latencyTrackedErrorsProperty.split(",")));
}
}
if (LOG_REPORT_CONFIG.compareAndSet(false, true)) {
System.err.println("DBWrapper: report latency for each error is " +
this.reportLatencyForEachError + " and specific error codes to track" +
" for latency are: " + this.latencyTrackedErrors.toString());
}
}
}
/**
* Cleanup any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
public void cleanup() throws DBException {
try (final TraceScope span = tracer.newScope(scopeStringCleanup)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.cleanup();
long en = System.nanoTime();
measure("CLEANUP", Status.OK, ist, st, en);
}
}
/**
* Read a record from the database. Each field/value pair from the result
* will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
try (final TraceScope span = tracer.newScope(scopeStringRead)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.read(table, key, fields, result);
long en = System.nanoTime();
measure("READ", res, ist, st, en);
measurements.reportStatus("READ", res);
return res;
}
}
/**
* Perform a range scan for a set of records in the database.
* Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
try (final TraceScope span = tracer.newScope(scopeStringScan)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.scan(table, startkey, recordcount, fields, result);
long en = System.nanoTime();
measure("SCAN", res, ist, st, en);
measurements.reportStatus("SCAN", res);
return res;
}
}
private void measure(String op, Status result, long intendedStartTimeNanos,
long startTimeNanos, long endTimeNanos) {
String measurementName = op;
if (result == null || !result.isOk()) {
// Resolve the status name once, guarding against a null result before
// dereferencing it.
String resultName = result == null ? "NULL" : result.getName();
if (this.reportLatencyForEachError ||
this.latencyTrackedErrors.contains(resultName)) {
measurementName = op + "-" + resultName;
} else {
measurementName = op + "-FAILED";
}
}
measurements.measure(measurementName,
(int) ((endTimeNanos - startTimeNanos) / 1000));
measurements.measureIntended(measurementName,
(int) ((endTimeNanos - intendedStartTimeNanos) / 1000));
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return The result of the operation.
*/
public Status update(String table, String key,
Map<String, ByteIterator> values) {
try (final TraceScope span = tracer.newScope(scopeStringUpdate)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.update(table, key, values);
long en = System.nanoTime();
measure("UPDATE", res, ist, st, en);
measurements.reportStatus("UPDATE", res);
return res;
}
}
/**
* Insert a record in the database. Any field/value pairs in the specified
* values HashMap will be written into the record with the specified
* record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
try (final TraceScope span = tracer.newScope(scopeStringInsert)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.insert(table, key, values);
long en = System.nanoTime();
measure("INSERT", res, ist, st, en);
measurements.reportStatus("INSERT", res);
return res;
}
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return The result of the operation.
*/
public Status delete(String table, String key) {
try (final TraceScope span = tracer.newScope(scopeStringDelete)) {
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
Status res = db.delete(table, key);
long en = System.nanoTime();
measure("DELETE", res, ist, st, en);
measurements.reportStatus("DELETE", res);
return res;
}
}
}
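// Measurement-naming sketch based on measure() above: a successful read is
// recorded under "READ" and a failed one under "READ-FAILED", unless error
// latencies are broken out per status (e.g. "READ-NOT_FOUND") via:
//
//   -p reportlatencyforeacherror=true
//   -p latencytrackederrors=NOT_FOUND,UNEXPECTED_STATE
//
// Latencies are recorded in microseconds: (endTimeNanos - startTimeNanos) / 1000.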
/**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static java.util.concurrent.TimeUnit.MICROSECONDS;
/**
* A DB that simulates good, bad, and ugly latencies instead of touching a real
* database: most operations are fast, a few are slow, and a rare handful take a
* global write lock that stalls every other thread.
*/
public class GoodBadUglyDB extends DB {
public static final String SIMULATE_DELAY = "gbudb.delays";
public static final String SIMULATE_DELAY_DEFAULT = "200,1000,10000,50000,100000";
private static final ReadWriteLock DB_ACCESS = new ReentrantReadWriteLock();
private long[] delays;
public GoodBadUglyDB() {
// Matches SIMULATE_DELAY_DEFAULT; init() overwrites these values when the
// "gbudb.delays" property is set.
delays = new long[]{200, 1000, 10000, 50000, 100000};
}
private void delay() {
final Random random = ThreadLocalRandom.current();
double p = random.nextDouble();
int mod;
if (p < 0.9) {
mod = 0;
} else if (p < 0.99) {
mod = 1;
} else if (p < 0.9999) {
mod = 2;
} else {
mod = 3;
}
// this will make mod 3 pauses global
Lock lock = mod == 3 ? DB_ACCESS.writeLock() : DB_ACCESS.readLock();
if (mod == 3) {
System.out.println("OUCH");
}
lock.lock();
try {
final long baseDelayNs = MICROSECONDS.toNanos(delays[mod]);
final int delayRangeNs = (int) (MICROSECONDS.toNanos(delays[mod + 1]) - baseDelayNs);
final long delayNs = baseDelayNs + random.nextInt(delayRangeNs);
final long deadline = System.nanoTime() + delayNs;
do {
LockSupport.parkNanos(deadline - System.nanoTime());
} while (System.nanoTime() < deadline && !Thread.interrupted());
} finally {
lock.unlock();
}
}
/**
* Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread.
*/
public void init() {
int i = 0;
for (String delay : getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT).split(",")) {
delays[i++] = Long.parseLong(delay);
}
}
/**
* Read a record from the database. Each field/value pair from the result will be stored in a HashMap.
*
* @param table The name of the table
* @param key The record key of the record to read.
* @param fields The list of fields to read, or null for all of them
* @param result A HashMap of field/value pairs for the result
* @return The result of the operation.
*/
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
delay();
return Status.OK;
}
/**
* Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored
* in a HashMap.
*
* @param table The name of the table
* @param startkey The record key of the first record to read.
* @param recordcount The number of records to read
* @param fields The list of fields to read, or null for all of them
* @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
* @return The result of the operation.
*/
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
delay();
return Status.OK;
}
/**
* Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key, overwriting any existing values with the same field name.
*
* @param table The name of the table
* @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record
* @return The result of the operation.
*/
public Status update(String table, String key, Map<String, ByteIterator> values) {
delay();
return Status.OK;
}
/**
* Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the
* record with the specified record key.
*
* @param table The name of the table
* @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record
* @return The result of the operation.
*/
public Status insert(String table, String key, Map<String, ByteIterator> values) {
delay();
return Status.OK;
}
/**
* Delete a record from the database.
*
* @param table The name of the table
* @param key The record key of the record to delete.
* @return The result of the operation.
*/
public Status delete(String table, String key) {
delay();
return Status.OK;
}
}
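// A sketch overriding the delay buckets (values are microseconds; init() above
// expects five comma-separated entries, each bucket spanning [delays[i], delays[i+1])):
//
//   java site.ycsb.Client -t -db site.ycsb.GoodBadUglyDB -P workloads/workloada \
//       -p gbudb.delays=100,500,5000,20000,50000
//
// With the probabilities in delay(), ~90% of operations land in the first
// bucket, ~9% in the second, ~0.99% in the third, and ~0.01% take the global
// write lock, stalling every other thread for the duration of the last bucket.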
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.io.IOException;
import java.io.InputStream;
/**
* A ByteIterator that iterates through an input stream of bytes.
*/
public class InputStreamByteIterator extends ByteIterator {
private final long len;
private final InputStream ins;
private long off;
private final boolean resetable;
public InputStreamByteIterator(InputStream ins, long len) {
this.len = len;
this.ins = ins;
off = 0;
resetable = ins.markSupported();
if (resetable) {
ins.mark((int) len);
}
}
@Override
public boolean hasNext() {
return off < len;
}
@Override
public byte nextByte() {
int ret;
try {
ret = ins.read();
} catch (Exception e) {
throw new IllegalStateException(e);
}
if (ret == -1) {
throw new IllegalStateException("Past EOF!");
}
off++;
return (byte) ret;
}
@Override
public long bytesLeft() {
return len - off;
}
@Override
public byte[] toArray() {
int size = (int) bytesLeft();
byte[] bytes = new byte[size];
try {
// InputStream.read may legally return fewer bytes than requested even
// before EOF, so keep reading until the buffer is full.
int read = 0;
while (read < size) {
int n = ins.read(bytes, read, size - read);
if (n == -1) {
throw new IllegalStateException("Past EOF!");
}
read += n;
}
} catch (IOException e) {
throw new IllegalStateException(e);
}
off = len;
return bytes;
}
@Override
public void reset() {
if (resetable) {
try {
ins.reset();
ins.mark((int) len);
off = 0;
} catch (IOException e) {
throw new IllegalStateException("Failed to reset the input stream", e);
}
} else {
throw new UnsupportedOperationException();
}
}
}
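// Usage sketch: ByteArrayInputStream supports mark(), so the iterator below is
// resetable; a raw socket stream generally would not be.
//
//   byte[] data = "hello".getBytes();
//   InputStreamByteIterator it =
//       new InputStreamByteIterator(new java.io.ByteArrayInputStream(data), data.length);
//   byte[] copy = it.toArray(); // consumes the stream; off == len afterwards
//   it.reset();                 // rewinds, since markSupported() was true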
/**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* A byte iterator that handles encoding and decoding numeric values.
* Currently this iterator can handle 64 bit signed values and double precision
* floating point values.
*/
public class NumericByteIterator extends ByteIterator {
private final byte[] payload;
private final boolean floatingPoint;
private int off;
public NumericByteIterator(final long value) {
floatingPoint = false;
payload = Utils.longToBytes(value);
off = 0;
}
public NumericByteIterator(final double value) {
floatingPoint = true;
payload = Utils.doubleToBytes(value);
off = 0;
}
@Override
public boolean hasNext() {
return off < payload.length;
}
@Override
public byte nextByte() {
return payload[off++];
}
@Override
public long bytesLeft() {
return payload.length - off;
}
@Override
public void reset() {
off = 0;
}
public long getLong() {
if (floatingPoint) {
throw new IllegalStateException("Byte iterator is of the type double");
}
return Utils.bytesToLong(payload);
}
public double getDouble() {
if (!floatingPoint) {
throw new IllegalStateException("Byte iterator is of the type long");
}
return Utils.bytesToDouble(payload);
}
public boolean isFloatingPoint() {
return floatingPoint;
}
}
\ No newline at end of file
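// Usage sketch: round-tripping a long through the iterator. Calling getDouble()
// on this instance would throw IllegalStateException, per the guards above.
//
//   NumericByteIterator it = new NumericByteIterator(42L);
//   long back = it.getLong();          // 42
//   boolean fp = it.isFloatingPoint(); // false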
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.concurrent.ThreadLocalRandom;
/**
* A ByteIterator that generates a random sequence of bytes.
*/
public class RandomByteIterator extends ByteIterator {
private final long len;
private long off;
private int bufOff;
private final byte[] buf;
@Override
public boolean hasNext() {
return (off + bufOff) < len;
}
private void fillBytesImpl(byte[] buffer, int base) {
int bytes = ThreadLocalRandom.current().nextInt();
switch (buffer.length - base) {
default:
buffer[base + 5] = (byte) (((bytes >> 25) & 95) + ' ');
case 5:
buffer[base + 4] = (byte) (((bytes >> 20) & 63) + ' ');
case 4:
buffer[base + 3] = (byte) (((bytes >> 15) & 31) + ' ');
case 3:
buffer[base + 2] = (byte) (((bytes >> 10) & 95) + ' ');
case 2:
buffer[base + 1] = (byte) (((bytes >> 5) & 63) + ' ');
case 1:
buffer[base + 0] = (byte) (((bytes) & 31) + ' ');
case 0:
break;
}
}
private void fillBytes() {
if (bufOff == buf.length) {
fillBytesImpl(buf, 0);
bufOff = 0;
off += buf.length;
}
}
public RandomByteIterator(long len) {
this.len = len;
this.buf = new byte[6];
this.bufOff = buf.length;
fillBytes();
this.off = 0;
}
public byte nextByte() {
fillBytes();
bufOff++;
return buf[bufOff - 1];
}
@Override
public int nextBuf(byte[] buffer, int bufOffset) {
int ret;
if (len - off < buffer.length - bufOffset) {
ret = (int) (len - off);
} else {
ret = buffer.length - bufOffset;
}
int i;
for (i = 0; i < ret; i += 6) {
fillBytesImpl(buffer, i + bufOffset);
}
off += ret;
return ret + bufOffset;
}
@Override
public long bytesLeft() {
return len - off - bufOff;
}
@Override
public void reset() {
// Also rewind the buffer cursor so bytesLeft() reports the full length again;
// the buffered bytes are random, so reusing them after a reset is harmless.
off = 0;
bufOff = 0;
}
/** Consumes remaining contents of this object, and returns them as a byte array. */
public byte[] toArray() {
long left = bytesLeft();
if (left != (int) left) {
throw new ArrayIndexOutOfBoundsException("Too much data to fit in one array!");
}
byte[] ret = new byte[(int) left];
int bufOffset = 0;
while (bufOffset < ret.length) {
bufOffset = nextBuf(ret, bufOffset);
}
return ret;
}
}
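// Usage sketch: each emitted byte is masked into a range starting at ' ', so
// generated values are (almost entirely) printable ASCII.
//
//   RandomByteIterator it = new RandomByteIterator(16);
//   byte[] value = it.toArray(); // 16 random near-printable bytes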
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* The result of an operation.
*/
public class Status {
private final String name;
private final String description;
/**
* @param name A short name for the status.
* @param description A description of the status.
*/
public Status(String name, String description) {
super();
this.name = name;
this.description = description;
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
@Override
public String toString() {
return "Status [name=" + name + ", description=" + description + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((description == null) ? 0 : description.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Status other = (Status) obj;
if (description == null) {
if (other.description != null) {
return false;
}
} else if (!description.equals(other.description)) {
return false;
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
return true;
}
/**
* Is {@code this} a passing state for the operation: {@link Status#OK} or {@link Status#BATCHED_OK}.
* @return true if the operation is successful, false otherwise
*/
public boolean isOk() {
return this == OK || this == BATCHED_OK;
}
public static final Status OK = new Status("OK", "The operation completed successfully.");
public static final Status ERROR = new Status("ERROR", "The operation failed.");
public static final Status NOT_FOUND = new Status("NOT_FOUND", "The requested record was not found.");
public static final Status NOT_IMPLEMENTED = new Status("NOT_IMPLEMENTED", "The operation is not " +
"implemented for the current binding.");
public static final Status UNEXPECTED_STATE = new Status("UNEXPECTED_STATE", "The operation reported" +
" success, but the result was not as expected.");
public static final Status BAD_REQUEST = new Status("BAD_REQUEST", "The request was not valid.");
public static final Status FORBIDDEN = new Status("FORBIDDEN", "The operation is forbidden.");
public static final Status SERVICE_UNAVAILABLE = new Status("SERVICE_UNAVAILABLE", "Dependent " +
"service for the current binding is not available.");
public static final Status BATCHED_OK = new Status("BATCHED_OK", "The operation has been batched by " +
"the binding to be executed later.");
}
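// Usage sketch (editor's addition; succeeded() is a hypothetical helper): bindings
// return one of the predefined constants and callers branch on isOk(). Custom
// statuses can be minted for binding-specific outcomes.
//
//   Status result = succeeded() ? Status.OK : Status.ERROR;
//   if (!result.isOk()) {
//     System.err.println("operation failed: " + result.getDescription());
//   }
//   Status throttled = new Status("THROTTLED", "The request was rate limited.");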
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.measurements.Measurements;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
* A thread to periodically show the status of the experiment to reassure you that progress is being made.
*/
public class StatusThread extends Thread {
// Counts down each of the clients completing
private final CountDownLatch completeLatch;
// Stores the measurements for the run
private final Measurements measurements;
// Whether or not to track the JVM stats per run
private final boolean trackJVMStats;
// The clients that are running.
private final List<ClientThread> clients;
private final String label;
private final boolean standardstatus;
// The interval for reporting status.
private long sleeptimeNs;
// JVM max/mins
private int maxThreads;
private int minThreads = Integer.MAX_VALUE;
private long maxUsedMem;
private long minUsedMem = Long.MAX_VALUE;
private double maxLoadAvg;
private double minLoadAvg = Double.MAX_VALUE;
private long lastGCCount = 0;
private long lastGCTime = 0;
/**
* Creates a new StatusThread without JVM stat tracking.
*
* @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()}
* as they complete.
* @param clients The clients to collect metrics from.
* @param label The label for the status.
* @param standardstatus If true the status is printed to stdout in addition to stderr.
* @param statusIntervalSeconds The number of seconds between status updates.
*/
public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
String label, boolean standardstatus, int statusIntervalSeconds) {
this(completeLatch, clients, label, standardstatus, statusIntervalSeconds, false);
}
/**
* Creates a new StatusThread.
*
* @param completeLatch The latch that each client thread will {@link CountDownLatch#countDown()}
* as they complete.
* @param clients The clients to collect metrics from.
* @param label The label for the status.
* @param standardstatus If true the status is printed to stdout in addition to stderr.
* @param statusIntervalSeconds The number of seconds between status updates.
* @param trackJVMStats Whether or not to track JVM stats.
*/
public StatusThread(CountDownLatch completeLatch, List<ClientThread> clients,
String label, boolean standardstatus, int statusIntervalSeconds,
boolean trackJVMStats) {
this.completeLatch = completeLatch;
this.clients = clients;
this.label = label;
this.standardstatus = standardstatus;
sleeptimeNs = TimeUnit.SECONDS.toNanos(statusIntervalSeconds);
measurements = Measurements.getMeasurements();
this.trackJVMStats = trackJVMStats;
}
/**
* Run and periodically report status.
*/
@Override
public void run() {
final long startTimeMs = System.currentTimeMillis();
final long startTimeNanos = System.nanoTime();
long deadline = startTimeNanos + sleeptimeNs;
long startIntervalMs = startTimeMs;
long lastTotalOps = 0;
boolean alldone;
do {
long nowMs = System.currentTimeMillis();
lastTotalOps = computeStats(startTimeMs, startIntervalMs, nowMs, lastTotalOps);
if (trackJVMStats) {
measureJVM();
}
alldone = waitForClientsUntil(deadline);
startIntervalMs = nowMs;
deadline += sleeptimeNs;
}
while (!alldone);
if (trackJVMStats) {
measureJVM();
}
// Print the final stats.
computeStats(startTimeMs, startIntervalMs, System.currentTimeMillis(), lastTotalOps);
}
/**
* Computes and prints the stats.
*
* @param startTimeMs The start time of the test.
* @param startIntervalMs The start time of this interval.
* @param endIntervalMs The end time (now) for the interval.
* @param lastTotalOps The last total operations count.
* @return The current operation count.
*/
private long computeStats(final long startTimeMs, long startIntervalMs, long endIntervalMs,
long lastTotalOps) {
SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS");
long totalops = 0;
long todoops = 0;
// Calculate the total number of operations completed.
for (ClientThread t : clients) {
totalops += t.getOpsDone();
todoops += t.getOpsTodo();
}
long interval = endIntervalMs - startTimeMs;
double throughput = 1000.0 * (((double) totalops) / (double) interval);
double curthroughput = 1000.0 * (((double) (totalops - lastTotalOps)) /
((double) (endIntervalMs - startIntervalMs)));
long estremaining = (long) Math.ceil(todoops / throughput);
DecimalFormat d = new DecimalFormat("#.##");
String labelString = this.label + format.format(new Date());
StringBuilder msg = new StringBuilder(labelString).append(" ").append(interval / 1000).append(" sec: ");
msg.append(totalops).append(" operations; ");
if (totalops != 0) {
msg.append(d.format(curthroughput)).append(" current ops/sec; ");
}
if (todoops != 0) {
msg.append("est completion in ").append(RemainingFormatter.format(estremaining));
}
msg.append(Measurements.getMeasurements().getSummary());
System.err.println(msg);
if (standardstatus) {
System.out.println(msg);
}
return totalops;
}
/**
* Waits for all of the clients to finish or the deadline to expire.
*
* @param deadline The current deadline.
* @return True if all of the clients completed.
*/
private boolean waitForClientsUntil(long deadline) {
boolean alldone = false;
long now = System.nanoTime();
while (!alldone && now < deadline) {
try {
alldone = completeLatch.await(deadline - now, TimeUnit.NANOSECONDS);
} catch (InterruptedException ie) {
// If we are interrupted the thread is being asked to shutdown.
// Return true to indicate that and reset the interrupt state
// of the thread.
Thread.currentThread().interrupt();
alldone = true;
}
now = System.nanoTime();
}
return alldone;
}
/**
* Executes the JVM measurements.
*/
private void measureJVM() {
final int threads = Utils.getActiveThreadCount();
if (threads < minThreads) {
minThreads = threads;
}
if (threads > maxThreads) {
maxThreads = threads;
}
measurements.measure("THREAD_COUNT", threads);
// TODO - once measurements allow for other number types, switch to using
// the raw bytes. Otherwise we can track in MB to avoid negative values
// when faced with huge heaps.
final int usedMem = Utils.getUsedMemoryMegaBytes();
if (usedMem < minUsedMem) {
minUsedMem = usedMem;
}
if (usedMem > maxUsedMem) {
maxUsedMem = usedMem;
}
measurements.measure("USED_MEM_MB", usedMem);
// Some JVMs may not implement this feature so if the value is less than
// zero, just omit it.
final double systemLoad = Utils.getSystemLoadAverage();
if (systemLoad >= 0) {
// TODO - store the double if measurements allows for them
measurements.measure("SYS_LOAD_AVG", (int) systemLoad);
if (systemLoad > maxLoadAvg) {
maxLoadAvg = systemLoad;
}
if (systemLoad < minLoadAvg) {
minLoadAvg = systemLoad;
}
}
final long gcs = Utils.getGCTotalCollectionCount();
measurements.measure("GCS", (int) (gcs - lastGCCount));
final long gcTime = Utils.getGCTotalTime();
measurements.measure("GCS_TIME", (int) (gcTime - lastGCTime));
lastGCCount = gcs;
lastGCTime = gcTime;
}
/**
* @return The maximum threads running during the test.
*/
public int getMaxThreads() {
return maxThreads;
}
/**
* @return The minimum threads running during the test.
*/
public int getMinThreads() {
return minThreads;
}
/**
* @return The maximum memory used during the test.
*/
public long getMaxUsedMem() {
return maxUsedMem;
}
/**
* @return The minimum memory used during the test.
*/
public long getMinUsedMem() {
return minUsedMem;
}
/**
* @return The maximum load average during the test.
*/
public double getMaxLoadAvg() {
return maxLoadAvg;
}
/**
* @return The minimum load average during the test.
*/
public double getMinLoadAvg() {
return minLoadAvg;
}
/**
* @return Whether or not the thread is tracking JVM stats.
*/
public boolean trackJVMStats() {
return trackJVMStats;
}
}
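// Wiring sketch (editor's addition, assuming `clients` is an already-constructed
// List<ClientThread> as built by the YCSB driver): each client counts down the
// shared latch when it finishes, and the status thread exits once it reaches zero.
//
//   CountDownLatch done = new CountDownLatch(clients.size());
//   StatusThread status = new StatusThread(done, clients, "", false, 10);
//   status.start();
//   // ... start the client threads; each calls done.countDown() on completion ...
//   status.join();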
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.HashMap;
import java.util.Map;
/**
* A ByteIterator that iterates through a string.
*/
public class StringByteIterator extends ByteIterator {
private String str;
private int off;
/**
* Put all of the entries of one map into the other, converting
* String values into ByteIterators.
*/
public static void putAllAsByteIterators(Map<String, ByteIterator> out, Map<String, String> in) {
for (Map.Entry<String, String> entry : in.entrySet()) {
out.put(entry.getKey(), new StringByteIterator(entry.getValue()));
}
}
/**
* Put all of the entries of one map into the other, converting
* ByteIterator values into Strings.
*/
public static void putAllAsStrings(Map<String, String> out, Map<String, ByteIterator> in) {
for (Map.Entry<String, ByteIterator> entry : in.entrySet()) {
out.put(entry.getKey(), entry.getValue().toString());
}
}
/**
* Create a copy of a map, converting the values from Strings to
* StringByteIterators.
*/
public static Map<String, ByteIterator> getByteIteratorMap(Map<String, String> m) {
HashMap<String, ByteIterator> ret =
new HashMap<String, ByteIterator>();
for (Map.Entry<String, String> entry : m.entrySet()) {
ret.put(entry.getKey(), new StringByteIterator(entry.getValue()));
}
return ret;
}
/**
* Create a copy of a map, converting the values from
* StringByteIterators to Strings.
*/
public static Map<String, String> getStringMap(Map<String, ByteIterator> m) {
HashMap<String, String> ret = new HashMap<String, String>();
for (Map.Entry<String, ByteIterator> entry : m.entrySet()) {
ret.put(entry.getKey(), entry.getValue().toString());
}
return ret;
}
public StringByteIterator(String s) {
this.str = s;
this.off = 0;
}
@Override
public boolean hasNext() {
return off < str.length();
}
@Override
public byte nextByte() {
byte ret = (byte) str.charAt(off);
off++;
return ret;
}
@Override
public long bytesLeft() {
return str.length() - off;
}
@Override
public void reset() {
off = 0;
}
@Override
public byte[] toArray() {
byte[] bytes = new byte[(int) bytesLeft()];
for (int i = 0; i < bytes.length; i++) {
bytes[i] = (byte) str.charAt(off + i);
}
off = str.length();
return bytes;
}
/**
* Specialization of general purpose toString() to avoid unnecessary
* copies.
* <p>
* Creating a new StringByteIterator, then calling toString()
* yields the original String object, and does not perform any copies
* or String conversion operations.
* </p>
*/
@Override
public String toString() {
if (off > 0) {
return super.toString();
} else {
return str;
}
}
}
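// Usage sketch (editor's addition) for the map-conversion helpers. Note the
// toString() specialization above: converting an unread iterator back to a
// String returns the original object without copying.
//
//   Map<String, String> raw = new HashMap<>();
//   raw.put("field0", "value0");
//   Map<String, ByteIterator> vals = StringByteIterator.getByteIteratorMap(raw);
//   String same = vals.get("field0").toString();  // returns "value0", no copy
//   Map<String, String> back = StringByteIterator.getStringMap(vals);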
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.Collection;
/**
* A thread that waits for the maximum specified time, then requests the workload to
* stop and waits for all the client threads passed at initialization to finish.
*
* The maximum execution time passed is assumed to be in seconds.
*
*/
public class TerminatorThread extends Thread {
private final Collection<? extends Thread> threads;
private long maxExecutionTime;
private Workload workload;
private long waitTimeOutInMS;
public TerminatorThread(long maxExecutionTime, Collection<? extends Thread> threads,
Workload workload) {
this.maxExecutionTime = maxExecutionTime;
this.threads = threads;
this.workload = workload;
waitTimeOutInMS = 2000;
System.err.println("Maximum execution time specified as: " + maxExecutionTime + " secs");
}
public void run() {
try {
Thread.sleep(maxExecutionTime * 1000);
} catch (InterruptedException e) {
System.err.println("Could not wait until max specified time, TerminatorThread interrupted.");
return;
}
System.err.println("Maximum time elapsed. Requesting stop for the workload.");
workload.requestStop();
System.err.println("Stop requested for workload. Now Joining!");
for (Thread t : threads) {
while (t.isAlive()) {
try {
t.join(waitTimeOutInMS);
if (t.isAlive()) {
System.out.println("Still waiting for thread " + t.getName() + " to complete. " +
"Workload status: " + workload.isStopRequested());
}
} catch (InterruptedException e) {
// Do nothing. Don't know why I was interrupted.
}
}
}
}
}
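// Usage sketch (editor's addition, assuming `clientThreads` and `workload` were
// created by the driver): start the terminator alongside the clients to cap the
// run at 60 seconds; interrupting it cancels the timeout if the run ends early.
//
//   TerminatorThread terminator = new TerminatorThread(60, clientThreads, workload);
//   terminator.start();
//   // ... if all clients finish before the deadline:
//   terminator.interrupt();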
/*
* Copyright (c) 2018 YCSB Contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import site.ycsb.generator.Generator;
import site.ycsb.generator.IncrementingPrintableStringGenerator;
import site.ycsb.workloads.TimeSeriesWorkload;
import java.util.*;
import java.util.concurrent.TimeUnit;
/**
* Abstract class to adapt the default ycsb DB interface to Timeseries databases.
* This class is mostly here to be extended by Timeseries databases
* originally developed by Andreas Bader in <a href="https://github.com/TSDBBench/YCSB-TS">YCSB-TS</a>.
* <p>
* This class is mostly parsing the workload information passed through the default ycsb interface
* according to the information outlined in {@link TimeSeriesWorkload}.
* It also contains some minor utility methods relevant to Timeseries databases.
* </p>
*
* @implSpec It's vital to call <tt>super.init()</tt> when overwriting the init method
* to correctly initialize the workload-parsing.
*/
public abstract class TimeseriesDB extends DB {
// Defaults for downsampling; with these values it is effectively disabled.
private static final String DOWNSAMPLING_FUNCTION_PROPERTY_DEFAULT = "NONE";
private static final String DOWNSAMPLING_INTERVAL_PROPERTY_DEFAULT = "0";
// debug property loading
private static final String DEBUG_PROPERTY = "debug";
private static final String DEBUG_PROPERTY_DEFAULT = "false";
// test property loading
private static final String TEST_PROPERTY = "test";
private static final String TEST_PROPERTY_DEFAULT = "false";
// Workload parameters that we need to parse this
protected String timestampKey;
protected String valueKey;
protected String tagPairDelimiter;
protected String queryTimeSpanDelimiter;
protected String deleteDelimiter;
protected TimeUnit timestampUnit;
protected String groupByKey;
protected String downsamplingKey;
protected Integer downsamplingInterval;
protected AggregationOperation downsamplingFunction;
// YCSB-parameters
protected boolean debug;
protected boolean test;
/**
* Initialize any state for this DB.
* Called once per DB instance; there is one DB instance per client thread.
*/
@Override
public void init() throws DBException {
// taken from BasicTSDB
timestampKey = getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = getProperties().getProperty(
TimeSeriesWorkload.VALUE_KEY_PROPERTY,
TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
tagPairDelimiter = getProperties().getProperty(
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY,
TimeSeriesWorkload.PAIR_DELIMITER_PROPERTY_DEFAULT);
queryTimeSpanDelimiter = getProperties().getProperty(
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY,
TimeSeriesWorkload.QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = getProperties().getProperty(
TimeSeriesWorkload.DELETE_DELIMITER_PROPERTY,
TimeSeriesWorkload.DELETE_DELIMITER_PROPERTY_DEFAULT);
timestampUnit = TimeUnit.valueOf(getProperties().getProperty(
TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY,
TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY_DEFAULT));
groupByKey = getProperties().getProperty(
TimeSeriesWorkload.GROUPBY_KEY_PROPERTY,
TimeSeriesWorkload.GROUPBY_KEY_PROPERTY_DEFAULT);
downsamplingKey = getProperties().getProperty(
TimeSeriesWorkload.DOWNSAMPLING_KEY_PROPERTY,
TimeSeriesWorkload.DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsamplingFunction = TimeseriesDB.AggregationOperation.valueOf(getProperties()
.getProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, DOWNSAMPLING_FUNCTION_PROPERTY_DEFAULT));
downsamplingInterval = Integer.valueOf(getProperties()
.getProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, DOWNSAMPLING_INTERVAL_PROPERTY_DEFAULT));
test = Boolean.parseBoolean(getProperties().getProperty(TEST_PROPERTY, TEST_PROPERTY_DEFAULT));
debug = Boolean.parseBoolean(getProperties().getProperty(DEBUG_PROPERTY, DEBUG_PROPERTY_DEFAULT));
}
@Override
public final Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
Map<String, List<String>> tagQueries = new HashMap<>();
Long timestamp = null;
for (String field : fields) {
if (field.startsWith(timestampKey)) {
String[] timestampParts = field.split(tagPairDelimiter);
if (timestampParts[1].contains(queryTimeSpanDelimiter)) {
// Since we're looking for a single datapoint, a range of timestamps makes no sense.
// As we cannot throw an exception to bail out here, we return `BAD_REQUEST` instead.
return Status.BAD_REQUEST;
}
timestamp = Long.valueOf(timestampParts[1]);
} else {
String[] queryParts = field.split(tagPairDelimiter);
tagQueries.computeIfAbsent(queryParts[0], k -> new ArrayList<>()).add(queryParts[1]);
}
}
if (timestamp == null) {
return Status.BAD_REQUEST;
}
return read(table, timestamp, tagQueries);
}
/**
* Read a record from the database. Each value from the result will be stored in a HashMap
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to read.
* @param tags The tag/tagvalue pairs to filter on (can be empty)
* @return A {@link Status} detailing the outcome of the read.
*/
protected abstract Status read(String metric, long timestamp, Map<String, List<String>> tags);
/**
* @inheritDoc
* @implNote this method parses the information passed to it and subsequently passes it to the modified
* interface at {@link #scan(String, long, long, Map, AggregationOperation, int, TimeUnit)}
*/
@Override
public final Status scan(String table, String startkey, int recordcount, Set<String> fields,
Vector<HashMap<String, ByteIterator>> result) {
Map<String, List<String>> tagQueries = new HashMap<>();
TimeseriesDB.AggregationOperation aggregationOperation = TimeseriesDB.AggregationOperation.NONE;
Set<String> groupByFields = new HashSet<>();
boolean rangeSet = false;
long start = 0;
long end = 0;
for (String field : fields) {
if (field.startsWith(timestampKey)) {
String[] timestampParts = field.split(tagPairDelimiter);
if (!timestampParts[1].contains(queryTimeSpanDelimiter)) {
// seems like this should be a more elaborate query.
// for now we don't support scanning single timestamps
// TODO: Support Timestamp range queries
return Status.NOT_IMPLEMENTED;
}
String[] rangeParts = timestampParts[1].split(queryTimeSpanDelimiter);
rangeSet = true;
start = Long.valueOf(rangeParts[0]);
end = Long.valueOf(rangeParts[1]);
} else if (field.startsWith(groupByKey)) {
String groupBySpecifier = field.split(tagPairDelimiter)[1];
aggregationOperation = TimeseriesDB.AggregationOperation.valueOf(groupBySpecifier);
} else if (field.startsWith(downsamplingKey)) {
String downsamplingSpec = field.split(tagPairDelimiter)[1];
// the spec the workload passes must match the configured downsampling exactly:
if (!downsamplingSpec.equals(downsamplingFunction.toString() + downsamplingInterval.toString())) {
System.err.print("Downsampling specification for Scan did not match configured downsampling");
return Status.BAD_REQUEST;
}
} else {
String[] queryParts = field.split(tagPairDelimiter);
if (queryParts.length == 1) {
// we should probably warn about this being ignored...
System.err.println("Grouping by arbitrary series is currently not supported");
groupByFields.add(field);
} else {
tagQueries.computeIfAbsent(queryParts[0], k -> new ArrayList<>()).add(queryParts[1]);
}
}
}
if (!rangeSet) {
return Status.BAD_REQUEST;
}
return scan(table, start, end, tagQueries, downsamplingFunction, downsamplingInterval, timestampUnit);
}
/**
* Perform a range scan for a set of records in the database. Each value from the result will be stored in a
* HashMap.
*
* @param metric The name of the metric
* @param startTs The timestamp of the first record to read.
* @param endTs The timestamp of the last record to read.
* @param tags The tag/tagvalue pairs to filter on (can be empty).
* @param aggreg The aggregation operation to perform.
* @param timeValue The length of the aggregation window, expressed in units of timeUnit.
* @param timeUnit The time unit of the aggregation window.
* @return A {@link Status} detailing the outcome of the scan operation.
*/
protected abstract Status scan(String metric, long startTs, long endTs, Map<String, List<String>> tags,
AggregationOperation aggreg, int timeValue, TimeUnit timeUnit);
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
return Status.NOT_IMPLEMENTED;
// not supportable for general TSDBs
// can be explicitly overwritten in inheriting classes
}
@Override
public final Status insert(String table, String key, Map<String, ByteIterator> values) {
NumericByteIterator tsContainer = (NumericByteIterator) values.remove(timestampKey);
NumericByteIterator valueContainer = (NumericByteIterator) values.remove(valueKey);
if (valueContainer.isFloatingPoint()) {
return insert(table, tsContainer.getLong(), valueContainer.getDouble(), values);
} else {
return insert(table, tsContainer.getLong(), valueContainer.getLong(), values);
}
}
/**
* Insert a record into the database. Any tags/tagvalue pairs in the specified tagmap and the given value will be
* written into the record with the specified timestamp.
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to insert.
* @param value The actual value to insert.
* @param tags A Map of tag/tagvalue pairs to insert as tags
* @return A {@link Status} detailing the outcome of the insert
*/
protected abstract Status insert(String metric, long timestamp, long value, Map<String, ByteIterator> tags);
/**
* Insert a record in the database. Any tags/tagvalue pairs in the specified tagmap and the given value will be
* written into the record with the specified timestamp.
*
* @param metric The name of the metric
* @param timestamp The timestamp of the record to insert.
* @param value The actual value to insert.
* @param tags A Map of tag/tagvalue pairs to insert as tags.
* @return A {@link Status} detailing the outcome of the insert
*/
protected abstract Status insert(String metric, long timestamp, double value, Map<String, ByteIterator> tags);
/**
* NOTE: This operation is usually <b>not</b> supported for Time-Series databases.
* Deletion of data is often instead regulated through automatic cleanup and "retention policies" or similar.
*
* @return Status.NOT_IMPLEMENTED or a {@link Status} specifying the outcome of deletion
* in case the operation is supported.
*/
public Status delete(String table, String key) {
return Status.NOT_IMPLEMENTED;
}
/**
* Examines the given {@link Properties} and returns an array containing the Tag Keys
* (basically matching column names for traditional Relational DBs) that are detailed in the workload specification.
* See {@link TimeSeriesWorkload} for how these are generated.
* <p>
* This method is intended to be called during the initialization phase to create a table schema
* for DBMSs that require such a schema before values can be inserted (or queried).
*
* @param properties The properties detailing the workload configuration.
* @return An array of strings specifying all allowed TagKeys (or column names)
* except for the "value" and the "timestamp" column name.
* @implSpec WARNING this method must exactly match how tagKeys are generated by the {@link TimeSeriesWorkload},
* otherwise databases requiring this information will most likely break!
*/
protected static String[] getPossibleTagKeys(Properties properties) {
final int tagCount = Integer.parseInt(properties.getProperty(TimeSeriesWorkload.TAG_COUNT_PROPERTY,
TimeSeriesWorkload.TAG_COUNT_PROPERTY_DEFAULT));
final int tagKeylength = Integer.parseInt(properties.getProperty(TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY,
TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY_DEFAULT));
Generator<String> tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeylength);
String[] tagNames = new String[tagCount];
for (int i = 0; i < tagCount; i++) {
tagNames[i] = tagKeyGenerator.nextValue();
}
return tagNames;
}
/**
* An enum containing the possible aggregation operations.
* Not all of these operations are required to be supported by implementing classes.
* <p>
* Aggregations are applied when using the <tt>SCAN</tt> operation on a range of timestamps.
* That way the result set is reduced from multiple records into
* a single one or one record for each group specified through <tt>GROUP BY</tt> clauses.
*/
public enum AggregationOperation {
/**
* No aggregation whatsoever. Return the results as a full table
*/
NONE,
/**
* Sum the values of the matching records when calculating the value.
* GroupBy criteria apply where relevant for sub-summing.
*/
SUM,
/**
* Calculate the arithmetic mean over the value across matching records when calculating the value.
* GroupBy criteria apply where relevant for group-targeted averages
*/
AVERAGE,
/**
* Count the number of matching records and return that as value.
* GroupBy criteria apply where relevant.
*/
COUNT,
/**
* Return only the maximum of the matching record values.
* GroupBy criteria apply and result in group-based maxima.
*/
MAX,
/**
* Return only the minimum of the matching record values.
* GroupBy criteria apply and result in group-based minima.
*/
MIN;
}
}
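// Skeleton sketch (editor's addition) of a hypothetical binding built on this
// adapter. Per the @implSpec above, super.init() must run first so the
// workload-parsing fields (timestampKey, tagPairDelimiter, ...) are populated.
//
//   public class MyTsdbClient extends TimeseriesDB {
//     @Override
//     public void init() throws DBException {
//       super.init();  // mandatory: parses the workload properties
//       // ... open connections here ...
//     }
//     @Override
//     protected Status read(String metric, long timestamp, Map<String, List<String>> tags) {
//       return Status.NOT_IMPLEMENTED;
//     }
//     @Override
//     protected Status scan(String metric, long startTs, long endTs, Map<String, List<String>> tags,
//                           AggregationOperation aggreg, int timeValue, TimeUnit timeUnit) {
//       return Status.NOT_IMPLEMENTED;
//     }
//     @Override
//     protected Status insert(String metric, long timestamp, long value, Map<String, ByteIterator> tags) {
//       return Status.OK;
//     }
//     @Override
//     protected Status insert(String metric, long timestamp, double value, Map<String, ByteIterator> tags) {
//       return Status.OK;
//     }
//   }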
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* Could not create the specified DB.
*/
public class UnknownDBException extends Exception {
/**
*
*/
private static final long serialVersionUID = 459099842269616836L;
public UnknownDBException(String message) {
super(message);
}
public UnknownDBException() {
super();
}
public UnknownDBException(String message, Throwable cause) {
super(message, cause);
}
public UnknownDBException(Throwable cause) {
super(cause);
}
}
/**
* Copyright (c) 2010 Yahoo! Inc., 2016 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
/**
* Utility functions.
*/
public final class Utils {
private Utils() {
// not used
}
/**
* Hash an integer value.
*/
public static long hash(long val) {
return fnvhash64(val);
}
public static final long FNV_OFFSET_BASIS_64 = 0xCBF29CE484222325L;
public static final long FNV_PRIME_64 = 1099511628211L;
/**
* 64 bit FNV hash. Produces more "random" hashes than (say) String.hashCode().
*
* @param val The value to hash.
* @return The hash value
*/
public static long fnvhash64(long val) {
//from http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
long hashval = FNV_OFFSET_BASIS_64;
for (int i = 0; i < 8; i++) {
long octet = val & 0x00ff;
val = val >> 8;
hashval = hashval ^ octet;
hashval = hashval * FNV_PRIME_64;
//hashval = hashval ^ octet;
}
return Math.abs(hashval);
}
/**
* Reads a big-endian 8-byte long from the start of the given array.
* @param bytes The array to read from.
* @return A long integer.
* @throws IndexOutOfBoundsException if the byte array is too small.
* @throws NullPointerException if the byte array is null.
*/
public static long bytesToLong(final byte[] bytes) {
return (bytes[0] & 0xFFL) << 56
| (bytes[1] & 0xFFL) << 48
| (bytes[2] & 0xFFL) << 40
| (bytes[3] & 0xFFL) << 32
| (bytes[4] & 0xFFL) << 24
| (bytes[5] & 0xFFL) << 16
| (bytes[6] & 0xFFL) << 8
| (bytes[7] & 0xFFL) << 0;
}
/**
* Encodes the given long as a big-endian 8-byte array.
* @param val The value to encode.
* @return A byte array of length 8.
*/
public static byte[] longToBytes(final long val) {
final byte[] bytes = new byte[8];
bytes[0] = (byte) (val >>> 56);
bytes[1] = (byte) (val >>> 48);
bytes[2] = (byte) (val >>> 40);
bytes[3] = (byte) (val >>> 32);
bytes[4] = (byte) (val >>> 24);
bytes[5] = (byte) (val >>> 16);
bytes[6] = (byte) (val >>> 8);
bytes[7] = (byte) (val >>> 0);
return bytes;
}
/**
* Parses the byte array into a double.
* The byte array must be at least 8 bytes long and have been encoded using
* {@link #doubleToBytes}. If the array is longer than 8 bytes, only the
* first 8 bytes are parsed.
* @param bytes The byte array to parse, at least 8 bytes.
* @return A double value read from the byte array.
* @throws IllegalArgumentException if the byte array is shorter than 8 bytes.
*/
public static double bytesToDouble(final byte[] bytes) {
if (bytes.length < 8) {
throw new IllegalArgumentException("Byte array must be at least 8 bytes wide.");
}
return Double.longBitsToDouble(bytesToLong(bytes));
}
/**
* Encodes the double value as an 8 byte array.
* @param val The double value to encode.
* @return A byte array of length 8.
*/
public static byte[] doubleToBytes(final double val) {
return longToBytes(Double.doubleToRawLongBits(val));
}
/**
* Measure the estimated active thread count in the current thread group.
* Since this calls {@link Thread#activeCount()} it should be called from the
* main thread or one started by the main thread. Threads included in the
* count can be in any state.
* For a more accurate count we could use {@code Thread.getAllStackTraces().size()}
* but that freezes the JVM and incurs a high overhead.
* @return An estimated thread count, good for showing the thread count
* over time.
*/
public static int getActiveThreadCount() {
return Thread.activeCount();
}
/** @return The currently used memory in bytes */
public static long getUsedMemoryBytes() {
final Runtime runtime = Runtime.getRuntime();
return runtime.totalMemory() - runtime.freeMemory();
}
/** @return The currently used memory in megabytes. */
public static int getUsedMemoryMegaBytes() {
return (int) (getUsedMemoryBytes() / 1024 / 1024);
}
/** @return The current system load average if supported by the JDK.
* If it's not supported, the value will be negative. */
public static double getSystemLoadAverage() {
final OperatingSystemMXBean osBean =
ManagementFactory.getOperatingSystemMXBean();
return osBean.getSystemLoadAverage();
}
/** @return The total number of garbage collections executed for all
* memory pools. */
public static long getGCTotalCollectionCount() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
long count = 0;
for (final GarbageCollectorMXBean bean : gcBeans) {
if (bean.getCollectionCount() < 0) {
continue;
}
count += bean.getCollectionCount();
}
return count;
}
/** @return The total time, in milliseconds, spent in GC. */
public static long getGCTotalTime() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
long time = 0;
for (final GarbageCollectorMXBean bean : gcBeans) {
if (bean.getCollectionTime() < 0) {
continue;
}
time += bean.getCollectionTime();
}
return time;
}
/**
* Returns a map of garbage collectors and their stats.
* The first object in the array is the total count since JVM start and the
* second is the total time (ms) since JVM start.
* If a garbage collector does not support the collector MXBean, then it
* will not be represented in the map.
* @return A non-null map of garbage collectors and their metrics. The map
* may be empty.
*/
public static Map<String, Long[]> getGCStatst() {
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
final Map<String, Long[]> map = new HashMap<String, Long[]>(gcBeans.size());
for (final GarbageCollectorMXBean bean : gcBeans) {
if (!bean.isValid() || bean.getCollectionCount() < 0 ||
bean.getCollectionTime() < 0) {
continue;
}
final Long[] measurements = new Long[]{
bean.getCollectionCount(),
bean.getCollectionTime()
};
map.put(bean.getName().replace(" ", "_"), measurements);
}
return map;
}
/**
* Simple Fisher-Yates array shuffle to randomize discrete sets.
* @param array The array to randomly shuffle.
* @return The shuffled array.
*/
public static <T> T [] shuffleArray(final T[] array) {
for (int i = array.length -1; i > 0; i--) {
final int idx = ThreadLocalRandom.current().nextInt(i + 1);
final T temp = array[idx];
array[idx] = array[i];
array[i] = temp;
}
return array;
}
}
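// Round-trip sketch (editor's addition) for the byte-encoding helpers: longs and
// doubles encode big-endian into 8-byte arrays and decode back losslessly.
//
//   byte[] enc = Utils.longToBytes(42L);
//   assert Utils.bytesToLong(enc) == 42L;
//   byte[] dbl = Utils.doubleToBytes(2.5);
//   assert Utils.bytesToDouble(dbl) == 2.5;
//   long spread = Utils.hash(12345L);  // 64-bit FNV hash, spreads sequential keys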
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.Properties;
/**
* One experiment scenario. One object of this type will
* be instantiated and shared among all client threads. This class
* should be constructed using a no-argument constructor, so we can
* load it dynamically. Any argument-based initialization should be
* done by init().
*
* If you extend this class, you should support the "insertstart" property. This
* allows the Client to proceed from multiple clients on different machines, in case
* the client is the bottleneck. For example, if we want to load 1 million records from
* 2 machines, the first machine should have insertstart=0 and the second insertstart=500000. Additionally,
* the "insertcount" property, which is interpreted by Client, can be used to tell each instance of the
* client how many inserts to do. In the example above, both clients should have insertcount=500000.
*/
public abstract class Workload {
public static final String INSERT_START_PROPERTY = "insertstart";
public static final String INSERT_COUNT_PROPERTY = "insertcount";
public static final String INSERT_START_PROPERTY_DEFAULT = "0";
private volatile AtomicBoolean stopRequested = new AtomicBoolean(false);
/** Operations available for a database. */
public enum Operation {
READ,
UPDATE,
INSERT,
SCAN,
DELETE
}
/**
* Initialize the scenario. Create any generators and other shared objects here.
* Called once, in the main client thread, before any operations are started.
*/
public void init(Properties p) throws WorkloadException {
}
/**
* Initialize any state for a particular client thread. Since the scenario object
* will be shared among all threads, this is the place to create any state that is specific
* to one thread. To be clear, this means the returned object should be created anew on each
* call to initThread(); do not return the same object multiple times.
* The returned object will be passed to invocations of doInsert() and doTransaction()
* for this thread. There should be no side effects from this call; all state should be encapsulated
* in the returned object. If you have no state to retain for this thread, return null. (But if you have
* no state to retain for this thread, probably you don't need to override initThread().)
*
* @return An object holding any state specific to this thread; it will be passed to
* invocations of doInsert() and doTransaction() for this thread. Return null if the
* workload keeps no per-thread state.
*/
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
return null;
}
/**
* Cleanup the scenario. Called once, in the main client thread, after all operations have completed.
*/
public void cleanup() throws WorkloadException {
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads, this
* function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
* other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
* effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
* synchronized, since each thread has its own threadstate instance.
*/
public abstract boolean doInsert(DB db, Object threadstate);
/**
* Do one transaction operation. Because it will be called concurrently from multiple client threads, this
* function must be thread safe. However, avoid synchronized, or the threads will block waiting for each
* other, and it will be difficult to reach the target throughput. Ideally, this function would have no side
* effects other than DB operations and mutations on threadstate. Mutations to threadstate do not need to be
* synchronized, since each thread has its own threadstate instance.
*
* @return false if the workload knows it is done for this thread. Client will terminate the thread.
* Return true otherwise. Return true for workloads that rely on operationcount. For workloads that read
* traces from a file, return true when there are more to do, false when you are done.
*/
public abstract boolean doTransaction(DB db, Object threadstate);
/**
* Allows scheduling a request to stop the workload.
*/
public void requestStop() {
stopRequested.set(true);
}
/**
* Check the status of the stop request flag.
* @return true if stop was requested, false otherwise.
*/
public boolean isStopRequested() {
return stopRequested.get();
}
}
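// Skeleton sketch (editor's addition) of a hypothetical minimal workload; real
// workloads (e.g. CoreWorkload) generate keys and values here. As noted above,
// doInsert() and doTransaction() must be thread safe without synchronization.
//
//   public class NoopWorkload extends Workload {
//     @Override
//     public boolean doInsert(DB db, Object threadstate) {
//       return db.insert("usertable", "key0", new HashMap<String, ByteIterator>()).isOk();
//     }
//     @Override
//     public boolean doTransaction(DB db, Object threadstate) {
//       return db.read("usertable", "key0", null, new HashMap<String, ByteIterator>()).isOk();
//     }
//   }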
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
/**
* The workload tried to do something bad.
*/
public class WorkloadException extends Exception {
/**
*
*/
private static final long serialVersionUID = 8844396756042772132L;
public WorkloadException(String message) {
super(message);
}
public WorkloadException() {
super();
}
public WorkloadException(String message, Throwable cause) {
super(message, cause);
}
public WorkloadException(Throwable cause) {
super(cause);
}
}
/**
* Copyright (c) 2015-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.locks.ReentrantLock;
/**
* A CounterGenerator that reports generated integers via lastInt()
* only after they have been acknowledged.
*/
public class AcknowledgedCounterGenerator extends CounterGenerator {
/** The size of the window of pending id ack's. 2^20 = {@value} */
static final int WINDOW_SIZE = Integer.rotateLeft(1, 20);
/** The mask to use to turn an id into a slot in {@link #window}. */
private static final int WINDOW_MASK = WINDOW_SIZE - 1;
private final ReentrantLock lock;
private final boolean[] window;
private volatile long limit;
/**
* Create a counter that starts at countstart.
*/
public AcknowledgedCounterGenerator(long countstart) {
super(countstart);
lock = new ReentrantLock();
window = new boolean[WINDOW_SIZE];
limit = countstart - 1;
}
/**
* In this generator, the highest acknowledged counter value
* (as opposed to the highest generated counter value).
*/
@Override
public Long lastValue() {
return limit;
}
/**
* Make a generated counter value available via lastInt().
*/
public void acknowledge(long value) {
final int currentSlot = (int)(value & WINDOW_MASK);
if (window[currentSlot]) {
throw new RuntimeException("Too many unacknowledged insertion keys.");
}
window[currentSlot] = true;
if (lock.tryLock()) {
// move a contiguous sequence from the window
// over to the "limit" variable
try {
// Only loop through the entire window at most once.
long beforeFirstSlot = (limit & WINDOW_MASK);
long index;
for (index = limit + 1; index != beforeFirstSlot; ++index) {
int slot = (int)(index & WINDOW_MASK);
if (!window[slot]) {
break;
}
window[slot] = false;
}
limit = index - 1;
} finally {
lock.unlock();
}
}
}
}
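// Behavior sketch (editor's addition): ids are handed out by nextValue(), but
// lastValue() only advances once a contiguous prefix of ids has been acknowledged.
//
//   AcknowledgedCounterGenerator gen = new AcknowledgedCounterGenerator(0);
//   long id = gen.nextValue();     // hands out 0
//   long seen = gen.lastValue();   // still -1: nothing acknowledged yet
//   gen.acknowledge(id);
//   seen = gen.lastValue();        // now 0: every id <= 0 is acknowledged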
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* A trivial integer generator that always returns the same value.
*
*/
public class ConstantIntegerGenerator extends NumberGenerator {
private final int i;
/**
* @param i The integer that this generator will always return.
*/
public ConstantIntegerGenerator(int i) {
this.i = i;
}
@Override
public Integer nextValue() {
return i;
}
@Override
public double mean() {
return i;
}
}
/**
* Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.atomic.AtomicLong;
/**
* Generates a sequence of integers.
* (0, 1, ...)
*/
public class CounterGenerator extends NumberGenerator {
private final AtomicLong counter;
/**
* Create a counter that starts at countstart.
*/
public CounterGenerator(long countstart) {
counter=new AtomicLong(countstart);
}
@Override
public Long nextValue() {
return counter.getAndIncrement();
}
@Override
public Long lastValue() {
return counter.get() - 1;
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.ThreadLocalRandom;
import static java.util.Objects.requireNonNull;
/**
* Generates a distribution by choosing from a discrete set of values.
*/
public class DiscreteGenerator extends Generator<String> {
private static class Pair {
private double weight;
private String value;
Pair(double weight, String value) {
this.weight = weight;
this.value = requireNonNull(value);
}
}
private final Collection<Pair> values = new ArrayList<>();
private String lastvalue;
public DiscreteGenerator() {
lastvalue = null;
}
/**
* Generate the next string in the distribution.
*/
@Override
public String nextValue() {
double sum = 0;
for (Pair p : values) {
sum += p.weight;
}
double val = ThreadLocalRandom.current().nextDouble();
for (Pair p : values) {
double pw = p.weight / sum;
if (val < pw) {
return p.value;
}
val -= pw;
}
throw new AssertionError("oops. should not get here.");
}
/**
* Return the previous string generated by the distribution; e.g., returned from the last nextString() call.
* Calling lastString() should not advance the distribution or have any side effects. If nextString() has not yet
* been called, lastString() should return something reasonable.
*/
@Override
public String lastValue() {
if (lastvalue == null) {
lastvalue = nextValue();
}
return lastvalue;
}
public void addValue(double weight, String value) {
values.add(new Pair(weight, value));
}
}
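// Usage sketch (editor's addition): weights are normalized by their running sum,
// so they need not add up to 1. Operation mixes are typically chosen this way.
//
//   DiscreteGenerator op = new DiscreteGenerator();
//   op.addValue(95, "READ");
//   op.addValue(5, "UPDATE");
//   String next = op.nextValue();  // "READ" about 95% of the time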
/**
* Copyright (c) 2011-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* A generator of an exponential distribution. It produces a sequence
* of time intervals according to an exponential
* distribution. Smaller intervals are more frequent than larger
* ones, and there is no bound on the length of an interval. When you
* construct an instance of this class, you specify a parameter gamma,
* which corresponds to the rate at which events occur.
* Alternatively, 1/gamma is the average length of an interval.
*/
public class ExponentialGenerator extends NumberGenerator {
// What percentage of the readings should be within the most recent exponential.frac portion of the dataset?
public static final String EXPONENTIAL_PERCENTILE_PROPERTY = "exponential.percentile";
public static final String EXPONENTIAL_PERCENTILE_DEFAULT = "95";
// What fraction of the dataset should be accessed exponential.percentile of the time?
public static final String EXPONENTIAL_FRAC_PROPERTY = "exponential.frac";
public static final String EXPONENTIAL_FRAC_DEFAULT = "0.8571428571"; // 1/7
/**
* The exponential constant to use.
*/
private double gamma;
/******************************* Constructors **************************************/
/**
* Create an exponential generator with a mean interval length of {@code mean}
* (equivalently, an arrival rate gamma of 1/mean).
*/
public ExponentialGenerator(double mean) {
gamma = 1.0 / mean;
}
public ExponentialGenerator(double percentile, double range) {
gamma = -Math.log(1.0 - percentile / 100.0) / range; //1.0/mean;
}
/****************************************************************************************/
/**
* Generate the next interval as a double. The distribution is skewed toward lower
* values; e.g., values near 0 are the most common, with larger intervals increasingly rare.
* @return The next item in the sequence.
*/
@Override
public Double nextValue() {
return -Math.log(ThreadLocalRandom.current().nextDouble()) / gamma;
}
@Override
public double mean() {
return 1.0 / gamma;
}
public static void main(String[] args) {
ExponentialGenerator e = new ExponentialGenerator(90, 100);
int j = 0;
for (int i = 0; i < 1000; i++) {
if (e.nextValue() < 100) {
j++;
}
}
System.out.println("Got " + j + " hits. Expect 900");
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
/**
* A generator, whose sequence is the lines of a file.
*/
public class FileGenerator extends Generator<String> {
private final String filename;
private String current;
private BufferedReader reader;
/**
* Create a FileGenerator with the given file.
* @param filename The file to read lines from.
*/
public FileGenerator(String filename) {
this.filename = filename;
reloadFile();
}
/**
* Return the next string of the sequence, i.e., the next line of the file.
*/
@Override
public synchronized String nextValue() {
try {
current = reader.readLine();
return current;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Return the previous read line.
*/
@Override
public String lastValue() {
return current;
}
/**
* Reopen the file to reuse values.
*/
public synchronized void reloadFile() {
try (Reader r = reader) {
System.err.println("Reload " + filename);
reader = new BufferedReader(new FileReader(filename));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
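// Usage sketch (editor's addition; "/tmp/keys.txt" is a placeholder path):
// nextValue() returns null at end of file, and reloadFile() rewinds the stream
// so the same sequence can be replayed.
//
//   FileGenerator keys = new FileGenerator("/tmp/keys.txt");
//   for (String line = keys.nextValue(); line != null; line = keys.nextValue()) {
//     System.out.println(line);
//   }
//   keys.reloadFile();  // start over from the first line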
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* An expression that generates a sequence of values, following some distribution (Uniform, Zipfian, Sequential, etc.).
*/
public abstract class Generator<V> {
/**
* Generate the next value in the distribution.
*/
public abstract V nextValue();
/**
* Return the previous value generated by the distribution; e.g., returned from the last {@link Generator#nextValue()}
* call.
* Calling {@link #lastValue()} should not advance the distribution or have any side effects. If {@link #nextValue()}
* has not yet been called, {@link #lastValue()} should return something reasonable.
*/
public abstract V lastValue();
public final String nextString() {
V ret = nextValue();
return ret == null ? null : ret.toString();
}
public final String lastString() {
V ret = lastValue();
return ret == null ? null : ret.toString();
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generate integers according to a histogram distribution. The histogram
* buckets are of width one, but the values are multiplied by a block size.
* Therefore, instead of drawing sizes uniformly at random within each
* bucket, we always draw the largest value in the current bucket, so the value
* drawn is always a multiple of blockSize.
*
* The minimum value this distribution returns is blockSize (not zero).
*
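* <p>
* The expected histogram file is tab separated: a {@code BlockSize} header line
* followed by {@code bucket<TAB>count} lines in ascending bucket order, e.g.
* (illustrative counts only):
* <pre>
* BlockSize   4096
* 0   1
* 1   5
* 2   2
* </pre>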
*/
public class HistogramGenerator extends NumberGenerator {
private final long blockSize;
private final long[] buckets;
private long area;
private long weightedArea = 0;
private double meanSize = 0;
public HistogramGenerator(String histogramfile) throws IOException {
try (BufferedReader in = new BufferedReader(new FileReader(histogramfile))) {
String str;
String[] line;
ArrayList<Integer> a = new ArrayList<>();
str = in.readLine();
if (str == null) {
throw new IOException("Empty input file!\n");
}
line = str.split("\t");
if (line[0].compareTo("BlockSize") != 0) {
throw new IOException("First line of histogram is not the BlockSize!\n");
}
blockSize = Integer.parseInt(line[1]);
while ((str = in.readLine()) != null) {
// [0] is the bucket, [1] is the count; note that List.add(index, element)
// is used, so the file must list buckets in ascending order from 0
line = str.split("\t");
a.add(Integer.parseInt(line[0]), Integer.parseInt(line[1]));
}
buckets = new long[a.size()];
for (int i = 0; i < a.size(); i++) {
buckets[i] = a.get(i);
}
}
init();
}
public HistogramGenerator(long[] buckets, int blockSize) {
this.blockSize = blockSize;
this.buckets = buckets;
init();
}
private void init() {
for (int i = 0; i < buckets.length; i++) {
area += buckets[i];
weightedArea += i * buckets[i];
}
// calculate average file size
meanSize = ((double) blockSize) * ((double) weightedArea) / (area);
}
@Override
public Long nextValue() {
int number = ThreadLocalRandom.current().nextInt((int) area);
int i;
for (i = 0; i < (buckets.length - 1); i++) {
number -= buckets[i];
if (number <= 0) {
return (i + 1) * blockSize;
}
}
return i * blockSize;
}
@Override
public double mean() {
return meanSize;
}
}
/**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generate integers resembling a hotspot distribution where x% of operations
* access y% of data items. The parameters specify the bounds for the numbers,
* the percentage of the interval which comprises the hot set and
* the percentage of operations that access the hot set. Numbers of the hot set are
* always smaller than any number in the cold set. Elements from the hot set and
* the cold set are chosen using a uniform distribution.
*
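* <p>
* For example (illustrative numbers), {@code new HotspotIntegerGenerator(0, 99, 0.2, 0.8)}
* sends 80% of draws to the hot keys 0..19 and the remaining 20% to the cold keys 20..99.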
*/
public class HotspotIntegerGenerator extends NumberGenerator {
private final long lowerBound;
private final long upperBound;
private final long hotInterval;
private final long coldInterval;
private final double hotsetFraction;
private final double hotOpnFraction;
/**
* Create a generator for Hotspot distributions.
*
* @param lowerBound lower bound of the distribution.
* @param upperBound upper bound of the distribution.
* @param hotsetFraction percentage of data items that belong to the hot set.
* @param hotOpnFraction percentage of operations accessing the hot set.
*/
public HotspotIntegerGenerator(long lowerBound, long upperBound,
double hotsetFraction, double hotOpnFraction) {
if (hotsetFraction < 0.0 || hotsetFraction > 1.0) {
System.err.println("Hotset fraction out of range. Setting to 0.0");
hotsetFraction = 0.0;
}
if (hotOpnFraction < 0.0 || hotOpnFraction > 1.0) {
System.err.println("Hot operation fraction out of range. Setting to 0.0");
hotOpnFraction = 0.0;
}
if (lowerBound > upperBound) {
System.err.println("Upper bound of Hotspot generator smaller than the lower bound. " +
"Swapping the values.");
long temp = lowerBound;
lowerBound = upperBound;
upperBound = temp;
}
this.lowerBound = lowerBound;
this.upperBound = upperBound;
this.hotsetFraction = hotsetFraction;
long interval = upperBound - lowerBound + 1;
this.hotInterval = (int) (interval * hotsetFraction);
this.coldInterval = interval - hotInterval;
this.hotOpnFraction = hotOpnFraction;
}
@Override
public Long nextValue() {
long value = 0;
Random random = ThreadLocalRandom.current();
if (random.nextDouble() < hotOpnFraction) {
// Choose a value from the hot set; floorMod keeps the result
// non-negative even for Long.MIN_VALUE.
value = lowerBound + Math.floorMod(random.nextLong(), hotInterval);
} else {
// Choose a value from the cold set.
value = lowerBound + hotInterval + Math.floorMod(random.nextLong(), coldInterval);
}
setLastValue(value);
return value;
}
/**
* @return the lowerBound
*/
public long getLowerBound() {
return lowerBound;
}
/**
* @return the upperBound
*/
public long getUpperBound() {
return upperBound;
}
/**
* @return the hotsetFraction
*/
public double getHotsetFraction() {
return hotsetFraction;
}
/**
* @return the hotOpnFraction
*/
public double getHotOpnFraction() {
return hotOpnFraction;
}
@Override
public double mean() {
return hotOpnFraction * (lowerBound + hotInterval / 2.0)
+ (1 - hotOpnFraction) * (lowerBound + hotInterval + coldInterval / 2.0);
}
}
/**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.*;
/**
* A generator that produces strings of {@link #length} using a set of code points
* from {@link #characterSet}. Each time {@link #nextValue()} is executed, the string
* is incremented by one character. Eventually the string may rollover to the beginning
* and the user may choose to have the generator throw a NoSuchElementException at that
* point or continue incrementing. (By default the generator will continue incrementing).
* <p>
* For example, if we set a length of 2 characters and the character set includes
* [A, B] then the generator output will be:
* <ul>
* <li>AA</li>
* <li>AB</li>
* <li>BA</li>
* <li>BB</li>
* <li>AA <-- rolled over</li>
* </ul>
* <p>
* This class includes some default character sets to choose from including ASCII
* and plane 0 of Unicode.
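* <p>
* A minimal usage sketch (assuming a hypothetical two code point character set):
* <pre>{@code
* IncrementingPrintableStringGenerator gen =
*     new IncrementingPrintableStringGenerator(2, new int[] {'A', 'B'});
* gen.nextValue(); // "AA"
* gen.nextValue(); // "AB"
* gen.nextValue(); // "BA"
* }</pre>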
*/
public class IncrementingPrintableStringGenerator extends Generator<String> {
/** Default string length for the generator. */
public static final int DEFAULTSTRINGLENGTH = 8;
/**
* Set of all character types that include every symbol other than non-printable
* control characters.
*/
public static final Set<Integer> CHAR_TYPES_ALL_BUT_CONTROL;
static {
CHAR_TYPES_ALL_BUT_CONTROL = new HashSet<Integer>(24);
// numbers
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LETTER_NUMBER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_NUMBER);
// letters
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LOWERCASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.TITLECASE_LETTER);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_LETTER);
// marks
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.COMBINING_SPACING_MARK);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.NON_SPACING_MARK);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.ENCLOSING_MARK);
// punctuation
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.CONNECTOR_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.DASH_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.START_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.END_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.INITIAL_QUOTE_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.FINAL_QUOTE_PUNCTUATION);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_PUNCTUATION);
// symbols
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.MATH_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.CURRENCY_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.MODIFIER_SYMBOL);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.OTHER_SYMBOL);
// separators
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.SPACE_SEPARATOR);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.LINE_SEPARATOR);
CHAR_TYPES_ALL_BUT_CONTROL.add((int) Character.PARAGRAPH_SEPARATOR);
}
/**
* Set of character types including only upper and lower case letters.
*/
public static final Set<Integer> CHAR_TYPES_BASIC_ALPHA;
static {
CHAR_TYPES_BASIC_ALPHA = new HashSet<Integer>(2);
CHAR_TYPES_BASIC_ALPHA.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_BASIC_ALPHA.add((int) Character.LOWERCASE_LETTER);
}
/**
* Set of character types including only decimals, upper and lower case letters.
*/
public static final Set<Integer> CHAR_TYPES_BASIC_ALPHANUMERICS;
static {
CHAR_TYPES_BASIC_ALPHANUMERICS = new HashSet<Integer>(3);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPES_BASIC_ALPHANUMERICS.add((int) Character.LOWERCASE_LETTER);
}
/**
* Set of character types including only decimals, letter numbers,
* other numbers, upper, lower, title case as well as letter modifiers
* and other letters.
*/
public static final Set<Integer> CHAR_TYPE_EXTENDED_ALPHANUMERICS;
static {
CHAR_TYPE_EXTENDED_ALPHANUMERICS = new HashSet<Integer>(8);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.DECIMAL_DIGIT_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.LETTER_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.OTHER_NUMBER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.UPPERCASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.LOWERCASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.TITLECASE_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.MODIFIER_LETTER);
CHAR_TYPE_EXTENDED_ALPHANUMERICS.add((int) Character.OTHER_LETTER);
}
/** The character set to iterate over. */
private final int[] characterSet;
/** An array of indices, one per position in the output string. */
private int[] indices;
/** The length of the output string in characters. */
private final int length;
/** The last value returned by the generator. Should be null if {@link #nextValue()}
* has not been called.*/
private String lastValue;
/** Whether or not to throw an exception when the string rolls over. */
private boolean throwExceptionOnRollover;
/** Whether or not the generator has rolled over. */
private boolean hasRolledOver;
/**
* Generates strings of 8 characters using only the upper and lower case alphabetical
* characters from the ASCII set.
*/
public IncrementingPrintableStringGenerator() {
this(DEFAULTSTRINGLENGTH, printableBasicAlphaASCIISet());
}
/**
* Generates strings of {@link #length} characters using only the upper and lower
* case alphabetical characters from the ASCII set.
* @param length The length of string to return from the generator.
* @throws IllegalArgumentException if the length is less than one.
*/
public IncrementingPrintableStringGenerator(final int length) {
this(length, printableBasicAlphaASCIISet());
}
/**
* Generates strings of {@link #length} characters using the code points in
* {@link #characterSet}.
* @param length The length of string to return from the generator.
* @param characterSet A set of code points to choose from. Code points in the
* set can be in any order, not necessarily lexical.
* @throws IllegalArgumentException if the length is less than one or the character
* set has fewer than one code points.
*/
public IncrementingPrintableStringGenerator(final int length, final int[] characterSet) {
if (length < 1) {
throw new IllegalArgumentException("Length must be greater than or equal to 1");
}
if (characterSet == null || characterSet.length < 1) {
throw new IllegalArgumentException("Character set must have at least one character");
}
this.length = length;
this.characterSet = characterSet;
indices = new int[length];
}
@Override
public String nextValue() {
if (hasRolledOver && throwExceptionOnRollover) {
throw new NoSuchElementException("The generator has rolled over to the beginning");
}
final StringBuilder buffer = new StringBuilder(length);
for (int i = 0; i < length; i++) {
buffer.append(Character.toChars(characterSet[indices[i]]));
}
// increment the indices;
for (int i = length - 1; i >= 0; --i) {
if (indices[i] >= characterSet.length - 1) {
indices[i] = 0;
if (i == 0 || (characterSet.length == 1 && lastValue != null)) {
hasRolledOver = true;
}
} else {
++indices[i];
break;
}
}
lastValue = buffer.toString();
return lastValue;
}
@Override
public String lastValue() {
return lastValue;
}
/** @param exceptionOnRollover Whether or not to throw an exception on rollover. */
public void setThrowExceptionOnRollover(final boolean exceptionOnRollover) {
this.throwExceptionOnRollover = exceptionOnRollover;
}
/** @return Whether or not to throw an exception on rollover. */
public boolean getThrowExceptionOnRollover() {
return throwExceptionOnRollover;
}
/**
* Returns an array of printable code points with only the upper and lower
* case alphabetical characters from the basic ASCII set.
* @return An array of code points
*/
public static int[] printableBasicAlphaASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHA);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the upper and lower case
* alphabetical characters as well as the numeric values from the basic
* ASCII set.
* @return An array of code points
*/
public static int[] printableBasicAlphaNumericASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 127, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the entire basic ASCII table,
* including spaces. Excludes new lines.
* @return An array of code points
*/
public static int[] fullPrintableBasicASCIISet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(32, 127, null, false, null);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points with the entire basic ASCII table,
* including spaces and new lines.
* @return An array of code points
*/
public static int[] fullPrintableBasicASCIISetWithNewlines() {
final List<Integer> validCharacters = new ArrayList<Integer>();
validCharacters.add(10); // newline
validCharacters.addAll(generatePrintableCharacterSet(32, 127, null, false, null));
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points the first plane of Unicode characters
* including only the alpha-numeric values.
* @return An array of code points
*/
public static int[] printableAlphaNumericPlaneZeroSet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_BASIC_ALPHANUMERICS);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Returns an array of printable code points the first plane of Unicode characters
* including all printable characters.
* @return An array of code points
*/
public static int[] fullPrintablePlaneZeroSet() {
final List<Integer> validCharacters =
generatePrintableCharacterSet(0, 65535, null, false, CHAR_TYPES_ALL_BUT_CONTROL);
final int[] characterSet = new int[validCharacters.size()];
for (int i = 0; i < validCharacters.size(); i++) {
characterSet[i] = validCharacters.get(i);
}
return characterSet;
}
/**
* Generates a list of code points based on a range and filters.
* These can be used for generating strings with various ASCII and/or
* Unicode printable character sets for use with DBs that may have
* character limitations.
* <p>
* Note that control, surrogate, format, private use and unassigned
* code points are skipped.
* @param startCodePoint The starting code point, inclusive.
* @param lastCodePoint The final code point, inclusive.
* @param characterTypesFilter An optional set of code points for inclusion or
* exclusion.
* @param isFilterAllowableList Determines whether the {@code characterTypesFilter}
* set is inclusive or exclusive. When true, only those code points that
* appear in the set will be included in the resulting list. Otherwise
* matching code points are excluded.
* @param allowableTypes An optional set of allowable character
* types. See {@link Character} for types.
* @return A list of code points matching the given range and filters. The
* list may be empty but is guaranteed not to be null.
*/
public static List<Integer> generatePrintableCharacterSet(
final int startCodePoint,
final int lastCodePoint,
final Set<Integer> characterTypesFilter,
final boolean isFilterAllowableList,
final Set<Integer> allowableTypes) {
// since we don't know the final size of the allowable character list we
// start with a list then we'll flatten it to an array.
final List<Integer> validCharacters = new ArrayList<Integer>(lastCodePoint);
for (int codePoint = startCodePoint; codePoint <= lastCodePoint; ++codePoint) {
if (allowableTypes != null &&
!allowableTypes.contains(Character.getType(codePoint))) {
continue;
} else {
// skip control points, formats, surrogates, etc
final int type = Character.getType(codePoint);
if (type == Character.CONTROL ||
type == Character.SURROGATE ||
type == Character.FORMAT ||
type == Character.PRIVATE_USE ||
type == Character.UNASSIGNED) {
continue;
}
}
if (characterTypesFilter != null) {
// if the filter is enabled then we need to make sure the code point
// is in the allowable list if it's a whitelist or that the code point
// is NOT in the list if it's a blacklist.
if ((isFilterAllowableList && !characterTypesFilter.contains(codePoint)) ||
(!isFilterAllowableList && characterTypesFilter.contains(codePoint))) {
continue;
}
}
validCharacters.add(codePoint);
}
return validCharacters;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* A generator that is capable of generating numeric values.
*
*/
public abstract class NumberGenerator extends Generator<Number> {
private Number lastVal;
/**
* Set the last value generated. NumberGenerator subclasses must use this call
* to properly set the last value, or the {@link #lastValue()} calls won't work.
*/
protected void setLastValue(Number last) {
lastVal = last;
}
@Override
public Number lastValue() {
return lastVal;
}
/**
* Return the expected value (mean) of the values this generator will return.
*/
public abstract double mean();
}
/**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.TimeUnit;
import site.ycsb.Utils;
/**
* A generator that picks from a discrete set of offsets from a base Unix Epoch
* timestamp that returns timestamps in a random order with the guarantee that
* each timestamp is only returned once.
* <p>
* TODO - It would be best to implement some kind of pseudo non-repeating random
* generator for this as it's likely OK that some small percentage of values are
* repeated. For now we just generate all of the offsets in an array, shuffle
* it and then iterate over the array.
* <p>
* Note that {@link #MAX_INTERVALS} defines a hard limit on the size of the
* offset array so that we don't completely blow out the heap.
* <p>
* The constructor parameter {@code intervals} determines how many values will be
* returned by the generator. For example, if the {@code interval} is 60 and the
* {@code timeUnits} are set to {@link TimeUnit#SECONDS} and {@code intervals}
* is set to 60, then the consumer can call {@link #nextValue()} 60 times for
* timestamps within an hour.
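* <p>
* A minimal sketch of that example:
* <pre>{@code
* RandomDiscreteTimestampGenerator generator =
*     new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS, 60);
* for (int i = 0; i < 60; i++) {
*   long timestamp = generator.nextValue(); // each 60s slot of the hour, exactly once
* }
* }</pre>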
*/
public class RandomDiscreteTimestampGenerator extends UnixEpochTimestampGenerator {
/** A hard limit on the size of the offsets array to avoid using too much heap. */
public static final int MAX_INTERVALS = 16777216;
/** The total number of intervals for this generator. */
private final int intervals;
// can't be primitives due to the generic params on the sort function :(
/** The array of generated offsets from the base time. */
private final Integer[] offsets;
/** The current index into the offsets array. */
private int offsetIndex;
/**
* Ctor that uses the current system time as current.
* @param interval The interval between timestamps.
* @param timeUnits The time units of the returned Unix Epoch timestamp (as well
* as the units for the interval).
* @param intervals The total number of intervals for the generator.
* @throws IllegalArgumentException if the intervals is larger than {@link #MAX_INTERVALS}
*/
public RandomDiscreteTimestampGenerator(final long interval, final TimeUnit timeUnits,
final int intervals) {
super(interval, timeUnits);
this.intervals = intervals;
offsets = new Integer[intervals];
setup();
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval between timestamps.
* @param timeUnits The time units of the returned Unix Epoch timestamp (as well
* as the units for the interval).
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* @param intervals The total number of intervals for the generator.
* @throws IllegalArgumentException if the intervals is larger than {@link #MAX_INTERVALS}
*/
public RandomDiscreteTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp, final int intervals) {
super(interval, timeUnits, startTimestamp);
this.intervals = intervals;
offsets = new Integer[intervals];
setup();
}
/**
* Generates the offsets and shuffles the array.
*/
private void setup() {
if (intervals > MAX_INTERVALS) {
throw new IllegalArgumentException("Too many intervals for the in-memory "
+ "array. The limit is " + MAX_INTERVALS + ".");
}
offsetIndex = 0;
for (int i = 0; i < intervals; i++) {
offsets[i] = i;
}
Utils.shuffleArray(offsets);
}
@Override
public Long nextValue() {
if (offsetIndex >= offsets.length) {
throw new IllegalStateException("Reached the end of the random timestamp "
+ "intervals: " + offsetIndex);
}
lastTimestamp = currentTimestamp;
currentTimestamp = startTimestamp + (offsets[offsetIndex++] * getOffset(1));
return currentTimestamp;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import site.ycsb.Utils;
/**
* A generator of a zipfian distribution. It produces a sequence of items, such that some items are more popular than
* others, according to a zipfian distribution. When you construct an instance of this class, you specify the number
* of items in the set to draw from, either by specifying an itemcount (so that the sequence is of items from 0 to
* itemcount-1) or by specifying a min and a max (so that the sequence is of items from min to max inclusive). After
* you construct the instance, you can change the number of items by calling nextInt(itemcount) or nextLong(itemcount).
* <p>
* Unlike @ZipfianGenerator, this class scatters the "popular" items across the itemspace. Use this, instead of
* @ZipfianGenerator, if you don't want the head of the distribution (the popular items) clustered together.
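* <p>
* A minimal usage sketch:
* <pre>{@code
* ScrambledZipfianGenerator gen = new ScrambledZipfianGenerator(1000);
* long key = gen.nextValue(); // in [0, 999], popular keys scattered by hashing
* }</pre>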
*/
public class ScrambledZipfianGenerator extends NumberGenerator {
public static final double ZETAN = 26.46902820178302;
public static final double USED_ZIPFIAN_CONSTANT = 0.99;
public static final long ITEM_COUNT = 10000000000L;
private ZipfianGenerator gen;
private final long min, max, itemcount;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
*
* @param items The number of items in the distribution.
*/
public ScrambledZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ScrambledZipfianGenerator(long min, long max) {
this(min, max, ZipfianGenerator.ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param _items The number of items in the distribution.
* @param _zipfianconstant The zipfian constant to use.
*/
/*
// not supported, as the value of zeta depends on the zipfian constant, and we have only precomputed zeta for one
zipfian constant
public ScrambledZipfianGenerator(long _items, double _zipfianconstant)
{
this(0,_items-1,_zipfianconstant);
}
*/
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant. If you
* use a zipfian constant other than 0.99, this will take a long time to complete because we need to recompute zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ScrambledZipfianGenerator(long min, long max, double zipfianconstant) {
this.min = min;
this.max = max;
itemcount = this.max - this.min + 1;
if (zipfianconstant == USED_ZIPFIAN_CONSTANT) {
gen = new ZipfianGenerator(0, ITEM_COUNT, zipfianconstant, ZETAN);
} else {
gen = new ZipfianGenerator(0, ITEM_COUNT, zipfianconstant);
}
}
/**************************************************************************************************/
/**
* Return the next long in the sequence.
*/
@Override
public Long nextValue() {
long ret = gen.nextValue();
ret = min + Utils.fnvhash64(ret) % itemcount;
setLastValue(ret);
return ret;
}
public static void main(String[] args) {
double newzetan = ZipfianGenerator.zetastatic(ITEM_COUNT, ZipfianGenerator.ZIPFIAN_CONSTANT);
System.out.println("zetan: " + newzetan);
System.exit(0);
// the sampling demo below is unreachable; remove the exit call above to run it
ScrambledZipfianGenerator gen = new ScrambledZipfianGenerator(10000);
for (int i = 0; i < 1000000; i++) {
System.out.println("" + gen.nextValue());
}
}
/**
* since the values are scrambled (hopefully uniformly), the mean is simply the middle of the range.
*/
@Override
public double mean() {
return (min + max) / 2.0;
}
}
/**
* Copyright (c) 2016-2017 YCSB Contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.atomic.AtomicLong;
/**
* Generates a sequence of integers 0, 1, ...
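* <p>
* A minimal usage sketch:
* <pre>{@code
* SequentialGenerator gen = new SequentialGenerator(0, 2);
* gen.nextValue(); // 0
* gen.nextValue(); // 1
* gen.nextValue(); // 2
* gen.nextValue(); // 0 (wraps around)
* }</pre>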
*/
public class SequentialGenerator extends NumberGenerator {
private final AtomicLong counter;
private long interval;
private long countstart;
/**
* Create a counter that starts at countstart.
*/
public SequentialGenerator(long countstart, long countend) {
counter = new AtomicLong();
setLastValue(counter.get());
this.countstart = countstart;
interval = countend - countstart + 1;
}
/**
* Return the next value of the sequence as a long, wrapping around to
* countstart after countend is reached.
*/
public long nextLong() {
long ret = countstart + counter.getAndIncrement() % interval;
setLastValue(ret);
return ret;
}
@Override
public Number nextValue() {
long ret = countstart + counter.getAndIncrement() % interval;
setLastValue(ret);
return ret;
}
@Override
public Number lastValue() {
return counter.get() + 1;
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
/**
* Generate a popularity distribution of items, skewed to favor recent items significantly more than older items.
*/
public class SkewedLatestGenerator extends NumberGenerator {
private CounterGenerator basis;
private final ZipfianGenerator zipfian;
public SkewedLatestGenerator(CounterGenerator basis) {
this.basis = basis;
zipfian = new ZipfianGenerator(this.basis.lastValue());
nextValue();
}
/**
* Generate the next string in the distribution, skewed Zipfian favoring the items most recently returned by
* the basis generator.
*/
@Override
public Long nextValue() {
long max = basis.lastValue();
long next = max - zipfian.nextLong(max);
setLastValue(next);
return next;
}
public static void main(String[] args) {
SkewedLatestGenerator gen = new SkewedLatestGenerator(new CounterGenerator(1000));
for (int i = 0; i < Integer.parseInt(args[0]); i++) {
System.out.println(gen.nextString());
}
}
@Override
public double mean() {
throw new UnsupportedOperationException("Can't compute mean of non-stationary distribution!");
}
}
/**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* An expression that generates a random value in the specified range.
*/
public class UniformGenerator extends Generator<String> {
private final List<String> values;
private String laststring;
private final UniformLongGenerator gen;
/**
* Creates a generator that will return strings from the specified set uniformly randomly.
*/
public UniformGenerator(Collection<String> values) {
this.values = new ArrayList<>(values);
laststring = null;
gen = new UniformLongGenerator(0, values.size() - 1);
}
/**
* Generate the next string in the distribution.
*/
@Override
public String nextValue() {
laststring = values.get(gen.nextValue().intValue());
return laststring;
}
/**
* Return the previous string generated by the distribution; e.g., returned from the last nextString() call.
* Calling lastString() should not advance the distribution or have any side effects. If nextString() has not yet
* been called, lastString() should return something reasonable.
*/
@Override
public String lastValue() {
if (laststring == null) {
nextValue();
}
return laststring;
}
}
/**
* Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* Generates longs randomly uniform from an interval.
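* <p>
* For example, {@code new UniformLongGenerator(1, 6)} models a fair die: each
* value in [1,6] is returned with probability 1/6.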
*/
public class UniformLongGenerator extends NumberGenerator {
private final long lb, ub, interval;
/**
* Creates a generator that will return longs uniformly randomly from the
* interval [lb,ub] inclusive (that is, lb and ub are possible values).
*
* @param lb the lower bound (inclusive) of generated values
* @param ub the upper bound (inclusive) of generated values
*/
public UniformLongGenerator(long lb, long ub) {
this.lb = lb;
this.ub = ub;
interval = this.ub - this.lb + 1;
}
@Override
public Long nextValue() {
// draw uniformly from [0, interval) and shift by the lower bound
long ret = ThreadLocalRandom.current().nextLong(interval) + lb;
setLastValue(ret);
return ret;
}
@Override
public double mean() {
return (lb + ub) / 2.0;
}
}
/**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.TimeUnit;
/**
* A generator that produces Unix epoch timestamps in seconds, milli, micro or
* nanoseconds and increments the stamp a given interval each time
* {@link #nextValue()} is called. The result is emitted as a long in the same
* way calls to {@code System.currentTimeMillis()} and
* {@code System.nanoTime()} behave.
* <p>
* By default, the current system time of the host is used as the starting
* timestamp. Calling {@link #initalizeTimestamp(long)} can adjust the timestamp
* back or forward in time. For example, if a workload will generate an hour of
* data at 1 minute intervals, then to set the start timestamp an hour in the past
* from the current run, use:
* <pre>{@code
* UnixEpochTimestampGenerator generator = new UnixEpochTimestampGenerator();
* generator.initalizeTimestamp(-60);
* }</pre>
* A constructor is also present for setting an explicit start time.
* Negative intervals are supported as well for iterating back in time.
* <p>
* WARNING: This generator is not thread safe and should not be called from
* multiple threads.
*/
public class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. to set the time an hour in the past, supply a value of {@code -60}.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
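// note: for SECONDS and all coarser units the timestamp itself is kept in
// seconds; getOffset() scales the interval into seconds for those units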
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.concurrent.ThreadLocalRandom;
/**
* A generator of a zipfian distribution. It produces a sequence of items, such that some items are more popular than
* others, according to a zipfian distribution. When you construct an instance of this class, you specify the number
* of items in the set to draw from, either by specifying an itemcount (so that the sequence is of items from 0 to
* itemcount-1) or by specifying a min and a max (so that the sequence is of items from min to max inclusive). After
* you construct the instance, you can change the number of items by calling nextInt(itemcount) or nextLong(itemcount).
*
* Note that the popular items will be clustered together, e.g. item 0 is the most popular, item 1 the second most
* popular, and so on (or min is the most popular, min+1 the next most popular, etc.) If you don't want this clustering,
* and instead want the popular items scattered throughout the item space, then use ScrambledZipfianGenerator instead.
*
* Be aware: initializing this generator may take a long time if there are lots of items to choose from (e.g. over a
* minute for 100 million objects). This is because certain mathematical values need to be computed to properly
* generate a zipfian skew, and one of those values (zeta) is a sum sequence from 1 to n, where n is the itemcount.
* Note that if you increase the number of items in the set, we can compute a new zeta incrementally, so it should be
* fast unless you have added millions of items. However, if you decrease the number of items, we recompute zeta from
* scratch, so this can take a long time.
*
* The algorithm used here is from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994.
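*
* A minimal usage sketch:
* <pre>{@code
* ZipfianGenerator gen = new ZipfianGenerator(1000); // items 0..999, constant 0.99
* long item = gen.nextValue(); // item 0 is the most popular
* }</pre>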
*/
public class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribtion. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note : for large itemsets, this is very slow. so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
// record the value so lastValue() stays consistent on early returns
setLastValue(base);
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
setLastValue(base + 1);
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
* the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the min+1th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
}
/*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB generator package.
*/
package site.ycsb.generator;
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2020 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
/**
* Collects latency measurements, and reports them when requested.
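* <p>
* A minimal usage sketch (illustrative only; property names come from the constants below):
* <pre>{@code
* Properties props = new Properties();
* props.setProperty("measurementtype", "hdrhistogram");
* Measurements.setProperties(props);
* Measurements m = Measurements.getMeasurements();
* m.measure("READ", 250);            // latency in microseconds
* m.reportStatus("READ", Status.OK);
* System.err.println(m.getSummary());
* }</pre>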
*/
public class Measurements {
/**
* All supported measurement types are defined in this enum.
*/
public enum MeasurementType {
HISTOGRAM,
HDRHISTOGRAM,
HDRHISTOGRAM_AND_HISTOGRAM,
HDRHISTOGRAM_AND_RAW,
TIMESERIES,
RAW
}
public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype";
private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram";
public static final String MEASUREMENT_INTERVAL = "measurement.interval";
private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
private static Measurements singleton = null;
private static Properties measurementproperties = null;
public static void setProperties(Properties props) {
measurementproperties = props;
}
/**
* Return the singleton Measurements object.
*/
public static synchronized Measurements getMeasurements() {
if (singleton == null) {
singleton = new Measurements(measurementproperties);
}
return singleton;
}
private final ConcurrentHashMap<String, OneMeasurement> opToMeasurementMap;
private final ConcurrentHashMap<String, OneMeasurement> opToIntendedMeasurementMap;
private final MeasurementType measurementType;
private final int measurementInterval;
private final Properties props;
/**
* Create a new object with the specified properties.
*/
public Measurements(Properties props) {
opToMeasurementMap = new ConcurrentHashMap<>();
opToIntendedMeasurementMap = new ConcurrentHashMap<>();
this.props = props;
String mTypeString = this.props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT);
switch (mTypeString) {
case "histogram":
measurementType = MeasurementType.HISTOGRAM;
break;
case "hdrhistogram":
measurementType = MeasurementType.HDRHISTOGRAM;
break;
case "hdrhistogram+histogram":
measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM;
break;
case "hdrhistogram+raw":
measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW;
break;
case "timeseries":
measurementType = MeasurementType.TIMESERIES;
break;
case "raw":
measurementType = MeasurementType.RAW;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_TYPE_PROPERTY + "=" + mTypeString);
}
String mIntervalString = this.props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT);
switch (mIntervalString) {
case "op":
measurementInterval = 0;
break;
case "intended":
measurementInterval = 1;
break;
case "both":
measurementInterval = 2;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_INTERVAL + "=" + mIntervalString);
}
}
private OneMeasurement constructOneMeasurement(String name) {
switch (measurementType) {
case HISTOGRAM:
return new OneMeasurementHistogram(name, props);
case HDRHISTOGRAM:
return new OneMeasurementHdrHistogram(name, props);
case HDRHISTOGRAM_AND_HISTOGRAM:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementHistogram("Bucket" + name, props));
case HDRHISTOGRAM_AND_RAW:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementRaw("Raw" + name, props));
case TIMESERIES:
return new OneMeasurementTimeSeries(name, props);
case RAW:
return new OneMeasurementRaw(name, props);
default:
throw new AssertionError("Unreachable: unhandled measurement type " + measurementType);
}
}
static class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric, measured from the operation's intended
* (scheduled) start time rather than its actual start. E.g. for read latency,
* operation="READ" and latency is the measured value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMeasurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMeasurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMeasurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMeasurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMeasurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMeasurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
* Return a one line summary of the measurements.
*/
public synchronized String getSummary() {
String ret = "";
for (OneMeasurement m : opToMeasurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMeasurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A single measured metric (such as READ LATENCY).
*/
public abstract class OneMeasurement {
private final String name;
private final ConcurrentHashMap<Status, AtomicInteger> returncodes;
public String getName() {
return name;
}
/**
* @param name measurement name
*/
public OneMeasurement(String name) {
this.name = name;
this.returncodes = new ConcurrentHashMap<>();
}
public abstract void measure(int latency);
public abstract String getSummary();
/**
* No need for synchronization; the ConcurrentHashMap handles concurrent updates.
*/
public void reportStatus(Status status) {
AtomicInteger counter = returncodes.get(status);
if (counter == null) {
counter = new AtomicInteger();
AtomicInteger other = returncodes.putIfAbsent(status, counter);
if (other != null) {
counter = other;
}
}
counter.incrementAndGet();
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public abstract void exportMeasurements(MeasurementsExporter exporter) throws IOException;
protected final void exportStatusCounts(MeasurementsExporter exporter) throws IOException {
for (Map.Entry<Status, AtomicInteger> entry : returncodes.entrySet()) {
exporter.write(getName(), "Return=" + entry.getKey().getName(), entry.getValue().get());
}
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import org.HdrHistogram.Histogram;
import org.HdrHistogram.HistogramIterationValue;
import org.HdrHistogram.HistogramLogWriter;
import org.HdrHistogram.Recorder;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* Take measurements and maintain a HdrHistogram of a given metric, such as READ LATENCY.
*
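* <p>
* An illustrative configuration (property names from the constants in this class;
* the output path is a placeholder):
* <pre>{@code
* props.setProperty("hdrhistogram.percentiles", "50,95,99,99.9");
* props.setProperty("hdrhistogram.fileoutput", "true");   // also log interval histograms
* props.setProperty("hdrhistogram.output.path", "/tmp/"); // prefix for <name>.hdr files
* }</pre>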
*/
public class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* It appears latency is reported in micros.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma delimited string of numeric percentile values
* @return A List of Double percentile values
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal form of a number, e.g. 1 -> 1st, 12 -> 12th.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Properties;
/**
* Take measurements and maintain a histogram of a given metric, such as READ LATENCY.
*
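* <p>
* Latencies are reported in microseconds but bucketed per millisecond, so the default
* of 1000 buckets covers 0-999 ms; anything larger is counted as overflow. An
* illustrative configuration:
* <pre>{@code
* props.setProperty("histogram.buckets", "2000");             // track up to 2 s
* props.setProperty("measurement.histogram.verbose", "true"); // emit per-bucket counts
* }</pre>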
*/
public class OneMeasurementHistogram extends OneMeasurement {
public static final String BUCKETS = "histogram.buckets";
public static final String BUCKETS_DEFAULT = "1000";
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Specify the range of latencies to track in the histogram.
*/
private final int buckets;
/**
* Groups operations in discrete blocks of 1ms width.
*/
private long[] histogram;
/**
* Counts all operations outside the histogram's range.
*/
private long histogramoverflow;
/**
* The total number of reported operations.
*/
private long operations;
/**
* The sum of each latency measurement over all operations.
* Calculated in ms.
*/
private long totallatency;
/**
* The sum of each latency measurement squared over all operations.
* Used to calculate variance of latency.
* Calculated in ms.
*/
private double totalsquaredlatency;
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
//keep a windowed version of these stats for printing status
private long windowoperations;
private long windowtotallatency;
private int min;
private int max;
public OneMeasurementHistogram(String name, Properties props) {
super(name);
buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
histogram = new long[buckets];
histogramoverflow = 0;
operations = 0;
totallatency = 0;
totalsquaredlatency = 0;
windowoperations = 0;
windowtotallatency = 0;
min = -1;
max = -1;
}
/* (non-Javadoc)
* @see site.ycsb.OneMeasurement#measure(int)
*/
public synchronized void measure(int latency) {
//latency reported in us and collected in bucket by ms.
if (latency / 1000 >= buckets) {
histogramoverflow++;
} else {
histogram[latency / 1000]++;
}
operations++;
totallatency += latency;
totalsquaredlatency += ((double) latency) * ((double) latency);
windowoperations++;
windowtotallatency += latency;
if ((min < 0) || (latency < min)) {
min = latency;
}
if ((max < 0) || (latency > max)) {
max = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
double mean = totallatency / ((double) operations);
double variance = totalsquaredlatency / ((double) operations) - (mean * mean);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", mean);
exporter.write(getName(), "LatencyVariance(us)", variance);
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
long opcounter = 0;
boolean done95th = false;
for (int i = 0; i < buckets; i++) {
opcounter += histogram[i];
if ((!done95th) && (((double) opcounter) / ((double) operations) >= 0.95)) {
exporter.write(getName(), "95thPercentileLatency(us)", i * 1000);
done95th = true;
}
if (((double) opcounter) / ((double) operations) >= 0.99) {
exporter.write(getName(), "99thPercentileLatency(us)", i * 1000);
break;
}
}
exportStatusCounts(exporter);
if (verbose) {
for (int i = 0; i < buckets; i++) {
exporter.write(getName(), Integer.toString(i), histogram[i]);
}
exporter.write(getName(), ">" + buckets, histogramoverflow);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
}
/**
* Copyright (c) 2015-2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.Properties;
/**
* Record a series of measurements as raw data points without down sampling,
* optionally write to an output file when configured.
*
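* <p>
* An illustrative configuration (the file path is a placeholder):
* <pre>{@code
* props.setProperty("measurement.raw.output_file", "/tmp/read-latencies.csv");
* props.setProperty("measurement.raw.no_summary", "true");
* }</pre>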
*/
public class OneMeasurementRaw extends OneMeasurement {
/**
* One raw data point, two fields: timestamp (ms) when the datapoint is
* inserted, and the value.
*/
class RawDataPoint {
private final long timestamp;
private final int value;
public RawDataPoint(int value) {
this.timestamp = System.currentTimeMillis();
this.value = value;
}
public long timeStamp() {
return timestamp;
}
public int value() {
return value;
}
}
class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
}
}
/**
* Optionally, the user can configure an output file to save the raw data points.
* By default none is set and raw results are written to stdout.
*
*/
public static final String OUTPUT_FILE_PATH = "measurement.raw.output_file";
public static final String OUTPUT_FILE_PATH_DEFAULT = "";
/**
* Optionally, the user can request that summary stats not be output. This is useful
* if the user chains the raw measurement type behind the HdrHistogram type, which
* already outputs summary stats. But even in that case, the user may still want
* this class to compute summary stats, especially for accurate percentiles
* (percentiles computed by histogram classes are approximations).
*/
public static final String NO_SUMMARY_STATS = "measurement.raw.no_summary";
public static final String NO_SUMMARY_STATS_DEFAULT = "false";
private final PrintStream outputStream;
private boolean noSummaryStats = false;
private LinkedList<RawDataPoint> measurements;
private long totalLatency = 0;
// A window of stats to print summary for at the next getSummary() call.
// It's supposed to be a one line summary, so we will just print count and
// average.
private int windowOperations = 0;
private long windowTotalLatency = 0;
public OneMeasurementRaw(String name, Properties props) {
super(name);
String outputFilePath = props.getProperty(OUTPUT_FILE_PATH, OUTPUT_FILE_PATH_DEFAULT);
if (!outputFilePath.isEmpty()) {
System.out.println("Raw data measurement: will output to result file: " +
outputFilePath);
try {
outputStream = new PrintStream(
new FileOutputStream(outputFilePath, true),
true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open raw data output file", e);
}
} else {
System.out.println("Raw data measurement: will output to stdout.");
outputStream = System.out;
}
noSummaryStats = Boolean.parseBoolean(props.getProperty(NO_SUMMARY_STATS,
NO_SUMMARY_STATS_DEFAULT));
measurements = new LinkedList<>();
}
@Override
public synchronized void measure(int latency) {
totalLatency += latency;
windowTotalLatency += latency;
windowOperations++;
measurements.add(new RawDataPoint(latency));
}
@Override
public void exportMeasurements(MeasurementsExporter exporter)
throws IOException {
// Output raw data points first then print out a summary of percentiles to
// stdout.
outputStream.println(getName() +
" latency raw data: op, timestamp(ms), latency(us)");
for (RawDataPoint point : measurements) {
outputStream.println(
String.format("%s,%d,%d", getName(), point.timeStamp(),
point.value()));
}
if (outputStream != System.out) {
outputStream.close();
}
int totalOps = measurements.size();
exporter.write(getName(), "Total Operations", totalOps);
if (totalOps > 0 && !noSummaryStats) {
exporter.write(getName(),
"Below is a summary of latency in microseconds:", -1);
exporter.write(getName(), "Average",
(double) totalLatency / (double) totalOps);
Collections.sort(measurements, new RawDataPointComparator());
exporter.write(getName(), "Min", measurements.get(0).value());
exporter.write(
getName(), "Max", measurements.get(totalOps - 1).value());
exporter.write(
getName(), "p1", measurements.get((int) (totalOps * 0.01)).value());
exporter.write(
getName(), "p5", measurements.get((int) (totalOps * 0.05)).value());
exporter.write(
getName(), "p50", measurements.get((int) (totalOps * 0.5)).value());
exporter.write(
getName(), "p90", measurements.get((int) (totalOps * 0.9)).value());
exporter.write(
getName(), "p95", measurements.get((int) (totalOps * 0.95)).value());
exporter.write(
getName(), "p99", measurements.get((int) (totalOps * 0.99)).value());
exporter.write(getName(), "p99.9",
measurements.get((int) (totalOps * 0.999)).value());
exporter.write(getName(), "p99.99",
measurements.get((int) (totalOps * 0.9999)).value());
}
exportStatusCounts(exporter);
}
@Override
public synchronized String getSummary() {
if (windowOperations == 0) {
return "";
}
String toReturn = String.format("%s count: %d, average latency(us): %.2f",
getName(), windowOperations,
(double) windowTotalLatency / (double) windowOperations);
windowTotalLatency = 0;
windowOperations = 0;
return toReturn;
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
import java.text.DecimalFormat;
import java.util.Properties;
import java.util.Vector;
class SeriesUnit {
/**
* @param time
* @param average
*/
public SeriesUnit(long time, double average) {
this.time = time;
this.average = average;
}
protected final long time;
protected final double average;
}
/**
* A time series measurement of a metric, such as READ LATENCY.
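* <p>
* For example, with {@code timeseries.granularity=5000} each exported data point is the
* average latency over a five-second window.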
*/
public class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
}
@Override
public void measure(int latency) {
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements;
import site.ycsb.Status;
import site.ycsb.measurements.exporter.MeasurementsExporter;
import java.io.IOException;
/**
* Delegates each measurement to two underlying {@link OneMeasurement} instances.
*/
public class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
* Reports the status to the first measurement only, so status counts are not doubled.
*/
@Override
public void reportStatus(final Status status) {
thing1.reportStatus(status);
}
/**
* It appears latency is reported in micros.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
* We optionally serialize the interval to log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
return thing1.getSummary() + "\n" + thing2.getSummary();
}
}
/**
* Copyright (c) 2015-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.util.DefaultPrettyPrinter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Export measurements into a machine readable JSON Array of measurement objects.
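* <p>
* Each call to {@code write} appends one object to the array; the output looks
* approximately like:
* <pre>
* [ {
*   "metric" : "READ",
*   "measurement" : "AverageLatency(us)",
*   "value" : 123.45
* } ]
* </pre>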
*/
public class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.writeEndArray();
g.close();
}
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.util.DefaultPrettyPrinter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Export measurements into a machine readable JSON file.
*/
public class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.close();
}
}
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import java.io.Closeable;
import java.io.IOException;
/**
* Used to export the collected measurements into a useful format, for example
* human readable text or machine readable JSON.
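* <p>
* Implementations are {@link java.io.Closeable}; an illustrative usage sketch:
* <pre>{@code
* try (MeasurementsExporter exporter = new TextMeasurementsExporter(System.out)) {
*   Measurements.getMeasurements().exportMeasurements(exporter);
* }
* }</pre>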
*/
public interface MeasurementsExporter extends Closeable {
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param i Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, int i) throws IOException;
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param i Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, long i) throws IOException;
/**
* Write a measurement to the exported format.
*
* @param metric Metric name, for example "READ LATENCY".
* @param measurement Measurement name, for example "Average latency".
* @param d Measurement to write.
* @throws IOException if writing failed
*/
void write(String metric, String measurement, double d) throws IOException;
}
/**
* Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
/**
* Write human readable text. Tries to emulate the previous print report method.
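* For example, {@code write("READ", "AverageLatency(us)", 123.45)} produces the line
* {@code [READ], AverageLatency(us), 123.45}.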
*/
public class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, long i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, double d) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
}
public void close() throws IOException {
this.bw.close();
}
}
/*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB measurements.exporter package.
*/
package site.ycsb.measurements.exporter;
/*
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB measurements package.
*/
package site.ycsb.measurements;
/*
* Copyright (c) 2015 - 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB core package.
*/
package site.ycsb;
/**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.Client;
import site.ycsb.WorkloadException;
import site.ycsb.generator.NumberGenerator;
import java.util.Properties;
/**
* A disk-fragmenting workload.
* <p>
* Properties to control the client:
* </p>
* <UL>
* <LI><b>disksize</b>: how many bytes of storage can the disk store? (default 100,000,000)
* <LI><b>occupancy</b>: what fraction of the available storage should be used? (default 0.9)
* <LI><b>requestdistribution</b>: what distribution should be used to select the records to operate on - uniform,
* zipfian or latest (default: histogram)
* </ul>
* <p>
* <p>
* <p> See also:
* Russell Sears, Catharine van Ingen.
* <a href='https://database.cs.wisc.edu/cidr/cidr2007/papers/cidr07p34.pdf'>Fragmentation in Large Object
* Repositories</a>,
* CIDR 2006. [<a href='https://database.cs.wisc.edu/cidr/cidr2007/slides/p34-sears.ppt'>Presentation</a>]
* </p>
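* <p>
* With the defaults (disksize=100,000,000 bytes, occupancy=0.9, fieldcount=10,
* fieldlength=100), the derived record count is 0.9 * 100,000,000 / (10 * 100) = 90,000.
* </p>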
*/
public class ConstantOccupancyWorkload extends CoreWorkload {
private long disksize;
private long storageages;
private double occupancy;
private long objectCount;
public static final String STORAGE_AGE_PROPERTY = "storageages";
public static final long STORAGE_AGE_PROPERTY_DEFAULT = 10;
public static final String DISK_SIZE_PROPERTY = "disksize";
public static final long DISK_SIZE_PROPERTY_DEFAULT = 100 * 1000 * 1000;
public static final String OCCUPANCY_PROPERTY = "occupancy";
public static final double OCCUPANCY_PROPERTY_DEFAULT = 0.9;
@Override
public void init(Properties p) throws WorkloadException {
disksize = Long.parseLong(p.getProperty(DISK_SIZE_PROPERTY, String.valueOf(DISK_SIZE_PROPERTY_DEFAULT)));
storageages = Long.parseLong(p.getProperty(STORAGE_AGE_PROPERTY, String.valueOf(STORAGE_AGE_PROPERTY_DEFAULT)));
occupancy = Double.parseDouble(p.getProperty(OCCUPANCY_PROPERTY, String.valueOf(OCCUPANCY_PROPERTY_DEFAULT)));
if (p.getProperty(Client.RECORD_COUNT_PROPERTY) != null ||
p.getProperty(Client.INSERT_COUNT_PROPERTY) != null ||
p.getProperty(Client.OPERATION_COUNT_PROPERTY) != null) {
System.err.println("Warning: record, insert or operation count was set prior to initting " +
"ConstantOccupancyWorkload. Overriding old values.");
}
NumberGenerator g = CoreWorkload.getFieldLengthGenerator(p);
double fieldsize = g.mean();
int fieldcount = Integer.parseInt(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
objectCount = (long) (occupancy * (disksize / (fieldsize * fieldcount)));
if (objectCount == 0) {
throw new IllegalStateException("Object count was zero. Perhaps disksize is too low?");
}
p.setProperty(Client.RECORD_COUNT_PROPERTY, String.valueOf(objectCount));
p.setProperty(Client.OPERATION_COUNT_PROPERTY, String.valueOf(storageages * objectCount));
p.setProperty(Client.INSERT_COUNT_PROPERTY, String.valueOf(objectCount));
super.init(p);
}
}
/**
* Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2016-2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.*;
import site.ycsb.generator.*;
import site.ycsb.generator.UniformLongGenerator;
import site.ycsb.measurements.Measurements;
import java.io.IOException;
import java.util.*;
/**
* The core benchmark scenario. Represents a set of clients doing simple CRUD operations. The
* relative proportion of different kinds of operations, and other properties of the workload,
* are controlled by parameters specified at runtime.
* <p>
* Properties to control the client:
* <UL>
* <LI><b>fieldcount</b>: the number of fields in a record (default: 10)
* <LI><b>fieldlength</b>: the size of each field (default: 100)
* <LI><b>minfieldlength</b>: the minimum size of each field (default: 1)
* <LI><b>readallfields</b>: should reads read all fields (true) or just one (false) (default: true)
* <LI><b>writeallfields</b>: should updates and read/modify/writes update all fields (true) or just
* one (false) (default: false)
* <LI><b>readproportion</b>: what proportion of operations should be reads (default: 0.95)
* <LI><b>updateproportion</b>: what proportion of operations should be updates (default: 0.05)
* <LI><b>insertproportion</b>: what proportion of operations should be inserts (default: 0)
* <LI><b>scanproportion</b>: what proportion of operations should be scans (default: 0)
* <LI><b>readmodifywriteproportion</b>: what proportion of operations should be read a record,
* modify it, write it back (default: 0)
* <LI><b>requestdistribution</b>: what distribution should be used to select the records to operate
* on - uniform, zipfian, hotspot, sequential, exponential or latest (default: uniform)
* <LI><b>minscanlength</b>: for scans, what is the minimum number of records to scan (default: 1)
* <LI><b>maxscanlength</b>: for scans, what is the maximum number of records to scan (default: 1000)
* <LI><b>scanlengthdistribution</b>: for scans, what distribution should be used to choose the
* number of records to scan, for each scan, between 1 and maxscanlength (default: uniform)
* <LI><b>insertstart</b>: for parallel loads and runs, defines the starting record for this
* YCSB instance (default: 0)
* <LI><b>insertcount</b>: for parallel loads and runs, defines the number of records for this
* YCSB instance (default: recordcount)
* <LI><b>zeropadding</b>: for generating a record sequence compatible with string sort order by
* 0 padding the record number. Controls the number of 0s to use for padding. (default: 1)
* For example for row 5, with zeropadding=1 you get 'user5' key and with zeropadding=8 you get
* 'user00000005' key. In order to see its impact, zeropadding needs to be bigger than number of
* digits in the record number.
* <LI><b>insertorder</b>: should records be inserted in order by key ("ordered"), or in hashed
* order ("hashed") (default: hashed)
* <LI><b>fieldnameprefix</b>: what should be a prefix for field names, the shorter may decrease the
* required storage size (default: "field")
* </ul>
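* <p>
* An illustrative property file exercising these knobs:
* <pre>
* recordcount=1000000
* fieldcount=10
* fieldlength=100
* readproportion=0.5
* updateproportion=0.5
* requestdistribution=zipfian
* </pre>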
*/
public class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true to check the correctness of reads. Must also be set to true
* during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "zipfian" and "latest".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value, matching integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
   * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
  /**
   * The name of the property for the fraction of data items that constitute the hot set.
   */
  public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
  /**
   * The default fraction of the data items in the hot set.
   */
  public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
  /**
   * The name of the property for the fraction of operations that access the hot set.
   */
  public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
  /**
   * The default fraction of operations that access the hot set.
   */
  public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
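  /**
   * Builds the database key for the given key number. For example (illustrative
   * values), keynum 42 with zeropadding 4 and ordered inserts yields "user0042";
   * with hashed inserts the key number is first scrambled by Utils.hash.
   */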
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
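  /**
   * Creates the field length generator selected by the fieldlengthdistribution
   * property: "constant", "uniform", "zipfian" or "histogram". For example
   * (illustrative values), fieldlengthdistribution=uniform with minfieldlength=1
   * and fieldlength=100 draws lengths uniformly from [1, 100].
   */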
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The scrambled zipfian generator produces a random "next key" in part by taking the
      // modulus over the number of keys. If the number of keys changes, this would shift the
      // modulus, and we don't want that to change which keys are popular. So we construct the
      // generator with a keyspace that is larger than what exists at the beginning of the test:
      // we predict the number of inserts and tell the generator the number of existing keys
      // plus the number of predicted keys as the total keyspace. Then, if the generator picks
      // a key that hasn't been inserted yet, we simply ignore it and pick another key. This
      // way, the size of the keyspace never changes from the perspective of the scrambled
      // zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
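   * The value is the key and field name followed by a chain of hash codes,
   * truncated to the generated field length; e.g. (hash values illustrative)
   * "user42:field3:-1043757360:..." for key "user42" and field "field3".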
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
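   * <p>
   * Failed inserts are retried up to {@code core_workload_insertion_retry_limit}
   * times, sleeping roughly {@code core_workload_insertion_retry_interval}
   * seconds (randomized within [0.8, 1.2) of the interval) between attempts.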
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
    if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
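  /**
   * Chooses the next key number for a transaction, redrawing until the result
   * falls inside the range of keys that have already been inserted (as tracked
   * by transactioninsertkeysequence), so reads never target missing keys.
   */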
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
   * Creates a weighted discrete generator of the database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
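   * <p>
   * A minimal usage sketch (the proportions are illustrative; any operation with
   * a zero proportion is simply never added to the generator):
   * <pre>{@code
   * Properties p = new Properties();
   * p.setProperty("readproportion", "0.5");
   * p.setProperty("updateproportion", "0.5");
   * DiscreteGenerator chooser = createOperationGenerator(p);
   * String op = chooser.nextString(); // "READ" or "UPDATE"
   * }</pre>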
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
}
/**
* Copyright (c) 2016-2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.RandomByteIterator;
import site.ycsb.WorkloadException;
import site.ycsb.generator.*;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import site.ycsb.generator.UniformLongGenerator;
/**
 * Typical RESTful services benchmarking scenario. Represents a set of clients
 * calling REST operations like HTTP DELETE, GET, POST and PUT on a web service.
 * This scenario is completely different from CoreWorkload, which is mainly
 * designed for database benchmarking. However, due to some reusable
 * functionality, this class extends {@link CoreWorkload} and overrides necessary
 * methods like init, doTransaction etc.
*/
public class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
  private static final String READ_TRACE_FILE = "url.trace.read";
  private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
  private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
  private static final String READ_ZIPFIAN_CONSTANT_DEFAULT = "0.99";
  private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
  // Insert related properties.
  private static final String INSERT_TRACE_FILE = "url.trace.insert";
  private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
  private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
  private static final String INSERT_ZIPFIAN_CONSTANT_DEFAULT = "0.99";
  private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
  private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEFAULT = "0.99";
  private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
  // Delete related properties.
  private static final String DELETE_TRACE_FILE = "url.trace.delete";
  private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
  private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
  private static final String DELETE_ZIPFIAN_CONSTANT_DEFAULT = "0.99";
  private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
  private static final String UPDATE_TRACE_FILE = "url.trace.update";
  private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
  private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
  private static final String UPDATE_ZIPFIAN_CONSTANT_DEFAULT = "0.99";
  private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
    double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEFAULT));
    readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
    double updateZipfconstant = Double
        .parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEFAULT));
    updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
    double insertZipfconstant = Double
        .parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEFAULT));
    insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
    double deleteZipfconstant = Double
        .parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEFAULT));
    deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
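  /**
   * Builds the key chooser for the given request distribution and record count.
   * The "latest" distribution is not supported here and throws a
   * WorkloadException.
   */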
  private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfConstant,
      Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
      keychooser = new ZipfianGenerator(recordCount, zipfConstant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
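  /**
   * Returns the field length generator from CoreWorkload, overriding the
   * Zipfian case so its constant can be configured via insertsizezipfconstant.
   */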
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipfian constant.
    if (fieldlengthdistribution.compareTo("zipfian") == 0) {
      int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
      double insertsizezipfconstant = Double
          .parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEFAULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
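   * Each line of the trace file is stored as one URL keyed by its zero-based
   * line number; reading stops after recordCount entries. E.g. (illustrative
   * contents) a trace of "user/1" and "user/2" yields {0=user/1, 1=user/2}.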
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
    try (FileReader inputFile = new FileReader(filePath);
         BufferedReader bufferReader = new BufferedReader(inputFile)) {
      while ((line = bufferReader.readLine()) != null) {
        urlMap.put(count++, line.trim());
        if (count >= recordCount) {
          break;
        }
      }
} catch (IOException e) {
      throw new WorkloadException(
          "Error while reading the trace. Please make sure the trace file path is correct.", e);
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
  /**
   * Returns the next URL to be called. The opType argument selects the trace:
   * 1 = read, 2 = insert, 3 = delete, anything else = update.
   */
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
}
/**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import site.ycsb.ByteIterator;
import site.ycsb.Client;
import site.ycsb.DB;
import site.ycsb.NumericByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.Utils;
import site.ycsb.Workload;
import site.ycsb.WorkloadException;
import site.ycsb.generator.DiscreteGenerator;
import site.ycsb.generator.Generator;
import site.ycsb.generator.HotspotIntegerGenerator;
import site.ycsb.generator.IncrementingPrintableStringGenerator;
import site.ycsb.generator.NumberGenerator;
import site.ycsb.generator.RandomDiscreteTimestampGenerator;
import site.ycsb.generator.ScrambledZipfianGenerator;
import site.ycsb.generator.SequentialGenerator;
import site.ycsb.generator.UniformLongGenerator;
import site.ycsb.generator.UnixEpochTimestampGenerator;
import site.ycsb.generator.ZipfianGenerator;
import site.ycsb.measurements.Measurements;
/**
 * A specialized workload dealing with time series data, i.e. series of discrete
* events associated with timestamps and identifiers. For this workload, identities
* consist of a {@link String} <b>key</b> and a set of {@link String} <b>tag key/value</b>
* pairs.
* <p>
* For example:
* <table border="1">
* <tr><th>Time Series Key</th><th>Tag Keys/Values</th><th>1483228800</th><th>1483228860</th><th>1483228920</th></tr>
* <tr><td>AA</td><td>AA=AA, AB=AA</td><td>42.5</td><td>1.0</td><td>85.9</td></tr>
* <tr><td>AA</td><td>AA=AA, AB=AB</td><td>-9.4</td><td>76.9</td><td>0.18</td></tr>
* <tr><td>AB</td><td>AA=AA, AB=AA</td><td>-93.0</td><td>57.1</td><td>-63.8</td></tr>
* <tr><td>AB</td><td>AA=AA, AB=AB</td><td>7.6</td><td>56.1</td><td>-0.3</td></tr>
* </table>
* <p>
 * This table shows four time series with three measurements at three different timestamps.
* Keys, tags, timestamps and values (numeric only at this time) are generated by
* this workload. For details on properties and behavior, see the
* {@code workloads/tsworkload_template} file. The Javadocs will focus on implementation
* and how {@link DB} clients can parse the workload.
* <p>
* In order to avoid having existing DB implementations implement a brand new interface
* this workload uses the existing APIs to encode a few special values that can be parsed
* by the client. The special values include the timestamp, numeric value and some
* query (read or scan) parameters. As an example on how to parse the fields, see
* {@link BasicTSDB}.
* <p>
* <b>Timestamps</b>
* <p>
* Timestamps are presented as Unix Epoch values in units of {@link TimeUnit#SECONDS},
* {@link TimeUnit#MILLISECONDS} or {@link TimeUnit#NANOSECONDS} based on the
* {@code timestampunits} property. For calls to {@link DB#insert(String, String, java.util.Map)}
* and {@link DB#update(String, String, java.util.Map)}, the timestamp is added to the
* {@code values} map encoded in a {@link NumericByteIterator} with the key defined
* in the {@code timestampkey} property (defaulting to "YCSBTS"). To pull out the timestamp
* when iterating over the values map, cast the {@link ByteIterator} to a
* {@link NumericByteIterator} and call {@link NumericByteIterator#getLong()}.
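 * <p>
 * A minimal parsing sketch (assuming {@code timestampKey} holds the configured
 * {@code timestampkey} property value and {@code values} is the map passed to
 * the insert or update call):
 * <pre>{@code
 * ByteIterator raw = values.get(timestampKey);
 * long timestamp = ((NumericByteIterator) raw).getLong();
 * }</pre>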
* <p>
* Note that for calls to {@link DB#update(String, String, java.util.Map)}, timestamps
 * earlier than the timestamp generator's timestamp will be chosen at random to
* mimic a lambda architecture or old job re-reporting some data.
* <p>
* For calls to {@link DB#read(String, String, java.util.Set, java.util.Map)} and
* {@link DB#scan(String, String, int, java.util.Set, Vector)}, timestamps
* are encoded in a {@link StringByteIterator} in a key/value format with the
 * {@code tagpairdelimiter} separator. E.g. {@code YCSBTS=1483228800}. If {@code querytimespan}
* has been set to a positive value then the value will include a range with the
* starting (oldest) timestamp followed by the {@code querytimespandelimiter} separator
* and the ending (most recent) timestamp. E.g. {@code YCSBTS=1483228800-1483228920}.
* <p>
* For calls to {@link DB#delete(String, String)}, encoding is the same as reads and
* scans but key/value pairs are separated by the {@code deletedelimiter} property value.
* <p>
* By default, the starting timestamp is the current system time without any rounding.
* All timestamps are then offsets from that starting value.
* <p>
* <b>Values</b>
* <p>
* Similar to timestamps, values are encoded in {@link NumericByteIterator}s and stored
* in the values map with the key defined in {@code valuekey} (defaulting to "YCSBV").
* Values can either be 64 bit signed {@link long}s or double precision {@link double}s
* depending on the {@code valuetype} or {@code dataintegrity} properties. When parsing
* out the value, always call {@link NumericByteIterator#isFloatingPoint()} to determine
* whether or not to call {@link NumericByteIterator#getDouble()} (true) or
* {@link NumericByteIterator#getLong()} (false).
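 * <p>
 * For example (a sketch under the same assumptions as the timestamp snippet
 * above, with {@code valueKey} holding the configured {@code valuekey} value):
 * <pre>{@code
 * NumericByteIterator v = (NumericByteIterator) values.get(valueKey);
 * double value = v.isFloatingPoint() ? v.getDouble() : (double) v.getLong();
 * }</pre>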
* <p>
* When {@code dataintegrity} is set to true, then the value is always set to a
* 64 bit signed integer which is the Java hash code of the concatenation of the
* key and map of values (sorted on the map keys and skipping the timestamp and value
* entries) OR'd with the timestamp of the data point. See
* {@link #validationFunction(String, long, TreeMap)} for the implementation.
* <p>
* <b>Keys and Tags</b>
* <p>
* As mentioned, the workload generates strings for the keys and tags. On initialization
* three string generators are created using the {@link IncrementingPrintableStringGenerator}
* implementation. Then the generators fill three arrays with values based on the
* number of keys, the number of tags and the cardinality of each tag key/value pair.
* This implementation gives us time series like the example table where every string
* starts at something like "AA" (depending on the length of keys, tag keys and tag values)
* and continuing to "ZZ" wherein they rollover back to "AA".
* <p>
* Each time series must have a unique set of tag keys, i.e. the key "AA" cannot appear
* more than once per time series. If the workload is configured for four tags with a
* tag key length of 2, the keys would be "AA", "AB", "AC" and "AD".
* <p>
* Each tag key is then associated with a tag value. Tag values may appear more than once
* in each time series. E.g. time series will usually start with the tags "AA=AA",
* "AB=AA", "AC=AA" and "AD=AA". The {@code tagcardinality} property determines how many
* unique values will be generated per tag key. In the example table above, the
* {@code tagcardinality} property would have been set to {@code 1,2} meaning tag
* key "AA" would always have the tag value "AA" given a cardinality of 1. However
* tag key "AB" would have values "AA" and "AB" due to a cardinality of 2. This
* cardinality map, along with the number of unique time series keys determines how
* many unique time series are generated for the workload. Tag values share a common
* array of generated strings to save on memory.
* <p>
* <b>Operation Order</b>
* <p>
* The default behavior of the workload (for inserts and updates) is to generate a
* value for each time series for a given timestamp before incrementing to the next
* timestamp and writing values. This is an ideal workload and some time series
* databases are designed for this behavior. However in the real-world events will
* arrive grouped close to the current system time with a number of events being
* delayed, hence their timestamps are further in the past. The {@code delayedseries}
* property determines the percentage of time series that are delayed by up to
* {@code delayedintervals} intervals. E.g. setting this value to 0.05 means that
* 5% of the time series will be written with timestamps earlier than the timestamp
* generator's current time.
 * <p>
* <b>Reads and Scans</b>
* <p>
* For benchmarking queries, some common tasks implemented by almost every time series
 * database are available and are passed in the fields {@link Set}:
* <p>
* <b>GroupBy</b> - A common operation is to aggregate multiple time series into a
* single time series via common parameters. For example, a user may want to see the
* total network traffic in a data center so they'll issue a SQL query like:
* <code>SELECT value FROM timeseriesdb GROUP BY datacenter ORDER BY SUM(value);</code>
* If the {@code groupbyfunction} has been set to a group by function, then the fields
* will contain a key/value pair with the key set in {@code groupbykey}. E.g.
* {@code YCSBGB=SUM}.
* <p>
* Additionally with grouping enabled, fields on tag keys where group bys should
* occur will only have the key defined and will not have a value or delimiter. E.g.
* if grouping on tag key "AA", the field will contain {@code AA} instead of {@code AA=AB}.
* <p>
* <b>Downsampling</b> - Another common operation is to reduce the resolution of the
* queried time series when fetching a wide time range of data so fewer data points
* are returned. For example, a user may fetch a week of data but if the data is
* recorded on a 1 second interval, that would be over 600k data points so they
* may ask for a 1 hour downsampling (also called bucketing) wherein every hour, all
* of the data points for a "bucket" are aggregated into a single value.
* <p>
* To enable downsampling, the {@code downsamplingfunction} property must be set to
* a supported function such as "SUM" and the {@code downsamplinginterval} must be
* set to a valid time interval with the same units as {@code timestampunits}, e.g.
* "3600" which would create 1 hour buckets if the time units were set to seconds.
* With downsampling, query fields will include a key/value pair with
* {@code downsamplingkey} as the key (defaulting to "YCSBDS") and the value being
* a concatenation of {@code downsamplingfunction} and {@code downsamplinginterval},
* for example {@code YCSBDS=SUM60}.
* <p>
* <b>Timestamps</b> - For every read, a random timestamp is selected from the interval
* set. If {@code querytimespan} has been set to a positive value, then the configured
* query time interval is added to the selected timestamp so the read passes the DB
* a range of times. Note that during the run phase, if no data was previously loaded,
* or if there are more {@code recordcount}s set for the run phase, reads may be sent
* to the DB with timestamps that are beyond the written data time range (or even the
* system clock of the DB).
* <p>
* <b>Deletes</b>
* <p>
* Because the delete API only accepts a single key, a full key and tag key/value
* pair map is flattened into a single string for parsing by the database. Common
* workloads include deleting a single time series (wherein all tag key and values are
* defined), deleting all series containing a tag key and value or deleting all of the
* time series sharing a common time series key.
* <p>
* Right now the workload supports deletes with a key and for time series tag key/value
* pairs or a key with tags and a group by on one or more tags (meaning, delete all of
* the series with any value for the given tag key). The parameters are collapsed into
* a single string delimited with the character in the {@code deletedelimiter} property.
* For example, a delete request may look like: {@code AA:AA=AA:AA=AB} to delete the
* first time series in the table above.
* <p>
* <b>Threads</b>
* <p>
* For a multi-threaded execution, the number of time series keys set via the
* {@code fieldcount} property, must be greater than or equal to the number of
* threads set via {@code threads}. This is due to each thread choosing a subset
* of the total number of time series keys and being responsible for writing values
* for each time series containing those keys at each timestamp. Thus each thread
 * will have its own timestamp generator, incrementing each time every time series
* it is responsible for has had a value written.
* <p>
* Each thread may, however, issue reads and scans for any time series in the
* complete set.
* <p>
* <b>Sparsity</b>
* <p>
* By default, during loads, every time series will have a data point written at every
* time stamp in the interval set. This is common in workloads where a sensor writes
* a value at regular intervals. However some time series are only reported under
* certain conditions.
* <p>
* For example, a counter may track the number of errors over a
* time period for a web service and only report when the value is greater than 1.
* Or a time series may include tags such as a user ID and IP address when a request
* arrives at the web service and only report values when that combination is seen.
 * This means the time series will <i>not</i> have a value at every timestamp and in
* some cases there may be only a single value!
* <p>
* This workload has a {@code sparsity} parameter that can choose how often a
* time series should record a value. The default value of 0.0 means every series
* will get a value at every timestamp. A value of 0.95 will mean that for each
* series, only 5% of the timestamps in the interval will have a value. The distribution
* of values is random.
* <p>
* <b>Notes/Warnings</b>
* <p>
* <ul>
* <li>Because time series keys and tag key/values are generated and stored in memory,
* be careful of setting the cardinality too high for the JVM's heap.</li>
* <li>When running for data integrity, a number of settings are incompatible and will
* throw errors. Check the error messages for details.</li>
* <li>Databases that support keys only and can't store tags should order and then
* collapse the tag values using a delimiter. For example the series in the example
* table at the top could be written as:
* <ul>
* <li>{@code AA.AA.AA}</li>
* <li>{@code AA.AA.AB}</li>
* <li>{@code AB.AA.AA}</li>
* <li>{@code AB.AA.AB}</li>
* </ul></li>
* </ul>
* <p>
* <b>TODOs</b>
* <p>
* <ul>
* <li>Support random time intervals. E.g. some series write every second, others every
* 60 seconds.</li>
* <li>Support random time series cardinality. Right now every series has the same
* cardinality.</li>
 * <li>Truly random timestamps per time series. We could use bitmaps to determine if
* a series has had a value written for a given timestamp. Right now all of the series
* are in sync time-wise.</li>
* <li>Possibly a real-time load where values are written with the current system time.
* It's more of a bulk-loading operation now.</li>
* </ul>
*/
public class TimeSeriesWorkload extends Workload {
/**
* The types of values written to the timeseries store.
*/
public enum ValueType {
INTEGERS("integers"),
FLOATS("floats"),
MIXED("mixednumbers");
protected final String name;
ValueType(final String name) {
this.name = name;
}
public static ValueType fromString(final String name) {
for (final ValueType type : ValueType.values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
throw new IllegalArgumentException("Unrecognized type: " + name);
}
}
/** Name and default value for the timestamp key property. */
public static final String TIMESTAMP_KEY_PROPERTY = "timestampkey";
public static final String TIMESTAMP_KEY_PROPERTY_DEFAULT = "YCSBTS";
/** Name and default value for the value key property. */
public static final String VALUE_KEY_PROPERTY = "valuekey";
public static final String VALUE_KEY_PROPERTY_DEFAULT = "YCSBV";
/** Name and default value for the timestamp interval property. */
public static final String TIMESTAMP_INTERVAL_PROPERTY = "timestampinterval";
public static final String TIMESTAMP_INTERVAL_PROPERTY_DEFAULT = "60";
/** Name and default value for the timestamp units property. */
public static final String TIMESTAMP_UNITS_PROPERTY = "timestampunits";
public static final String TIMESTAMP_UNITS_PROPERTY_DEFAULT = "SECONDS";
/** Name and default value for the number of tags property. */
public static final String TAG_COUNT_PROPERTY = "tagcount";
public static final String TAG_COUNT_PROPERTY_DEFAULT = "4";
/** Name and default value for the tag value cardinality map property. */
public static final String TAG_CARDINALITY_PROPERTY = "tagcardinality";
public static final String TAG_CARDINALITY_PROPERTY_DEFAULT = "1, 2, 4, 8";
/** Name and default value for the tag key length property. */
public static final String TAG_KEY_LENGTH_PROPERTY = "tagkeylength";
public static final String TAG_KEY_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag value length property. */
public static final String TAG_VALUE_LENGTH_PROPERTY = "tagvaluelength";
public static final String TAG_VALUE_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag pair delimiter property. */
public static final String PAIR_DELIMITER_PROPERTY = "tagpairdelimiter";
public static final String PAIR_DELIMITER_PROPERTY_DEFAULT = "=";
/** Name and default value for the delete string delimiter property. */
public static final String DELETE_DELIMITER_PROPERTY = "deletedelimiter";
public static final String DELETE_DELIMITER_PROPERTY_DEFAULT = ":";
/** Name and default value for the random timestamp write order property. */
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY = "randomwritetimestamporder";
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT = "false";
/** Name and default value for the random time series write order property. */
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY = "randomtimeseriesorder";
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT = "true";
/** Name and default value for the value types property. */
public static final String VALUE_TYPE_PROPERTY = "valuetype";
public static final String VALUE_TYPE_PROPERTY_DEFAULT = "floats";
/** Name and default value for the sparsity property. */
public static final String SPARSITY_PROPERTY = "sparsity";
public static final String SPARSITY_PROPERTY_DEFAULT = "0.00";
/** Name and default value for the delayed series percentage property. */
public static final String DELAYED_SERIES_PROPERTY = "delayedseries";
public static final String DELAYED_SERIES_PROPERTY_DEFAULT = "0.10";
/** Name and default value for the delayed series intervals property. */
public static final String DELAYED_INTERVALS_PROPERTY = "delayedintervals";
public static final String DELAYED_INTERVALS_PROPERTY_DEFAULT = "5";
/** Name and default value for the query time span property. */
public static final String QUERY_TIMESPAN_PROPERTY = "querytimespan";
public static final String QUERY_TIMESPAN_PROPERTY_DEFAULT = "0";
/** Name and default value for the randomized query time span property. */
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY = "queryrandomtimespan";
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT = "false";
/** Name and default value for the query time stamp delimiter property. */
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY = "querytimespandelimiter";
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT = ",";
/** Name and default value for the group-by key property. */
public static final String GROUPBY_KEY_PROPERTY = "groupbykey";
public static final String GROUPBY_KEY_PROPERTY_DEFAULT = "YCSBGB";
  /** Name of the group-by function property. */
  public static final String GROUPBY_PROPERTY = "groupbyfunction";
  /** Name of the group-by key map property. */
  public static final String GROUPBY_KEYS_PROPERTY = "groupbykeys";
  /** Name and default value for the downsampling key property. */
  public static final String DOWNSAMPLING_KEY_PROPERTY = "downsamplingkey";
  public static final String DOWNSAMPLING_KEY_PROPERTY_DEFAULT = "YCSBDS";
  /** Name of the downsampling function property. */
  public static final String DOWNSAMPLING_FUNCTION_PROPERTY = "downsamplingfunction";
  /** Name of the downsampling interval property. */
  public static final String DOWNSAMPLING_INTERVAL_PROPERTY = "downsamplinginterval";
/** The properties to pull settings from. */
protected Properties properties;
/** Generators for keys, tag keys and tag values. */
protected Generator<String> keyGenerator;
protected Generator<String> tagKeyGenerator;
protected Generator<String> tagValueGenerator;
/** The timestamp key, defaults to "YCSBTS". */
protected String timestampKey;
  /** The value key, defaults to "YCSBV". */
protected String valueKey;
/** The number of time units in between timestamps. */
protected int timestampInterval;
/** The units of time the timestamp and various intervals represent. */
protected TimeUnit timeUnits;
/** Whether or not to randomize the timestamp order when writing. */
protected boolean randomizeTimestampOrder;
/** Whether or not to randomize (shuffle) the time series order. NOT compatible
* with data integrity. */
protected boolean randomizeTimeseriesOrder;
/** The type of values to generate when writing data. */
protected ValueType valueType;
/** Used to calculate an offset for each time series. */
protected int[] cumulativeCardinality;
/** The calculated total cardinality based on the config. */
protected int totalCardinality;
/** The calculated per-time-series-key cardinality. I.e. the number of unique
* tag key and value combinations. */
protected int perKeyCardinality;
/** How much data to scan for in each call. */
protected NumberGenerator scanlength;
/** A generator used to select a random time series key per read/scan. */
protected NumberGenerator keychooser;
/** A generator to select what operation to perform during the run phase. */
protected DiscreteGenerator operationchooser;
/** The maximum number of interval offsets from the starting timestamp. Calculated
* based on the number of records configured for the run. */
protected int maxOffsets;
/** The number of records or operations to perform for this run. */
protected int recordcount;
/** The number of tag pairs per time series. */
protected int tagPairs;
/** The table we'll write to. */
protected String table;
/** How many time series keys will be generated. */
protected int numKeys;
/** The generated list of possible time series key values. */
protected String[] keys;
/** The generated list of possible tag key values. */
protected String[] tagKeys;
/** The generated list of possible tag value values. */
protected String[] tagValues;
/** The cardinality for each tag key. */
protected int[] tagCardinality;
/** A helper to skip non-incrementing tag values. */
protected int firstIncrementableCardinality;
/** How sparse the data written should be. */
protected double sparsity;
/** The percentage of time series that should be delayed in writes. */
protected double delayedSeries;
/** The maximum number of intervals to delay a series. */
protected int delayedIntervals;
/** Optional query time interval during reads/scans. */
protected int queryTimeSpan;
/** Whether or not the actual interval should be randomly chosen, using
* queryTimeSpan as the maximum value. */
protected boolean queryRandomTimeSpan;
/** The delimiter for tag pairs in fields. */
protected String tagPairDelimiter;
/** The delimiter between parameters for the delete key. */
protected String deleteDelimiter;
/** The delimiter between timestamps for query time spans. */
protected String queryTimeSpanDelimiter;
/** Whether or not to issue group-by queries. */
protected boolean groupBy;
/** The key used for group-by tag keys. */
protected String groupByKey;
  /** The function used for group-bys. */
protected String groupByFunction;
/** The tag keys to group on. */
protected boolean[] groupBys;
/** Whether or not to issue downsampling queries. */
protected boolean downsample;
/** The key used for downsampling tag keys. */
protected String downsampleKey;
/** The downsampling function. */
protected String downsampleFunction;
/** The downsampling interval. */
protected int downsampleInterval;
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
protected boolean dataintegrity;
/** Measurements to write data integrity results to. */
protected Measurements measurements = Measurements.getMeasurements();
@Override
public void init(final Properties p) throws WorkloadException {
properties = p;
recordcount =
Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY,
Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
timestampKey = p.getProperty(TIMESTAMP_KEY_PROPERTY, TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = p.getProperty(VALUE_KEY_PROPERTY, VALUE_KEY_PROPERTY_DEFAULT);
operationchooser = CoreWorkload.createOperationGenerator(properties);
final int maxscanlength =
Integer.parseInt(p.getProperty(CoreWorkload.MAX_SCAN_LENGTH_PROPERTY,
CoreWorkload.MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY,
CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(1, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(1, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
randomizeTimestampOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY,
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT));
randomizeTimeseriesOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESERIES_ORDER_PROPERTY,
RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT));
// setup the cardinality
numKeys = Integer.parseInt(p.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY,
CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
tagPairs = Integer.parseInt(p.getProperty(TAG_COUNT_PROPERTY,
TAG_COUNT_PROPERTY_DEFAULT));
sparsity = Double.parseDouble(p.getProperty(SPARSITY_PROPERTY, SPARSITY_PROPERTY_DEFAULT));
tagCardinality = new int[tagPairs];
final String requestdistrib =
p.getProperty(CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY,
CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
keychooser = new ScrambledZipfianGenerator(0, numKeys - 1);
//} else if (requestdistrib.compareTo("latest") == 0) {
// keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_DATA_FRACTION,
CoreWorkload.HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_OPN_FRACTION,
CoreWorkload.HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, numKeys - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
// figure out the start timestamp based on the units, cardinality and interval
try {
timestampInterval = Integer.parseInt(p.getProperty(
TIMESTAMP_INTERVAL_PROPERTY, TIMESTAMP_INTERVAL_PROPERTY_DEFAULT));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
TIMESTAMP_INTERVAL_PROPERTY, nfe);
}
try {
timeUnits = TimeUnit.valueOf(p.getProperty(TIMESTAMP_UNITS_PROPERTY,
TIMESTAMP_UNITS_PROPERTY_DEFAULT).toUpperCase());
} catch (IllegalArgumentException e) {
throw new WorkloadException("Unknown time unit type", e);
}
if (timeUnits == TimeUnit.NANOSECONDS || timeUnits == TimeUnit.MICROSECONDS) {
throw new WorkloadException("YCSB doesn't support " + timeUnits +
" at this time.");
}
tagPairDelimiter = p.getProperty(PAIR_DELIMITER_PROPERTY, PAIR_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = p.getProperty(DELETE_DELIMITER_PROPERTY, DELETE_DELIMITER_PROPERTY_DEFAULT);
dataintegrity = Boolean.parseBoolean(
p.getProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY,
CoreWorkload.DATA_INTEGRITY_PROPERTY_DEFAULT));
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
queryTimeSpan = Integer.parseInt(p.getProperty(QUERY_TIMESPAN_PROPERTY,
QUERY_TIMESPAN_PROPERTY_DEFAULT));
queryRandomTimeSpan = Boolean.parseBoolean(p.getProperty(QUERY_RANDOM_TIMESPAN_PROPERTY,
QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT));
queryTimeSpanDelimiter = p.getProperty(QUERY_TIMESPAN_DELIMITER_PROPERTY,
QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
groupByKey = p.getProperty(GROUPBY_KEY_PROPERTY, GROUPBY_KEY_PROPERTY_DEFAULT);
groupByFunction = p.getProperty(GROUPBY_PROPERTY);
if (groupByFunction != null && !groupByFunction.isEmpty()) {
final String groupByKeys = p.getProperty(GROUPBY_KEYS_PROPERTY);
if (groupByKeys == null || groupByKeys.isEmpty()) {
throw new WorkloadException("Group by was enabled but no keys were specified.");
}
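      // Assumed format (matching the parsing below): 'groupbykeys' is a comma
      // separated list of 0/1 flags, one per tag key, e.g. "0,0,1,1" groups on
      // the third and fourth tag keys.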
final String[] gbKeys = groupByKeys.split(",");
      if (gbKeys.length != tagPairs) {
        throw new WorkloadException("Only " + gbKeys.length + " group by keys "
            + "were specified but there were " + tagPairs + " tag keys given.");
}
groupBys = new boolean[gbKeys.length];
for (int i = 0; i < gbKeys.length; i++) {
        groupBys[i] = Integer.parseInt(gbKeys[i].trim()) != 0;
}
groupBy = true;
}
downsampleKey = p.getProperty(DOWNSAMPLING_KEY_PROPERTY, DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsampleFunction = p.getProperty(DOWNSAMPLING_FUNCTION_PROPERTY);
if (downsampleFunction != null && !downsampleFunction.isEmpty()) {
final String interval = p.getProperty(DOWNSAMPLING_INTERVAL_PROPERTY);
if (interval == null || interval.isEmpty()) {
throw new WorkloadException("'" + DOWNSAMPLING_INTERVAL_PROPERTY + "' was missing despite '"
+ DOWNSAMPLING_FUNCTION_PROPERTY + "' being set.");
}
downsampleInterval = Integer.parseInt(interval);
downsample = true;
}
delayedSeries = Double.parseDouble(p.getProperty(DELAYED_SERIES_PROPERTY, DELAYED_SERIES_PROPERTY_DEFAULT));
delayedIntervals = Integer.parseInt(p.getProperty(DELAYED_INTERVALS_PROPERTY, DELAYED_INTERVALS_PROPERTY_DEFAULT));
valueType = ValueType.fromString(p.getProperty(VALUE_TYPE_PROPERTY, VALUE_TYPE_PROPERTY_DEFAULT));
table = p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
initKeysAndTags();
validateSettings();
}
@Override
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
if (properties == null) {
throw new WorkloadException("Workload has not been initialized.");
}
return new ThreadState(mythreadid, threadcount);
}
@Override
public boolean doInsert(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, true);
if (db.insert(table, key, tags) == Status.OK) {
return true;
}
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db, threadstate);
break;
case "UPDATE":
doTransactionUpdate(db, threadstate);
break;
case "INSERT":
doTransactionInsert(db, threadstate);
break;
case "SCAN":
doTransactionScan(db, threadstate);
break;
case "DELETE":
doTransactionDelete(db, threadstate);
break;
default:
return false;
}
return true;
}
protected void doTransactionRead(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final String keyname = keys[keychooser.nextValue().intValue()];
final Random random = ThreadLocalRandom.current();
int offsets = state.queryOffsetGenerator.nextValue().intValue();
//int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // pick random tag values to filter on
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
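    // Encode the time constraint into the field set as well: either a single
    // "<timestampKey><tagPairDelimiter><start>" entry or, for spans, a
    // "<timestampKey><tagPairDelimiter><start><queryTimeSpanDelimiter><end>" range.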
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
      fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Map<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
final Status status = db.read(table, keyname, fields, cells);
if (dataintegrity && status == Status.OK) {
verifyRow(keyname, cells);
}
}
protected void doTransactionUpdate(final DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, false);
db.update(table, key, tags);
}
protected void doTransactionInsert(final DB db, Object threadstate) {
doInsert(db, threadstate);
}
protected void doTransactionScan(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final String keyname = keys[random.nextInt(keys.length)];
// choose a random scan length
int len = scanlength.nextValue().intValue();
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // pick random tag values to filter on
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>();
db.scan(table, keyname, len, fields, results);
}
protected void doTransactionDelete(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final StringBuilder buf = new StringBuilder().append(keys[random.nextInt(keys.length)]);
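    // The delete "key" is a single string: the series key followed by
    // deleteDelimiter separated tag pairs and, further below, the timestamp
    // or timestamp range to delete.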
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // pick random tag values to delete on
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
buf.append(deleteDelimiter)
.append(tagKeys[i]);
} else {
buf.append(deleteDelimiter).append(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp);
}
db.delete(table, buf.toString());
}
/**
   * Parses the values returned by a read or scan operation and determines whether
   * the value matches the hash generated from the key, tags and timestamp at write
   * time. Only works for raw data points; it will not work for group-bys or
   * downsampled data.
* @param key The time series key.
* @param cells The cells read by the DB.
* @return {@link Status#OK} if the data matched or {@link Status#UNEXPECTED_STATE} if
* the data did not match.
*/
protected Status verifyRow(final String key, final Map<String, ByteIterator> cells) {
Status verifyStatus = Status.UNEXPECTED_STATE;
long startTime = System.nanoTime();
double value = 0;
long timestamp = 0;
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : cells.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
timestamp = it.getLong();
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
value = it.isFloatingPoint() ? it.getDouble() : it.getLong();
} else {
validationTags.put(entry.getKey(), entry.getValue().toString());
}
}
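    // Recompute the expected value from the key, timestamp and tags; a match
    // means the data point survived the write/read round trip intact.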
if (validationFunction(key, timestamp, validationTags) == value) {
verifyStatus = Status.OK;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
return verifyStatus;
}
/**
* Function used for generating a deterministic hash based on the combination
* of metric, tags and timestamp.
* @param key A non-null string representing the key.
* @param timestamp A timestamp in the proper units for the workload.
* @param tags A non-null map of tag keys and values NOT including the YCSB
* key or timestamp.
* @return A hash value as an 8 byte integer.
*/
protected long validationFunction(final String key, final long timestamp,
final TreeMap<String, String> tags) {
final StringBuilder validationBuffer = new StringBuilder(keys[0].length() +
(tagPairs * tagKeys[0].length()) + (tagPairs * tagCardinality[1]));
for (final Entry<String, String> pair : tags.entrySet()) {
validationBuffer.append(pair.getKey()).append(pair.getValue());
}
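    // XOR the 32-bit hash of the concatenated tag pairs with the timestamp to
    // produce a deterministic value that verifyRow() can recompute on read.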
return (long) validationBuffer.toString().hashCode() ^ timestamp;
}
/**
* Breaks out the keys, tags and cardinality initialization in another method
* to keep CheckStyle happy.
* @throws WorkloadException If something goes pear shaped.
*/
protected void initKeysAndTags() throws WorkloadException {
final int keyLength = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_LENGTH_PROPERTY,
CoreWorkload.FIELD_LENGTH_PROPERTY_DEFAULT));
final int tagKeyLength = Integer.parseInt(properties.getProperty(
TAG_KEY_LENGTH_PROPERTY, TAG_KEY_LENGTH_PROPERTY_DEFAULT));
final int tagValueLength = Integer.parseInt(properties.getProperty(
TAG_VALUE_LENGTH_PROPERTY, TAG_VALUE_LENGTH_PROPERTY_DEFAULT));
keyGenerator = new IncrementingPrintableStringGenerator(keyLength);
tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeyLength);
tagValueGenerator = new IncrementingPrintableStringGenerator(tagValueLength);
final int threads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
final String tagCardinalityString = properties.getProperty(
TAG_CARDINALITY_PROPERTY,
TAG_CARDINALITY_PROPERTY_DEFAULT);
final String[] tagCardinalityParts = tagCardinalityString.split(",");
int idx = 0;
totalCardinality = numKeys;
perKeyCardinality = 1;
int maxCardinality = 0;
for (final String card : tagCardinalityParts) {
try {
tagCardinality[idx] = Integer.parseInt(card.trim());
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse cardinality: " +
card, nfe);
}
if (tagCardinality[idx] < 1) {
throw new WorkloadException("Cardinality must be greater than zero: " +
tagCardinality[idx]);
}
totalCardinality *= tagCardinality[idx];
perKeyCardinality *= tagCardinality[idx];
if (tagCardinality[idx] > maxCardinality) {
maxCardinality = tagCardinality[idx];
}
++idx;
if (idx >= tagPairs) {
// we have more cardinalities than tag keys so bail at this point.
break;
}
}
if (numKeys < threads) {
throw new WorkloadException("Field count " + numKeys + " (keys for time "
+ "series workloads) must be greater or equal to the number of "
+ "threads " + threads);
}
    // fill any remaining tags without an explicit cardinality with 1
    while (idx < tagPairs) {
      tagCardinality[idx++] = 1;
    }
for (int i = 0; i < tagCardinality.length; ++i) {
if (tagCardinality[i] > 1) {
firstIncrementableCardinality = i;
break;
}
}
keys = new String[numKeys];
tagKeys = new String[tagPairs];
tagValues = new String[maxCardinality];
for (int i = 0; i < numKeys; ++i) {
keys[i] = keyGenerator.nextString();
}
for (int i = 0; i < tagPairs; ++i) {
tagKeys[i] = tagKeyGenerator.nextString();
}
for (int i = 0; i < maxCardinality; i++) {
tagValues[i] = tagValueGenerator.nextString();
}
if (randomizeTimeseriesOrder) {
Utils.shuffleArray(keys);
Utils.shuffleArray(tagValues);
}
maxOffsets = (recordcount / totalCardinality) + 1;
final int[] keyAndTagCardinality = new int[tagPairs + 1];
keyAndTagCardinality[0] = numKeys;
for (int i = 0; i < tagPairs; i++) {
keyAndTagCardinality[i + 1] = tagCardinality[i];
}
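    // Compute mixed-radix strides for deriving a series' overall index, e.g.
    // with numKeys = 2 and tagCardinality = {3, 2}: keyAndTagCardinality is
    // {2, 3, 2} and cumulativeCardinality works out to {6, 2, 1}, so
    // overallIdx = keyIdx * 6 + tagValueIdx0 * 2 + tagValueIdx1 * 1.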
cumulativeCardinality = new int[keyAndTagCardinality.length];
for (int i = 0; i < keyAndTagCardinality.length; i++) {
int cumulation = 1;
for (int x = i; x <= keyAndTagCardinality.length - 1; x++) {
cumulation *= keyAndTagCardinality[x];
}
if (i > 0) {
cumulativeCardinality[i - 1] = cumulation;
}
}
cumulativeCardinality[cumulativeCardinality.length - 1] = 1;
}
/**
* Makes sure the settings as given are compatible.
* @throws WorkloadException If one or more settings were invalid.
*/
protected void validateSettings() throws WorkloadException {
if (dataintegrity) {
if (valueType != ValueType.INTEGERS) {
throw new WorkloadException("Data integrity was enabled. 'valuetype' must "
+ "be set to 'integers'.");
}
if (groupBy) {
throw new WorkloadException("Data integrity was enabled. 'groupbyfunction' must "
+ "be empty or null.");
}
if (downsample) {
throw new WorkloadException("Data integrity was enabled. 'downsamplingfunction' must "
+ "be empty or null.");
}
if (queryTimeSpan > 0) {
throw new WorkloadException("Data integrity was enabled. 'querytimespan' must "
+ "be empty or 0.");
}
if (randomizeTimeseriesOrder) {
throw new WorkloadException("Data integrity was enabled. 'randomizetimeseriesorder' must "
+ "be false.");
}
final String startTimestamp = properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startTimestamp == null || startTimestamp.isEmpty()) {
throw new WorkloadException("Data integrity was enabled. 'insertstart' must "
+ "be set to a Unix Epoch timestamp.");
}
}
}
/**
* Thread state class holding thread local generators and indices.
*/
protected class ThreadState {
/** The timestamp generator for this thread. */
protected final UnixEpochTimestampGenerator timestampGenerator;
/** An offset generator to select a random offset for queries. */
protected final NumberGenerator queryOffsetGenerator;
/** The current write key index. */
protected int keyIdx;
/** The starting fence for writing keys. */
protected int keyIdxStart;
/** The ending fence for writing keys. */
protected int keyIdxEnd;
/** Indices for each tag value for writes. */
protected int[] tagValueIdxs;
/** Whether or not all time series have written values for the current timestamp. */
protected boolean rollover;
/** The starting timestamp. */
protected long startTimestamp;
/**
* Default ctor.
* @param threadID The zero based thread ID.
* @param threadCount The total number of threads.
* @throws WorkloadException If something went pear shaped.
*/
protected ThreadState(final int threadID, final int threadCount) throws WorkloadException {
int totalThreads = threadCount > 0 ? threadCount : 1;
if (threadID >= totalThreads) {
throw new IllegalStateException("Thread ID " + threadID + " cannot be greater "
+ "than or equal than the thread count " + totalThreads);
}
if (keys.length < threadCount) {
throw new WorkloadException("Thread count " + totalThreads + " must be greater "
+ "than or equal to key count " + keys.length);
}
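      // Partition the key space evenly across threads; the last thread also
      // picks up any remainder. E.g. 5 keys over 2 threads: thread 0 writes
      // keys [0, 2), thread 1 writes keys [2, 5).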
int keysPerThread = keys.length / totalThreads;
keyIdx = keysPerThread * threadID;
keyIdxStart = keyIdx;
if (totalThreads - 1 == threadID) {
keyIdxEnd = keys.length;
} else {
keyIdxEnd = keyIdxStart + keysPerThread;
}
tagValueIdxs = new int[tagPairs]; // all zeros
final String startingTimestamp =
properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startingTimestamp == null || startingTimestamp.isEmpty()) {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits, maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits);
} else {
try {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp), maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
CoreWorkload.INSERT_START_PROPERTY, nfe);
}
}
// Set the last value properly for the timestamp, otherwise it may start
// one interval ago.
startTimestamp = timestampGenerator.nextValue();
// TODO - pick it
queryOffsetGenerator = new UniformLongGenerator(0, maxOffsets - 2);
}
/**
* Generates the next write value for thread.
* @param map An initialized map to populate with tag keys and values as well
* as the timestamp and actual value.
* @param isInsert Whether or not it's an insert or an update. Updates will pick
* an older timestamp (if random isn't enabled).
* @return The next key to write.
*/
protected String nextDataPoint(final Map<String, ByteIterator> map, final boolean isInsert) {
final Random random = ThreadLocalRandom.current();
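      // Sparsity controls gaps between written series: when sparsity <= 0 every
      // series gets a value each interval; otherwise a random number of series
      // (up to perKeyCardinality * sparsity) is skipped over before one is emitted.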
int iterations = sparsity <= 0 ? 1 : random.nextInt((int) ((double) perKeyCardinality * sparsity));
if (iterations < 1) {
iterations = 1;
}
while (true) {
iterations--;
if (rollover) {
timestampGenerator.nextValue();
rollover = false;
}
String key = null;
if (iterations <= 0) {
final TreeMap<String, String> validationTags;
if (dataintegrity) {
validationTags = new TreeMap<String, String>();
} else {
validationTags = null;
}
key = keys[keyIdx];
int overallIdx = keyIdx * cumulativeCardinality[0];
for (int i = 0; i < tagPairs; ++i) {
int tvidx = tagValueIdxs[i];
map.put(tagKeys[i], new StringByteIterator(tagValues[tvidx]));
if (dataintegrity) {
validationTags.put(tagKeys[i], tagValues[tvidx]);
}
if (delayedSeries > 0) {
overallIdx += (tvidx * cumulativeCardinality[i + 1]);
}
}
if (!isInsert) {
final long delta = (timestampGenerator.currentValue() - startTimestamp) / timestampInterval;
final int intervals = random.nextInt((int) delta);
map.put(timestampKey, new NumericByteIterator(startTimestamp + (intervals * timestampInterval)));
} else if (delayedSeries > 0) {
// See if the series falls in a delay bucket and calculate an offset earlier
// than the current timestamp value if so.
double pct = (double) overallIdx / (double) totalCardinality;
if (pct < delayedSeries) {
int modulo = overallIdx % delayedIntervals;
if (modulo < 0) {
modulo *= -1;
}
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue() -
timestampInterval * modulo));
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
if (dataintegrity) {
map.put(valueKey, new NumericByteIterator(validationFunction(key,
timestampGenerator.currentValue(), validationTags)));
} else {
switch (valueType) {
case INTEGERS:
map.put(valueKey, new NumericByteIterator(random.nextInt()));
break;
case FLOATS:
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
break;
case MIXED:
if (random.nextBoolean()) {
map.put(valueKey, new NumericByteIterator(random.nextInt()));
} else {
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
}
break;
default:
throw new IllegalStateException("Somehow we didn't have a value "
+ "type configured that we support: " + valueType);
}
}
}
boolean tagRollover = false;
for (int i = tagCardinality.length - 1; i >= 0; --i) {
if (tagCardinality[i] <= 1) {
          tagRollover = true; // Only one value for this tag, so it always rolls over.
continue;
}
if (tagValueIdxs[i] + 1 >= tagCardinality[i]) {
tagValueIdxs[i] = 0;
if (i == firstIncrementableCardinality) {
tagRollover = true;
}
} else {
++tagValueIdxs[i];
break;
}
}
if (tagRollover) {
if (keyIdx + 1 >= keyIdxEnd) {
keyIdx = keyIdxStart;
rollover = true;
} else {
++keyIdx;
}
}
if (iterations <= 0) {
return key;
}
}
}
}
}
/*
* Copyright (c) 2015 - 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB workloads.
*/
package site.ycsb.workloads;
# Copyright (c) 2016 YCSB contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
version=${project.version}
/**
* Copyright (c) 2012 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.*;
public class TestByteIterator {
@Test
public void testRandomByteIterator() {
int size = 100;
ByteIterator itor = new RandomByteIterator(size);
assertTrue(itor.hasNext());
assertEquals(size, itor.bytesLeft());
assertEquals(size, itor.toString().getBytes().length);
assertFalse(itor.hasNext());
assertEquals(0, itor.bytesLeft());
itor = new RandomByteIterator(size);
assertEquals(size, itor.toArray().length);
assertFalse(itor.hasNext());
assertEquals(0, itor.bytesLeft());
}
}
/**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.*;
public class TestNumericByteIterator {
@Test
public void testLong() throws Exception {
NumericByteIterator it = new NumericByteIterator(42L);
assertFalse(it.isFloatingPoint());
assertEquals(42L, it.getLong());
try {
it.getDouble();
fail("Expected IllegalStateException.");
} catch (IllegalStateException e) { }
try {
it.next();
fail("Expected UnsupportedOperationException.");
} catch (UnsupportedOperationException e) { }
assertEquals(8, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(7, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(6, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(5, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(4, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(3, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(2, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(1, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 42, (byte) it.nextByte());
assertEquals(0, it.bytesLeft());
assertFalse(it.hasNext());
it.reset();
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
}
@Test
public void testDouble() throws Exception {
NumericByteIterator it = new NumericByteIterator(42.75);
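    // 42.75 is the IEEE-754 double 0x4045600000000000, which is why the
    // big-endian byte walk below yields 0x40 (64), 0x45 (69), 0x60 (96), then zeros.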
assertTrue(it.isFloatingPoint());
assertEquals(42.75, it.getDouble(), 0.001);
try {
it.getLong();
fail("Expected IllegalStateException.");
} catch (IllegalStateException e) { }
try {
it.next();
fail("Expected UnsupportedOperationException.");
} catch (UnsupportedOperationException e) { }
assertEquals(8, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 64, (byte) it.nextByte());
assertEquals(7, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 69, (byte) it.nextByte());
assertEquals(6, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 96, (byte) it.nextByte());
assertEquals(5, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(4, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(3, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(2, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(1, it.bytesLeft());
assertTrue(it.hasNext());
assertEquals((byte) 0, (byte) it.nextByte());
assertEquals(0, it.bytesLeft());
assertFalse(it.hasNext());
it.reset();
assertTrue(it.hasNext());
assertEquals((byte) 64, (byte) it.nextByte());
}
}
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import org.testng.annotations.Test;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
/**
* Test class for {@link Status}.
*/
public class TestStatus {
@Test
public void testAcceptableStatus() {
assertTrue(Status.OK.isOk());
assertTrue(Status.BATCHED_OK.isOk());
assertFalse(Status.BAD_REQUEST.isOk());
assertFalse(Status.ERROR.isOk());
assertFalse(Status.FORBIDDEN.isOk());
assertFalse(Status.NOT_FOUND.isOk());
assertFalse(Status.NOT_IMPLEMENTED.isOk());
assertFalse(Status.SERVICE_UNAVAILABLE.isOk());
assertFalse(Status.UNEXPECTED_STATE.isOk());
}
}
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import org.testng.annotations.Test;
public class TestUtils {
@Test
public void bytesToFromLong() throws Exception {
byte[] bytes = new byte[8];
assertEquals(Utils.bytesToLong(bytes), 0L);
assertArrayEquals(Utils.longToBytes(0), bytes);
bytes[7] = 1;
assertEquals(Utils.bytesToLong(bytes), 1L);
assertArrayEquals(Utils.longToBytes(1L), bytes);
bytes = new byte[] { 127, -1, -1, -1, -1, -1, -1, -1 };
assertEquals(Utils.bytesToLong(bytes), Long.MAX_VALUE);
assertArrayEquals(Utils.longToBytes(Long.MAX_VALUE), bytes);
bytes = new byte[] { -128, 0, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToLong(bytes), Long.MIN_VALUE);
assertArrayEquals(Utils.longToBytes(Long.MIN_VALUE), bytes);
bytes = new byte[] { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF };
assertEquals(Utils.bytesToLong(bytes), -1L);
assertArrayEquals(Utils.longToBytes(-1L), bytes);
// if the array is too long we just skip the remainder
bytes = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1, 42, 42, 42 };
assertEquals(Utils.bytesToLong(bytes), 1L);
}
@Test
public void bytesToFromDouble() throws Exception {
byte[] bytes = new byte[8];
assertEquals(Utils.bytesToDouble(bytes), 0, 0.0001);
assertArrayEquals(Utils.doubleToBytes(0), bytes);
bytes = new byte[] { 63, -16, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToDouble(bytes), 1, 0.0001);
assertArrayEquals(Utils.doubleToBytes(1), bytes);
bytes = new byte[] { -65, -16, 0, 0, 0, 0, 0, 0 };
assertEquals(Utils.bytesToDouble(bytes), -1, 0.0001);
assertArrayEquals(Utils.doubleToBytes(-1), bytes);
bytes = new byte[] { 127, -17, -1, -1, -1, -1, -1, -1 };
assertEquals(Utils.bytesToDouble(bytes), Double.MAX_VALUE, 0.0001);
assertArrayEquals(Utils.doubleToBytes(Double.MAX_VALUE), bytes);
bytes = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 };
assertEquals(Utils.bytesToDouble(bytes), Double.MIN_VALUE, 0.0001);
assertArrayEquals(Utils.doubleToBytes(Double.MIN_VALUE), bytes);
bytes = new byte[] { 127, -8, 0, 0, 0, 0, 0, 0 };
assertTrue(Double.isNaN(Utils.bytesToDouble(bytes)));
assertArrayEquals(Utils.doubleToBytes(Double.NaN), bytes);
bytes = new byte[] { 63, -16, 0, 0, 0, 0, 0, 0, 42, 42, 42 };
assertEquals(Utils.bytesToDouble(bytes), 1, 0.0001);
}
@Test (expectedExceptions = NullPointerException.class)
public void bytesToLongNull() throws Exception {
Utils.bytesToLong(null);
}
@Test (expectedExceptions = IndexOutOfBoundsException.class)
public void bytesToLongTooShort() throws Exception {
Utils.bytesToLong(new byte[] { 0, 0, 0, 0, 0, 0, 0 });
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void bytesToDoubleTooShort() throws Exception {
Utils.bytesToDouble(new byte[] { 0, 0, 0, 0, 0, 0, 0 });
}
@Test
public void jvmUtils() throws Exception {
// This should ALWAYS return at least one thread.
assertTrue(Utils.getActiveThreadCount() > 0);
// This should always be greater than 0 or something is goofed up in the JVM.
assertTrue(Utils.getUsedMemoryBytes() > 0);
// Some operating systems may not implement this so we don't have a good
// test. Just make sure it doesn't throw an exception.
Utils.getSystemLoadAverage();
// This will probably be zero but should never be negative.
assertTrue(Utils.getGCTotalCollectionCount() >= 0);
// Could be zero similar to GC total collection count
assertTrue(Utils.getGCTotalTime() >= 0);
// Could be empty
assertTrue(Utils.getGCStatst().size() >= 0);
}
/**
* Since this version of TestNG doesn't appear to have an assertArrayEquals,
* this will compare the two to make sure they're the same.
* @param actual Actual array to validate
* @param expected What the array should contain
* @throws AssertionError if the test fails.
*/
public void assertArrayEquals(final byte[] actual, final byte[] expected) {
if (actual == null && expected != null) {
throw new AssertionError("Expected " + Arrays.toString(expected) +
" but found [null]");
}
if (actual != null && expected == null) {
throw new AssertionError("Expected [null] but found " +
Arrays.toString(actual));
}
if (actual.length != expected.length) {
throw new AssertionError("Expected length " + expected.length +
" but found " + actual.length);
}
for (int i = 0; i < expected.length; i++) {
if (actual[i] != expected[i]) {
throw new AssertionError("Expected byte [" + expected[i] +
"] at index " + i + " but found [" + actual[i] + "]");
}
}
}
}
/**
* Copyright (c) 2015-2017 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import java.util.Random;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import org.testng.annotations.Test;
/**
* Tests for the AcknowledgedCounterGenerator class.
*/
public class AcknowledgedCounterGeneratorTest {
/**
* Test that advancing past {@link Integer#MAX_VALUE} works.
*/
@Test
public void testIncrementPastIntegerMaxValue() {
final long toTry = AcknowledgedCounterGenerator.WINDOW_SIZE * 3;
AcknowledgedCounterGenerator generator =
new AcknowledgedCounterGenerator(Integer.MAX_VALUE - 1000);
Random rand = new Random(System.currentTimeMillis());
BlockingQueue<Long> pending = new ArrayBlockingQueue<Long>(1000);
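    // Simulate out-of-order acknowledgements: keep up to 1000 values outstanding
    // and ack them in a shuffled order so the generator's window must advance
    // past Integer.MAX_VALUE without losing unacknowledged slots.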
for (long i = 0; i < toTry; ++i) {
long value = generator.nextValue();
while (!pending.offer(value)) {
Long first = pending.poll();
// Don't always advance by one.
if (rand.nextBoolean()) {
generator.acknowledge(first);
} else {
Long second = pending.poll();
pending.add(first);
generator.acknowledge(second);
}
}
}
}
}
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.fail;
import java.util.NoSuchElementException;
import org.testng.annotations.Test;
public class TestIncrementingPrintableStringGenerator {
private final static int[] ATOC = new int[] { 65, 66, 67 };
@Test
public void rolloverOK() throws Exception {
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, ATOC);
assertNull(gen.lastValue());
assertEquals(gen.nextValue(), "AA");
assertEquals(gen.lastValue(), "AA");
assertEquals(gen.nextValue(), "AB");
assertEquals(gen.lastValue(), "AB");
assertEquals(gen.nextValue(), "AC");
assertEquals(gen.lastValue(), "AC");
assertEquals(gen.nextValue(), "BA");
assertEquals(gen.lastValue(), "BA");
assertEquals(gen.nextValue(), "BB");
assertEquals(gen.lastValue(), "BB");
assertEquals(gen.nextValue(), "BC");
assertEquals(gen.lastValue(), "BC");
assertEquals(gen.nextValue(), "CA");
assertEquals(gen.lastValue(), "CA");
assertEquals(gen.nextValue(), "CB");
assertEquals(gen.lastValue(), "CB");
assertEquals(gen.nextValue(), "CC");
assertEquals(gen.lastValue(), "CC");
assertEquals(gen.nextValue(), "AA"); // <-- rollover
assertEquals(gen.lastValue(), "AA");
}
@Test
public void rolloverOneCharacterOK() throws Exception {
// It would be silly to create a generator with one character.
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, new int[] { 65 });
for (int i = 0; i < 5; i++) {
assertEquals(gen.nextValue(), "AA");
}
}
@Test
public void rolloverException() throws Exception {
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, ATOC);
gen.setThrowExceptionOnRollover(true);
int i = 0;
try {
      while (i < 11) {
++i;
gen.nextValue();
}
fail("Expected NoSuchElementException");
} catch (NoSuchElementException e) {
assertEquals(i, 10);
}
}
@Test
public void rolloverOneCharacterException() throws Exception {
// It would be silly to create a generator with one character.
final IncrementingPrintableStringGenerator gen =
new IncrementingPrintableStringGenerator(2, new int[] { 65 });
gen.setThrowExceptionOnRollover(true);
int i = 0;
try {
      while (i < 3) {
++i;
gen.nextValue();
}
fail("Expected NoSuchElementException");
} catch (NoSuchElementException e) {
assertEquals(i, 2);
}
}
@Test
public void invalidLengths() throws Exception {
try {
new IncrementingPrintableStringGenerator(0, ATOC);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
try {
new IncrementingPrintableStringGenerator(-42, ATOC);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
}
@Test
public void invalidCharacterSets() throws Exception {
try {
new IncrementingPrintableStringGenerator(2, null);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
try {
new IncrementingPrintableStringGenerator(2, new int[] {});
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) { }
}
}
/**
* Copyright (c) 2017 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.fail;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.testng.annotations.Test;
import org.testng.collections.Lists;
public class TestRandomDiscreteTimestampGenerator {
@Test
public void systemTime() throws Exception {
final RandomDiscreteTimestampGenerator generator =
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS, 60);
List<Long> generated = Lists.newArrayList();
for (int i = 0; i < 60; i++) {
generated.add(generator.nextValue());
}
assertEquals(generated.size(), 60);
try {
generator.nextValue();
fail("Expected IllegalStateException");
} catch (IllegalStateException e) { }
}
@Test
public void withStartTime() throws Exception {
final RandomDiscreteTimestampGenerator generator =
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS, 1072915200L, 60);
List<Long> generated = Lists.newArrayList();
for (int i = 0; i < 60; i++) {
generated.add(generator.nextValue());
}
assertEquals(generated.size(), 60);
Collections.sort(generated);
long ts = 1072915200L - 60; // starts 1 interval in the past
for (final long t : generated) {
assertEquals(t, ts);
ts += 60;
}
try {
generator.nextValue();
fail("Expected IllegalStateException");
} catch (IllegalStateException e) { }
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void tooLarge() throws Exception {
new RandomDiscreteTimestampGenerator(60, TimeUnit.SECONDS,
RandomDiscreteTimestampGenerator.MAX_INTERVALS + 1);
}
//TODO - With PowerMockito we could UT the initializeTimestamp(long) call.
// Otherwise it would involve creating more functions and that would get ugly.
}
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import static org.testng.Assert.assertEquals;
import java.util.concurrent.TimeUnit;
import org.testng.annotations.Test;
public class TestUnixEpochTimestampGenerator {
@Test
public void defaultCtor() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator();
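    // The assertions below rely on the no-arg ctor defaulting to a 60 second
    // interval, so each nextValue() advances the epoch timestamp by 60.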
final long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 60);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 120);
assertEquals((long) generator.lastValue(), startTime + 60);
assertEquals((long) generator.nextValue(), startTime + 180);
}
@Test
public void ctorWithIntervalAndUnits() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(120, TimeUnit.SECONDS);
final long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 120);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 240);
assertEquals((long) generator.lastValue(), startTime + 120);
}
@Test
public void ctorWithIntervalAndUnitsAndStart() throws Exception {
final UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(120, TimeUnit.SECONDS, 1072915200L);
assertEquals((long) generator.nextValue(), 1072915200L);
assertEquals((long) generator.lastValue(), 1072915200L - 120);
assertEquals((long) generator.nextValue(), 1072915200L + 120);
assertEquals((long) generator.lastValue(), 1072915200L);
}
@Test
public void variousIntervalsAndUnits() throws Exception {
// negatives could happen, just start and roll back in time
UnixEpochTimestampGenerator generator =
new UnixEpochTimestampGenerator(-60, TimeUnit.SECONDS);
long startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime - 60);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime - 120);
assertEquals((long) generator.lastValue(), startTime - 60);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.NANOSECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.MICROSECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.MILLISECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(100, TimeUnit.SECONDS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + 100);
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + 200);
assertEquals((long) generator.lastValue(), startTime + 100);
generator = new UnixEpochTimestampGenerator(1, TimeUnit.MINUTES);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60));
assertEquals((long) generator.lastValue(), startTime + (1 * 60));
generator = new UnixEpochTimestampGenerator(1, TimeUnit.HOURS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60 * 60));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60 * 60));
assertEquals((long) generator.lastValue(), startTime + (1 * 60 * 60));
generator = new UnixEpochTimestampGenerator(1, TimeUnit.DAYS);
startTime = generator.currentValue();
assertEquals((long) generator.nextValue(), startTime + (1 * 60 * 60 * 24));
assertEquals((long) generator.lastValue(), startTime);
assertEquals((long) generator.nextValue(), startTime + (2 * 60 * 60 * 24));
assertEquals((long) generator.lastValue(), startTime + (1 * 60 * 60 * 24));
}
// TODO - With PowerMockito we could UT the initializeTimestamp(long) call.
// Otherwise it would involve creating more functions and that would get ugly.
}
/**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.generator;
import org.testng.annotations.Test;
import static org.testng.AssertJUnit.assertFalse;
public class TestZipfianGenerator {
@Test
public void testMinAndMaxParameter() {
long min = 5;
long max = 10;
ZipfianGenerator zipfian = new ZipfianGenerator(min, max);
for (int i = 0; i < 10000; i++) {
long rnd = zipfian.nextValue();
assertFalse(rnd < min);
assertFalse(rnd > max);
}
}
}
/**
* Copyright (c) 2015 Yahoo! Inc. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.measurements.exporter;
import site.ycsb.generator.ZipfianGenerator;
import site.ycsb.measurements.Measurements;
import site.ycsb.measurements.OneMeasurementHistogram;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.testng.annotations.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Properties;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
public class TestMeasurementsExporter {
@Test
public void testJSONArrayMeasurementsExporter() throws IOException {
Properties props = new Properties();
props.put(Measurements.MEASUREMENT_TYPE_PROPERTY, "histogram");
props.put(OneMeasurementHistogram.VERBOSE_PROPERTY, "true");
Measurements mm = new Measurements(props);
ByteArrayOutputStream out = new ByteArrayOutputStream();
JSONArrayMeasurementsExporter export = new JSONArrayMeasurementsExporter(out);
long min = 5000;
long max = 100000;
ZipfianGenerator zipfian = new ZipfianGenerator(min, max);
for (int i = 0; i < 1000; i++) {
int rnd = zipfian.nextValue().intValue();
mm.measure("UPDATE", rnd);
}
mm.exportMeasurements(export);
export.close();
ObjectMapper mapper = new ObjectMapper();
JsonNode json = mapper.readTree(out.toString("UTF-8"));
assertTrue(json.isArray());
assertEquals(json.get(0).get("measurement").asText(), "Operations");
assertEquals(json.get(4).get("measurement").asText(), "MaxLatency(us)");
assertEquals(json.get(11).get("measurement").asText(), "4");
}
}
/**
* Copyright (c) 2016 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import static org.testng.Assert.assertTrue;
import java.util.Properties;
import org.testng.annotations.Test;
import site.ycsb.generator.DiscreteGenerator;
public class TestCoreWorkload {
@Test
public void createOperationChooser() {
final Properties p = new Properties();
p.setProperty(CoreWorkload.READ_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.UPDATE_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.INSERT_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.SCAN_PROPORTION_PROPERTY, "0.20");
p.setProperty(CoreWorkload.READMODIFYWRITE_PROPORTION_PROPERTY, "0.20");
final DiscreteGenerator generator = CoreWorkload.createOperationGenerator(p);
final int[] counts = new int[5];
for (int i = 0; i < 100; ++i) {
switch (generator.nextString()) {
case "READ":
++counts[0];
break;
case "UPDATE":
++counts[1];
break;
case "INSERT":
++counts[2];
break;
case "SCAN":
++counts[3];
break;
default:
++counts[4];
}
}
for (int i : counts) {
// Doesn't do a wonderful job of equal distribution, but in a hundred, if we
      // don't see at least one of each operation then the generator is really broken.
assertTrue(i > 1);
}
}
@Test (expectedExceptions = IllegalArgumentException.class)
public void createOperationChooserNullProperties() {
CoreWorkload.createOperationGenerator(null);
}
}
/**
* Copyright (c) 2017 YCSB contributors All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.workloads;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;
import java.util.Vector;
import site.ycsb.ByteIterator;
import site.ycsb.Client;
import site.ycsb.DB;
import site.ycsb.NumericByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.Utils;
import site.ycsb.WorkloadException;
import site.ycsb.measurements.Measurements;
import org.testng.annotations.Test;
public class TestTimeSeriesWorkload {
@Test
public void twoThreads() throws Exception {
final Properties p = getUTProperties();
Measurements.setProperties(p);
final TimeSeriesWorkload wl = new TimeSeriesWorkload();
wl.init(p);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAB");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test (expectedExceptions = WorkloadException.class)
public void badTimeUnit() throws Exception {
final Properties p = new Properties();
p.put(TimeSeriesWorkload.TIMESTAMP_UNITS_PROPERTY, "foobar");
getWorkload(p, true);
}
@Test (expectedExceptions = WorkloadException.class)
public void failedToInitWorkloadBeforeThreadInit() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, false);
//wl.init(p); // <-- we NEED this :(
final Object threadState = wl.initThread(p, 0, 2);
final MockDB db = new MockDB();
wl.doInsert(db, threadState);
}
@Test (expectedExceptions = IllegalStateException.class)
public void failedToInitThread() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final MockDB db = new MockDB();
wl.doInsert(db, null);
}
@Test
public void insertOneKeyOneTagCardinalityOne() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
p.put(TimeSeriesWorkload.TAG_COUNT_PROPERTY, "1");
p.put(TimeSeriesWorkload.TAG_CARDINALITY_PROPERTY, "1");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
timestamp += 60;
}
}
@Test
public void insertOneKeyTwoTagsLowCardinality() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void insertTwoKeysTwoTagsLowCardinality() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
int metricCtr = 0;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
}
if (metricCtr++ > 1) {
assertEquals(db.keys.get(i), "AAAB");
if (metricCtr >= 4) {
metricCtr = 0;
timestamp += 60;
}
} else {
assertEquals(db.keys.get(i), "AAAA");
}
}
}
@Test
public void insertTwoKeysTwoThreads() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA"); // <-- key 1
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAB"); // <-- key 2
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void insertThreeKeysTwoThreads() throws Exception {
// To make sure the distribution doesn't miss any metrics
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "3");
final TimeSeriesWorkload wl = getWorkload(p, true);
Object threadState = wl.initThread(p, 0, 2);
MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertTrue(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
threadState = wl.initThread(p, 1, 2);
db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
timestamp = 1451606400;
int metricCtr = 0;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertNotNull(db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT));
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
}
if (metricCtr++ > 1) {
assertEquals(db.keys.get(i), "AAAC");
if (metricCtr >= 4) {
metricCtr = 0;
timestamp += 60;
}
} else {
assertEquals(db.keys.get(i), "AAAB");
}
}
}
@Test
public void insertWithValidation() throws Exception {
final Properties p = getUTProperties();
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "1");
p.put(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
p.put(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers");
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 74; i++) {
assertTrue(wl.doInsert(db, threadState));
}
assertEquals(db.keys.size(), 74);
assertEquals(db.values.size(), 74);
long timestamp = 1451606400;
for (int i = 0; i < db.keys.size(); i++) {
assertEquals(db.keys.get(i), "AAAA");
assertEquals(db.values.get(i).get("AA").toString(), "AAAA");
assertEquals(Utils.bytesToLong(db.values.get(i).get(
TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT).toArray()), timestamp);
assertFalse(((NumericByteIterator) db.values.get(i)
.get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).isFloatingPoint());
// validation check
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : db.values.get(i).entrySet()) {
if (entry.getKey().equals(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT) ||
entry.getKey().equals(TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT)) {
continue;
}
validationTags.put(entry.getKey(), entry.getValue().toString());
}
assertEquals(wl.validationFunction(db.keys.get(i), timestamp, validationTags),
((NumericByteIterator) db.values.get(i).get(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT)).getLong());
if (i % 2 == 0) {
assertEquals(db.values.get(i).get("AB").toString(), "AAAA");
} else {
assertEquals(db.values.get(i).get("AB").toString(), "AAAB");
timestamp += 60;
}
}
}
@Test
public void read() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final Object threadState = wl.initThread(p, 0, 1);
final MockDB db = new MockDB();
for (int i = 0; i < 20; i++) {
wl.doTransactionRead(db, threadState);
}
}
@Test
public void verifyRow() throws Exception {
final Properties p = getUTProperties();
final TimeSeriesWorkload wl = getWorkload(p, true);
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
final HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
validationTags.put("AA", "AAAA");
cells.put("AA", new StringByteIterator("AAAA"));
validationTags.put("AB", "AAAB");
cells.put("AB", new StringByteIterator("AAAB"));
long hash = wl.validationFunction("AAAA", 1451606400L, validationTags);
cells.put(TimeSeriesWorkload.TIMESTAMP_KEY_PROPERTY_DEFAULT, new NumericByteIterator(1451606400L));
cells.put(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT, new NumericByteIterator(hash));
assertEquals(wl.verifyRow("AAAA", cells), Status.OK);
// tweak the last value a bit
for (final ByteIterator it : cells.values()) {
it.reset();
}
cells.put(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT, new NumericByteIterator(hash + 1));
assertEquals(wl.verifyRow("AAAA", cells), Status.UNEXPECTED_STATE);
// no value cell, returns an unexpected state
for (final ByteIterator it : cells.values()) {
it.reset();
}
cells.remove(TimeSeriesWorkload.VALUE_KEY_PROPERTY_DEFAULT);
assertEquals(wl.verifyRow("AAAA", cells), Status.UNEXPECTED_STATE);
}
@Test
public void validateSettingsDataIntegrity() throws Exception {
Properties p = getUTProperties();
// data validation incompatibilities
p.setProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers"); // now it's ok
p.setProperty(TimeSeriesWorkload.GROUPBY_PROPERTY, "sum"); // now it's not
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.GROUPBY_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, "sum");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, "60");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_FUNCTION_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.DOWNSAMPLING_INTERVAL_PROPERTY, "");
p.setProperty(TimeSeriesWorkload.QUERY_TIMESPAN_PROPERTY, "60");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p = getUTProperties();
p.setProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY, "true");
p.setProperty(TimeSeriesWorkload.VALUE_TYPE_PROPERTY, "integers");
p.setProperty(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "true");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
p.setProperty(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "false");
p.setProperty(TimeSeriesWorkload.INSERT_START_PROPERTY, "");
try {
getWorkload(p, true);
fail("Expected WorkloadException");
} catch (WorkloadException e) { }
}
/** Helper method that generates unit testing defaults for the properties map */
private Properties getUTProperties() {
final Properties p = new Properties();
p.put(Client.RECORD_COUNT_PROPERTY, "10");
p.put(CoreWorkload.FIELD_COUNT_PROPERTY, "2");
p.put(CoreWorkload.FIELD_LENGTH_PROPERTY, "4");
p.put(TimeSeriesWorkload.TAG_KEY_LENGTH_PROPERTY, "2");
p.put(TimeSeriesWorkload.TAG_VALUE_LENGTH_PROPERTY, "4");
p.put(TimeSeriesWorkload.TAG_COUNT_PROPERTY, "2");
p.put(TimeSeriesWorkload.TAG_CARDINALITY_PROPERTY, "1,2");
p.put(CoreWorkload.INSERT_START_PROPERTY, "1451606400");
p.put(TimeSeriesWorkload.DELAYED_SERIES_PROPERTY, "0");
p.put(TimeSeriesWorkload.RANDOMIZE_TIMESERIES_ORDER_PROPERTY, "false");
return p;
}
/** Helper to setup the workload for testing. */
private TimeSeriesWorkload getWorkload(final Properties p, final boolean init)
throws WorkloadException {
Measurements.setProperties(p);
if (!init) {
return new TimeSeriesWorkload();
} else {
final TimeSeriesWorkload workload = new TimeSeriesWorkload();
workload.init(p);
return workload;
}
}
static class MockDB extends DB {
final List<String> keys = new ArrayList<String>();
final List<Map<String, ByteIterator>> values =
new ArrayList<Map<String, ByteIterator>>();
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
return Status.OK;
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
// TODO Auto-generated method stub
return Status.OK;
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
// TODO Auto-generated method stub
return Status.OK;
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
keys.add(key);
this.values.add(values);
return Status.OK;
}
@Override
public Status delete(String table, String key) {
// TODO Auto-generated method stub
return Status.OK;
}
public void dumpStdout() {
for (int i = 0; i < keys.size(); i++) {
System.out.print("[" + i + "] Key: " + keys.get(i) + " Values: {");
int x = 0;
for (final Entry<String, ByteIterator> entry : values.get(i).entrySet()) {
if (x++ > 0) {
System.out.print(", ");
}
System.out.print("{" + entry.getKey() + " => ");
if (entry.getKey().equals("YCSBV")) {
System.out.print(Utils.bytesToDouble(entry.getValue().toArray()) + "}");
} else if (entry.getKey().equals("YCSBTS")) {
System.out.print(Utils.bytesToLong(entry.getValue().toArray()) + "}");
} else {
System.out.print(new String(entry.getValue().toArray()) + "}");
}
}
System.out.println("}");
}
}
}
}
<!--
Copyright (c) 2012 - 2020 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>root</artifactId>
<version>0.18.0-SNAPSHOT</version>
</parent>
<artifactId>ycsb</artifactId>
<name>YCSB Release Distribution Builder</name>
<packaging>pom</packaging>
<description>
This module creates the release package of YCSB with all DB library bindings.
It is only used by the build process and does not contain any real
code itself.
</description>
<dependencies>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>hpdos-binding</artifactId>
<version>${project.version}</version>
</dependency>
<!--<dependency>
<groupId>site.ycsb</groupId>
<artifactId>accumulo1.9-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>aerospike-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>arangodb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>asynchbase-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>cassandra-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>cloudspanner-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>couchbase-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>couchbase2-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>crail-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>azurecosmos-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>azuretablestorage-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>dynamodb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>elasticsearch-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>elasticsearch5-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>foundationdb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>geode-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>googledatastore-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>googlebigtable-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>griddb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>hbase1-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>hbase2-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>ignite-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>infinispan-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>jdbc-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>kudu-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>memcached-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>maprdb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>maprjsondb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>mongodb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>nosqldb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>orientdb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>postgrenosql-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>rados-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>redis-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>rest-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>riak-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>rocksdb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>s3-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>seaweedfs-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>scylla-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>solr7-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>tarantool-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>tablestore-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>voltdb-binding</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>zookeeper-binding</artifactId>
<version>${project.version}</version>
</dependency>-->
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven.assembly.version}</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/distribution.xml</descriptor>
</descriptors>
<appendAssemblyId>false</appendAssemblyId>
<tarLongFileMode>posix</tarLongFileMode>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>ycsb-release</id>
<properties>
<maven.deploy.skip>true</maven.deploy.skip>
</properties>
</profile>
</profiles>
</project>
<!--
Copyright (c) 2012 - 2015 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
<id>package</id>
<formats>
<format>tar.gz</format>
</formats>
<includeBaseDirectory>true</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>..</directory>
<outputDirectory>.</outputDirectory>
<fileMode>0644</fileMode>
<includes>
<include>README</include>
<include>LICENSE.txt</include>
<include>NOTICE.txt</include>
</includes>
</fileSet>
<fileSet>
<directory>../bin</directory>
<outputDirectory>bin</outputDirectory>
<fileMode>0755</fileMode>
<includes>
<include>ycsb*</include>
</includes>
</fileSet>
<fileSet>
<directory>../bin</directory>
<outputDirectory>bin</outputDirectory>
<fileMode>0644</fileMode>
<includes>
<include>bindings.properties</include>
</includes>
</fileSet>
<fileSet>
<directory>../workloads</directory>
<outputDirectory>workloads</outputDirectory>
<fileMode>0644</fileMode>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<outputDirectory>lib</outputDirectory>
<includes>
<include>site.ycsb:core</include>
</includes>
<scope>runtime</scope>
<useProjectArtifact>false</useProjectArtifact>
<useProjectAttachments>false</useProjectAttachments>
<useTransitiveDependencies>true</useTransitiveDependencies>
<useTransitiveFiltering>true</useTransitiveFiltering>
</dependencySet>
</dependencySets>
<moduleSets>
<moduleSet>
<useAllReactorProjects>true</useAllReactorProjects>
<includeSubModules>true</includeSubModules>
<excludes>
<exclude>site.ycsb:core</exclude>
<exclude>site.ycsb:binding-parent</exclude>
<exclude>site.ycsb:datastore-specific-descriptor</exclude>
<exclude>site.ycsb:ycsb</exclude>
<exclude>site.ycsb:root</exclude>
</excludes>
<sources>
<fileSets>
<fileSet>
<includes>
<include>README.md</include>
</includes>
</fileSet>
<fileSet>
<outputDirectory>conf</outputDirectory>
<directory>src/main/conf</directory>
</fileSet>
<fileSet>
<outputDirectory>lib</outputDirectory>
<directory>target/dependency</directory>
</fileSet>
</fileSets>
</sources>
<binaries>
<includeDependencies>false</includeDependencies>
<outputDirectory>${module.artifactId}/lib</outputDirectory>
<unpack>false</unpack>
</binaries>
</moduleSet>
</moduleSets>
</assembly>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - Core workload package properties</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Core workload package properties</h2>
The property files used with the core workload generator can specify values for the following properties (an illustrative parameter file is sketched after the list):<p>
<UL>
<LI><b>fieldcount</b>: the number of fields in a record (default: 10)
<LI><b>fieldlength</b>: the size of each field (default: 100)
<LI><b>readallfields</b>: should reads read all fields (true) or just one (false) (default: true)
<LI><b>readproportion</b>: what proportion of operations should be reads (default: 0.95)
<LI><b>updateproportion</b>: what proportion of operations should be updates (default: 0.05)
<LI><b>insertproportion</b>: what proportion of operations should be inserts (default: 0)
<LI><b>scanproportion</b>: what proportion of operations should be scans (default: 0)
<LI><b>readmodifywriteproportion</b>: what proportion of operations should read a record, modify it, and write it back (default: 0)
<LI><b>requestdistribution</b>: what distribution should be used to select the records to operate on - uniform, zipfian or latest (default: uniform)
<LI><b>maxscanlength</b>: for scans, what is the maximum number of records to scan (default: 1000)
<LI><b>scanlengthdistribution</b>: for scans, what distribution should be used to choose the number of records to scan, for each scan, between 1 and maxscanlength (default: uniform)
<LI><b>insertorder</b>: should records be inserted in order by key ("ordered"), or in hashed order ("hashed") (default: hashed)
<LI><b>fieldnameprefix</b>: string prefix for the field name (default: “field”)
</UL>
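<P>
For example, a parameter file exercising several of these properties might look like the following sketch (the values are illustrative, not recommendations):
<pre>
workload=site.ycsb.workloads.CoreWorkload
fieldcount=5
fieldlength=256
readallfields=true
readproportion=0.8
updateproportion=0.1
scanproportion=0.1
maxscanlength=100
scanlengthdistribution=uniform
requestdistribution=zipfian
insertorder=ordered
</pre>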
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</BODY>
</HTML>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - Core workloads</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Core workloads</h2>
YCSB includes a set of core workloads that define a basic benchmark for cloud systems. Of course, you can define your own workloads, as described <a href="workload.html">here</A>. However,
the core workloads are a useful first step, and obtaining these benchmark numbers for a variety of different systems allows you to understand their relative performance
tradeoffs.
<P>
The core workloads consist of six different workloads:
<P>
<B>Workload A: Update heavy workload</B>
<P>
This workload has a mix of 50/50 reads and writes. An application example is a session store recording recent actions.
<P>
<B>Workload B: Read mostly workload</B>
<P>
This workload has a 95/5 read/write mix. Application example: photo tagging; adding a tag is an update, but most operations read tags.
<P>
<B>Workload C: Read only</B>
<P>
This workload is 100% read. Application example: user profile cache, where profiles are constructed elsewhere (e.g., Hadoop).
<P>
<B>Workload D: Read latest workload</B>
<P>
In this workload, new records are inserted, and the most recently inserted records are the most popular. Application example: user status updates; people want to read the latest.
<P>
<B>Workload E: Short ranges</B>
<P>
In this workload, short ranges of records are queried, instead of individual records. Application example: threaded conversations, where each scan is for the posts in a given thread (assumed to be clustered by thread id).
<P>
<B>Workload F: Read-modify-write</B>
<P>
In this workload, the client will read a record, modify it, and write back the changes. Application example: user database, where user records are read and modified by the user or to record user activity.
<HR>
<H2>Running the workloads</H2>
All six workloads use a similar data set. Workloads D and E insert records during the test run. Thus, to keep the database size consistent, we recommend the following sequence (the commands for the first two steps are sketched after the list):
<OL>
<LI>Load the database, using workload A's parameter file (workloads/workloada) and the "-load" switch to the client.
<LI>Run workload A (using workloads/workloada and "-t") for a variety of throughputs.
<LI>Run workload B (using workloads/workloadb and "-t") for a variety of throughputs.
<LI>Run workload C (using workloads/workloadc and "-t") for a variety of throughputs.
<LI>Run workload F (using workloads/workloadf and "-t") for a variety of throughputs.
<LI>Run workload D (using workloads/workloadd and "-t") for a variety of throughputs. This workload inserts records, increasing the size of the database.
<LI>Delete the data in the database.
<LI>Reload the database, using workload E's parameter file (workloads/workloade) and the "-load" switch to the client.
<LI>Run workload E (using workloads/workloade and "-t") for a variety of throughputs. This workload inserts records, increasing the size of the database.
</OL>
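<P>
As a sketch, the first two steps might be invoked as follows, using the command style shown on the <a href="dblayer.html">DB interface layer</a> page (com.foo.YourDBClass and yourjarpath are placeholders for your own binding):
<pre>
% java -cp build/ycsb.jar:yourjarpath site.ycsb.Client -load -db com.foo.YourDBClass -P workloads/workloada -s > load.dat
% java -cp build/ycsb.jar:yourjarpath site.ycsb.Client -t -db com.foo.YourDBClass -P workloads/workloada -target 1000 -s > transactions.dat
</pre>
Repeating the "-t" run with different -target values lets you explore a variety of throughputs.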
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</BODY>
</HTML>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - DB Interface Layer</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Implementing a database interface layer - overview</H2>
The database interface layer hides the details of the specific database you are benchmarking from the YCSB Client. This
allows the client to generate operations like "read record" or "update record" without having to understand
the specific API of your database. Thus, it is very easy to benchmark new database systems; once you have
created the database interface layer, the rest of the benchmark framework runs without any changes.
<P>
The database interface layer is a simple abstract class that provides read, insert, update, delete and scan operations for your
database. Implementing a database interface layer for your database means filling out the body of each of those methods. Once you
have compiled your layer, you can specify the name of your implemented class on the command line (or as a property) to the YCSB Client.
The YCSB Client will load your implementation dynamically when it starts. Thus, you do not need to recompile the YCSB Client itself
to add or change a database interface layer.
<HR>
<H2>Creating a new layer step-by-step</H2>
<h3>Step 1 - Extend site.ycsb.DB</h3>
The base class of all database interface layer implementations is site.ycsb.DB. This is an abstract class, so you need to create a new
class which extends the DB class. Your class must have a public no-argument constructor, because the instances will be constructed inside a factory
which will use the no-argument constructor.
<P>
The YCSB Client framework will create one instance of your DB class per worker thread, but there might be multiple worker threads generating the workload,
so there might be multiple instances of your DB class created.
<h3>Step 2 - Implement init() if necessary</h3>
You can perform any initialization of your DB object by implementing the following method
<pre>
public void init() throws DBException
</pre>
to perform any initialization actions. The init() method will be called once per DB instance; so if there are multiple threads, each DB instance will have init()
called separately.
<P>
The init() method should be used to set up the connection to the database and do any other initialization. In particular, you can configure your database layer
using properties passed to the YCSB Client at runtime. In fact, the YCSB Client will pass to the DB interface layer
all of the
properties specified in all parameter files specified when the Client starts up. Thus, you can create new properties for configuring your DB interface layer,
set them in your parameter files (or on the command line), and
then retrieve them inside your implementation of the DB interface layer.
<P>
These properties will be passed to the DB instance <i>after</i> the constructor, so it is important to retrieve them only in the init() method and not the
constructor. You can get the set of properties using the
<pre>
public Properties getProperties()
</pre>
method which is already implemented and inherited from the DB base class.
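<P>
For example, a minimal init() might look like the following sketch, where "mydb.url" is a hypothetical property invented for illustration:
<pre>
// requires: import java.util.Properties; (DBException comes from the YCSB core)
public void init() throws DBException {
  // Properties are only available after construction, so read them here, not in the constructor.
  Properties props = getProperties();
  String url = props.getProperty("mydb.url", "localhost:1234"); // hypothetical property and default
  // ... open the connection to your database using url ...
}
</pre>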
<h3>Step 3 - Implement the database query and update methods</h3>
The methods that you need to implement are:
<pre>
//Read a single record
public int read(String table, String key, Set<String> fields, HashMap<String,String> result);
//Perform a range scan
public int scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String,String>> result);
//Update a single record
public int update(String table, String key, HashMap<String,String> values);
//Insert a single record
public int insert(String table, String key, HashMap<String,String> values);
//Delete a single record
public int delete(String table, String key);
</pre>
In each case, the method takes a table name and record key. (In the case of scan, the record key is the first key in the range to scan.) For the
read methods (read() and scan()) the methods additionally take a set of fields to be read, and provide a structure (HashMap or Vector of HashMaps) to store
the returned data. For the write methods (insert() and update()) the methods take a HashMap that maps field names to values.
<P>
The database should have the appropriate tables created before you run the benchmark. So you can assume in your implementation of the above methods
that the appropriate tables already exist, and just write code to read or write from the tables named in the "table" parameter.
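<P>
As an illustration only (not a layer for any real database), here is a sketch of insert() and read() against an in-memory map, following the signatures above and assuming the convention that zero indicates success:
<pre>
// requires: import java.util.*; import java.util.concurrent.ConcurrentHashMap;
private static final ConcurrentHashMap<String, HashMap<String, String>> STORE =
    new ConcurrentHashMap<String, HashMap<String, String>>();

public int insert(String table, String key, HashMap<String, String> values) {
  STORE.put(table + "/" + key, new HashMap<String, String>(values));
  return 0; // assuming zero indicates success
}

public int read(String table, String key, Set<String> fields, HashMap<String, String> result) {
  HashMap<String, String> row = STORE.get(table + "/" + key);
  if (row == null) {
    return 1; // assuming non-zero indicates an error
  }
  if (fields == null) {
    result.putAll(row); // assuming a null field set means "read all fields"
  } else {
    for (String field : fields) {
      result.put(field, row.get(field));
    }
  }
  return 0;
}
</pre>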
<h3>Step 4 - Compile your database interface layer</h3>
Your code can be compiled separately from the compilation of the YCSB Client and framework. In particular, you can make changes to your DB class and
recompile without having to recompile the YCSB Client.
<h3>Step 5 - Use it with the YCSB Client</h3>
Make sure that the classes for your implementation (or a jar containing those classes) are available on your CLASSPATH, as well as any libraries/jar files used
by your implementation. Now, when you run the YCSB Client, specify the "-db" argument on the command line and provide the fully qualified classname of your
DB class. For example, to run workloada with your DB class:
<pre>
% java -cp build/ycsb.jar:yourjarpath site.ycsb.Client -t -db com.foo.YourDBClass -P workloads/workloada -P large.dat -s > transactions.dat
</pre>
You can also specify the DB interface layer using the DB property in your parameter file:
<pre>
db=com.foo.YourDBClass
</pre>
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</BODY>
</HTML>
<html>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<head>
<title>YCSB - Yahoo! Cloud Serving Benchmark</title>
</head>
<body>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<hr>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<UL>
<LI><A href="#overview">Overview</A>
<LI><A href="#download">Download YCSB</A>
<LI><A href="#gettingstarted">Getting started</A>
<LI><A href="#extending">Extending YCSB</A>
</UL>
<HR>
<A name="overview">
<H2>Overview</H2>
There are many new serving databases available, including:
<ul>
<LI>BigTable
<LI><A HREF="http://hadoop.apache.org/hbase/">HBase</A>
<LI><A HREF="http://www.microsoft.com/windowsazure/">Azure</A>
<LI><A HREF="http://incubator.apache.org/cassandra/">Cassandra</A>
<LI><A HREF="http://couchdb.apache.org/">CouchDB</A>
<LI><A HREF="http://wiki.github.com/cliffmoon/dynomite/dynomite-framework">Dynomite</A>
<li>...and many others
</ul>
It is difficult to decide which system is right for your application, partially because the features differ between
systems, and partially because there is not an easy way to compare the performance of one system versus another.
<P>
The goal of the YCSB project is to develop a framework and common set of workloads for evaluating the performance of
different "key-value" and "cloud" serving stores. The project comprises two things:
<ul>
<LI>The YCSB Client, an extensible workload generator
<LI>The Core workloads, a set of workload scenarios to be executed by the generator
</UL>
Although the core workloads provide a well rounded picture of a system's performance, the Client is extensible so that
you can define new and different workloads to examine system aspects, or application scenarios, not adequately covered by
the core workload. Similarly, the Client is extensible to support benchmarking different databases. Although we include
sample code for benchmarking HBase and Cassandra, it is straightforward to write a new interface layer to benchmark
your favorite database.
<P>
A common use of the tool is to benchmark multiple systems and compare them. For example, you can install multiple systems
on the same hardware configuration, and run the same workloads against each system. Then you can plot the performance
of each system (for example, as latency versus throughput curves) to see when one system does better than another.
<HR>
<A name="download">
<H2>Download YCSB</H2>
YCSB is available
at <A HREF="http://wiki.github.com/brianfrankcooper/YCSB/">http://wiki.github.com/brianfrankcooper/YCSB/</A>.
<HR>
<a name="gettingstarted">
<H2>Getting started</H2>
Detailed instructions for using YCSB are available on the GitHub wiki:
<A HREF="http://wiki.github.com/brianfrankcooper/YCSB/getting-started">http://wiki.github.com/brianfrankcooper/YCSB/getting-started</A>.
<HR>
<A name="extending">
<H2>Extending YCSB</H2>
YCSB is designed to be extensible. It is easy to add a new database interface layer to support benchmarking a new database. It is also easy to define new workloads.
<ul>
<li><A HREF="dblayer.html">DB Interface Layer</a>
<li><A HREF="workload.html">Implementing new workloads</a>
</UL>
More details about the entire class structure of YCSB are available here:
<UL>
<LI><A HREF="javadoc/index.html">YCSB javadoc documentation</A>
</ul>
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</body>
</html>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - Parallel clients</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Running multiple clients in parallel</h2>
It is straightforward to run the transaction phase of the workload from multiple servers - just start up clients on different servers, each running the same workload. Each client will
produce performance statistics when it is done, and you'll have to aggregate these individual files into a single set of results.
<P>
In some cases it makes sense to load the database using multiple servers. In this case, you will want to partition the records to be loaded among the clients. Normally, YCSB just loads
all of the records (as defined by the recordcount property). However, if you want to partition the load you need to additionally specify two other properties for each client:
<UL>
<LI><b>insertstart</b>: The index of the record to start at.
<LI><b>insertcount</b>: The number of records to insert.
</UL>
These properties can be specified in a property file or on the command line using the -p option.
<P>
For example, imagine you want to load 100 million records (so recordcount=100000000). Imagine you want to load with four clients. For the first client:
<pre>
insertstart=0
insertcount=25000000
</pre>
For the second client:
<pre>
insertstart=25000000
insertcount=25000000
</pre>
For the third client:
<pre>
insertstart=50000000
insertcount=25000000
</pre>
And for the fourth client:
<pre>
insertstart=75000000
insertcount=25000000
</pre>
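These values can equally be passed on the command line with the -p option; a sketch of the full invocation for the second client (class name and paths are placeholders):
<pre>
% java -cp build/ycsb.jar:yourjarpath site.ycsb.Client -load -db com.foo.YourDBClass -P workloads/workloada -p recordcount=100000000 -p insertstart=25000000 -p insertcount=25000000
</pre>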
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</body>
</html>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - Tips and FAQ</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Tips</h2>
<B>Tip 1 - Carefully adjust the number of threads</B>
<P>
The number of threads determines how much workload you can generate against the database. Imagine that you are trying to run a test with 10,000 operations per second,
but you are only achieving 8,000 operations per second. Is this because the database can't keep up with the load? Not necessarily. Imagine that you are running with 100
client threads (e.g. "-threads 100") and each operation is taking 12 milliseconds on average. Each thread will only be able to generate 83 operations per second, because each
thread operates sequentially. Over 100 threads, your client will only generate 8300 operations per second, even if the database can support more. Increasing the number of threads
ensures there are enough parallel clients hitting the database so that the database, not the client, is the bottleneck.
<P>
To calculate the number of threads needed, you should have some idea of the expected latency. For example, at 10,000 operations per second, we might expect the database
to have a latency of 10-30 milliseconds on average. So to generate 10,000 operations per second, you will need (ops per sec / (1000 / average latency in ms)), or (10000/(1000/30)) = 300 threads.
In fact, to be conservative, you might consider having 400 threads. Although this is a lot of threads, each thread will spend most of its time waiting for the database to respond,
so the context switching overhead will be low.
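<P>
The same rule of thumb as a snippet, using the numbers from the example above:
<pre>
// threads = target ops/sec / (1000 / average latency in ms)
int targetOpsPerSec = 10000;
double avgLatencyMs = 30.0;
int threads = (int) Math.ceil(targetOpsPerSec / (1000.0 / avgLatencyMs)); // = 300
</pre>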
<P>
Experiment with increasing the number of threads, especially if you find you are not reaching your target throughput. Eventually, of course, you will saturate the database
and there will be no way to increase the number of threads to get more throughput (in fact, increasing the number of client threads may make things worse) but you need to have
enough threads to ensure it is the database, not the client, that is the bottleneck.
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</BODY>
</HTML>
<HTML>
<!--
Copyright (c) 2010 Yahoo! Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<HEAD>
<TITLE>YCSB - Implementing new workloads</TITLE>
</HEAD>
<BODY>
<H1><img src="images/ycsb.jpg" width=150> Yahoo! Cloud Serving Benchmark</H1>
<H3>Version 0.1.2</H3>
<HR>
<A HREF="index.html">Home</A> - <A href="coreworkloads.html">Core workloads</A> - <a href="tipsfaq.html">Tips and FAQ</A>
<HR>
<H2>Implementing new workloads - overview</h2>
A workload represents the load that a given application will put on the database system. For benchmarking purposes, we must define
workloads that are relatively simple compared to real applications, so that we can better reason about the benchmarking results
we get. However, a workload should be detailed enough so that once we measure the database's performance, we know what kinds of applications
might experience similar performance.
<p>
In the context of YCSB, a workload defines both a <b>data set</b>, which is a set of records to be loaded into the database, and a <b>transaction set</b>,
which is the set of read and write operations against the database. Creating the transactions requires understanding the structure of the records, which
is why both the data and the transactions must be defined in the workload.
<P>
For a complete benchmark, multiple important (but distinct) workloads might be grouped together into a <i>workload package</I>. The CoreWorkload
package included with the YCSB client is an example of such a collection of workloads.
<P>
Typically a workload consists of two files:
<UL>
<LI>A java class which contains the code to create data records and generate transactions against them
<LI>A parameter file which tunes the specifics of the workload
</UL>
For example, a workload class file might generate some combination of read and update operations against the database. The parameter
file might specify whether the mix of reads and updates is 50/50, 80/20, etc.
<P>
There are two ways to create a new workload or package of workloads.
<P>
<h3>Option 1: new parameter files</h3>
<P>
The core workloads included with YCSB are defined by a set of parameter files (workloada, workloadb, etc.). You can create your own parameter file with new values
for the read/write mix, request distribution, etc. For example, the workloada file has the following contents:
<pre>
workload=site.ycsb.workloads.CoreWorkload
readallfields=false
readproportion=0.5
updateproportion=0.5
scanproportion=0
insertproportion=0
requestdistribution=zipfian
</pre>
Creating a new file that changes any of these values will produce a new workload with different characteristics. The set of properties that can be specified is <a href="coreproperties.html">here</a>.
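<P>
For instance, the following sketch of a read-mostly variant changes only a few of those values (it is illustrative, not one of the shipped workloads):
<pre>
workload=site.ycsb.workloads.CoreWorkload
readallfields=false
readproportion=0.95
updateproportion=0.05
scanproportion=0
insertproportion=0
requestdistribution=latest
</pre>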
<P>
<h3>Option 2: new java class</h3>
<P>
The workload java class will be created by the YCSB Client at runtime, and will use an instance of the <a href="dblayer.html">DB interface layer</A>
to generate the actual operations against the database. Thus, the java class only needs to decide (based on settings in the parameter file) what records
to create for the data set, and what reads, updates etc. to generate for the transaction phase. The YCSB Client will take care of creating the workload java class,
passing it to a worker thread for executing, deciding how many records to create or how many operations to execute, and measuring the resulting
performance.
<P>
If the CoreWorkload (or some other existing package) does not have the ability to generate the workload you desire, you can create a new workload java class.
This is done using the following steps:
<H3>Step 1. Extend <a href="javadoc/site/ycsb/Workload.html">site.ycsb.Workload</A></H3>
The base class of all workload classes is site.ycsb.Workload. This is an abstract class, so you create a new workload that extends this base class. Your
class must have a public no-argument constructor, because the workload will be created in a factory using the no-argument constructor. The YCSB Client will
create one Workload object for each worker thread, so if you run the Client with multiple threads, multiple workload objects will be created.
<H3>Step 2. Write code to initialize your workload class</H3>
The parameter file will be passed to the workload object after the constructor has been called, so if you are using any parameter properties, you must
use them to initialize your workload using either the init() or initThread() methods.
<UL>
<LI>init() - called once for all workload instances. Used to initialize any objects shared by all threads.
<LI>initThread() - called once per workload instance in the context of the worker thread. Used to initialize any objects specific to a single Workload instance
and single worker thread.
</UL>
In either case, you can access the parameter properties using the Properties object passed in to both methods. These properties will include all properties defined
in any property file passed to the YCSB Client or defined on the client command line.
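<P>
For example, here is a sketch of init() for the class above, assuming the workload defines a "readproportion" property (any property name of your own choosing works the same way; the fragment also assumes imports of java.util.Properties and site.ycsb.WorkloadException):
<pre>
private double readProportion;

@Override
public void init(Properties p) throws WorkloadException {
  // The Properties object includes everything from parameter files
  // and the command line.
  readProportion = Double.parseDouble(p.getProperty("readproportion", "0.5"));
}
</pre>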
<H3>Step 3. Write any cleanup code</H3>
The cleanup() method is called once for all workload instances, after the workload has completed.
<H3>Step 4. Define the records to be inserted</H3>
The YCSB Client will call the doInsert() method once for each record to be inserted into the database. So you should implement this method
to create and insert a single record. The DB object you can use to perform the insert will be passed to the doInsert() method.
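<P>
Continuing the sketch, a doInsert() might look like the following. The table and field names are illustrative, nextKey() is a hypothetical helper, and the fragment assumes the site.ycsb core API in which DB operations return a Status:
<pre>
@Override
public boolean doInsert(DB db, Object threadstate) {
  // Build one record; field layout is entirely up to your workload.
  Map&lt;String, ByteIterator&gt; values = new HashMap&lt;&gt;();
  values.put("field0", new StringByteIterator("some data"));
  // Report success only if the insert succeeded.
  return db.insert("usertable", nextKey(), values).isOk();
}
</pre>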
<H3>Step 5. Define the transactions</H3>
The YCSB Client will call the doTransaction() method once for every transaction that is to be executed. So you should implement this method to execute
a single transaction, using the DB object passed in to access the database. Your implementation of this method can choose between different types of
transactions, and can make multiple calls to the DB interface layer. However, each invocation of the method should be a logical transaction. In particular, when you run the client,
you'll specify the number of operations to execute; if you request 1000 operations then doTransaction() will be executed 1000 times.
<P>
Note that you do not have to do any throttling of your transactions (or record insertions) to achieve the target throughput. The YCSB Client will do the throttling
for you.
<P>
Note also that it is allowable to insert records inside the doTransaction() method. You might do this if you wish the database to grow during the workload. In this case,
the initial dataset will be constructed using calls to the doInsert() method, while additional records would be inserted using calls to the doTransaction() method.
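<P>
A corresponding doTransaction() sketch, choosing between a read and an update on each invocation (chooseKey() is a hypothetical helper; readProportion comes from the Step 2 sketch):
<pre>
private final Random random = new Random();

@Override
public boolean doTransaction(DB db, Object threadstate) {
  String key = chooseKey();
  if (random.nextDouble() &lt; readProportion) {
    // Passing null for fields reads all fields of the record.
    Map&lt;String, ByteIterator&gt; result = new HashMap&lt;&gt;();
    return db.read("usertable", key, null, result).isOk();
  } else {
    Map&lt;String, ByteIterator&gt; values = new HashMap&lt;&gt;();
    values.put("field0", new StringByteIterator("new data"));
    return db.update("usertable", key, values).isOk();
  }
}
</pre>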
<H3>Step 6. Measure latency, if necessary</H3>
The YCSB client will automatically measure the latency and throughput of database operations, even for workloads that you define. However, the client will only measure
the latency of individual calls to the database, not of more complex transactions. Consider for example a workload that reads a record, modifies it, and writes
the changes back to the database. The YCSB client will automatically measure the latency of the read operation to the database; and separately will automatically measure the
latency of the update operation. However, if you would like to measure the latency of the entire read-modify-write transaction, you will need to add an additional timing step to your
code.
<P>
Measurements are gathered using the Measurements.measure() call. There is a singleton instance of Measurements, which can be obtained using the
Measurements.getMeasurements() static method. For each metric you are measuring, you need to assign a string tag; this tag will label the resulting
average, min, max, histogram etc. measurements output by the tool at the end of the workload. For example, consider the following code:
<pre>
long st = System.currentTimeMillis();
db.read(TABLENAME, keyname, fields, new HashMap<String, ByteIterator>());
db.update(TABLENAME, keyname, values);
long en = System.currentTimeMillis();
Measurements.getMeasurements().measure("READ-MODIFY-WRITE", (int)(en - st));
</pre>
In this code, the calls to System.currentTimeMillis() are used to time the entire read-modify-write transaction. Then, the call to measure() reports the latency to the
measurement component.
<p>
Using this pattern, your custom measurements will be gathered and aggregated using the same mechanism that is used to gather measurements for individual READ, UPDATE etc. operations.
<H3>Step 7. Use it with the YCSB Client</H3>
Make sure that the classes for your implementation (or a jar containing those classes) are available on your CLASSPATH, as well as any libraries/jar files used
by your implementation. Now, when you run the YCSB Client, specify the "workload" property to provide the fully qualified classname of your
workload class. For example:
<pre>
workload=com.foo.YourWorkloadClass
</pre>
<HR>
YCSB - Yahoo! Research - Contact cooperb@yahoo-inc.com.
</body>
</html>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>binding-parent</artifactId>
<version>0.18.0-SNAPSHOT</version>
<relativePath>../binding-parent</relativePath>
</parent>
<artifactId>hpdos-binding</artifactId>
<name>HPDOS Java Binding</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.25</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
package site.ycsb.db;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
 * YCSB binding for HPDOS. The operations below are stubs that log the key
 * and report success; they will be wired to the native JNI client as the
 * binding matures.
 */
public class HpdosClient extends DB {
  private static final Logger LOGGER = LoggerFactory.getLogger(HpdosClient.class);
  public HpdosClient() {
  }
  @Override
  public void init() throws DBException {
    Properties prop = getProperties();
    prop.forEach((k, v) -> LOGGER.info(k + " = " + v));
    LOGGER.info("init hpdos");
  }
  @Override
  public void cleanup() throws DBException {
  }
  // Read a single record.
  @Override
  public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
    LOGGER.info("read " + key);
    return Status.OK;
  }
  // Perform a range scan.
  @Override
  public Status scan(String table, String startkey, int recordCount, Set<String> fields,
      Vector<HashMap<String, ByteIterator>> result) {
    return Status.NOT_IMPLEMENTED;
  }
  // Update a single record.
  @Override
  public Status update(String table, String key, Map<String, ByteIterator> values) {
    LOGGER.info("update " + key);
    return Status.OK;
  }
  // Insert a single record.
  @Override
  public Status insert(String table, String key, Map<String, ByteIterator> values) {
    return Status.OK;
  }
  // Delete a single record.
  @Override
  public Status delete(String table, String key) {
    return Status.OK;
  }
}
// Pass -Dcheckstyle.skip on the Maven command line to bypass style checks.
\ No newline at end of file
package site.ycsb.db;
/**
 * Thin JNI wrapper around the native HPDOS client library
 * (libhpdosclient.so, built and installed by the Makefile).
 */
public class JNIClient {
  static {
    // Assumption: the Makefile has copied libhpdosclient.so to /usr/lib,
    // so it is resolvable on the JVM's native library path.
    System.loadLibrary("hpdosclient");
  }
  public static long endpointGroup;
  public long client;
  // Key-value operations against an open client handle.
  public native int put(long client, byte[] key, byte[] value);
  public native byte[] get(long client, byte[] key);
  public native int delete(long client, byte[] key);
  // Endpoint-group lifecycle: a group owns the queue and buffer resources
  // shared by the clients created from it.
  public native long createEndpointGroup(int sendQSize, int recvQSize, int compQSize,
      int sendMsqSize, int recvMsgSize, int maxInLine, int timeout);
  public native long createClient(long endpointGroup);
  public native void closeClient(long client);
  public native void closeEndpointGroup(long endpointGroup);
}
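A hedged usage sketch for the wrapper above (the queue, message-size, and timeout arguments are placeholders, not tuned defaults):

    JNIClient jni = new JNIClient();
    long group = jni.createEndpointGroup(64, 64, 128, 1024, 1024, 64, 1000);
    long client = jni.createClient(group);
    jni.put(client, "key".getBytes(), "value".getBytes());
    byte[] value = jni.get(client, "key".getBytes());
    jni.delete(client, "key".getBytes());
    jni.closeClient(client);
    jni.closeEndpointGroup(group);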
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012 - 2020 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>site.ycsb</groupId>
<artifactId>root</artifactId>
<version>0.18.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>YCSB Root</name>
<description>
This is the top-level project that builds and packages the core and all the DB bindings for the YCSB infrastructure.
</description>
<url>https://ycsb.site/</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<developers>
<!-- Please see git for authorship information.
This list is project maintainers -->
<developer>
<id>allanbank</id>
<name>Robert J. Moore</name>
<email>robert.j.moore@allanbank.com</email>
</developer>
<developer>
<id>busbey</id>
<name>Sean Busbey</name>
<email>sean.busbey@gmail.com</email>
</developer>
<developer>
<id>cmatser</id>
<name>Chrisjan Matser</name>
<email>cmatser@codespinnerinc.com</email>
</developer>
<developer>
<id>stfeng2</id>
<name>Stanley Feng</name>
<email>stfeng@google.com</email>
</developer>
</developers>
<scm>
<connection>scm:git:git://github.com/brianfrankcooper/YCSB.git</connection>
<tag>master</tag>
<url>https://github.com/brianfrankcooper/YCSB</url>
</scm>
<distributionManagement>
<repository>
<id>sonatype.releases.https</id>
<name>Release Repo at sonatype oss.</name>
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2</url>
</repository>
<snapshotRepository>
<id>sonatype.snapshots.https</id>
<name>snapshot Repo at sonatype oss.</name>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
</snapshotRepository>
</distributionManagement>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>7.7.1</version>
</dependency>
<dependency>
<groupId>org.jdom</groupId>
<artifactId>jdom</artifactId>
<version>1.1</version>
</dependency>
<dependency>
<groupId>com.google.collections</groupId>
<artifactId>google-collections</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
</dependencies>
</dependencyManagement>
<!-- Properties Management -->
<properties>
<maven.assembly.version>2.5.5</maven.assembly.version>
<maven.dependency.version>2.10</maven.dependency.version>
<!-- datastore binding versions, lex sorted -->
<hpdos.version>1.0.0</hpdos.version>
<!--<accumulo.1.9.version>1.9.3</accumulo.1.9.version>
<aerospike.version>3.1.2</aerospike.version>
<arangodb.version>4.4.1</arangodb.version>
<asynchbase.version>1.8.2</asynchbase.version>
<azurecosmos.version>4.8.0</azurecosmos.version>
<azurestorage.version>4.0.0</azurestorage.version>
<cassandra.cql.version>3.0.0</cassandra.cql.version>
<cloudspanner.version>2.0.1</cloudspanner.version>
<couchbase.version>1.4.10</couchbase.version>
<couchbase2.version>2.3.1</couchbase2.version>
<crail.version>1.1-incubating</crail.version>
<elasticsearch5-version>5.5.1</elasticsearch5-version>
<foundationdb.version>5.2.5</foundationdb.version>
<geode.version>1.2.0</geode.version>
<googlebigtable.version>1.4.0</googlebigtable.version>
<griddb.version>4.0.0</griddb.version>
<hbase1.version>1.4.12</hbase1.version>
<hbase2.version>2.2.3</hbase2.version>
<ignite.version>2.7.6</ignite.version>
<infinispan.version>7.2.2.Final</infinispan.version>
<kudu.version>1.11.1</kudu.version>
<maprhbase.version>1.1.8-mapr-1710</maprhbase.version>
<mongodb.version>3.11.0</mongodb.version>
<mongodb.async.version>2.0.1</mongodb.async.version>
<openjpa.jdbc.version>2.1.1</openjpa.jdbc.version>
<orientdb.version>2.2.37</orientdb.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<redis.version>2.9.0</redis.version>
<riak.version>2.0.5</riak.version>
<rocksdb.version>6.2.2</rocksdb.version>
<s3.version>1.10.20</s3.version>
<seaweed.client.version>1.4.1</seaweed.client.version>
<scylla.cql.version>3.10.2-scylla-1</scylla.cql.version>
<solr7.version>7.7.2</solr7.version>
<tarantool.version>1.6.5</tarantool.version>
<thrift.version>0.8.0</thrift.version>
<tablestore.version>4.8.0</tablestore.version>
<voltdb.version>10.1.1</voltdb.version>
<zookeeper.version>3.5.8</zookeeper.version>-->
</properties>
<modules>
<!-- our internals -->
<module>core</module>
<module>binding-parent</module>
<module>distribution</module>
<!-- all the datastore bindings, lex sorted please -->
<module>hpdos</module>
<!--<module>accumulo1.9</module>
<module>aerospike</module>
<module>arangodb</module>
<module>asynchbase</module>
<module>azurecosmos</module>
<module>azuretablestorage</module>
<module>cassandra</module>
<module>cloudspanner</module>
<module>couchbase</module>
<module>couchbase2</module>
<module>crail</module>
<module>dynamodb</module>
<module>elasticsearch</module>
<module>elasticsearch5</module>
<module>foundationdb</module>
<module>geode</module>
<module>googlebigtable</module>
<module>googledatastore</module>
<module>griddb</module>
<module>hbase1</module>
<module>hbase2</module>
<module>ignite</module>
<module>infinispan</module>
<module>jdbc</module>
<module>kudu</module>
<module>maprdb</module>
<module>maprjsondb</module>
<module>memcached</module>
<module>mongodb</module>
<module>nosqldb</module>
<module>orientdb</module>
<module>postgrenosql</module>
<module>rados</module>
<module>redis</module>
<module>rest</module>
<module>riak</module>
<module>rocksdb</module>
<module>s3</module>
<module>seaweedfs</module>
<module>scylla</module>
<module>solr7</module>
<module>tarantool</module>
<module>tablestore</module>
<module>voltdb</module>
<module>zookeeper</module>-->
</modules>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
<version>2.16</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>3.0.0-M1</version>
<executions>
<execution>
<id>enforce-maven</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireMavenVersion>
<!--
Maven 3.6.2 has issues
https://github.com/brianfrankcooper/YCSB/issues/1390
-->
<version>[3.1.0,3.6.2),(3.6.2,)</version>
</requireMavenVersion>
</rules>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.7.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<executions>
<execution>
<id>validate</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
<configuration>
<configLocation>checkstyle.xml</configLocation>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<!-- plugins needed to pass sonatype repo checks -->
<id>ycsb-release</id>
<properties>
<doclint>none</doclint>
</properties>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<version>3.0.0-M1</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>3.1.0</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>3.1.1</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
<version>1.6</version>
<executions>
<execution>
<id>sign-release-artifacts</id>
<goals>
<goal>sign</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
<!--
Copyright (c) 2012 - 2018 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
## Quick Start
This section describes how to run YCSB on RocksDB running locally (within the same JVM).
NOTE: RocksDB is an embedded database and so articles like [How to run in parallel](https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload-in-Parallel) are not applicable here.
### 1. Set Up YCSB
Clone the YCSB git repository and compile:
git clone https://github.com/brianfrankcooper/YCSB.git
cd YCSB
mvn -pl site.ycsb:rocksdb-binding -am clean package
### 2. Run YCSB
Now you are ready to run! First, load the data:
./bin/ycsb load rocksdb -s -P workloads/workloada -p rocksdb.dir=/tmp/ycsb-rocksdb-data
Then, run the workload:
./bin/ycsb run rocksdb -s -P workloads/workloada -p rocksdb.dir=/tmp/ycsb-rocksdb-data
## RocksDB Configuration Parameters
* ```rocksdb.dir``` - (required) A path to a folder to hold the RocksDB data files.
* EX. ```/tmp/ycsb-rocksdb-data```
* ```rocksdb.optionsfile``` - A path to a [RocksDB options file](https://github.com/facebook/rocksdb/wiki/RocksDB-Options-File).
* EX. ```ycsb-rocksdb-options.ini```
## Note on RocksDB Options
If `rocksdb.optionsfile` is given, YCSB will apply all [RocksDB options](https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning) exactly as specified in the options file.
Otherwise, YCSB will try to set reasonable defaults.
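For illustration, a minimal options file (mirroring the test fixture shipped with this binding; the values are placeholders, not tuning advice, and RocksDB normally generates full options files itself) could look like:

    [Version]
      rocksdb_version=6.2.2
      options_file_version=1.1
    [DBOptions]
      create_if_missing=true
      create_missing_column_families=true
    [CFOptions "default"]
    [TableOptions/BlockBasedTable "default"]
    [CFOptions "usertable"]
    [TableOptions/BlockBasedTable "usertable"]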
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2017 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>binding-parent</artifactId>
<version>0.18.0-SNAPSHOT</version>
<relativePath>../binding-parent</relativePath>
</parent>
<artifactId>rocksdb-binding</artifactId>
<name>RocksDB Java Binding</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
<version>${rocksdb.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.25</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
/*
* Copyright (c) 2018 - 2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import site.ycsb.*;
import site.ycsb.Status;
import net.jcip.annotations.GuardedBy;
import org.rocksdb.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.ByteBuffer;
import java.nio.file.*;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
* RocksDB binding for <a href="http://rocksdb.org/">RocksDB</a>.
*
* See {@code rocksdb/README.md} for details.
*/
public class RocksDBClient extends DB {
static final String PROPERTY_ROCKSDB_DIR = "rocksdb.dir";
static final String PROPERTY_ROCKSDB_OPTIONS_FILE = "rocksdb.optionsfile";
private static final String COLUMN_FAMILY_NAMES_FILENAME = "CF_NAMES";
private static final Logger LOGGER = LoggerFactory.getLogger(RocksDBClient.class);
@GuardedBy("RocksDBClient.class") private static Path rocksDbDir = null;
@GuardedBy("RocksDBClient.class") private static Path optionsFile = null;
@GuardedBy("RocksDBClient.class") private static RocksObject dbOptions = null;
@GuardedBy("RocksDBClient.class") private static RocksDB rocksDb = null;
@GuardedBy("RocksDBClient.class") private static int references = 0;
private static final ConcurrentMap<String, ColumnFamily> COLUMN_FAMILIES = new ConcurrentHashMap<>();
private static final ConcurrentMap<String, Lock> COLUMN_FAMILY_LOCKS = new ConcurrentHashMap<>();
@Override
public void init() throws DBException {
synchronized(RocksDBClient.class) {
if(rocksDb == null) {
rocksDbDir = Paths.get(getProperties().getProperty(PROPERTY_ROCKSDB_DIR));
LOGGER.info("RocksDB data dir: " + rocksDbDir);
String optionsFileString = getProperties().getProperty(PROPERTY_ROCKSDB_OPTIONS_FILE);
if (optionsFileString != null) {
optionsFile = Paths.get(optionsFileString);
LOGGER.info("RocksDB options file: " + optionsFile);
}
try {
if (optionsFile != null) {
rocksDb = initRocksDBWithOptionsFile();
} else {
rocksDb = initRocksDB();
}
} catch (final IOException | RocksDBException e) {
throw new DBException(e);
}
}
references++;
}
}
/**
* Initializes and opens the RocksDB database.
*
* Should only be called from within a {@code synchronized(RocksDBClient.class)} block.
*
* @return The initialized and open RocksDB instance.
*/
private RocksDB initRocksDBWithOptionsFile() throws IOException, RocksDBException {
if(!Files.exists(rocksDbDir)) {
Files.createDirectories(rocksDbDir);
}
final DBOptions options = new DBOptions();
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
RocksDB.loadLibrary();
OptionsUtil.loadOptionsFromFile(optionsFile.toAbsolutePath().toString(), Env.getDefault(), options, cfDescriptors);
dbOptions = options;
final RocksDB db = RocksDB.open(options, rocksDbDir.toAbsolutePath().toString(), cfDescriptors, cfHandles);
for(int i = 0; i < cfDescriptors.size(); i++) {
String cfName = new String(cfDescriptors.get(i).getName());
final ColumnFamilyHandle cfHandle = cfHandles.get(i);
final ColumnFamilyOptions cfOptions = cfDescriptors.get(i).getOptions();
COLUMN_FAMILIES.put(cfName, new ColumnFamily(cfHandle, cfOptions));
}
return db;
}
/**
* Initializes and opens the RocksDB database.
*
* Should only be called from within a {@code synchronized(RocksDBClient.class)} block.
*
* @return The initialized and open RocksDB instance.
*/
private RocksDB initRocksDB() throws IOException, RocksDBException {
if(!Files.exists(rocksDbDir)) {
Files.createDirectories(rocksDbDir);
}
final List<String> cfNames = loadColumnFamilyNames();
final List<ColumnFamilyOptions> cfOptionss = new ArrayList<>();
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
for(final String cfName : cfNames) {
final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
.optimizeLevelStyleCompaction();
final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(
cfName.getBytes(UTF_8),
cfOptions
);
cfOptionss.add(cfOptions);
cfDescriptors.add(cfDescriptor);
}
final int rocksThreads = Runtime.getRuntime().availableProcessors() * 2;
if(cfDescriptors.isEmpty()) {
final Options options = new Options()
.optimizeLevelStyleCompaction()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true)
.setIncreaseParallelism(rocksThreads)
.setMaxBackgroundCompactions(rocksThreads)
.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
dbOptions = options;
return RocksDB.open(options, rocksDbDir.toAbsolutePath().toString());
} else {
final DBOptions options = new DBOptions()
.setCreateIfMissing(true)
.setCreateMissingColumnFamilies(true)
.setIncreaseParallelism(rocksThreads)
.setMaxBackgroundCompactions(rocksThreads)
.setInfoLogLevel(InfoLogLevel.INFO_LEVEL);
dbOptions = options;
final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
final RocksDB db = RocksDB.open(options, rocksDbDir.toAbsolutePath().toString(), cfDescriptors, cfHandles);
for(int i = 0; i < cfNames.size(); i++) {
COLUMN_FAMILIES.put(cfNames.get(i), new ColumnFamily(cfHandles.get(i), cfOptionss.get(i)));
}
return db;
}
}
@Override
public void cleanup() throws DBException {
super.cleanup();
synchronized (RocksDBClient.class) {
try {
if (references == 1) {
for (final ColumnFamily cf : COLUMN_FAMILIES.values()) {
cf.getHandle().close();
}
rocksDb.close();
rocksDb = null;
dbOptions.close();
dbOptions = null;
for (final ColumnFamily cf : COLUMN_FAMILIES.values()) {
cf.getOptions().close();
}
saveColumnFamilyNames();
COLUMN_FAMILIES.clear();
rocksDbDir = null;
}
} catch (final IOException e) {
throw new DBException(e);
} finally {
references--;
}
}
}
@Override
public Status read(final String table, final String key, final Set<String> fields,
final Map<String, ByteIterator> result) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
final byte[] values = rocksDb.get(cf, key.getBytes(UTF_8));
if(values == null) {
return Status.NOT_FOUND;
}
deserializeValues(values, fields, result);
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status scan(final String table, final String startkey, final int recordcount, final Set<String> fields,
final Vector<HashMap<String, ByteIterator>> result) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
try(final RocksIterator iterator = rocksDb.newIterator(cf)) {
int iterations = 0;
for (iterator.seek(startkey.getBytes(UTF_8)); iterator.isValid() && iterations < recordcount;
iterator.next()) {
final HashMap<String, ByteIterator> values = new HashMap<>();
deserializeValues(iterator.value(), fields, values);
result.add(values);
iterations++;
}
}
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status update(final String table, final String key, final Map<String, ByteIterator> values) {
//TODO(AR) consider if this would be faster with merge operator
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
final Map<String, ByteIterator> result = new HashMap<>();
final byte[] currentValues = rocksDb.get(cf, key.getBytes(UTF_8));
if(currentValues == null) {
return Status.NOT_FOUND;
}
deserializeValues(currentValues, null, result);
//update
result.putAll(values);
//store
rocksDb.put(cf, key.getBytes(UTF_8), serializeValues(result));
return Status.OK;
} catch(final RocksDBException | IOException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status insert(final String table, final String key, final Map<String, ByteIterator> values) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
rocksDb.put(cf, key.getBytes(UTF_8), serializeValues(values));
return Status.OK;
} catch(final RocksDBException | IOException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
@Override
public Status delete(final String table, final String key) {
try {
if (!COLUMN_FAMILIES.containsKey(table)) {
createColumnFamily(table);
}
final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
rocksDb.delete(cf, key.getBytes(UTF_8));
return Status.OK;
} catch(final RocksDBException e) {
LOGGER.error(e.getMessage(), e);
return Status.ERROR;
}
}
private void saveColumnFamilyNames() throws IOException {
final Path file = rocksDbDir.resolve(COLUMN_FAMILY_NAMES_FILENAME);
try(final PrintWriter writer = new PrintWriter(Files.newBufferedWriter(file, UTF_8))) {
writer.println(new String(RocksDB.DEFAULT_COLUMN_FAMILY, UTF_8));
for(final String cfName : COLUMN_FAMILIES.keySet()) {
writer.println(cfName);
}
}
}
private List<String> loadColumnFamilyNames() throws IOException {
final List<String> cfNames = new ArrayList<>();
final Path file = rocksDbDir.resolve(COLUMN_FAMILY_NAMES_FILENAME);
if(Files.exists(file)) {
try (final LineNumberReader reader =
new LineNumberReader(Files.newBufferedReader(file, UTF_8))) {
String line = null;
while ((line = reader.readLine()) != null) {
cfNames.add(line);
}
}
}
return cfNames;
}
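/*
 * Record values are serialized as a flat byte array of length-prefixed
 * entries, one per field:
 *   [4-byte key length][key bytes][4-byte value length][value bytes]
 * The two methods below decode and encode that layout.
 */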
private Map<String, ByteIterator> deserializeValues(final byte[] values, final Set<String> fields,
final Map<String, ByteIterator> result) {
final ByteBuffer buf = ByteBuffer.allocate(4);
int offset = 0;
while(offset < values.length) {
buf.put(values, offset, 4);
buf.flip();
final int keyLen = buf.getInt();
buf.clear();
offset += 4;
final String key = new String(values, offset, keyLen);
offset += keyLen;
buf.put(values, offset, 4);
buf.flip();
final int valueLen = buf.getInt();
buf.clear();
offset += 4;
if(fields == null || fields.contains(key)) {
result.put(key, new ByteArrayByteIterator(values, offset, valueLen));
}
offset += valueLen;
}
return result;
}
private byte[] serializeValues(final Map<String, ByteIterator> values) throws IOException {
try(final ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
final ByteBuffer buf = ByteBuffer.allocate(4);
for(final Map.Entry<String, ByteIterator> value : values.entrySet()) {
final byte[] keyBytes = value.getKey().getBytes(UTF_8);
final byte[] valueBytes = value.getValue().toArray();
buf.putInt(keyBytes.length);
baos.write(buf.array());
baos.write(keyBytes);
buf.clear();
buf.putInt(valueBytes.length);
baos.write(buf.array());
baos.write(valueBytes);
buf.clear();
}
return baos.toByteArray();
}
}
private ColumnFamilyOptions getDefaultColumnFamilyOptions(final String destinationCfName) {
final ColumnFamilyOptions cfOptions;
if (COLUMN_FAMILIES.containsKey("default")) {
LOGGER.warn("no column family options for \"" + destinationCfName + "\" " +
"in options file - using options from \"default\"");
cfOptions = COLUMN_FAMILIES.get("default").getOptions();
} else {
LOGGER.warn("no column family options for either \"" + destinationCfName + "\" or " +
"\"default\" in options file - initializing with empty configuration");
cfOptions = new ColumnFamilyOptions();
}
LOGGER.warn("Add a CFOptions section for \"" + destinationCfName + "\" to the options file, " +
"or subsequent runs on this DB will fail.");
return cfOptions;
}
private void createColumnFamily(final String name) throws RocksDBException {
COLUMN_FAMILY_LOCKS.putIfAbsent(name, new ReentrantLock());
final Lock l = COLUMN_FAMILY_LOCKS.get(name);
l.lock();
try {
if(!COLUMN_FAMILIES.containsKey(name)) {
final ColumnFamilyOptions cfOptions;
if (optionsFile != null) {
// RocksDB requires all options files to include options for the "default" column family;
// apply those options to this column family
cfOptions = getDefaultColumnFamilyOptions(name);
} else {
cfOptions = new ColumnFamilyOptions().optimizeLevelStyleCompaction();
}
final ColumnFamilyHandle cfHandle = rocksDb.createColumnFamily(
new ColumnFamilyDescriptor(name.getBytes(UTF_8), cfOptions)
);
COLUMN_FAMILIES.put(name, new ColumnFamily(cfHandle, cfOptions));
}
} finally {
l.unlock();
}
}
private static final class ColumnFamily {
private final ColumnFamilyHandle handle;
private final ColumnFamilyOptions options;
private ColumnFamily(final ColumnFamilyHandle handle, final ColumnFamilyOptions options) {
this.handle = handle;
this.options = options;
}
public ColumnFamilyHandle getHandle() {
return handle;
}
public ColumnFamilyOptions getOptions() {
return options;
}
}
}
/*
* Copyright (c) 2018 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The RocksDB Java binding for <a href="http://rocksdb.org/">RocksDB</a>.
*/
package site.ycsb.db.rocksdb;
/*
* Copyright (c) 2018 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.workloads.CoreWorkload;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import java.util.*;
import static org.junit.Assert.assertEquals;
public class RocksDBClientTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
private static final String MOCK_TABLE = "ycsb";
private static final String MOCK_KEY0 = "0";
private static final String MOCK_KEY1 = "1";
private static final String MOCK_KEY2 = "2";
private static final String MOCK_KEY3 = "3";
private static final int NUM_RECORDS = 10;
private static final String FIELD_PREFIX = CoreWorkload.FIELD_NAME_PREFIX_DEFAULT;
private static final Map<String, ByteIterator> MOCK_DATA;
static {
MOCK_DATA = new HashMap<>(NUM_RECORDS);
for (int i = 0; i < NUM_RECORDS; i++) {
MOCK_DATA.put(FIELD_PREFIX + i, new StringByteIterator("value" + i));
}
}
private RocksDBClient instance;
@Before
public void setup() throws Exception {
instance = new RocksDBClient();
final Properties properties = new Properties();
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_DIR, tmpFolder.getRoot().getAbsolutePath());
instance.setProperties(properties);
instance.init();
}
@After
public void tearDown() throws Exception {
instance.cleanup();
}
@Test
public void insertAndRead() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Set<String> fields = MOCK_DATA.keySet();
final Map<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
final Status readResult = instance.read(MOCK_TABLE, MOCK_KEY0, fields, resultParam);
assertEquals(Status.OK, readResult);
}
@Test
public void insertAndDelete() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY1, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Status result = instance.delete(MOCK_TABLE, MOCK_KEY1);
assertEquals(Status.OK, result);
}
@Test
public void insertUpdateAndRead() throws Exception {
final Map<String, ByteIterator> newValues = new HashMap<>(NUM_RECORDS);
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY2, MOCK_DATA);
assertEquals(Status.OK, insertResult);
for (int i = 0; i < NUM_RECORDS; i++) {
newValues.put(FIELD_PREFIX + i, new StringByteIterator("newvalue" + i));
}
final Status result = instance.update(MOCK_TABLE, MOCK_KEY2, newValues);
assertEquals(Status.OK, result);
//validate that the values changed
final Map<String, ByteIterator> resultParam = new HashMap<>(NUM_RECORDS);
instance.read(MOCK_TABLE, MOCK_KEY2, MOCK_DATA.keySet(), resultParam);
for (int i = 0; i < NUM_RECORDS; i++) {
assertEquals("newvalue" + i, resultParam.get(FIELD_PREFIX + i).toString());
}
}
@Test
public void insertAndScan() throws Exception {
final Status insertResult = instance.insert(MOCK_TABLE, MOCK_KEY3, MOCK_DATA);
assertEquals(Status.OK, insertResult);
final Set<String> fields = MOCK_DATA.keySet();
final Vector<HashMap<String, ByteIterator>> resultParam = new Vector<>(NUM_RECORDS);
final Status result = instance.scan(MOCK_TABLE, MOCK_KEY3, NUM_RECORDS, fields, resultParam);
assertEquals(Status.OK, result);
}
}
/*
* Copyright (c) 2019 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.rocksdb;
import org.junit.*;
import org.junit.rules.TemporaryFolder;
import org.rocksdb.*;
import java.util.*;
import static org.junit.Assert.assertEquals;
public class RocksDBOptionsFileTest {
@Rule
public TemporaryFolder tmpFolder = new TemporaryFolder();
private RocksDBClient instance;
@Test
public void loadOptionsFromFile() throws Exception {
final String optionsPath = RocksDBClient.class.getClassLoader().getResource("testcase.ini").getPath();
final String dbPath = tmpFolder.getRoot().getAbsolutePath();
initDbWithOptionsFile(dbPath, optionsPath);
checkOptions(dbPath);
}
private void initDbWithOptionsFile(final String dbPath, final String optionsPath) throws Exception {
instance = new RocksDBClient();
final Properties properties = new Properties();
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_DIR, dbPath);
properties.setProperty(RocksDBClient.PROPERTY_ROCKSDB_OPTIONS_FILE, optionsPath);
instance.setProperties(properties);
instance.init();
instance.cleanup();
}
private void checkOptions(final String dbPath) throws Exception {
final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
final DBOptions dbOptions = new DBOptions();
RocksDB.loadLibrary();
OptionsUtil.loadLatestOptions(dbPath, Env.getDefault(), dbOptions, cfDescriptors);
try {
assertEquals(dbOptions.walSizeLimitMB(), 42);
// the two CFs should be "default" and "usertable"
assertEquals(cfDescriptors.size(), 2);
assertEquals(cfDescriptors.get(0).getOptions().ttl(), 42);
assertEquals(cfDescriptors.get(1).getOptions().ttl(), 42);
}
finally {
dbOptions.close();
}
}
}
#
# This file is only for testing the rocksdb.optionsfile property in YCSB.
# The values are chosen for being extremely unlikely to match defaults.
#
[Version]
rocksdb_version=6.2.2
options_file_version=1.1
[DBOptions]
WAL_size_limit_MB=42
create_if_missing=true
create_missing_column_families=true
[CFOptions "default"]
ttl=42
[TableOptions/BlockBasedTable "default"]
[CFOptions "usertable"]
ttl=42
[TableOptions/BlockBasedTable "usertable"]
# Copyright (c) 2017 YCSB contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Time Series Workload Template: Default Values
#
# File contains all properties that can be set to define a
# YCSB session. All properties are set to their default
# value if one exists. If not, the property is commented
# out. When a property has a finite number of settings,
# the default is enabled and the alternates are shown in
# comments below it.
#
# Use of each property is explained through comments in Client.java,
# CoreWorkload.java, TimeSeriesWorkload.java or on the YCSB wiki page:
# https://github.com/brianfrankcooper/YCSB/wiki/TimeSeriesWorkload
# The name of the workload class to use. Always the following.
workload=site.ycsb.workloads.TimeSeriesWorkload
# The default is Java's Long.MAX_VALUE.
# The number of records in the table to be inserted in
# the load phase or the number of records already in the
# table before the run phase.
recordcount=1000000
# There is no default setting for operationcount but it is
# required to be set.
# The number of operations to use during the run phase.
operationcount=3000000
# The number of insertions to do, if different from recordcount.
# Used with insertstart to grow an existing table.
#insertcount=
# ..::NOTE::.. This is different from the CoreWorkload!
# The starting timestamp of a run as a Unix Epoch numeral in the
# unit set in 'timestampunits'. This is used to determine what
# the first timestamp should be when writing or querying as well
# as how many offsets (based on 'timestampinterval').
#insertstart=
# The units represented by the 'insertstart' timestamp as well as
# durations such as 'timestampinterval', 'querytimespan', etc.
# For values, see https://docs.oracle.com/javase/7/docs/api/java/util/concurrent/TimeUnit.html
# Note that only seconds through nanoseconds are supported.
timestampunits=SECONDS
# The amount of time between each value in every time series in
# the units of 'timestampunits'.
timestampinterval=60
# ..::NOTE::.. This is different from the CoreWorkload!
# Represents the number of unique "metrics" or "keys" for time series.
# E.g. "sys.cpu" may be a single field or "metric" while there may be many
# time series sharing that key (perhaps a host tag with "web01" and "web02"
# as options).
fieldcount=16
# The number of characters in the "metric" or "key".
fieldlength=8
# --- TODO ---?
# The distribution used to choose the length of a field
fieldlengthdistribution=constant
#fieldlengthdistribution=uniform
#fieldlengthdistribution=zipfian
# The number of unique tag combinations for each time series. E.g
# if this value is 4, each record will have a key and 4 tag combinations
# such as A=A, B=A, C=A, D=A.
tagcount=4
# The cardinality (number of unique values) of each tag value for
# every "metric" or field as a comma separated list. Each value must
# be a number from 1 to Java's Integer.MAX_VALUE and there must be
# 'tagcount' values. If there are more or fewer values than
#'tagcount' then either it is ignored or 1 is substituted respectively.
tagcardinality=1,2,4,8
# The length of each tag key in characters.
tagkeylength=8
# The length of each tag value in characters.
tagvaluelength=8
# The character separating tag keys from tag values when reads, deletes
# or scans are executed against a database. The default is the equals sign
# so a field passed in a read to a DB may look like 'AA=AB'.
tagpairdelimiter==
# The delimiter between keys and tags when a delete is passed to the DB.
# E.g. if there was a key and a field, the request key would look like:
# 'AA:AA=AB'
deletedelimiter=:
# Whether or not to randomize the timestamp order when performing inserts
# and updates against a DB. By default all writes perform with the
# timestamps moving linearly forward in time once all time series for a
# given key have been written.
randomwritetimestamporder=false
# Whether or not to randomly shuffle the time series order when writing.
# This will shuffle the keys, tag keys and tag values.
# ************************************************************************
# WARNING - When this is enabled, reads and scans will likely return many
# empty results as invalid tag combinations will be chosen. Likewise
# this setting is INCOMPATIBLE with data integrity checks.
# ************************************************************************
randomtimeseriesorder=false
# The type of numerical data generated for each data point. The values are
# 64 bit signed integers, double precision floating points or a random mix.
# For data integrity, this setting is ignored and values are switched to
# 64 bit signed ints.
#valuetype=integers
valuetype=floats
#valuetype=mixed
# A value from 0 to 0.999999 representing how sparse each time series
# should be. The higher this value, the greater the time interval between
# values in a single series. For example, if sparsity is 0 and there are
# 10 time series with a 'timestampinterval' of 60 seconds with a total
# time range of 10 intervals, you would see 100 values written, one per
# timestamp interval per time series. If the sparsity is 0.50 then there
# would be only about 50 values written so some time series would have
# missing values at each interval.
sparsity=0.00
# The percentage of time series that are "lagging" behind the current
# timestamp of the writer. This is used to mimic a common behavior where
# most sources (agents, sensors, etc) are writing data in sync (same timestamp)
# but a subset are running behind due to buffering, latency issues, etc.
delayedSeries=0.10
# The maximum amount of delay for delayed series in interval counts. The
# actual delay is chosen based on a modulo of the series index.
delayedIntervals=5
# The fixed or maximum amount of time added to the start time of a
# read or scan operation to generate a query over a range of time
# instead of a single timestamp. Units are shared with 'timestampunits'.
# For example if the value is set to 3600 seconds (1 hour) then
# each read would pick a random start timestamp based on the
#'insertstart' value and number of intervals, then add 3600 seconds
# to create the end time of the query. If this value is 0 then reads
# will only provide a single timestamp.
# WARNING: Cannot be used with 'dataintegrity'.
querytimespan=0
# Whether or not reads should choose a random time span (aligned to
# the 'timestampinterval' value) for each read or scan request starting
# at 0 and reaching 'querytimespan' as the max.
queryrandomtimespan=false
# A delimiter character used to separate the start and end timestamps
# of a read query when 'querytimespan' is enabled.
querytimespandelimiter=,
# A unique key given to read, scan and delete operations when the
# operation should perform a group-by (multi-series aggregation) on one
# or more tags. If 'groupbyfunction' is set, this key will be given with
# the configured function.
groupbykey=YCSBGB
# A function name (e.g. 'sum', 'max' or 'avg') passed during reads,
# scans and deletes to cause the database to perform a group-by
# operation on one or more tags. If this value is empty or null
# (default), group-by operations are not performed
#groupbyfunction=
# A comma separated list of 0s or 1s to denote which of the tag keys
# should be grouped during group-by operations. The number of values
# must match the number of tags in 'tagcount'.
#groupbykeys=0,0,1,1
# A unique key given to read and scan operations when the operation
# should downsample the results of a query into lower resolution
# data. If 'downsamplingfunction' is set, this key will be given with
# the configured function.
downsamplingkey=YCSBDS
# A function name (e.g. 'sum', 'max' or 'avg') passed during reads and
# scans to cause the database to perform a downsampling operation
# returning lower resolution data. If this value is empty or null
# (default), downsampling is not performed.
#downsamplingfunction=
# A time interval for which to downsample the raw data into. Shares
# the same units as 'timestampinterval'. This value must be greater
# than 'timestampinterval'. E.g. if the timestamp interval for raw
# data is 60 seconds, the downsampling interval could be 3600 seconds
# to roll up the data into 1 hour buckets.
#downsamplinginterval=
# What proportion of operations are reads
readproportion=0.10
# What proportion of operations are updates
updateproportion=0.00
# What proportion of operations are inserts
insertproportion=0.90
# The distribution of requests across the keyspace
requestdistribution=zipfian
#requestdistribution=uniform
#requestdistribution=latest
# The name of the database table to run queries against
table=usertable
# Whether or not data should be validated during writes and reads. If
# set then the data type is always a 64 bit signed integer and is the
# hash code of the key, timestamp and tags.
dataintegrity=false
# How the latency measurements are presented
measurementtype=histogram
#measurementtype=timeseries
#measurementtype=raw
# When measurementtype is set to raw, measurements will be output
# as RAW datapoints in the following csv format:
# "operation, timestamp of the measurement, latency in us"
#
# Raw datapoints are collected in-memory while the test is running. Each
# data point consumes about 50 bytes (including java object overhead).
# For a typical run of 1 million to 10 million operations, this should
# fit into memory most of the time. If you plan to do 100s of millions of
# operations per run, consider provisioning a machine with larger RAM when using
# the RAW measurement type, or split the run into multiple runs.
#
# Optionally, you can specify an output file to save raw datapoints.
# Otherwise, raw datapoints will be written to stdout.
# The output file will be appended to if it already exists, otherwise
# a new output file will be created.
#measurement.raw.output_file = /tmp/your_output_file_for_this_run
# JVM Reporting.
#
# Measure JVM information over time including GC counts, max and min memory
# used, max and min thread counts, max and min system load and others. This
# setting must be enabled in conjunction with the "-s" flag to run the status
# thread. Every "status.interval", the status thread will capture JVM
# statistics and record the results. At the end of the run, max and mins will
# be recorded.
# measurement.trackjvm = false
# The range of latencies to track in the histogram (milliseconds)
histogram.buckets=1000
# Granularity for time series (in milliseconds)
timeseries.granularity=1000
# Latency reporting.
#
# YCSB records latency of failed operations separately from successful ones.
# Latency of all OK operations will be reported under their operation name,
# such as [READ], [UPDATE], etc.
#
# For failed operations:
# By default we don't track latency numbers of specific error status.
# We just report latency of all failed operation under one measurement name
# such as [READ-FAILED]. But optionally, user can configure to have either:
# 1. Record and report latency for each and every error status code by
# setting reportLatencyForEachError to true, or
# 2. Record and report latency for a select set of error status codes by
# providing a CSV list of Status codes via the "latencytrackederrors"
# property.
# reportlatencyforeacherror=false
# latencytrackederrors="<comma separated strings of error codes>"
# Copyright (c) 2017 YCSB contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload A: Small cardinality consistent data for 2 days
# Application example: Typical monitoring of a single compute or small
# sensor station where 90% of the load is write and only 10% is read
# (it's usually much less). All writes are inserts. No sparsity so
# every series will have a value at every timestamp.
#
# Read/insert ratio: 10/90
# Cardinality: 16 per key (field), 64 fields for a total of 1,024
# time series.
workload=site.ycsb.workloads.TimeSeriesWorkload
recordcount=1474560
operationcount=2949120
fieldlength=8
fieldcount=64
tagcount=4
tagcardinality=1,2,4,2
sparsity=0.0
delayedSeries=0.0
delayedIntervals=0
timestampunits=SECONDS
timestampinterval=60
querytimespan=3600
readproportion=0.10
updateproportion=0.00
insertproportion=0.90
# Copyright (c) 2012-2016 YCSB contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload Template: Default Values
#
# File contains all properties that can be set to define a
# YCSB session. All properties are set to their default
# value if one exists. If not, the property is commented
# out. When a property has a finite number of settings,
# the default is enabled and the alternates are shown in
# comments below it.
#
# Use of most properties is explained through comments in Client.java,
# CoreWorkload.java, or on the YCSB wiki page:
# https://github.com/brianfrankcooper/YCSB/wiki/Core-Properties
# The name of the workload class to use
workload=site.ycsb.workloads.CoreWorkload
# There is no default setting for recordcount but it is
# required to be set.
# The number of records in the table to be inserted in
# the load phase or the number of records already in the
# table before the run phase.
recordcount=1000000
# There is no default setting for operationcount but it is
# required to be set.
# The number of operations to use during the run phase.
operationcount=3000000
# The number of insertions to do, if different from recordcount.
# Used with insertstart to grow an existing table.
#insertcount=
# The offset of the first insertion
insertstart=0
# The number of fields in a record
fieldcount=10
# The size of each field (in bytes)
fieldlength=100
# Should read all fields
readallfields=true
# Should write all fields on update
writeallfields=false
# The distribution used to choose the length of a field
fieldlengthdistribution=constant
#fieldlengthdistribution=uniform
#fieldlengthdistribution=zipfian
# What proportion of operations are reads
readproportion=0.95
# What proportion of operations are updates
updateproportion=0.05
# What proportion of operations are inserts
insertproportion=0
# What proportion of operations read then modify a record
readmodifywriteproportion=0
# What proportion of operations are scans
scanproportion=0
# On a single scan, the maximum number of records to access
maxscanlength=1000
# The distribution used to choose the number of records to access on a scan
scanlengthdistribution=uniform
#scanlengthdistribution=zipfian
# Should records be inserted in order or pseudo-randomly
insertorder=hashed
#insertorder=ordered
# The distribution of requests across the keyspace
requestdistribution=zipfian
#requestdistribution=uniform
#requestdistribution=latest
# Percentage of data items that constitute the hot set
hotspotdatafraction=0.2
# Percentage of operations that access the hot set
hotspotopnfraction=0.8
# Maximum execution time in seconds
#maxexecutiontime=
# The name of the database table to run queries against
table=usertable
# The column family of fields (required by some databases)
#columnfamily=
# How the latency measurements are presented
measurementtype=histogram
#measurementtype=timeseries
#measurementtype=raw
# When measurementtype is set to raw, measurements will be output
# as RAW datapoints in the following csv format:
# "operation, timestamp of the measurement, latency in us"
#
# Raw datapoints are collected in-memory while the test is running. Each
# data point consumes about 50 bytes (including java object overhead).
# For a typical run of 1 million to 10 million operations, this should
# fit into memory most of the time. If you plan to do 100s of millions of
# operations per run, consider provisioning a machine with larger RAM when using
# the RAW measurement type, or split the run into multiple runs.
#
# Optionally, you can specify an output file to save raw datapoints.
# Otherwise, raw datapoints will be written to stdout.
# The output file will be appended to if it already exists, otherwise
# a new output file will be created.
#measurement.raw.output_file = /tmp/your_output_file_for_this_run
# Whether or not to emit individual histogram buckets when measuring
# using histograms.
# measurement.histogram.verbose = false
# JVM Reporting.
#
# Measure JVM information over time including GC counts, max and min memory
# used, max and min thread counts, max and min system load and others. This
# setting must be enabled in conjunction with the "-s" flag to run the status
# thread. Every "status.interval", the status thread will capture JVM
# statistics and record the results. At the end of the run, max and mins will
# be recorded.
# measurement.trackjvm = false
# The range of latencies to track in the histogram (milliseconds)
histogram.buckets=1000
# Granularity for time series (in milliseconds)
timeseries.granularity=1000
# Latency reporting.
#
# YCSB records latency of failed operations separately from successful ones.
# Latency of all OK operations will be reported under their operation name,
# such as [READ], [UPDATE], etc.
#
# For failed operations:
# By default we don't track latency numbers for specific error statuses.
# We just report the latency of all failed operations under one measurement name,
# such as [READ-FAILED]. Optionally, the user can configure either:
# 1. Record and report latency for each and every error status code by
# setting reportLatencyForEachError to true, or
# 2. Record and report latency for a select set of error status codes by
# providing a CSV list of Status codes via the "latencytrackederrors"
# property.
# reportlatencyforeacherror=false
# latencytrackederrors="<comma separated strings of error codes>"
# Insertion error retry for the core workload.
#
# By default, the YCSB core workload does not retry any operations.
# However, during the load process, if any insertion fails, the entire
# load process is terminated.
# If a user desires to have more robust behavior during this phase, they can
# enable retry for insertion by setting the following property to a positive
# number.
# core_workload_insertion_retry_limit = 0
#
# the following number controls the interval between retries (in seconds):
# core_workload_insertion_retry_interval = 3
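#
# For example, a sketch that retries each failed insertion up to 3 times,
# waiting 5 seconds between attempts, would set:
# core_workload_insertion_retry_limit = 3
# core_workload_insertion_retry_interval = 5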
# Distributed Tracing via Apache HTrace (http://htrace.incubator.apache.org/)
#
# Defaults to blank / no tracing
# Below sends to a local file, sampling at 0.1%
#
# htrace.sampler.classes=ProbabilitySampler
# htrace.sampler.fraction=0.001
# htrace.span.receiver.classes=org.apache.htrace.core.LocalFileSpanReceiver
# htrace.local.file.span.receiver.path=/some/path/to/local/file
#
# To capture all spans, use the AlwaysSampler
#
# htrace.sampler.classes=AlwaysSampler
#
# To send spans to an HTraced receiver, use the below and ensure
# your classpath contains the htrace-htraced jar (i.e. when invoking the ycsb
# command add -cp /path/to/htrace-htraced.jar)
#
# htrace.span.receiver.classes=org.apache.htrace.impl.HTracedSpanReceiver
# htrace.htraced.receiver.address=example.com:9075
# htrace.htraced.error.log.period.ms=10000
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload A: Update heavy workload
# Application example: Session store recording recent actions
#
# Read/update ratio: 50/50
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.5
updateproportion=0.5
scanproportion=0
insertproportion=0
requestdistribution=zipfian
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload B: Read mostly workload
# Application example: photo tagging; adding a tag is an update, but most operations are to read tags
#
# Read/update ratio: 95/5
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.95
updateproportion=0.05
scanproportion=0
insertproportion=0
requestdistribution=zipfian
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload C: Read only
# Application example: user profile cache, where profiles are constructed elsewhere (e.g., Hadoop)
#
# Read/update ratio: 100/0
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=1
updateproportion=0
scanproportion=0
insertproportion=0
requestdistribution=zipfian
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload D: Read latest workload
# Application example: user status updates; people want to read the latest
#
# Read/update/insert ratio: 95/0/5
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: latest
# The insert order for this is hashed, not ordered. The "latest" items may be
# scattered around the keyspace if they are keyed by userid.timestamp. A workload
# which orders items purely by time and demands the latest is very different from the
# workload here (which we believe is more typical of how people build systems).
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.95
updateproportion=0
scanproportion=0
insertproportion=0.05
requestdistribution=latest
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload E: Short ranges
# Application example: threaded conversations, where each scan is for the posts in a given thread (assumed to be clustered by thread id)
#
# Scan/insert ratio: 95/5
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
# The insert order is hashed, not ordered. Although the scans are ordered, it does not necessarily
# follow that the data is inserted in order. For example, posts for thread 342 may not be inserted contiguously, but
# instead interspersed with posts from lots of other threads. The way the YCSB client works is that it will pick a start
# key, and then request a number of records; this works fine even for hashed insertion.
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0
updateproportion=0
scanproportion=0.95
insertproportion=0.05
requestdistribution=zipfian
maxscanlength=100
scanlengthdistribution=uniform
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
# Yahoo! Cloud System Benchmark
# Workload F: Read-modify-write workload
# Application example: user database, where user records are read and modified by the user or to record user activity.
#
# Read/read-modify-write ratio: 50/50
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
recordcount=1000
operationcount=1000
workload=site.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.5
updateproportion=0
scanproportion=0
insertproportion=0
readmodifywriteproportion=0.5
requestdistribution=zipfian
<!--
Copyright (c) 2020 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
## Quick Start
This section describes how to run YCSB on ZooKeeper.
### 1. Start ZooKeeper Server(s)
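For a quick single-node test, one way to do this (a sketch assuming a standard ZooKeeper distribution with a prepared conf/zoo.cfg) is:

```bash
# start a standalone server on the default client port 2181
bin/zkServer.sh start
```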
### 2. Install Java and Maven
### 3. Set Up YCSB
Git clone YCSB and compile:
git clone http://github.com/brianfrankcooper/YCSB.git
    # see the landing page for more details on downloading YCSB: https://github.com/brianfrankcooper/YCSB#getting-started
cd YCSB
mvn -pl site.ycsb:zookeeper-binding -am clean package -DskipTests
### 4. Provide ZooKeeper Connection Parameters
Set `connectString`, `sessionTimeout`, and `watchFlag` in the workload you plan to run:
- `zookeeper.connectString`
- `zookeeper.sessionTimeout`
- `zookeeper.watchFlag`
 * A parameter for enabling ZooKeeper watches; optional values: true or false. The default is false.
 * This parameter does not measure watch performance itself; it tests the effect that enabling watches has on read/write requests.
```bash
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p zookeeper.watchFlag=true
```
Alternatively, you can set these properties from the shell command, e.g.:
# create a /benchmark namespace for the sake of cleaning up the workspace after the test,
# e.g. in the ZooKeeper CLI: create /benchmark
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p zookeeper.sessionTimeout=30000
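The `/benchmark` chroot namespace used above must exist before the run. A minimal sketch, assuming the standard ZooKeeper CLI shipped with the server:

```bash
# open a CLI session and create the namespace node
bin/zkCli.sh -server 127.0.0.1:2181
create /benchmark ""
```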
### 5. Load data and run tests
Load the data:
# -p recordcount: the number of records/paths you want to insert
./bin/ycsb load zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p recordcount=10000 > outputLoad.txt
Run the workload test:
# workloadb (read-heavy) is the YCSB workload that best matches how ZooKeeper is used in the real world.
# -p fieldlength: test the effect of the value/data-content length on performance
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p fieldlength=1000
# -p fieldcount: test the effect of the number of fields per record on performance
./bin/ycsb run zookeeper -s -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p fieldcount=20
# -p hdrhistogram.percentiles: choose the percentiles shown in the hdrhistogram benchmark result
./bin/ycsb run zookeeper -threads 1 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p hdrhistogram.percentiles=10,25,50,75,90,95,99,99.9 -p histogram.buckets=500
# -threads: multi-client test; increase **maxClientCnxns** in zoo.cfg to handle more connections.
./bin/ycsb run zookeeper -threads 10 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark
# show the timeseries benchmark result
./bin/ycsb run zookeeper -threads 1 -P workloads/workloadb -p zookeeper.connectString=127.0.0.1:2181/benchmark -p measurementtype=timeseries -p timeseries.granularity=50
# cluster test
./bin/ycsb run zookeeper -P workloads/workloadb -p zookeeper.connectString=192.168.10.43:2181,192.168.10.45:2181,192.168.10.27:2181/benchmark
# test the leader's read/write performance by setting zookeeper.connectString to the leader's address (192.168.10.43:2181)
./bin/ycsb run zookeeper -P workloads/workloadb -p zookeeper.connectString=192.168.10.43:2181/benchmark
# test large znodes (by default jute.maxbuffer is 1048575 bytes, ~1 MB). Note: jute.maxbuffer must be set to the same value on all the ZooKeeper servers.
./bin/ycsb run zookeeper -jvm-args="-Djute.maxbuffer=4194304" -s -P workloads/workloadc -p zookeeper.connectString=127.0.0.1:2181/benchmark
# Clean up the workspace after finishing the benchmark,
# e.g. in the ZooKeeper CLI: deleteall /benchmark
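A cleanup sketch, assuming the standard ZooKeeper CLI (`deleteall` needs ZooKeeper 3.5+; older releases use `rmr` instead):

```bash
bin/zkCli.sh -server 127.0.0.1:2181
deleteall /benchmark
```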
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2020 YCSB contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License. See accompanying
LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>site.ycsb</groupId>
<artifactId>binding-parent</artifactId>
<version>0.18.0-SNAPSHOT</version>
<relativePath>../binding-parent</relativePath>
</parent>
<artifactId>zookeeper-binding</artifactId>
<name>ZooKeeper DB Binding</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>site.ycsb</groupId>
<artifactId>core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
<version>1.1.1</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.21</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>4.2.0</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
/**
* Copyright (c) 2020 YCSB contributors. All rights reserved.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
* <p>
* ZooKeeper client binding for YCSB.
* <p>
*/
package site.ycsb.db.zookeeper;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.TimeUnit;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.DBException;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* YCSB binding for <a href="https://zookeeper.apache.org/">ZooKeeper</a>.
*
* See {@code zookeeper/README.md} for details.
*/
public class ZKClient extends DB {
private ZooKeeper zk;
private Watcher watcher;
private static final String CONNECT_STRING = "zookeeper.connectString";
private static final String DEFAULT_CONNECT_STRING = "127.0.0.1:2181";
private static final String SESSION_TIMEOUT_PROPERTY = "zookeeper.sessionTimeout";
private static final long DEFAULT_SESSION_TIMEOUT = TimeUnit.SECONDS.toMillis(30L);
private static final String WATCH_FLAG = "zookeeper.watchFlag";
private static final Charset UTF_8 = Charset.forName("UTF-8");
private static final Logger LOG = LoggerFactory.getLogger(ZKClient.class);
public void init() throws DBException {
Properties props = getProperties();
String connectString = props.getProperty(CONNECT_STRING);
if (connectString == null || connectString.length() == 0) {
connectString = DEFAULT_CONNECT_STRING;
}
if(Boolean.parseBoolean(props.getProperty(WATCH_FLAG))) {
watcher = new SimpleWatcher();
} else {
watcher = null;
}
long sessionTimeout;
String sessionTimeoutString = props.getProperty(SESSION_TIMEOUT_PROPERTY);
if (sessionTimeoutString != null) {
sessionTimeout = Integer.parseInt(sessionTimeoutString);
} else {
sessionTimeout = DEFAULT_SESSION_TIMEOUT;
}
try {
zk = new ZooKeeper(connectString, (int) sessionTimeout, new SimpleWatcher());
} catch (IOException e) {
throw new DBException("Creating connection failed.");
}
}
public void cleanup() throws DBException {
try {
zk.close();
} catch (InterruptedException e) {
throw new DBException("Closing connection failed.");
}
}
@Override
public Status read(String table, String key, Set<String> fields,
Map<String, ByteIterator> result) {
String path = getPath(key);
try {
byte[] data = zk.getData(path, watcher, null);
if (data == null || data.length == 0) {
return Status.NOT_FOUND;
}
deserializeValues(data, fields, result);
return Status.OK;
} catch (KeeperException | InterruptedException e) {
LOG.error("Error when reading a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status insert(String table, String key,
Map<String, ByteIterator> values) {
String path = getPath(key);
String data = getJsonStrFromByteMap(values);
try {
zk.create(path, data.getBytes(UTF_8), ZooDefs.Ids.OPEN_ACL_UNSAFE,
CreateMode.PERSISTENT);
return Status.OK;
} catch (KeeperException.NodeExistsException e1) {
return Status.OK;
} catch (KeeperException | InterruptedException e2) {
LOG.error("Error when inserting a path:{},tableName:{}", path, table, e2);
return Status.ERROR;
}
}
@Override
public Status delete(String table, String key) {
String path = getPath(key);
try {
zk.delete(path, -1);
return Status.OK;
} catch (InterruptedException | KeeperException e) {
LOG.error("Error when deleting a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status update(String table, String key,
Map<String, ByteIterator> values) {
String path = getPath(key);
try {
// we have to do a read operation here before setData to meet the YCSB's update semantics:
// update a single record in the database, adding or replacing the specified fields.
byte[] data = zk.getData(path, watcher, null);
if (data == null || data.length == 0) {
return Status.NOT_FOUND;
}
final Map<String, ByteIterator> result = new HashMap<>();
deserializeValues(data, null, result);
result.putAll(values);
// update
zk.setData(path, getJsonStrFromByteMap(result).getBytes(UTF_8), -1);
return Status.OK;
} catch (KeeperException | InterruptedException e) {
LOG.error("Error when updating a path:{},tableName:{}", path, table, e);
return Status.ERROR;
}
}
@Override
public Status scan(String table, String startkey, int recordcount,
Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
return Status.NOT_IMPLEMENTED;
}
private String getPath(String key) {
return key.startsWith("/") ? key : "/" + key;
}
/**
* converting the key:values map to JSON Strings.
*/
private static String getJsonStrFromByteMap(Map<String, ByteIterator> map) {
Map<String, String> stringMap = StringByteIterator.getStringMap(map);
return JSONValue.toJSONString(stringMap);
}
private Map<String, ByteIterator> deserializeValues(final byte[] data, final Set<String> fields,
final Map<String, ByteIterator> result) {
JSONObject jsonObject = (JSONObject)JSONValue.parse(new String(data, UTF_8));
Iterator<String> iterator = jsonObject.keySet().iterator();
while(iterator.hasNext()) {
String field = iterator.next();
String value = jsonObject.get(field).toString();
if(fields == null || fields.contains(field)) {
result.put(field, new StringByteIterator(value));
}
}
return result;
}
private static class SimpleWatcher implements Watcher {
public void process(WatchedEvent e) {
if (e.getType() == Event.EventType.None) {
return;
}
if (e.getState() == Event.KeeperState.SyncConnected) {
//do nothing
}
}
}
}
/*
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/**
* The YCSB binding for <a href="https://zookeeper.apache.org/">ZooKeeper</a>.
*/
package site.ycsb.db.zookeeper;
/**
* Copyright (c) 2020 YCSB contributors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package site.ycsb.db.zookeeper;
import org.apache.curator.test.TestingServer;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import site.ycsb.ByteIterator;
import site.ycsb.Status;
import site.ycsb.StringByteIterator;
import site.ycsb.measurements.Measurements;
import site.ycsb.workloads.CoreWorkload;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import static junit.framework.TestCase.assertEquals;
import static org.junit.Assert.fail;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY;
import static site.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT;
/**
* Integration tests for the YCSB ZooKeeper client.
*/
public class ZKClientTest {
private static TestingServer zkTestServer;
private ZKClient client;
private String tableName;
private final static String path = "benchmark";
private static final int PORT = 2181;
@BeforeClass
public static void setUpClass() throws Exception {
zkTestServer = new TestingServer(PORT);
zkTestServer.start();
}
@AfterClass
public static void tearDownClass() throws Exception {
zkTestServer.stop();
}
@Before
public void setUp() throws Exception {
client = new ZKClient();
Properties p = new Properties();
p.setProperty("zookeeper.connectString", "127.0.0.1:" + String.valueOf(PORT));
Measurements.setProperties(p);
final CoreWorkload workload = new CoreWorkload();
workload.init(p);
tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
client.setProperties(p);
client.init();
}
@After
public void tearDown() throws Exception {
client.cleanup();
}
@Test
public void testZKClient() {
// insert
Map<String, String> m = new HashMap<>();
String field1 = "field_1";
String value1 = "value_1";
m.put(field1, value1);
Map<String, ByteIterator> result = StringByteIterator.getByteIteratorMap(m);
client.insert(tableName, path, result);
// read
result.clear();
Status status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
assertEquals(1, result.size());
assertEquals(value1, result.get(field1).toString());
// update(the same field)
m.clear();
result.clear();
String newVal = "value_new";
m.put(field1, newVal);
result = StringByteIterator.getByteIteratorMap(m);
client.update(tableName, path, result);
assertEquals(1, result.size());
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
// here we only have one field: field_1
assertEquals(1, result.size());
assertEquals(newVal, result.get(field1).toString());
    // update (two different fields)
m.clear();
result.clear();
String field2 = "field_2";
String value2 = "value_2";
m.put(field2, value2);
result = StringByteIterator.getByteIteratorMap(m);
client.update(tableName, path, result);
assertEquals(1, result.size());
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
assertEquals(Status.OK, status);
    // here we have two fields: field_1 and field_2
assertEquals(2, result.size());
assertEquals(value2, result.get(field2).toString());
assertEquals(newVal, result.get(field1).toString());
// delete
status = client.delete(tableName, path);
assertEquals(Status.OK, status);
// Verify result
result.clear();
status = client.read(tableName, path, null, result);
    // NoNode returns ERROR
assertEquals(Status.ERROR, status);
assertEquals(0, result.size());
}
@Test
@Ignore("Not yet implemented")
public void testScan() {
fail("Not yet implemented");
}
}
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class Hello */
#ifndef _Included_Hello
#define _Included_Hello
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: Hello
* Method: put
* Signature: ([B[B)I
*/
JNIEXPORT jint JNICALL Java_Hello_put
(JNIEnv *, jobject, jbyteArray, jbyteArray);
/*
* Class: Hello
* Method: get
* Signature: ([B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_Hello_get
(JNIEnv *, jobject, jbyteArray);
/*
* Class: Hello
* Method: delete
* Signature: ([B)I
*/
JNIEXPORT jint JNICALL Java_Hello_delete
(JNIEnv *, jobject, jbyteArray);
#ifdef __cplusplus
}
#endif
#endif
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class JClient */
#ifndef _Included_JClient
#define _Included_JClient
#ifdef __cplusplus
extern "C" {
#endif
/*
* Class: JClient
* Method: put
* Signature: (J[B[B)I
*/
JNIEXPORT jint JNICALL Java_JClient_put
(JNIEnv *, jobject, jlong, jbyteArray, jbyteArray);
/*
* Class: JClient
* Method: get
* Signature: (J[B)[B
*/
JNIEXPORT jbyteArray JNICALL Java_JClient_get
(JNIEnv *, jobject, jlong, jbyteArray);
/*
* Class: JClient
* Method: delete
* Signature: (J[B)I
*/
JNIEXPORT jint JNICALL Java_JClient_delete
(JNIEnv *, jobject, jlong, jbyteArray);
/*
* Class: JClient
* Method: createEndpointGroup
* Signature: (IIIIIII)J
*/
JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup
(JNIEnv *, jobject, jint, jint, jint, jint, jint, jint, jint);
/*
* Class: JClient
* Method: createClient
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_JClient_createClient
(JNIEnv *, jobject, jlong);
/*
* Class: JClient
* Method: closeClient
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_JClient_closeClient
(JNIEnv *, jobject, jlong);
/*
* Class: JClient
* Method: closeEndpointGroup
* Signature: (J)V
*/
JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup
(JNIEnv *, jobject, jlong);
#ifdef __cplusplus
}
#endif
#endif
@@ -4,13 +4,13 @@ enum RequestType
 {
     GET,
     PUT,
-    DELETE,
-    INVALIDATE
+    DELETE
 };
 enum ResponseStatus
 {
     SUCCESS,
-    FAILURE
+    FAILURE,
+    INVALIDATE
 };
 struct __attribute__ ((__packed__)) SalRequestHeader
@@ -34,7 +34,7 @@ struct __attribute__ ((__packed__)) SalResponseHeader
 struct __attribute__ ((__packed__)) InvRequestHeader
 {
     uint32_t id;
-    enum RequestType type;
+    enum ResponseStatus type;
     uint32_t keySize;
 };
......
+#ifndef __RDMACLIENTENDPOINT__
+#define __RDMACLIENTENDPOINT__
 #include <rdma/rdma_cma.h>
 #include <rdma/rdma_verbs.h>
 #include <stdint.h>
@@ -5,13 +8,16 @@
 #include <iostream>
 #include <errno.h>
 #include <arpa/inet.h>
-#ifndef __RDMACLIENTENDPOINT__
-#define __RDMACLIENTENDPOINT__
+//#include <boost/lockfree/queue.hpp>
+#include <queue>
+#include <map>
+#include <mutex>
 #include "Buffer.hpp"
+#include "Logger.hpp"
 #include "RdmaEndpointGroup.hpp"
 #include "MessageFormats.hpp"
-#include <boost/lockfree/queue.hpp>
+#include "RdmaFuture.hpp"
 class RdmaClientEndpoint
 {
@@ -33,16 +39,20 @@ class RdmaClientEndpoint
     int _sendMsgSize;
     int _recvMsgSize;
     int _state;
-    int _timeoutMs;
     int _maxInLine;
+    int _timeoutMs;
     const char *_connData;
-    void *_sendBuff = NULL;
-    void *_recvBuff = NULL;
+    char *_sendBuff = NULL;
+    char *_recvBuff = NULL;
     struct ibv_mr *_sendMr = NULL;
     struct ibv_mr *_recvMr = NULL;
-    boost::lockfree::queue<void *> *_sendBuffers;
+    //boost::lockfree::queue<char *> *_sendBuffers;
+    std::queue<char *> _sendBuffers;
+    std::mutex _sendBuffersM;
+    std::map<uint64_t, RdmaFuture *> futures;
     void completeClose();
     void connect();
@@ -51,15 +61,20 @@ class RdmaClientEndpoint
 public:
     std::atomic<uint64_t> _requestId{12};
     RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup *group, int sendQueueSize, int recvQueueSize,
                        int sendMsgSize, int recvMsgSize, int maxInLine, int timeout);
     void connect(const char *ip, const char *port, const char *connData);
     bool isConnected();
-    void processCmEvent(struct rdma_cm_event *event);
     void close();
-    int sendMessage(const char *buffer, int size);
+    void processCmEvent(struct rdma_cm_event *event);
     void processSendComp(struct ibv_wc);
     void processRecvComp(struct ibv_wc);
+    int sendMessage(const char *buffer, int size);
+    RdmaFuture *put(const char *key, int keySize, const char *value, int valueSize);
+    RdmaFuture *get(const char *key, int keySize);
+    RdmaFuture *deleteKey(const char *key, int keySize);
 };
 #endif
\ No newline at end of file
 #ifndef __RdmaFuture__
 #define __RdmaFuture__
 #include <stdint.h>
-class Future
+#include <memory>
+#include <atomic>
+#include <iostream>
+#include <mutex>
+class RdmaFuture
 {
-public:
-    uint8_t status;
-    char* value;
+    static int DONE;
+    static int PENDING;
+    uint64_t _requestId;
+    std::mutex stateMutex;
+    uint8_t state{0};
+    char *_data;
+    uint8_t _status;
+public:
+    RdmaFuture(uint64_t requestId);
+    char *get();
+    char *wait_for(int timeout);
+    void put(char *data);
 };
 #endif
\ No newline at end of file
public class Hello {
public native int put(byte[] key, byte[] value);
public native byte[] get(byte[] key);
public native int delete(byte[] key);
static { System.loadLibrary("HelloImpl"); }
public static void main (String[] args) {
Hello hello = new Hello();
System.out.println(hello.put("paras".getBytes(), "garg".getBytes()));
        System.out.println(new String(hello.get("paras".getBytes()))); // decode the returned bytes for printing
System.out.println(hello.delete("paras".getBytes()));
}
}
\ No newline at end of file
public class JClient {
public native int put(long client, byte[] key, byte[] value);
public native byte[] get(long client, byte[] key);
public native int delete(long client, byte[] key);
    public native long createEndpointGroup(int sendQSize, int recvQSize, int compQSize, int sendMsgSize, int recvMsgSize,
int maxInLine, int timeout);
public native long createClient(long endpointGroup);
public native void closeClient(long client);
public native void closeEndpointGroup(long endpointGroup);
public static long endpointGroup;
public long client;
static {
System.loadLibrary("hpdosclient");
}
public static void main(String[] args) {
System.out.println(System.getProperty("java.library.path"));
try {
JClient jclient = new JClient();
endpointGroup = jclient.createEndpointGroup(5, 5, 5, 500, 500, 0, 1000);
jclient.client = jclient.createClient(endpointGroup);
System.out.println("jc" + jclient.put(jclient.client, "paras".getBytes(), "garg".getBytes()));
var res = jclient.get(jclient.client, "paras".getBytes());
System.out.println("val size"+res.length+" "+new String(res));
System.out.println("jc" + jclient.delete(jclient.client, "paras".getBytes()));
} catch (Exception e) {
e.printStackTrace();
}
}
}
\ No newline at end of file
# Lines starting with # are comments; empty lines are ignored
# Comments after parameters are also supported
# Use key=value format
# All parameter values are read as strings
# Fixed Parameters
ENABLE_LOGGING=0
SERVER_IP=192.168.200.20
SERVER_PORT=1921
EXECUTOR_POOL_SIZE=4
\ No newline at end of file
@@ -10,7 +10,25 @@ int main()
     while (!clientEp->isConnected());
     std::cout << "client : connected" << std::endl;
-    char *message = new char[100];
+    auto r1 = clientEp->put("paras", 5, "garg", 4);
+    auto r2 = clientEp->get("paras", 5);
+    auto r3 = clientEp->deleteKey("paras", 5);
+    std::cout << "waiting for output" << std::endl;
+    r3->get();
+    delete r3;
+    struct SalResponseHeader *response = (struct SalResponseHeader *)r1->get();
+    std::cout << "Received response for id " << response->id << " status " << response->status;
+    std::cout << " size " << response->valueSize << std::endl;
+    delete r1;
+    response = (struct SalResponseHeader *)r2->get();
+    std::cout << "Received response for id " << response->id << " status " << response->status;
+    std::cout << " size " << response->valueSize << std::endl;
+    delete r2;
+    /*char *message = new char[100];
     struct SalRequestHeader *request = (struct SalRequestHeader *)message;
     request->id = clientEp->_requestId.fetch_add(1, std::memory_order_relaxed);
     request->type = RequestType::PUT;
@@ -40,7 +58,7 @@ int main()
     std::cout << "send" << clientEp->sendMessage(message, SalRequestHeaderSize + 14) << std::endl;
+    */
     // memcpy(re->value,"aa",2);
     int send = 0;
@@ -49,7 +67,7 @@ int main()
         std::cin >> send;
         if (send == 1)
         {
-            std::cout << "send" << clientEp->sendMessage(message, 10) << std::endl;
+            // std::cout << "send" << clientEp->sendMessage(message, 10) << std::endl;
         }
     }
 }
\ No newline at end of file
#include <stdio.h>
#include "Hello.h" // generated by javah
#include "RdmaClientEndpoint.hpp"
/*
bool rocksdb_put_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
const ROCKSDB_NAMESPACE::WriteOptions& write_options,
ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle,
jbyteArray jkey, jint jkey_off, jint jkey_len,
jbyteArray jval, jint jval_off, jint jval_len) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] key;
return false;
}
jbyte* value = new jbyte[jval_len];
env->GetByteArrayRegion(jval, jval_off, jval_len, value);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] value;
delete[] key;
return false;
}
*/
JNIEXPORT jbyteArray JNICALL Java_Hello_get(JNIEnv *jenv, jobject jobj, jbyteArray jkey)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char* key = new char[keyLen + 1]; // +1 so the key can be printed as a C string
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte*>(key));
    if(jenv->ExceptionCheck())
    {
        // exception thrown: ArrayIndexOutOfBoundsException
        std::cout<<"exception occurred in get"<<std::endl;
        delete[] key;
        return NULL;
    }
    key[keyLen] = '\0';
    std::cout<<"get "<<key<<" len "<<keyLen<<std::endl;
    int valLen = 6;
    jbyteArray jvalue = jenv->NewByteArray(valLen);
    jenv->SetByteArrayRegion(jvalue, 0, 5, reinterpret_cast<const jbyte*>("paras"));
    delete[] key;
    return jvalue;
}
JNIEXPORT jint JNICALL Java_Hello_put(JNIEnv *jenv, jobject jobj, jbyteArray jkey, jbyteArray jval)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char* key = new char[keyLen + 1];
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte*>(key));
    int valLen = jenv->GetArrayLength(jval);
    char* val = new char[valLen + 1];
    jenv->GetByteArrayRegion(jval, 0, valLen, reinterpret_cast<jbyte*>(val));
    if(jenv->ExceptionCheck())
    {
        std::cout<<"exception occurred in put"<<std::endl;
        delete[] key;
        delete[] val;
        return 0;
    }
    key[keyLen] = '\0';
    val[valLen] = '\0';
    std::cout<<"put "<<key<<" len "<<keyLen<<" "<<val<<" len "<<valLen<<std::endl;
    delete[] key;
    delete[] val;
    return 1;
}
JNIEXPORT jint JNICALL Java_Hello_delete(JNIEnv *jenv, jobject jobj, jbyteArray jkey)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char* key = new char[keyLen + 1];
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte*>(key));
    if(jenv->ExceptionCheck())
    {
        std::cout<<"exception occurred in delete"<<std::endl;
        delete[] key;
        return 0;
    }
    key[keyLen] = '\0';
    std::cout<<"delete "<<key<<" len "<<keyLen<<std::endl;
    delete[] key;
    return 1;
}
\ No newline at end of file
#include <stdio.h>
#include "JClient.h" // generated by javah
#include "Logger.hpp"
#include "RdmaClientEndpoint.hpp"
#include "RdmaClientEndpointGroup.hpp"
/*
bool rocksdb_put_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db,
const ROCKSDB_NAMESPACE::WriteOptions& write_options,
ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle,
jbyteArray jkey, jint jkey_off, jint jkey_len,
jbyteArray jval, jint jval_off, jint jval_len) {
jbyte* key = new jbyte[jkey_len];
env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] key;
return false;
}
jbyte* value = new jbyte[jval_len];
env->GetByteArrayRegion(jval, jval_off, jval_len, value);
if (env->ExceptionCheck()) {
// exception thrown: ArrayIndexOutOfBoundsException
delete[] value;
delete[] key;
return false;
}
*/
JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup(JNIEnv *jenv, jobject jobj,
jint sendq, jint recvq, jint compq,
jint sendm, jint recvm,
jint maxinline, jint timeout)
{
RdmaClientEndpointGroup *group = new RdmaClientEndpointGroup(sendq, recvq, compq, sendm,
recvm, maxinline, timeout);
return reinterpret_cast<jlong>(group);
}
JNIEXPORT jlong JNICALL Java_JClient_createClient(JNIEnv *jenv, jobject jobj, jlong group)
{
RdmaClientEndpoint *client = (reinterpret_cast<RdmaClientEndpointGroup *>(group))
->createEndpoint();
client->connect("192.168.200.20", "1921", "sal");
while (!client->isConnected())
;
return reinterpret_cast<jlong>(client);
}
JNIEXPORT jbyteArray JNICALL Java_JClient_get(JNIEnv *jenv, jobject jobj, jlong jclient,
                                              jbyteArray jkey)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char *key = new char[keyLen];
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte *>(key));
    if (jenv->ExceptionCheck())
    {
        // exception thrown: ArrayIndexOutOfBoundsException
        CPPLog::LOG_ERROR("exception occurs in jni get");
        delete[] key;
        return NULL;
    }
    RdmaClientEndpoint *client = reinterpret_cast<RdmaClientEndpoint *>(jclient);
    auto future = client->get(key, keyLen);
    delete[] key;
    if (future == nullptr)
    {
        return jenv->NewByteArray(0);
    }
    auto data = future->get();
    delete future;
    struct SalResponseHeader *response = (struct SalResponseHeader *)data;
    if (response->status == ResponseStatus::FAILURE)
    {
        delete[] data; // response buffer was heap-allocated in processRecvComp
        return jenv->NewByteArray(0);
    }
    jbyteArray jvalue = jenv->NewByteArray(response->valueSize);
    std::cout << "value size " << response->valueSize << std::endl;
    // copy the payload; only valueSize - 1 bytes are copied, so the final
    // byte of the Java array keeps its zero initialization
    jenv->SetByteArrayRegion(jvalue, 0, response->valueSize - 1,
                             reinterpret_cast<const jbyte *>(data + SalResponseHeaderSize));
    delete[] data;
    return jvalue;
}
JNIEXPORT jint JNICALL Java_JClient_put(JNIEnv *jenv, jobject jobj, jlong jclient,
                                        jbyteArray jkey, jbyteArray jval)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char *key = new char[keyLen];
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte *>(key));
    if (jenv->ExceptionCheck())
    {
        CPPLog::LOG_ERROR("exception occurs in jni put");
        delete[] key;
        return 0;
    }
    int valLen = jenv->GetArrayLength(jval);
    char *val = new char[valLen];
    jenv->GetByteArrayRegion(jval, 0, valLen, reinterpret_cast<jbyte *>(val));
    if (jenv->ExceptionCheck())
    {
        CPPLog::LOG_ERROR("exception occurs in jni put");
        delete[] key;
        delete[] val;
        return 0;
    }
    RdmaClientEndpoint *client = reinterpret_cast<RdmaClientEndpoint *>(jclient);
    auto future = client->put(key, keyLen, val, valLen);
    delete[] key;
    delete[] val;
    if (future == nullptr)
    {
        return 0;
    }
    auto data = future->get();
    delete future;
    struct SalResponseHeader *response = (struct SalResponseHeader *)data;
    int ok = (response->status == ResponseStatus::FAILURE) ? 0 : 1;
    delete[] data; // response buffer was heap-allocated in processRecvComp
    return ok;
}
JNIEXPORT jint JNICALL Java_JClient_delete(JNIEnv *jenv, jobject jobj, jlong jclient,
                                           jbyteArray jkey)
{
    int keyLen = jenv->GetArrayLength(jkey);
    char *key = new char[keyLen];
    jenv->GetByteArrayRegion(jkey, 0, keyLen, reinterpret_cast<jbyte *>(key));
    if (jenv->ExceptionCheck())
    {
        CPPLog::LOG_ERROR("exception occurs in jni delete");
        delete[] key;
        return 0;
    }
    RdmaClientEndpoint *client = reinterpret_cast<RdmaClientEndpoint *>(jclient);
    auto future = client->deleteKey(key, keyLen);
    delete[] key;
    if (future == nullptr)
    {
        return 0;
    }
    auto data = future->get();
    delete future;
    struct SalResponseHeader *response = (struct SalResponseHeader *)data;
    int ok = (response->status == ResponseStatus::FAILURE) ? 0 : 1;
    delete[] data; // response buffer was heap-allocated in processRecvComp
    return ok;
}
JNIEXPORT void JNICALL Java_JClient_closeClient(JNIEnv *, jobject, jlong)
{
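    // TODO: tear down the native RdmaClientEndpoint here; currently the
    // endpoint created by createClient is leaked when the Java side closes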
}
JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup(JNIEnv *, jobject, jlong)
{
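    // TODO: likewise, the RdmaClientEndpointGroup from createEndpointGroup
    // is never freed; closing it is still unimplemented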
}
\ No newline at end of file
#include <string>
#include <iostream>
#include <fstream>
#include <map>
class Properties{
private:
std::map<std::string,std::string> _props;
const std::string _WHITESPACE = " \n\r\t\f\v";
std::string ltrim(const std::string& s)
{
size_t start = s.find_first_not_of(_WHITESPACE);
return (start == std::string::npos) ? "" : s.substr(start);
}
std::string rtrim(const std::string& s)
{
size_t end = s.find_last_not_of(_WHITESPACE);
return (end == std::string::npos) ? "" : s.substr(0, end + 1);
}
std::string trim(const std::string& s)
{
return rtrim(ltrim(s));
}
public:
    Properties(std::string filename){
        std::ifstream file(filename);
        if(!file.is_open()){
            std::cout<<"Config file opening failed\n";
            exit(1);
        }
        std::string line;
        size_t delimPos;
        while(getline(file,line)){
            // strip any trailing comment first, then surrounding whitespace
            delimPos=line.find('#');
            if(delimPos!=std::string::npos)
                line=line.substr(0,delimPos);
            line=trim(line);
            if(line.empty())
                continue;
            delimPos=line.find('=');
            if(delimPos==std::string::npos)
                continue; // skip malformed lines without '='
            _props.insert(make_pair(trim(line.substr(0,delimPos)),trim(line.substr(delimPos+1))));
        }
    }
std::string getValue(std::string key){
auto it=_props.find(key);
if(it==_props.end()){
return "";
}
return it->second;
}
};
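// Example usage, a sketch against the prop.config shown above:
//   Properties props("prop.config");
//   std::string ip = props.getValue("SERVER_IP");      // "192.168.200.20"
//   int poolSize = std::stoi(props.getValue("EXECUTOR_POOL_SIZE"));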
\ No newline at end of file
...@@ -14,80 +14,107 @@ RdmaClientEndpoint::RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup ...@@ -14,80 +14,107 @@ RdmaClientEndpoint::RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup
_sendMsgSize(sendMsgSize), _recvMsgSize(recvMsgSize), _maxInLine(maxInLine), _timeoutMs(timeout) _sendMsgSize(sendMsgSize), _recvMsgSize(recvMsgSize), _maxInLine(maxInLine), _timeoutMs(timeout)
{ {
_state = CONN_STATE_INITIALIZED; _state = CONN_STATE_INITIALIZED;
_sendBuffers = new boost::lockfree::queue<void*>(_sendQueueSize); //_sendBuffers = new boost::lockfree::queue<char *>(_sendQueueSize);
} }
void RdmaClientEndpoint::connect(const char *ip, const char *port, const char *connData) void RdmaClientEndpoint::connect(const char *ip, const char *port, const char *connData)
{ {
_connData = connData; _connData = connData;
if (_state != CONN_STATE_INITIALIZED) if (_state != CONN_STATE_INITIALIZED)
{ {
std::cout << "RdmaClientEndpoint : connect state not initialized" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : connect state not initialized");
return; return;
} }
int ret; int ret;
std::cout << "RdmaClientEndpoint : step2 getaddrinfo" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 getaddrinfo");
struct addrinfo *addr; struct addrinfo *addr;
ret = getaddrinfo(ip, port, NULL, &addr); ret = getaddrinfo(ip, port, NULL, &addr);
if (ret) if (ret)
{ {
std::cout << "RdmaServerEndpointGroup : get_addr_info failed" << std::endl; CPPLog::LOG_ERROR("RdmaServerEndpointGroup : get_addr_info failed");
} }
std::cout << "RdmaClientEndpoint : step2 resolve addr" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 resolve addr");
ret = rdma_resolve_addr(_cm_id, NULL, addr->ai_addr, _timeoutMs); ret = rdma_resolve_addr(_cm_id, NULL, addr->ai_addr, _timeoutMs);
if (ret) if (ret)
{ {
std::cout << "unable to resolve addr" << std::endl; CPPLog::LOG_ERROR("unable to resolve addr");
return; return;
} }
std::cout << "RdmaClientEndpoint : step2 resolve addr resolved" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 resolve addr resolved");
_state = CONN_STATE_ADDR_RESOLVED; _state = CONN_STATE_ADDR_RESOLVED;
} }
bool RdmaClientEndpoint::isConnected() bool RdmaClientEndpoint::isConnected()
{ {
return _state == CONN_STATE_CONNECTED; return _state == CONN_STATE_CONNECTED;
} }
void RdmaClientEndpoint::connect()
{
if (_connData != NULL)
{
struct rdma_conn_param conn_param;
memset(&conn_param, 0, sizeof(conn_param));
conn_param.responder_resources = 1;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 7;
conn_param.private_data = _connData;
conn_param.private_data_len = strlen(_connData);
rdma_connect(_cm_id, &conn_param);
}
else
{
rdma_connect(_cm_id, NULL);
}
}
void RdmaClientEndpoint::processCmEvent(struct rdma_cm_event *event) void RdmaClientEndpoint::processCmEvent(struct rdma_cm_event *event)
{ {
if (event->event == RDMA_CM_EVENT_ADDR_RESOLVED && event->id != NULL) if (event->event == RDMA_CM_EVENT_ADDR_RESOLVED && event->id != NULL)
{ {
std::cout << "RdmaClientEndpoint : step3 resolve_route" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step3 resolve_route");
createResources(); createResources();
rdma_resolve_route(_cm_id, _timeoutMs); rdma_resolve_route(_cm_id, _timeoutMs);
} }
else if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED && event->id != NULL) else if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED && event->id != NULL)
{ {
registerMemory(); registerMemory();
std::cout << "RdmaClientEndpoint : step5 connect" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step5 connect");
connect(); connect();
} }
else if (event->id != NULL && event->event == RDMA_CM_EVENT_ESTABLISHED) else if (event->id != NULL && event->event == RDMA_CM_EVENT_ESTABLISHED)
{ {
std::cout << "RdmaClientEndpoint : step6 Connected" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step6 Connected");
_state = CONN_STATE_CONNECTED; _state = CONN_STATE_CONNECTED;
} }
else if (event->id != NULL && event->event == RDMA_CM_EVENT_DISCONNECTED) else if (event->id != NULL && event->event == RDMA_CM_EVENT_DISCONNECTED)
{ {
std::cout << "RdmaClientEndpoint : step7 Closed" << std::endl; CPPLog::LOG_DEBUG("RdmaClientEndpoint : step7 Closed");
completeClose(); completeClose();
} }
else else
{ {
std::cout << "RdmaClientEndpoint : Not able to procces CM EVent" << rdma_event_str(event->event) << event->id << " " << event->listen_id << std::endl; std::ostringstream ss;
ss << "RdmaClientEndpoint : Not able to procces CM EVent";
ss << rdma_event_str(event->event) << event->id << " " << event->listen_id;
CPPLog::LOG_ERROR(ss);
} }
} }
void RdmaClientEndpoint::close() void RdmaClientEndpoint::close()
{ {
if (_state != CONN_STATE_CONNECTED) if (_state != CONN_STATE_CONNECTED)
{ {
std::cout << "RdmaClientEndpoint : close invalid state" << std::endl; CPPLog::LOG_ERROR("RdmaClientEndpoint : close invalid state");
return; return;
} }
_state = CONN_STATE_PARTIAL_CLOSED; _state = CONN_STATE_PARTIAL_CLOSED;
int ret = rdma_disconnect(_cm_id); int ret = rdma_disconnect(_cm_id);
if (ret) if (ret)
{ {
std::cout << "RdmaClientEndpoint : rdma_disconnect failed" << std::endl; CPPLog::LOG_ERROR("RdmaClientEndpoint : rdma_disconnect failed");
} }
} }
...@@ -95,11 +122,11 @@ void RdmaClientEndpoint::completeClose() ...@@ -95,11 +122,11 @@ void RdmaClientEndpoint::completeClose()
{ {
if (_state != CONN_STATE_PARTIAL_CLOSED) if (_state != CONN_STATE_PARTIAL_CLOSED)
{ {
std::cout << "RdmaClientEndpoint : completeClose invalid state" << std::endl; CPPLog::LOG_ERROR("RdmaClientEndpoint : completeClose invalid state");
return; return;
} }
_state = CONN_STATE_CLOSED; _state = CONN_STATE_CLOSED;
delete _sendBuffers; // delete _sendBuffers;
free(_sendBuff); free(_sendBuff);
free(_recvBuff); free(_recvBuff);
rdma_dereg_mr(_sendMr); rdma_dereg_mr(_sendMr);
...@@ -108,71 +135,54 @@ void RdmaClientEndpoint::completeClose() ...@@ -108,71 +135,54 @@ void RdmaClientEndpoint::completeClose()
rdma_destroy_id(_cm_id); rdma_destroy_id(_cm_id);
} }
void RdmaClientEndpoint::connect()
{
if (_connData != NULL)
{
struct rdma_conn_param conn_param;
memset(&conn_param, 0, sizeof(conn_param));
conn_param.responder_resources = 1;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 7;
conn_param.private_data = _connData;
conn_param.private_data_len = strlen(_connData);
rdma_connect(_cm_id, &conn_param);
}
else
{
rdma_connect(_cm_id, NULL);
}
}
void RdmaClientEndpoint::registerMemory()
{
    if (_state != CONN_STATE_ROUTE_RESOLVED)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : registerMemory route not resolved");
        return;
    }
    _sendBuff = (char *)malloc(_sendMsgSize * _sendQueueSize);
    if (_sendBuff == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : sendBuff malloc failed");
        return;
    }
    _sendMr = rdma_reg_msgs(_cm_id, reinterpret_cast<void *>(_sendBuff),
                            _sendMsgSize * _sendQueueSize);
    if (_sendMr == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : sendMr reg failed");
        return;
    }
    _recvBuff = (char *)malloc(_recvMsgSize * _recvQueueSize);
    if (_recvBuff == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : recvBuff malloc failed");
        return;
    }
    _recvMr = rdma_reg_msgs(_cm_id, reinterpret_cast<void *>(_recvBuff),
                            _recvMsgSize * _recvQueueSize);
    if (_recvMr == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : recvMr reg failed");
        return;
    }
    // Pre-post one receive per slot of the receive ring.
    for (int i = 0; i < _recvQueueSize; i++)
    {
        char *buffer = _recvBuff + i * _recvMsgSize;
        rdma_post_recv(_cm_id, reinterpret_cast<void *>(buffer), reinterpret_cast<void *>(buffer),
                       _recvMsgSize, _recvMr);
    }
    // Carve the send region into fixed-size slots and queue them as free.
    for (int i = 0; i < _sendQueueSize; i++)
    {
        char *buffer = _sendBuff + i * _sendMsgSize; // was i * _recvMsgSize: send slots must stride by the send size
        _sendBuffers.push(buffer);
    }
    _state = CONN_STATE_RESOURCES_ALLOCATED;
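The two loops above carve each contiguous allocation into fixed-size slots addressed as base + i * slotSize. A standalone illustration of the same arithmetic (names hypothetical, not project code):
#include <cstdlib>
#include <queue>

int main()
{
    const int slotSize = 1024, slotCount = 8;
    char *base = (char *)malloc(slotSize * slotCount);
    std::queue<char *> freeSlots;
    for (int i = 0; i < slotCount; i++)
        freeSlots.push(base + i * slotSize); // slot i occupies [i*slotSize, (i+1)*slotSize)
    // ... hand slots out for sends, return them on completion ...
    free(base);
    return 0;
}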
...@@ -181,19 +191,19 @@ void RdmaClientEndpoint::createResources()
{
    if (_state != CONN_STATE_ADDR_RESOLVED)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : createResources address not resolved");
        return;
    }
    _protectionDomain = ibv_alloc_pd(_cm_id->verbs);
    if (_protectionDomain == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : ibv_alloc_pd failed");
        return;
    }
    struct ibv_cq *completionQueue = _group->createCq(_cm_id);
    struct ibv_qp_init_attr qp_init_attr;
    memset(&qp_init_attr, 0, sizeof(qp_init_attr));
    // This is used to attach the endpoint address to the qp as its context
    qp_init_attr.qp_context = (void *)this;
    // If not set to 0, all work requests submitted to the SQ will always generate a Work Completion
    qp_init_attr.sq_sig_all = 1;
...@@ -215,11 +225,11 @@ void RdmaClientEndpoint::createResources()
    int ret = rdma_create_qp(_cm_id, _protectionDomain, &qp_init_attr);
    if (ret)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : rdma_create_qp failed");
    }
    if (_cm_id->pd == NULL)
    {
        CPPLog::LOG_ERROR("RdmaClientEndpoint : pd not set");
        _cm_id->pd = _protectionDomain;
    }
...@@ -230,21 +240,109 @@ int RdmaClientEndpoint::sendMessage(const char *buffer, int size)
{
    if (size > _sendMsgSize)
        return -1;
    std::unique_lock<std::mutex> lock(_sendBuffersM);
    if (_sendBuffers.size() == 0)
        return -1;
    char *sendBuffer = _sendBuffers.front();
    _sendBuffers.pop();
    lock.unlock();
    memcpy(sendBuffer, buffer, size);
    return rdma_post_send(_cm_id, sendBuffer, sendBuffer, size, _sendMr, 0);
}
void RdmaClientEndpoint::processSendComp(struct ibv_wc wc)
{
    // Send completed: return the slot (carried in wr_id) to the free queue.
    std::unique_lock<std::mutex> lock(_sendBuffersM);
    _sendBuffers.push((char *)wc.wr_id);
}
void RdmaClientEndpoint::processRecvComp(struct ibv_wc wc)
{
    // Copy the message out of the registered ring so the slot can be
    // re-posted immediately; the copy is handed to the waiting future.
    char *data = new char[wc.byte_len];
    memcpy(data, (void *)wc.wr_id, wc.byte_len);
    rdma_post_recv(_cm_id, (void *)wc.wr_id, (void *)wc.wr_id, _recvMsgSize, _recvMr);
    struct SalResponseHeader *response = (struct SalResponseHeader *)data;
    /*std::ostringstream ss;
    ss << "Received response for id " << response->id << " status " << response->status;
    ss << " size " << response->valueSize << std::endl;
    CPPLog::LOG_INFO(ss);*/
    if (response->status != ResponseStatus::INVALIDATE)
    {
        auto it = futures.find(response->id);
        if (it == futures.end())
        {
            std::ostringstream ss;
            ss << "Recv completion for invalid id " << response->id;
            CPPLog::LOG_DEBUG(ss);
            return; // was missing: falling through would dereference futures.end()
        }
        it->second->put(data);
    }
}
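The futures map is touched from both the request path (put/get/deleteKey) and the completion path, and the commit's to-do list already flags client-side threading; a map like this eventually needs its own lock. A minimal standalone sketch of a guarded id-to-future registry (hypothetical names, not project code):
#include <cstdint>
#include <mutex>
#include <unordered_map>

template <typename F>
class FutureRegistry
{
    std::mutex m;
    std::unordered_map<uint64_t, F *> map;

public:
    void add(uint64_t id, F *f)
    {
        std::lock_guard<std::mutex> g(m);
        map[id] = f;
    }
    F *take(uint64_t id) // returns nullptr for unknown ids, erasing on success
    {
        std::lock_guard<std::mutex> g(m);
        auto it = map.find(id);
        if (it == map.end())
            return nullptr;
        F *f = it->second;
        map.erase(it);
        return f;
    }
};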
RdmaFuture *RdmaClientEndpoint::put(const char *key, int keySize, const char *value, int valueSize)
{
    if (keySize + valueSize + (int)SalRequestHeaderSize > _sendMsgSize)
        return nullptr;
    std::unique_lock<std::mutex> lock(_sendBuffersM);
    if (_sendBuffers.size() == 0)
        return nullptr;
    char *sendBuffer = _sendBuffers.front();
    _sendBuffers.pop();
    lock.unlock();
    struct SalRequestHeader *request = (struct SalRequestHeader *)sendBuffer;
    request->id = _requestId.fetch_add(1, std::memory_order_relaxed);
    request->type = RequestType::PUT;
    request->keySize = keySize;
    request->valueSize = valueSize;
    memcpy(sendBuffer + SalRequestHeaderSize, key, keySize);
    memcpy(sendBuffer + SalRequestHeaderSize + keySize, value, valueSize);
    // Register the future before posting, so a fast response cannot arrive
    // ahead of the map entry.
    RdmaFuture *future = new RdmaFuture(request->id);
    futures[request->id] = future;
    rdma_post_send(_cm_id, sendBuffer, sendBuffer, SalRequestHeaderSize + keySize + valueSize,
                   _sendMr, 0);
    return future;
}
RdmaFuture *RdmaClientEndpoint::get(const char *key, int keySize)
{
    if (keySize + (int)SalRequestHeaderSize > _sendMsgSize)
        return nullptr;
    std::unique_lock<std::mutex> lock(_sendBuffersM);
    if (_sendBuffers.size() == 0)
        return nullptr;
    char *sendBuffer = _sendBuffers.front();
    _sendBuffers.pop();
    lock.unlock();
    struct SalRequestHeader *request = (struct SalRequestHeader *)sendBuffer;
    request->id = _requestId.fetch_add(1, std::memory_order_relaxed);
    request->type = RequestType::GET;
    request->keySize = keySize;
    memcpy(sendBuffer + SalRequestHeaderSize, key, keySize);
    RdmaFuture *future = new RdmaFuture(request->id);
    futures[request->id] = future;
    rdma_post_send(_cm_id, sendBuffer, sendBuffer, SalRequestHeaderSize + keySize,
                   _sendMr, 0);
    return future;
}
RdmaFuture *RdmaClientEndpoint::deleteKey(const char *key, int keySize)
{
    if (keySize + (int)SalRequestHeaderSize > _sendMsgSize)
        return nullptr;
    std::unique_lock<std::mutex> lock(_sendBuffersM);
    if (_sendBuffers.size() == 0)
        return nullptr;
    char *sendBuffer = _sendBuffers.front();
    _sendBuffers.pop();
    lock.unlock();
    struct SalRequestHeader *request = (struct SalRequestHeader *)sendBuffer;
    request->id = _requestId.fetch_add(1, std::memory_order_relaxed);
    request->type = RequestType::DELETE;
    request->keySize = keySize;
    memcpy(sendBuffer + SalRequestHeaderSize, key, keySize);
    RdmaFuture *future = new RdmaFuture(request->id);
    futures[request->id] = future;
    rdma_post_send(_cm_id, sendBuffer, sendBuffer, SalRequestHeaderSize + keySize,
                   _sendMr, 0);
    return future;
}
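Putting the pieces together, a caller-side round trip could look like the following. This is a sketch only: it assumes an already-connected endpoint, and the header name RdmaClientEndpoint.hpp and the function exampleRoundTrip are hypothetical.
#include "RdmaClientEndpoint.hpp" // assumed header name
#include "RdmaFuture.hpp"

void exampleRoundTrip(RdmaClientEndpoint *ep)
{
    const char key[] = "k1";
    const char value[] = "v1";
    RdmaFuture *f = ep->put(key, sizeof(key) - 1, value, sizeof(value) - 1);
    if (f == nullptr)
        return;                // request too large or no free send slot
    char *resp = f->get();     // blocks until processRecvComp delivers the copy
    struct SalResponseHeader *hdr = (struct SalResponseHeader *)resp;
    // ... inspect hdr->status (and hdr->valueSize / payload for a GET) ...
    delete[] resp;             // matches the new char[wc.byte_len] in processRecvComp
    delete f;                  // futures are heap-allocated by put/get/deleteKey
}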
#include "RdmaFuture.hpp"
int RdmaFuture::DONE = 2;
int RdmaFuture::PENDING = 1;
RdmaFuture::RdmaFuture(uint64_t id)
: _requestId(id), state(PENDING), _data(nullptr) {}
char *RdmaFuture::get()
{
//std::cout << (unsigned)state << std::endl;
int current = 0;
do
{
std::unique_lock<std::mutex> lock(stateMutex);
current = state;
lock.unlock();
} while (current != DONE);
//std::cout<<"get"<<std::endl;
return _data;
}
char *RdmaFuture::wait_for(int timeout)
{
std::unique_lock<std::mutex> lock(stateMutex);
if (state == DONE)
return _data;
lock.unlock();
//add wait logic
return nullptr;
}
void RdmaFuture::put(char *data)
{
_data = data;
//std::cout << "got data current state" <<data<< (unsigned)state;
std::unique_lock<std::mutex> lock(stateMutex);
state = DONE;
//std::cout << "updated" << (unsigned)state;
}
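The get() above burns a core while it polls; a condition variable would let the caller sleep until put() fires. A standalone sketch of that alternative (not the project's class):
#include <condition_variable>
#include <mutex>

class BlockingFuture
{
    std::mutex m;
    std::condition_variable cv;
    char *data = nullptr;
    bool done = false;

public:
    void put(char *d)
    {
        {
            std::lock_guard<std::mutex> g(m);
            data = d;
            done = true;
        }
        cv.notify_all(); // wake every waiter once the result is in place
    }
    char *get()
    {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [this] { return done; }); // sleeps instead of spinning
        return data;
    }
};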