upload android base code part6

August 2018-08-08 17:48:24 +08:00
parent 421e214c7d
commit 4e516ec6ed
35396 changed files with 9188716 additions and 0 deletions


@@ -0,0 +1,24 @@
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BasedOnStyle: Google
CommentPragmas: NOLINT:.*
DerivePointerAlignment: false
AllowShortFunctionsOnASingleLine: Inline
ColumnLimit: 100
TabWidth: 4
UseTab: Never
IndentWidth: 4


@@ -0,0 +1,42 @@
// Copyright (C) 2016 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
cc_library_shared {
name: "libfmq",
shared_libs: [
"libbase",
"liblog",
"libcutils",
"libutils",
],
export_shared_lib_headers: [
"libcutils",
"libutils",
],
export_include_dirs: ["include"],
local_include_dirs: ["include"],
clang: true,
srcs: [
"EventFlag.cpp",
"FmqInternal.cpp",
],
cflags: [
"-Wall",
"-Werror",
],
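// vendor_available together with vndk.enabled (below) marks libfmq as a VNDK
// library, so both framework and vendor processes can link against it.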
vendor_available: true,
vndk: {
enabled: true,
},
}


@@ -0,0 +1,275 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "FMQ_EventFlags"
#include <fmq/EventFlag.h>
#include <linux/futex.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <utils/Log.h>
#include <utils/SystemClock.h>
#include <new>
namespace android {
namespace hardware {
status_t EventFlag::createEventFlag(int fd, off_t offset, EventFlag** flag) {
if (flag == nullptr) {
return BAD_VALUE;
}
status_t status = NO_MEMORY;
*flag = nullptr;
EventFlag* evFlag = new (std::nothrow) EventFlag(fd, offset, &status);
if (evFlag != nullptr) {
if (status == NO_ERROR) {
*flag = evFlag;
} else {
delete evFlag;
}
}
return status;
}
status_t EventFlag::createEventFlag(std::atomic<uint32_t>* fwAddr,
EventFlag** flag) {
if (flag == nullptr) {
return BAD_VALUE;
}
status_t status = NO_MEMORY;
*flag = nullptr;
EventFlag* evFlag = new (std::nothrow) EventFlag(fwAddr, &status);
if (evFlag != nullptr) {
if (status == NO_ERROR) {
*flag = evFlag;
} else {
delete evFlag;
}
}
return status;
}
/*
* mmap memory for the futex word
*/
EventFlag::EventFlag(int fd, off_t offset, status_t* status) {
mEfWordPtr = static_cast<std::atomic<uint32_t>*>(mmap(NULL,
sizeof(std::atomic<uint32_t>),
PROT_READ | PROT_WRITE,
MAP_SHARED, fd, offset));
mEfWordNeedsUnmapping = true;
if (mEfWordPtr != MAP_FAILED) {
*status = NO_ERROR;
} else {
*status = -errno;
ALOGE("Attempt to mmap event flag word failed: %s\n", strerror(errno));
}
}
/*
* Use this constructor if we already know where the futex word for
* the EventFlag group lives.
*/
EventFlag::EventFlag(std::atomic<uint32_t>* fwAddr, status_t* status) {
*status = NO_ERROR;
if (fwAddr == nullptr) {
*status = BAD_VALUE;
} else {
mEfWordPtr = fwAddr;
}
}
/*
* Set the specified bits of the futex word here and wake up any
* thread waiting on any of the bits.
*/
status_t EventFlag::wake(uint32_t bitmask) {
/*
* Return early if there are no set bits in bitmask.
*/
if (bitmask == 0) {
return NO_ERROR;
}
status_t status = NO_ERROR;
uint32_t old = std::atomic_fetch_or(mEfWordPtr, bitmask);
/*
* No need to call FUTEX_WAKE_BITSET if there were deferred wakes
* already available for all set bits from bitmask.
*/
if ((~old & bitmask) != 0) {
int ret = syscall(__NR_futex, mEfWordPtr, FUTEX_WAKE_BITSET,
INT_MAX, NULL, NULL, bitmask);
if (ret == -1) {
status = -errno;
ALOGE("Error in event flag wake attempt: %s\n", strerror(errno));
}
}
return status;
}
/*
* Wait for any of the bits in the bitmask to be set
* and return which bits caused the return.
*/
status_t EventFlag::waitHelper(uint32_t bitmask, uint32_t* efState, int64_t timeoutNanoSeconds) {
/*
* Return early if there are no set bits in bitmask.
*/
if (bitmask == 0 || efState == nullptr) {
return BAD_VALUE;
}
status_t status = NO_ERROR;
uint32_t old = std::atomic_fetch_and(mEfWordPtr, ~bitmask);
uint32_t setBits = old & bitmask;
/*
* If there was a deferred wake available, no need to call FUTEX_WAIT_BITSET.
*/
if (setBits != 0) {
*efState = setBits;
return status;
}
uint32_t efWord = old & ~bitmask;
/*
* The syscall will put the thread to sleep only
* if the futex word still contains the expected
* value i.e. efWord. If the futex word contents have
* changed, it fails with the error EAGAIN. If a timeout
* is specified and exceeded, the syscall fails with ETIMEDOUT.
*/
int ret = 0;
if (timeoutNanoSeconds) {
struct timespec waitTimeAbsolute;
addNanosecondsToCurrentTime(timeoutNanoSeconds, &waitTimeAbsolute);
ret = syscall(__NR_futex, mEfWordPtr, FUTEX_WAIT_BITSET,
efWord, &waitTimeAbsolute, NULL, bitmask);
} else {
ret = syscall(__NR_futex, mEfWordPtr, FUTEX_WAIT_BITSET, efWord, NULL, NULL, bitmask);
}
if (ret == -1) {
status = -errno;
if (status != -EAGAIN && status != -ETIMEDOUT) {
ALOGE("Event flag wait was unsuccessful: %s\n", strerror(errno));
}
*efState = 0;
} else {
old = std::atomic_fetch_and(mEfWordPtr, ~bitmask);
*efState = old & bitmask;
if (*efState == 0) {
/* Return -EINTR for a spurious wakeup */
status = -EINTR;
}
}
return status;
}
/*
* Wait for any of the bits in the bitmask to be set
* and return which bits caused the return. If 'retry'
* is true, wait again on a spurious wake-up.
*/
status_t EventFlag::wait(uint32_t bitmask,
uint32_t* efState,
int64_t timeoutNanoSeconds,
bool retry) {
if (!retry) {
return waitHelper(bitmask, efState, timeoutNanoSeconds);
}
bool shouldTimeOut = timeoutNanoSeconds != 0;
int64_t prevTimeNs = shouldTimeOut ? android::elapsedRealtimeNano() : 0;
status_t status;
while (true) {
if (shouldTimeOut) {
int64_t currentTimeNs = android::elapsedRealtimeNano();
/*
* Decrement timeoutNanoSeconds to account for the time taken to complete the last
* iteration of the while loop.
*/
timeoutNanoSeconds -= currentTimeNs - prevTimeNs;
prevTimeNs = currentTimeNs;
if (timeoutNanoSeconds <= 0) {
status = -ETIMEDOUT;
*efState = 0;
break;
}
}
status = waitHelper(bitmask, efState, timeoutNanoSeconds);
if ((status != -EAGAIN) && (status != -EINTR)) {
break;
}
}
return status;
}
status_t EventFlag::unmapEventFlagWord(std::atomic<uint32_t>* efWordPtr,
bool* efWordNeedsUnmapping) {
status_t status = NO_ERROR;
if (*efWordNeedsUnmapping) {
int ret = munmap(efWordPtr, sizeof(std::atomic<uint32_t>));
if (ret != 0) {
status = -errno;
ALOGE("Error in deleting event flag group: %s\n", strerror(errno));
}
*efWordNeedsUnmapping = false;
}
return status;
}
status_t EventFlag::deleteEventFlag(EventFlag** evFlag) {
if (evFlag == nullptr || *evFlag == nullptr) {
return BAD_VALUE;
}
status_t status = unmapEventFlagWord((*evFlag)->mEfWordPtr,
&(*evFlag)->mEfWordNeedsUnmapping);
delete *evFlag;
*evFlag = nullptr;
return status;
}
void EventFlag::addNanosecondsToCurrentTime(int64_t nanoSeconds, struct timespec* waitTime) {
static constexpr int64_t kNanosPerSecond = 1000000000;
clock_gettime(CLOCK_MONOTONIC, waitTime);
waitTime->tv_sec += nanoSeconds / kNanosPerSecond;
waitTime->tv_nsec += nanoSeconds % kNanosPerSecond;
if (waitTime->tv_nsec >= kNanosPerSecond) {
waitTime->tv_sec++;
waitTime->tv_nsec -= kNanosPerSecond;
}
}
EventFlag::~EventFlag() {
unmapEventFlagWord(mEfWordPtr, &mEfWordNeedsUnmapping);
}
} // namespace hardware
} // namespace android


@@ -0,0 +1,34 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "FMQ"
#include <android-base/logging.h>
namespace android {
namespace hardware {
namespace details {
void check(bool exp) {
CHECK(exp);
}
void logError(const std::string &message) {
LOG(ERROR) << message;
}
} // namespace details
} // namespace hardware
} // namespace android


@@ -0,0 +1,4 @@
smoreland@google.com
elsk@google.com
malchev@google.com
hridya@google.com


@@ -0,0 +1,5 @@
[Options]
ignore_merged_commits = true
[Builtin Hooks]
clang_format = true


@@ -0,0 +1,38 @@
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
msgq_benchmark_client.cpp
LOCAL_SHARED_LIBRARIES := \
libhwbinder \
libbase \
libcutils \
libutils \
libhidlbase \
libhidltransport
LOCAL_REQUIRED_MODULES := \
android.hardware.tests.msgq@1.0-impl_32 \
android.hardware.tests.msgq@1.0-impl
LOCAL_SHARED_LIBRARIES += android.hardware.tests.msgq@1.0 libfmq
LOCAL_MODULE := mq_benchmark_client
include $(BUILD_NATIVE_TEST)


@@ -0,0 +1,439 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <android-base/logging.h>
#include <gtest/gtest.h>
#include <utils/StrongPointer.h>
#include <chrono>
#include <iostream>
#include <android/hardware/tests/msgq/1.0/IBenchmarkMsgQ.h>
#include <fmq/MessageQueue.h>
#include <hidl/ServiceManagement.h>
// libutils:
using android::OK;
using android::sp;
using android::status_t;
// generated
using android::hardware::tests::msgq::V1_0::IBenchmarkMsgQ;
using std::cerr;
using std::cout;
using std::endl;
// libhidl
using android::hardware::kSynchronizedReadWrite;
using android::hardware::MQDescriptorSync;
using android::hardware::MessageQueue;
using android::hardware::details::waitForHwService;
/*
* All the benchmark cases will be performed on an FMQ of size kQueueSize.
*/
static const int32_t kQueueSize = 1024 * 16;
/*
* The number of iterations for each experiment.
*/
static const uint32_t kNumIterations = 1000;
/*
* The various packet sizes used are as follows.
*/
enum PacketSizes {
kPacketSize64 = 64,
kPacketSize128 = 128,
kPacketSize256 = 256,
kPacketSize512 = 512,
kPacketSize1024 = 1024
};
class MQTestClient : public ::testing::Test {
protected:
virtual void TearDown() {
delete mFmqInbox;
delete mFmqOutbox;
}
virtual void SetUp() {
// waitForHwService is required because IBenchmarkMsgQ is not in manifest.xml.
// "Real" HALs shouldn't be doing this.
waitForHwService(IBenchmarkMsgQ::descriptor, "default");
service = IBenchmarkMsgQ::getService();
ASSERT_NE(service, nullptr);
ASSERT_TRUE(service->isRemote());
/*
* Request service to configure the client inbox queue.
*/
service->configureClientInboxSyncReadWrite([this](bool ret,
const MQDescriptorSync<uint8_t>& in) {
ASSERT_TRUE(ret);
mFmqInbox = new (std::nothrow) MessageQueue<uint8_t, kSynchronizedReadWrite>(in);
});
ASSERT_TRUE(mFmqInbox != nullptr);
ASSERT_TRUE(mFmqInbox->isValid());
/*
* Request service to configure the client outbox queue.
*/
service->configureClientOutboxSyncReadWrite([this](bool ret,
const MQDescriptorSync<uint8_t>& out) {
ASSERT_TRUE(ret);
mFmqOutbox = new (std::nothrow) MessageQueue<uint8_t,
kSynchronizedReadWrite>(out);
});
ASSERT_TRUE(mFmqOutbox != nullptr);
ASSERT_TRUE(mFmqOutbox->isValid());
}
sp<IBenchmarkMsgQ> service;
android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite>* mFmqInbox = nullptr;
android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite>* mFmqOutbox = nullptr;
};
/*
* Client writes a 64 byte packet into the outbox queue, service reads the
* same and
* writes the packet into the client's inbox queue. Client reads the packet. The
* average time taken for the cycle is measured.
*/
TEST_F(MQTestClient, BenchMarkMeasurePingPongTransfer) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize64];
ASSERT_TRUE(data != nullptr);
int64_t accumulatedTime = 0;
size_t numRoundTrips = 0;
/*
* This method requests the service to create a thread which reads
* from mFmqOutbox and writes into mFmqInbox.
*/
service->benchmarkPingPong(kNumIterations);
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
while (numRoundTrips < kNumIterations) {
while (mFmqOutbox->write(data, kPacketSize64) == 0) {
}
while (mFmqInbox->read(data, kPacketSize64) == 0) {
}
numRoundTrips++;
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += static_cast<int64_t>(std::chrono::duration_cast<std::chrono::nanoseconds>(
timeEnd - timeStart).count());
accumulatedTime /= kNumIterations;
cout << "Round trip time for " << kPacketSize64 << "bytes: " <<
accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to read 64 bytes from the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureRead64Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize64];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize64;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
bool ret = service->requestWrite(kQueueSize);
ASSERT_TRUE(ret);
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* The read() method returns true only if the correct number of bytes
* were successfully read from the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqInbox->read(data, kPacketSize64));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to read" << kPacketSize64
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to read 128 bytes.
*/
TEST_F(MQTestClient, BenchMarkMeasureRead128Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize128];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize128;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
bool ret = service->requestWrite(kQueueSize);
ASSERT_TRUE(ret);
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* The read() method returns true only if the correct number of bytes
* were successfully read from the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqInbox->read(data, kPacketSize128));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to read" << kPacketSize128
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to read 256 bytes from the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureRead256Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize256];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize256;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
bool ret = service->requestWrite(kQueueSize);
ASSERT_TRUE(ret);
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* The read() method returns true only if the correct number of bytes
* were successfully read from the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqInbox->read(data, kPacketSize256));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to read" << kPacketSize256
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to read 512 bytes from the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureRead512Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize512];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize512;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
bool ret = service->requestWrite(kQueueSize);
ASSERT_TRUE(ret);
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* The read() method returns true only if the correct number of bytes
* were successfully read from the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqInbox->read(data, kPacketSize512));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to read" << kPacketSize512
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to write 64 bytes into the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureWrite64Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize64];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize64;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* Write until the queue is full and request service to empty the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
bool result = mFmqOutbox->write(data, kPacketSize64);
ASSERT_TRUE(result);
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
bool ret = service->requestRead(kQueueSize);
ASSERT_TRUE(ret);
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to write " << kPacketSize64
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to write 128 bytes into the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureWrite128Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize128];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize128;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* Write until the queue is full and request service to empty the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqOutbox->write(data, kPacketSize128));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
bool ret = service->requestRead(kQueueSize);
ASSERT_TRUE(ret);
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to write " << kPacketSize128
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to write 256 bytes into the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureWrite256Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize256];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize256;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* Write until the queue is full and request service to empty the queue.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqOutbox->write(data, kPacketSize256));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
bool ret = service->requestRead(kQueueSize);
ASSERT_TRUE(ret);
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to write " << kPacketSize256
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Measure the average time taken to write 512 bytes into the queue.
*/
TEST_F(MQTestClient, BenchMarkMeasureWrite512Bytes) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize512];
ASSERT_TRUE(data != nullptr);
uint32_t numLoops = kQueueSize / kPacketSize512;
uint64_t accumulatedTime = 0;
for (uint32_t i = 0; i < kNumIterations; i++) {
std::chrono::time_point<std::chrono::high_resolution_clock> timeStart =
std::chrono::high_resolution_clock::now();
/*
* Write until the queue is full and request service to empty the queue.
* The write() method returns true only if the specified number of bytes
* were successfully written.
*/
for (uint32_t j = 0; j < numLoops; j++) {
ASSERT_TRUE(mFmqOutbox->write(data, kPacketSize512));
}
std::chrono::time_point<std::chrono::high_resolution_clock> timeEnd =
std::chrono::high_resolution_clock::now();
accumulatedTime += (timeEnd - timeStart).count();
bool ret = service->requestRead(kQueueSize);
ASSERT_TRUE(ret);
}
accumulatedTime /= (numLoops * kNumIterations);
cout << "Average time to write " << kPacketSize512
<< "bytes: " << accumulatedTime << "ns" << endl;
delete[] data;
}
/*
* Service continuously writes a packet of 64 bytes into the client's inbox
* queue
* of size 16K. Client keeps reading from the inbox queue. The average write to
* read delay is calculated.
*/
TEST_F(MQTestClient, BenchMarkMeasureServiceWriteClientRead) {
uint8_t* data = new (std::nothrow) uint8_t[kPacketSize64];
ASSERT_TRUE(data != nullptr);
/*
* This method causes the service to create a thread which writes
* into the mFmqInbox queue kNumIterations packets.
*/
service->benchmarkServiceWriteClientRead(kNumIterations);
android::hardware::hidl_vec<int64_t> clientRcvTimeArray;
clientRcvTimeArray.resize(kNumIterations);
for (uint32_t i = 0; i < kNumIterations; i++) {
do {
clientRcvTimeArray[i] =
std::chrono::high_resolution_clock::now().time_since_epoch().count();
} while (mFmqInbox->read(data, kPacketSize64) == 0);
}
service->sendTimeData(clientRcvTimeArray);
delete[] data;
}


@@ -0,0 +1,151 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HIDL_EVENTFLAG_H
#define HIDL_EVENTFLAG_H
#include <time.h>
#include <utils/Errors.h>
#include <atomic>
namespace android {
namespace hardware {
/**
* EventFlag is an abstraction that application code utilizing FMQ can use to wait on
* conditions like full, empty, data available etc. The same EventFlag object
* can be used with multiple FMQs.
*/
struct EventFlag {
/**
* Create an event flag object with mapping information.
*
* @param fd File descriptor to be mmapped to create the event flag word.
* There is no transfer of ownership of the fd. The caller will still
* own the fd for the purpose of closing it.
* @param offset Offset parameter to mmap.
* @param ef Pointer to address of the EventFlag object that gets created. Will be set to
* nullptr if unsuccessful.
*
* @return status Returns a status_t error code. Likely error codes are
* NO_ERROR if the method is successful or BAD_VALUE due to invalid
* mapping arguments.
*/
static status_t createEventFlag(int fd, off_t offset, EventFlag** ef);
/**
* Create an event flag object from the address of the flag word.
*
* @param efWordPtr Pointer to the event flag word.
* @param ef Pointer to the address of the EventFlag object that gets created. Will be set to
* nullptr if unsuccessful.
*
* @return Returns a status_t error code. Likely error codes are
* NO_ERROR if the method is successful or BAD_VALUE if efWordPtr is a null
* pointer.
*
*/
static status_t createEventFlag(std::atomic<uint32_t>* efWordPtr,
EventFlag** ef);
/**
* Delete an EventFlag object.
*
* @param ef A double pointer to the EventFlag object to be destroyed.
*
* @return Returns a status_t error code. Likely error codes are
* NO_ERROR if the method is successful or BAD_VALUE due to
* a bad input parameter.
*/
static status_t deleteEventFlag(EventFlag** ef);
/**
* Set the specified bits of the event flag word and wake up any threads waiting on those bits.
* @param bitmask The bits to be set on the event flag word.
*
* @return Returns a status_t error code. Likely error codes are
* NO_ERROR if the method is successful or BAD_VALUE if the bit mask
* does not have any bits set.
*/
status_t wake(uint32_t bitmask);
/**
* Wait for any of the bits in the bit mask to be set.
*
* @param bitmask The bits to wait on.
* @param timeoutNanoSeconds Specifies timeout duration in nanoseconds. It is converted to
* an absolute timeout for the wait according to the CLOCK_MONOTONIC clock.
* @param efState The event flag bits that caused the return from wake.
* @param retry If true, retry automatically for a spurious wake. If false,
* will return -EINTR or -EAGAIN for a spurious wake.
*
* @return Returns a status_t error code. Likely error codes are
* NO_ERROR if the method is successful, BAD_VALUE due to bad input
* parameters, TIMED_OUT if the wait timed out as per the timeout
* parameter, -EAGAIN or -EINTR to indicate that the caller needs to invoke
* wait() again. -EAGAIN or -EINTR error codes will not be returned if
* 'retry' is true since the method will retry waiting in that case.
*/
status_t wait(uint32_t bitmask,
uint32_t* efState,
int64_t timeOutNanoSeconds = 0,
bool retry = false);
private:
bool mEfWordNeedsUnmapping = false;
std::atomic<uint32_t>* mEfWordPtr = nullptr;
/*
* mmap memory for the event flag word.
*/
EventFlag(int fd, off_t offset, status_t* status);
/*
* Use this constructor if we already know where the event flag word
* lives.
*/
EventFlag(std::atomic<uint32_t>* efWordPtr, status_t* status);
/*
* Disallow constructor without argument and copying.
*/
EventFlag();
EventFlag& operator=(const EventFlag& other) = delete;
EventFlag(const EventFlag& other) = delete;
/*
* Wait for any of the bits in the bit mask to be set.
*/
status_t waitHelper(uint32_t bitmask, uint32_t* efState, int64_t timeOutNanoSeconds);
/*
* Utility method to unmap the event flag word.
*/
static status_t unmapEventFlagWord(std::atomic<uint32_t>* efWordPtr,
bool* efWordNeedsUnmapping);
/*
* Utility method to convert timeout duration to an absolute CLOCK_MONOTONIC
* clock time which is required by futex syscalls.
*/
inline void addNanosecondsToCurrentTime(int64_t nanoseconds, struct timespec* timeAbs);
~EventFlag();
};
} // namespace hardware
} // namespace android
#endif
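A minimal usage sketch of the API above (illustrative only: eventFlagSketch, sharedWord, and the kDataReady bit are hypothetical names, and error handling is abbreviated):

#include <atomic>

#include <fmq/EventFlag.h>
#include <utils/Errors.h>

// Hypothetical flag bit, used only for this sketch.
static constexpr uint32_t kDataReady = 1 << 0;

void eventFlagSketch(std::atomic<uint32_t>* sharedWord) {
    android::hardware::EventFlag* ef = nullptr;
    // Build an EventFlag group on top of an existing futex word.
    if (android::hardware::EventFlag::createEventFlag(sharedWord, &ef) != android::NO_ERROR) {
        return;
    }
    // Producer side: set kDataReady and wake any waiters.
    ef->wake(kDataReady);
    // Consumer side: wait up to 1s for kDataReady, retrying on spurious wake-ups.
    uint32_t state = 0;
    ef->wait(kDataReady, &state, 1000000000 /* timeoutNanoSeconds */, true /* retry */);
    android::hardware::EventFlag::deleteEventFlag(&ef);
}

Real clients typically obtain the flag word from MessageQueue::getEventFlagWord(), as the tests later in this commit do.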

File diff suppressed because it is too large.


@@ -0,0 +1,71 @@
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := fmq_test
LOCAL_MODULE_CLASS := NATIVE_TESTS
LOCAL_SRC_FILES := fmq_test
LOCAL_REQUIRED_MODULES := \
mq_test_client \
android.hardware.tests.msgq@1.0-service-test \
mq_test_client_32 \
android.hardware.tests.msgq@1.0-service-test_32 \
hidl_test_helper
include $(BUILD_PREBUILT)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
msgq_test_client.cpp
LOCAL_SHARED_LIBRARIES := \
libhidlbase \
libhidltransport \
libhwbinder \
libcutils \
libutils \
libbase \
libfmq \
liblog
LOCAL_CFLAGS := -Wall -Werror
LOCAL_SHARED_LIBRARIES += android.hardware.tests.msgq@1.0 libfmq
LOCAL_MODULE := mq_test_client
LOCAL_REQUIRED_MODULES := \
android.hardware.tests.msgq@1.0-impl_32 \
android.hardware.tests.msgq@1.0-impl
include $(BUILD_NATIVE_TEST)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
mq_test.cpp
LOCAL_STATIC_LIBRARIES := libutils libcutils liblog
LOCAL_SHARED_LIBRARIES := \
libhidlbase \
libhidltransport \
libhwbinder \
libbase \
libfmq
LOCAL_MODULE := mq_test
LOCAL_CFLAGS := -Wall -Werror
include $(BUILD_NATIVE_TEST)
include $(CLEAR_VARS)
LOCAL_MODULE := VtsFmqUnitTests
VTS_CONFIG_SRC_DIR := system/libfmq/tests
-include test/vts/tools/build/Android.host_config.mk


@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2016 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration description="Config for VTS FMQ unit tests">
<target_preparer class="com.android.compatibility.common.tradefed.targetprep.VtsFilePusher">
<option name="push-group" value="HostDrivenTest.push" />
<option name="cleanup" value="true" />
<option name="push" value="DATA/nativetest/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test->/data/nativetest/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" />
<option name="push" value="DATA/nativetest64/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test->/data/nativetest64/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" />
<option name="push" value="DATA/nativetest/mq_test_client/mq_test_client->/data/nativetest/mq_test_client/mq_test_client" />
<option name="push" value="DATA/nativetest64/mq_test_client/mq_test_client->/data/nativetest64/mq_test_client/mq_test_client" />
<option name="push" value="DATA/nativetest64/hidl_test_helper->/data/nativetest64/hidl_test_helper" />
<option name="push" value="DATA/nativetest64/fmq_test->/data/nativetest64/fmq_test" />
<option name="push" value="DATA/lib/android.hardware.tests.msgq@1.0.so->/data/local/tmp/system/lib/android.hardware.tests.msgq@1.0.so" />
<option name="push" value="DATA/lib64/android.hardware.tests.msgq@1.0.so->/data/local/tmp/system/lib64/android.hardware.tests.msgq@1.0.so" />
<option name="push" value="DATA/lib/hw/android.hardware.tests.msgq@1.0-impl.so->/data/local/tmp/system/lib/hw/android.hardware.tests.msgq@1.0-impl.so" />
<option name="push" value="DATA/lib64/hw/android.hardware.tests.msgq@1.0-impl.so->/data/local/tmp/system/lib64/hw/android.hardware.tests.msgq@1.0-impl.so" />
</target_preparer>
<target_preparer class="com.android.tradefed.targetprep.VtsPythonVirtualenvPreparer" />
<test class="com.android.tradefed.testtype.VtsMultiDeviceTest">
<option name="test-module-name" value="VtsFmqUnitTests" />
<option name="binary-test-type" value="binary_test" />
<option name="binary-test-source" value="DATA/nativetest64/fmq_test->/data/nativetest64/fmq_test" />
<option name="binary-test-envp" value="TREBLE_TESTING_OVERRIDE=true" />
<option name="binary-test-envp" value="VTS_ROOT_PATH=/data/local/tmp" />
<option name="binary-test-args" value="-x /data/local/tmp/system/lib" />
<option name="binary-test-args" value="-y /data/local/tmp/system/lib64" />
</test>
</configuration>


@@ -0,0 +1,9 @@
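# Run the 32-bit and 64-bit msgq test service and client binaries through the
# run_all_tests helper provided by hidl_test_helper.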
source /data/nativetest64/hidl_test_helper
TREBLE_TESTING_OVERRIDE=true run_all_tests \
"/data/nativetest/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" \
"/data/nativetest64/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" \
"/data/nativetest/mq_test_client/mq_test_client" \
"/data/nativetest64/mq_test_client/mq_test_client" \
"fmq_test" \
"$@"


@@ -0,0 +1,809 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <asm-generic/mman.h>
#include <gtest/gtest.h>
#include <atomic>
#include <cstdlib>
#include <sstream>
#include <thread>
#include <fmq/MessageQueue.h>
#include <fmq/EventFlag.h>
enum EventFlagBits : uint32_t {
kFmqNotEmpty = 1 << 0,
kFmqNotFull = 1 << 1,
};
typedef android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>
MessageQueueSync;
typedef android::hardware::MessageQueue<uint8_t, android::hardware::kUnsynchronizedWrite>
MessageQueueUnsync;
class SynchronizedReadWrites : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
static constexpr size_t kNumElementsInQueue = 2048;
mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
}
MessageQueueSync* mQueue = nullptr;
size_t mNumMessagesMax = 0;
};
class UnsynchronizedWrite : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
static constexpr size_t kNumElementsInQueue = 2048;
mQueue = new (std::nothrow) MessageQueueUnsync(kNumElementsInQueue);
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
}
MessageQueueUnsync* mQueue = nullptr;
size_t mNumMessagesMax = 0;
};
class BlockingReadWrites : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
static constexpr size_t kNumElementsInQueue = 2048;
mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
/*
* Initialize the EventFlag word to indicate Queue is not full.
*/
std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
}
MessageQueueSync* mQueue;
std::atomic<uint32_t> mFw;
size_t mNumMessagesMax = 0;
};
class QueueSizeOdd : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
static constexpr size_t kNumElementsInQueue = 2049;
mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue,
true /* configureEventFlagWord */);
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
auto evFlagWordPtr = mQueue->getEventFlagWord();
ASSERT_NE(nullptr, evFlagWordPtr);
/*
* Initialize the EventFlag word to indicate Queue is not full.
*/
std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
}
MessageQueueSync* mQueue;
size_t mNumMessagesMax = 0;
};
class BadQueueConfig: public ::testing::Test {
};
/*
* Utility function to initialize data to be written to the FMQ
*/
inline void initData(uint8_t* data, size_t count) {
for (size_t i = 0; i < count; i++) {
data[i] = i & 0xFF;
}
}
/*
* This thread will attempt to read and block. When wait() returns,
* it checks whether the kFmqNotEmpty bit is actually set.
* If the read is successful, it signals a wake on kFmqNotFull.
*/
void ReaderThreadBlocking(
android::hardware::MessageQueue<uint8_t,
android::hardware::kSynchronizedReadWrite>* fmq,
std::atomic<uint32_t>* fwAddr) {
const size_t dataLen = 64;
uint8_t data[dataLen];
android::hardware::EventFlag* efGroup = nullptr;
android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_NE(nullptr, efGroup);
while (true) {
uint32_t efState = 0;
android::status_t ret = efGroup->wait(kFmqNotEmpty,
&efState,
5000000000 /* timeoutNanoSeconds */);
/*
* Wait should not time out here after 5s
*/
ASSERT_NE(android::TIMED_OUT, ret);
if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
efGroup->wake(kFmqNotFull);
break;
}
}
status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
ASSERT_EQ(android::NO_ERROR, status);
}
/*
* This thread will attempt to read and block using the readBlocking() API and
* passes in a pointer to an EventFlag object.
*/
void ReaderThreadBlocking2(
android::hardware::MessageQueue<uint8_t,
android::hardware::kSynchronizedReadWrite>* fmq,
std::atomic<uint32_t>* fwAddr) {
const size_t dataLen = 64;
uint8_t data[dataLen];
android::hardware::EventFlag* efGroup = nullptr;
android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_NE(nullptr, efGroup);
bool ret = fmq->readBlocking(data,
dataLen,
static_cast<uint32_t>(kFmqNotFull),
static_cast<uint32_t>(kFmqNotEmpty),
5000000000 /* timeOutNanos */,
efGroup);
ASSERT_TRUE(ret);
status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
ASSERT_EQ(android::NO_ERROR, status);
}
TEST_F(BadQueueConfig, QueueSizeTooLarge) {
typedef android::hardware::MessageQueue<uint16_t, android::hardware::kSynchronizedReadWrite>
MessageQueueSync16;
size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
MessageQueueSync16 * fmq = new (std::nothrow) MessageQueueSync16(numElementsInQueue);
ASSERT_NE(nullptr, fmq);
/*
* Should fail due to size being too large to fit into size_t.
*/
ASSERT_FALSE(fmq->isValid());
}
/*
* Test that basic blocking works. This test uses the non-blocking read()/write()
* APIs.
*/
TEST_F(BlockingReadWrites, SmallInputTest1) {
const size_t dataLen = 64;
uint8_t data[dataLen] = {0};
android::hardware::EventFlag* efGroup = nullptr;
android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_NE(nullptr, efGroup);
/*
* Start a thread that will try to read and block on kFmqNotEmpty.
*/
std::thread Reader(ReaderThreadBlocking, mQueue, &mFw);
struct timespec waitTime = {0, 100 * 1000000};
ASSERT_EQ(0, nanosleep(&waitTime, NULL));
/*
* After waiting for some time write into the FMQ
* and call Wake on kFmqNotEmpty.
*/
ASSERT_TRUE(mQueue->write(data, dataLen));
status = efGroup->wake(kFmqNotEmpty);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_EQ(0, nanosleep(&waitTime, NULL));
Reader.join();
status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
ASSERT_EQ(android::NO_ERROR, status);
}
/*
* Test that basic blocking works. This test uses the
* writeBlocking()/readBlocking() APIs.
*/
TEST_F(BlockingReadWrites, SmallInputTest2) {
const size_t dataLen = 64;
uint8_t data[dataLen] = {0};
android::hardware::EventFlag* efGroup = nullptr;
android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_NE(nullptr, efGroup);
/*
* Start a thread that will try to read and block on kFmqNotEmpty. It will
* call wake() on kFmqNotFull when the read is successful.
*/
std::thread Reader(ReaderThreadBlocking2, mQueue, &mFw);
bool ret = mQueue->writeBlocking(data,
dataLen,
static_cast<uint32_t>(kFmqNotFull),
static_cast<uint32_t>(kFmqNotEmpty),
5000000000 /* timeOutNanos */,
efGroup);
ASSERT_TRUE(ret);
Reader.join();
status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
ASSERT_EQ(android::NO_ERROR, status);
}
/*
* Test that basic blocking times out as intended.
*/
TEST_F(BlockingReadWrites, BlockingTimeOutTest) {
android::hardware::EventFlag* efGroup = nullptr;
android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);
ASSERT_EQ(android::NO_ERROR, status);
ASSERT_NE(nullptr, efGroup);
/* Block on an EventFlag bit that no one will wake and time out in 1s */
uint32_t efState = 0;
android::status_t ret = efGroup->wait(kFmqNotEmpty,
&efState,
1000000000 /* timeoutNanoSeconds */);
/*
* Wait should time out in a second.
*/
EXPECT_EQ(android::TIMED_OUT, ret);
status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
ASSERT_EQ(android::NO_ERROR, status);
}
/*
* Test that an odd queue size does not cause an unaligned-access error
* when the EventFlag object is accessed.
*/
TEST_F(QueueSizeOdd, EventFlagTest) {
const size_t dataLen = 64;
uint8_t data[dataLen] = {0};
bool ret = mQueue->writeBlocking(data,
dataLen,
static_cast<uint32_t>(kFmqNotFull),
static_cast<uint32_t>(kFmqNotEmpty),
5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
/*
* Verify that a few bytes of data can be successfully written and read.
*/
TEST_F(SynchronizedReadWrites, SmallInputTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_EQ(0, memcmp(data, readData, dataLen));
}
/*
* Verify that a few bytes of data can be successfully written and read using
* beginRead/beginWrite/commitRead/commitWrite.
*/
TEST_F(SynchronizedReadWrites, SmallInputTest2) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));
ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));
ASSERT_TRUE(mQueue->commitWrite(dataLen));
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));
ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));
ASSERT_TRUE(mQueue->commitRead(dataLen));
ASSERT_EQ(0, memcmp(data, readData, dataLen));
}
/*
* Verify that a few bytes of data can be successfully written and read using
* beginRead/beginWrite/commitRead/commitWrite as well as getSlot().
*/
TEST_F(SynchronizedReadWrites, SmallInputTest3) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
for (size_t i = 0; i < dataLen; i++) {
uint8_t* ptr = tx.getSlot(i);
*ptr = data[i];
}
ASSERT_TRUE(mQueue->commitWrite(dataLen));
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));
first = tx.getFirstRegion();
second = tx.getSecondRegion();
ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
for (size_t i = 0; i < dataLen; i++) {
uint8_t* ptr = tx.getSlot(i);
readData[i] = *ptr;
}
ASSERT_TRUE(mQueue->commitRead(dataLen));
ASSERT_EQ(0, memcmp(data, readData, dataLen));
}
/*
* Verify that read() returns false when trying to read from an empty queue.
*/
TEST_F(SynchronizedReadWrites, ReadWhenEmpty1) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 2;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t readData[dataLen];
ASSERT_FALSE(mQueue->read(readData, dataLen));
}
/*
* Verify that beginRead() returns a MemTransaction object with null pointers when trying
* to read from an empty queue.
*/
TEST_F(SynchronizedReadWrites, ReadWhenEmpty2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 2;
ASSERT_LE(dataLen, mNumMessagesMax);
MessageQueueSync::MemTransaction tx;
ASSERT_FALSE(mQueue->beginRead(dataLen, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
ASSERT_EQ(nullptr, first.getAddress());
ASSERT_EQ(nullptr, second.getAddress());
}
/*
* Write the queue until full. Verify that another write is unsuccessful.
* Verify that availableToWrite() returns 0 as expected.
*/
TEST_F(SynchronizedReadWrites, WriteWhenFull1) {
ASSERT_EQ(0UL, mQueue->availableToRead());
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_FALSE(mQueue->write(&data[0], 1));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Write the queue until full. Verify that beginWrite() returns
* a MemTransaction object with null base pointers.
*/
TEST_F(SynchronizedReadWrites, WriteWhenFull2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
MessageQueueSync::MemTransaction tx;
ASSERT_FALSE(mQueue->beginWrite(1, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
ASSERT_EQ(nullptr, first.getAddress());
ASSERT_EQ(nullptr, second.getAddress());
}
/*
* Write a chunk of data equal to the queue size.
* Verify that the write is successful and the subsequent read
* returns the expected data.
*/
TEST_F(SynchronizedReadWrites, LargeInputTest1) {
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Attempt to write a chunk of data larger than the queue size.
* Verify that it fails. Verify that a subsequent read fails and
* the queue is still empty.
*/
TEST_F(SynchronizedReadWrites, LargeInputTest2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 4096;
ASSERT_GT(dataLen, mNumMessagesMax);
std::vector<uint8_t> data(dataLen);
initData(&data[0], dataLen);
ASSERT_FALSE(mQueue->write(&data[0], dataLen));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_NE(data, readData);
ASSERT_EQ(0UL, mQueue->availableToRead());
}
/*
* After the queue is full, try to write more data. Verify that
* the attempt returns false. Verify that the attempt did not
* affect the pre-existing data in the queue.
*/
TEST_F(SynchronizedReadWrites, LargeInputTest3) {
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_FALSE(mQueue->write(&data[0], 1));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Verify that beginWrite() returns a MemTransaction with
* null base pointers when attempting to write data larger
* than the queue size.
*/
TEST_F(SynchronizedReadWrites, LargeInputTest4) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 4096;
ASSERT_GT(dataLen, mNumMessagesMax);
MessageQueueSync::MemTransaction tx;
ASSERT_FALSE(mQueue->beginWrite(dataLen, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
ASSERT_EQ(nullptr, first.getAddress());
ASSERT_EQ(nullptr, second.getAddress());
}
/*
* Verify that multiple reads one after the other return expected data.
*/
TEST_F(SynchronizedReadWrites, MultipleRead) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t dataLen = chunkSize * chunkNum;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
uint8_t readData[dataLen] = {};
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
}
ASSERT_EQ(0, memcmp(readData, data, dataLen));
}
/*
* Verify that multiple writes one after the other happens correctly.
*/
TEST_F(SynchronizedReadWrites, MultipleWrite) {
const int chunkSize = 100;
const int chunkNum = 5;
const size_t dataLen = chunkSize * chunkNum;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
for (unsigned int i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
}
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_EQ(0, memcmp(readData, data, dataLen));
}
/*
* Write mNumMessagesMax - 1 messages into the FMQ and read them back,
* leaving the read and write pointers near the end of the ring buffer.
* Write mNumMessagesMax messages into the queue. This will cause a
* wrap around. Read and verify the data.
*/
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround1) {
size_t numMessages = mNumMessagesMax - 1;
std::vector<uint8_t> data(mNumMessagesMax);
std::vector<uint8_t> readData(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], numMessages));
ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Use the beginRead/commitRead/beginWrite/commitWrite APIs
* to test that wrap arounds are handled correctly.
* Write mNumMessagesMax - 1 messages into the FMQ and read them back,
* leaving the read and write pointers near the end of the ring buffer.
* Write mNumMessagesMax messages into the queue. This will cause a
* wrap around. Read and verify the data.
*/
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround2) {
size_t dataLen = mNumMessagesMax - 1;
std::vector<uint8_t> data(mNumMessagesMax);
std::vector<uint8_t> readData(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], dataLen));
ASSERT_TRUE(mQueue->read(&readData[0], dataLen));
/*
* The next write and read will have to deal with wrap arounds.
*/
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(mNumMessagesMax, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);
ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, mNumMessagesMax));
ASSERT_TRUE(mQueue->commitWrite(mNumMessagesMax));
ASSERT_TRUE(mQueue->beginRead(mNumMessagesMax, &tx));
first = tx.getFirstRegion();
second = tx.getSecondRegion();
ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);
ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, mNumMessagesMax));
ASSERT_TRUE(mQueue->commitRead(mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Verify that a few bytes of data can be successfully written and read.
*/
TEST_F(UnsynchronizedWrite, SmallInputTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_EQ(0, memcmp(data, readData, dataLen));
}
/*
* Verify that read() returns false when trying to read from an empty queue.
*/
TEST_F(UnsynchronizedWrite, ReadWhenEmpty) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 2;
ASSERT_TRUE(dataLen < mNumMessagesMax);
uint8_t readData[dataLen];
ASSERT_FALSE(mQueue->read(readData, dataLen));
}
/*
* Write the queue until it is full. Verify that a subsequent write is successful.
* Verify that availableToWrite() returns 0 as expected.
*/
TEST_F(UnsynchronizedWrite, WriteWhenFull1) {
ASSERT_EQ(0UL, mQueue->availableToRead());
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_TRUE(mQueue->write(&data[0], 1));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}
/*
* Write the queue until it is full. Verify that a subsequent write
* using beginWrite()/commitWrite() is successful.
* Verify that the next read fails as expected for the unsynchronized flavor.
*/
TEST_F(UnsynchronizedWrite, WriteWhenFull2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
std::vector<uint8_t> data(mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
MessageQueueUnsync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(1, &tx));
ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);
ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));
ASSERT_TRUE(mQueue->commitWrite(1));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}
/*
* Write a chunk of data equal to the queue size.
* Verify that the write is successful and the subsequent read
* returns the expected data.
*/
TEST_F(UnsynchronizedWrite, LargeInputTest1) {
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}
/*
* Attempt to write a chunk of data larger than the queue size.
* Verify that it fails. Verify that a subsequent read fails and
* the queue is still empty.
*/
TEST_F(UnsynchronizedWrite, LargeInputTest2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t dataLen = 4096;
ASSERT_GT(dataLen, mNumMessagesMax);
std::vector<uint8_t> data(dataLen);
initData(&data[0], dataLen);
ASSERT_FALSE(mQueue->write(&data[0], dataLen));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_NE(data, readData);
ASSERT_EQ(0UL, mQueue->availableToRead());
}
/*
* After the queue is full, try to write more data. Verify that
* the attempt is successful. Verify that the read fails
* as expected.
*/
TEST_F(UnsynchronizedWrite, LargeInputTest3) {
std::vector<uint8_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_TRUE(mQueue->write(&data[0], 1));
std::vector<uint8_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}
/*
* Verify that multiple reads one after the other return expected data.
*/
TEST_F(UnsynchronizedWrite, MultipleRead) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t dataLen = chunkSize * chunkNum;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
uint8_t readData[dataLen] = {};
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
}
ASSERT_EQ(0, memcmp(readData, data, dataLen));
}
/*
* Verify that multiple writes one after the other are handled correctly.
*/
TEST_F(UnsynchronizedWrite, MultipleWrite) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t dataLen = chunkSize * chunkNum;
ASSERT_LE(dataLen, mNumMessagesMax);
uint8_t data[dataLen];
initData(data, dataLen);
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
}
uint8_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_EQ(0, memcmp(readData, data, dataLen));
}
/*
* Write mNumMessagesMax - 1 messages into the FMQ and read them back.
* Then write mNumMessagesMax messages into the queue. This will cause a
* wrap around. Read and verify the data.
*/
TEST_F(UnsynchronizedWrite, ReadWriteWrapAround) {
size_t numMessages = mNumMessagesMax - 1;
std::vector<uint8_t> data(mNumMessagesMax);
std::vector<uint8_t> readData(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], numMessages));
ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_EQ(data, readData);
}

View file

@ -0,0 +1,892 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#ifndef GTEST_IS_THREADSAFE
#error "GTest did not detect pthread library."
#endif
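// The tests below run against a remote ITestMsgQ HIDL service and fork()
// child reader processes, so a thread-safe gtest build is required.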
#include <android/hardware/tests/msgq/1.0/ITestMsgQ.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
#include <hidl/ServiceManagement.h>
// libutils:
using android::OK;
using android::sp;
using android::status_t;
// generated
using android::hardware::tests::msgq::V1_0::ITestMsgQ;
// libhidl
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;
using android::hardware::MessageQueue;
using android::hardware::MQDescriptorSync;
using android::hardware::MQDescriptorUnsync;
using android::hardware::details::waitForHwService;
typedef MessageQueue<uint16_t, kSynchronizedReadWrite> MessageQueueSync;
typedef MessageQueue<uint16_t, kUnsynchronizedWrite> MessageQueueUnsync;
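// kSynchronizedReadWrite: a single reader, and writes fail once the queue is
// full. kUnsynchronizedWrite: multiple readers allowed, writes succeed even
// when the queue is full (overwriting unread data), and a reader that falls
// behind gets a failed read before it can recover, as exercised below.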
static sp<ITestMsgQ> waitGetTestService() {
// waitForHwService is required because ITestMsgQ is not in manifest.xml.
// "Real" HALs shouldn't be doing this.
waitForHwService(ITestMsgQ::descriptor, "default");
return ITestMsgQ::getService();
}
class UnsynchronizedWriteClientMultiProcess : public ::testing::Test {
protected:
void getQueue(MessageQueueUnsync** fmq, sp<ITestMsgQ>* service, bool setupQueue) {
*service = waitGetTestService();
*fmq = nullptr;
if (*service == nullptr) return;
if (!(*service)->isRemote()) return;
(*service)->getFmqUnsyncWrite(setupQueue,
[fmq](bool ret, const MQDescriptorUnsync<uint16_t>& in) {
ASSERT_TRUE(ret);
*fmq = new (std::nothrow) MessageQueueUnsync(in);
});
}
};
class SynchronizedReadWriteClient : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
mService = waitGetTestService();
ASSERT_NE(mService, nullptr);
ASSERT_TRUE(mService->isRemote());
mService->configureFmqSyncReadWrite([this](
bool ret, const MQDescriptorSync<uint16_t>& in) {
ASSERT_TRUE(ret);
mQueue = new (std::nothrow) MessageQueueSync(in);
});
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
}
sp<ITestMsgQ> mService;
MessageQueueSync* mQueue = nullptr;
size_t mNumMessagesMax = 0;
};
class UnsynchronizedWriteClient : public ::testing::Test {
protected:
virtual void TearDown() {
delete mQueue;
}
virtual void SetUp() {
mService = waitGetTestService();
ASSERT_NE(mService, nullptr);
ASSERT_TRUE(mService->isRemote());
mService->getFmqUnsyncWrite(true /* configureFmq */,
[this](bool ret, const MQDescriptorUnsync<uint16_t>& in) {
ASSERT_TRUE(ret);
mQueue = new (std::nothrow) MessageQueueUnsync(in);
});
ASSERT_NE(nullptr, mQueue);
ASSERT_TRUE(mQueue->isValid());
mNumMessagesMax = mQueue->getQuantumCount();
}
sp<ITestMsgQ> mService;
MessageQueueUnsync* mQueue = nullptr;
size_t mNumMessagesMax = 0;
};
/*
* Utility function to verify data read from the fast message queue.
*/
bool verifyData(uint16_t* data, size_t count) {
for (size_t i = 0; i < count; i++) {
if (data[i] != i) return false;
}
return true;
}
/*
* Utility function to initialize data to be written to the FMQ
*/
inline void initData(uint16_t* data, size_t count) {
for (size_t i = 0; i < count; i++) {
data[i] = i;
}
}
/*
* Verify that for an unsynchronized flavor of FMQ, multiple readers
* can recover from a write overflow condition.
*/
TEST_F(UnsynchronizedWriteClientMultiProcess, MultipleReadersAfterOverflow) {
const size_t dataLen = 16;
pid_t pid;
/* creating first reader process */
if ((pid = fork()) == 0) {
sp<ITestMsgQ> testService;
MessageQueueUnsync* queue = nullptr;
getQueue(&queue, &testService, true /* setupQueue */);
ASSERT_NE(testService, nullptr);
ASSERT_TRUE(testService->isRemote());
ASSERT_NE(queue, nullptr);
ASSERT_TRUE(queue->isValid());
size_t numMessagesMax = queue->getQuantumCount();
// The following two writes will cause a write overflow.
auto ret = testService->requestWriteFmqUnsync(numMessagesMax);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
ret = testService->requestWriteFmqUnsync(1);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
// The following read should fail due to the overflow.
std::vector<uint16_t> readData(numMessagesMax);
ASSERT_FALSE(queue->read(&readData[0], numMessagesMax));
/*
* Request another write to verify that the reader can recover from the
* overflow condition.
*/
ASSERT_LT(dataLen, numMessagesMax);
ret = testService->requestWriteFmqUnsync(dataLen);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
// Verify that the read is successful.
ASSERT_TRUE(queue->read(&readData[0], dataLen));
ASSERT_TRUE(verifyData(&readData[0], dataLen));
delete queue;
exit(0);
}
ASSERT_GT(pid, 0 /* parent should see PID greater than 0 for a good fork */);
int status;
// wait for the first reader process to exit.
ASSERT_EQ(pid, waitpid(pid, &status, 0 /* options */));
// creating second reader process.
if ((pid = fork()) == 0) {
sp<ITestMsgQ> testService;
MessageQueueUnsync* queue = nullptr;
getQueue(&queue, &testService, false /* setupQueue */);
ASSERT_NE(testService, nullptr);
ASSERT_TRUE(testService->isRemote());
ASSERT_NE(queue, nullptr);
ASSERT_TRUE(queue->isValid());
// This read should fail due to the write overflow.
std::vector<uint16_t> readData(dataLen);
ASSERT_FALSE(queue->read(&readData[0], dataLen));
/*
* Request another write to verify that the reader process can recover
* from the overflow condition.
*/
auto ret = testService->requestWriteFmqUnsync(dataLen);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
// verify that the read is successful.
ASSERT_TRUE(queue->read(&readData[0], dataLen));
ASSERT_TRUE(verifyData(&readData[0], dataLen));
delete queue;
exit(0);
}
ASSERT_GT(pid, 0 /* parent should see PID greater than 0 for a good fork */);
ASSERT_EQ(pid, waitpid(pid, &status, 0 /* options */));
}
/*
* Test that basic blocking works using readBlocking()/writeBlocking() APIs
* using the EventFlag object owned by FMQ.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWrite1) {
const size_t dataLen = 64;
uint16_t data[dataLen] = {0};
/*
* Request service to perform a blocking read. This call is oneway and will
* return immediately.
*/
mService->requestBlockingRead(dataLen);
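// Blocking write: wait on the FMQ_NOT_FULL event flag bit, wake readers via
// FMQ_NOT_EMPTY after writing, and give up after 5 seconds (5e9 ns).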
bool ret = mQueue->writeBlocking(data,
dataLen,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY),
5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
/*
* Test that basic blocking works using readBlocking()/writeBlocking() APIs
* using the EventFlag object owned by FMQ and using the default EventFlag
* notification bit mask.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWrite2) {
const size_t dataLen = 64;
std::vector<uint16_t> data(mNumMessagesMax);
/*
* Request service to perform a blocking read using default EventFlag
* notification bit mask. This call is oneway and will
* return immediately.
*/
mService->requestBlockingReadDefaultEventFlagBits(dataLen);
/* Cause a context switch to allow service to block */
sched_yield();
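// The short writeBlocking() overload uses the FMQ-owned event flag with the
// default notification bit mask.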
bool ret = mQueue->writeBlocking(&data[0],
dataLen);
ASSERT_TRUE(ret);
/*
* If the blocking read was successful, another write of size
* mNumMessagesMax will succeed.
*/
ret = mQueue->writeBlocking(&data[0], mNumMessagesMax, 5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
/*
* Test that repeated blocking reads and writes work using readBlocking()/writeBlocking() APIs
* using the EventFlag object owned by FMQ.
* Each write operation writes the same amount of data as a single read
* operation.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWriteRepeat1) {
const size_t dataLen = 64;
uint16_t data[dataLen] = {0};
/*
* Request service to perform a blocking read. This call is oneway and will
* return immediately.
*/
const size_t writeCount = 1024;
mService->requestBlockingReadRepeat(dataLen, writeCount);
for (size_t i = 0; i < writeCount; i++) {
bool ret = mQueue->writeBlocking(
data,
dataLen,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY),
5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
}
/*
* Test that repeated blocking reads and writes work using readBlocking()/writeBlocking() APIs
* using the EventFlag object owned by FMQ. Each read operation reads twice the
* amount of data as a single write.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWriteRepeat2) {
const size_t dataLen = 64;
uint16_t data[dataLen] = {0};
/*
* Request service to perform a blocking read. This call is oneway and will
* return immediately.
*/
const size_t writeCount = 1024;
mService->requestBlockingReadRepeat(dataLen*2, writeCount/2);
for (size_t i = 0; i < writeCount; i++) {
bool ret = mQueue->writeBlocking(
data,
dataLen,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY),
5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
}
/*
* Test that repeated blocking reads and writes work using readBlocking()/writeBlocking() APIs
* using the EventFlag object owned by FMQ. Each write operation writes twice
* the amount of data consumed by a single read.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWriteRepeat3) {
const size_t dataLen = 64;
uint16_t data[dataLen] = {0};
/*
* Request service to perform a blocking read. This call is oneway and will
* return immediately.
*/
size_t writeCount = 1024;
mService->requestBlockingReadRepeat(dataLen/2, writeCount*2);
for (size_t i = 0; i < writeCount; i++) {
bool ret = mQueue->writeBlocking(
data,
dataLen,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY),
5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
}
/*
* Test that writeBlocking()/readBlocking() APIs do not block on
* attempts to write/read 0 messages and return true.
*/
TEST_F(SynchronizedReadWriteClient, BlockingReadWriteZeroMessages) {
uint16_t data = 0;
/*
* Trigger a blocking write for zero messages with no timeout.
*/
bool ret = mQueue->writeBlocking(
&data,
0,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY));
ASSERT_TRUE(ret);
/*
* Trigger a blocking read for zero messages with no timeout.
*/
ret = mQueue->readBlocking(&data,
0,
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_EMPTY));
ASSERT_TRUE(ret);
}
/*
* Request mService to write a small number of messages
* to the FMQ. Read and verify data.
*/
TEST_F(SynchronizedReadWriteClient, SmallInputReaderTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
bool ret = mService->requestWriteFmqSync(dataLen);
ASSERT_TRUE(ret);
uint16_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_TRUE(verifyData(readData, dataLen));
}
/*
* Request mService to write a small number of messages
* to the FMQ. Read and verify each message using
* the beginRead()/commitRead() APIs.
*/
TEST_F(SynchronizedReadWriteClient, SmallInputReaderTest2) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
auto ret = mService->requestWriteFmqSync(dataLen);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
size_t firstRegionLength = first.getLength();
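// A MemTransaction exposes up to two contiguous regions because the ring
// buffer may wrap around; indexes past the first region fall into the second.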
for (size_t i = 0; i < dataLen; i++) {
if (i < firstRegionLength) {
ASSERT_EQ(i, *(first.getAddress() + i));
} else {
ASSERT_EQ(i, *(second.getAddress() + i - firstRegionLength));
}
}
ASSERT_TRUE(mQueue->commitRead(dataLen));
}
/*
* Write a small number of messages to FMQ. Request
* mService to read and verify that the write was successful.
*/
TEST_F(SynchronizedReadWriteClient, SmallInputWriterTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
size_t originalCount = mQueue->availableToWrite();
uint16_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
bool ret = mService->requestReadFmqSync(dataLen);
ASSERT_TRUE(ret);
size_t availableCount = mQueue->availableToWrite();
ASSERT_EQ(originalCount, availableCount);
}
/*
* Write a small number of messages to FMQ using the beginWrite()/commitWrite()
* APIs. Request mService to read and verify that the write was successful.
*/
TEST_F(SynchronizedReadWriteClient, SmallInputWriterTest2) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
size_t originalCount = mQueue->availableToWrite();
uint16_t data[dataLen];
initData(data, dataLen);
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));
auto first = tx.getFirstRegion();
auto second = tx.getSecondRegion();
size_t firstRegionLength = first.getLength();
uint16_t* firstBaseAddress = first.getAddress();
uint16_t* secondBaseAddress = second.getAddress();
for (size_t i = 0; i < dataLen; i++) {
if (i < firstRegionLength) {
*(firstBaseAddress + i) = i;
} else {
*(secondBaseAddress + i - firstRegionLength) = i;
}
}
ASSERT_TRUE(mQueue->commitWrite(dataLen));
auto ret = mService->requestReadFmqSync(dataLen);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
size_t availableCount = mQueue->availableToWrite();
ASSERT_EQ(originalCount, availableCount);
}
/*
* Verify that the FMQ is empty and read fails when it is empty.
*/
TEST_F(SynchronizedReadWriteClient, ReadWhenEmpty) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t numMessages = 2;
ASSERT_LE(numMessages, mNumMessagesMax);
uint16_t readData[numMessages];
ASSERT_FALSE(mQueue->read(readData, numMessages));
}
/*
* Verify FMQ is empty.
* Write enough messages to fill it.
* Verify that availableToWrite() returns zero.
* Try writing another message and verify that
* the attempted write was unsuccessful. Request mService
* to read and verify the messages in the FMQ.
*/
TEST_F(SynchronizedReadWriteClient, WriteWhenFull) {
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_FALSE(mQueue->write(&data[0], 1));
bool ret = mService->requestReadFmqSync(mNumMessagesMax);
ASSERT_TRUE(ret);
}
/*
* Verify FMQ is empty.
* Request mService to write data equal to queue size.
* Read and verify data in mQueue.
*/
TEST_F(SynchronizedReadWriteClient, LargeInputTest1) {
bool ret = mService->requestWriteFmqSync(mNumMessagesMax);
ASSERT_TRUE(ret);
std::vector<uint16_t> readData(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
ASSERT_TRUE(verifyData(&readData[0], mNumMessagesMax));
}
/*
* Request mService to write more than maximum number of messages to the FMQ.
* Verify that the write fails. Verify that availableToRead() method
* still returns 0 and verify that attempt to read fails.
*/
TEST_F(SynchronizedReadWriteClient, LargeInputTest2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t numMessages = 2048;
ASSERT_GT(numMessages, mNumMessagesMax);
bool ret = mService->requestWriteFmqSync(numMessages);
ASSERT_FALSE(ret);
uint16_t readData;
ASSERT_EQ(0UL, mQueue->availableToRead());
ASSERT_FALSE(mQueue->read(&readData, 1));
}
/*
* Write mNumMessagesMax messages until the FMQ is full.
* Verify that availableToWrite() returns zero and that
* another write attempt fails.
* Request mService to read. Verify read count.
*/
TEST_F(SynchronizedReadWriteClient, LargeInputTest3) {
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_FALSE(mQueue->write(&data[0], 1));
bool ret = mService->requestReadFmqSync(mNumMessagesMax);
ASSERT_TRUE(ret);
}
/*
* Confirm that the FMQ is empty. Request mService to write to FMQ.
* Do multiple reads to empty FMQ and verify data.
*/
TEST_F(SynchronizedReadWriteClient, MultipleRead) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t numMessages = chunkSize * chunkNum;
ASSERT_LE(numMessages, mNumMessagesMax);
size_t availableToRead = mQueue->availableToRead();
size_t expectedCount = 0;
ASSERT_EQ(expectedCount, availableToRead);
bool ret = mService->requestWriteFmqSync(numMessages);
ASSERT_TRUE(ret);
uint16_t readData[numMessages] = {};
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
}
ASSERT_TRUE(verifyData(readData, numMessages));
}
/*
* Write to FMQ in bursts.
* Request mService to read data. Verify the read was successful.
*/
TEST_F(SynchronizedReadWriteClient, MultipleWrite) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t numMessages = chunkSize * chunkNum;
ASSERT_LE(numMessages, mNumMessagesMax);
uint16_t data[numMessages];
initData(&data[0], numMessages);
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
}
bool ret = mService->requestReadFmqSync(numMessages);
ASSERT_TRUE(ret);
}
/*
* Write enough messages into the FMQ to fill half of it.
* Request mService to read back the same.
* Write mNumMessagesMax messages into the queue. This should cause a
* wrap around. Request mService to read and verify the data.
*/
TEST_F(SynchronizedReadWriteClient, ReadWriteWrapAround) {
size_t numMessages = mNumMessagesMax / 2;
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], numMessages));
bool ret = mService->requestReadFmqSync(numMessages);
ASSERT_TRUE(ret);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ret = mService->requestReadFmqSync(mNumMessagesMax);
ASSERT_TRUE(ret);
}
/*
* Use the beginWrite()/commitWrite()/getSlot() APIs to verify that
* wrap-arounds are handled correctly.
* Write enough messages into the FMQ to fill half of it
* and request mService to read back the same.
* Write mNumMessagesMax messages into the queue. This will cause a
* wrap around. Request mService to read and verify the data.
*/
TEST_F(SynchronizedReadWriteClient, ReadWriteWrapAround2) {
size_t numMessages = mNumMessagesMax / 2;
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], numMessages));
auto ret = mService->requestReadFmqSync(numMessages);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
/*
* The next write and read will have to deal with wrap-arounds.
*/
MessageQueueSync::MemTransaction tx;
ASSERT_TRUE(mQueue->beginWrite(mNumMessagesMax, &tx));
ASSERT_EQ(tx.getFirstRegion().getLength() + tx.getSecondRegion().getLength(), mNumMessagesMax);
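// getSlot(i) returns a pointer to the i-th item of the transaction, mapping
// indexes across the wrap-around into the second region as needed.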
for (size_t i = 0; i < mNumMessagesMax; i++) {
uint16_t* ptr = tx.getSlot(i);
*ptr = data[i];
}
ASSERT_TRUE(mQueue->commitWrite(mNumMessagesMax));
ret = mService->requestReadFmqSync(mNumMessagesMax);
ASSERT_TRUE(ret.isOk());
ASSERT_TRUE(ret);
}
/*
* Request mService to write a small number of messages
* to the FMQ. Read and verify data.
*/
TEST_F(UnsynchronizedWriteClient, SmallInputReaderTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
bool ret = mService->requestWriteFmqUnsync(dataLen);
ASSERT_TRUE(ret);
uint16_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_TRUE(verifyData(readData, dataLen));
}
/*
* Write a small number of messages to FMQ. Request
* mService to read and verify that the write was successful.
*/
TEST_F(UnsynchronizedWriteClient, SmallInputWriterTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
uint16_t data[dataLen];
initData(data, dataLen);
ASSERT_TRUE(mQueue->write(data, dataLen));
bool ret = mService->requestReadFmqUnsync(dataLen);
ASSERT_TRUE(ret);
}
/*
* Verify that the FMQ is empty and read fails when it is empty.
*/
TEST_F(UnsynchronizedWriteClient, ReadWhenEmpty) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t numMessages = 2;
ASSERT_LE(numMessages, mNumMessagesMax);
uint16_t readData[numMessages];
ASSERT_FALSE(mQueue->read(readData, numMessages));
}
/*
* Verify FMQ is empty.
* Write enough messages to fill it.
* Verify that availableToWrite() returns zero.
* Try writing another message and verify that
* the attempted write is successful. Request mService
* to read the messages in the FMQ and verify that the read is unsuccessful.
*/
TEST_F(UnsynchronizedWriteClient, WriteWhenFull) {
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_TRUE(mQueue->write(&data[0], 1));
bool ret = mService->requestReadFmqUnsync(mNumMessagesMax);
ASSERT_FALSE(ret);
}
/*
* Verify FMQ is empty.
* Request mService to write data equal to queue size.
* Read and verify data in mQueue.
*/
TEST_F(UnsynchronizedWriteClient, LargeInputTest1) {
bool ret = mService->requestWriteFmqUnsync(mNumMessagesMax);
ASSERT_TRUE(ret);
std::vector<uint16_t> data(mNumMessagesMax);
ASSERT_TRUE(mQueue->read(&data[0], mNumMessagesMax));
ASSERT_TRUE(verifyData(&data[0], mNumMessagesMax));
}
/*
* Request mService to write more than maximum number of messages to the FMQ.
* Verify that the write fails. Verify that availableToRead() method
* still returns 0 and verify that attempt to read fails.
*/
TEST_F(UnsynchronizedWriteClient, LargeInputTest2) {
ASSERT_EQ(0UL, mQueue->availableToRead());
const size_t numMessages = mNumMessagesMax + 1;
bool ret = mService->requestWriteFmqUnsync(numMessages);
ASSERT_FALSE(ret);
uint16_t readData;
ASSERT_EQ(0UL, mQueue->availableToRead());
ASSERT_FALSE(mQueue->read(&readData, 1));
}
/*
* Write mNumMessagesMax messages until the FMQ is full.
* Verify that availableToWrite() returns zero and that
* another write attempt is still successful.
* Request mService to read. Verify that the read is unsuccessful.
* Perform another write and verify that the subsequent read is successful
* to confirm that the reader process can recover from the error condition.
*/
TEST_F(UnsynchronizedWriteClient, LargeInputTest3) {
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ASSERT_EQ(0UL, mQueue->availableToWrite());
ASSERT_TRUE(mQueue->write(&data[0], 1));
bool ret = mService->requestReadFmqUnsync(mNumMessagesMax);
ASSERT_FALSE(ret);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ret = mService->requestReadFmqUnsync(mNumMessagesMax);
ASSERT_TRUE(ret);
}
/*
* Confirm that the FMQ is empty. Request mService to write to FMQ.
* Do multiple reads to empty FMQ and verify data.
*/
TEST_F(UnsynchronizedWriteClient, MultipleRead) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t numMessages = chunkSize * chunkNum;
ASSERT_LE(numMessages, mNumMessagesMax);
size_t availableToRead = mQueue->availableToRead();
size_t expectedCount = 0;
ASSERT_EQ(expectedCount, availableToRead);
bool ret = mService->requestWriteFmqUnsync(numMessages);
ASSERT_TRUE(ret);
uint16_t readData[numMessages] = {};
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
}
ASSERT_TRUE(verifyData(readData, numMessages));
}
/*
* Write to FMQ in bursts.
* Request mService to read data, verify that it was successful.
*/
TEST_F(UnsynchronizedWriteClient, MultipleWrite) {
const size_t chunkSize = 100;
const size_t chunkNum = 5;
const size_t numMessages = chunkSize * chunkNum;
ASSERT_LE(numMessages, mNumMessagesMax);
uint16_t data[numMessages];
initData(data, numMessages);
for (size_t i = 0; i < chunkNum; i++) {
ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
}
bool ret = mService->requestReadFmqUnsync(numMessages);
ASSERT_TRUE(ret);
}
/*
* Write enough messages into the FMQ to fill half of it.
* Request mService to read back the same.
* Write mNumMessagesMax messages into the queue. This should cause a
* wrap around. Request mService to read and verify the data.
*/
TEST_F(UnsynchronizedWriteClient, ReadWriteWrapAround) {
size_t numMessages = mNumMessagesMax / 2;
std::vector<uint16_t> data(mNumMessagesMax);
initData(&data[0], mNumMessagesMax);
ASSERT_TRUE(mQueue->write(&data[0], numMessages));
bool ret = mService->requestReadFmqUnsync(numMessages);
ASSERT_TRUE(ret);
ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
ret = mService->requestReadFmqUnsync(mNumMessagesMax);
ASSERT_TRUE(ret);
}
/*
* Request mService to write a small number of messages
* to the FMQ. Read and verify data from two threads configured
* as readers to the FMQ.
*/
TEST_F(UnsynchronizedWriteClient, SmallInputMultipleReaderTest) {
auto desc = mQueue->getDesc();
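// Create a second reader queue from the same descriptor; the unsynchronized
// flavor allows multiple readers to attach to one ring buffer.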
std::unique_ptr<MessageQueue<uint16_t, kUnsynchronizedWrite>> mQueue2(
new (std::nothrow) MessageQueue<uint16_t, kUnsynchronizedWrite>(*desc));
ASSERT_NE(nullptr, mQueue2.get());
const size_t dataLen = 16;
ASSERT_LE(dataLen, mNumMessagesMax);
bool ret = mService->requestWriteFmqUnsync(dataLen);
ASSERT_TRUE(ret);
pid_t pid;
if ((pid = fork()) == 0) {
/* child process */
uint16_t readData[dataLen] = {};
ASSERT_TRUE(mQueue2->read(readData, dataLen));
ASSERT_TRUE(verifyData(readData, dataLen));
exit(0);
} else {
ASSERT_GT(pid,
0 /* parent should see PID greater than 0 for a good fork */);
uint16_t readData[dataLen] = {};
ASSERT_TRUE(mQueue->read(readData, dataLen));
ASSERT_TRUE(verifyData(readData, dataLen));
}
}
/*
* Request mService to write into the FMQ until it is full.
* Request mService to do another write and verify it is successful.
* Use two reader processes to read and verify that both fail.
*/
TEST_F(UnsynchronizedWriteClient, OverflowNotificationTest) {
auto desc = mQueue->getDesc();
std::unique_ptr<MessageQueue<uint16_t, kUnsynchronizedWrite>> mQueue2(
new (std::nothrow) MessageQueue<uint16_t, kUnsynchronizedWrite>(*desc));
ASSERT_NE(nullptr, mQueue2.get());
bool ret = mService->requestWriteFmqUnsync(mNumMessagesMax);
ASSERT_TRUE(ret);
ret = mService->requestWriteFmqUnsync(1);
ASSERT_TRUE(ret);
pid_t pid;
if ((pid = fork()) == 0) {
/* child process */
std::vector<uint16_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue2->read(&readData[0], mNumMessagesMax));
exit(0);
} else {
ASSERT_GT(pid, 0/* parent should see PID greater than 0 for a good fork */);
std::vector<uint16_t> readData(mNumMessagesMax);
ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}
}