Introduction
This article continues from <<Android Audio Low-Latency mmap Introduction (2)>> and keeps exploring the aaudio mmap mechanism. The previous article covered the differences between shared mode and exclusive mode; this one walks through aaudio's data-driven flow.
aaudio mmap introduction
The data-driven flow begins with createThread_l in AudioStreamInternal, which creates the data-driving thread; the task it runs is as follows:
static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("oboe_callback_thread, stream = %p", stream);
    if (stream != nullptr) {
        return stream->callbackLoop();
    } else {
        return nullptr;
    }
}
Let's look at the capture side's callbackLoop first:
// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. The stream can be disconnected
                // or timed out.
                processCommands();
                result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return nullptr;
}
We assume here that data is read passively, i.e. the stream relies on the callback; in that case the while loop above is what drives the client side.
The loop logic is as follows:
- First compute the timeout corresponding to reading mCallbackFrames frames
- Read the data from the buffer
- Hand the data to the application via the callback
Let's start with the first step:
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
    // Wait for at least a second or some number of callbacks to join the thread.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
                                  * framesPerOperation
                                  * AAUDIO_NANOS_PER_SECOND)
                                 / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}
Computing the timeout for mCallbackFrames amounts to taking the duration of the frames delivered per callback and multiplying it by a fixed factor: to read, say, 20 ms of data, we wait at most that multiple of 20 ms (subject to the MIN_TIMEOUT_NANOS floor).
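As a worked example, here is a minimal compilable sketch of the same formula. MIN_TIMEOUT_OPERATIONS = 4 and MIN_TIMEOUT_NANOS = 1 second match recent AOSP as far as I can tell, but treat them as assumptions; the point is that a 20 ms burst at 48 kHz yields a raw timeout of 80 ms, so the 1-second floor wins:

// Worked example of calculateReasonableTimeout(), with assumed constants.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr int64_t kNanosPerSecond = 1000000000LL;     // AAUDIO_NANOS_PER_SECOND
    constexpr int64_t kMinTimeoutNanos = kNanosPerSecond; // MIN_TIMEOUT_NANOS (assumed 1 s)
    constexpr int64_t kMinTimeoutOperations = 4;          // MIN_TIMEOUT_OPERATIONS (assumed)
    const int32_t sampleRate = 48000;
    const int32_t framesPerOperation = 960;               // 20 ms of audio at 48 kHz

    // 4 * 960 * 1e9 / 48000 = 80 ms, which is below the 1 s floor...
    int64_t timeoutNanos = kMinTimeoutOperations * framesPerOperation
                           * kNanosPerSecond / sampleRate;
    if (timeoutNanos < kMinTimeoutNanos) {
        timeoutNanos = kMinTimeoutNanos;                  // ...so we end up waiting 1 s.
    }
    printf("timeout = %lld ms\n", (long long)(timeoutNanos / 1000000));
    return 0;
}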
Next, the data read:
// Read the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}
Following that into processData:
// Read or write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    if (isDisconnected()) {
        return AAUDIO_ERROR_DISCONNECTED;
    }
    if (!mInService &&
        AAudioBinderClient::getInstance().getServiceLifetimeId() != getServiceLifetimeId()) {
        // The service lifetime id will be changed whenever the binder died. In that case, if
        // the service lifetime id from AAudioBinderClient is different from the cached one,
        // returns AAUDIO_ERROR_DISCONNECTED.
        // Note that only compare the service lifetime id if it is not in service as the streams
        // in service will all be gone when aaudio service dies.
        mClockModel.stop(AudioClock::getNanoseconds());
        // Set the stream as disconnected as the service lifetime id will only change when
        // the binder dies.
        setDisconnected();
        return AAUDIO_ERROR_DISCONNECTED;
    }

    const char * traceName = "aaProc";
    const char * fifoName = "aaRdy";
    ATRACE_BEGIN(traceName);
    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* audioData = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    const int64_t entryTimeNanos = currentTimeNanos;
    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;

    // Loop until all the data has been processed or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to processDataNow() will not block. It will just process as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                                                         currentTimeNanos, &wakeTimeNanos);
        if (framesProcessed < 0) {
            result = framesProcessed;
            break;
        }
        framesLeft -= (int32_t) framesProcessed;
        audioData += framesProcessed * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (wakeTimeNanos != 0) {
            if (!mAudioEndpoint->isFreeRunning()) {
                // If there is software on the other end of the FIFO then it may get delayed.
                // So wake up just a little after we expect it to be ready.
                wakeTimeNanos += mWakeupDelayNanos;
            }

            currentTimeNanos = AudioClock::getNanoseconds();
            int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
            // Guarantee a minimum sleep time.
            if (wakeTimeNanos < earliestWakeTime) {
                wakeTimeNanos = earliestWakeTime;
            }

            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGW("processData(): entered at %lld nanos, currently %lld",
                      (long long) entryTimeNanos, (long long) currentTimeNanos);
                ALOGW("processData(): TIMEOUT after %lld nanos",
                      (long long) timeoutNanoseconds);
                ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
                      (long long) wakeTimeNanos, (long long) deadlineNanos);
                ALOGW("processData(): past deadline by %d micros",
                      (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
                mClockModel.dump();
                mAudioEndpoint->dump();
                break;
            }

            if (ATRACE_ENABLED()) {
                int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
                ATRACE_INT(fifoName, fullFrames);
                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
            }

            AudioClock::sleepUntilNanoTime(wakeTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    if (ATRACE_ENABLED()) {
        int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
        ATRACE_INT(fifoName, fullFrames);
    }

    // return error or framesProcessed
    (void) loopCount;
    ATRACE_END();
    return (result < 0) ? result : numFrames - framesLeft;
}
processData contains the following logic:
- If the peer service (audioserver) has crashed at some point, return a disconnected error
- Read the data from the buffer in a paced loop (see the sketch below)
- Record systrace events, so developers can watch the buffer state in real time in systrace
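Before diving into processDataNow, here is a compilable miniature (not the AOSP code) of that pacing pattern: drain what is available without blocking, then sleep until the next burst is expected. The FIFO is stubbed with a counter; in AAudio the producer is the DSP writing into the mmap buffer:

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

int main() {
    using Clock = std::chrono::steady_clock;
    const auto deadline = Clock::now() + std::chrono::seconds(1);  // like deadlineNanos
    int32_t framesLeft = 960;   // frames the caller wants (20 ms @ 48 kHz)
    int32_t fifoFrames = 0;     // frames currently readable (stand-in for the mmap FIFO)

    while (framesLeft > 0) {
        // "processDataNow": take whatever is there, never block.
        const int32_t processed = std::min(framesLeft, fifoFrames);
        framesLeft -= processed;
        fifoFrames -= processed;
        if (framesLeft == 0) break;

        // Sleep until the next burst should have been written
        // (AAudio computes this wake time from its clock model).
        const auto wakeTime = Clock::now() + std::chrono::milliseconds(2);
        if (wakeTime > deadline) { printf("timeout\n"); break; }
        std::this_thread::sleep_until(wakeTime);
        fifoFrames += 96;       // pretend the DSP wrote another burst
    }
    printf("framesLeft = %d\n", framesLeft);
    return 0;
}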
Next, processDataNow:
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                           int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
        // This might be caused by resampling.
        // We want to read the FIFO after the latest possible time
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition(0 /*serverMargin*/);
        mNeedCatchUp.acknowledge();
    }

    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning()
            && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //      numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?
                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
                        getDeviceFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesProcessed;
}
This is where the client interacts with the server side. The flow consists of:
- Processing the commands coming from the server
- Synchronizing the read/write positions with the server
- Reading the captured data
Let's go through these one by one:
// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (!mAudioEndpoint) {
            break;
        }
        if (mAudioEndpoint->readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
            case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
                result = onTimestampService(&message);
                break;
            case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
                result = onTimestampHardware(&message);
                break;
            case AAudioServiceMessage::code::EVENT:
                result = onEventFromServer(&message);
                break;
            default:
                ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
                result = AAUDIO_ERROR_INTERNAL;
                break;
        }
    }
    return result;
}
As mentioned in an earlier article, when an mmap stream is created the application side receives two shared-memory fds: one holds commands and the other holds data. What happens here is reading commands from that first region. So when does the server send commands? When the server opens the stream, it starts a thread dedicated to sending commands, as follows:
aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request) {
    ...
    // Make sure this object does not get deleted before the run() method
    // can protect it by making a strong pointer.
    mCommandQueue.startWaiting();
    mThreadEnabled = true;
    incStrong(nullptr); // See run() method.
    result = mCommandThread.start(this);
}
mCommandThread is that command thread; next, the start logic:
void AAudioThread::dispatch() {
    if (mRunnable != nullptr) {
        mRunnable->run();
    } else {
        run();
    }
}

aaudio_result_t AAudioThread::start(Runnable *runnable) {
    if (mHasThread) {
        ALOGE("start() - mHasThread already true");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // mRunnable will be read by the new thread when it starts. A std::thread is created.
    mRunnable = runnable;
    mHasThread = true;
    mThread = std::thread(&AAudioThread::dispatch, this);
    return AAUDIO_OK;
}
Since AAudioServiceStreamBase passed this to AAudioThread, the Runnable here is the AAudioServiceStreamBase object, so the run() that executes is the logic in AAudioServiceStreamBase:
void AAudioServiceStreamBase::run() {
    ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
    // Hold onto the ref counted stream until the end.
    android::sp<AAudioServiceStreamBase> holdStream(this);
    TimestampScheduler timestampScheduler;
    int64_t nextTimestampReportTime;
    int64_t nextDataReportTime;
    // When to try to enter standby.
    int64_t standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
    // Balance the incStrong from when the thread was launched.
    holdStream->decStrong(nullptr);
    // Taking mLock while starting the thread. All the operation must be able to
    // run with holding the lock.
    std::scoped_lock<std::mutex> _l(mLock);

    int32_t loopCount = 0;
    while (mThreadEnabled.load()) {
        loopCount++;
        int64_t timeoutNanos = -1; // wait forever
        if (isDisconnected_l() || isIdle_l()) {
            if (isStandbyImplemented() && !isStandby_l()) {
                // If not in standby mode, wait until standby time.
                timeoutNanos = standbyTime - AudioClock::getNanoseconds();
                timeoutNanos = std::max<int64_t>(0, timeoutNanos);
            }
            // Otherwise, keep `timeoutNanos` as -1 to wait forever until next command.
        } else if (isRunning()) {
            timeoutNanos = std::min(nextTimestampReportTime, nextDataReportTime)
                    - AudioClock::getNanoseconds();
            timeoutNanos = std::max<int64_t>(0, timeoutNanos);
        }

        auto command = mCommandQueue.waitForCommand(timeoutNanos);
        if (!mThreadEnabled) {
            // Break the loop if the thread is disabled.
            break;
        }

        // Is it time to send timestamps?
        if (isRunning() && !isDisconnected_l()) {
            auto currentTimestamp = AudioClock::getNanoseconds();
            if (currentTimestamp >= nextDataReportTime) {
                reportData_l();
                nextDataReportTime = nextDataReportTime_l();
            }
            if (currentTimestamp >= nextTimestampReportTime) {
                // It is time to update timestamp.
                if (sendCurrentTimestamp_l() != AAUDIO_OK) {
                    ALOGE("Failed to send current timestamp, stop updating timestamp");
                    disconnect_l();
                }
                nextTimestampReportTime = timestampScheduler.nextAbsoluteTime();
            }
        }
        ...
}
There are two command queues here. mCommandQueue is internal to the server, used to avoid thread-safety issues, and never crosses the process boundary; mUpMessageQueue is the one that delivers commands to the application side. Let's first look at how timestamps are synchronized; the rest of this method mainly handles the server's internal commands, which were covered before and all work similarly.
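For intuition, here is a minimal sketch (not the real AOSP class) of an in-process command queue in the spirit of mCommandQueue: binder threads post commands, and the stream's command thread consumes them, with a timeout so it can also wake up just to report timestamps:

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <optional>

template <typename Command>
class MiniCommandQueue {
public:
    void post(Command c) {
        { std::lock_guard<std::mutex> l(mLock); mQueue.push_back(std::move(c)); }
        mCond.notify_one();
    }
    // timeoutNanos < 0 means wait forever, mirroring waitForCommand(-1).
    std::optional<Command> waitForCommand(int64_t timeoutNanos) {
        std::unique_lock<std::mutex> l(mLock);
        auto ready = [this] { return !mQueue.empty(); };
        if (timeoutNanos < 0) {
            mCond.wait(l, ready);
        } else {
            mCond.wait_for(l, std::chrono::nanoseconds(timeoutNanos), ready);
        }
        if (mQueue.empty()) return std::nullopt;  // timed out: caller reports timestamps
        Command c = std::move(mQueue.front());
        mQueue.pop_front();
        return c;
    }
private:
    std::mutex mLock;
    std::condition_variable mCond;
    std::deque<Command> mQueue;
};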
As the loop shows, it first checks nextDataReportTime: once the current time passes nextDataReportTime it calls reportData_l and then refreshes nextDataReportTime. First, reportData_l:
void AAudioServiceStreamMMAP::reportData_l() {
    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        ALOGE("%s() has no endpoint", __func__);
        return;
    }

    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
    return serviceEndpointMMAP->reportData();
}
Notice that this does not notify the application side; instead the data is reported down toward the HAL:
void AAudioServiceEndpointMMAP::reportData() {
    const std::lock_guard<std::mutex> lock(mMmapStreamLock);

    if (mMmapStream == nullptr) {
        // This must not happen
        ALOGE("%s() invalid state, mmap stream is not initialized", __func__);
        return;
    }

    auto fifo = mAudioDataWrapper->getFifoBuffer();
    if (fifo == nullptr) {
        ALOGE("%s() fifo buffer is not initialized, cannot report data", __func__);
        return;
    }

    WrappingBuffer wrappingBuffer;
    fifo_frames_t framesAvailable = fifo->getFullDataAvailable(&wrappingBuffer);
    for (size_t i = 0; i < WrappingBuffer::SIZE; ++i) {
        if (wrappingBuffer.numFrames[i] > 0) {
            mMmapStream->reportData(wrappingBuffer.data[i], wrappingBuffer.numFrames[i]);
        }
    }
    fifo->advanceReadIndex(framesAvailable);
}
mAudioDataWrapper here wraps the memory shared with the HAL, and WrappingBuffer describes the readable data within it. Since the shared memory is used as a ring buffer, a contiguous run of data may be split into two segments, and WrappingBuffer represents the two segments separately. From this you can infer that the purpose of this logic is to report the readable data to the HAL side, and also that it only applies to playback.
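To make the two-segment idea concrete, here is a small sketch (hypothetical names, not the AOSP FifoBuffer API) of how a ring-buffer read that crosses the wrap point splits into two contiguous parts:

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct WrappingParts {
    const uint8_t *data[2];
    int32_t numFrames[2];
};

// capacity is the ring size in frames; the counters increase monotonically.
WrappingParts getFullData(const uint8_t *ring, int32_t capacity,
                          int64_t readCounter, int64_t writeCounter,
                          int32_t bytesPerFrame) {
    WrappingParts parts{{nullptr, nullptr}, {0, 0}};
    const int32_t available = (int32_t)(writeCounter - readCounter);
    const int32_t readIndex = (int32_t)(readCounter % capacity);
    const int32_t firstPart = std::min(available, capacity - readIndex);
    parts.data[0] = ring + readIndex * bytesPerFrame;   // up to the end of the ring
    parts.numFrames[0] = firstPart;
    parts.data[1] = ring;                               // wrapped part restarts at offset 0
    parts.numFrames[1] = available - firstPart;         // 0 when no wrap occurred
    return parts;
}

int main() {
    uint8_t ring[1024] = {};
    // 300 readable frames starting at index 900 of a 1024-frame ring:
    // split into 124 frames at the tail and 176 frames at the head.
    WrappingParts p = getFullData(ring, 1024, 900, 1200, 1);
    printf("part0 = %d frames, part1 = %d frames\n", p.numFrames[0], p.numFrames[1]);
    return 0;
}

With that picture in mind, here is the playback sink the data is reported to: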
status_t MmapPlaybackThread::reportData(const void* buffer, size_t frameCount) {
    // Send to MelProcessor for sound dose measurement.
    auto processor = mMelProcessor.load();
    if (processor) {
        processor->process(buffer, frameCount * mFrameSize);
    }
    return NO_ERROR;
}
mMelProcessor here is used to compute MEL (momentary exposure levels) for sound-dose monitoring.
Next, nextDataReportTime_l:
int64_t AAudioServiceStreamMMAP::nextDataReportTime_l() {
    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        ALOGE("%s() has no endpoint", __func__);
        return std::numeric_limits<int64_t>::max();
    }
    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
    return serviceEndpointMMAP->nextDataReportTime();
}
This again stays inside the service:
int64_t AAudioServiceEndpointMMAP::nextDataReportTime() {
    return getDirection() == AAUDIO_DIRECTION_OUTPUT
            ? AudioClock::getNanoseconds() + mDataReportOffsetNanos
            : std::numeric_limits<int64_t>::max();
}
This also confirms the earlier inference that the path applies only to playback: the next report time is just the current time plus mDataReportOffsetNanos, a value fixed when the stream is opened:
// If the position is not updated while the timestamp is updated for more than a certain amount,
// the timestamp reported from the HAL may not be accurate. Here, a timestamp grace period is
// set as 5 burst size. We may want to update this value if there is any report from OEMs saying
// that is too short.
static constexpr int kTimestampGraceBurstCount = 5;
mTimestampGracePeriodMs = ((int64_t) kTimestampGraceBurstCount * mFramesPerBurst
        * AAUDIO_MILLIS_PER_SECOND) / getSampleRate();
mDataReportOffsetNanos = ((int64_t)mTimestampGracePeriodMs) * AAUDIO_NANOS_PER_MILLISECOND;
So the report interval corresponds to the duration of 5 bursts; if nothing has been updated within that window, the HAL may be misbehaving.
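As a worked example, assume a typical low-latency configuration of 96 frames per burst at 48 kHz (the actual values vary per device): mTimestampGracePeriodMs = 5 * 96 * 1000 / 48000 = 10 ms, so mDataReportOffsetNanos comes out to 10,000,000 ns and data gets reported roughly every 10 ms.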
Now return to AAudioServiceStreamBase and continue with sendCurrentTimestamp_l:
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
    AAudioServiceMessage command;
    // It is not worth filling up the queue with timestamps.
    // That can cause the stream to get suspended.
    // So just drop the timestamp if the queue is getting full.
    if (isUpMessageQueueBusy()) {
        return AAUDIO_OK;
    }

    // Send a timestamp for the clock model.
    aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
                                                      &command.timestamp.timestamp);
    if (result == AAUDIO_OK) {
        ALOGV("%s() SERVICE %8lld at %lld", __func__,
              (long long) command.timestamp.position,
              (long long) command.timestamp.timestamp);
        command.what = AAudioServiceMessage::code::TIMESTAMP_SERVICE;
        result = writeUpMessageQueue(&command);

        if (result == AAUDIO_OK) {
            // Send a hardware timestamp for presentation time.
            result = getHardwareTimestamp_l(&command.timestamp.position,
                                            &command.timestamp.timestamp);
            if (result == AAUDIO_OK) {
                ALOGV("%s() HARDWARE %8lld at %lld", __func__,
                      (long long) command.timestamp.position,
                      (long long) command.timestamp.timestamp);
                command.what = AAudioServiceMessage::code::TIMESTAMP_HARDWARE;
                result = writeUpMessageQueue(&command);
            }
        }
    }

    if (result == AAUDIO_ERROR_UNAVAILABLE) { // TODO review best error code
        result = AAUDIO_OK; // just not available yet, try again later
    }
    return result;
}
Here we can see the TIMESTAMP_SERVICE and TIMESTAMP_HARDWARE commands being sent. So what information does TIMESTAMP_SERVICE carry?
// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {
    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        ALOGE("%s() has no endpoint", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());

    aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
    if (result == AAUDIO_OK) {
        Timestamp timestamp(*positionFrames, *timeNanos);
        mAtomicStreamTimestamp.write(timestamp);
        *positionFrames = timestamp.getPosition();
        *timeNanos = timestamp.getNanoseconds();
    } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
        disconnect_l();
    }
    return result;
}
This tries to obtain the free-running DSP/DMA hardware position from the HAL:
// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {
    const std::lock_guard<std::mutex> lock(mMmapStreamLock);
    if (mMmapStream == nullptr) {
        ALOGW("%s(): called after mMmapStream set to NULL", __func__);
        return AAUDIO_ERROR_NULL;
    }
    struct audio_mmap_position position;
    const status_t status = mMmapStream->getMmapPosition(&position);
    ALOGV("%s() status= %d, pos = %d, nanos = %lld\n",
          __func__, status, position.position_frames, (long long) position.time_nanoseconds);
    const aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    if (result == AAUDIO_ERROR_UNAVAILABLE) {
        ALOGW("%s(): getMmapPosition() has no position data available", __func__);
    } else if (result != AAUDIO_OK) {
        ALOGE("%s(): getMmapPosition() returned status %d", __func__, status);
    } else {
        // Convert 32-bit position to 64-bit position.
        mFramesTransferred.update32(position.position_frames);
        *positionFrames = mFramesTransferred.get();
        *timeNanos = position.time_nanoseconds;
    }
    return result;
}
This queries the HAL for the current read/write position and its associated timestamp.
The corresponding HAL implementation is:
static int out_get_mmap_position(const struct audio_stream_out *stream,
                                 struct audio_mmap_position *position)
{
    int ret = 0;
    struct stream_out *out = (struct stream_out *)stream;
    ALOGVV("%s", __func__);
    if (position == NULL) {
        return -EINVAL;
    }
    lock_output_stream(out);
    if (out->usecase != USECASE_AUDIO_PLAYBACK_MMAP ||
        out->pcm == NULL) {
        ret = -ENOSYS;
        goto exit;
    }

    struct timespec ts = { 0, 0 };
    ret = pcm_mmap_get_hw_ptr(out->pcm, (unsigned int *)&position->position_frames, &ts);
    if (ret < 0) {
        ALOGE("%s: %s", __func__, pcm_get_error(out->pcm));
        goto exit;
    }
    position->time_nanoseconds = audio_utils_ns_from_timespec(&ts)
            + out->mmap_time_offset_nanos;

exit:
    pthread_mutex_unlock(&out->lock);
    return ret;
}
From there we reach tinyalsa:
/* Returns current read/write position in the mmap buffer with associated time stamp. */
int pcm_mmap_get_hw_ptr(struct pcm* pcm, unsigned int *hw_ptr, struct timespec *tstamp)
{
    int rc;

    if (pcm == NULL || hw_ptr == NULL || tstamp == NULL)
        return oops(pcm, EINVAL, "pcm %p, hw_ptr %p, tstamp %p", pcm, hw_ptr, tstamp);

    if (!pcm_is_ready(pcm))
        return oops(pcm, errno, "pcm_is_ready failed");

    rc = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
    if (rc < 0)
        return oops(pcm, errno, "pcm_sync_ptr failed");

    if (pcm->mmap_status == NULL)
        return oops(pcm, EINVAL, "pcm %p, mmap_status is NULL", pcm);

    if ((pcm->mmap_status->state != PCM_STATE_RUNNING) &&
        (pcm->mmap_status->state != PCM_STATE_DRAINING))
        return oops(pcm, ENOSYS, "invalid stream state %d", pcm->mmap_status->state);

    *tstamp = pcm->mmap_status->tstamp;
    if (tstamp->tv_sec == 0 && tstamp->tv_nsec == 0)
        return oops(pcm, errno, "invalid time stamp");

    *hw_ptr = pcm->mmap_status->hw_ptr;

    return 0;
}
hw_ptr here is the hardware read/write position in the mmap buffer, and tstamp is the time at which the frame at that position entered or left the audio pipeline.
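A simplified view of what the client ultimately does with such a (position, timestamp) pair is to extrapolate along the nominal sample rate to estimate where the hardware pointer is "now". A minimal sketch, assuming a stable sample clock:

#include <cstdint>

// Estimate the current hardware position from one (hw_ptr, tstamp) sample.
int64_t estimatePositionNow(int64_t positionFrames,  // hw_ptr at the timestamp
                            int64_t timestampNanos,  // tstamp from the driver
                            int64_t nowNanos,
                            int32_t sampleRate) {
    const int64_t elapsedNanos = nowNanos - timestampNanos;
    // Frames advance at the nominal sample rate between two timestamps.
    return positionFrames + elapsedNanos * sampleRate / 1000000000LL;
}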
Back in AAudioServiceStreamBase::sendCurrentTimestamp_l, look at writeUpMessageQueue:
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
    std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
    if (mUpMessageQueue == nullptr) {
        ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
        return AAUDIO_ERROR_NULL;
    }

    int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
    if (count != 1) {
        ALOGW("%s(): Queue full. Did client stop? Suspending stream. what = %u, %s",
              __func__, static_cast<unsigned>(command->what), getTypeText());
        setSuspended(true);
        return AAUDIO_ERROR_WOULD_BLOCK;
    } else {
        if (isSuspended()) {
            ALOGW("%s(): Queue no longer full. Un-suspending the stream.", __func__);
            setSuspended(false);
        }
        return AAUDIO_OK;
    }
}
This matches what we expected: the command is written into the shared-memory command buffer, in this case a TIMESTAMP_SERVICE command. After that command is sent, the hardware timestamp is fetched as well:
// Get timestamp from presentation position.
// If it fails, get timestamp that was written by getFreeRunningPosition()
aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp_l(int64_t *positionFrames,
                                                                int64_t *timeNanos) {
    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
    if (endpoint == nullptr) {
        ALOGE("%s() has no endpoint", __func__);
        return AAUDIO_ERROR_INVALID_STATE;
    }
    sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
            static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());

    uint64_t position;
    aaudio_result_t result = serviceEndpointMMAP->getExternalPosition(&position, timeNanos);
    if (result == AAUDIO_OK) {
        ALOGV("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
              __func__, position, *timeNanos);
        *positionFrames = (int64_t) position;
        return AAUDIO_OK;
    } else {
        ALOGV("%s() getExternalPosition() returns error %d", __func__, result);
    }

    if (mAtomicStreamTimestamp.isValid()) {
        Timestamp timestamp = mAtomicStreamTimestamp.read();
        *positionFrames = timestamp.getPosition();
        *timeNanos = timestamp.getNanoseconds() + serviceEndpointMMAP->getHardwareTimeOffsetNanos();
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
    }
}
This obtains the position that the HAL hardware is currently rendering or capturing, which sits closer to the driver than the free-running position. Note that if the query fails, the code falls back to the free-running position, only adding a hardware time offset to the timestamp.
First, getExternalPosition:
aaudio_result_t AAudioServiceEndpointMMAP::getExternalPosition(uint64_t *positionFrames,
                                                               int64_t *timeNanos)
{
    const std::lock_guard<std::mutex> lock(mMmapStreamLock);
    if (mHalExternalPositionStatus != AAUDIO_OK) {
        return mHalExternalPositionStatus;
    }
    if (mMmapStream == nullptr) {
        ALOGW("%s(): called after mMmapStream set to NULL", __func__);
        return AAUDIO_ERROR_NULL;
    }
    uint64_t tempPositionFrames;
    int64_t tempTimeNanos;
    const status_t status = mMmapStream->getExternalPosition(&tempPositionFrames, &tempTimeNanos);
    if (status != OK) {
        // getExternalPosition reports error. The HAL may not support the API. Cache the result
        // so that the call will not go to the HAL next time.
        mHalExternalPositionStatus = AAudioConvert_androidToAAudioResult(status);
        return mHalExternalPositionStatus;
    }

    // If the HAL keeps reporting the same position or timestamp, the HAL may be having some issues
    // to report correct external position. In that case, we will not trust the values reported from
    // the HAL. Ideally, we may want to stop querying external position if the HAL cannot report
    // correct position within a period. But it may not be a good idea to get system time too often.
    // In that case, a maximum number of frozen external position is defined so that if the
    // count of the same timestamp or position is reported by the HAL continuously, the values from
    // the HAL will no longer be trusted.
    static constexpr int kMaxFrozenCount = 20;
    // If the HAL version is less than 7.0, the getPresentationPosition is an optional API.
    // If the HAL version is 7.0 or later, the getPresentationPosition is a mandatory API.
    // In that case, even the returned status is NO_ERROR, it doesn't indicate the returned
    // position is a valid one. Do a simple validation, which is checking if the position is
    // forward within half a second or not, here so that this function can return error if
    // the validation fails. Note that we don't only apply this validation logic to HAL API
    // less than 7.0. The reason is that there is a chance the HAL is not reporting the
    // timestamp and position correctly.
    if (mLastPositionFrames > tempPositionFrames) {
        // If the position is going backwards, there must be something wrong with the HAL.
        // In that case, we do not trust the values reported by the HAL.
        ALOGW("%s position is going backwards, last position(%jd) current position(%jd)",
              __func__, mLastPositionFrames, tempPositionFrames);
        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
        return mHalExternalPositionStatus;
    } else if (mLastPositionFrames == tempPositionFrames) {
        if (tempTimeNanos - mTimestampNanosForLastPosition >
                AAUDIO_NANOS_PER_MILLISECOND * mTimestampGracePeriodMs) {
            ALOGW("%s, the reported position is not changed within %d msec. "
                  "Set the external position as not supported", __func__, mTimestampGracePeriodMs);
            mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
            return mHalExternalPositionStatus;
        }
        mFrozenPositionCount++;
    } else {
        mFrozenPositionCount = 0;
    }

    if (mTimestampNanosForLastPosition > tempTimeNanos) {
        // If the timestamp is going backwards, there must be something wrong with the HAL.
        // In that case, we do not trust the values reported by the HAL.
        ALOGW("%s timestamp is going backwards, last timestamp(%jd), current timestamp(%jd)",
              __func__, mTimestampNanosForLastPosition, tempTimeNanos);
        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
        return mHalExternalPositionStatus;
    } else if (mTimestampNanosForLastPosition == tempTimeNanos) {
        mFrozenTimestampCount++;
    } else {
        mFrozenTimestampCount = 0;
    }

    if (mFrozenTimestampCount + mFrozenPositionCount > kMaxFrozenCount) {
        ALOGW("%s too many frozen external position from HAL.", __func__);
        mHalExternalPositionStatus = AAUDIO_ERROR_INTERNAL;
        return mHalExternalPositionStatus;
    }

    mLastPositionFrames = tempPositionFrames;
    mTimestampNanosForLastPosition = tempTimeNanos;

    // Only update the timestamp and position when they looks valid.
    *positionFrames = tempPositionFrames;
    *timeNanos = tempTimeNanos;
    return mHalExternalPositionStatus;
}
This queries the HAL for position information and validates it: under normal conditions both the position and the timestamp increase monotonically, so if either goes backwards the values reported by the HAL are no longer trusted.
Now return to AudioStreamInternal::processCommands() and look at what happens when a TIMESTAMP_SERVICE message arrives:
aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
    logTimestamp(*message);
#endif
    processTimestamp(message->timestamp.position,
                     message->timestamp.timestamp + mTimeOffsetNanos);
    return AAUDIO_OK;
}
Going one level deeper:
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}
mClockModel can be used to decide whether capture/playback has actually started running. Internally it uses the (position, time) pairs to maintain a data window: it checks whether the elapsed time implied by the position delta is consistent with (no greater than) the elapsed wall-clock time; if so, the stream can be considered up and running.
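A minimal sketch of such a model (in the spirit of the client's IsochronousClockModel, which additionally maintains jitter/drift windows): each timestamp updates a marker, and positions convert to times and back along the nominal sample rate:

#include <cstdint>

class MiniClockModel {
public:
    explicit MiniClockModel(int32_t sampleRate) : mSampleRate(sampleRate) {}

    void processTimestamp(int64_t positionFrames, int64_t timeNanos) {
        mMarkerPosition = positionFrames;
        mMarkerTime = timeNanos;
        mStarted = true;
    }
    bool isStarting() const { return !mStarted; }  // no timestamp received yet

    // Expected wall-clock time at which positionFrames is reached.
    int64_t convertPositionToTime(int64_t positionFrames) const {
        const int64_t deltaFrames = positionFrames - mMarkerPosition;
        return mMarkerTime + deltaFrames * 1000000000LL / mSampleRate;
    }
    // Expected position at wall-clock time timeNanos.
    int64_t convertTimeToPosition(int64_t timeNanos) const {
        const int64_t deltaNanos = timeNanos - mMarkerTime;
        return mMarkerPosition + deltaNanos * mSampleRate / 1000000000LL;
    }

private:
    int32_t mSampleRate;
    int64_t mMarkerPosition = 0;
    int64_t mMarkerTime = 0;
    bool mStarted = false;
};

Back in processDataNow(), once valid timestamps exist, the next piece is the catch-up logic: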
// This code assumes that we have already received valid timestamps.
if (mNeedCatchUp.isRequested()) {
    // Catch an MMAP pointer that is already advancing.
    // This will avoid initial underruns caused by a slow cold start.
    advanceClientToMatchServerPosition(0 /*serverMargin*/);
    mNeedCatchUp.acknowledge();
}
For capture, this updates the FIFO read position to match the write position, i.e. it initializes the read and write positions to the same value, so the amount of readable data becomes 0; think of it as position initialization.
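In effect the catch-up is just a counter jump; a tiny self-contained illustration (stub types, not the real AudioEndpoint API):

#include <cstdint>
#include <cstdio>

struct Endpoint {
    int64_t readCounter = 0;
    int64_t writeCounter = 0;
    int64_t fullFrames() const { return writeCounter - readCounter; }
};

int main() {
    Endpoint ep{0, 4096};  // the MMAP write pointer has already advanced
    printf("before catch-up: %lld readable\n", (long long) ep.fullFrames());
    ep.readCounter = ep.writeCounter;  // advanceClientToMatchServerPosition(0)
    printf("after catch-up:  %lld readable\n", (long long) ep.fullFrames());
    return 0;
}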
An overrun occurs when the amount of readable data exceeds the buffer capacity. In exclusive mode the overrun is detected directly by the client; in shared mode it is detected by the server.
// If the capture buffer is full beyond capacity then consider it an overrun.
// For shared streams, the xRunCount is passed up from the service.
if (mAudioEndpoint->isFreeRunning()
        && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
    mXRunCount++;
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaOverRuns", mXRunCount);
    }
}
Finally, the actual data read:
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeftInByteBuffer = numFrames;

    if (framesLeftInByteBuffer > 0) {
        // Pull data from the flowgraph in case there is residual data.
        const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
                (void *)byteBuffer,
                framesLeftInByteBuffer);

        const int32_t numBytesActuallyWrittenToByteBuffer =
                framesActuallyWrittenToByteBuffer * getBytesPerFrame();
        byteBuffer += numBytesActuallyWrittenToByteBuffer;
        framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
    }

    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    int framesReadFromAudioEndpoint = 0;
    while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
        const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
        int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
        uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];

        if (framesAvailableInWrappingBuffer <= 0) break;

        // Put data from the wrapping buffer into the flowgraph 8 frames at a time.
        // Continuously pull as much data as possible from the flowgraph into the byte buffer.
        // The return value of mFlowGraph.process is the number of frames actually pulled.
        while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
            const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
                    framesAvailableInWrappingBuffer);

            const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
                    framesToReadFromWrappingBuffer;

            // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
            // that all the data is pulled. If there is no more space in the byteBuffer, the
            // remaining data will be pulled in the following readNowWithConversion().
            const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
                    (void *)currentWrappingBuffer,
                    framesToReadFromWrappingBuffer,
                    (void *)byteBuffer,
                    framesLeftInByteBuffer);

            const int32_t numBytesActuallyWrittenToByteBuffer =
                    framesActuallyWrittenToByteBuffer * getBytesPerFrame();
            byteBuffer += numBytesActuallyWrittenToByteBuffer;
            framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
            currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;

            //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
            //      "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
            //      , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
            //      framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
        }
        framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
                framesAvailableInWrappingBuffer;
        partIndex++;
    }

    // The audio endpoint should reference the number of frames written to the wrapping buffer.
    mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);

    // The internal code should use the number of frames read from the app.
    return numFrames - framesLeftInByteBuffer;
}