Android: Adding a New Audio Stream Type and Implementing Dual Audio Output

This article describes how to implement dual audio output on Android devices such as smart TVs, i.e. playing TV sound and music at the same time, by adding a custom audio stream type and adjusting the AudioPolicyManager routing policy. The key change is in `getDeviceForStrategy`: the custom stream type is routed to a USB audio device, while regular audio such as the MEDIA strategy keeps using the speaker.


Android defines many audio stream types. The complete native-layer definition lives in system/core/include/system/audio.h:

/* Audio stream types */
typedef enum {
    /* These values must kept in sync with
     * frameworks/base/media/java/android/media/AudioSystem.java
     */
    AUDIO_STREAM_DEFAULT          = -1,
    AUDIO_STREAM_MIN              = 0,
    AUDIO_STREAM_VOICE_CALL       = 0,
    AUDIO_STREAM_SYSTEM           = 1,
    AUDIO_STREAM_RING             = 2,
    AUDIO_STREAM_MUSIC            = 3,
    AUDIO_STREAM_ALARM            = 4,
    AUDIO_STREAM_NOTIFICATION     = 5,
    AUDIO_STREAM_BLUETOOTH_SCO    = 6,
    AUDIO_STREAM_ENFORCED_AUDIBLE = 7,  /* Sounds that cannot be muted by user
                                         * and must be routed to speaker
                                         */
    AUDIO_STREAM_DTMF             = 8,
    AUDIO_STREAM_TTS              = 9,  /* Transmitted Through Speaker.
                                         * Plays over speaker only, silent on other devices.
                                         */
    AUDIO_STREAM_USB_HEADSET      = 10,
    AUDIO_STREAM_REROUTING        = 11, /* For dynamic policy output mixes */
    AUDIO_STREAM_PATCH            = 12, /* For internal audio flinger tracks. Fixed volume */
    AUDIO_STREAM_USB_MIC          = 13,
    AUDIO_STREAM_ACCESSIBILITY    = 14, /* For accessibility talk back prompts */
    AUDIO_STREAM_PUBLIC_CNT       = AUDIO_STREAM_USB_MIC + 1,
    AUDIO_STREAM_CNT              = AUDIO_STREAM_ACCESSIBILITY + 1,
} audio_stream_type_t;

Android assigns each stream type a routing strategy and uses that strategy to pick the output device; this is Android's audio policy in a nutshell.

For example, when an application plays audio with STREAM_MUSIC and headphones are plugged in, that audio switches from the speaker to the headphones, whereas STREAM_RING audio plays through the headphones and the speaker at the same time.

AudioPolicyManager.h defines the following routing strategies:

enum routing_strategy {
    STRATEGY_MEDIA,
    STRATEGY_PHONE,
    STRATEGY_SONIFICATION,
    STRATEGY_SONIFICATION_RESPECTFUL,
    STRATEGY_DTMF,
    STRATEGY_ENFORCED_AUDIBLE,
    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
    STRATEGY_ACCESSIBILITY,
    STRATEGY_REROUTING,
    STRATEGY_USB_HEADST,
    NUM_STRATEGIES
};

The output device for each routing strategy is chosen mainly in AudioPolicyManager's getDeviceForStrategy method, so by adding a custom stream type and modifying the logic in getDeviceForStrategy we can customize Android's audio routing policy.

Take the following feature as an example: on an Android smart TV, working together with a companion app, we want dual audio output, i.e. the user can listen to music while watching TV, with the TV sound coming out of the speaker and the music coming out of a headset. Here we use a USB headset as the second device.

The idea is to add a new stream type for the music app to use. When dual output is enabled, the app plays its audio with this custom type, and we route that type to the USB audio device. Ordinary TV playback and third-party apps keep using STREAM_MUSIC, which maps to the STRATEGY_MEDIA routing strategy, and while dual output is enabled we force that strategy onto the speaker. Together this gives us the dual audio output feature.
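Besides getDeviceForStrategy(), the new stream type also has to be mapped to the new strategy in AudioPolicyManager::getStrategy(), which on 5.1 is a plain switch from audio_stream_type_t to routing_strategy. A minimal sketch of the extra case, assuming the names from the enums above (the existing cases, e.g. AUDIO_STREAM_MUSIC returning STRATEGY_MEDIA, stay unchanged):

// Sketch only: drop this case into the existing switch inside
// AudioPolicyManager::getStrategy(audio_stream_type_t stream).
    case AUDIO_STREAM_USB_HEADSET:
        // route the custom stream type through the custom strategy
        return STRATEGY_USB_HEADST;

With that mapping in place, getDeviceForStrategy() can branch on the new strategy: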

case STRATEGY_USB_HEADST:
case STRATEGY_MEDIA: {
    char propDoubOutput[PROPERTY_VALUE_MAX];
    property_get("audio.output.double_output", propDoubOutput, "null");
    if ((strcmp(propDoubOutput, "1") == 0) && strategy == STRATEGY_USB_HEADST) {
        device = mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_USB_DEVICE;
        if (device != AUDIO_DEVICE_NONE) {
            device = AUDIO_DEVICE_OUT_USB_DEVICE;
        } else {
            ALOGE("getDeviceForStrategy() no device found for STRATEGY_USB_HEADST");
        }
    } else {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        if (strategy != STRATEGY_SONIFICATION) {
            // no sonification on remote submix (e.g. WFD)
            if (mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
            }
        }
        ......

STRATEGY_USB_HEADST is our custom strategy, and "audio.output.double_output" is a property we added so that the native layer can tell whether the upper layer has enabled dual output. As the code shows, when dual output is off, STRATEGY_USB_HEADST behaves exactly like STRATEGY_MEDIA; when it is on, STRATEGY_USB_HEADST is routed to the USB audio device (device = AUDIO_DEVICE_OUT_USB_DEVICE;). At the same time we also have to force the speaker for the MEDIA strategy:

property_get("audio.output.double_output",propDoubOutput,"null");

if (strcmp(propDoubOutput, "1") ==0) {

device = AUDIO_DEVICE_OUT_AUX_DIGITAL |AUDIO_DEVICE_OUT_SPEAKER;

} else {

device |= device2;

}
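The "audio.output.double_output" property used above is an ordinary system property, so whichever platform component exposes the dual-output switch can flip it at runtime. A minimal sketch, assuming a hypothetical helper in some native service (only the property name comes from the code above):

#include <cutils/properties.h>

// Hypothetical helper: called when the user toggles dual audio output.
static void setDualOutputEnabled(bool enabled) {
    // "1" selects the USB-headset + speaker split handled above; any other
    // value falls back to the normal STRATEGY_MEDIA routing.
    property_set("audio.output.double_output", enabled ? "1" : "0");
}

For quick testing, adb shell setprop audio.output.double_output 1 has the same effect.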

That largely completes the device-selection work, but it only takes effect once the new stream type is wired through from the Java layer down to the framework. In practice this is easy to do by mirroring how an existing stream type is handled: just trace how an AudioTrack call travels from the Java layer into native code. After adding our custom stream type to AudioManager and AudioSystem in the Java layer, look at the AudioTrack constructor. Compared with 4.4, Android 5.1 adds AudioAttributes, which wraps the streamType passed down from above and actually makes this kind of extension more convenient: the upper-layer stream_type is converted into private int mUsage = USAGE_UNKNOWN; and private int mContentType = CONTENT_TYPE_UNKNOWN;. Then in the native layer, in AudioTrack.cpp's set() function:

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGI("set(): %p streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          this, streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

...... Note the line mStreamType = AUDIO_STREAM_DEFAULT;: the stream type has been reset to -1, and from here on device selection no longer looks at stream_type but is driven by the audio_attributes_t structure instead. Its definition is:

typedef struct {
    audio_content_type_t content_type;
    audio_usage_t        usage;
    audio_source_t       source;
    audio_flags_mask_t   flags;
    char                 tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE]; /* UTF8 */
} audio_attributes_t;

These are exactly the mUsage and mContentType mentioned above.

Back in AudioPolicyManager, look at the getOutputForAttr() interface; it calls the getDeviceForStrategy() we modified earlier to pick the device:

......
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
        attributes.usage, attributes.content_type, attributes.tags, attributes.flags);

routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
......
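Note that getStrategyForAttr() maps attributes to a strategy by flags and usage, not by stream type, so a music track built from attributes will typically land in STRATEGY_MEDIA. One possible way to carry the custom type through this path, sketched here under the assumption that the Java-side conversion writes a hypothetical "usb_headset" tag into the attributes of the music app's tracks, is to key on the tags field:

// Sketch only: a hypothetical extra check at the top of
// AudioPolicyManager::getStrategyForAttr(); the existing flag/usage based
// mapping below it stays unchanged.
uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
    if (strstr(attr->tags, "usb_headset") != NULL) {
        return (uint32_t) STRATEGY_USB_HEADST;  // custom strategy for the music app
    }
    // ... existing AUDIO_FLAG_AUDIBILITY_ENFORCED and usage-based mapping ...
}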

So once the conversion between stream_type and AudioAttributes is handled properly in the upper layers, the path is fully connected and the dual audio output feature is in place.
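For completeness, here is a hypothetical native smoke test that exercises the new stream type directly (real applications would go through the Java AudioTrack or MediaPlayer APIs once the constant is exposed there); the routing result can then be checked with dumpsys media.audio_policy:

#include <media/AudioTrack.h>
#include <vector>

using namespace android;

// Hypothetical test: play one second of silence with the custom stream type
// so the resulting output device can be inspected.
int main() {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_USB_HEADSET,       // custom stream type from audio.h
            44100,                          // sample rate
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO,
            0 /* frameCount: let AudioFlinger decide */);
    if (track->initCheck() != NO_ERROR) {
        return -1;                          // no output could be opened
    }
    std::vector<int16_t> silence(44100 * 2, 0);   // 1 s of stereo silence
    track->start();
    track->write(silence.data(), silence.size() * sizeof(int16_t));
    track->stop();
    return 0;
}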
