精华内容
参与话题
问答
  • android audio

    千次阅读 2013-12-19 13:24:55
    status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,  int triggerSession) {  status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId); ... }

    // Excerpt (AudioTrack.cpp): the AudioTrack constructor funnels all of its
    // arguments into set(), which does the real initialisation.
    // NOTE(review): parameter list and closing brace are elided in this excerpt.
    AudioTrack::AudioTrack(
    )
    {
        mStatus = set(streamType, sampleRate, format, channelMask,
                0 /*frameCount*/, flags, cbf, user, notificationFrames,
                sharedBuffer, false /*threadCanCallJava*/, sessionId);


    // Excerpt: AudioTrack::set() (argument list elided).  The key step shown
    // is createTrack_l(), which asks AudioFlinger for the server-side track.
    status_t AudioTrack::set()

    {

        // create the IAudioTrack
        status_t status = createTrack_l(streamType,
                                      sampleRate,
                                      format,
                                      (uint32_t)channelMask,
                                      frameCount,
                                      flags,
                                      sharedBuffer,
                                      output);

    }

    // Excerpt: starting playback delegates to the binder proxy obtained in
    // createTrack_l() (mAudioTrack, a TrackHandle on the AudioFlinger side).
    void AudioTrack::start()
    {

                status = mAudioTrack->start();

    }

    // Excerpt: AudioTrack::createTrack_l() — requests a server-side track
    // object from AudioFlinger over binder and caches the returned
    // IAudioTrack proxy in mAudioTrack.  Signature/body partially elided.
    status_t AudioTrack::createTrack_l(

        sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                          streamType,
                                                          sampleRate,
                                                          format,
                                                          channelMask,
                                                          frameCount,
                                                          trackFlags,
                                                          sharedBuffer,
                                                          output,
                                                          tid,
                                                          &mSessionId,
                                                          &status);

      mAudioTrack = track;


    }


    // Excerpt: AudioFlinger::createTrack() — creates the PlaybackThread-owned
    // Track, then wraps it in a binder-visible TrackHandle for the client.
    sp<IAudioTrack> AudioFlinger::createTrack()

    {

            track = thread->createTrack_l(client, streamType, sampleRate, format,
                    channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, &lStatus);

       trackHandle = new TrackHandle(track);

        return trackHandle;

    }

    // Excerpt: the playback thread constructs the actual Track object that
    // owns the shared-memory buffer and mixer state (rest of body elided).
    sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l()

    {

                track = new Track(this, client, streamType, sampleRate, format,
                        channelMask, frameCount, sharedBuffer, sessionId, flags);

              return track;

    }


    // Excerpt: Track::start() — notifies the audio policy layer that this
    // output/stream/session is becoming active (body elided around the call).
    status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,

                                                        int triggerSession)

    {

                status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);

    }

    // Client-side entry point: forwards the start-output request to the audio
    // policy service.  Returns PERMISSION_DENIED when no service connection
    // is available.
    status_t AudioSystem::startOutput(audio_io_handle_t output,
                                      audio_stream_type_t stream,
                                      int session)
    {
        const sp<IAudioPolicyService>& policyService =
                AudioSystem::get_audio_policy_service();
        if (policyService == 0) {
            return PERMISSION_DENIED;
        }
        return policyService->startOutput(output, stream, session);
    }

    status_t AudioPolicyService::startOutput(audio_io_handle_t output,
                                             audio_stream_type_t stream,
                                             int session)
    {
        if (mpAudioPolicy == NULL) {
            return NO_INIT;
        }
        ALOGV("startOutput() tid %d", gettid());
        Mutex::Autolock _l(mLock);
        return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
    }

    // Excerpt: legacy policy-HAL factory — wires the C ops table so that
    // start_output calls land in ap_start_output below (rest elided).
    static int create_legacy_ap(const struct audio_policy_device *device,
                                struct audio_policy_service_ops *aps_ops,
                                void *service,
                                struct audio_policy **ap)
    {

        lap->policy.start_output = ap_start_output;

    }

    static int ap_start_output(struct audio_policy *pol, audio_io_handle_t output,
                               audio_stream_type_t stream, int session)
    {
        struct legacy_audio_policy *lap = to_lap(pol);
        return lap->apm->startOutput(output, (AudioSystem::stream_type)stream,
                                     session);
    }

    // Excerpt: policy-manager startOutput() — picks the new output device for
    // the stream's routing strategy, applies the route, then refreshes the
    // stream volume for the chosen device.  Body partially elided.
    status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
                                                 AudioSystem::stream_type stream,
                                                 int session)
    {

            audio_devices_t prevDevice = outputDesc->device();
            audio_devices_t newDevice = getNewDevice(output, false /*fromCache*/);
            routing_strategy strategy = getStrategy(stream);


          uint32_t muteWaitMs = setOutputDevice(output, newDevice, force);


            // apply volume rules for current stream and device if necessary
            checkAndSetVolume(stream,
                              mStreams[stream].getVolumeIndex((audio_devices_t)newDevice),
                              output,
                              newDevice);

    }


    audio_devices_t AudioPolicyManagerBase::getNewDevice(audio_io_handle_t output, bool fromCache)
    {
        audio_devices_t device = (audio_devices_t)0;

        AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
        // check the following by order of priority to request a routing change if necessary:
        // 1: the strategy enforced audible is active on the output:
        //      use device for strategy enforced audible
        // 2: we are in call or the strategy phone is active on the output:
        //      use device for strategy phone
        // 3: the strategy sonification is active on the output:
        //      use device for strategy sonification
        // 4: the strategy "respectful" sonification is active on the output:
        //      use device for strategy "respectful" sonification
        // 5: the strategy media is active on the output:
        //      use device for strategy media
        // 6: the strategy DTMF is active on the output:
        //      use device for strategy DTMF
        if (outputDesc->isUsedByStrategy(STRATEGY_ENFORCED_AUDIBLE)) {
            device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
        } else if (isInCall() ||
                        outputDesc->isUsedByStrategy(STRATEGY_PHONE)) {
            device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
        } else if (outputDesc->isUsedByStrategy(STRATEGY_SONIFICATION)) {
            device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
        } else if (outputDesc->isUsedByStrategy(STRATEGY_SONIFICATION_RESPECTFUL)) {
            device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
        } else if (outputDesc->isUsedByStrategy(STRATEGY_MEDIA)) {
            device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
        } else if (outputDesc->isUsedByStrategy(STRATEGY_DTMF)) {
            device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
        } else if (outputDesc->isUsedByStrategy(STRATEGY_FM)) {
            device = getDeviceForStrategy(STRATEGY_FM, fromCache);
        }

        ALOGV("getNewDevice() selected device %x", device);
        return device;
    }


    // Map a routing strategy to an output device mask.
    //
    // With fromCache == true, returns the result cached in mDeviceForStrategy
    // from the last full evaluation; otherwise recomputes the device from the
    // current forced usages (mForceUse), call state and available devices.
    // Returns 0 when no suitable device is connected.  Relies on deliberate
    // case fall-through (DTMF -> PHONE, SONIFICATION -> ENFORCED_AUDIBLE ->
    // MEDIA) and contains HTC/SPRD vendor additions (STRATEGY_FM, TFA9887 FM
    // speaker, subwoofer property).
    audio_devices_t AudioPolicyManagerBase::getDeviceForStrategy(routing_strategy strategy,
                                                                 bool fromCache)
    {
        uint32_t device = 0;

        if (fromCache) {
            ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
                  strategy, mDeviceForStrategy[strategy]);
            return mDeviceForStrategy[strategy];
        }

        switch (strategy) {

        case STRATEGY_SONIFICATION_RESPECTFUL:
            if (isInCall()) {
                device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
            } else if (isStreamActive(AudioSystem::MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
                // while media is playing (or has recently played), use the same device
                device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
            } else {
                // when media is not playing anymore, fall back on the sonification behavior
                device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
            }

            break;

        case STRATEGY_DTMF:
            if (!isInCall()) {
                // when off call, DTMF strategy follows the same rules as MEDIA strategy
                device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
                break;
            }
            // when in call, DTMF and PHONE strategies follow the same rules
            // FALL THROUGH

        case STRATEGY_PHONE:
            // for phone strategy, we first consider the forced use and then the available devices by order
            // of priority
            switch (mForceUse[AudioSystem::FOR_COMMUNICATION]) {
            case AudioSystem::FORCE_BT_SCO:
                if (!isInCall() || strategy != STRATEGY_DTMF) {
                    device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
                    if (device) break;
                }
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_SCO;
                if (device) break;
                // if SCO device is requested but no SCO device is available, fall back to default case
                // FALL THROUGH

            default:    // FORCE_NONE
                // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
                if (mHasA2dp && !isInCall() &&
                        (mForceUse[AudioSystem::FOR_MEDIA] != AudioSystem::FORCE_NO_BT_A2DP) &&
                        (getA2dpOutput() != 0) && !mA2dpSuspended) {
                    device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
                    if (device) break;
                    device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
                    if (device) break;
                }
                // wired/dock/USB devices by decreasing priority, then earpiece,
                // then the configured default output device.
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_EARPIECE;
                if (device) break;
                device = mDefaultOutputDevice;
                if (device == 0) {
                    ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
                }
                break;

            case AudioSystem::FORCE_SPEAKER:
                // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
                // A2DP speaker when forcing to speaker output
                if (mHasA2dp && !isInCall() &&
                        (mForceUse[AudioSystem::FOR_MEDIA] != AudioSystem::FORCE_NO_BT_A2DP) &&
                        (getA2dpOutput() != 0) && !mA2dpSuspended) {
                    device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                    if (device) break;
                }
                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
                if (device) break;
                device = mDefaultOutputDevice;
                if (device == 0) {
                    ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
                }
                break;
            }
        break;

        case STRATEGY_SONIFICATION:

            // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
            // handleIncallSonification().
            if (isInCall()) {
                device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/);
                break;
            }
            // FALL THROUGH

        case STRATEGY_ENFORCED_AUDIBLE:
            //Fix bug 137934. Note we can delete routing policy in STRATEGY_SONIFICATION
            if (isInCall()) {
                device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/);
                break;
            }
            // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
            // except in countries where not enforced in which case it follows STRATEGY_MEDIA

            if (strategy == STRATEGY_SONIFICATION ||
                    !mStreams[AUDIO_STREAM_ENFORCED_AUDIBLE].mCanBeMuted) {
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
                if (device == 0) {
                    ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
                }
            }
            // The second device used for sonification is the same as the device used by media strategy
            // FALL THROUGH

        // device2 is the media-device candidate cascade; the final device is
        // device (speaker from a sonification fall-through, else 0) OR device2.
        case STRATEGY_MEDIA: {
            uint32_t device2 = 0;
                switch (mForceUse[AudioSystem::FOR_MEDIA]) {
                case AudioSystem::FORCE_SPEAKER:
                    ALOGD("geting device of force_speaker");
                    // NOTE(review): this branch assigns `device` (not `device2`)
                    // while guarded by `device2 == 0` — looks inconsistent with
                    // the cascade below; confirm against the vendor tree.
                    if (device2 == 0) {
    //HTC_AUD_START, eleven add for tfa9887 fm
    #if defined(BOARD_HAVE_TFA9887)
            if(FM_status == 1)
                device = AudioSystem::DEVICE_OUT_FM_SPEAKER;
            else
    #endif
    //HTC_AUD_END.
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
                }
                break;
                default:
            if ((device2 == 0) && mHasA2dp && (mForceUse[AudioSystem::FOR_MEDIA] != AudioSystem::FORCE_NO_BT_A2DP) &&
                    (getA2dpOutput() != 0) && !mA2dpSuspended) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP;
                if (device2 == 0) {
                    device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
                }
                if (device2 == 0) {
                    device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                }
    #ifdef ENABLE_HTC_SUBWOOFER
                if (device2 != 0) {
                    char subwoofer[PROPERTY_VALUE_MAX];
                    property_get("htc.audio.subwoofer",subwoofer, "0");
                    if (atoi(subwoofer) == 1) {
                        device2 |= mAvailableOutputDevices & AUDIO_DEVICE_OUT_SPEAKER;
                    }
                }
    #endif
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADPHONE;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_WIRED_HEADSET;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_DGTL_DOCK_HEADSET;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_AUX_DIGITAL;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_ANLG_DOCK_HEADSET;
            }
            if (device2 == 0) {
                device2 = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_SPEAKER;
            }

            // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
            // STRATEGY_ENFORCED_AUDIBLE, 0 otherwise
            device |= device2;
            if (device) break;
            device = mDefaultOutputDevice;
            if (device == 0) {
                ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
            }
            }
                }break;

        case STRATEGY_FM: {
    //HTC_AUD_START, eleven add for FM forse use.
            switch (mForceUse[AudioSystem::FOR_MEDIA]) {
            case AudioSystem::FORCE_SPEAKER:
                ALOGD("geting device of force_speaker");
            device = AudioSystem::DEVICE_OUT_FM_SPEAKER;
            break;
        case AudioSystem::FORCE_NO_BT_A2DP:
                ALOGD("geting device of force_headset");
            device = AudioSystem::DEVICE_OUT_FM_HEADSET;
            break;
        case AudioSystem::FORCE_NONE:
        default:
                ALOGD("Do nothing for force none");
            break;
            }
            // no forced FM device: prefer FM headset, then FM speaker, among
            // the currently available devices.
            if (device == 0) {
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_FM_HEADSET;
            }

            if (device == 0) {
                device = mAvailableOutputDevices & AudioSystem::DEVICE_OUT_FM_SPEAKER;
            }
        } break;
    //HTC_AUD_END.
        default:
            ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
            break;
        }

        ALOGV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
        return (audio_devices_t)device;
    }


    // Excerpt: setOutputDevice() — sends the "routing=<device>" key/value to
    // the output stream via the client interface, then reapplies stream
    // volumes for the new device.  Body partially elided (muting, checks).
    uint32_t AudioPolicyManagerBase::setOutputDevice(audio_io_handle_t output,
                                                 audio_devices_t device,
                                                 bool force,
                                                 int delayMs)

    {

        // do the routing
        param.addInt(String8(AudioParameter::keyRouting), (int)device);
        mpClientInterface->setParameters(output, param.toString(), delayMs);


        // update stream volumes according to new device
        applyStreamVolumes(output, device, delayMs);


    }


    // Excerpt: base policy-manager constructor — stores the client interface
    // used for every call back into AudioPolicyService; member defaults are
    // set in the initialiser list.  Rest of the body is elided here.
    AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface)
        :
    #ifdef AUDIO_POLICY_TEST
        Thread(false),
    #endif //AUDIO_POLICY_TEST
        mPrimaryOutput((audio_io_handle_t)0),
        mAvailableOutputDevices((audio_devices_t)0),
        mPhoneState(AudioSystem::MODE_NORMAL),
        mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
        mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
        mA2dpSuspended(false), mHasA2dp(false), mHasUsb(false), mHasStartupSound(true)
    {
        mpClientInterface = clientInterface;

    }

    // Excerpt: the legacy shim instantiates the concrete policy manager via
    // the createAudioPolicyManager() factory below (rest elided).
    static int create_legacy_ap(const struct audio_policy_device *device,
                                struct audio_policy_service_ops *aps_ops,
                                void *service,
                                struct audio_policy **ap)
    {

        lap->apm = createAudioPolicyManager(lap->service_client);

    }


    // Factory entry point resolved by the legacy shim: builds the
    // platform-specific (SPRD) policy manager around the client interface.
    extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
    {
        AudioPolicyInterface *manager = new AudioPolicyManagerSPRD(clientInterface);
        return manager;
    }

    // Excerpt: AudioPolicyCompatClient adapts AudioPolicyClientInterface
    // calls back onto the audio_policy_service_ops C table (see the
    // setParameters definition below).  Only this declaration is shown.
    class AudioPolicyCompatClient : public AudioPolicyClientInterface {

          virtual void setParameters(audio_io_handle_t ioHandle,
                                   const String8& keyValuePairs,
                                   int delayMs = 0);

    }


    void AudioPolicyCompatClient::setParameters(audio_io_handle_t ioHandle,
                                                const String8& keyValuePairs,
                                                int delayMs)
    {
        mServiceOps->set_parameters(mService, ioHandle, keyValuePairs.string(),
                               delayMs);
    }


        // C dispatch table handed to the legacy policy manager; each entry
        // forwards into AudioPolicyService (set_parameters ->
        // aps_set_parameters below).  Uses the GCC "name : value" designated
        // initialiser syntax; keep entries matching the struct definition.
        struct audio_policy_service_ops aps_ops = {
            open_output           : aps_open_output,
            open_duplicate_output : aps_open_dup_output,
            close_output          : aps_close_output,
            suspend_output        : aps_suspend_output,
            restore_output        : aps_restore_output,
            open_input            : aps_open_input,
            close_input           : aps_close_input,
            set_stream_volume     : aps_set_stream_volume,
            set_stream_output     : aps_set_stream_output,
            set_parameters        : aps_set_parameters,
            get_parameters        : aps_get_parameters,
            start_tone            : aps_start_tone,
            stop_tone             : aps_stop_tone,
            set_voice_volume      : aps_set_voice_volume,
            move_effects          : aps_move_effects,
            load_hw_module        : aps_load_hw_module,
            open_output_on_module : aps_open_output_on_module,
            open_input_on_module  : aps_open_input_on_module,
        };

    static void aps_set_parameters(void *service, audio_io_handle_t io_handle,
                                       const char *kv_pairs, int delay_ms)
    {
        AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;

        audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
    }


    void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
                                           const char *keyValuePairs,
                                           int delayMs)
    {
        mAudioCommandThread->parametersCommand(ioHandle, keyValuePairs,
                                               delayMs);
    }

    // Excerpt: queue a SET_PARAMETERS command on the command thread; the
    // command payload fields and any status wait are elided here.
    status_t AudioPolicyService::AudioCommandThread::parametersCommand(audio_io_handle_t ioHandle,
                                                                       const char *keyValuePairs,
                                                                       int delayMs)
    {
        status_t status = NO_ERROR;

        AudioCommand *command = new AudioCommand();
        command->mCommand = SET_PARAMETERS;

        insertCommand_l(command, delayMs);

    }


    // Excerpt: insertCommand_l() — the switch over queued command types (only
    // the SET_PARAMETERS case is shown, its body elided) filters/coalesces
    // pending commands before inserting the new one at position i + 1.
    void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
    {

    case SET_PARAMETERS: {

    }

        mAudioCommands.insertAt(command, i + 1);

    }

    // Excerpt: command-thread loop — when a SET_PARAMETERS command is
    // dequeued it is executed through AudioSystem::setParameters().
    bool AudioPolicyService::AudioCommandThread::threadLoop()
    {

                    case SET_PARAMETERS: {

                        command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
    }

    }

    // Routes the parameter string to AudioFlinger; rejected with
    // PERMISSION_DENIED when no flinger connection is available.
    status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
        const sp<IAudioFlinger>& flinger = AudioSystem::get_audio_flinger();
        if (flinger == 0) {
            return PERMISSION_DENIED;
        }
        return flinger->setParameters(ioHandle, keyValuePairs);
    }

    // Excerpt: AudioFlinger::setParameters() — when the key/value string
    // carries a non-zero "routing" value it is also forwarded to every record
    // thread.  The playback-thread path is elided; the keyRouting /
    // valueRouting locals belong to elided code.
    status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
    {

        //AudioParameter paramRouting = AudioParameter(keyValuePairs);
        String8 keyRouting = String8("routing");
        int valueRouting;

                AudioParameter param = AudioParameter(keyValuePairs);
                int value;
                if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
                        (value != 0)) {
                    for (size_t i = 0; i < mRecordThreads.size(); i++) {
                        mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
                    }
                }

    }

    // Excerpt: ThreadBase::setParameters() — parameters are queued in
    // mNewParameters and drained later by checkForNewParameters_l().
    status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
    {

        mNewParameters.add(keyValuePairs);

    }

    // Routing key constant; the same AUDIO_PARAMETER_STREAM_ROUTING macro the
    // HAL parses in out_set_parameters() below.
    const char * const AudioParameter::keyRouting = AUDIO_PARAMETER_STREAM_ROUTING;

    // Excerpt: the mixer thread drains mNewParameters and pushes each
    // key/value string down to the HAL output stream's set_parameters hook.
    bool AudioFlinger::MixerThread::checkForNewParameters_l()
    {

                status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                        keyValuePair.string());

    }

    // Excerpt: HAL adev_open_output_stream() — installs out_set_parameters as
    // the stream's set_parameters hook (rest of the stream setup elided).
    static int adev_open_output_stream(struct audio_hw_device *dev,
                                  audio_io_handle_t handle,
                                  audio_devices_t devices,
                                  audio_output_flags_t flags,
                                  struct audio_config *config,
                                  struct audio_stream_out **stream_out)
    {


        out->stream.common.set_parameters = out_set_parameters;

    }


    // Excerpt: out_set_parameters() — parses the "routing" value from the
    // key/value string and updates the device masks on the adev and stream
    // (declarations, error handling and the routing trigger are elided).
    static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
    {

        parms = str_parms_create_str(kvpairs);

        ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));

                old_device = adev->devices; //HTC_AUD_ADD
                adev->devices &= ~AUDIO_DEVICE_OUT_ALL;
                adev->devices |= val;
                out->devices = val;
                ALOGW("out_set_parameters want to set devices:0x%x old_mode:%d new_mode:%d call_start:%d ",adev->devices,cur_mode,adev->mode,adev->call_start);
                cur_mode = adev->mode;

    }

    // Excerpt: adev_open() — spawns the routing-manager thread (see
    // stream_routing_manager_create below); rest of the setup elided.
    static int adev_open(const hw_module_t* module, const char* name,
                         hw_device_t** device)
    {

        ret = stream_routing_manager_create(adev);

    }

    // Excerpt: creates the dedicated device-routing thread so mixer-route
    // switches happen off the audio data path.
    static int stream_routing_manager_create(struct tiny_audio_device *adev)
    {

        /* create a thread to manager the device routing switch.*/
        ret = pthread_create(&adev->routing_mgr.routing_switch_thread, NULL,
                                stream_routing_thread_entry, (void *)adev);


    }

    // Excerpt: routing-thread body — the wait/loop logic is elided; each
    // wakeup applies the pending device switch via do_select_devices().
    static void *stream_routing_thread_entry(void * param)
    {
        struct tiny_audio_device *adev = (struct tiny_audio_device *)param;

                /* switch device routing here.*/
                do_select_devices(adev);

        return 0;
    }
    // Excerpt: do_select_devices() — walks the parsed device configs and
    // applies each one's "off" mixer route via set_route_by_array() (the
    // matching "on" pass and the loop itself are elided).
    static void do_select_devices(struct tiny_audio_device *adev)
    {

            set_route_by_array(adev->mixer, adev->dev_cfgs[i].off,
                       adev->dev_cfgs[i].off_len);

    }

    /**
     * Apply a table of mixer-control settings.
     *
     * Each entry names an ALSA mixer control plus either a string (enum)
     * value or an integer value.  Unknown controls are logged and skipped;
     * individual set failures are logged but do not abort the walk.
     *
     * @param mixer tinyalsa mixer handle.
     * @param route array of settings to apply (unused when len == 0).
     * @param len   number of entries in route.
     * @return 0 always (errors are reported via the log only).
     */
    static int set_route_by_array(struct mixer *mixer, struct route_setting *route,
                      unsigned int len)
    {
        struct mixer_ctl *ctl;
        unsigned int i, j;
        int ret;  /* mixer_ctl_set_* return int; keep it signed */

        /* Go through the route array and set each value */
        for (i = 0; i < len; i++) {
            ctl = mixer_get_ctl_by_name(mixer, route[i].ctl_name);
            if (!ctl) {
                ALOGE("Unknown control '%s'\n", route[i].ctl_name);
                continue;
            }

            if (route[i].strval) {
                /* enum-valued control: set by string */
                ret = mixer_ctl_set_enum_by_string(ctl, route[i].strval);
                if (ret != 0) {
                    ALOGE("Failed to set '%s' to '%s'\n",
                    route[i].ctl_name, route[i].strval);
                } else {
                    ALOGI("Set '%s' to '%s'\n",
                    route[i].ctl_name, route[i].strval);
                }
            } else {
                /* This ensures multiple (i.e. stereo) values are set jointly */
                unsigned int num_values = mixer_ctl_get_num_values(ctl); /* hoisted: loop-invariant */
                for (j = 0; j < num_values; j++) {
                    ret = mixer_ctl_set_value(ctl, j, route[i].intval);
                    if (ret != 0) {
                        ALOGE("Failed to set '%s'.%d to %d\n",
                        route[i].ctl_name, j, route[i].intval);
                    } else {
                        ALOGI("Set '%s'.%d to %d\n",
                        route[i].ctl_name, j, route[i].intval);
                    }
                }
            }
        }

        return 0;
    }


    // Excerpt: adev_open() — parses the mixer-control XML (tiny_hw.xml) at
    // device-open time; rest of the setup elided.
    static int adev_open(const hw_module_t* module, const char* name,
                         hw_device_t** device)
    {

        /* parse mixer ctl */
        ret = adev_config_parse(adev);

    }

    static int adev_config_parse(struct tiny_audio_device *adev)
    {
        struct config_parse_state s;
        FILE *f;
        XML_Parser p;
        char property[PROPERTY_VALUE_MAX];
        char file[80];
        int ret = 0;
        bool eof = false;
        int len;

        //property_get("ro.product.device", property, "tiny_hw");
        snprintf(file, sizeof(file), "/system/etc/%s", "tiny_hw.xml");

        ALOGV("Reading configuration from %s\n", file);
        f = fopen(file, "r");
        if (!f) {
            ALOGE("Failed to open %s\n", file);
            return -ENODEV;
        }

        p = XML_ParserCreate(NULL);
        if (!p) {
            ALOGE("Failed to create XML parser\n");
            ret = -ENOMEM;
            goto out;
        }

        memset(&s, 0, sizeof(s));
        s.adev = adev;
        XML_SetUserData(p, &s);

        XML_SetElementHandler(p, adev_config_start, adev_config_end);

        while (!eof) {
            len = fread(file, 1, sizeof(file), f);
            if (ferror(f)) {
                ALOGE("I/O error reading config\n");
                ret = -EIO;
                goto out_parser;
            }
            eof = feof(f);

            if (XML_Parse(p, file, len, eof) == XML_STATUS_ERROR) {
                ALOGE("Parse error at line %u:\n%s\n",
                 (unsigned int)XML_GetCurrentLineNumber(p),
                 XML_ErrorString(XML_GetErrorCode(p)));
                ret = -EINVAL;
                goto out_parser;
            }
        }

     out_parser:
        XML_ParserFree(p);
     out:
        fclose(f);

        return ret;
    }


    // Excerpt: XML element handler — selects the device-name table at parse
    // time: the digital-FM table when FM_DIGITAL_SUPPORT_PROPERTY is "1",
    // otherwise the line-in table.  Rest of the handler is elided.
    static void adev_config_start(void *data, const XML_Char *elem,
                      const XML_Char **attr)
    {


        if (property_get(FM_DIGITAL_SUPPORT_PROPERTY, value, "0") && strcmp(value, "1") == 0)
        {
            dev_names = dev_names_digitalfm;
            dev_num = sizeof(dev_names_digitalfm) / sizeof(dev_names_digitalfm[0]);
        }
        else
        {
            dev_names = dev_names_linein;
            dev_num = sizeof(dev_names_linein) / sizeof(dev_names_linein[0]);
        }

    }


    // Device-mask -> mixer path-name table used when FM audio arrives over
    // line-in: FM masks are folded into the "speaker"/"headphone"/"line"
    // entries, plus dedicated "line-headphone"/"line-speaker" paths.
    static const dev_names_para_t dev_names_linein[] = {
        { AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_FM_SPEAKER, "speaker" },
        { AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE |AUDIO_DEVICE_OUT_FM_HEADSET,
              "headphone" },
        { AUDIO_DEVICE_OUT_EARPIECE, "earpiece" },
        /* ANLG for voice call via linein*/
        { AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET | AUDIO_DEVICE_OUT_ALL_FM, "line" },
        { AUDIO_DEVICE_OUT_FM_HEADSET, "line-headphone" },
        { AUDIO_DEVICE_OUT_FM_SPEAKER, "line-speaker" },

        { AUDIO_DEVICE_IN_COMMUNICATION, "comms" },
        { AUDIO_DEVICE_IN_AMBIENT, "ambient" },
        { AUDIO_DEVICE_IN_BUILTIN_MIC, "builtin-mic" },
        { AUDIO_DEVICE_IN_WIRED_HEADSET, "headset-in" },
        { AUDIO_DEVICE_IN_AUX_DIGITAL, "digital" },
        { AUDIO_DEVICE_IN_BACK_MIC, "back-mic" },
        //{ "linein-capture"},
    };
    /*
     * Routing table used when FM audio arrives over the digital (IIS)
     * interface (FM_DIGITAL_SUPPORT_PROPERTY == "1"); selected by
     * adev_config_start().  Differs from dev_names_linein in that all FM
     * output bits map to the dedicated "digital-fm" path instead of the
     * line-in speaker/headphone paths.
     */
    static const dev_names_para_t dev_names_digitalfm[] = {
        /* ---- output devices ---- */
        { AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_FM_SPEAKER, "speaker" },
        { AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE |AUDIO_DEVICE_OUT_FM_HEADSET,
              "headphone" },
        { AUDIO_DEVICE_OUT_EARPIECE, "earpiece" },
            /* ANLG for voice call via linein*/
        { AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "line" },
        { AUDIO_DEVICE_OUT_ALL_FM, "digital-fm" },

        /* ---- input devices ---- */
        { AUDIO_DEVICE_IN_COMMUNICATION, "comms" },
        { AUDIO_DEVICE_IN_AMBIENT, "ambient" },
        { AUDIO_DEVICE_IN_BUILTIN_MIC, "builtin-mic" },
        { AUDIO_DEVICE_IN_WIRED_HEADSET, "headset-in" },
        { AUDIO_DEVICE_IN_AUX_DIGITAL, "digital" },
        { AUDIO_DEVICE_IN_BACK_MIC, "back-mic" },
        //{ "linein-capture"},
    };



    root@android:/system/etc # cat tiny_hw.xml
    <!-- TinyHAL configuration file for GOD -->
    <tinyalsa-audio device="SC8830 audio">

       <!-- We are able to have most of our routing static so do that -->
       <path>

         <!-- DAC->Headphone -->
         <ctl name="HPL Playback Volume" val="8" />
         <ctl name="HPR Playback Volume" val="8" />
         <ctl name="DACL Playback Volume" val="7" />
         <ctl name="DACR Playback Volume" val="7" />

         <!-- DAC->Speaker -->
         <ctl name="SPKL Playback Volume" val="10" />
         <ctl name="SPKR Playback Volume" val="10" />
         <ctl name="DACL Playback Volume" val="5" />
         <ctl name="DACR Playback Volume" val="5" />

         <!-- DAC->Earpiece -->
         <ctl name="EAR Playback Volume" val="10" />
         <ctl name="DACL Playback Volume" val="5" />
         <ctl name="DACR Playback Volume" val="5" />

         <!-- ADC ->(main mic)  -->
         <ctl name="ADCL Capture Volume" val="12" />
         <ctl name="ADCR Capture Volume" val="12" />

       </path>

    <!-- MUST NOT modify private field directly -->
    <private name="vb control">
       <func name="VBC Switch" val="arm" />
    </private>
    <private name="eq update">
      <func name="VBC EQ Update" val="0" />
    </private>

    <private name="da eq switch">
      <func name="VBC DA EQ Switch" val="0" />
    </private>

    <private name="ad01 eq switch">
      <func name="VBC AD01 EQ Switch" val="0" />
    </private>

    <private name="ad23 eq switch">
      <func name="VBC AD23 EQ Switch" val="0" />
    </private>

    <private name="da eq profile">
      <func name="VBC DA EQ Profile Select" val="0" />
    </private>
    <private name="ad01 eq profile">
      <func name="VBC AD01 EQ Profile Select" val="0" />
    </private>
    <private name="ad23 eq profile">
      <func name="VBC AD23 EQ Profile Select" val="0" />
    </private>

    <private name="internal PA">
      <func name="Inter PA Config" val="304" />
    </private>

    <device name="builtin-mic">
      <path name="on">
         <ctl name="ADCL Mixer MainMICADCL Switch" val="1" />
         <ctl name="ADCR Mixer MainMICADCR Switch" val="1" />
         <ctl name="Mic Function" val="1" />
      </path>
      <path name="off">
         <ctl name="Mic Function" val="0" />
         <ctl name="ADCL Mixer MainMICADCL Switch" val="0" />
         <ctl name="ADCR Mixer MainMICADCR Switch" val="0" />
      </path>
    </device>

    <device name="back-mic">
      <path name="on">
         <ctl name="ADCL Mixer AuxMICADCL Switch" val="1" />
         <ctl name="ADCR Mixer AuxMICADCR Switch" val="1" />
         <ctl name="Aux Mic Function" val="1" />
      </path>
      <path name="off">
         <ctl name="Aux Mic Function" val="0" />
         <ctl name="ADCL Mixer AuxMICADCL Switch" val="0" />
         <ctl name="ADCR Mixer AuxMICADCR Switch" val="0" />
      </path>
    </device>

    <device name="headset-in">
      <path name="on">
         <ctl name="ADCL Mixer HPMICADCL Switch" val="1" />
         <ctl name="ADCR Mixer HPMICADCR Switch" val="1" />
         <ctl name="HP Mic Function" val="1" />
      </path>
      <path name="off">
         <ctl name="HP Mic Function" val="0" />
         <ctl name="ADCL Mixer HPMICADCL Switch" val="0" />
         <ctl name="ADCR Mixer HPMICADCR Switch" val="0" />
      </path>
    </device>

    <device name="line-speaker">
      <path name="on">
         <ctl name="SPKL Mixer ADCLSPKL Switch" val="1" />
         <ctl name="SPKL Mixer ADCRSPKL Switch" val="1" />
      </path>
      <path name="off">
         <ctl name="SPKL Mixer ADCLSPKL Switch" val="0" />
         <ctl name="SPKL Mixer ADCRSPKL Switch" val="0" />
      </path>
    </device>

    <device name="line-headphone">
      <path name="on">
         <ctl name="HPL Mixer ADCRHPL Switch" val="1" />
         <ctl name="HPR Mixer ADCLHPR Switch" val="1" />
      </path>
      <path name="off">
         <ctl name="HPL Mixer ADCRHPL Switch" val="0" />
         <ctl name="HPR Mixer ADCLHPR Switch" val="0" />
      </path>
    </device>

    <device name="line">
      <path name="on">
         <ctl name="ADCL Mixer AILADCL Switch" val="1" />
         <ctl name="ADCR Mixer AIRADCR Switch" val="1" />
         <ctl name="Line Function" val="1" />
      </path>
      <path name="off">
         <ctl name="Line Function" val="0" />
         <ctl name="ADCL Mixer AILADCL Switch" val="0" />
         <ctl name="ADCR Mixer AIRADCR Switch" val="0" />
      </path>
    </device>

    <device name="digital-fm">
      <path name="on">
         <ctl name="Digital FM Function" val="1" />
         <ctl name="AD IISMUX" val="2" />
      </path>
      <path name="off">
         <ctl name="Digital FM Function" val="0" />
         <ctl name="AD IISMUX" val="0" />
      </path>
    </device>

    <device name="speaker">
       <path name="on">
         <ctl name="SPKR Mixer DACLSPKR Switch" val="1" />
         <ctl name="SPKR Mixer DACRSPKR Switch" val="1" />
         <ctl name="Speaker2 Function" val="1" />
       </path>
       <path name="off">
         <ctl name="Speaker2 Function" val="0" />
         <ctl name="SPKR Mixer DACLSPKR Switch" val="0" />
         <ctl name="SPKR Mixer DACRSPKR Switch" val="0" />
       </path>
    </device>

    <device name="headphone">
       <path name="on">
         <ctl name="HPR Mixer DACRHPR Switch" val="1" />
         <ctl name="HPL Mixer DACLHPL Switch" val="1" />
         <ctl name="HeadPhone Function" val="1" />
       </path>
       <path name="off">
         <ctl name="HeadPhone Function" val="0" />
         <ctl name="HPR Mixer DACRHPR Switch" val="0" />
         <ctl name="HPL Mixer DACLHPL Switch" val="0" />
       </path>
    </device>

    <device name="earpiece">
       <path name="on">
         <ctl name="Earpiece Function" val="1" />
       </path>
       <path name="off">
         <ctl name="Earpiece Function" val="0" />
       </path>
    </device>

    </tinyalsa-audio>


    展开全文
  • Android Audio

    2016-05-05 15:30:21
    Audio TrackAudioTrack 为android的播放器,只能播放PCM格式的音源,其实Android自带的MediaPlayer类也是通过解码改为PCM格式的然后调用AudioTrack并向其写入数据,AudioTrack同时会通过Binder机制调用另外一端的...

    Audio Track

    AudioTrack 为android的播放器,只能播放PCM格式的音源,其实Android自带的MediaPlayer类也是通过解码改为PCM格式的然后调用AudioTrack并向其写入数据,AudioTrack同时会通过Binder机制调用另外一端的AudioFlinger,得到IAudioTrack对象,通过它和AudioFlinger交互,然后AudioFlinger再与硬件交互,进行播放声音,之间的过程就不写了,很多博客上有。。

    AudioTrack的使用

    刚刚说过,AudioTrack只能播放PCM格式的数据,对于Android来说,无非就是Buffer了,直接表现为byte[],AudioTrack中的write(byte[] buffer,int offset,int len)方法,buffer参数就是我们得到的数据,而数据又是从歌曲中拿到的,一般的歌曲文件为wav/mp3文件,当然mp3多一点,对这块我具体不是很熟,我们用最简单的wav文件播放举例说明吧~

    当我们拿到wav文件的时候,首先要解析它,解析的话推荐看一下这篇文章

    https://zhuanlan.zhihu.com/p/20657836?refer=jhuster
    这个专栏还有很多音频开发知识,多谢作者的分享了

    展开全文
  • 注:本系列开始的一些文章主要是以翻译Google官方的文档为主。1. Android AudioAndroid的音频HAL(硬件抽象层)连接着更上层的, 在android.media包中的音频专属的API和更下层的驱动... Audio架构及代码分布Android audio

    注:本系列开始的一些文章主要是以翻译Google官方的文档为主。

    1. Android Audio

    Android的音频HAL(硬件抽象层)连接着更上层的, 在android.media包中的音频专属的API和更下层的驱动与硬件,粗略的分,可以将HAL分为一层,用户空间和内核空间分一层。
    这里写图片描述

    本系列主要讲述Audio的实现以及提高性能的一些tips.

    2. Audio架构及代码分布

    Android audio架构定义了audio是如何实现的,同时也指明这个实现涉及到的相关代码。

    Android audio架构图:
    这里写图片描述

    下面简单介绍一下这个架构图:

    • Application framework
      这部分包括App的代码,App使用android.media中的API来和audio硬件交互。在内部,App是通过JNI调用对应的native代码。

    • JNI
      Framework的api需要通过JNI调用到更下层的实现。是一个中介的作用。JNI的代码位于:frameworks/base/core/jni 和 frameworks/base/media/jni.

    • Native framework
      它提供和Framework相对应的本地实现,通过调用Binder代理和media server交互。代码位于:frameworks/av/media/libmedia

    • Binder IPC
      通过Binder跨进程和media server交互。Binder Proxy一般是以I开头的文件。代码位于:frameworks/av/media/libmedia

    • Media server
      包括AudioFlinger和AudioPolicy, 它们是真正直接和HAL打交道的服务,代码位于:frameworks/av/services/audioflinger
      frameworks/av/services/audiopolicy

    • HAL
      HAL定义了标准的接口,必须实现这些接口才能让Audio正常工作起来。接口定义文件:
      hardware/libhardware/include/hardware/audio.h

    • Kernel driver
      Driver连接着HAL和硬件,厂商可以使用ALSA, OSS或者自定义的driver。

    • Android native audio based on Open SL ES
      这部分在结构图中没有展示出来,这部分API是Android NDK的一部分,它的结构位置和android.media相同。

    展开全文
  • Android Audio Framework

    2019-03-01 23:55:24
    Android Audio架构分析,从上层到底层的分析Android Audio架构
  • Android AudioRecorder录制mp3文件(已经完整封装好,直接调用) 源码包中有2个类 AudioRecorder2Mp3Util 负责录音和转换 MainActivity 用户的操作界面 注意用的时候需要加上权限 <uses-permission android:name=...
  • android audio system

    2012-11-29 10:36:28
    android audio system
  •  AudioFlinger是Android音频系统的两大服务之一,另一个服务是AudioPolicyService,这两大服务都在系统启动时有MediaSever加载,加载的代码位于:frameworks/base/media/mediaserver/main_mediaserver.cpp。...
    
    

    引言

        AudioFlinger是Android音频系统的两大服务之一,另一个服务是AudioPolicyService,这两大服务都在系统启动时由MediaServer加载,加载的代码位于:frameworks/base/media/mediaserver/main_mediaserver.cpp。AudioPolicyService的相关内容请参考另一篇文章:《Android Audio System 之三: AudioPolicyService 和 AudioPolicyManager 》

    http://blog.csdn.net/DroidPhone/archive/2010/10/18/5949280.aspx

     

        本文主要介绍AudioFlinger,AudioFlinger向下访问AudioHardware,实现输出音频数据,控制音频参数。同时,AudioFlinger向上通过IAudioFinger接口提供服务。所以,AudioFlinger在Android的音频系统框架中起着承上启下的作用,地位相当重要。AudioFlinger的相关代码主要在:frameworks/base/libs/audioflinger,也有部分相关的代码在frameworks/base/media/libmedia里。

    AudioFlinger的类结构

    下面的图示描述了AudioFlinger类的内部结构和关系:

     

                                                                 图一   AudioFlinger的类结构

    不知道各位是否和我一样,第一次看到AudioFlinger类的定义的时候都很郁闷--这个类实在是庞大和臃肿,可是当你理清他的关系以后,你会觉得相当合理。下面我们一一展开讨论。

    • IAudioFlinger接口

        这是AudioFlinger向外提供服务的接口,例如openOutput,openInput,createTrack,openRecord等等,应用程序或者其他service通过ServiceManager可以获得该接口。该接口通过继承BnAudioFlinger得到。

    • ThreadBase

        在AudioFlinger中,Android为每一个放音/录音设备均创建一个处理线程,负责音频数据的I/O和合成,ThreadBase是这些线程的基类,所有的播放和录音线程都派生自ThreadBase

    • TrackBase

        应用程序每创建一个音轨(AudioTrack/AudioRecord),在AudioFlinger中都会创建一个对应的Track实例,TrackBase就是这些Track的基类,他的派生类有:

    •  
      • PlaybackTread::Track    // 用于普通播放,对应于应用层的AudioTrack
      • PlaybackThread::OutputTrack    // 用于多重设备输出,当蓝牙播放开启时使用
      • RecordThread::RecordTrack    // 用于录音,对应于应用层的AudioRecord
    • 播放

        默认的播放线程是MixerThread,它由AudioPolicyManager创建,在AudioPolicyManager的构造函数中,有以下代码:

    [c-sharp] view plaincopy
    1. mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,  
    2.                                     &outputDesc->mSamplingRate,  
    3.                                     &outputDesc->mFormat,  
    4.                                     &outputDesc->mChannels,  
    5.                                     &outputDesc->mLatency,  
    6.                                     outputDesc->mFlags);  

    最终会进入AudioFlinger的openOutput函数:

    [c-sharp] view plaincopy
    1. ......  
    2. thread = new MixerThread(this, output, ++mNextThreadId);  
    3. ......  
    4. mPlaybackThreads.add(mNextThreadId, thread);  
    5. ......  
    6. return mNextThreadId;  

    可以看到,创建好的线程会把该线程和它的Id保存在AudioFlinger的成员变量mPlaybackThreads中,mPlaybackThreads是一个Vector,AudioFlinger创建的线程都会保存在里面,最后,openOutput返回该线程的Id,该Id也就是所谓的audio_io_handle_t,AudioFlinger的调用者这能看到这个audio_io_handle_t,当需要访问时传入该audio_io_handle_t,AudioFlinger会通过mPlaybackThreads,得到该线程的指针。

        要播放声音,应用程序首先要通过IAudioFlinger接口,调用createTrack(),关于createTrack的流程,可以参看我的另一篇文章:

              http://blog.csdn.net/DroidPhone/archive/2010/10/14/5941344.aspx

    createTrack会调用PlaybackThread类的createTrack_l函数:

    [c-sharp] view plaincopy
    1. track = thread->createTrack_l(client, streamType, sampleRate, format,  
    2.                 channelCount, frameCount, sharedBuffer, &lStatus);  

    再跟入createTrack_l函数中,可以看到创建了PlaybackThread::Track类,然后加入播放线程的track列表mTracks中。

    [c-sharp] view plaincopy
    1. track = thread->createTrack_l(client, streamType, sampleRate, format,  
    2.                 channelCount, frameCount, sharedBuffer, &lStatus);  
    3. ......  
    4. mTracks.add(track);  

    在createTrack的最后,创建了TrackHandle类并返回,TrackHandle继承了IAudioTrack接口,以后,createTrack的调用者可以通过IAudioTrack接口与AudioFlinger中对应的Track实例交互。

    [c-sharp] view plaincopy
    1. trackHandle = new TrackHandle(track);  
    2. ......  
    3. return trackHandle;  

     最后,在系统运行时,AudioFlinger中的线程和Track的结构大致如下图所示:它会拥有多个工作线程,每个线程拥有多个Track。

                                              图二     AudioFlinger的线程结构

    播放线程实际上是MixerThread的一个实例,MixerThread的threadLoop()中,会把该线程中的各个Track进行混合,必要时还要进行ReSample(重采样)的动作,转换为统一的采样率(44.1K),然后通过音频系统的AudioHardware层输出音频数据。

    • 录音

         录音的流程和放音差不多,只不过数据流动的方向相反,录音线程变成RecordThread,Track变成了RecordTrack,openRecord返回RecordHandle,详细的暂且不表。

    • DuplicatingThread

        AudioFlinger中有一个特殊的线程类:DuplicatingThread,从图一可以知道,它是MixerThread的子类。当系统中有两个设备要同时输出时,DuplicatingThread将被创建,通过IAudioFlinger的openDuplicateOutput方法创建DuplicatingThread。

    [c-sharp] view plaincopy
    1. int AudioFlinger::openDuplicateOutput(int output1, int output2)  
    2. {  
    3.     Mutex::Autolock _l(mLock);  
    4.     MixerThread *thread1 = checkMixerThread_l(output1);  
    5.     MixerThread *thread2 = checkMixerThread_l(output2);  
    6.     ......  
    7.     DuplicatingThread *thread = new DuplicatingThread(this, thread1, ++mNextThreadId);  
    8.     thread->addOutputTrack(thread2);  
    9.     mPlaybackThreads.add(mNextThreadId, thread);  
    10.     return mNextThreadId;  
    11. }  

        创建 DuplicatingThread时,传入2个需要同时输出的目标线程Id,openDuplicateOutput先从mPlaybackThreads中根据Id取得相应输出线程的实例,然后为每个线程创建一个虚拟的AudioTrack----OutputTrack,然后把这个虚拟的AudioTrack加入到目标线程的mTracks列表中,DuplicatingThread在它的threadLoop()中,把Mixer好的数据同时写入两个虚拟的OutputTrack中,因为这两个OutputTrack已经加入到目标线程的mTracks列表,所以,两个目标线程会同时输出DuplicatingThread的声音。

        实际上,创建DuplicatingThread的工作是由AudioPolicyService中的AudioPolicyManager发起的。主要是当蓝牙耳机和本机输出都开启时,AudioPolicyManager会做出以下动作:

    • 首先打开(或创建)蓝牙输出线程A2dpOutput
    • 以HardwareOutput和A2dpOutput作为参数,调用openDuplicateOutput,创建DuplicatingThread
    • 把属于STRATEGY_MEDIA类型的Track移到A2dpOutput中
    • 把属于STRATEGY_DTMF类型的Track移到A2dpOutput中
    • 把属于STRATEGY_SONIFICATION类型的Track移到DuplicateOutput中

    结果是,音乐和DTMF只会在蓝牙耳机中输出,而按键音和铃声等提示音会同时在本机和蓝牙耳机中输出。

                                                                               图三  本机播放时的Thread和Track

     

     

     

     

     

     

     

     

     

                                                                            图四   蓝牙播放时的Thread和Track

    展开全文
  • Android Audio AudioEffect

    千次阅读 2011-10-14 07:23:48
    在看AudioSessionId相关代码的时候了解到,共用一个AudioSessionId的AudioTrack和MediaPlayer会共用一个AudioEffect。 今天就来看看AudioEffect是个什么东东。 看这个类的目的,主要是为了搞清楚AudioEffect
  • Android audioRecord录音Demo

    热门讨论 2012-10-24 22:27:43
    使用Android audioRecord录音完整Demo,最终生成wav文件。
  • Android AudioRecord类

    千次阅读 2015-03-06 12:45:46
    预备知识 一 音频采样率和采样大小 音频采样率是指录音设备在一秒钟内对声音信号的采样次数,采样频率越高声音的还原就越真实越自然。在当今的主流采集卡上,采样频率一般共分为22.05KHz、44.1KHz、48KHz三个等级,...
  • 实现android audioRecorder 录音并保存为m4a文件,mediaRecorder也集成了,尽量不要用mediaRecorder,因为声音小,音质也差一些。
  •  AudioPolicyService是Android音频系统的两大服务之一,另一个服务是AudioFlinger,这两大服务都在系统启动时有MediaSever加载,加载的代码位于:frameworks/base/media/mediaserver/main_mediaserver.cpp。Audio...
  • audio policy xml里面有2种角色:source和sink,每种角色又分为devicePorts和mixPorts。 devicePorts(source):为实际的硬件输入设备; devicePorts(sink):为实际的硬件输出设备; mixPorts(source):为经过...
  • Android audio设备的定义

    万次阅读 2012-06-05 14:53:47
    audio设备的定义位于AudioSystem.h文件中 enum audio_devices { // output devices 1.输出设备: DEVICE_OUT_EARPIECE = 0x1, // 听筒 DEVICE_OUT_SPEAKER = 0x2, // 扬声器 DEVICE_OUT_WIRED_HEADSET = 0x4, //...
  • Android audio codec power consumption

    千次阅读 2011-05-23 20:02:00
    Androidaudio codec power consumption、hardware、Incall、FM、ATV、Codec power状态控制方案
  • If I call the following : ...sound = Gdx.audio.newSound(Gdx.files.getFileHandle("sound/bullet.mp3", FileType.Internal)); Followed by: sound.play(); Play the sound does not come out, wait for a whi
  • Android Audio Architecture 图如下所示   详细调用路径如下 1. 音频播放 示例代码 MediaPlayer mp = new MediaPlayer(); mp.setDataSource(PATH_TO_FILE); mp.prepare(); mp.start();   1.1 ...
  • Android Framework的音频子系统中,每一个音频流对应着一个AudioTrack类的一个实例,每个AudioTrack会在创建时注册到AudioFlinger中,由AudioFlinger把所有的AudioTrack进行混合(Mixer),然后输送到AudioHardware...
  • android audio 框架流程分析图
  • AudioRecord介绍在android中采集音频的api是android.media.AudioRecord类其中构造器的几个参数就是标准的声音采集参数以下是参数的含义解释public AudioRecord (int audioSource, int sampleRateInHz, int ...
  • Android Audio框架

    千次阅读 2018-08-31 11:25:04
    ASLA -Advanced Sound Linux Architecture OSS -以前的Linux音频体系结构,被ASLA取代并兼容 I2S/PCM/AC97 - Codec与CPU间音频的通信协议/接口/总线 ...DAPM - Dynamic Audio Power Mana...
  • android audio flinger

    2017-03-09 22:49:51
    AudioFlinger创建 ./frameworks/av/media/mediaserver/main_mediaserver.cpp: 47 int main(int argc __unused, char** argv) 48 { ...  135: AudioFlinger::instantiate();  139 AudioPolicyService
  • Android AudioFlinger

    万次阅读 2011-10-19 17:19:21
    AudioFlinger是Android音频系统的两大服务之一,另一个服务是AudioPolicyService,这两大服务都在系统启动时有MediaSever加载,加载的代码位于:frameworks/base/media/mediaserver/main_mediaserver.cpp。...
  • android Audio机制

    千次阅读 2015-11-23 16:25:58
    android Audio机制 ALSA tinyalsa Audioflinger AudioPolicyService 前言 这篇文章是最近自己学习android audio的学习心得,希望大牛提出宝贵意见。 本文内容基于android 5.0 目录 一. 硬件架构 (1).编...

空空如也

1 2 3 4 5 ... 20
收藏数 8,594
精华内容 3,437
热门标签
关键字:

android audio