#My HarmonyOS Development Notes# HarmonyOS development personal notes - assorted modules (original)

第一小趴菜
Published on 2025-5-7 17:02

Personal notes; use them for reference if they help:

1. Requesting permissions

Remember to first declare the permissions you want to request in module.json5:

"requestPermissions":[
  {
    "name" : "ohos.permission.CAMERA",
    "reason": "$string:reason",
    "usedScene": {
      "abilities": [
        "EntryAbility",
      ],
      "when":"inuse"
    }
  },
  {
    "name": "ohos.permission.INTERNET",
    "reason": "$string:app_name",
    "usedScene": {
      "when": "always"
    }
  }
]

Then add a method that pops up the permission request dialog:

// Required imports
import { abilityAccessCtrl, common, Permissions } from '@kit.AbilityKit';
import { BusinessError } from '@kit.BasicServicesKit';

// Permissions to request
const PERMISSIONS: Array<Permissions> = [
  'ohos.permission.CAMERA',
];

export function reqPermissionsFromUser(context: common.UIAbilityContext): void {
  let atManager: abilityAccessCtrl.AtManager = abilityAccessCtrl.createAtManager();
  // requestPermissionsFromUser checks the current grant status to decide whether to show the dialog
  atManager.requestPermissionsFromUser(context, PERMISSIONS).then((data) => {
    let grantStatus: Array<number> = data.authResults;
    let length: number = grantStatus.length;
    for (let i = 0; i < length; i++) {
      if (grantStatus[i] === 0) {
        // The user granted the permission; you can continue with the target operation
      } else {
        // The user denied the permission; prompt that it is required for this page's features and guide them to system settings to enable it
        return;
      }
    }
    // All permissions were granted
  }).catch((err: BusinessError) => {
    console.error(`Failed to request permissions from user. Code is ${err.code}, message is ${err.message}`);
  })
}

Finally, call it:

let context: common.UIAbilityContext = getContext(this) as common.UIAbilityContext;
reqPermissionsFromUser(context);
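If you only want to check whether a permission has already been granted, without popping the dialog, the grant status can be queried with checkAccessToken. Below is a minimal sketch under that assumption; the helper name checkPermissionGrant and the commented usage are my own additions:

import { abilityAccessCtrl, bundleManager, Permissions } from '@kit.AbilityKit';
import { BusinessError } from '@kit.BasicServicesKit';

// Sketch: read the current grant status of one permission for this app.
async function checkPermissionGrant(permission: Permissions): Promise<abilityAccessCtrl.GrantStatus> {
  let atManager: abilityAccessCtrl.AtManager = abilityAccessCtrl.createAtManager();
  let grantStatus: abilityAccessCtrl.GrantStatus = abilityAccessCtrl.GrantStatus.PERMISSION_DENIED;
  try {
    // The access token ID of the current app comes from its own bundle info.
    let bundleInfo: bundleManager.BundleInfo =
      await bundleManager.getBundleInfoForSelf(bundleManager.BundleFlag.GET_BUNDLE_INFO_WITH_APPLICATION);
    grantStatus = await atManager.checkAccessToken(bundleInfo.appInfo.accessTokenId, permission);
  } catch (error) {
    let err = error as BusinessError;
    console.error(`Failed to check permission. Code: ${err.code}, message: ${err.message}`);
  }
  return grantStatus;
}

// Example usage: only show the dialog when CAMERA is not granted yet.
// checkPermissionGrant('ohos.permission.CAMERA').then((status) => {
//   if (status !== abilityAccessCtrl.GrantStatus.PERMISSION_GRANTED) {
//     reqPermissionsFromUser(context);
//   }
// });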

2. Camera preview and photo capture module

API version: API 12
The camera can only be used after the permission request above has been granted.

// Required imports
import { camera } from '@kit.CameraKit';
import { image } from '@kit.ImageKit';
import { common } from '@kit.AbilityKit';
import { BusinessError } from '@kit.BasicServicesKit';

const TAG = '[CameraService]';

export default class CameraService {
  private cameraManager: camera.CameraManager = undefined;
  private camerasArray: Array<camera.CameraDevice> = undefined;
  private cameraInput: camera.CameraInput = undefined;
  private capability: camera.CameraOutputCapability = undefined;
  private previewOutput: camera.PreviewOutput = undefined;
  private capSession: camera.PhotoSession = undefined;
  private receiver: image.ImageReceiver = undefined;
  private profile: camera.Profile = undefined;
  private imgRecvOutput: camera.PhotoOutput = undefined;

  constructor() {
  }

  // Initialization
  async initCamera(surfaceId: string, cameraID: number, context: common.UIAbilityContext,send:(buffer:ArrayBuffer)=>void): Promise<void> {
    await this.cameraRelease()
    this.cameraManager = camera.getCameraManager(context);

    console.info(TAG, `getCameraManager OK`)
    try {
      this.camerasArray = this.cameraManager.getSupportedCameras();
    } catch (error) {
      let err = error as BusinessError;
      console.error(TAG, `The getSupportedCameras call failed. error code: ${err.code}`);
    }
    console.info(TAG, ` camerasArray length ${this.camerasArray.length}`);
    if (cameraID >= this.camerasArray.length) {
      cameraID = 0;
    }
    let mCamera = this.camerasArray[cameraID];
    this.cameraInput = this.cameraManager.createCameraInput(mCamera);
    console.info(TAG, `createCameraInput OK`)

    this.cameraInput.open(false).then(() => {
      console.info(TAG, 'Promise returned with camera opened.');
    })

    let modes: Array<camera.SceneMode> = [];
    try {
      modes = this.cameraManager.getSupportedSceneModes(mCamera);
    } catch (error) {
      let err = error as BusinessError;
      console.error(TAG, `The getSupportedSceneModes call failed. error code: ${err.code}`);
    }
    // Scene modes: 0 - normal photo, 1 - normal video
    console.info(TAG, `modes: ${modes[0]}`);
    this.capability = this.cameraManager.getSupportedOutputCapability(mCamera, modes[0]);

    console.info(TAG, `getSupportedOutputCapability OK`);
    console.info(TAG, `profile ${JSON.stringify(this.capability.previewProfiles)}`)
    this.profile = this.capability.previewProfiles[1]; // pick one of the supported preview profiles; index 1 is just this demo's choice
    this.receiver = image.createImageReceiver(
      this.profile.size,
      image.ImageFormat.JPEG,
      1
    );
    try {
      this.previewOutput = this.cameraManager.createPreviewOutput(this.profile, surfaceId);
      let recvSurfaceId: string = await this.receiver.getReceivingSurfaceId();
      this.imgRecvOutput = this.cameraManager.createPhotoOutput(
        this.profile,
        recvSurfaceId
      );
      console.info(TAG, 'createPreviewOutput OK');
      console.info(TAG, 'createCaptureSession');
      this.imageArrival(send)
      try {
        this.capSession = this.cameraManager.createSession(modes[0]) as camera.PhotoSession;
        this.capSession.beginConfig()
        console.info(TAG, 'beginConfig');
        this.capSession.addInput(this.cameraInput);
        this.capSession.addOutput(this.previewOutput);
        this.capSession.addOutput(this.imgRecvOutput);
        await this.capSession.commitConfig();
        console.info(TAG, 'Promise returned to indicate the commit config success.');
        this.capSession.start()
      } catch (error) {
        // On failure, an error code is returned in error.code; handle it here.
        let err = error as BusinessError;
        console.error(`createCaptureSession error. error code: ${err.code}`);
      }
    } catch (error) {
      // On failure, an error code is returned in error.code; handle it here.
      let err = error as BusinessError;
      console.error(TAG, `The createPreviewOutput call failed. error code: ${err.code}`);
    }
  }
  
  // Callback for receiving captured image data
  imageArrival(send: (buffer: ArrayBuffer) => void): void {
    console.log(TAG, 'createImageReceiver');
    this.receiver.on('imageArrival', () => {
      this.receiver.readNextImage((err: BusinessError, nextImage: image.Image) => {
        console.log(TAG, "read image");
        if (err || nextImage === undefined) {
          return;
        }
        nextImage.getComponent(image.ComponentType.JPEG, async (err: BusinessError, imgComponent: image.Component) => {
          if (err || imgComponent === undefined) {
            return;
          }
          console.log(TAG, "receive image", imgComponent.byteBuffer);
          send(imgComponent.byteBuffer)
          nextImage.release();
        });

      });
    });
  }
  // Call this to take a photo
  async photoCapture(): Promise<void> {
    console.log(TAG, 'takePicture');
    let photoSettings: camera.PhotoCaptureSetting = {
      rotation: camera.ImageRotation.ROTATION_0,
      quality: camera.QualityLevel.QUALITY_LEVEL_HIGH,
      mirror: false,
    };
    this.imgRecvOutput.capture(photoSettings).then((res) => {
      console.info(TAG, 'capture success')
    }).catch((err: BusinessError) => {
      console.log(TAG, `photoOutPut capture catch code:${JSON.stringify(err)}`);
      console.log(TAG, `photoOutPut capture catch code:${JSON.stringify(err.code)}`);
    })
  }


  // Release resources
  async cameraRelease(): Promise<void> {
    console.log(TAG, 'releaseCamera');
    if (this.cameraInput) {
      await this.cameraInput.close();
    }
    if (this.previewOutput) {
      await this.previewOutput.release();
    }
    if (this.capSession) {
      await this.capSession.stop().then(()=>{
        console.info(TAG,"stop camera success");
      });
      await this.capSession.release();
    }
    if (this.imgRecvOutput) {
      await this.imgRecvOutput.release();
    }
    if (this.receiver) {
      await this.receiver.release();
    }
  }
}

Usage:
First create the member variables, etc.

private cameraUtil: CameraService = new CameraService();
private context: common.UIAbilityContext = getContext(this) as common.UIAbilityContext;
// Release resources in the lifecycle callback
aboutToDisappear(): void {
    this.cameraUtil.cameraRelease();
}

Then use it in the UI:

Column() {
  Row() {
    Button("开启摄像头")
      .onClick(() => {
        reqPermissionsFromUser(this.context);
          console.info(TAG, `开启摄像头`);
          this.cameraUtil.initCamera(this.surfaceId, this.cameraID, this.context, (byteBuffer) => {
            console.info(TAG, `拍照数据:${byteBuffer}`);
            // 这里填写回调操作
          });
      })
    Button("拍照")
      .onClick(() => {
        console.info(TAG, `拍照`);
        this.cameraUtil.photoCapture()
      })
  }

  XComponent({
    id: `XComponent`,
    type: XComponentType.SURFACE,
    controller: this.mXComponentController
  }).onLoad(async (event) => {
    this.mXComponentController.setXComponentSurfaceRect({
      surfaceWidth: 100, // in px; change this to fit your layout
      surfaceHeight: 100 // in px; change this to fit your layout
    });
    this.surfaceId = this.mXComponentController.getXComponentSurfaceId();
  })


}
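Inside the capture callback you can, for example, persist the JPEG ArrayBuffer into the app sandbox. This is only a sketch; the helper name saveJpegToSandbox and the file name photo.jpg are my own choices:

import { fileIo as fs } from '@kit.CoreFileKit';
import { common } from '@kit.AbilityKit';

// Sketch: write the JPEG buffer delivered by the capture callback into the app sandbox.
function saveJpegToSandbox(context: common.UIAbilityContext, byteBuffer: ArrayBuffer): string {
  const filePath = context.filesDir + '/photo.jpg'; // hypothetical file name
  const file = fs.openSync(filePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE | fs.OpenMode.TRUNC);
  fs.writeSync(file.fd, byteBuffer);
  fs.closeSync(file);
  return filePath;
}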

3. Audio input

API version: API 12
The microphone can only be used after the corresponding permission (ohos.permission.MICROPHONE) has been requested as shown above.

import audio from '@ohos.multimedia.audio';
import { reqPermissionsFromUser } from './PermissionsRequest'
import { common } from '@kit.AbilityKit';
import { util } from '@kit.ArkTS';
import { BusinessError } from '@kit.BasicServicesKit';
import { fileIo as fs } from '@kit.CoreFileKit';

const TAG = "[Voice]"

@ComponentV2
export default struct VoiceControl {
  @Local isRecording: boolean = false
  @Local recordTime: number = 0
  @Local microphonePermission: boolean = false;
  private timer: number = 0
  private audioCapturer: audio.AudioCapturer | null = null
  private audioStream: Uint8Array | null = null;

  aboutToAppear() {
    const context: common.UIAbilityContext = getContext(this) as common.UIAbilityContext;
    reqPermissionsFromUser(context);
  }

  // Start recording
  private async startRecording() {
    try {
      // 1. Create the audio capturer
      this.audioCapturer = await audio.createAudioCapturer({
        streamInfo: {
          samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_16000,
          channels: audio.AudioChannel.CHANNEL_1,
          sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
          encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
        },
        capturerInfo: {
          source: audio.SourceType.SOURCE_TYPE_MIC,
          capturerFlags: 0
        }
      });
      // 2. Listen for audio data in real time
      this.audioCapturer?.on('readData', (buffer: ArrayBuffer) => this.captureAudioData(buffer));

      await this.audioCapturer.start()
      console.log('Recording started');

      this.isRecording = true
      this.recordTime = 0
      // 3. Start the timer
      this.timer = setInterval(() => {
        this.recordTime++
      }, 1000)
    } catch (error) {
      const err = error as BusinessError;
      console.error('Failed to start recording:', err.code, err.message)
    }
  }

  // Accumulate captured audio data
  private captureAudioData(buffer: ArrayBuffer) {
    console.log(`read${buffer.byteLength}`);
    if (!this.audioStream) {
      this.audioStream = new Uint8Array(buffer);
      console.log(`read${this.audioStream.byteLength}`);
    } else {
      let newBuffer = new Uint8Array(this.audioStream.byteLength + buffer.byteLength);
      newBuffer.set(this.audioStream, 0);
      newBuffer.set(new Uint8Array(buffer), this.audioStream.length);
      this.audioStream = newBuffer;
      console.log(`read${this.audioStream.byteLength}`);
    }
  }

  // Stop recording
  private async stopRecording() {
    console.log('Stopping recording');
    try {
      // 1. Stop the timer
      clearInterval(this.timer)

      // 2. Stop capturing
      if (this.audioCapturer) {
        await this.audioCapturer.stop()
        await this.audioCapturer.release()
        this.audioCapturer = null
      }

      this.isRecording = false

      // 3. Process the recorded data
      await this.processRecording()
    } catch (error) {
      const err = error as BusinessError;
      console.error('Failed to stop recording:', err.code, err.message)
    }
  }

  // Process the recorded data
  private async processRecording() {
    // Add your own audio processing logic here
    console.info(`Recording finished. Duration: ${this.recordTime}s, data size: ${this.audioStream?.byteLength} bytes`)
    // At this point this.audioStream holds the raw PCM bytes and can be processed further
    await this.disposeSpeech(this.audioStream)
    // Clear the buffer
    this.audioStream = null;
    this.recordTime = 0
  }


  private async disposeSpeech(data: Uint8Array | null) {
    if (!data) {
      return ""
    }
    try {
      let base64 = new util.Base64Helper();
      let speechData = base64.encodeToStringSync(data); // base64 form, in case a backend needs it
      let wavData = pcmUint8ArrayToWav(data)
      let context = getContext(this).getApplicationContext();
      let path = context.filesDir;
      console.log("path : ", path)
      const filePath = path + '/MyVoice.wav'; // sandbox path; the real path is /data/app/el2/100/base/<bundle name>/files/MyVoice.wav

      let destFile =
        fs.openSync(filePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE | fs.OpenMode.TRUNC);
      fs.writeSync(destFile?.fd, wavData.buffer);
      fs.close(destFile)

      // Send the WAV data wherever it is needed, e.g. through your own WebSocket helper (not shown here):
      // this.VoiceWebSocket.sendMassage(wavData.buffer)
      return ""

    } catch (err) {
      console.warn("Speech request failed: " + (err as BusinessError).message)
    }
    return ""

  }

  build() {
    Column() {
      Column() {
        Image(`/pages/Voice.svg`)
          .size({ width: "150px", height: "150px" })
      }
      .size({ width: "200px", height: "200px" })
      .border({ radius: "50%" })
      .justifyContent(FlexAlign.Center)
      .alignItems(HorizontalAlign.Center)
      .backgroundColor(this.isRecording ? '#FF4D4F' : '#53CFFF')
      .width(150)
      .height(150)
      .onTouch((event: TouchEvent) => {
        if (event.type === TouchType.Down) {
          this.isRecording = true;
          this.startRecording();
        } else if (event.type === TouchType.Up) {
          this.isRecording = false;
          this.stopRecording();
        }
      })

      if (this.isRecording) {
        Text(`Recording: ${this.recordTime}s`)
          .fontSize(16)
          .margin({ top: 8 })
      }
    }
  }
}

// Prepend a WAV file header to raw PCM data
function pcmUint8ArrayToWav(pcmBytes: Uint8Array): Uint8Array {
  const channels = 1;
  const sampleRate = 16000;
  const bitsPerSample = 16;

  const pcmSize = pcmBytes.byteLength;
  const wavSize = 44 + pcmSize;

  const buffer = new ArrayBuffer(wavSize);
  const view = new DataView(buffer);

  const writeString = (view: DataView, offset: number, str: string) => {
    for (let i = 0; i < str.length; i++) {
      view.setUint8(offset + i, str.charCodeAt(i));
    }
  };

  // RIFF chunk descriptor
  writeString(view, 0, 'RIFF');
  view.setUint32(4, wavSize - 8, true);
  writeString(view, 8, 'WAVE');

  // fmt sub-chunk
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true); // Subchunk1Size (16 for PCM)
  view.setUint16(20, 1, true); // AudioFormat (1 for PCM)
  view.setUint16(22, channels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * channels * bitsPerSample / 8, true); // ByteRate
  view.setUint16(32, channels * bitsPerSample / 8, true); // BlockAlign
  view.setUint16(34, bitsPerSample, true);

  // data sub-chunk
  writeString(view, 36, 'data');
  view.setUint32(40, pcmSize, true);

  // Write the PCM data
  new Uint8Array(buffer, 44).set(pcmBytes);

  // Return the result as a Uint8Array
  return new Uint8Array(buffer);
}
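As a quick sanity check on the header fields, the duration of a recording follows directly from the byte rate written above (sampleRate × channels × bitsPerSample / 8). A small helper, assuming the same 16 kHz / mono / 16-bit parameters:

// Duration in seconds implied by a raw PCM buffer, using the same parameters as the WAV header above.
function pcmDurationSeconds(pcmBytes: Uint8Array, sampleRate: number = 16000,
  channels: number = 1, bitsPerSample: number = 16): number {
  const byteRate = sampleRate * channels * bitsPerSample / 8; // bytes per second, same as the ByteRate field
  return pcmBytes.byteLength / byteRate;
}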

The audio is saved at /data/app/el2/100/base/&lt;your app's bundle name&gt;/files/MyVoice.wav.

You can pull the audio file to your PC with an hdc command, e.g. `hdc file recv /data/app/el2/100/base/&lt;bundle name&gt;/files/MyVoice.wav ./MyVoice.wav`.

4. Converting image.Image to image.PixelMap

The image we get holds JPEG-encoded picture data, so we first have to wrap its contents in an ImageSource object before it can be decoded into a PixelMap.
The decodingOptions below configure the decoding; see the documentation for the details of each field.

// this.receiver is an image.ImageReceiver
this.receiver.readNextImage((err: BusinessError, nextImage: image.Image) => {
  console.log(TAG, "read image");
  if (err || nextImage === undefined) {
    return;
  }


  nextImage.getComponent(image.ComponentType.JPEG, async (err: BusinessError, imgComponent: image.Component) => {
    if (err || imgComponent === undefined) {
      return;
    }

    const imageSourceApi: image.ImageSource = image.createImageSource(imgComponent.byteBuffer);

    let decodingOptions: image.DecodingOptions = {
      sampleSize: 1,
      editable: true,
      desiredSize: nextImage.size,
      rotate: 0,
      desiredPixelFormat: image.PixelMapFormat.RGBA_8888,
      desiredRegion: { size:nextImage.size, x: 0, y: 0 },
      index: 0
    };
    let pixelmap = imageSourceApi.createPixelMapSync(decodingOptions);
    // Do whatever you need with the PixelMap here; imageDialogCallback is this page's own callback
    this.imageDialogCallback(pixelmap)

    nextImage.release();
  });

});
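Going the other way, a decoded PixelMap can be re-encoded to JPEG bytes with image.ImagePacker, for example before uploading or saving it. A minimal sketch; the helper name and the quality value 90 are my own choices:

import { image } from '@kit.ImageKit';

// Sketch: re-encode a PixelMap into JPEG bytes with ImagePacker.
async function pixelMapToJpeg(pixelMap: image.PixelMap): Promise<ArrayBuffer> {
  const packer: image.ImagePacker = image.createImagePacker();
  const options: image.PackingOption = { format: 'image/jpeg', quality: 90 };
  const data: ArrayBuffer = await packer.packing(pixelMap, options);
  await packer.release();
  return data;
}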

5. Checking custom data types

typeof can only distinguish a handful of primitive types. If you need to check whether a value matches a data structure you defined yourself, you need a different approach, for example a user-defined type guard as shown below.

if (isResVoiceIdentify(value)) {
  // Do whatever you need here
}

function isResVoiceIdentify(value: any): value is resVoiceIdentify {
  // Check against the structure of the resVoiceIdentify type, e.g. that it has a `voiceId` field:
  return value && typeof value === 'object' && 'voiceId' in value;
}
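For completeness, here is a sketch of how the guard can be used. The resVoiceIdentify interface below is hypothetical; only the voiceId field comes from the note above:

// Hypothetical shape of the response this note refers to.
interface resVoiceIdentify {
  voiceId: string;
  text?: string;
}

// Narrow an untyped value (e.g. parsed from JSON) before using its fields.
const raw: object = JSON.parse('{"voiceId":"abc123","text":"hello"}') as object;
if (isResVoiceIdentify(raw)) {
  console.info(`voiceId = ${raw.voiceId}`);
}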

© The copyright belongs to the author. Please credit the source when reposting; unauthorized reuse may incur legal liability.
Last edited 2025-5-28 16:35:18