一、二维码生成
- 直接用QRCode组件生成二维码
typescript
QRCode(this.item.id).width(160).height(160)
二、截图保存
- 关于图片我们第一时间想到的API肯定是
photoAccessHelper
重点 :
1.componentSnapshot
截图核心API
2.image
创建imagePacker实例的API
3.写入到相册fileIo/fs
4.SaveButton
保存控件
使用 componentSnapshot
实现组件截图:
typescript
import { componentSnapshot } from '@kit.ArkUI'

/**
 * Demo page: renders a QRCode inside a Row that serves as the snapshot
 * target; tapping the button captures that Row into a PixelMap which is
 * then displayed in the Image below.
 */
@Entry
@Component
struct cssPage {
  // Captured snapshot of the 'myrow' subtree; null until the button is tapped.
  @State
  img: PixelMap | null = null

  build() {
    Column() {
      Row() {
        Button('点我截图')
          .onClick(() => {
            // Synchronously capture the component tree identified by id('myrow').
            this.img = componentSnapshot.getSync('myrow')
          })
        // QRCode component encoding the literal string '13800'.
        QRCode('13800')
          .color(Color.Green)
          // fix: original called .height(200) twice and never set the width.
          .width(200)
          .height(200)
      }
      .layoutWeight(1)
      .border({ width: 2, color: Color.Pink })
      .backgroundColor(Color.Orange)
      .width('100%')
      .id('myrow') // snapshot target id referenced by getSync above

      Row() {
        // Shows the captured PixelMap (blank until a snapshot exists).
        Image(this.img)
          .width('80%')
      }
      .layoutWeight(1)
      .justifyContent(FlexAlign.Center)
      .alignItems(VerticalAlign.Center)
    }
  }
}
使用 SaveButton
安全控件将截图保存到相册:
typescript
import { image } from '@kit.ImageKit'

// Snapshot PixelMap produced earlier via componentSnapshot.
@State img: PixelMap

// Security control: tapping SaveButton grants a short-lived permission to
// write into the user's photo album without a full permission dialog.
SaveButton()
  .onClick(async () => {
    // 1. Create an ImagePacker and encode the PixelMap into JPEG bytes.
    const imagePackerApi = image.createImagePacker();
    let imgBuffer = await imagePackerApi.packing(this.img, { format: 'image/jpeg', quality: 100 })
    // 2. Write imgBuffer into the photo album.
    const context = getContext();
    let helper = photoAccessHelper.getPhotoAccessHelper(context);
    // createAsset must run within 10 s of the click; after that the
    // temporary permission granted by SaveButton is revoked.
    let uri = await helper.createAsset(photoAccessHelper.PhotoType.IMAGE, 'jpeg');
    // A file opened through the returned uri can keep being written to
    // with no time limit.
    // Logger.debug(uri)
    let file = await fileIo.open(uri, fileIo.OpenMode.READ_WRITE | fileIo.OpenMode.CREATE);
    // Write the encoded image bytes (imgBuffer is defined — it was awaited).
    fileIo.writeSync(file.fd, imgBuffer);
    // Close the file descriptor.
    await fileIo.close(file.fd);
    Logger.debug('图片写入成功')
  }) // fix: original snippet never closed the onClick(...) call
三、真机扫码
- 当上面的二维码写入到相册后,就可以给朋友分享二维码帮忙砍一刀了
- 然后我们用app里面的相机按钮来打开摄像头扫描二维码,通过扫码到的信息(id)跳到指定页面
- 核心API是
scanBarCode
typescript
// 1. Add the scan icon to the toolbar row
Row({space:5}){
Image($r('app.media.icon_point'))
.width(24)
.aspectRatio(1)
.onClick(() => {
// Launch the QR-code scanner (scanQuestionCode, defined below).
this.scanQuestionCode()
})
.layoutWeight(1)
// NOTE(review): HdSearch / HdClockIn are project components — presumably a
// search bar and a clock-in button; verify against their definitions.
HdSearch()
.layoutWeight(3)
HdClockIn()
.layoutWeight(1)
}
// 2. Scan a question QR code and navigate to the detail page with its id.
async scanQuestionCode() {
  // canIUse checks whether this device supports the scan syscap — the same
  // capability can differ across device classes (phone/tablet/wearable).
  if (canIUse('SystemCapability.Multimedia.Scan.ScanBarcode')) {
    const result = await scanBarcode.startScanForResult(getContext(this))
    if (result.originalValue) {
      try {
        // fix: await the navigation — pushUrl returns a Promise, and
        // without await a rejection escapes this try/catch, so the
        // "not found" toast below could never fire.
        await router.pushUrl({
          url: 'pages/QuestionDetailPage',
          params: {
            id: result.originalValue,
          }
        })
      } catch (e) {
        promptAction.showToast({ message: '没有找到试题' })
      }
    }
  }
}
注:系统定义了API canIUse帮助开发者来判断该设备是否支持某个特定的syscap。即使是相同的系统能力,在不同的设备下,也会有能力的差异。比如同是摄像头的能力,平板设备优于智能穿戴设备。
四、上传相册内的图片
1.调起相册选择图片,配置相关的参数(一张,图片)
2.拷贝选择的图片到缓存目录
3.利用request.uploadFile 进行图片上传
在监听上传进度中我们可以通过emitter来跨线程发送数据(发布订阅)实时监听上传进度
五、语音识别
- 核心API是
speechRecognizer
,文本转语音有兴趣的可以查看我的上一篇文章
typescript
let asrEngine: speechRecognizer.SpeechRecognitionEngine;
let audioCapturer: audio.AudioCapturer;
@Entry
...
// Create the speech-recognition engine via the callback-style API; on
// success, cache it in the module-level asrEngine and attach listeners.
private createByCallback() {
  // Engine creation parameters: mainland-China locale, short-speech mode.
  const engineExtras: Record<string, Object> = { "locate": "CN", "recognizerMode": "short" };
  const engineParams: speechRecognizer.CreateEngineParams = {
    language: 'zh-CN',
    online: 1,
    extraParams: engineExtras
  };
  // Ask the kit to build the engine; the result arrives asynchronously.
  speechRecognizer.createEngine(engineParams, (err: BusinessError, engine:
    speechRecognizer.SpeechRecognitionEngine) => {
    if (err) {
      promptAction.showToast({ message: `创建引擎失败: ${err.message}` })
      return
    }
    // Keep the engine instance and wire up the recognition callbacks.
    asrEngine = engine;
    this.setListener()
  });
}
// Start a listening session on the engine using 16 kHz / mono / 16-bit
// PCM audio and short-speech recognition mode.
private startListeningForRecording() {
  const pcmInfo: speechRecognizer.AudioInfo = { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 }
  const sessionExtras: Record<string, Object> = {
    "recognitionMode": 0,     // 0: short-speech recognition, 1: long-speech
    "vadBegin": 2000,         // ms before recognition begins (default 2000)
    "vadEnd": 3000,           // ms of trailing silence ending recognition (default 3000)
    "maxAudioDuration": 20000 // maximum recording length in ms (default 20000)
  }
  const startParams: speechRecognizer.StartParams = {
    sessionId: this.sessionId,
    audioInfo: pcmInfo,
    extraParams: sessionExtras
  }
  asrEngine.startListening(startParams);
};
// Microphone speech-to-text: create the engine if needed, start a
// recognition session, then stream captured PCM buffers into it.
private async startRecording() {
  if (!asrEngine) {
    this.createByCallback()
    // Crude wait for the async engine creation above — TODO: chain off the
    // createEngine callback instead of sleeping a fixed second.
    await this.sleep(1000)
  }
  this.startListeningForRecording();
  // The capture format MUST match the AudioInfo passed to startListening
  // (16 kHz, mono, 16-bit PCM). fix: original used 48 kHz stereo, feeding
  // the recognizer audio in a format it was not configured for.
  let audioStreamInfo: audio.AudioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_16000,
    channels: audio.AudioChannel.CHANNEL_1,
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
  };
  let audioCapturerInfo: audio.AudioCapturerInfo = {
    source: audio.SourceType.SOURCE_TYPE_MIC,
    capturerFlags: 0
  };
  let audioCapturerOptions: audio.AudioCapturerOptions = {
    streamInfo: audioStreamInfo,
    capturerInfo: audioCapturerInfo
  };
  try {
    // fix: use the promise form and await it — the original callback form
    // assigned audioCapturer asynchronously, so the `if (audioCapturer)`
    // check below could run before the capturer existed and start() was
    // never called on a fresh launch.
    audioCapturer = await audio.createAudioCapturer(audioCapturerOptions);
    console.info('AudioCapturer Created : Success : SUCCESS');
  } catch (err) {
    console.error(`AudioCapturer Created : Error: ${err}`);
    return;
  }
  if (audioCapturer) {
    await audioCapturer.start()
    // Forward every captured PCM buffer to the active ASR session.
    audioCapturer.on('readData', (dataBuffer: ArrayBuffer) => {
      let uint8Array: Uint8Array = new Uint8Array(dataBuffer);
      asrEngine.writeAudio(this.sessionId, uint8Array);
    })
  }
};
// Register recognition callbacks on the engine: toast progress updates,
// publish streaming results via emitter (cross-thread pub/sub), and tear
// down the audio capturer when recognition completes or fails.
private setListener() {
  // Stop and release the microphone capturer, if one is active.
  const releaseCapturer = () => {
    if (audioCapturer) {
      audioCapturer.stop()
      audioCapturer.release()
    }
  }
  const listener: speechRecognizer.RecognitionListener = {
    // Recognition session started successfully.
    onStart: (sessionId: string, eventMessage: string) => {
      promptAction.showToast({ message: '开始识别' })
    },
    // Engine event notifications (unused here).
    onEvent: (sessionId: string, eventCode: number, eventMessage: string) => {
    },
    // Result callback — fires for both intermediate and final results.
    onResult: (sessionId: string, result: speechRecognizer.SpeechRecognitionResult) => {
      // Publish the partial text so subscribed UI code can show it live.
      emitter.emit({eventId:0},{data:{str:result.result}})
      if (result.isFinal) {
        promptAction.showToast({ message: `识别结果2: ${JSON.stringify(result,null,2)}` })
      }
    },
    // Recognition finished normally.
    onComplete: (sessionId: string, eventMessage: string) => {
      promptAction.showToast({ message: '识别完成'+JSON.stringify(eventMessage,null,2) })
      releaseCapturer()
    },
    // Recognition failed.
    onError: (sessionId: string, errorCode: number, errorMessage: string) => {
      promptAction.showToast({ message: `识别出错: ${errorMessage}` })
      releaseCapturer()
    },
  }
  // Attach the callbacks to the engine.
  asrEngine.setListener(listener);
};
}