Skip to content
This repository was archived by the owner on Jan 27, 2026. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions lib/core/services/repository.dart
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import 'dart:async';

/// Base contract for data repositories that expose an input stream and
/// an asynchronous predict/store pipeline.
///
/// [I] is the input type flowing through [stream]; [O] is the output type
/// produced by [predict].
abstract class Repository<I, O> {
  /// Broadcast controller backing [stream]; broadcast so multiple
  /// listeners (e.g. a bloc and a UI layer) may subscribe.
  final StreamController<I> _controller = StreamController.broadcast();

  /// Mutable counter exposed to consumers.
  ///
  /// NOTE(review): [add] resets this to 0 on every event while
  /// `InferenceBloc._onInput` post-increments it for frame sampling —
  /// confirm the reset-per-add interaction is intentional.
  var count = 0;

  /// Events pushed via [add], delivered to all active listeners.
  Stream<I> get stream => _controller.stream;

  /// Publishes [data] to [stream]; a no-op once [dispose] has been called.
  void add(I data) {
    if (_controller.isClosed) return;
    count = 0;
    _controller.add(data);
  }

  /// Prepares the repository (e.g. loads models) before use.
  Future<void> initialize();

  /// Runs inference on [input] with optional [params]; null when no result.
  Future<O?> predict(I input, Map<String, dynamic>? params);

  /// Persists an [input]/[output] pair with optional [params].
  Future<void> store(I input, O output, Map<String, dynamic>? params);

  /// Releases the stream; further [add] calls are silently ignored.
  void dispose() {
    _controller.close();
  }
}
24 changes: 14 additions & 10 deletions lib/features/assessment/view/ctsib/instruction_view.dart
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,21 @@ class InstructionView extends StatelessWidget {
final int index;
const InstructionView(this.title, this.index, {super.key});

static const instructions = [
"Patients' landmark boundaries must be as close to the fixed bounding box as possible",
"Camera position must be static and level",
"Lighting must be sufficient to clearly see the patient",
];

@override
Widget build(BuildContext context) {
var formattedInstructionList = instructions.map((instruction) {
return Padding(
padding: const EdgeInsets.all(8.0),
child: Text(instruction, textAlign: TextAlign.center),
);
});

return BaseView(
pageTitle: "CTSIB Instructions",
child: Column(
Expand Down Expand Up @@ -40,16 +53,7 @@ class InstructionView extends StatelessWidget {
const Text("Before getting started: ",
style: Styles.heading3, textAlign: TextAlign.center),
const Spacer(),
const Padding(
padding: EdgeInsets.all(8.0),
child: Text("Ensure device is kept still"),
),
const Padding(
padding: EdgeInsets.all(8.0),
child: Text(
"Record all tests from same relative position to patient",
textAlign: TextAlign.center),
),
...formattedInstructionList,
const Spacer(),
Padding(
padding: const EdgeInsets.all(16.0),
Expand Down
68 changes: 29 additions & 39 deletions lib/features/inference/bloc/inference_bloc.dart
Original file line number Diff line number Diff line change
@@ -1,77 +1,68 @@
import 'dart:async';
import 'dart:developer';
import 'dart:isolate';
import 'package:camera/camera.dart';
import 'package:flutter_bloc/flutter_bloc.dart';
import 'package:equatable/equatable.dart';
import 'package:visualpt/features/inference/service/ai_model.dart';
import 'package:visualpt/features/video/service/video_repository.dart';
import 'package:visualpt/features/inference/service/inference_service.dart';
import 'package:visualpt/core/services/repository.dart';
import 'package:bloc_concurrency/bloc_concurrency.dart';

part 'inference_event.dart';
part 'inference_state.dart';

class InferenceBloc extends Bloc<InferenceEvent, InferenceState> {
InferenceBloc(this.videoRepository, this.storeData)
class InferenceBloc<I, O>
extends Bloc<InferenceEvent<I, O>, InferenceState<I, O>> {
InferenceBloc(this._inferenceRepository, this.storeData)
: super(const InferenceLoading()) {
on<InferenceInit>(_onInit);
on<InferenceStart>(_onStart);
on<InferenceToggle>(_onToggle);
on<InferenceInput>(_onInput, transformer: concurrent());
on<InferenceOutput>(_onOutput, transformer: concurrent());
on<InferenceEnd>(_onEnd);
classification = true;
on<InferenceInit<I, O>>(_onInit);
on<InferenceStart<I, O>>(_onStart);
on<InferenceInput<I, O>>(_onInput, transformer: concurrent());
on<InferenceOutput<I, O>>(_onOutput, transformer: concurrent());
on<InferenceEnd<I, O>>(_onEnd);

storageBacklog = 0;

//TODO dynamically load models through constructor
_inferenceService = InferenceService();
//_inferenceService = InferenceService();
}
final VideoRepository videoRepository;
final Repository<I, O> _inferenceRepository;
final bool storeData;

static const sampleRateFrames = 30;
bool isReady = false;
bool processing = false;
int frame = 0;
late bool classification;
late InferenceService _inferenceService;
//late InferenceService _inferenceService;
late int storageBacklog;
late StreamSubscription<CameraImage> _cameraStreamSubscription;
late StreamSubscription<I> _subscription;
final nullResponsePort = ReceivePort();

//TODO: Figure out what this is doing here
FutureOr<void> _onInit(
InferenceInit event, Emitter<InferenceState> emit) async {
log("Initializing inference bloc");
await _inferenceService.initialize();
await _inferenceRepository.initialize();
isReady = true;
emit(const InferenceStopped());
emit(InferenceStopped<I, O>());
}

FutureOr<void> _onStart(
InferenceStart event, Emitter<InferenceState> emit) async {
_cameraStreamSubscription = videoRepository.frameStream.listen((image) {
_subscription = _inferenceRepository.stream.listen((data) {
if (isReady) {
add(InferenceInput(input: image, ratio: event.ratio));
add(InferenceInput(input: data, ratio: event.ratio));
}
});
emit(const InferenceRunning(null, 1));
}

FutureOr<void> _onToggle(
InferenceToggle event, Emitter<InferenceState> emit) {
classification = event.classification;
emit(InferenceRunning<I, O>(null, 1));
}

FutureOr<void> _onInput(
InferenceInput event, Emitter<InferenceState> emit) async {
if (frame++ % sampleRateFrames == 0) {
if (_inferenceRepository.count++ % sampleRateFrames == 0) {
storageBacklog++;
}
if (processing) return null;
processing = true;
final data = await _inferenceService
.inference(cameraImage: event.input, params: {"ratio": event.ratio});
final data =
await _inferenceRepository.predict(event.input, {"ratio": event.ratio});
processing = false;
if (!isClosed && !emit.isDone) {
add(InferenceOutput(
Expand All @@ -85,11 +76,11 @@ class InferenceBloc extends Bloc<InferenceEvent, InferenceState> {
if (event.output != null && storageBacklog > 0 && storeData) {
//TODO: Delegate to InferenceRepository to check if all conditions are being met and store data
storageBacklog--;
_inferenceService.store(
event.input, classification, event.ratio, event.output!);
_inferenceRepository
.store(event.input, event.output!, {"ratio": event.ratio});
}
if (!isClosed && !emit.isDone) {
emit(InferenceRunning(event.output, event.ratio));
emit(InferenceRunning<I, O>(event.output, event.ratio));
}
} catch (e) {
log("Error: $e");
Expand All @@ -99,9 +90,8 @@ class InferenceBloc extends Bloc<InferenceEvent, InferenceState> {
FutureOr<void> _onEnd(InferenceEnd event, Emitter<InferenceState> emit) {
isReady = false;
nullResponsePort.close();
_inferenceService.dispose();
_cameraStreamSubscription.cancel();
videoRepository.dispose();
emit(const InferenceStopped());
_subscription.cancel();
_inferenceRepository.dispose();
emit(InferenceStopped<I, O>());
}
}
22 changes: 11 additions & 11 deletions lib/features/inference/bloc/inference_event.dart
Original file line number Diff line number Diff line change
@@ -1,46 +1,46 @@
part of 'inference_bloc.dart';

abstract class InferenceEvent extends Equatable {
abstract class InferenceEvent<I, O> extends Equatable {
const InferenceEvent();

@override
List<Object?> get props => [];
}

class InferenceInit extends InferenceEvent {
class InferenceInit<I, O> extends InferenceEvent<I, O> {
const InferenceInit();
}

class InferenceStart extends InferenceEvent {
class InferenceStart<I, O> extends InferenceEvent<I, O> {
final double ratio;
const InferenceStart(this.ratio);
}

class InferenceToggle extends InferenceEvent {
class InferenceToggle<I, O> extends InferenceEvent<I, O> {
final bool classification;
const InferenceToggle(this.classification);
}

class InferenceInput extends InferenceEvent {
final CameraImage input;
class InferenceInput<I, O> extends InferenceEvent<I, O> {
final I input;
final double ratio;
const InferenceInput({required this.input, required this.ratio});

@override
List<Object> get props => [input];
List<Object> get props => [input.toString()];
}

class InferenceOutput extends InferenceEvent {
final CameraImage input;
class InferenceOutput<I, O> extends InferenceEvent<I, O> {
final I input;
final double ratio;
final AiModelOutput? output;
final O? output;
const InferenceOutput(
{required this.input, required this.ratio, required this.output});

@override
List<Object?> get props => [output];
}

class InferenceEnd extends InferenceEvent {
class InferenceEnd<I, O> extends InferenceEvent<I, O> {
const InferenceEnd();
}
12 changes: 6 additions & 6 deletions lib/features/inference/bloc/inference_state.dart
Original file line number Diff line number Diff line change
@@ -1,36 +1,36 @@
part of 'inference_bloc.dart';

abstract class InferenceState extends Equatable {
final AiModelOutput? response;
abstract class InferenceState<I, O> extends Equatable {
final O? response;
const InferenceState(this.response);

@override
List<Object> get props => [];
}

class InferenceRunning extends InferenceState {
class InferenceRunning<I, O> extends InferenceState<I, O> {
final double ratio;
const InferenceRunning(super.response, this.ratio);

@override
List<Object> get props => [response ?? "", ratio];
}

class InferenceStopped extends InferenceState {
class InferenceStopped<I, O> extends InferenceState<I, O> {
const InferenceStopped() : super(null);

@override
List<Object> get props => [];
}

class InferenceLoading extends InferenceState {
class InferenceLoading<I, O> extends InferenceState<I, O> {
const InferenceLoading() : super(null);

@override
List<Object> get props => [];
}

class InferenceError extends InferenceState {
class InferenceError<I, O> extends InferenceState<I, O> {
final Exception error;
const InferenceError({required this.error}) : super(null);

Expand Down
4 changes: 3 additions & 1 deletion lib/features/inference/constants/model_file.dart
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,7 @@ mixin ModelFile {
static const String faceDetection = 'models/face_detection.tflite';
static const String faceMesh = 'models/face_landmark.tflite';
static const String hands = 'models/hand_landmark.tflite';
static const String pose = 'models/pose_landmark_heavy.tflite';
static const String poseDetection = 'models/pose_landmark_heavy.tflite';
static const String poseClassification =
'models/pose_classification.tflite'; //TODO:
}
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ class FaceDetection extends AiModel {
FaceBboxOutput? runFaceDetector(Map<String, dynamic> params) {
final faceDetection = FaceDetection(
interpreter: Interpreter.fromAddress(params['detectorAddress']));
final image = ImageUtils.convertCameraImage(params['cameraImage'])!;
final image = ImageUtils.convertCameraImage(params['input'])!;
final result = faceDetection.predict(image, {});

return result;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ class FaceMesh extends AiModel {
FaceOutput? runFaceMesh(Map<String, dynamic> params) {
final faceMesh =
FaceMesh(interpreter: Interpreter.fromAddress(params['detectorAddress']));
final image = ImageUtils.convertCameraImage(params['cameraImage']);
final image = ImageUtils.convertCameraImage(params['input']);
final result = faceMesh.predict(image!, {});

return result;
Expand Down
2 changes: 1 addition & 1 deletion lib/features/inference/service/hands/hands_service.dart
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ class Hands extends AiModel {
HandsOutput? runHandDetector(Map<String, dynamic> params) {
final hands =
Hands(interpreter: Interpreter.fromAddress(params['detectorAddress']));
final image = ImageUtils.convertCameraImage(params['cameraImage']);
final image = ImageUtils.convertCameraImage(params['input']);
final result = hands.predict(image!, {});

return result;
Expand Down
Loading