Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 5 additions & 20 deletions .metadata
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
# This file should be version controlled and should not be manually edited.

version:
revision: "8495dee1fd4aacbe9de707e7581203232f591b2f"
revision: "67323de285b00232883f53b84095eb72be97d35c"
channel: "stable"

project_type: app
Expand All @@ -13,26 +13,11 @@ project_type: app
migration:
platforms:
- platform: root
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
- platform: android
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
- platform: ios
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
- platform: linux
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
- platform: macos
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
create_revision: 67323de285b00232883f53b84095eb72be97d35c
base_revision: 67323de285b00232883f53b84095eb72be97d35c
- platform: web
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
- platform: windows
create_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
base_revision: 8495dee1fd4aacbe9de707e7581203232f591b2f
create_revision: 67323de285b00232883f53b84095eb72be97d35c
base_revision: 67323de285b00232883f53b84095eb72be97d35c

# User provided section

Expand Down
169 changes: 115 additions & 54 deletions lib/main.dart
Original file line number Diff line number Diff line change
Expand Up @@ -46,21 +46,32 @@ class TranscriptionScreen extends StatefulWidget {
State<TranscriptionScreen> createState() => _TranscriptionScreenState();
}

class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTickerProviderStateMixin {
class _TranscriptionScreenState extends State<TranscriptionScreen>
with SingleTickerProviderStateMixin {
final _audioRecorder = AudioRecorder();
bool _isRecording = false;
String _transcription = '';
String _recordingPath = '';
bool _isTranscribing = false;
bool _isProcessing = false;
String selectedValue = 'gemma-3-27b-it';

final List<String> items = [
'gemma-3-27b-it',
'gemini-2.5-flash',
'gemma-3-12b-it',
'gemini-3-flash',
'gemini-2.0-flash'
];

// Data for screens
String _formattedTranscription = '';
String _summaryContent = '';
String _prescriptionContent = '';

// Chatbot service
final ChatbotService _chatbotService = ChatbotService();
ChatbotService get _chatbotService =>
ChatbotService(model: selectedValue); //Implemented a getter function
Comment on lines +73 to +74
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Getter creates a fresh ChatbotService per call — model can change between summary and prescription

_chatbotService reads selectedValue at invocation time. Inside _processWithGemini the getter is called twice (once for summary, once for prescription). Because the DropdownButton is not disabled during processing, a user can change the selected model between those two awaits, resulting in the summary and prescription being generated by different models for the same transcript.

Snapshot the selected model once at the start of _processWithGemini:

♻️ Proposed fix
-  ChatbotService get _chatbotService =>
-      ChatbotService(model: selectedValue); //Implemented a getter function
+  // Instantiate ChatbotService with the snapshot of selectedValue for the processing run.

In _processWithGemini:

-  Future<void> _processWithGemini(String transcription) async {
+  Future<void> _processWithGemini(String transcription) async {
+    final chatbotService = ChatbotService(model: selectedValue);
     try {
       final summary = await _chatbotService.getGeminiResponse(
+      final summary = await chatbotService.getGeminiResponse(
           "Generate a summary of the conversation based on this transcription: $transcription");

       final prescription = await _chatbotService.getGeminiResponse(
+      final prescription = await chatbotService.getGeminiResponse(
           "Generate a prescription based on the conversation in this transcription: $transcription");
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/main.dart` around lines 73 - 74, The _chatbotService getter reads
selectedValue each time which can change between awaits in _processWithGemini
causing summary and prescription to use different models; fix by snapshotting
selectedValue at the start of _processWithGemini into a local variable (e.g.,
final model = selectedValue) and then instantiate a single ChatbotService
instance (e.g., final chatbot = ChatbotService(model: model)) and use that
chatbot for both the summary and prescription calls instead of calling the
_chatbotService getter twice.


// For waveform animation
late AnimationController _animationController;
Expand Down Expand Up @@ -120,7 +131,8 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
try {
if (await _audioRecorder.hasPermission()) {
final directory = await getTemporaryDirectory();
_recordingPath = '${directory.path}/recording_${DateTime.now().millisecondsSinceEpoch}.m4a';
_recordingPath =
'${directory.path}/recording_${DateTime.now().millisecondsSinceEpoch}.m4a';

await _audioRecorder.start(
RecordConfig(
Expand Down Expand Up @@ -210,12 +222,14 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi

if (response.statusCode == 200) {
final decodedResponse = json.decode(response.body);
final result = decodedResponse['results']['channels'][0]['alternatives'][0]['transcript'];
final result = decodedResponse['results']['channels'][0]['alternatives']
[0]['transcript'];

setState(() {
_isTranscribing = false;
_transcription = result.isNotEmpty ? result : 'No speech detected';
Comment on lines +225 to 230
Copy link
Copy Markdown

@coderabbitai coderabbitai bot Feb 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

result can be null — result.isNotEmpty then throws a NoSuchMethodError on an unexpected Deepgram response shape

If the Deepgram response doesn't include the expected path (results > channels[0] > alternatives[0] > transcript), result is null. The call to result.isNotEmpty on Line 230 then throws a NoSuchMethodError which is caught and surfaces only as a generic 'Error during transcription'.

🛡️ Proposed fix
-        final result = decodedResponse['results']['channels'][0]['alternatives']
-            [0]['transcript'];
+        final result = (decodedResponse['results']?['channels'] as List?)
+                ?.firstOrNull?['alternatives']?[0]?['transcript'] as String? ?? '';

         setState(() {
           _isTranscribing = false;
-          _transcription = result.isNotEmpty ? result : 'No speech detected';
+          _transcription = result.isNotEmpty ? result : 'No speech detected';
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
final result = decodedResponse['results']['channels'][0]['alternatives']
[0]['transcript'];
setState(() {
_isTranscribing = false;
_transcription = result.isNotEmpty ? result : 'No speech detected';
final result = (decodedResponse['results']?['channels'] as List?)
?.firstOrNull?['alternatives']?[0]?['transcript'] as String? ?? '';
setState(() {
_isTranscribing = false;
_transcription = result.isNotEmpty ? result : 'No speech detected';
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@lib/main.dart` around lines 225 - 230, The variable result extracted from
decodedResponse['results']['channels'][0]['alternatives'][0]['transcript'] may
be null; update the code around result and the setState block to guard against
null before calling isNotEmpty—e.g., compute a safe string (use a null check or
null-aware operator) like final safeResult = result ?? '' and then set
_transcription = safeResult.isNotEmpty ? safeResult : 'No speech detected' while
still setting _isTranscribing = false; ensure you reference the existing symbols
result, decodedResponse, _isTranscribing, and _transcription in the change.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will look into it in my next PR

not in the current scope of this feature

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@TheAnshulPrakash, understood! Since you're planning to address this in a future PR, would you like me to create a GitHub issue to track the null safety improvement for the Deepgram transcription response handling? This would help ensure it doesn't get overlooked.

_formattedTranscription = _transcription; // Store raw transcription directly
_formattedTranscription =
_transcription; // Store raw transcription directly
_isProcessing = true;
});

Expand All @@ -225,7 +239,8 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
print('=============================================');

// Send to Gemini for processing if we have a valid transcription
if (_transcription.isNotEmpty && _transcription != 'No speech detected') {
if (_transcription.isNotEmpty &&
_transcription != 'No speech detected') {
await _processWithGemini(_transcription);
} else {
setState(() {
Expand Down Expand Up @@ -261,13 +276,11 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi

// Prompt 2: Generate summary
final summary = await _chatbotService.getGeminiResponse(
"Generate a summary of the conversation based on this transcription: $transcription"
);
"Generate a summary of the conversation based on this transcription: $transcription");

// Prompt 3: Generate prescription
final prescription = await _chatbotService.getGeminiResponse(
"Generate a prescription based on the conversation in this transcription: $transcription"
);
"Generate a prescription based on the conversation in this transcription: $transcription");

setState(() {
// _formattedTranscription = formattedTranscription;
Expand All @@ -277,7 +290,6 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
});

print('\n============ GEMINI PROCESSING COMPLETE ============');

} catch (e) {
setState(() {
_isProcessing = false;
Expand Down Expand Up @@ -310,7 +322,8 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
),
child: SafeArea(
child: Padding(
padding: const EdgeInsets.symmetric(horizontal: 24.0, vertical: 16.0),
padding:
const EdgeInsets.symmetric(horizontal: 24.0, vertical: 16.0),
child: Column(
crossAxisAlignment: CrossAxisAlignment.start,
children: [
Expand All @@ -324,18 +337,58 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
),
),
const SizedBox(height: 8),
Text(
_isRecording
? 'Recording your voice...'
: _isTranscribing
? 'Transcribing your voice...'
: _isProcessing
? 'Processing with Gemini...'
: 'Tap the mic to begin',
style: const TextStyle(
fontSize: 16,
color: Colors.white70,
),
Row(
mainAxisAlignment: MainAxisAlignment.spaceBetween,
children: [
Text(
_isRecording
? 'Recording your voice...'
: _isTranscribing
? 'Transcribing your voice...'
: _isProcessing
? 'Processing with Gemini...'
: 'Tap the mic to begin',
style: const TextStyle(
fontSize: 16,
color: Colors.white70,
),
),
Row(
children: [
Text(
"Choose a model: ",
style: TextStyle(color: Colors.white70),
),
SizedBox(
width: 20,
),
DropdownButton(
padding: EdgeInsets.only(left: 5.0),
value: selectedValue,
icon: const Icon(Icons.arrow_drop_down),
elevation: 16,
onChanged: (String? newValue) {
if (newValue != null) {
setState(() {
selectedValue = newValue;
});
}
},
items: items.map((String item) {
return DropdownMenuItem(
value: item,
child: Text(
item,
style: TextStyle(
color: const Color.fromARGB(255, 127, 127,
127)), // Currently no theming support, hence hardcoded color
),
);
}).toList(),
),
],
),
],
),
const SizedBox(height: 30),

Expand All @@ -351,7 +404,7 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
crossAxisAlignment: CrossAxisAlignment.end,
children: List.generate(
_waveformValues.length,
(index) {
(index) {
final value = _waveformValues[index];
return AnimatedContainer(
duration: const Duration(milliseconds: 100),
Expand All @@ -360,11 +413,11 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
decoration: BoxDecoration(
color: _isRecording
? HSLColor.fromAHSL(
1.0,
(280 + index * 2) % 360,
0.8,
0.7 + value * 0.2
).toColor()
1.0,
(280 + index * 2) % 360,
0.8,
0.7 + value * 0.2)
.toColor()
: Colors.white.withOpacity(0.5),
borderRadius: BorderRadius.circular(5),
),
Expand All @@ -380,7 +433,9 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
// Microphone button
Center(
child: GestureDetector(
onTap: (_isTranscribing || _isProcessing) ? null : _toggleRecording,
onTap: (_isTranscribing || _isProcessing)
? null
: _toggleRecording,
child: Container(
width: 100,
height: 100,
Expand All @@ -389,7 +444,8 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
color: _isRecording ? Colors.red : Colors.white,
boxShadow: [
BoxShadow(
color: (_isRecording ? Colors.red : Colors.white).withOpacity(0.3),
color: (_isRecording ? Colors.red : Colors.white)
.withOpacity(0.3),
spreadRadius: 8,
blurRadius: 20,
),
Expand All @@ -400,7 +456,9 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
child: Icon(
_isRecording ? Icons.stop : Icons.mic,
size: 50,
color: _isRecording ? Colors.white : Colors.deepPurple.shade800,
color: _isRecording
? Colors.white
: Colors.deepPurple.shade800,
),
),
),
Expand All @@ -423,20 +481,20 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
color: _isRecording
? Colors.red
: _isProcessing
? Colors.blue
: Colors.amber,
? Colors.blue
: Colors.amber,
),
),
Text(
_isRecording
? 'Recording in progress'
: _isTranscribing
? 'Processing audio...'
: _isProcessing
? 'Generating content with Gemini...'
: _transcription.isEmpty
? 'Press the microphone button to start'
: 'Ready to view results',
? 'Processing audio...'
: _isProcessing
? 'Generating content with Gemini...'
: _transcription.isEmpty
? 'Press the microphone button to start'
: 'Ready to view results',
style: const TextStyle(
fontSize: 16,
fontWeight: FontWeight.w500,
Expand All @@ -458,10 +516,11 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
'Transcription',
Icons.record_voice_over,
_formattedTranscription.isNotEmpty,
() => Navigator.push(
() => Navigator.push(
context,
MaterialPageRoute(
builder: (context) => TranscriptionDetailScreen(transcription: _formattedTranscription),
builder: (context) => TranscriptionDetailScreen(
transcription: _formattedTranscription),
),
),
),
Expand All @@ -471,10 +530,11 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
'Summary',
Icons.summarize,
_summaryContent.isNotEmpty,
() => Navigator.push(
() => Navigator.push(
context,
MaterialPageRoute(
builder: (context) => SummaryScreen(summary: _summaryContent),
builder: (context) =>
SummaryScreen(summary: _summaryContent),
),
),
),
Expand All @@ -484,10 +544,11 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
'Prescription',
Icons.medication,
_prescriptionContent.isNotEmpty,
() => Navigator.push(
() => Navigator.push(
context,
MaterialPageRoute(
builder: (context) => PrescriptionScreen(prescription: _prescriptionContent),
builder: (context) => PrescriptionScreen(
prescription: _prescriptionContent),
),
),
),
Expand All @@ -504,12 +565,12 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi

// Helper method to build navigation buttons
Widget _buildNavigationButton(
BuildContext context,
String title,
IconData icon,
bool isEnabled,
VoidCallback onPressed,
) {
BuildContext context,
String title,
IconData icon,
bool isEnabled,
VoidCallback onPressed,
) {
return SizedBox(
width: double.infinity,
child: ElevatedButton(
Expand Down Expand Up @@ -542,4 +603,4 @@ class _TranscriptionScreenState extends State<TranscriptionScreen> with SingleTi
),
);
}
}
}
Loading