/**
 * Audio Generation Module
 * UPDATED: Updates workflow progress after successful generation
 */

// ============================================
// Single Block Generation
// ============================================
/**
 * Generate TTS audio for a single editor block.
 *
 * Looks up the block element by id, validates that it has speakable text
 * (skipping image blocks and markdown-image content), resolves the voice
 * from the nearest preceding chapter marker, POSTs to /api/generate, and
 * stores the returned audio/transcription on the matching entry in the
 * module-level `editorBlocks` array. Updates the block's audio indicator
 * and the workflow progress UI on success.
 *
 * @param {string} blockId - DOM id of the .md-block element to process.
 * @returns {Promise<void>}
 */
async function generateBlockAudio(blockId) {
    const block = document.getElementById(blockId);
    if (!block) {
        console.error('Block not found:', blockId);
        return;
    }

    const blockType = block.dataset.blockType || 'paragraph';
    if (blockType === 'image') {
        alert('Cannot generate audio for image blocks.');
        return;
    }

    const textarea = block.querySelector('.md-block-textarea');
    const content = textarea ? textarea.value : '';

    if (!content.trim()) {
        alert('No text content to generate audio for.');
        return;
    }

    // FIXED: this condition was corrupted in the source. Reconstructed as a
    // markdown-image check — content like "![alt](url)" has no speakable text
    // even when the block type is not 'image'.
    if (content.trim().startsWith('![') && content.indexOf('](') !== -1) {
        alert('Cannot generate audio for image blocks.');
        return;
    }

    // Prefer an explicit per-block TTS override when present and non-empty.
    const ttsText = (block.dataset.ttsText && block.dataset.ttsText.trim()) ? block.dataset.ttsText : content;

    // Walk backwards through siblings to the nearest chapter marker to pick
    // the voice; fall back to the default voice if none is found.
    let voice = 'af_heart';
    let prevElement = block.previousElementSibling;
    while (prevElement) {
        if (prevElement.classList.contains('chapter-marker')) {
            voice = prevElement.dataset.voice || 'af_heart';
            break;
        }
        prevElement = prevElement.previousElementSibling;
    }

    showLoader('Generating Audio...', 'Creating speech and timestamps');

    try {
        const response = await fetch('/api/generate', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                text: ttsText,
                voice: voice,
                block_id: null
            })
        });

        const data = await response.json();

        // The API reports failures via an `error` field in the JSON body.
        if (data.error) {
            throw new Error(data.error);
        }

        // Persist the generated audio on the in-memory block model.
        const blockData = editorBlocks.find(b => b.id === blockId);
        if (blockData) {
            blockData.audio_data = data.audio_data;
            blockData.audio_format = data.audio_format;
            blockData.transcription = data.transcription;
        }

        // Flip the visual indicator to the "has audio" state.
        const indicator = block.querySelector('.audio-indicator');
        if (indicator) {
            indicator.classList.remove('no-audio');
            indicator.classList.add('has-audio');
            indicator.title = 'Audio generated';
        }

        hideLoader();
        showNotification('Audio generated successfully!', 'success');

        // Update workflow to show audio is ready
        updateWorkflowProgress('audio-ready');

    } catch (error) {
        hideLoader();
        console.error('Generation error:', error);
        alert('Failed to generate audio: ' + error.message);
    }
}
// ============================================
// Chapter Generation
// ============================================
/**
 * Generate TTS audio for every eligible text block in a chapter.
 *
 * Collects all .md-block siblings between the given chapter marker and the
 * next chapter marker, skipping image blocks, empty blocks, and markdown-image
 * content. Each collected block is sent sequentially to /api/generate using
 * the chapter's voice; results are stored on the matching entries in the
 * module-level `editorBlocks` array. Shows per-block progress in the loader
 * and a summary notification when done.
 *
 * @param {string} chapterId - DOM id of the .chapter-marker element.
 * @returns {Promise<void>}
 */
async function generateChapterAudio(chapterId) {
    const chapterMarker = document.getElementById(chapterId);
    if (!chapterMarker) {
        console.error('Chapter marker not found:', chapterId);
        return;
    }

    const voice = chapterMarker.dataset.voice || 'af_heart';

    // Gather blocks that belong to this chapter (everything until the next
    // chapter marker) and that actually contain speakable text.
    const blocksToGenerate = [];
    let nextElement = chapterMarker.nextElementSibling;

    while (nextElement && !nextElement.classList.contains('chapter-marker')) {
        if (nextElement.classList.contains('md-block')) {
            const blockType = nextElement.dataset.blockType || 'paragraph';

            if (blockType === 'image') {
                nextElement = nextElement.nextElementSibling;
                continue;
            }

            const textarea = nextElement.querySelector('.md-block-textarea');
            const content = textarea ? textarea.value : '';

            if (!content.trim()) {
                nextElement = nextElement.nextElementSibling;
                continue;
            }

            // FIXED: this condition was corrupted in the source. Reconstructed
            // as a markdown-image check — content like "![alt](url)" has no
            // speakable text even when the block type is not 'image'.
            if (content.trim().startsWith('![') && content.indexOf('](') !== -1) {
                nextElement = nextElement.nextElementSibling;
                continue;
            }

            // Prefer an explicit per-block TTS override when present.
            const ttsText = (nextElement.dataset.ttsText && nextElement.dataset.ttsText.trim())
                ? nextElement.dataset.ttsText
                : content;

            blocksToGenerate.push({
                id: nextElement.id,
                text: ttsText,
                element: nextElement
            });
        }
        nextElement = nextElement.nextElementSibling;
    }

    if (blocksToGenerate.length === 0) {
        alert('No text blocks found in this chapter to generate audio for.');
        return;
    }

    showLoader(`Generating Chapter Audio...`, `Processing ${blocksToGenerate.length} blocks`);

    let successCount = 0;
    let errorCount = 0;

    // Generate sequentially so the loader shows meaningful progress and the
    // backend is not flooded with parallel TTS requests.
    for (let i = 0; i < blocksToGenerate.length; i++) {
        const blockInfo = blocksToGenerate[i];

        // FIXED: guard against a missing loader subtext element instead of
        // crashing mid-batch on a null dereference.
        const subtext = document.getElementById('loadingSubtext');
        if (subtext) {
            subtext.textContent = `Block ${i + 1} of ${blocksToGenerate.length}`;
        }

        try {
            const response = await fetch('/api/generate', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({
                    text: blockInfo.text,
                    voice: voice,
                    block_id: null
                })
            });

            const data = await response.json();

            // Per-block failures are counted and skipped, not fatal.
            if (data.error) {
                console.error(`Block ${blockInfo.id} error:`, data.error);
                errorCount++;
                continue;
            }

            // Persist the generated audio on the in-memory block model.
            const blockData = editorBlocks.find(b => b.id === blockInfo.id);
            if (blockData) {
                blockData.audio_data = data.audio_data;
                blockData.audio_format = data.audio_format;
                blockData.transcription = data.transcription;
            }

            // Flip the visual indicator to the "has audio" state.
            const indicator = blockInfo.element.querySelector('.audio-indicator');
            if (indicator) {
                indicator.classList.remove('no-audio');
                indicator.classList.add('has-audio');
                indicator.title = 'Audio generated';
            }

            successCount++;

        } catch (error) {
            console.error(`Block ${blockInfo.id} error:`, error);
            errorCount++;
        }
    }

    hideLoader();

    if (errorCount > 0) {
        showNotification(`Generated ${successCount} blocks, ${errorCount} failed`, 'warning');
    } else {
        showNotification(`Generated audio for ${successCount} blocks!`, 'success');
    }

    // Update workflow to show audio is ready
    if (successCount > 0) {
        updateWorkflowProgress('audio-ready');
    }
}