/**
 * Import vocabulary from JSON files into the Context table.
 *
 * Each vocab word becomes a Context record with:
 * - title: the vocabulary word
 * - grade: the grade/unit/lesson code (e.g., 101, 102, etc.)
 * - type: 'vocabulary'
 */
const { sequelize } = require('./config/database');
|
|
const { Context } = require('./models');
|
|
const fs = require('fs');
|
|
const path = require('path');
|
|
|
|
// Configuration
|
|
const DATA_DIR = path.join(__dirname, 'data', 'moveup');
|
|
const GRADES = ['g1', 'g2', 'g3', 'g4', 'g5'];
|
|
|
|
/**
 * Walk every grade folder under DATA_DIR, read each lesson JSON file, and
 * create one Context row per vocabulary word (type 'vocabulary').
 *
 * Expected file shape: an array of entries `{ grade, vocab: string[] }`.
 * Duplicate (title, grade, type) combinations are skipped. Exits the
 * process with code 0 on completion, 1 on a fatal error.
 *
 * @returns {Promise<void>} never resolves normally — always calls process.exit.
 */
async function importVocabToContext() {
  try {
    console.log('🔄 Starting vocab import to Context table...');

    await sequelize.authenticate();
    console.log('✅ Database connection OK');

    let totalImported = 0;
    let totalSkipped = 0;

    // Process each grade folder
    for (const gradeFolder of GRADES) {
      const gradeDir = path.join(DATA_DIR, gradeFolder);

      if (!fs.existsSync(gradeDir)) {
        console.log(`⚠️ Directory not found: ${gradeDir}`);
        continue;
      }

      // Get all JSON files in the grade directory
      const jsonFiles = fs.readdirSync(gradeDir).filter((f) => f.endsWith('.json'));

      console.log(`\n📂 Processing ${gradeFolder.toUpperCase()} (${jsonFiles.length} files)...`);

      for (const jsonFile of jsonFiles) {
        const filePath = path.join(gradeDir, jsonFile);
        console.log(` 📄 Reading ${jsonFile}...`);

        try {
          const data = JSON.parse(fs.readFileSync(filePath, 'utf8'));

          if (!Array.isArray(data)) {
            console.log(` ⚠️ Skipping ${jsonFile}: not an array`);
            continue;
          }

          // Process each lesson/unit entry
          for (const entry of data) {
            const { grade, vocab } = entry;

            // Array.isArray also rejects null/undefined, so no separate !vocab check.
            if (!grade || !Array.isArray(vocab)) {
              continue;
            }

            // NOTE(review): assumes `grade` parses to a number (e.g. 101);
            // non-numeric codes would store NaN — confirm against the data files.
            const gradeNum = Number.parseInt(grade, 10);

            console.log(` 📚 Grade ${grade}: ${vocab.length} vocab words`);

            // Import each vocabulary word
            for (const word of vocab) {
              // Guard against non-string entries: the original called word.trim()
              // unconditionally, which threw a TypeError and aborted the whole file.
              if (typeof word !== 'string' || word.trim() === '') {
                continue;
              }
              const title = word.trim();

              try {
                // Check if this vocab already exists for this grade.
                // BUG FIX: the original query omitted `grade`, so a word imported
                // for one grade could never be imported for any other grade.
                const existing = await Context.findOne({
                  where: {
                    title,
                    grade: gradeNum,
                    type: 'vocabulary'
                  }
                });

                if (existing) {
                  totalSkipped++;
                  continue;
                }

                // Create new context entry for this vocab
                await Context.create({
                  title,
                  grade: gradeNum,
                  type: 'vocabulary',
                  context: `Vocabulary word for grade ${grade}`,
                  knowledge: null,
                  desc: null
                });

                totalImported++;
              } catch (err) {
                // Per-word failures are logged and skipped so one bad row
                // doesn't abort the rest of the import.
                console.error(` ❌ Error importing "${word}": ${err.message}`);
              }
            }
          }
        } catch (err) {
          // Covers unreadable files and malformed JSON for this file only.
          console.error(` ❌ Error reading ${jsonFile}: ${err.message}`);
        }
      }
    }

    console.log('\n✅ Import complete!');
    console.log(`📊 Total imported: ${totalImported}`);
    console.log(`⏭️ Total skipped (duplicates): ${totalSkipped}`);

    process.exit(0);
  } catch (error) {
    console.error('❌ Error during import:', error.message);
    console.error(error.stack);
    process.exit(1);
  }
}
// Kick off the import when this script is executed.
// All error handling and process exit codes live inside importVocabToContext.
importVocabToContext();