Showing first 100 results. Refine your selection to see more.
)}
{filteredFeatures.length === 0 && clickedFeatureId == null && (
-
+
No features match your selection.
)}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/EmbeddingView.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/EmbeddingView.jsx
index a60508c210..7bd81a5694 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/EmbeddingView.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/EmbeddingView.jsx
@@ -9,10 +9,10 @@ const CATEGORY_COLORS = [
"#c49c94", "#f7b6d2", "#c7c7c7", "#dbdb8d", "#9edae5"
]
-// Sequential color palette (viridis-like)
+// Sequential color palette (NVIDIA brand)
const SEQUENTIAL_COLORS = [
- "#440154", "#482878", "#3e4a89", "#31688e", "#26838f",
- "#1f9e89", "#35b779", "#6ece58", "#b5de2b", "#fde725"
+ "#c359ef", "#9525C6", "#0046a4", "#0074DF", "#3f8500",
+ "#76B900", "#ef9100", "#F9C500", "#ff8181", "#EF2020"
]
// Default color for uniform coloring (NVIDIA green)
@@ -24,14 +24,15 @@ class FeatureTooltip {
this.node = node
this.inner = document.createElement("div")
this.inner.style.cssText = `
- background: white;
- border: 1px solid #ddd;
+ background: var(--bg-card);
+ border: 1px solid var(--border-input);
border-radius: 4px;
padding: 8px 12px;
font-family: 'NVIDIA Sans', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
font-size: 13px;
box-shadow: 0 2px 8px rgba(0,0,0,0.15);
max-width: 300px;
+ color: var(--text);
`
this.node.appendChild(this.inner)
this.update(props)
@@ -50,11 +51,11 @@ class FeatureTooltip {
const colorField = tooltip.fields?.color_field
this.inner.innerHTML = `
-
Feature #${featureId}
-
${label}
- ${colorField ? `
Category: ${colorField}
` : ""}
- ${logFreq !== undefined ? `
Log Frequency: ${logFreq.toFixed(3)}
` : ""}
- ${maxAct !== undefined ? `
Max Activation: ${maxAct.toFixed(2)}
` : ""}
+
Feature #${featureId}
+
${label}
+ ${colorField ? `
Category: ${colorField}
` : ""}
+ ${logFreq !== undefined ? `
Log Frequency: ${logFreq.toFixed(3)}
` : ""}
+ ${maxAct !== undefined ? `
Max Activation: ${maxAct.toFixed(2)}
` : ""}
`
}
@@ -63,7 +64,7 @@ class FeatureTooltip {
}
}
-export default function EmbeddingView({ brush, categoryColumn, categoryColumns, onFeatureClick, highlightedFeatureId, viewportState, onViewportChange, labels }) {
+export default function EmbeddingView({ brush, categoryColumn, categoryColumns, onFeatureClick, highlightedFeatureId, viewportState, onViewportChange, labels, darkMode }) {
const containerRef = useRef(null)
const viewRef = useRef(null)
const onFeatureClickRef = useRef(onFeatureClick)
@@ -166,7 +167,7 @@ export default function EmbeddingView({ brush, categoryColumn, categoryColumns,
labels: labels || null,
config: {
mode: "points",
- colorScheme: "light",
+ colorScheme: darkMode ? "dark" : "light",
autoLabelEnabled: false,
},
theme: {
@@ -211,7 +212,7 @@ export default function EmbeddingView({ brush, categoryColumn, categoryColumns,
containerRef.current.innerHTML = ''
}
}
- }, [brush, categoryColumn, categoryColumns])
+ }, [brush, categoryColumn, categoryColumns, darkMode])
// Handle resize
useEffect(() => {
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/FeatureCard.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/FeatureCard.jsx
index 104cad1193..eff497e75d 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/FeatureCard.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/FeatureCard.jsx
@@ -1,26 +1,26 @@
import React, { useState, useEffect, useRef, forwardRef } from 'react'
-import ProteinSequence from './ProteinSequence'
+import ProteinSequence, { computeAlignInfo } from './ProteinSequence'
import MolstarThumbnail from './MolstarThumbnail'
import ProteinDetailModal from './ProteinDetailModal'
import { getAccession, uniprotUrl } from './utils'
const styles = {
card: {
- background: '#fff',
+ background: 'var(--bg-card)',
borderRadius: '8px',
- border: '1px solid #e0e0e0',
+ border: '1px solid var(--border)',
flexShrink: 0,
},
cardHighlighted: {
- background: '#fff',
+ background: 'var(--bg-card)',
borderRadius: '8px',
- border: '2px solid #222',
+ border: '2px solid var(--highlight-border)',
flexShrink: 0,
- boxShadow: '0 2px 8px rgba(0, 0, 0, 0.15)',
+ boxShadow: '0 2px 8px var(--highlight-shadow)',
},
header: {
padding: '12px 14px',
- borderBottom: '1px solid #eee',
+ borderBottom: '1px solid var(--border-light)',
cursor: 'pointer',
display: 'flex',
justifyContent: 'space-between',
@@ -33,7 +33,7 @@ const styles = {
},
featureId: {
fontSize: '11px',
- color: '#888',
+ color: 'var(--text-tertiary)',
fontFamily: 'monospace',
marginBottom: '2px',
},
@@ -42,12 +42,13 @@ const styles = {
fontWeight: '500',
wordBreak: 'break-word',
lineHeight: '1.4',
+ color: 'var(--text)',
},
stats: {
display: 'flex',
gap: '12px',
fontSize: '11px',
- color: '#666',
+ color: 'var(--text-secondary)',
flexShrink: 0,
},
stat: {
@@ -56,7 +57,7 @@ const styles = {
alignItems: 'flex-end',
},
statLabel: {
- color: '#999',
+ color: 'var(--text-muted)',
fontSize: '9px',
textTransform: 'uppercase',
},
@@ -65,19 +66,19 @@ const styles = {
fontWeight: '500',
},
expandIcon: {
- color: '#999',
+ color: 'var(--text-muted)',
fontSize: '10px',
marginLeft: '6px',
},
expandedContent: {
padding: '10px 14px',
- background: '#fafafa',
+ background: 'var(--bg-card-expanded)',
maxHeight: '900px',
overflowY: 'auto',
},
sectionHeader: {
fontSize: '10px',
- color: '#888',
+ color: 'var(--text-tertiary)',
textTransform: 'uppercase',
marginBottom: '8px',
fontWeight: '500',
@@ -85,13 +86,13 @@ const styles = {
example: {
marginBottom: '8px',
padding: '8px 10px',
- background: '#fff',
+ background: 'var(--bg-example)',
borderRadius: '4px',
- border: '1px solid #eee',
+ border: '1px solid var(--border-light)',
},
exampleMeta: {
fontSize: '10px',
- color: '#999',
+ color: 'var(--text-muted)',
marginBottom: '4px',
fontFamily: 'monospace',
display: 'flex',
@@ -99,23 +100,23 @@ const styles = {
alignItems: 'center',
},
proteinId: {
- color: '#2563eb',
+ color: 'var(--link)',
fontWeight: '600',
},
annotation: {
- color: '#666',
+ color: 'var(--text-secondary)',
fontStyle: 'italic',
marginLeft: '8px',
},
uniprotLink: {
- color: '#2563eb',
+ color: 'var(--link)',
textDecoration: 'none',
fontSize: '11px',
marginLeft: '4px',
opacity: 0.6,
},
noExamples: {
- color: '#999',
+ color: 'var(--text-muted)',
fontSize: '12px',
fontStyle: 'italic',
},
@@ -127,33 +128,66 @@ const styles = {
},
structureHeader: {
fontSize: '10px',
- color: '#888',
+ color: 'var(--text-tertiary)',
textTransform: 'uppercase',
marginTop: '16px',
marginBottom: '8px',
fontWeight: '500',
},
+ alignBar: {
+ display: 'flex',
+ alignItems: 'center',
+ gap: '6px',
+ fontSize: '10px',
+ color: 'var(--text-tertiary)',
+ },
+ alignLabel: {
+ textTransform: 'uppercase',
+ fontWeight: '500',
+ },
+ alignBtn: {
+ padding: '2px 8px',
+ border: '1px solid var(--border-input)',
+ borderRadius: '3px',
+ background: 'var(--bg-input)',
+ cursor: 'pointer',
+ fontSize: '10px',
+ color: 'var(--text-secondary)',
+ },
+ alignBtnActive: {
+ padding: '2px 8px',
+ border: '1px solid var(--accent)',
+ borderRadius: '3px',
+ background: 'var(--bg-card)',
+ cursor: 'pointer',
+ fontSize: '10px',
+ color: 'var(--text)',
+ fontWeight: '600',
+ },
densityBar: {
width: '50px',
height: '3px',
- background: '#eee',
+ background: 'var(--density-bar-bg)',
borderRadius: '2px',
overflow: 'hidden',
marginTop: '3px',
},
densityFill: {
height: '100%',
- background: '#76b900',
+ background: 'var(--accent)',
borderRadius: '2px',
},
}
-const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, forceExpanded, onClick, loadExamples }, ref) {
+const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, forceExpanded, onClick, loadExamples, vocabLogits, darkMode }, ref) {
const [expanded, setExpanded] = useState(false)
const [detailProtein, setDetailProtein] = useState(null)
const [examples, setExamples] = useState([])
const [loadingExamples, setLoadingExamples] = useState(false)
const examplesCacheRef = useRef(null)
+ const scrollGroupRef = useRef([])
+ const [alignMode, setAlignMode] = useState('start')
+ const [hoveredToken, setHoveredToken] = useState(null)
// If forceExpanded changes to true, expand the card
useEffect(() => {
@@ -162,6 +196,11 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
}
}, [forceExpanded])
+ // Reset scroll group when card collapses or alignMode changes
+ useEffect(() => {
+ scrollGroupRef.current = []
+ }, [expanded, alignMode])
+
// Lazy-load examples from DuckDB when card is expanded
useEffect(() => {
if (!expanded || !loadExamples || examplesCacheRef.current) return
@@ -182,6 +221,7 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
const freq = feature.activation_freq || 0
const maxAct = feature.max_activation || 0
+ const bestF1 = feature.best_f1 || 0
const description = feature.description || `Feature ${feature.feature_id}`
const handleClick = () => {
@@ -196,7 +236,22 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
-
Feature #{feature.feature_id}
+
+
Feature #{feature.feature_id}
+ {bestF1 > 0 && (
+
= 0.5 ? 'rgba(118, 185, 0, 0.15)' : 'rgba(255, 165, 0, 0.15)',
+ color: bestF1 >= 0.5 ? '#76b900' : '#ef9100',
+ whiteSpace: 'nowrap',
+ }}>
+ F1: {bestF1.toFixed(2)}
+
+ )}
+
{description}
@@ -211,48 +266,144 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
Max
{maxAct.toFixed(1)}
-
{expanded ? '▼' : '▶'}
+
{expanded ? '\u25BC' : '\u25B6'}
{expanded && (
+ {/* Decoder Logits */}
+ {vocabLogits && vocabLogits[String(feature.feature_id)] && (() => {
+ const logits = vocabLogits[String(feature.feature_id)]
+ const tokenLogitMap = {}
+ for (const [tok, val] of (logits.top_positive || [])) tokenLogitMap[tok] = val
+ for (const [tok, val] of (logits.top_negative || [])) tokenLogitMap[tok] = val
+ // Show all 20 standard amino acids in alphabetical order
+ const AMINO_ACIDS = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
+ const allTokens = AMINO_ACIDS
+ const maxAbs = Math.max(...AMINO_ACIDS.map(aa => Math.abs(tokenLogitMap[aa] || 0)), 0.001)
+ return (
+
+
Decoder Logits (mean-centered)
+
+ {hoveredToken && (() => {
+ const val = tokenLogitMap[hoveredToken] || 0
+ return (
+
+ {hoveredToken}: {val > 0 ? '+' : ''}{val.toFixed(3)}
+
+ )
+ })()}
+
+ {allTokens.map(tok => {
+ const val = tokenLogitMap[tok] || 0
+ const h = Math.max(1, (Math.abs(val) / maxAbs) * 28)
+ const isHovered = hoveredToken === tok
+ const barColor = val === 0 ? 'var(--text-muted)' : val > 0 ? '#76b900' : '#e57373'
+ return (
+
setHoveredToken(tok)}
+ onMouseLeave={() => setHoveredToken(null)}
+ >
+
+
+ )
+ })}
+
+
+ {allTokens.map(tok => (
+
+ {tok}
+
+ ))}
+
+
+
+ Promoted
+ Suppressed
+ relative to average feature
+
+
+ )
+ })()}
+
{/* Protein sequence examples */}
-
Top Activating Proteins
+
+
Top Activating Proteins
+
+ Align by:
+ {['start', 'first_activation', 'max_activation'].map(mode => (
+ { e.stopPropagation(); setAlignMode(mode) }}
+ >
+ {mode === 'start' ? 'sequence start' : mode === 'first_activation' ? 'first activation' : 'max activation'}
+
+ ))}
+
+
{loadingExamples ? (
-
+
Loading examples...
) : examples.length > 0 ? (
<>
- {examples.slice(0, 6).map((ex, i) => (
-
-
-
- {ex.protein_id}
- e.stopPropagation()}
- title="View on UniProt"
- >
- ↗
-
- {ex.best_annotation && (
- {ex.best_annotation}
- )}
-
-
max: {ex.max_activation?.toFixed(3) || 'N/A'}
+ {(() => {
+ const visibleExamples = examples.slice(0, 6)
+ const { anchor: alignAnchor, totalLength } = computeAlignInfo(visibleExamples, alignMode)
+ return visibleExamples.map((ex, i) => (
+
-
-
- ))}
+ ))
+ })()}
{/* 2x3 Mol* structure grid */}
3D Structures (AlphaFold)
@@ -266,6 +417,7 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
activations={ex.activations}
maxActivation={ex.max_activation}
onExpand={() => setDetailProtein(ex)}
+ darkMode={darkMode}
/>
))}
@@ -280,6 +432,7 @@ const FeatureCard = forwardRef(function FeatureCard({ feature, isHighlighted, fo
setDetailProtein(null)}
+ darkMode={darkMode}
/>
)}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/Histogram.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/Histogram.jsx
index 2939dc9e11..6bd00d631e 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/Histogram.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/Histogram.jsx
@@ -2,9 +2,24 @@ import React, { useEffect, useRef } from 'react'
import * as vg from '@uwdata/vgplot'
const FILL_COLOR = "#76b900"
-const BACKGROUND_COLOR = "#e0e0e0"
-export default function Histogram({ brush, column, label }) {
+function injectAxisLine(plot, marginLeft, marginRight, marginBottom, height, axisColor) {
+ const svg = plot.tagName === 'svg' ? plot : plot.querySelector?.('svg')
+ if (!svg) return
+ svg.querySelectorAll('.x-axis-line').forEach(el => el.remove())
+ const svgWidth = svg.getAttribute('width') || svg.clientWidth
+ const line = document.createElementNS('http://www.w3.org/2000/svg', 'line')
+ line.classList.add('x-axis-line')
+ line.setAttribute('x1', marginLeft)
+ line.setAttribute('x2', svgWidth - marginRight)
+ line.setAttribute('y1', height - marginBottom)
+ line.setAttribute('y2', height - marginBottom)
+ line.setAttribute('stroke', axisColor)
+ line.setAttribute('stroke-width', '1')
+ svg.appendChild(line)
+}
+
+export default function Histogram({ brush, column, label, categoryColumns }) {
const containerRef = useRef(null)
useEffect(() => {
@@ -13,44 +28,86 @@ export default function Histogram({ brush, column, label }) {
// Clear previous content
containerRef.current.innerHTML = ''
+ const computedBg = getComputedStyle(document.documentElement).getPropertyValue('--density-bar-bg').trim() || '#e0e0e0'
+ const axisColor = getComputedStyle(document.documentElement).getPropertyValue('--text-tertiary').trim() || '#888'
const width = containerRef.current.clientWidth - 20
- const height = 80
-
- const plot = vg.plot(
- // Background histogram: full data (no filterBy)
- vg.rectY(
- vg.from("features"),
- { x: vg.bin(column), y: vg.count(), fill: BACKGROUND_COLOR, inset: 1 }
- ),
- // Foreground histogram: filtered data
- vg.rectY(
- vg.from("features", { filterBy: brush }),
- { x: vg.bin(column), y: vg.count(), fill: FILL_COLOR, inset: 1 }
- ),
- vg.intervalX({ as: brush }),
- vg.xLabel(null),
- vg.yLabel(null),
- vg.width(width),
- vg.height(height),
- vg.marginLeft(45),
- vg.marginBottom(20),
- vg.marginTop(5),
- vg.marginRight(10)
- )
+ const height = 50
+ const marginLeft = 45
+ const marginBottom = 20
+ const marginRight = 10
+ const marginTop = 5
+
+ // Check if this column is categorical
+ const colInfo = categoryColumns?.find(c => c.name === column)
+ const isCategorical = colInfo && (colInfo.type === 'string' || colInfo.type === 'integer')
+
+ let plot
+ if (isCategorical) {
+ const catHeight = 80
+ const catMarginBottom = 50
+ plot = vg.plot(
+ vg.barY(
+ vg.from("features"),
+ { x: column, y: vg.count(), fill: computedBg, inset: 1 }
+ ),
+ vg.barY(
+ vg.from("features", { filterBy: brush }),
+ { x: column, y: vg.count(), fill: FILL_COLOR, inset: 1 }
+ ),
+ vg.toggleX({ as: brush }),
+ vg.xLabel(null),
+ vg.yLabel(null),
+ vg.xTickRotate(-45),
+ vg.xTickSize(3),
+ vg.style({ fontSize: '9px' }),
+ vg.width(width),
+ vg.height(catHeight),
+ vg.marginLeft(marginLeft),
+ vg.marginBottom(catMarginBottom),
+ vg.marginTop(marginTop),
+ vg.marginRight(marginRight)
+ )
+ } else {
+ // Numeric histogram: binned rectY
+ plot = vg.plot(
+ vg.rectY(
+ vg.from("features"),
+ { x: vg.bin(column), y: vg.count(), fill: computedBg, inset: 1 }
+ ),
+ vg.rectY(
+ vg.from("features", { filterBy: brush }),
+ { x: vg.bin(column), y: vg.count(), fill: FILL_COLOR, inset: 1 }
+ ),
+ vg.intervalX({ as: brush }),
+ vg.xLabel(null),
+ vg.yLabel(null),
+ vg.width(width),
+ vg.height(height),
+ vg.marginLeft(marginLeft),
+ vg.marginBottom(marginBottom),
+ vg.marginTop(marginTop),
+ vg.marginRight(marginRight)
+ )
+ }
containerRef.current.appendChild(plot)
+ const timer = setTimeout(() => {
+ injectAxisLine(plot, marginLeft, marginRight, marginBottom, height, axisColor)
+ }, 50)
+
return () => {
+ clearTimeout(timer)
if (containerRef.current) {
containerRef.current.innerHTML = ''
}
}
- }, [brush, column, label])
+ }, [brush, column, label, categoryColumns])
return (
)
}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/MolstarThumbnail.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/MolstarThumbnail.jsx
index 7ff85dd76f..739314627e 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/MolstarThumbnail.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/MolstarThumbnail.jsx
@@ -3,9 +3,9 @@ import { getAccession } from './utils'
const styles = {
container: {
- background: '#fafafa',
+ background: 'var(--bg-card-expanded)',
borderRadius: '6px',
- border: '1px solid #eee',
+ border: '1px solid var(--border-light)',
overflow: 'hidden',
display: 'flex',
flexDirection: 'column',
@@ -21,8 +21,8 @@ const styles = {
top: '6px',
right: '6px',
zIndex: 20,
- background: 'rgba(255,255,255,0.85)',
- border: '1px solid #ddd',
+ background: 'var(--bg-card)',
+ border: '1px solid var(--border-input)',
borderRadius: '4px',
width: '24px',
height: '24px',
@@ -31,7 +31,7 @@ const styles = {
justifyContent: 'center',
cursor: 'pointer',
fontSize: '12px',
- color: '#555',
+ color: 'var(--text-secondary)',
opacity: 0,
transition: 'opacity 0.15s',
pointerEvents: 'auto',
@@ -40,18 +40,18 @@ const styles = {
padding: '4px 8px',
fontSize: '10px',
fontFamily: 'monospace',
- color: '#555',
- borderTop: '1px solid #eee',
+ color: 'var(--text-secondary)',
+ borderTop: '1px solid var(--border-light)',
display: 'flex',
justifyContent: 'space-between',
alignItems: 'center',
},
proteinId: {
fontWeight: '600',
- color: '#2563eb',
+ color: 'var(--link)',
},
activation: {
- color: '#999',
+ color: 'var(--text-muted)',
},
loading: {
position: 'absolute',
@@ -59,11 +59,11 @@ const styles = {
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
- background: '#f9f9f9',
+ background: 'var(--bg-card-expanded)',
zIndex: 10,
pointerEvents: 'none',
fontSize: '10px',
- color: '#aaa',
+ color: 'var(--text-muted)',
},
error: {
position: 'absolute',
@@ -71,14 +71,14 @@ const styles = {
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
- background: '#f9f9f9',
+ background: 'var(--bg-card-expanded)',
zIndex: 10,
fontSize: '10px',
color: '#e57373',
},
}
-export default function MolstarThumbnail({ proteinId, alphafoldId, sequence, activations, maxActivation, onExpand }) {
+export default function MolstarThumbnail({ proteinId, alphafoldId, sequence, activations, maxActivation, onExpand, darkMode }) {
const wrapperRef = useRef(null)
const molContainerRef = useRef(null)
const pluginRef = useRef(null)
@@ -147,6 +147,14 @@ export default function MolstarThumbnail({ proteinId, alphafoldId, sequence, act
plugin.initViewer(canvas, molDiv)
} catch { /* fallback for different Mol* versions */ }
+ // Set canvas background based on dark mode
+ try {
+ const bgColor = darkMode ? 0x000000 : 0xffffff
+ plugin.canvas3d?.setProps({
+ renderer: { backgroundColor: bgColor },
+ })
+ } catch { /* older Mol* versions may not support this */ }
+
pluginRef.current = plugin
// Custom activation color theme
@@ -173,7 +181,7 @@ export default function MolstarThumbnail({ proteinId, alphafoldId, sequence, act
}
}
} catch { /* fallback */ }
- return Color.fromRgb(200, 200, 200)
+ return darkMode ? Color.fromRgb(80, 80, 80) : Color.fromRgb(200, 200, 200)
}
const colorThemeProvider = {
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinDetailModal.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinDetailModal.jsx
index 7f6588a73f..7e6327b2d9 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinDetailModal.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinDetailModal.jsx
@@ -7,15 +7,15 @@ const styles = {
backdrop: {
position: 'fixed',
inset: 0,
- background: 'rgba(0,0,0,0.5)',
+ background: 'rgba(0,0,0,0.45)',
zIndex: 9999,
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
},
modal: {
- background: '#fff',
- borderRadius: '12px',
+ background: 'var(--bg-card)',
+ borderRadius: '10px',
width: '90vw',
maxWidth: '1200px',
height: '80vh',
@@ -30,8 +30,8 @@ const styles = {
top: '12px',
right: '12px',
zIndex: 10,
- background: 'rgba(255,255,255,0.9)',
- border: '1px solid #ddd',
+ background: 'var(--bg-card)',
+ border: '1px solid var(--border-input)',
borderRadius: '50%',
width: '32px',
height: '32px',
@@ -40,13 +40,13 @@ const styles = {
justifyContent: 'center',
cursor: 'pointer',
fontSize: '16px',
- color: '#555',
+ color: 'var(--text-secondary)',
},
leftPanel: {
flex: '0 0 60%',
position: 'relative',
- background: '#f5f5f5',
- borderRight: '1px solid #eee',
+ background: 'var(--bg)',
+ borderRight: '1px solid var(--border-light)',
},
viewer: {
width: '100%',
@@ -59,7 +59,7 @@ const styles = {
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
- color: '#aaa',
+ color: 'var(--text-muted)',
fontSize: '13px',
},
viewerError: {
@@ -73,7 +73,7 @@ const styles = {
},
rightPanel: {
flex: 1,
- padding: '24px',
+ padding: '28px 32px',
overflowY: 'auto',
display: 'flex',
flexDirection: 'column',
@@ -89,7 +89,7 @@ const styles = {
fontSize: '18px',
fontWeight: '700',
fontFamily: 'monospace',
- color: '#222',
+ color: 'var(--text-heading)',
},
uniprotBtn: {
display: 'inline-flex',
@@ -97,9 +97,9 @@ const styles = {
gap: '4px',
padding: '4px 10px',
fontSize: '12px',
- color: '#2563eb',
- background: '#eff6ff',
- border: '1px solid #bfdbfe',
+ color: 'var(--link)',
+ background: 'var(--bg-card-expanded)',
+ border: '1px solid var(--border)',
borderRadius: '6px',
textDecoration: 'none',
fontWeight: '500',
@@ -110,13 +110,13 @@ const styles = {
},
statBox: {
padding: '10px 14px',
- background: '#f9fafb',
+ background: 'var(--bg-card-expanded)',
borderRadius: '8px',
- border: '1px solid #eee',
+ border: '1px solid var(--border-light)',
},
statLabel: {
fontSize: '10px',
- color: '#888',
+ color: 'var(--text-tertiary)',
textTransform: 'uppercase',
marginBottom: '2px',
},
@@ -124,17 +124,17 @@ const styles = {
fontSize: '14px',
fontWeight: '600',
fontFamily: 'monospace',
- color: '#333',
+ color: 'var(--text)',
},
sectionLabel: {
fontSize: '11px',
- color: '#888',
+ color: 'var(--text-tertiary)',
textTransform: 'uppercase',
fontWeight: '500',
},
sequenceBox: {
- background: '#fafafa',
- border: '1px solid #eee',
+ background: 'var(--bg-card-expanded)',
+ border: '1px solid var(--border-light)',
borderRadius: '8px',
padding: '12px',
maxHeight: '300px',
@@ -142,7 +142,7 @@ const styles = {
},
}
-export default function ProteinDetailModal({ protein, onClose }) {
+export default function ProteinDetailModal({ protein, onClose, darkMode }) {
const wrapperRef = useRef(null)
const molContainerRef = useRef(null)
const pluginRef = useRef(null)
@@ -207,6 +207,14 @@ export default function ProteinDetailModal({ protein, onClose }) {
try { plugin.initViewer(canvas, molDiv) } catch {}
+ // Set canvas background based on dark mode
+ try {
+ const bgColor = darkMode ? 0x000000 : 0xffffff
+ plugin.canvas3d?.setProps({
+ renderer: { backgroundColor: bgColor },
+ })
+ } catch { /* older Mol* versions may not support this */ }
+
pluginRef.current = plugin
// Custom activation color theme
@@ -234,7 +242,7 @@ export default function ProteinDetailModal({ protein, onClose }) {
}
}
} catch {}
- return Color.fromRgb(200, 200, 200)
+ return darkMode ? Color.fromRgb(80, 80, 80) : Color.fromRgb(200, 200, 200)
}
const colorThemeProvider = {
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinSequence.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinSequence.jsx
index 4a098928d0..240906a597 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinSequence.jsx
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/protein_dashboard/src/ProteinSequence.jsx
@@ -1,4 +1,4 @@
-import React, { useState } from 'react'
+import React, { useState, useEffect, useRef } from 'react'
// White-to-NVIDIA-green (#76b900) gradient based on activation value
function activationColorHex(value, maxValue) {
@@ -15,25 +15,53 @@ const styles = {
container: {
fontFamily: 'Monaco, Menlo, "Courier New", monospace',
fontSize: '11px',
- lineHeight: '1.4',
+ lineHeight: '1.2',
overflowX: 'auto',
- whiteSpace: 'nowrap',
position: 'relative',
},
residueRow: {
display: 'inline-flex',
+ whiteSpace: 'nowrap',
},
- residue: {
- display: 'inline-block',
- textAlign: 'center',
- minWidth: '12px',
+ residueBlock: {
+ display: 'inline-flex',
+ flexDirection: 'column',
+ alignItems: 'center',
cursor: 'default',
- borderRadius: '1px',
+ borderRadius: '2px',
+ padding: '1px 2px',
+ marginRight: '1px',
+ minWidth: '14px',
+ },
+ padBlock: {
+ display: 'inline-flex',
+ flexDirection: 'column',
+ alignItems: 'center',
+ borderRadius: '2px',
+ padding: '1px 2px',
+ marginRight: '1px',
+ minWidth: '14px',
+ background: 'var(--density-bar-bg)',
+ },
+ padText: {
+ fontSize: '10px',
+ color: 'var(--text-muted)',
+ },
+ residueText: {
+ fontSize: '10px',
+ letterSpacing: '0.5px',
+ color: 'var(--text)',
+ },
+ idxText: {
+ fontSize: '7px',
+ color: 'var(--text-tertiary)',
+ marginTop: '0px',
+ lineHeight: '1',
},
tooltip: {
position: 'fixed',
- background: '#333',
- color: '#fff',
+ background: 'var(--bg-card)',
+ color: 'var(--text)',
padding: '4px 8px',
borderRadius: '4px',
fontSize: '10px',
@@ -41,19 +69,80 @@ const styles = {
zIndex: 1000,
pointerEvents: 'none',
whiteSpace: 'nowrap',
+ border: '1px solid var(--border)',
+ boxShadow: '0 2px 8px rgba(0,0,0,0.2)',
},
}
-export default function ProteinSequence({ sequence, activations, maxActivation }) {
+export default function ProteinSequence({
+ sequence, activations, maxActivation,
+ alignMode, alignAnchor, totalLength,
+ scrollGroupRef,
+}) {
const [tooltip, setTooltip] = useState(null)
+ const scrollRef = useRef(null)
+ const anchorRef = useRef(null)
- if (!sequence || sequence.length === 0) {
- return
No sequence
+ const residues = sequence ? sequence.split('') : []
+ const acts = activations ? activations.slice(0, residues.length) : []
+ const maxAct = maxActivation || Math.max(...acts, 0.001)
+
+ // Compute local anchor index
+ let localAnchor = 0
+ if (alignMode === 'first_activation') {
+ localAnchor = acts.findIndex(a => a > 0)
+ if (localAnchor < 0) localAnchor = 0
+ } else if (alignMode === 'max_activation') {
+ let maxVal = -1
+ acts.forEach((a, i) => { if (a > maxVal) { maxVal = a; localAnchor = i } })
}
- // Trim activations to sequence length (ESM2 may add an extra token)
- const acts = activations ? activations.slice(0, sequence.length) : []
- const maxAct = maxActivation || Math.max(...acts, 0.001)
+ // Padding
+ const isAligned = alignMode && alignMode !== 'start' && alignAnchor != null
+ const leftPad = isAligned ? Math.max(0, alignAnchor - localAnchor) : 0
+ const rightPad = (totalLength != null)
+ ? Math.max(0, totalLength - leftPad - residues.length)
+ : 0
+
+ // Scroll to anchor when alignMode changes
+ useEffect(() => {
+ if (isAligned && anchorRef.current && scrollRef.current) {
+ anchorRef.current.scrollIntoView({ behavior: 'instant', inline: 'center', block: 'nearest' })
+ }
+ }, [alignMode, alignAnchor])
+
+ // Synchronized scrolling across sequences in the same card
+ useEffect(() => {
+ const el = scrollRef.current
+ if (!el || !scrollGroupRef) return
+
+ // Register this element in the group
+ if (!scrollGroupRef.current) scrollGroupRef.current = []
+ const group = scrollGroupRef.current
+ if (!group.includes(el)) group.push(el)
+
+ let isSyncing = false
+ const handleScroll = () => {
+ if (isSyncing) return
+ isSyncing = true
+ const scrollLeft = el.scrollLeft
+ for (const other of group) {
+ if (other !== el) other.scrollLeft = scrollLeft
+ }
+ isSyncing = false
+ }
+
+ el.addEventListener('scroll', handleScroll)
+ return () => {
+ el.removeEventListener('scroll', handleScroll)
+ const idx = group.indexOf(el)
+ if (idx !== -1) group.splice(idx, 1)
+ }
+ }, [scrollGroupRef])
+
+ if (!sequence || sequence.length === 0) {
+ return
No sequence
+ }
const handleMouseEnter = (e, residue, idx, act) => {
setTooltip({
@@ -74,23 +163,49 @@ export default function ProteinSequence({ sequence, activations, maxActivation }
}
return (
-
+
- {sequence.split('').map((residue, idx) => {
+ {/* Left padding */}
+ {Array.from({ length: leftPad }, (_, i) => (
+
+ ·
+
+
+ ))}
+
+ {/* Actual residues */}
+ {residues.map((residue, idx) => {
const act = acts[idx] || 0
const bg = activationColorHex(act, maxAct)
+ const hasActivation = act > 0
+ const activeTextColor = hasActivation ? '#000' : undefined
+ const isAnchor = isAligned && idx === localAnchor
return (
handleMouseEnter(e, residue, idx, act)}
onMouseMove={handleMouseMove}
onMouseLeave={handleMouseLeave}
>
- {residue}
+ {residue}
+ {idx + 1}
)
})}
+
+ {/* Right padding */}
+ {Array.from({ length: rightPad }, (_, i) => (
+
+ ·
+
+
+ ))}
{tooltip && (
@@ -100,3 +215,47 @@ export default function ProteinSequence({ sequence, activations, maxActivation }
)
}
+
+/**
+ * Compute alignment info for a set of examples.
+ */
+export function computeAlignInfo(examples, alignMode) {
+ if (!examples || examples.length === 0) return { anchor: 0, totalLength: 0 }
+
+ if (alignMode === 'start') {
+ const maxLen = Math.max(...examples.map(ex => (ex.activations || []).length))
+ return { anchor: 0, totalLength: maxLen }
+ }
+
+ let maxAnchor = 0
+ for (const ex of examples) {
+ const acts = ex.activations || []
+ let anchor = 0
+ if (alignMode === 'first_activation') {
+ anchor = acts.findIndex(a => a > 0)
+ if (anchor < 0) anchor = 0
+ } else if (alignMode === 'max_activation') {
+ let maxVal = -1
+ acts.forEach((a, i) => { if (a > maxVal) { maxVal = a; anchor = i } })
+ }
+ if (anchor > maxAnchor) maxAnchor = anchor
+ }
+
+ let totalLength = 0
+ for (const ex of examples) {
+ const acts = ex.activations || []
+ let anchor = 0
+ if (alignMode === 'first_activation') {
+ anchor = acts.findIndex(a => a > 0)
+ if (anchor < 0) anchor = 0
+ } else if (alignMode === 'max_activation') {
+ let maxVal = -1
+ acts.forEach((a, i) => { if (a > maxVal) { maxVal = a; anchor = i } })
+ }
+ const leftPad = maxAnchor - anchor
+ const thisTotal = leftPad + acts.length
+ if (thisTotal > totalLength) totalLength = thisTotal
+ }
+
+ return { anchor: maxAnchor, totalLength }
+}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/run_configs/model/650m.yaml b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/run_configs/model/650m.yaml
index d3928a4604..65220f8b0b 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/run_configs/model/650m.yaml
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/run_configs/model/650m.yaml
@@ -1,6 +1,6 @@
# @package _global_
# ESM2-650M configuration
model_name: nvidia/esm2_t33_650M_UR50D
-run_name: 650m_5k
-num_proteins: 5000
+run_name: 650m_50k
+num_proteins: 50000
batch_size: 16
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/15b.sh b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/15b.sh
similarity index 95%
rename from bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/15b.sh
rename to bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/15b.sh
index 103a4facc6..c61c25ec5d 100755
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/15b.sh
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/15b.sh
@@ -56,6 +56,9 @@ python scripts/eval.py \
--batch-size 1 \
--dtype bf16 \
--num-proteins 1000 \
+ --umap-n-neighbors 50 \
+ --umap-min-dist 0.0 \
+ --hdbscan-min-cluster-size 20 \
--output-dir ./outputs/15b_50k/eval
echo ""
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/3b.sh b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/3b.sh
similarity index 88%
rename from bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/3b.sh
rename to bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/3b.sh
index 65eb576cd3..0956d37959 100755
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/3b.sh
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/3b.sh
@@ -55,7 +55,14 @@ python scripts/eval.py \
--layer 24 \
--batch-size 4 \
--dtype bf16 \
- --num-proteins 1000 \
+ --num-proteins 2000 \
+ --f1-max-proteins 50000 \
+ --f1-min-positives 5 \
+ --f1-threshold 0.2 \
+ --normalization-n-proteins 3000 \
+ --umap-n-neighbors 50 \
+ --umap-min-dist 0.0 \
+ --hdbscan-min-cluster-size 20 \
--output-dir ./outputs/3b_50k/eval
echo ""
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/650m.sh b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/650m.sh
similarity index 66%
rename from bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/650m.sh
rename to bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/650m.sh
index d004de12f2..be88f4e2c5 100755
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/650m.sh
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/650m.sh
@@ -5,24 +5,24 @@ echo "============================================================"
echo "STEP 1: Extract activations from ESM2-650M"
echo "============================================================"
-torchrun --nproc_per_node=4 scripts/extract.py \
+torchrun --nproc_per_node=2 scripts/extract.py \
--source uniref50 \
- --num-proteins 5000 \
+ --num-proteins 50000 \
--data-dir ./data \
--layer 24 \
--model-name nvidia/esm2_t33_650M_UR50D \
--batch-size 16 \
--max-length 1024 \
--filter-length \
- --output .cache/activations/650m_5k_layer24
+ --output .cache/activations/650m_50k_layer24
echo ""
echo "============================================================"
echo "STEP 2: Train SAE on cached activations"
echo "============================================================"
-torchrun --nproc_per_node=4 scripts/train.py \
- --cache-dir .cache/activations/650m_5k_layer24 \
+torchrun --nproc_per_node=2 scripts/train.py \
+ --cache-dir .cache/activations/650m_50k_layer24 \
--model-name nvidia/esm2_t33_650M_UR50D \
--layer 24 \
--model-type topk \
@@ -36,11 +36,11 @@ torchrun --nproc_per_node=4 scripts/train.py \
--lr 3e-4 \
--log-interval 50 \
--no-wandb \
- --dp-size 4 \
+ --dp-size 2 \
--seed 42 \
- --num-proteins 5000 \
- --output-dir "$(pwd)/outputs/650m_5k" \
- --checkpoint-dir "$(pwd)/outputs/650m_5k/checkpoints" \
+ --num-proteins 50000 \
+ --output-dir "$(pwd)/outputs/650m_50k" \
+ --checkpoint-dir "$(pwd)/outputs/650m_50k/checkpoints" \
--checkpoint-steps 999999
echo ""
@@ -49,14 +49,21 @@ echo "STEP 3: Evaluate SAE + build dashboard"
echo "============================================================"
python scripts/eval.py \
- --checkpoint ./outputs/650m_5k/checkpoints/checkpoint_final.pt \
+ --checkpoint ./outputs/650m_50k/checkpoints/checkpoint_final.pt \
--top-k 32 \
--model-name nvidia/esm2_t33_650M_UR50D \
--layer 24 \
--batch-size 16 \
--dtype bf16 \
- --num-proteins 1000 \
- --output-dir ./outputs/650m_5k/eval
+ --num-proteins 2000 \
+ --f1-max-proteins 50000 \
+ --f1-min-positives 5 \
+ --f1-threshold 0.2 \
+ --normalization-n-proteins 3000 \
+ --umap-n-neighbors 50 \
+ --umap-min-dist 0.0 \
+ --hdbscan-min-cluster-size 20 \
+ --output-dir ./outputs/650m_50k/eval
echo ""
echo "============================================================"
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/eval.py b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/eval.py
index cdfab7b75e..3992f2ccfb 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/eval.py
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/eval.py
@@ -86,6 +86,20 @@ def parse_args():
# Loss recovered
p.add_argument("--loss-recovered-n-sequences", type=int, default=100)
+ # Annotation download
+ p.add_argument(
+ "--annotation-score",
+ type=int,
+ default=None,
+ help="UniProt annotation score filter (1-5, None=no filter). Default None for max coverage.",
+ )
+
+ # Dashboard / UMAP
+ p.add_argument("--umap-n-neighbors", type=int, default=50, help="UMAP n_neighbors parameter")
+ p.add_argument("--umap-min-dist", type=float, default=0.0, help="UMAP min_dist parameter")
+ p.add_argument("--hdbscan-min-cluster-size", type=int, default=20, help="HDBSCAN min_cluster_size parameter")
+ p.add_argument("--n-examples", type=int, default=6, help="Top proteins per feature for dashboard")
+
# Skip flags
p.add_argument("--skip-f1", action="store_true", help="Skip F1 evaluation")
p.add_argument("--skip-loss-recovered", action="store_true", help="Skip loss recovered evaluation")
@@ -96,6 +110,94 @@ def parse_args():
return p.parse_args()
+# ── Vocabulary logit analysis ─────────────────────────────────────────
+
+
+def compute_vocab_logits(sae, model_name, model_dtype, device="cuda"):
+ """Project SAE decoder through the ESM2 LM head to get per-feature token logits.
+
+ Returns dict mapping feature_id -> {top_positive, top_negative} with
+ mean-centered logit values (baseline subtracted).
+ """
+ from transformers import AutoModelForMaskedLM
+
+ print("Loading LM head model for vocab logits...")
+ lm_kwargs = {"trust_remote_code": True}
+ if model_dtype != torch.float32:
+ lm_kwargs["dtype"] = model_dtype
+ lm_model = AutoModelForMaskedLM.from_pretrained(model_name, **lm_kwargs).to(device).eval()
+
+ tokenizer = None
+ try:
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ except Exception:
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
+
+ # Get the LM head
+ lm_head = lm_model.lm_head if hasattr(lm_model, "lm_head") else lm_model.cls
+
+ # Decoder weights: (input_dim, n_features)
+ W_dec = sae.decoder.weight.to(device).to(model_dtype)
+
+ with torch.no_grad():
+ logits = lm_head(W_dec.T).float() # (n_features, output_vocab_size)
+
+ # Subtract mean logit vector (baseline) so values reflect
+ # feature-specific effects rather than the LM head's global bias.
+ mean_logits = logits.mean(dim=0, keepdim=True)
+ logits = logits - mean_logits
+
+ # Build vocab list matching the LM head output dimension
+ # (ESM2 pads output beyond tokenizer.vocab_size)
+ output_vocab_size = logits.shape[1]
+ vocab = []
+ for i in range(output_vocab_size):
+ if i < len(tokenizer):
+ vocab.append(tokenizer.decode([i]).strip())
+ else:
+ vocab.append(f"<extra_{i}>")
+
+ # Special tokens to exclude from top lists
+ special_tokens = {"<cls>", "<pad>", "<eos>", "<unk>", "<mask>", ".", "-"}
+
+ valid_mask = torch.ones(output_vocab_size, dtype=torch.bool)
+ for i, tok in enumerate(vocab):
+ if tok.lower() in special_tokens or tok.startswith("<"):
+ valid_mask[i] = False
+
+ n_features = logits.shape[0]
+ results = {}
+ for f in range(n_features):
+ feat_logits = logits[f].cpu()
+
+ masked_logits = feat_logits.clone()
+ masked_logits[~valid_mask] = float("-inf")
+
+ top_pos_idx = masked_logits.topk(10).indices.tolist()
+
+ masked_logits_neg = feat_logits.clone()
+ masked_logits_neg[~valid_mask] = float("inf")
+ top_neg_idx = masked_logits_neg.topk(10, largest=False).indices.tolist()
+
+ top_positive = [(vocab[i], round(feat_logits[i].item(), 3)) for i in top_pos_idx]
+ top_negative = [(vocab[i], round(feat_logits[i].item(), 3)) for i in top_neg_idx]
+
+ results[f] = {
+ "top_positive": top_positive,
+ "top_negative": top_negative,
+ }
+
+ del lm_model
+ torch.cuda.empty_cache()
+
+ print(f" Computed mean-centered vocab logits for {n_features} features")
+ return results
+
+
def load_sae_from_checkpoint(checkpoint_path: str, top_k: int) -> TopKSAE:
"""Load SAE from a Trainer checkpoint, handling DDP module. prefix."""
ckpt = torch.load(checkpoint_path, map_location="cpu", weights_only=False)
@@ -290,6 +392,20 @@ def build_f1_labels(val_results, n_features, f1_threshold):
n_labeled = sum(1 for l in labels if not l.startswith("Feature "))
print(f" {n_labeled}/{n_features} features labeled (F1 >= {f1_threshold})")
+
+ # Show all matched annotation categories
+ from collections import Counter
+
+ category_counts = Counter()
+ for i, stats in feature_stats.items():
+ concept = stats["best_annotation"]
+ category = concept.split(":")[0] if ":" in concept else concept
+ category_counts[category] += 1
+ if category_counts:
+ print(f" Annotation categories matched ({len(category_counts)} types):")
+ for cat, count in category_counts.most_common():
+ print(f" {cat}: {count} features")
+
return labels, feature_stats
@@ -360,7 +476,7 @@ def get_esm2():
output_path=annotations_path,
max_length=args.max_seq_len,
reviewed_only=True,
- annotation_score=5,
+ annotation_score=args.annotation_score,
max_results=args.f1_max_proteins,
)
@@ -593,26 +709,32 @@ def get_esm2():
print(f" {activations_flat.shape[0]:,} residues, dim={activations_flat.shape[1]}")
# Step 1: Feature statistics
- print("\n[1/4] Computing feature statistics...")
+ print("\n[1/5] Computing feature statistics...")
t0 = time.time()
stats, _ = compute_feature_stats(sae, activations_flat, device=device)
print(f" Done in {time.time() - t0:.1f}s")
# Step 2: UMAP from decoder weights
- print("[2/4] Computing UMAP from decoder weights...")
+ print("[2/5] Computing UMAP from decoder weights...")
t0 = time.time()
- geometry = compute_feature_umap(sae, random_state=42)
+ geometry = compute_feature_umap(
+ sae,
+ n_neighbors=args.umap_n_neighbors,
+ min_dist=args.umap_min_dist,
+ random_state=args.seed,
+ hdbscan_min_cluster_size=args.hdbscan_min_cluster_size,
+ )
print(f" Done in {time.time() - t0:.1f}s")
# Step 3: Save feature atlas with F1 labels
- print("[3/4] Saving feature atlas...")
+ print("[3/5] Saving feature atlas...")
t0 = time.time()
atlas_path = dashboard_dir / "features_atlas.parquet"
save_feature_atlas(stats, geometry, atlas_path, labels=f1_labels)
print(f" Saved to {atlas_path} in {time.time() - t0:.1f}s")
# Step 4: Export protein examples with F1 annotations
- print("[4/4] Exporting protein examples...")
+ print("[4/5] Exporting protein examples...")
t0 = time.time()
export_protein_features_parquet(
sae=sae,
@@ -621,16 +743,26 @@ def get_esm2():
protein_ids=protein_ids,
output_dir=dashboard_dir,
masks=masks,
- n_examples=6,
+ n_examples=args.n_examples,
device=device,
feature_stats=feature_stats_for_dashboard,
)
print(f" Done in {time.time() - t0:.1f}s")
+ # Step 5: Compute vocab logits (decoder -> LM head projection)
+ print("[5/5] Computing vocab logits...")
+ t0 = time.time()
+ vocab_logits = compute_vocab_logits(sae, args.model_name, model_dtype, device=device)
+ logits_path = dashboard_dir / "vocab_logits.json"
+ with open(logits_path, "w") as f:
+ json.dump(vocab_logits, f)
+ print(f" Saved to {logits_path} in {time.time() - t0:.1f}s")
+
print(f"\nDashboard data saved to: {dashboard_dir}")
print(f" Atlas: {atlas_path}")
print(f" Features: {dashboard_dir}/feature_metadata.parquet")
print(f" Examples: {dashboard_dir}/feature_examples.parquet")
+ print(f" Logits: {logits_path}")
print("\nTo view locally:")
print(f" scp -r cluster:{dashboard_dir} ./dashboard")
print(
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/launch_dashboard.py b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/launch_dashboard.py
index 6051cfa10f..3f2dfd1a67 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/launch_dashboard.py
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/launch_dashboard.py
@@ -13,9 +13,126 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from esm2_sae import launch_protein_dashboard
+"""Launch the ESM2 SAE dashboard locally.
+Usage:
+ # After scp'ing dashboard data from server:
+ scp -r server:/path/to/outputs/650m_5k/eval/dashboard ./dash
-proc = launch_protein_dashboard("./dash/features_atlas.parquet", features_dir="dash")
-input("Dashboard running. Press enter to stop. \n")
-proc.terminate()
+ python scripts/launch_dashboard.py --data-dir ./dash
+"""
+
+import argparse
+import shutil
+import subprocess
+import time
+import webbrowser
+from pathlib import Path
+
+
+def _get_live_feature_ids(data_dir: Path):
+ """Return set of feature_ids with activation_freq > 0."""
+ import pyarrow.parquet as pq
+
+ meta_path = data_dir / "feature_metadata.parquet"
+ if not meta_path.exists():
+ return None
+ table = pq.read_table(meta_path)
+ df = table.to_pandas()
+ live = df.loc[df["activation_freq"] > 0, "feature_id"]
+ return set(live.tolist())
+
+
+def _filter_and_copy_parquet(src: Path, dst: Path, live_ids: set):
+ """Filter a parquet file to only include live feature_ids."""
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+
+ table = pq.read_table(src)
+ df = table.to_pandas()
+ if "feature_id" not in df.columns:
+ shutil.copy2(src, dst)
+ return len(df), len(df)
+ n_before = len(df)
+ df = df[df["feature_id"].isin(live_ids)]
+ pq.write_table(pa.Table.from_pandas(df, preserve_index=False), dst)
+ return n_before, len(df)
+
+
+def main(): # noqa: D103
+ p = argparse.ArgumentParser(description="Launch ESM2 SAE dashboard")
+ p.add_argument(
+ "--data-dir",
+ type=str,
+ required=True,
+ help="Directory containing features_atlas.parquet, feature_metadata.parquet, feature_examples.parquet",
+ )
+ p.add_argument("--port", type=int, default=5176)
+ p.add_argument("--filter-dead", action="store_true", help="Filter out dead latents (activation_freq == 0)")
+ args = p.parse_args()
+
+ data_dir = Path(args.data_dir).resolve()
+ dashboard_dir = Path(__file__).resolve().parent.parent / "protein_dashboard"
+
+ if not (dashboard_dir / "package.json").exists():
+ raise FileNotFoundError(f"Dashboard not found at {dashboard_dir}")
+
+ # Determine live features (opt-in)
+ filter_dead = args.filter_dead
+ live_ids = None
+ if filter_dead:
+ live_ids = _get_live_feature_ids(data_dir)
+ if live_ids is not None:
+ print(f"Filtering to {len(live_ids)} live features (activation_freq > 0)")
+ else:
+ print("No feature_metadata.parquet found, skipping dead latent filtering")
+ filter_dead = False
+
+ # Copy parquet files into dashboard's public/ dir
+ public_dir = dashboard_dir / "public"
+ public_dir.mkdir(exist_ok=True)
+
+ parquet_files = ["features_atlas.parquet", "feature_metadata.parquet", "feature_examples.parquet"]
+ json_files = ["vocab_logits.json", "cluster_labels.json"]
+
+ for fname in parquet_files:
+ src = data_dir / fname
+ if not src.exists():
+ print(f"WARNING: {fname} not found in {data_dir}")
+ continue
+ if filter_dead and live_ids is not None:
+ n_before, n_after = _filter_and_copy_parquet(src, public_dir / fname, live_ids)
+ print(f"Copied {fname} ({n_after}/{n_before} rows, {n_before - n_after} dead filtered)")
+ else:
+ shutil.copy2(src, public_dir / fname)
+ print(f"Copied {fname}")
+
+ for fname in json_files:
+ src = data_dir / fname
+ if src.exists():
+ shutil.copy2(src, public_dir / fname)
+ print(f"Copied {fname}")
+
+ # Install deps if needed
+ if not (dashboard_dir / "node_modules").exists():
+ print("Installing dashboard dependencies...")
+ subprocess.run(["npm", "install"], cwd=dashboard_dir, check=True)
+
+ # Launch dev server
+ print(f"\nStarting dashboard on http://localhost:{args.port}")
+ proc = subprocess.Popen(
+ ["npx", "vite", "--port", str(args.port)],
+ cwd=dashboard_dir,
+ )
+
+ time.sleep(2)
+ webbrowser.open(f"http://localhost:{args.port}")
+
+ try:
+ input("Dashboard running. Press Enter to stop.\n")
+ finally:
+ proc.terminate()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/old_extract.py b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/old_extract.py
deleted file mode 100644
index 9822a8bb2b..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/old_extract.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: LicenseRef-Apache2
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Step 1: Extract activations from ESM2 and save to disk.
-
-Extracts layer activations from an ESM2 model for a set of protein sequences
-and writes them as sharded Parquet files via ActivationStore.
-
-Supports multi-GPU extraction via torchrun for Nx speedup:
- torchrun --nproc_per_node=4 scripts/step1_extract.py ...
-
-Single-GPU usage:
- python scripts/step1_extract.py \
- data.source=uniref50 data.data_dir=./data data.num_proteins=10000 \
- activations.model_name=facebook/esm2_t33_650M_UR50D activations.layer=25 \
- activations.cache_dir=.cache/activations/esm2_650m_layer25
-"""
-
-import json
-import os
-import shutil
-import time
-from pathlib import Path
-
-import hydra
-import torch
-from esm2_sae.data import download_swissprot, download_uniref50, read_fasta
-from esm2_sae.models import ESM2Model
-from omegaconf import DictConfig, OmegaConf
-from sae.utils import get_device, set_seed
-
-
-def resolve_data_path(cfg: DictConfig, data_dir: Path, rank: int) -> Path:
- """Resolve protein FASTA path, downloading if needed. Only rank 0 downloads."""
- source = str(cfg.data.get("source", "swissprot")).lower()
- num_proteins = cfg.data.get("num_proteins", None)
-
- if source == "swissprot":
- fasta_path = data_dir / "uniprot_sprot.fasta.gz"
- if not fasta_path.exists():
- if rank == 0:
- print(f"Downloading SwissProt to {fasta_path}")
- download_swissprot(data_dir)
- else:
- _wait_for_file(fasta_path)
- return fasta_path
-
- if source == "uniref50":
- download_max = cfg.data.get("download_max_proteins", num_proteins)
- if download_max is None:
- fasta_path = data_dir / "uniref50.fasta.gz"
- else:
- fasta_path = data_dir / f"uniref50_first_{download_max}.fasta"
-
- if not fasta_path.exists():
- if rank == 0:
- print(f"Downloading UniRef50 to {fasta_path}")
- download_uniref50(data_dir, max_proteins=download_max)
- else:
- _wait_for_file(fasta_path)
- return fasta_path
-
- raise ValueError(f"Unknown data.source='{source}'. Use 'swissprot' or 'uniref50'.")
-
-
-def _wait_for_file(path: Path, timeout_sec: int = 7200, poll_sec: float = 2.0) -> None:
- """Wait for a file to appear (non-rank-0 waits for rank 0 to download)."""
- start = time.time()
- while not path.exists():
- if (time.time() - start) > timeout_sec:
- raise TimeoutError(f"Timed out waiting for: {path}")
- time.sleep(poll_sec)
-
-
-def _merge_rank_stores(cache_path: Path, world_size: int, metadata: dict) -> None:
- """Merge per-rank temp stores into a single store by moving shard files."""
- cache_path.mkdir(parents=True, exist_ok=True)
- shard_idx = 0
- total_samples = 0
- hidden_dim = None
- shard_size = None
-
- for r in range(world_size):
- tmp_dir = cache_path / f".tmp_rank_{r}"
- with open(tmp_dir / "metadata.json") as f:
- tmp_meta = json.load(f)
-
- hidden_dim = tmp_meta["hidden_dim"]
- shard_size = tmp_meta["shard_size"]
-
- for i in range(tmp_meta["n_shards"]):
- src = tmp_dir / f"shard_{i:05d}.parquet"
- dst = cache_path / f"shard_{shard_idx:05d}.parquet"
- shutil.move(str(src), str(dst))
- shard_idx += 1
-
- total_samples += tmp_meta["n_samples"]
- shutil.rmtree(tmp_dir)
-
- metadata.update(
- n_samples=total_samples,
- n_shards=shard_idx,
- hidden_dim=hidden_dim,
- shard_size=shard_size,
- )
- with open(cache_path / "metadata.json", "w") as f:
- json.dump(metadata, f, indent=2)
-
- print(f"Merged {world_size} rank stores: {total_samples:,} tokens, {shard_idx} shards")
-
-
-@hydra.main(version_base=None, config_path="../configs", config_name="config")
-def main(cfg: DictConfig) -> None:
- """Extract ESM2 layer activations using Hydra configuration."""
- print(OmegaConf.to_yaml(cfg))
-
- set_seed(cfg.seed)
-
- # Distributed setup
- rank = int(os.environ.get("RANK", 0))
- world_size = int(os.environ.get("WORLD_SIZE", 1))
-
- if world_size > 1:
- from datetime import timedelta
-
- import torch.distributed as dist
-
- if not dist.is_initialized():
- dist.init_process_group("nccl", timeout=timedelta(hours=48))
- torch.cuda.set_device(rank)
- device = f"cuda:{rank}"
- else:
- device = get_device()
-
- print(f"[Rank {rank}/{world_size}] Device: {device}")
-
- # Resolve cache path
- cache_dir = cfg.activations.get("cache_dir", None)
- if not cache_dir:
- raise ValueError("activations.cache_dir is required for extraction.")
- cache_path = Path(hydra.utils.get_original_cwd()) / cache_dir
-
- # Check if cache already exists
- if (cache_path / "metadata.json").exists():
- if rank == 0:
- print(f"Cache already exists at {cache_path}. Skipping extraction.")
- with open(cache_path / "metadata.json") as f:
- meta = json.load(f)
- print(f" {meta['n_samples']:,} tokens, {meta['n_shards']} shards, dim={meta['hidden_dim']}")
- if world_size > 1:
- import torch.distributed as dist
-
- dist.barrier()
- dist.destroy_process_group()
- return
-
- # Clean up stale temp dirs from a previous failed multi-GPU run
- if rank == 0 and cache_path.exists():
- for tmp in cache_path.glob(".tmp_rank_*"):
- shutil.rmtree(tmp)
-
- # Load sequences
- data_dir = Path(hydra.utils.get_original_cwd()) / cfg.data.data_dir
- fasta_path = resolve_data_path(cfg, data_dir, rank)
-
- # Wait for download to finish on all ranks
- if world_size > 1:
- import torch.distributed as dist
-
- dist.barrier()
-
- num_proteins = cfg.data.get("num_proteins", None)
- records = read_fasta(
- fasta_path,
- max_sequences=num_proteins,
- max_length=cfg.data.max_seq_length,
- )
- sequences = [rec.sequence for rec in records]
- total_sequences = len(sequences)
-
- if rank == 0:
- print(f"Loaded {total_sequences} sequences from {fasta_path}")
-
- # Split sequences across ranks
- if world_size > 1:
- chunk = total_sequences // world_size
- my_start = rank * chunk
- my_end = total_sequences if rank == world_size - 1 else (rank + 1) * chunk
- my_sequences = sequences[my_start:my_end]
- print(f"[Rank {rank}] Extracting sequences {my_start}-{my_end} ({len(my_sequences)} proteins)")
- else:
- my_sequences = sequences
-
- # Create ESM2 model
- esm2 = ESM2Model(
- model_name=cfg.activations.model_name,
- layer=cfg.activations.layer,
- device=device,
- )
-
- # Extract activations and write to store
- from sae.activation_store import ActivationStore
- from tqdm import tqdm
-
- if world_size > 1:
- store_path = cache_path / f".tmp_rank_{rank}"
- else:
- store_path = cache_path
-
- store = ActivationStore(store_path)
- batch_size = cfg.activations.batch_size
- remove_special = cfg.activations.remove_special_tokens
- padding = cfg.activations.get("tokenizer_padding", "longest")
-
- n_batches = (len(my_sequences) + batch_size - 1) // batch_size
- show_progress = rank == 0
-
- iterator = range(0, len(my_sequences), batch_size)
- if show_progress:
- iterator = tqdm(iterator, total=n_batches, desc="Extracting activations")
-
- t0 = time.time()
- for i in iterator:
- batch_seqs = my_sequences[i : i + batch_size]
- batch_emb, batch_masks = esm2.generate_activations(
- sequences=batch_seqs,
- batch_size=len(batch_seqs),
- remove_special_tokens=remove_special,
- show_progress=False,
- padding=padding,
- )
- batch_flat = batch_emb[batch_masks.bool()]
- store.append(batch_flat)
-
- store.finalize(
- metadata={
- "model_name": cfg.activations.model_name,
- "layer": cfg.activations.layer,
- "n_sequences": len(my_sequences),
- }
- )
-
- elapsed = time.time() - t0
- print(
- f"[Rank {rank}] Extracted {store.metadata['n_samples']:,} tokens "
- f"from {len(my_sequences)} proteins in {elapsed:.1f}s"
- )
-
- # Free GPU memory
- del esm2
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
-
- # Multi-GPU: merge rank stores
- if world_size > 1:
- import torch.distributed as dist
-
- dist.barrier()
-
- if rank == 0:
- _merge_rank_stores(
- cache_path,
- world_size,
- metadata={
- "model_name": cfg.activations.model_name,
- "layer": cfg.activations.layer,
- "n_sequences": total_sequences,
- },
- )
-
- dist.barrier()
- dist.destroy_process_group()
-
- # Print final summary
- if rank == 0:
- with open(cache_path / "metadata.json") as f:
- meta = json.load(f)
- print("\nExtraction complete:")
- print(f" Cache: {cache_path}")
- print(f" Sequences: {meta.get('n_sequences', '?')}")
- print(f" Tokens: {meta['n_samples']:,}")
- print(f" Hidden dim: {meta['hidden_dim']}")
- print(f" Shards: {meta['n_shards']}")
-
-
-if __name__ == "__main__":
- main()
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/train.py b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/train.py
index 6d271fc9ce..6d1aa6e37e 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/train.py
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/recipes/esm2/scripts/train.py
@@ -86,6 +86,7 @@ def parse_args():
train_group.add_argument("--max-grad-norm", type=float, default=None)
train_group.add_argument("--lr-scale-with-latents", action=argparse.BooleanOptionalAction, default=False)
train_group.add_argument("--lr-reference-hidden-dim", type=int, default=2048)
+ train_group.add_argument("--grad-accumulation-steps", type=int, default=1, help="Gradient accumulation steps")
# W&B
wb_group = p.add_argument_group("Weights & Biases")
@@ -155,6 +156,7 @@ def build_training_config(args, device: str) -> TrainingConfig:
checkpoint_steps=args.checkpoint_steps,
lr_scale_with_latents=args.lr_scale_with_latents,
lr_reference_hidden_dim=args.lr_reference_hidden_dim,
+ grad_accumulation_steps=args.grad_accumulation_steps,
)
@@ -266,6 +268,24 @@ def main():
world_size=world_size,
max_shards=max_shards,
)
+ # Compute min batch count across all ranks to keep DDP in sync
+ # Read parquet footers for all ranks' shards (a few KB each, no data loading)
+ if world_size > 1:
+ import pyarrow.parquet as pq_meta
+
+ dataset = dataloader.dataset
+ per_rank = len(dataset.shard_indices)
+ min_batches = None
+ for r in range(world_size):
+ total_rows = sum(
+ pq_meta.read_metadata(store.path / f"shard_{idx:05d}.parquet").num_rows
+ for idx in range(r * per_rank, (r + 1) * per_rank)
+ )
+ batches = total_rows // args.batch_size
+ if min_batches is None or batches < min_batches:
+ min_batches = batches
+ dataset.max_batches = min_batches
+ print(f"[rank {rank}] capped to {min_batches} batches/epoch for DDP sync")
trainer.fit(
dataloader,
max_grad_norm=args.max_grad_norm,
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/pyproject.toml b/bionemo-recipes/interpretability/sparse_autoencoders/sae/pyproject.toml
index a9a9813469..ec08e494cb 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/pyproject.toml
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/sae/pyproject.toml
@@ -23,10 +23,6 @@ dependencies = [
]
[project.optional-dependencies]
-steering = [
- "fastapi>=0.100",
- "uvicorn>=0.20",
-]
tracking = [
"wandb>=0.15",
]
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/__init__.py b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/__init__.py
index aa38288728..51bd5ff777 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/__init__.py
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/__init__.py
@@ -82,7 +82,6 @@
)
from .perf_logger import PerfLogger
from .process_group_manager import ProcessGroupManager
-from .steering import Intervention, InterventionMode, SteeredModel
from .training import ParallelConfig, Trainer, TrainingConfig, WandbConfig
from .utils import get_device, set_seed
@@ -106,8 +105,6 @@
"FeatureLogits",
"FeatureSampler",
"FeatureStats",
- "Intervention",
- "InterventionMode",
"LLMClient",
"LLMResponse",
"LossRecoveredResult",
@@ -122,7 +119,6 @@
"ReLUSAE",
"SparseAutoencoder",
"SparsityMetrics",
- "SteeredModel",
"TokenActivationCollector",
"TokenExample",
"TopExample",
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering.py b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering.py
deleted file mode 100644
index 62d753d5dc..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: LicenseRef-Apache2
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Feature steering via SAE interventions at inference time.
-
-Intercepts a model's residual stream at a target layer, modifies specific
-SAE feature activations (amplify, suppress, or clamp), and re-injects the
-modified activations. Supports three intervention modes:
-
-- additive_code: encode → codes[f] += weight → decode → replace
-- multiplicative_code: encode → codes[f] *= weight → decode → replace
-- direct: activations += weight * W_dec[f] (no encode/decode)
-"""
-
-from __future__ import annotations
-
-from contextlib import contextmanager
-from dataclasses import dataclass
-from enum import Enum
-from typing import List, Optional
-
-import torch
-import torch.nn as nn
-
-from .architectures.base import SparseAutoencoder
-
-
-class InterventionMode(str, Enum):
- """Modes for SAE feature interventions."""
-
- ADDITIVE_CODE = "additive_code"
- MULTIPLICATIVE_CODE = "multiplicative_code"
- DIRECT = "direct"
-
-
-@dataclass
-class Intervention:
- """A single feature intervention."""
-
- feature_id: int
- weight: float
- mode: InterventionMode = InterventionMode.ADDITIVE_CODE
-
-
-class SteeredModel:
- """Wraps a language model + SAE to apply feature steering at inference time.
-
- Registers a forward hook on the target transformer layer that intercepts
- the residual stream, applies interventions, and re-injects modified
- activations.
-
- Args:
- model: A HuggingFace-style transformer model (has .transformer.h or
- .model.layers attribute).
- sae: A trained SparseAutoencoder instance.
- layer: The layer index where the SAE was trained.
- device: Device for computations. If None, uses model's device.
-
- Example:
- >>> steered = SteeredModel(gpt2_model, sae, layer=6)
- >>> steered.set_interventions([
- ... Intervention(feature_id=42, weight=3.0, mode=InterventionMode.ADDITIVE_CODE),
- ... ])
- >>> output = model.generate(input_ids)
- """
-
- def __init__(
- self,
- model: nn.Module,
- sae: SparseAutoencoder,
- layer: int,
- device: Optional[torch.device] = None,
- ):
- """Initialize the steered model with a language model, SAE, and target layer."""
- self.model = model
- self.sae = sae
- self.layer = layer
- self.device = device or next(model.parameters()).device
- self._interventions: List[Intervention] = []
- self._hook_handle = None
-
- # Move SAE to same device as model, in eval mode
- self.sae = self.sae.to(self.device).eval()
-
- # Resolve the target layer module
- self._target_module = self._resolve_layer(model, layer)
-
- @staticmethod
- def _resolve_layer(model: nn.Module, layer: int) -> nn.Module:
- """Find the transformer block at the given layer index."""
- # GPT-2 style: model.transformer.h[layer]
- if hasattr(model, "transformer") and hasattr(model.transformer, "h"):
- return model.transformer.h[layer]
- # LLaMA / Mistral style: model.model.layers[layer]
- if hasattr(model, "model") and hasattr(model.model, "layers"):
- return model.model.layers[layer]
- # Generic: try common attribute names
- for attr in ("layers", "blocks", "encoder.layer", "decoder.layer"):
- parts = attr.split(".")
- obj = model
- try:
- for p in parts:
- obj = getattr(obj, p)
- return obj[layer]
- except (AttributeError, IndexError, TypeError):
- continue
- raise ValueError(
- f"Cannot find transformer layer {layer}. "
- "Supported layouts: model.transformer.h[], model.model.layers[], "
- "model.layers[], model.blocks[]"
- )
-
- def set_interventions(self, interventions: List[Intervention]) -> None:
- """Set active interventions and register/update the forward hook."""
- self._interventions = list(interventions)
- self._unregister_hook()
- if self._interventions:
- self._register_hook()
-
- def clear_interventions(self) -> None:
- """Remove all interventions and unregister the hook."""
- self._interventions = []
- self._unregister_hook()
-
- @contextmanager
- def intervene(self, interventions: List[Intervention]):
- """Context manager for temporary steering."""
- prev = self._interventions[:]
- self.set_interventions(interventions)
- try:
- yield self
- finally:
- self.set_interventions(prev)
-
- def _register_hook(self) -> None:
- self._hook_handle = self._target_module.register_forward_hook(self._hook_fn)
-
- def _unregister_hook(self) -> None:
- if self._hook_handle is not None:
- self._hook_handle.remove()
- self._hook_handle = None
-
- @torch.no_grad()
- def _hook_fn(self, module, input, output):
- """Forward hook that applies interventions to the residual stream."""
- # HuggingFace transformer blocks return (hidden_states, ...) tuples
- if isinstance(output, tuple):
- hidden_states = output[0]
- rest = output[1:]
- else:
- hidden_states = output
- rest = None
-
- # Separate interventions by type
- direct_interventions = [iv for iv in self._interventions if iv.mode == InterventionMode.DIRECT]
- code_interventions = [iv for iv in self._interventions if iv.mode != InterventionMode.DIRECT]
-
- # Apply direct interventions: activations += weight * W_dec[feature_id]
- if direct_interventions:
- # Get decoder weight matrix: shape [input_dim, hidden_dim]
- W_dec = self.sae.decoder.weight.data
- for iv in direct_interventions:
- # W_dec[:, feature_id] is the decoder direction for this feature
- direction = W_dec[:, iv.feature_id] # [input_dim]
- hidden_states = hidden_states + iv.weight * direction
-
- # Apply code-space interventions: encode → modify → decode → replace
- if code_interventions:
- original_shape = hidden_states.shape # [batch, seq_len, hidden_dim]
- flat = hidden_states.reshape(-1, original_shape[-1]) # [B*T, D]
-
- # Encode
- codes = self.sae.encode(flat) # [B*T, n_features]
-
- # Apply modifications
- for iv in code_interventions:
- if iv.mode == InterventionMode.ADDITIVE_CODE:
- codes[:, iv.feature_id] = codes[:, iv.feature_id] + iv.weight
- elif iv.mode == InterventionMode.MULTIPLICATIVE_CODE:
- codes[:, iv.feature_id] = codes[:, iv.feature_id] * iv.weight
-
- # Decode back — use base decode (without normalization info)
- reconstructed = self.sae.decode(codes) # [B*T, D]
-
- # Compute the SAE residual on the unmodified input to preserve
- # information not captured by the SAE
- flat_original = hidden_states.reshape(-1, original_shape[-1])
- codes_original = self.sae.encode(flat_original)
- recon_original = self.sae.decode(codes_original)
- residual = flat_original - recon_original # what SAE can't represent
-
- # Final output: steered reconstruction + original residual
- hidden_states = (reconstructed + residual).reshape(original_shape)
-
- if rest is not None:
- return (hidden_states,) + rest
- return hidden_states
-
- def generate(
- self,
- input_ids: torch.Tensor,
- attention_mask: Optional[torch.Tensor] = None,
- **generate_kwargs,
- ) -> torch.Tensor:
- """Run model.generate() with current interventions active.
-
- This is a convenience wrapper — the hook fires automatically on each
- forward pass during autoregressive generation.
- """
- return self.model.generate(
- input_ids=input_ids,
- attention_mask=attention_mask,
- **generate_kwargs,
- )
-
- def __del__(self):
- """Clean up by unregistering the forward hook."""
- self._unregister_hook()
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_server.py b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_server.py
deleted file mode 100644
index 9eb7c86b58..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_server.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: LicenseRef-Apache2
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""FastAPI server for SAE feature steering with SSE streaming.
-
-Provides three endpoints:
-- GET /features — feature metadata from parquet (for the picker UI)
-- POST /chat — steered text generation with SSE streaming
-- GET /health — model info
-
-Launch via launch_steering_server() or use create_app() for custom setups.
-"""
-
-from __future__ import annotations
-
-import json
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-
-import torch
-
-from .steering import Intervention, InterventionMode, SteeredModel
-
-
-def load_feature_metadata(parquet_dir: str | Path) -> List[Dict[str, Any]]:
- """Read feature_metadata.parquet into a list of dicts."""
- import pyarrow.parquet as pq
-
- parquet_dir = Path(parquet_dir)
- meta_path = parquet_dir / "feature_metadata.parquet"
- if not meta_path.exists():
- return []
- table = pq.read_table(meta_path)
- rows = table.to_pydict()
- n = len(rows.get("feature_id", []))
- features = []
- for i in range(n):
- feat = {}
- for col in rows:
- feat[col] = rows[col][i]
- features.append(feat)
- return features
-
-
-def create_app(
- steered_model: SteeredModel,
- tokenizer: Any,
- parquet_dir: str | Path,
- max_new_tokens: int = 256,
-) -> Any:
- """Create a FastAPI application for steering.
-
- Args:
- steered_model: SteeredModel wrapping the LM + SAE.
- tokenizer: HuggingFace tokenizer for the model.
- parquet_dir: Directory containing feature_metadata.parquet.
- max_new_tokens: Default max tokens for generation.
-
- Returns:
- FastAPI app instance.
- """
- from fastapi import FastAPI, Request
- from fastapi.middleware.cors import CORSMiddleware
- from fastapi.responses import StreamingResponse
-
- app = FastAPI(title="SAE Steering Server")
- app.add_middleware(
- CORSMiddleware,
- allow_origins=["*"],
- allow_methods=["*"],
- allow_headers=["*"],
- )
-
- # Load feature metadata at startup
- feature_metadata = load_feature_metadata(parquet_dir)
-
- @app.get("/health")
- def health():
- return {
- "status": "ok",
- "model": getattr(steered_model.model, "name_or_path", str(type(steered_model.model).__name__)),
- "sae_hidden_dim": steered_model.sae.hidden_dim,
- "sae_input_dim": steered_model.sae.input_dim,
- "layer": steered_model.layer,
- "n_features": len(feature_metadata),
- }
-
- @app.get("/features")
- def get_features(search: Optional[str] = None, limit: int = 200):
- results = feature_metadata
- if search:
- q = search.lower()
- results = [
- f for f in results if q in str(f.get("description", "")).lower() or q in str(f.get("feature_id", ""))
- ]
- return results[:limit]
-
- @app.post("/chat")
- async def chat(request: Request):
- body = await request.json()
- messages = body.get("messages", [])
- raw_interventions = body.get("interventions", [])
- compare = body.get("compare", False)
- max_tokens = body.get("max_tokens", max_new_tokens)
-
- # Build prompt from messages
- prompt = _build_prompt(messages)
-
- # Parse interventions
- interventions = [
- Intervention(
- feature_id=iv["feature_id"],
- weight=iv["weight"],
- mode=InterventionMode(iv.get("mode", "additive_code")),
- )
- for iv in raw_interventions
- ]
-
- async def event_stream():
- # Generate steered response
- if interventions:
- steered_model.set_interventions(interventions)
- else:
- steered_model.clear_interventions()
-
- steered_tokens = _generate_tokens(steered_model, tokenizer, prompt, max_tokens)
- for token_text in steered_tokens:
- event = json.dumps({"source": "steered", "token": token_text})
- yield f"event: token\ndata: {event}\n\n"
-
- # Generate baseline response if compare mode
- if compare:
- steered_model.clear_interventions()
- baseline_tokens = _generate_tokens(steered_model, tokenizer, prompt, max_tokens)
- for token_text in baseline_tokens:
- event = json.dumps({"source": "baseline", "token": token_text})
- yield f"event: token\ndata: {event}\n\n"
-
- # Clean up: restore interventions if they were set
- if interventions and not compare:
- pass # Already set
- elif interventions:
- steered_model.set_interventions(interventions)
-
- yield "event: done\ndata: {}\n\n"
-
- return StreamingResponse(
- event_stream(),
- media_type="text/event-stream",
- headers={
- "Cache-Control": "no-cache",
- "Connection": "keep-alive",
- "X-Accel-Buffering": "no",
- },
- )
-
- return app
-
-
-def _build_prompt(messages: List[Dict[str, str]]) -> str:
- """Convert chat messages into a single prompt string for the LM."""
- parts = []
- for msg in messages:
- role = msg.get("role", "user")
- content = msg.get("content", "")
- if role == "user":
- parts.append(f"User: {content}")
- elif role == "assistant":
- parts.append(f"Assistant: {content}")
- elif role == "system":
- parts.append(content)
- parts.append("Assistant:")
- return "\n".join(parts)
-
-
-def _generate_tokens(
- steered_model: SteeredModel,
- tokenizer: Any,
- prompt: str,
- max_new_tokens: int,
-) -> List[str]:
- """Generate tokens one at a time, returning a list of token strings.
-
- Uses model.generate() with the steering hook active, then splits
- the output into individual tokens for streaming.
- """
- device = steered_model.device
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
- input_ids = inputs["input_ids"].to(device)
- attention_mask = inputs["attention_mask"].to(device)
- prompt_len = input_ids.shape[1]
-
- with torch.no_grad():
- output_ids = steered_model.generate(
- input_ids=input_ids,
- attention_mask=attention_mask,
- max_new_tokens=max_new_tokens,
- do_sample=True,
- temperature=0.7,
- top_p=0.9,
- pad_token_id=tokenizer.eos_token_id,
- )
-
- # Extract only the new tokens
- new_token_ids = output_ids[0, prompt_len:]
-
- # Decode each token individually for streaming
- tokens = []
- for tid in new_token_ids:
- if tid.item() == tokenizer.eos_token_id:
- break
- tokens.append(tokenizer.decode([tid.item()]))
-
- return tokens
-
-
-def launch_steering_server(
- steered_model: SteeredModel,
- tokenizer: Any,
- parquet_dir: str | Path,
- port: int = 8000,
- host: str = "127.0.0.1",
-) -> None:
- """Start the steering API server.
-
- Args:
- steered_model: SteeredModel wrapping the LM + SAE.
- tokenizer: HuggingFace tokenizer.
- parquet_dir: Directory with feature_metadata.parquet.
- port: Server port (default: 8000).
- host: Server host (default: 127.0.0.1).
- """
- import uvicorn
-
- app = create_app(steered_model, tokenizer, parquet_dir)
- print(f"Starting steering server at http://{host}:{port}")
- print(f" Features loaded from: {parquet_dir}")
- uvicorn.run(app, host=host, port=port)
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/index.html b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/index.html
deleted file mode 100644
index 0ad5c2fac2..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/index.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
-
-
- SAE Steering
-
-
-
-
-
-
-
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/package-lock.json b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/package-lock.json
deleted file mode 100644
index 972424c25b..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/package-lock.json
+++ /dev/null
@@ -1,1674 +0,0 @@
-{
- "name": "sae-steering-ui",
- "version": "0.1.0",
- "lockfileVersion": 3,
- "requires": true,
- "packages": {
- "": {
- "name": "sae-steering-ui",
- "version": "0.1.0",
- "dependencies": {
- "react": "^18.2.0",
- "react-dom": "^18.2.0"
- },
- "devDependencies": {
- "@vitejs/plugin-react": "^4.2.0",
- "vite": "^5.0.0"
- }
- },
- "node_modules/@babel/code-frame": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz",
- "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/helper-validator-identifier": "^7.28.5",
- "js-tokens": "^4.0.0",
- "picocolors": "^1.1.1"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/compat-data": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz",
- "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/core": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz",
- "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/code-frame": "^7.29.0",
- "@babel/generator": "^7.29.0",
- "@babel/helper-compilation-targets": "^7.28.6",
- "@babel/helper-module-transforms": "^7.28.6",
- "@babel/helpers": "^7.28.6",
- "@babel/parser": "^7.29.0",
- "@babel/template": "^7.28.6",
- "@babel/traverse": "^7.29.0",
- "@babel/types": "^7.29.0",
- "@jridgewell/remapping": "^2.3.5",
- "convert-source-map": "^2.0.0",
- "debug": "^4.1.0",
- "gensync": "^1.0.0-beta.2",
- "json5": "^2.2.3",
- "semver": "^6.3.1"
- },
- "engines": {
- "node": ">=6.9.0"
- },
- "funding": {
- "type": "opencollective",
- "url": "https://opencollective.com/babel"
- }
- },
- "node_modules/@babel/generator": {
- "version": "7.29.1",
- "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz",
- "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/parser": "^7.29.0",
- "@babel/types": "^7.29.0",
- "@jridgewell/gen-mapping": "^0.3.12",
- "@jridgewell/trace-mapping": "^0.3.28",
- "jsesc": "^3.0.2"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-compilation-targets": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz",
- "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/compat-data": "^7.28.6",
- "@babel/helper-validator-option": "^7.27.1",
- "browserslist": "^4.24.0",
- "lru-cache": "^5.1.1",
- "semver": "^6.3.1"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-globals": {
- "version": "7.28.0",
- "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
- "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-module-imports": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz",
- "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/traverse": "^7.28.6",
- "@babel/types": "^7.28.6"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-module-transforms": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz",
- "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/helper-module-imports": "^7.28.6",
- "@babel/helper-validator-identifier": "^7.28.5",
- "@babel/traverse": "^7.28.6"
- },
- "engines": {
- "node": ">=6.9.0"
- },
- "peerDependencies": {
- "@babel/core": "^7.0.0"
- }
- },
- "node_modules/@babel/helper-plugin-utils": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz",
- "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-string-parser": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
- "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-validator-identifier": {
- "version": "7.28.5",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
- "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helper-validator-option": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
- "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/helpers": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz",
- "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/template": "^7.28.6",
- "@babel/types": "^7.28.6"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/parser": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz",
- "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/types": "^7.29.0"
- },
- "bin": {
- "parser": "bin/babel-parser.js"
- },
- "engines": {
- "node": ">=6.0.0"
- }
- },
- "node_modules/@babel/plugin-transform-react-jsx-self": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
- "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/helper-plugin-utils": "^7.27.1"
- },
- "engines": {
- "node": ">=6.9.0"
- },
- "peerDependencies": {
- "@babel/core": "^7.0.0-0"
- }
- },
- "node_modules/@babel/plugin-transform-react-jsx-source": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
- "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/helper-plugin-utils": "^7.27.1"
- },
- "engines": {
- "node": ">=6.9.0"
- },
- "peerDependencies": {
- "@babel/core": "^7.0.0-0"
- }
- },
- "node_modules/@babel/template": {
- "version": "7.28.6",
- "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz",
- "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/code-frame": "^7.28.6",
- "@babel/parser": "^7.28.6",
- "@babel/types": "^7.28.6"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/traverse": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz",
- "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/code-frame": "^7.29.0",
- "@babel/generator": "^7.29.0",
- "@babel/helper-globals": "^7.28.0",
- "@babel/parser": "^7.29.0",
- "@babel/template": "^7.28.6",
- "@babel/types": "^7.29.0",
- "debug": "^4.3.1"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@babel/types": {
- "version": "7.29.0",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz",
- "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/helper-string-parser": "^7.27.1",
- "@babel/helper-validator-identifier": "^7.28.5"
- },
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/@esbuild/aix-ppc64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
- "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
- "cpu": [
- "ppc64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "aix"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/android-arm": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
- "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
- "cpu": [
- "arm"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "android"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/android-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
- "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "android"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/android-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
- "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "android"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/darwin-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
- "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "darwin"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/darwin-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
- "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "darwin"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/freebsd-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
- "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "freebsd"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/freebsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
- "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "freebsd"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-arm": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
- "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
- "cpu": [
- "arm"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
- "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-ia32": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
- "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
- "cpu": [
- "ia32"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-loong64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
- "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
- "cpu": [
- "loong64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-mips64el": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
- "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
- "cpu": [
- "mips64el"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-ppc64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
- "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
- "cpu": [
- "ppc64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-riscv64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
- "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
- "cpu": [
- "riscv64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-s390x": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
- "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
- "cpu": [
- "s390x"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/linux-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
- "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/netbsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
- "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "netbsd"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/openbsd-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
- "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "openbsd"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/sunos-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
- "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "sunos"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/win32-arm64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
- "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/win32-ia32": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
- "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
- "cpu": [
- "ia32"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@esbuild/win32-x64": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
- "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ],
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@jridgewell/gen-mapping": {
- "version": "0.3.13",
- "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
- "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@jridgewell/sourcemap-codec": "^1.5.0",
- "@jridgewell/trace-mapping": "^0.3.24"
- }
- },
- "node_modules/@jridgewell/remapping": {
- "version": "2.3.5",
- "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
- "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@jridgewell/gen-mapping": "^0.3.5",
- "@jridgewell/trace-mapping": "^0.3.24"
- }
- },
- "node_modules/@jridgewell/resolve-uri": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
- "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.0.0"
- }
- },
- "node_modules/@jridgewell/sourcemap-codec": {
- "version": "1.5.5",
- "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
- "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/@jridgewell/trace-mapping": {
- "version": "0.3.31",
- "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
- "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@jridgewell/resolve-uri": "^3.1.0",
- "@jridgewell/sourcemap-codec": "^1.4.14"
- }
- },
- "node_modules/@rolldown/pluginutils": {
- "version": "1.0.0-beta.27",
- "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
- "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/@rollup/rollup-android-arm-eabi": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz",
- "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==",
- "cpu": [
- "arm"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "android"
- ]
- },
- "node_modules/@rollup/rollup-android-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz",
- "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "android"
- ]
- },
- "node_modules/@rollup/rollup-darwin-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz",
- "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "darwin"
- ]
- },
- "node_modules/@rollup/rollup-darwin-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz",
- "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "darwin"
- ]
- },
- "node_modules/@rollup/rollup-freebsd-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz",
- "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "freebsd"
- ]
- },
- "node_modules/@rollup/rollup-freebsd-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz",
- "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "freebsd"
- ]
- },
- "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz",
- "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==",
- "cpu": [
- "arm"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-arm-musleabihf": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz",
- "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==",
- "cpu": [
- "arm"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-arm64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz",
- "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-arm64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz",
- "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-loong64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz",
- "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==",
- "cpu": [
- "loong64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-loong64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz",
- "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==",
- "cpu": [
- "loong64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-ppc64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz",
- "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==",
- "cpu": [
- "ppc64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-ppc64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz",
- "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==",
- "cpu": [
- "ppc64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-riscv64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz",
- "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==",
- "cpu": [
- "riscv64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-riscv64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz",
- "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==",
- "cpu": [
- "riscv64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-s390x-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz",
- "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==",
- "cpu": [
- "s390x"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-x64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz",
- "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-linux-x64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz",
- "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "linux"
- ]
- },
- "node_modules/@rollup/rollup-openbsd-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz",
- "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "openbsd"
- ]
- },
- "node_modules/@rollup/rollup-openharmony-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz",
- "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "openharmony"
- ]
- },
- "node_modules/@rollup/rollup-win32-arm64-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz",
- "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==",
- "cpu": [
- "arm64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ]
- },
- "node_modules/@rollup/rollup-win32-ia32-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz",
- "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==",
- "cpu": [
- "ia32"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ]
- },
- "node_modules/@rollup/rollup-win32-x64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz",
- "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ]
- },
- "node_modules/@rollup/rollup-win32-x64-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz",
- "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==",
- "cpu": [
- "x64"
- ],
- "dev": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "win32"
- ]
- },
- "node_modules/@types/babel__core": {
- "version": "7.20.5",
- "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
- "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/parser": "^7.20.7",
- "@babel/types": "^7.20.7",
- "@types/babel__generator": "*",
- "@types/babel__template": "*",
- "@types/babel__traverse": "*"
- }
- },
- "node_modules/@types/babel__generator": {
- "version": "7.27.0",
- "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
- "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/types": "^7.0.0"
- }
- },
- "node_modules/@types/babel__template": {
- "version": "7.4.4",
- "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
- "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/parser": "^7.1.0",
- "@babel/types": "^7.0.0"
- }
- },
- "node_modules/@types/babel__traverse": {
- "version": "7.28.0",
- "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
- "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/types": "^7.28.2"
- }
- },
- "node_modules/@types/estree": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
- "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/@vitejs/plugin-react": {
- "version": "4.7.0",
- "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
- "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@babel/core": "^7.28.0",
- "@babel/plugin-transform-react-jsx-self": "^7.27.1",
- "@babel/plugin-transform-react-jsx-source": "^7.27.1",
- "@rolldown/pluginutils": "1.0.0-beta.27",
- "@types/babel__core": "^7.20.5",
- "react-refresh": "^0.17.0"
- },
- "engines": {
- "node": "^14.18.0 || >=16.0.0"
- },
- "peerDependencies": {
- "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
- }
- },
- "node_modules/baseline-browser-mapping": {
- "version": "2.9.19",
- "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz",
- "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==",
- "dev": true,
- "license": "Apache-2.0",
- "bin": {
- "baseline-browser-mapping": "dist/cli.js"
- }
- },
- "node_modules/browserslist": {
- "version": "4.28.1",
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
- "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
- "dev": true,
- "funding": [
- {
- "type": "opencollective",
- "url": "https://opencollective.com/browserslist"
- },
- {
- "type": "tidelift",
- "url": "https://tidelift.com/funding/github/npm/browserslist"
- },
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
- "license": "MIT",
- "dependencies": {
- "baseline-browser-mapping": "^2.9.0",
- "caniuse-lite": "^1.0.30001759",
- "electron-to-chromium": "^1.5.263",
- "node-releases": "^2.0.27",
- "update-browserslist-db": "^1.2.0"
- },
- "bin": {
- "browserslist": "cli.js"
- },
- "engines": {
- "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
- }
- },
- "node_modules/caniuse-lite": {
- "version": "1.0.30001769",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001769.tgz",
- "integrity": "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg==",
- "dev": true,
- "funding": [
- {
- "type": "opencollective",
- "url": "https://opencollective.com/browserslist"
- },
- {
- "type": "tidelift",
- "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
- },
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
- "license": "CC-BY-4.0"
- },
- "node_modules/convert-source-map": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
- "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/debug": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
- "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ms": "^2.1.3"
- },
- "engines": {
- "node": ">=6.0"
- },
- "peerDependenciesMeta": {
- "supports-color": {
- "optional": true
- }
- }
- },
- "node_modules/electron-to-chromium": {
- "version": "1.5.286",
- "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz",
- "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/esbuild": {
- "version": "0.21.5",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
- "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
- "dev": true,
- "hasInstallScript": true,
- "license": "MIT",
- "bin": {
- "esbuild": "bin/esbuild"
- },
- "engines": {
- "node": ">=12"
- },
- "optionalDependencies": {
- "@esbuild/aix-ppc64": "0.21.5",
- "@esbuild/android-arm": "0.21.5",
- "@esbuild/android-arm64": "0.21.5",
- "@esbuild/android-x64": "0.21.5",
- "@esbuild/darwin-arm64": "0.21.5",
- "@esbuild/darwin-x64": "0.21.5",
- "@esbuild/freebsd-arm64": "0.21.5",
- "@esbuild/freebsd-x64": "0.21.5",
- "@esbuild/linux-arm": "0.21.5",
- "@esbuild/linux-arm64": "0.21.5",
- "@esbuild/linux-ia32": "0.21.5",
- "@esbuild/linux-loong64": "0.21.5",
- "@esbuild/linux-mips64el": "0.21.5",
- "@esbuild/linux-ppc64": "0.21.5",
- "@esbuild/linux-riscv64": "0.21.5",
- "@esbuild/linux-s390x": "0.21.5",
- "@esbuild/linux-x64": "0.21.5",
- "@esbuild/netbsd-x64": "0.21.5",
- "@esbuild/openbsd-x64": "0.21.5",
- "@esbuild/sunos-x64": "0.21.5",
- "@esbuild/win32-arm64": "0.21.5",
- "@esbuild/win32-ia32": "0.21.5",
- "@esbuild/win32-x64": "0.21.5"
- }
- },
- "node_modules/escalade": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
- "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/fsevents": {
- "version": "2.3.3",
- "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
- "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
- "dev": true,
- "hasInstallScript": true,
- "license": "MIT",
- "optional": true,
- "os": [
- "darwin"
- ],
- "engines": {
- "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
- }
- },
- "node_modules/gensync": {
- "version": "1.0.0-beta.2",
- "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
- "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6.9.0"
- }
- },
- "node_modules/js-tokens": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
- "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
- "license": "MIT"
- },
- "node_modules/jsesc": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
- "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
- "dev": true,
- "license": "MIT",
- "bin": {
- "jsesc": "bin/jsesc"
- },
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/json5": {
- "version": "2.2.3",
- "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
- "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
- "dev": true,
- "license": "MIT",
- "bin": {
- "json5": "lib/cli.js"
- },
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/loose-envify": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
- "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
- "license": "MIT",
- "dependencies": {
- "js-tokens": "^3.0.0 || ^4.0.0"
- },
- "bin": {
- "loose-envify": "cli.js"
- }
- },
- "node_modules/lru-cache": {
- "version": "5.1.1",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
- "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "yallist": "^3.0.2"
- }
- },
- "node_modules/ms": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
- "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/nanoid": {
- "version": "3.3.11",
- "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
- "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
- "dev": true,
- "funding": [
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
- "license": "MIT",
- "bin": {
- "nanoid": "bin/nanoid.cjs"
- },
- "engines": {
- "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
- }
- },
- "node_modules/node-releases": {
- "version": "2.0.27",
- "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
- "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/picocolors": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
- "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/postcss": {
- "version": "8.5.6",
- "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
- "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
- "dev": true,
- "funding": [
- {
- "type": "opencollective",
- "url": "https://opencollective.com/postcss/"
- },
- {
- "type": "tidelift",
- "url": "https://tidelift.com/funding/github/npm/postcss"
- },
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
- "license": "MIT",
- "dependencies": {
- "nanoid": "^3.3.11",
- "picocolors": "^1.1.1",
- "source-map-js": "^1.2.1"
- },
- "engines": {
- "node": "^10 || ^12 || >=14"
- }
- },
- "node_modules/react": {
- "version": "18.3.1",
- "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
- "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
- "license": "MIT",
- "dependencies": {
- "loose-envify": "^1.1.0"
- },
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/react-dom": {
- "version": "18.3.1",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
- "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
- "license": "MIT",
- "dependencies": {
- "loose-envify": "^1.1.0",
- "scheduler": "^0.23.2"
- },
- "peerDependencies": {
- "react": "^18.3.1"
- }
- },
- "node_modules/react-refresh": {
- "version": "0.17.0",
- "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
- "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/rollup": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz",
- "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "@types/estree": "1.0.8"
- },
- "bin": {
- "rollup": "dist/bin/rollup"
- },
- "engines": {
- "node": ">=18.0.0",
- "npm": ">=8.0.0"
- },
- "optionalDependencies": {
- "@rollup/rollup-android-arm-eabi": "4.57.1",
- "@rollup/rollup-android-arm64": "4.57.1",
- "@rollup/rollup-darwin-arm64": "4.57.1",
- "@rollup/rollup-darwin-x64": "4.57.1",
- "@rollup/rollup-freebsd-arm64": "4.57.1",
- "@rollup/rollup-freebsd-x64": "4.57.1",
- "@rollup/rollup-linux-arm-gnueabihf": "4.57.1",
- "@rollup/rollup-linux-arm-musleabihf": "4.57.1",
- "@rollup/rollup-linux-arm64-gnu": "4.57.1",
- "@rollup/rollup-linux-arm64-musl": "4.57.1",
- "@rollup/rollup-linux-loong64-gnu": "4.57.1",
- "@rollup/rollup-linux-loong64-musl": "4.57.1",
- "@rollup/rollup-linux-ppc64-gnu": "4.57.1",
- "@rollup/rollup-linux-ppc64-musl": "4.57.1",
- "@rollup/rollup-linux-riscv64-gnu": "4.57.1",
- "@rollup/rollup-linux-riscv64-musl": "4.57.1",
- "@rollup/rollup-linux-s390x-gnu": "4.57.1",
- "@rollup/rollup-linux-x64-gnu": "4.57.1",
- "@rollup/rollup-linux-x64-musl": "4.57.1",
- "@rollup/rollup-openbsd-x64": "4.57.1",
- "@rollup/rollup-openharmony-arm64": "4.57.1",
- "@rollup/rollup-win32-arm64-msvc": "4.57.1",
- "@rollup/rollup-win32-ia32-msvc": "4.57.1",
- "@rollup/rollup-win32-x64-gnu": "4.57.1",
- "@rollup/rollup-win32-x64-msvc": "4.57.1",
- "fsevents": "~2.3.2"
- }
- },
- "node_modules/scheduler": {
- "version": "0.23.2",
- "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
- "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
- "license": "MIT",
- "dependencies": {
- "loose-envify": "^1.1.0"
- }
- },
- "node_modules/semver": {
- "version": "6.3.1",
- "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
- "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
- "dev": true,
- "license": "ISC",
- "bin": {
- "semver": "bin/semver.js"
- }
- },
- "node_modules/source-map-js": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
- "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
- "dev": true,
- "license": "BSD-3-Clause",
- "engines": {
- "node": ">=0.10.0"
- }
- },
- "node_modules/update-browserslist-db": {
- "version": "1.2.3",
- "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz",
- "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==",
- "dev": true,
- "funding": [
- {
- "type": "opencollective",
- "url": "https://opencollective.com/browserslist"
- },
- {
- "type": "tidelift",
- "url": "https://tidelift.com/funding/github/npm/browserslist"
- },
- {
- "type": "github",
- "url": "https://github.com/sponsors/ai"
- }
- ],
- "license": "MIT",
- "dependencies": {
- "escalade": "^3.2.0",
- "picocolors": "^1.1.1"
- },
- "bin": {
- "update-browserslist-db": "cli.js"
- },
- "peerDependencies": {
- "browserslist": ">= 4.21.0"
- }
- },
- "node_modules/vite": {
- "version": "5.4.21",
- "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
- "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "esbuild": "^0.21.3",
- "postcss": "^8.4.43",
- "rollup": "^4.20.0"
- },
- "bin": {
- "vite": "bin/vite.js"
- },
- "engines": {
- "node": "^18.0.0 || >=20.0.0"
- },
- "funding": {
- "url": "https://github.com/vitejs/vite?sponsor=1"
- },
- "optionalDependencies": {
- "fsevents": "~2.3.3"
- },
- "peerDependencies": {
- "@types/node": "^18.0.0 || >=20.0.0",
- "less": "*",
- "lightningcss": "^1.21.0",
- "sass": "*",
- "sass-embedded": "*",
- "stylus": "*",
- "sugarss": "*",
- "terser": "^5.4.0"
- },
- "peerDependenciesMeta": {
- "@types/node": {
- "optional": true
- },
- "less": {
- "optional": true
- },
- "lightningcss": {
- "optional": true
- },
- "sass": {
- "optional": true
- },
- "sass-embedded": {
- "optional": true
- },
- "stylus": {
- "optional": true
- },
- "sugarss": {
- "optional": true
- },
- "terser": {
- "optional": true
- }
- }
- },
- "node_modules/yallist": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
- "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
- "dev": true,
- "license": "ISC"
- }
- }
-}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/App.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/App.jsx
deleted file mode 100644
index a427b732ba..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/App.jsx
+++ /dev/null
@@ -1,296 +0,0 @@
-import React, { useState, useEffect, useCallback } from 'react'
-import FeaturePicker from './FeaturePicker'
-import InterventionPanel from './InterventionPanel'
-import ChatPanel from './ChatPanel'
-
-const API_BASE = '/api'
-
-const styles = {
- container: {
- display: 'flex',
- height: '100vh',
- overflow: 'hidden',
- },
- sidebar: {
- width: '360px',
- flexShrink: 0,
- background: '#fff',
- borderRight: '1px solid #e0e0e0',
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- },
- sidebarHeader: {
- padding: '16px 16px 12px',
- borderBottom: '1px solid #e0e0e0',
- flexShrink: 0,
- },
- title: {
- fontSize: '18px',
- fontWeight: '700',
- marginBottom: '2px',
- },
- subtitle: {
- fontSize: '12px',
- color: '#888',
- },
- sidebarContent: {
- flex: 1,
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- minHeight: 0,
- },
- main: {
- flex: 1,
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- minWidth: 0,
- },
- mainHeader: {
- padding: '12px 20px',
- borderBottom: '1px solid #e0e0e0',
- background: '#fff',
- display: 'flex',
- justifyContent: 'space-between',
- alignItems: 'center',
- flexShrink: 0,
- },
- compareToggle: {
- display: 'flex',
- alignItems: 'center',
- gap: '8px',
- fontSize: '13px',
- color: '#555',
- },
- toggleSwitch: {
- position: 'relative',
- width: '36px',
- height: '20px',
- borderRadius: '10px',
- cursor: 'pointer',
- transition: 'background 0.2s',
- },
- toggleKnob: {
- position: 'absolute',
- top: '2px',
- width: '16px',
- height: '16px',
- borderRadius: '50%',
- background: '#fff',
- transition: 'left 0.2s',
- boxShadow: '0 1px 3px rgba(0,0,0,0.2)',
- },
- modelInfo: {
- fontSize: '12px',
- color: '#999',
- fontFamily: 'monospace',
- },
-}
-
-export default function App() {
- const [features, setFeatures] = useState([])
- const [interventions, setInterventions] = useState([])
- const [messages, setMessages] = useState([])
- const [compare, setCompare] = useState(true)
- const [loading, setLoading] = useState(false)
- const [modelInfo, setModelInfo] = useState(null)
-
- // Fetch features and health on mount
- useEffect(() => {
- fetch(`${API_BASE}/features?limit=500`)
- .then(r => r.json())
- .then(setFeatures)
- .catch(err => console.error('Failed to load features:', err))
-
- fetch(`${API_BASE}/health`)
- .then(r => r.json())
- .then(setModelInfo)
- .catch(err => console.error('Failed to load health:', err))
- }, [])
-
- const handleAddIntervention = useCallback((feature) => {
- setInterventions(prev => {
- if (prev.some(iv => iv.feature_id === feature.feature_id)) return prev
- return [...prev, {
- feature_id: feature.feature_id,
- description: feature.description || `Feature ${feature.feature_id}`,
- weight: 3.0,
- mode: 'additive_code',
- }]
- })
- }, [])
-
- const handleUpdateIntervention = useCallback((featureId, updates) => {
- setInterventions(prev =>
- prev.map(iv => iv.feature_id === featureId ? { ...iv, ...updates } : iv)
- )
- }, [])
-
- const handleRemoveIntervention = useCallback((featureId) => {
- setInterventions(prev => prev.filter(iv => iv.feature_id !== featureId))
- }, [])
-
- const handleClearInterventions = useCallback(() => {
- setInterventions([])
- }, [])
-
- const handleSendMessage = useCallback(async (content) => {
- const newMessages = [...messages, { role: 'user', content }]
- setMessages(newMessages)
- setLoading(true)
-
- // Prepare steered (and optionally baseline) placeholders
- const steeredMsg = { role: 'assistant', content: '', source: 'steered' }
- const baselineMsg = compare ? { role: 'assistant', content: '', source: 'baseline' } : null
-
- setMessages(prev => [
- ...prev,
- steeredMsg,
- ...(baselineMsg ? [baselineMsg] : []),
- ])
-
- try {
- const response = await fetch(`${API_BASE}/chat`, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- messages: newMessages,
- interventions: interventions.map(iv => ({
- feature_id: iv.feature_id,
- weight: iv.weight,
- mode: iv.mode,
- })),
- compare,
- max_tokens: 256,
- }),
- })
-
- const reader = response.body.getReader()
- const decoder = new TextDecoder()
- let buffer = ''
- let steeredText = ''
- let baselineText = ''
-
- while (true) {
- const { done, value } = await reader.read()
- if (done) break
-
- buffer += decoder.decode(value, { stream: true })
- const lines = buffer.split('\n')
- buffer = lines.pop() || ''
-
- for (const line of lines) {
- if (line.startsWith('data: ')) {
- try {
- const data = JSON.parse(line.slice(6))
- if (data.token !== undefined) {
- if (data.source === 'steered') {
- steeredText += data.token
- } else if (data.source === 'baseline') {
- baselineText += data.token
- }
-
- // Update messages in place
- setMessages(prev => {
- const updated = [...prev]
- // Find and update the steered message
- const steeredIdx = updated.findIndex(
- (m, i) => i >= newMessages.length && m.source === 'steered'
- )
- if (steeredIdx >= 0) {
- updated[steeredIdx] = { ...updated[steeredIdx], content: steeredText }
- }
- // Find and update the baseline message
- if (compare) {
- const baselineIdx = updated.findIndex(
- (m, i) => i >= newMessages.length && m.source === 'baseline'
- )
- if (baselineIdx >= 0) {
- updated[baselineIdx] = { ...updated[baselineIdx], content: baselineText }
- }
- }
- return updated
- })
- }
- } catch (e) {
- // Skip malformed data lines
- }
- }
- }
- }
- } catch (err) {
- console.error('Chat error:', err)
- setMessages(prev => [
- ...prev.slice(0, -1 - (compare ? 1 : 0)),
- { role: 'assistant', content: `Error: ${err.message}`, source: 'error' },
- ])
- }
-
- setLoading(false)
- }, [messages, interventions, compare])
-
- const handleClearChat = useCallback(() => {
- setMessages([])
- }, [])
-
- return (
-
-
-
-
SAE Steering
-
- Select features and adjust weights to steer model behavior
-
-
-
-
- iv.feature_id))}
- onSelect={handleAddIntervention}
- />
-
-
-
-
-
-
-
setCompare(c => !c)}
- style={{
- ...styles.toggleSwitch,
- background: compare ? '#76b900' : '#ccc',
- }}
- >
-
-
-
Compare with baseline
-
- {modelInfo && (
-
- {modelInfo.model} | layer {modelInfo.layer} | {modelInfo.n_features} features
-
- )}
-
-
-
-
- )
-}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/ChatPanel.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/ChatPanel.jsx
deleted file mode 100644
index c7ab22feb2..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/ChatPanel.jsx
+++ /dev/null
@@ -1,326 +0,0 @@
-import React, { useState, useRef, useEffect } from 'react'
-
-const styles = {
- container: {
- flex: 1,
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- background: '#f5f5f5',
- },
- messages: {
- flex: 1,
- overflowY: 'auto',
- padding: '20px',
- display: 'flex',
- flexDirection: 'column',
- gap: '16px',
- },
- empty: {
- flex: 1,
- display: 'flex',
- flexDirection: 'column',
- alignItems: 'center',
- justifyContent: 'center',
- color: '#bbb',
- gap: '8px',
- },
- emptyTitle: {
- fontSize: '18px',
- fontWeight: '600',
- color: '#999',
- },
- emptyHint: {
- fontSize: '13px',
- maxWidth: '360px',
- textAlign: 'center',
- lineHeight: '1.5',
- },
- userMessage: {
- alignSelf: 'flex-end',
- maxWidth: '70%',
- padding: '10px 14px',
- background: '#1a1a1a',
- color: '#fff',
- borderRadius: '16px 16px 4px 16px',
- fontSize: '14px',
- lineHeight: '1.5',
- whiteSpace: 'pre-wrap',
- },
- assistantRow: {
- alignSelf: 'flex-start',
- maxWidth: '90%',
- width: '100%',
- },
- compareRow: {
- display: 'grid',
- gridTemplateColumns: '1fr 1fr',
- gap: '12px',
- },
- singleRow: {
- display: 'flex',
- },
- responseCard: {
- padding: '12px 14px',
- background: '#fff',
- borderRadius: '12px',
- border: '1px solid #e0e0e0',
- fontSize: '14px',
- lineHeight: '1.6',
- whiteSpace: 'pre-wrap',
- },
- responseLabel: {
- fontSize: '10px',
- fontWeight: '600',
- textTransform: 'uppercase',
- letterSpacing: '0.5px',
- marginBottom: '6px',
- display: 'flex',
- alignItems: 'center',
- gap: '6px',
- },
- steeredLabel: {
- color: '#76b900',
- },
- baselineLabel: {
- color: '#888',
- },
- dot: {
- width: '6px',
- height: '6px',
- borderRadius: '50%',
- display: 'inline-block',
- },
- errorCard: {
- padding: '12px 14px',
- background: '#fff5f5',
- borderRadius: '12px',
- border: '1px solid #ffcdd2',
- color: '#c62828',
- fontSize: '13px',
- },
- inputArea: {
- padding: '12px 20px 16px',
- background: '#fff',
- borderTop: '1px solid #e0e0e0',
- display: 'flex',
- gap: '10px',
- alignItems: 'flex-end',
- flexShrink: 0,
- },
- textarea: {
- flex: 1,
- padding: '10px 14px',
- fontSize: '14px',
- border: '1px solid #ddd',
- borderRadius: '12px',
- outline: 'none',
- resize: 'none',
- fontFamily: 'inherit',
- lineHeight: '1.4',
- maxHeight: '120px',
- minHeight: '42px',
- },
- sendBtn: {
- padding: '10px 20px',
- fontSize: '13px',
- fontWeight: '600',
- border: 'none',
- borderRadius: '10px',
- cursor: 'pointer',
- flexShrink: 0,
- transition: 'background 0.15s',
- },
- clearBtn: {
- padding: '10px 14px',
- fontSize: '12px',
- background: 'none',
- border: '1px solid #ddd',
- borderRadius: '10px',
- cursor: 'pointer',
- color: '#888',
- flexShrink: 0,
- },
- cursor: {
- display: 'inline-block',
- width: '2px',
- height: '14px',
- background: '#76b900',
- marginLeft: '1px',
- verticalAlign: 'text-bottom',
- animation: 'blink 1s step-end infinite',
- },
-}
-
-// Add blink keyframe
-if (typeof document !== 'undefined' && !document.getElementById('steering-blink-style')) {
- const style = document.createElement('style')
- style.id = 'steering-blink-style'
- style.textContent = '@keyframes blink { 50% { opacity: 0; } }'
- document.head.appendChild(style)
-}
-
-export default function ChatPanel({ messages, loading, compare, interventionCount, onSend, onClear }) {
- const [input, setInput] = useState('')
- const messagesEndRef = useRef(null)
- const textareaRef = useRef(null)
-
- // Auto-scroll on new messages
- useEffect(() => {
- messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
- }, [messages])
-
- // Auto-resize textarea
- useEffect(() => {
- if (textareaRef.current) {
- textareaRef.current.style.height = 'auto'
- textareaRef.current.style.height = Math.min(textareaRef.current.scrollHeight, 120) + 'px'
- }
- }, [input])
-
- const handleSubmit = () => {
- if (!input.trim() || loading) return
- onSend(input.trim())
- setInput('')
- }
-
- const handleKeyDown = (e) => {
- if (e.key === 'Enter' && !e.shiftKey) {
- e.preventDefault()
- handleSubmit()
- }
- }
-
- // Group messages for compare mode rendering
- const renderMessages = () => {
- const elements = []
- let i = 0
- while (i < messages.length) {
- const msg = messages[i]
-
- if (msg.role === 'user') {
- elements.push(
- {msg.content}
- )
- i++
- } else if (msg.source === 'error') {
- elements.push(
-
- )
- i++
- } else if (msg.source === 'steered') {
- const steered = msg
- const baseline = (i + 1 < messages.length && messages[i + 1].source === 'baseline')
- ? messages[i + 1]
- : null
- const isStreaming = loading && i >= messages.length - (baseline ? 2 : 1)
-
- if (baseline) {
- // Compare mode: side by side
- elements.push(
-
-
-
-
-
- Steered
-
-
- {steered.content || '\u00A0'}
- {isStreaming && !steered.content && }
-
-
-
-
-
- Baseline
-
-
- {baseline.content || '\u00A0'}
- {isStreaming && steered.content && !baseline.content && }
-
-
-
-
- )
- i += 2
- } else {
- // Single mode
- elements.push(
-
-
-
- {steered.content || '\u00A0'}
- {isStreaming && }
-
-
-
- )
- i++
- }
- } else {
- // Fallback for any other message type
- elements.push(
-
- )
- i++
- }
- }
- return elements
- }
-
- return (
-
-
- {messages.length === 0 ? (
-
-
SAE Feature Steering
-
- {interventionCount > 0
- ? `${interventionCount} intervention${interventionCount > 1 ? 's' : ''} active. Send a message to see how steering affects the model's output.`
- : 'Add feature interventions from the sidebar, then start chatting to see their effect on generation.'}
-
-
- ) : (
- renderMessages()
- )}
-
-
-
-
-
-
- )
-}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/FeaturePicker.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/FeaturePicker.jsx
deleted file mode 100644
index 9359eacf34..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/FeaturePicker.jsx
+++ /dev/null
@@ -1,178 +0,0 @@
-import React, { useState, useMemo } from 'react'
-
-const styles = {
- container: {
- flex: 1,
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- minHeight: 0,
- borderTop: '1px solid #e0e0e0',
- },
- header: {
- padding: '10px 16px 8px',
- flexShrink: 0,
- },
- label: {
- fontSize: '11px',
- fontWeight: '600',
- textTransform: 'uppercase',
- color: '#888',
- letterSpacing: '0.5px',
- },
- searchRow: {
- display: 'flex',
- gap: '6px',
- padding: '0 16px 8px',
- flexShrink: 0,
- },
- searchInput: {
- flex: 1,
- padding: '6px 10px',
- fontSize: '12px',
- border: '1px solid #ddd',
- borderRadius: '5px',
- outline: 'none',
- },
- sortSelect: {
- padding: '6px 8px',
- fontSize: '11px',
- border: '1px solid #ddd',
- borderRadius: '5px',
- background: 'white',
- cursor: 'pointer',
- },
- list: {
- flex: 1,
- overflowY: 'auto',
- padding: '0 16px 12px',
- },
- item: {
- padding: '8px 10px',
- borderRadius: '6px',
- cursor: 'pointer',
- marginBottom: '4px',
- border: '1px solid #eee',
- transition: 'background 0.1s',
- },
- itemActive: {
- padding: '8px 10px',
- borderRadius: '6px',
- marginBottom: '4px',
- border: '1px solid #76b900',
- background: '#f0fae0',
- opacity: 0.7,
- cursor: 'default',
- },
- itemId: {
- fontSize: '10px',
- color: '#999',
- fontFamily: 'monospace',
- },
- itemDesc: {
- fontSize: '12px',
- fontWeight: '500',
- lineHeight: '1.3',
- marginTop: '1px',
- },
- itemStats: {
- display: 'flex',
- gap: '10px',
- marginTop: '3px',
- fontSize: '10px',
- color: '#888',
- fontFamily: 'monospace',
- },
- count: {
- fontSize: '11px',
- color: '#999',
- padding: '0 16px 4px',
- flexShrink: 0,
- },
-}
-
-export default function FeaturePicker({ features, activeFeatureIds, onSelect }) {
- const [search, setSearch] = useState('')
- const [sortBy, setSortBy] = useState('frequency')
-
- const filtered = useMemo(() => {
- let result = features
- if (search.trim()) {
- const q = search.toLowerCase()
- result = result.filter(f =>
- (f.description || '').toLowerCase().includes(q) ||
- String(f.feature_id).includes(q)
- )
- }
- if (sortBy === 'frequency') {
- result = [...result].sort((a, b) => (b.activation_freq || 0) - (a.activation_freq || 0))
- } else if (sortBy === 'max_activation') {
- result = [...result].sort((a, b) => (b.max_activation || 0) - (a.max_activation || 0))
- } else if (sortBy === 'feature_id') {
- result = [...result].sort((a, b) => a.feature_id - b.feature_id)
- }
- return result
- }, [features, search, sortBy])
-
- return (
-
-
-
- setSearch(e.target.value)}
- style={styles.searchInput}
- />
- setSortBy(e.target.value)} style={styles.sortSelect}>
- Freq
- Max
- ID
-
-
-
- {filtered.length} feature{filtered.length !== 1 ? 's' : ''}
-
-
- {filtered.slice(0, 100).map(feature => {
- const isActive = activeFeatureIds.has(feature.feature_id)
- return (
-
!isActive && onSelect(feature)}
- onMouseEnter={e => {
- if (!isActive) e.currentTarget.style.background = '#f8f8f8'
- }}
- onMouseLeave={e => {
- if (!isActive) e.currentTarget.style.background = ''
- }}
- >
-
#{feature.feature_id}
-
- {feature.description || `Feature ${feature.feature_id}`}
-
-
- freq: {((feature.activation_freq || 0) * 100).toFixed(1)}%
- max: {(feature.max_activation || 0).toFixed(1)}
-
-
- )
- })}
- {filtered.length > 100 && (
-
- Showing 100 of {filtered.length}. Refine your search.
-
- )}
- {filtered.length === 0 && (
-
- No features match your search.
-
- )}
-
-
- )
-}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/InterventionPanel.jsx b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/InterventionPanel.jsx
deleted file mode 100644
index dcb31d44f0..0000000000
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/steering_ui/src/InterventionPanel.jsx
+++ /dev/null
@@ -1,184 +0,0 @@
-import React from 'react'
-
-const MODES = [
- { value: 'additive_code', label: 'Additive (code)' },
- { value: 'multiplicative_code', label: 'Multiply (code)' },
- { value: 'direct', label: 'Direct' },
-]
-
-const styles = {
- container: {
- flexShrink: 0,
- maxHeight: '40%',
- display: 'flex',
- flexDirection: 'column',
- overflow: 'hidden',
- },
- header: {
- display: 'flex',
- justifyContent: 'space-between',
- alignItems: 'center',
- padding: '10px 16px 6px',
- flexShrink: 0,
- },
- label: {
- fontSize: '11px',
- fontWeight: '600',
- textTransform: 'uppercase',
- color: '#888',
- letterSpacing: '0.5px',
- },
- clearBtn: {
- fontSize: '11px',
- color: '#c00',
- cursor: 'pointer',
- background: 'none',
- border: 'none',
- padding: '2px 6px',
- },
- list: {
- flex: 1,
- overflowY: 'auto',
- padding: '0 16px 8px',
- },
- empty: {
- padding: '12px 16px',
- fontSize: '12px',
- color: '#bbb',
- fontStyle: 'italic',
- },
- item: {
- padding: '8px 10px',
- marginBottom: '6px',
- background: '#f8f8f8',
- borderRadius: '6px',
- border: '1px solid #e8e8e8',
- },
- itemHeader: {
- display: 'flex',
- justifyContent: 'space-between',
- alignItems: 'center',
- marginBottom: '6px',
- },
- itemDesc: {
- fontSize: '12px',
- fontWeight: '500',
- flex: 1,
- overflow: 'hidden',
- textOverflow: 'ellipsis',
- whiteSpace: 'nowrap',
- marginRight: '8px',
- },
- removeBtn: {
- background: 'none',
- border: 'none',
- cursor: 'pointer',
- fontSize: '14px',
- color: '#999',
- padding: '0 4px',
- lineHeight: 1,
- },
- controls: {
- display: 'flex',
- alignItems: 'center',
- gap: '8px',
- },
- slider: {
- flex: 1,
- height: '4px',
- cursor: 'pointer',
- accentColor: '#76b900',
- },
- weightValue: {
- fontSize: '12px',
- fontFamily: 'monospace',
- fontWeight: '600',
- minWidth: '36px',
- textAlign: 'right',
- },
- modeSelect: {
- fontSize: '10px',
- padding: '2px 4px',
- border: '1px solid #ddd',
- borderRadius: '3px',
- background: 'white',
- cursor: 'pointer',
- },
- featureIdBadge: {
- fontSize: '9px',
- fontFamily: 'monospace',
- color: '#aaa',
- marginLeft: '6px',
- },
-}
-
-export default function InterventionPanel({ interventions, onUpdate, onRemove, onClear }) {
- if (interventions.length === 0) {
- return (
-
-
- Active Interventions
-
-
- Click a feature below to add an intervention.
-
-
- )
- }
-
- return (
-
-
-
- Active Interventions ({interventions.length})
-
- Clear all
-
-
- {interventions.map(iv => (
-
-
-
- {iv.description}
- #{iv.feature_id}
-
-
onRemove(iv.feature_id)}
- title="Remove intervention"
- >
- ×
-
-
-
- onUpdate(iv.feature_id, { mode: e.target.value })}
- style={styles.modeSelect}
- >
- {MODES.map(m => (
- {m.label}
- ))}
-
- onUpdate(iv.feature_id, { weight: parseFloat(e.target.value) })}
- style={styles.slider}
- />
- 0 ? '#2e7d32' : iv.weight < 0 ? '#c62828' : '#666',
- }}>
- {iv.weight > 0 ? '+' : ''}{iv.weight.toFixed(1)}
-
-
-
- ))}
-
-
- )
-}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/training.py b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/training.py
index e445acad78..c9d8a9070a 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/training.py
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/sae/src/sae/training.py
@@ -19,6 +19,7 @@
separating training logic from the SAE model architecture.
"""
+import contextlib
import os
from dataclasses import dataclass, field
from pathlib import Path
@@ -62,6 +63,7 @@ class TrainingConfig:
lr_scale_with_latents: Scale LR by 1/sqrt(hidden_dim/reference_dim) per OpenAI paper
lr_reference_hidden_dim: Reference hidden_dim for LR scaling (default 2048)
warmup_steps: Number of steps for linear LR warmup (0 = no warmup)
+ grad_accumulation_steps: Number of microsteps to accumulate gradients before an optimizer step (1 = no accumulation)
"""
lr: float = 3e-4
@@ -77,6 +79,7 @@ class TrainingConfig:
lr_scale_with_latents: bool = False
lr_reference_hidden_dim: int = 2048
warmup_steps: int = 0
+ grad_accumulation_steps: int = 1
@dataclass
@@ -206,6 +209,7 @@ def _setup_dataloader(self, data: Union[torch.Tensor, DataLoader]) -> DataLoader
sampler=sampler,
num_workers=self.config.num_workers,
pin_memory=self.config.pin_memory,
+ drop_last=self.is_distributed,
)
elif isinstance(data, DataLoader):
return data
@@ -496,8 +500,9 @@ def fit(
model = self._get_model()
self.dead_latent_tracker = DeadLatentTracker(model.hidden_dim, device=self.config.device)
- # Compute global batch size
- global_batch_size = self.config.batch_size * self.parallel_config.dp_size
+ # Compute global batch size (accounts for gradient accumulation)
+ accum_steps = self.config.grad_accumulation_steps
+ global_batch_size = self.config.batch_size * self.parallel_config.dp_size * accum_steps
remaining_info = ""
if resume_from is not None:
@@ -509,6 +514,8 @@ def fit(
self._print_rank0("Batches per epoch: unknown (streaming)")
self._print_rank0(f"Batch size per GPU: {self.config.batch_size}")
self._print_rank0(f"Global batch size: {global_batch_size}")
+ if accum_steps > 1:
+ self._print_rank0(f"Gradient accumulation: {accum_steps} microsteps")
if self.config.warmup_steps > 0:
self._print_rank0(f"LR warmup: {self.config.warmup_steps} steps")
@@ -530,22 +537,41 @@ def fit(
if self.is_distributed and hasattr(self.dataloader.sampler, "set_epoch"):
self.dataloader.sampler.set_epoch(epoch)
+ optimizer.zero_grad()
+
for batch_idx, batch in enumerate(self.dataloader):
# Handle batch from TensorDataset
if isinstance(batch, (tuple, list)):
batch = batch[0]
batch = batch.to(self.config.device)
- # Update learning rate (handles warmup)
- self._update_lr(optimizer, self.global_step)
+ micro_step = batch_idx % accum_steps
+ is_accum_step = (micro_step == accum_steps - 1) or (
+ batch_idx == len(self.dataloader) - 1 if hasattr(self.dataloader, "__len__") else False
+ )
- # Forward pass
- loss_dict = loss_fn(batch, **loss_kwargs)
- loss = loss_dict["total"]
+ # Skip DDP gradient allreduce on non-final accumulation microsteps
+ maybe_no_sync = (
+ self.model.no_sync if (self.is_distributed and not is_accum_step) else contextlib.nullcontext
+ )
+ with maybe_no_sync():
+ # Forward pass
+ loss_dict = loss_fn(batch, **loss_kwargs)
+ loss = loss_dict["total"] / accum_steps
- # Backward pass
- optimizer.zero_grad()
- loss.backward()
+ # Backward pass (DDP allreduce only fires on the final microstep)
+ loss.backward()
+
+ # Track losses (unscaled for logging)
+ batch_losses.append(loss_dict["total"].item())
+
+ if not is_accum_step:
+ continue
+
+ # --- Optimizer step (every accum_steps microsteps) ---
+
+ # Update learning rate (handles warmup)
+ self._update_lr(optimizer, self.global_step)
# Sync dead latent stats across GPUs (for auxk loss)
self._sync_dead_latent_stats()
@@ -559,14 +585,12 @@ def fit(
grad_norm = self._compute_grad_norm()
optimizer.step()
+ optimizer.zero_grad()
# Post-step hook (e.g., normalize decoder)
if hasattr(model, "post_step"):
model.post_step()
- # Track losses
- batch_losses.append(loss.item())
-
# Log with PerfLogger if provided (only on rank 0)
if self.perf_logger is not None and self.rank == 0:
extra_metrics = {
@@ -581,8 +605,6 @@ def fit(
dead_stats = self.dead_latent_tracker.get_stats()
extra_metrics["dead_latents"] = dead_stats["dead_pct"]
- # Reconstruction metrics come from loss_dict (no extra forward pass needed)
-
# Reset external dead stats tracker periodically (only used as fallback)
if self.global_step % 1000 == 0 and self.dead_latent_tracker:
self.dead_latent_tracker.reset()
@@ -599,7 +621,7 @@ def fit(
# Fallback to basic wandb logging if no perf_logger (only on rank 0)
elif self.wandb_run and self.rank == 0 and (self.global_step % self.wandb_config.log_interval == 0):
log_dict = {
- "train/loss": loss.item(),
+ "train/loss": loss.item() * accum_steps,
"train/step": self.global_step,
"train/global_batch_size": global_batch_size,
}
diff --git a/bionemo-recipes/interpretability/sparse_autoencoders/uv.lock b/bionemo-recipes/interpretability/sparse_autoencoders/uv.lock
index ffe5a4e566..62a4a25deb 100644
--- a/bionemo-recipes/interpretability/sparse_autoencoders/uv.lock
+++ b/bionemo-recipes/interpretability/sparse_autoencoders/uv.lock
@@ -12,7 +12,6 @@ resolution-markers = [
[manifest]
members = [
"biosae-workspace",
- "codonfm-sae",
"esm2-sae",
"sae",
]
@@ -142,7 +141,7 @@ wheels = [
[[package]]
name = "biosae-workspace"
version = "0.1.0"
-source = { virtual = "." }
+source = { editable = "." }
dependencies = [
{ name = "duckdb" },
{ name = "huggingface-hub" },
@@ -285,63 +284,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274 },
]
-[[package]]
-name = "codonfm-sae"
-version = "0.1.0"
-source = { editable = "recipes/codonfm" }
-dependencies = [
- { name = "hydra-core" },
- { name = "numpy" },
- { name = "omegaconf" },
- { name = "pandas" },
- { name = "sae" },
- { name = "safetensors" },
- { name = "torch" },
- { name = "tqdm" },
-]
-
-[package.optional-dependencies]
-dev = [
- { name = "pytest" },
- { name = "ruff" },
-]
-export = [
- { name = "duckdb" },
- { name = "pandas" },
- { name = "pyarrow" },
-]
-tracking = [
- { name = "wandb" },
-]
-viz = [
- { name = "duckdb" },
- { name = "hdbscan" },
- { name = "pyarrow" },
- { name = "umap-learn" },
-]
-
-[package.metadata]
-requires-dist = [
- { name = "duckdb", marker = "extra == 'export'", specifier = ">=0.9" },
- { name = "duckdb", marker = "extra == 'viz'", specifier = ">=0.9" },
- { name = "hdbscan", marker = "extra == 'viz'", specifier = ">=0.8" },
- { name = "hydra-core", specifier = ">=1.3" },
- { name = "numpy", specifier = ">=1.20" },
- { name = "omegaconf", specifier = ">=2.3" },
- { name = "pandas", specifier = ">=1.5" },
- { name = "pandas", marker = "extra == 'export'", specifier = ">=1.5" },
- { name = "pyarrow", marker = "extra == 'export'", specifier = ">=10.0" },
- { name = "pyarrow", marker = "extra == 'viz'", specifier = ">=10.0" },
- { name = "pytest", marker = "extra == 'dev'", specifier = ">=7.0" },
- { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1" },
- { name = "sae", editable = "sae" },
- { name = "safetensors", specifier = ">=0.3" },
- { name = "torch", specifier = ">=2.0" },
- { name = "tqdm", specifier = ">=4.60" },
- { name = "umap-learn", marker = "extra == 'viz'", specifier = ">=0.5" },
- { name = "wandb", marker = "extra == 'tracking'", specifier = ">=0.15" },
-]
-
[[package]]
name = "colorama"
version = "0.4.6"