-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstreaming.ts
More file actions
548 lines (459 loc) Β· 16.1 KB
/
streaming.ts
File metadata and controls
548 lines (459 loc) Β· 16.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
/**
* Streaming Examples for HelpingAI JavaScript SDK
*
* This example demonstrates streaming responses from the HelpingAI API:
* - Basic streaming
* - Server-sent events handling
* - Error handling in streams
* - Browser vs Node.js differences
* - Stream processing techniques
*/
import { HelpingAI } from '../src';
/**
 * Example 1: Basic Streaming
 *
 * Creates a streaming chat completion and prints each content delta as it
 * arrives, then reports the total number of characters received.
 */
async function basicStreamingExample(): Promise<void> {
  console.log('=== Example 1: Basic Streaming ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [
        { role: 'user', content: 'Tell me a story about artificial intelligence and empathy' },
      ],
      stream: true,
      max_tokens: 500,
    });
    console.log('π‘ Starting stream...\n');
    let fullContent = '';
    // `create` returns a union type; only iterate when this is actually a stream.
    if (Symbol.asyncIterator in response) {
      for await (const chunk of response) {
        // Guard against chunks whose `choices` array is empty (e.g. keep-alives).
        const choice = chunk.choices[0];
        if (!choice) continue;
        if (choice.delta.content) {
          const content = choice.delta.content;
          fullContent += content;
          // Print content as it arrives (simulating real-time display)
          console.log(content);
        }
        // A non-null finish_reason marks the end of the stream.
        if (choice.finish_reason) {
          console.log(`\nβ Stream completed. Reason: ${choice.finish_reason}`);
          break;
        }
      }
    }
    console.log(`\nπ Total characters received: ${fullContent.length}`);
  } catch (error: unknown) {
    // Narrow before use — `catch` variables should be `unknown`, never `any`.
    console.error('β Streaming error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
 * Example 2: Streaming with Progress Tracking
 *
 * Streams a completion while tracking chunk count, an approximate token
 * count, and elapsed wall-clock time, logging progress every 10 chunks.
 */
async function streamingWithProgressExample(): Promise<void> {
  console.log('\n=== Example 2: Streaming with Progress ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [
        { role: 'user', content: 'Explain the importance of emotional intelligence in leadership' },
      ],
      stream: true,
      max_tokens: 300,
    });
    console.log('π‘ Starting stream with progress tracking...\n');
    let chunkCount = 0;
    let totalTokens = 0;
    let fullContent = '';
    const startTime = Date.now();
    if (Symbol.asyncIterator in response) {
      for await (const chunk of response) {
        chunkCount++;
        // Guard against chunks whose `choices` array is empty.
        const choice = chunk.choices[0];
        if (!choice) continue;
        if (choice.delta.content) {
          const content = choice.delta.content;
          fullContent += content;
          // Estimate tokens (rough approximation: 1 token is ~4 characters)
          totalTokens += Math.ceil(content.length / 4);
          // Show progress every 10 chunks
          if (chunkCount % 10 === 0) {
            const elapsed = (Date.now() - startTime) / 1000;
            console.log(
              `π Progress: ${chunkCount} chunks, ~${totalTokens} tokens, ${elapsed.toFixed(1)}s`
            );
          }
          console.log(content);
        }
        if (choice.finish_reason) {
          const elapsed = (Date.now() - startTime) / 1000;
          console.log(`\nβ Stream completed in ${elapsed.toFixed(2)}s`);
          console.log(`π Final stats: ${chunkCount} chunks, ~${totalTokens} tokens`);
          console.log(`π Total content length: ${fullContent.length} characters`);
          break;
        }
      }
    }
  } catch (error: unknown) {
    console.error('β Streaming error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
 * Example 3: Streaming with Tool Calls
 *
 * Streams a completion with tools enabled, printing content deltas and
 * collecting any tool-call deltas that appear in the stream.
 */
async function streamingWithToolsExample(): Promise<void> {
  console.log('\n=== Example 3: Streaming with Tools ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [
        {
          role: 'user',
          content: "What's 15 * 23 and can you search for information about emotional AI?",
        },
      ],
      tools: ['code_interpreter', 'web_search'],
      stream: true,
    });
    console.log('π‘ Starting stream with tools...\n');
    let fullContent = '';
    // Collected only for counting; `unknown[]` avoids an unnecessary `any`.
    const toolCalls: unknown[] = [];
    if (Symbol.asyncIterator in response) {
      for await (const chunk of response) {
        // Guard against chunks whose `choices` array is empty.
        const choice = chunk.choices[0];
        if (!choice) continue;
        // Handle content delta
        if (choice.delta.content) {
          const content = choice.delta.content;
          fullContent += content;
          console.log(content);
        }
        // Handle tool calls delta
        if (choice.delta.tool_calls) {
          console.log('π§ Tool call detected in stream');
          toolCalls.push(...choice.delta.tool_calls);
        }
        if (choice.finish_reason) {
          console.log(`\nβ Stream completed. Reason: ${choice.finish_reason}`);
          if (toolCalls.length > 0) {
            console.log(`π§ ${toolCalls.length} tool calls were made during streaming`);
          }
          console.log(`π Content received: ${fullContent.length} characters`);
          break;
        }
      }
    }
  } catch (error: unknown) {
    console.error('β Streaming error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
* Example 4: Error Handling in Streams
*/
async function streamingErrorHandlingExample(): Promise<void> {
console.log('\n=== Example 4: Stream Error Handling ===');
const client = new HelpingAI({
apiKey: 'invalid-key', // Intentionally invalid
timeout: 5000, // Short timeout for demonstration
});
try {
const response = await client.chat.completions.create({
model: 'Dhanishtha-2.0-preview',
messages: [{ role: 'user', content: 'This will fail due to invalid API key' }],
stream: true,
});
if (Symbol.asyncIterator in response) {
for await (const chunk of response) {
console.log('Received chunk:', chunk);
}
}
} catch (error: any) {
console.log('β
Caught expected error:');
if (error.name === 'AuthenticationError') {
console.log(' - Authentication failed (invalid API key)');
} else if (error.name === 'TimeoutError') {
console.log(' - Request timed out');
} else if (error.name === 'NetworkError') {
console.log(' - Network connection failed');
} else {
console.log(` - ${error.name}: ${error.message}`);
}
console.log('π‘ Tip: Always handle errors when streaming');
} finally {
await client.cleanup();
}
}
/**
 * Example 5: Stream Processing with Buffering
 *
 * Buffers incoming deltas and emits output one complete sentence at a time,
 * keeping any trailing partial sentence in the buffer until more text arrives.
 */
async function streamBufferingExample(): Promise<void> {
  console.log('\n=== Example 5: Stream Buffering ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: 'Write a detailed explanation of machine learning' }],
      stream: true,
      max_tokens: 400,
    });
    console.log('π‘ Starting buffered streaming...\n');
    let buffer = '';
    let sentenceCount = 0;
    if (Symbol.asyncIterator in response) {
      for await (const chunk of response) {
        // Guard against chunks whose `choices` array is empty.
        const choice = chunk.choices[0];
        if (!choice) continue;
        if (choice.delta.content) {
          buffer += choice.delta.content;
          // Split on sentence terminators; the final element is either empty
          // (text ended on a terminator) or an incomplete sentence.
          const sentences = buffer.split(/[.!?]+/);
          // Keep the last incomplete sentence in buffer
          buffer = sentences.pop() || '';
          // Process complete sentences
          for (const sentence of sentences) {
            if (sentence.trim()) {
              sentenceCount++;
              console.log(`[Sentence ${sentenceCount}] ${sentence.trim()}.`);
              console.log(''); // Add spacing
            }
          }
        }
        if (choice.finish_reason) {
          // Process any remaining content in buffer
          if (buffer.trim()) {
            sentenceCount++;
            console.log(`[Final] ${buffer.trim()}`);
          }
          console.log(`\nβ Processed ${sentenceCount} sentences`);
          break;
        }
      }
    }
  } catch (error: unknown) {
    console.error('β Streaming error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
 * Example 6: Streaming with Custom Processing
 *
 * Demonstrates custom per-chunk analysis: running word count, and detection
 * of list items (numbered or bulleted lines) as they appear in the stream.
 */
async function customStreamProcessingExample(): Promise<void> {
  console.log('\n=== Example 6: Custom Stream Processing ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  try {
    const response = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: 'List 5 benefits of emotional intelligence' }],
      stream: true,
    });
    console.log('π‘ Starting custom stream processing...\n');
    let fullContent = '';
    let wordCount = 0;
    const listItems: string[] = [];
    if (Symbol.asyncIterator in response) {
      for await (const chunk of response) {
        // Guard against chunks whose `choices` array is empty.
        const choice = chunk.choices[0];
        if (!choice) continue;
        if (choice.delta.content) {
          const content = choice.delta.content;
          fullContent += content;
          // Count words in this delta (whitespace-separated, non-empty)
          const words = content.split(/\s+/).filter(word => word.length > 0);
          wordCount += words.length;
          // Extract list items (simple pattern matching): lines starting with
          // "1." / "-" / "*". Re-scans accumulated text, so dedupe via includes().
          const lines = fullContent.split('\n');
          for (const line of lines) {
            const trimmed = line.trim();
            if (trimmed.match(/^\d+\.|^-|^\*/)) {
              if (!listItems.includes(trimmed)) {
                listItems.push(trimmed);
                console.log(`π Found list item: ${trimmed}`);
              }
            }
          }
          // Show real-time word count every 50 words (only fires when the
          // running total lands exactly on a multiple of 50).
          if (wordCount > 0 && wordCount % 50 === 0) {
            console.log(`π Word count: ${wordCount}`);
          }
        }
        if (choice.finish_reason) {
          console.log(`\nβ Stream completed`);
          console.log(`π Final word count: ${wordCount}`);
          console.log(`π List items found: ${listItems.length}`);
          break;
        }
      }
    }
  } catch (error: unknown) {
    console.error('β Streaming error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
 * Example 7: Streaming Performance Comparison
 *
 * Issues the same prompt twice — once non-streaming, once streaming — and
 * compares total latency plus time-to-first-chunk for the streaming case.
 */
async function streamingPerformanceExample(): Promise<void> {
  console.log('\n=== Example 7: Performance Comparison ===');
  const client = new HelpingAI({
    apiKey: 'your-api-key',
  });
  const prompt = 'Explain the role of empathy in artificial intelligence development';
  try {
    // Non-streaming request
    console.log('π Testing non-streaming...');
    const startNonStream = Date.now();
    const nonStreamResponse = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: prompt }],
      stream: false,
      max_tokens: 200,
    });
    const nonStreamTime = Date.now() - startNonStream;
    let nonStreamLength = 0;
    // Narrow the union: only a non-streaming response has `choices` directly.
    if ('choices' in nonStreamResponse) {
      nonStreamLength = nonStreamResponse.choices[0].message.content?.length || 0;
    }
    console.log(`β Non-streaming: ${nonStreamTime}ms, ${nonStreamLength} characters`);
    // Streaming request
    console.log('π Testing streaming...');
    const startStream = Date.now();
    let firstChunkTime = 0;
    let streamLength = 0;
    const streamResponse = await client.chat.completions.create({
      model: 'Dhanishtha-2.0-preview',
      messages: [{ role: 'user', content: prompt }],
      stream: true,
      max_tokens: 200,
    });
    if (Symbol.asyncIterator in streamResponse) {
      let isFirstChunk = true;
      for await (const chunk of streamResponse) {
        // Guard against chunks whose `choices` array is empty.
        const choice = chunk.choices[0];
        if (!choice) continue;
        if (choice.delta.content) {
          if (isFirstChunk) {
            // Latency until the first visible content — streaming's key win.
            firstChunkTime = Date.now() - startStream;
            isFirstChunk = false;
          }
          streamLength += choice.delta.content.length;
        }
        if (choice.finish_reason) {
          break;
        }
      }
    }
    const totalStreamTime = Date.now() - startStream;
    console.log(
      `β Streaming: ${totalStreamTime}ms total, ${firstChunkTime}ms to first chunk, ${streamLength} characters`
    );
    console.log('\nπ Performance Analysis:');
    console.log(` - Time to first content: ${firstChunkTime}ms (streaming advantage)`);
    console.log(` - Total time difference: ${totalStreamTime - nonStreamTime}ms`);
    console.log(` - Streaming provides faster perceived response time`);
  } catch (error: unknown) {
    console.error('β Performance test error:', error instanceof Error ? error.message : error);
  } finally {
    await client.cleanup();
  }
}
/**
* Example 8: Browser vs Node.js Streaming Differences
*/
async function crossPlatformStreamingExample(): Promise<void> {
console.log('\n=== Example 8: Cross-Platform Streaming ===');
const client = new HelpingAI({
apiKey: 'your-api-key',
});
try {
console.log('π Platform detection:');
// Detect environment
const isBrowser = typeof window !== 'undefined';
const isNode = typeof globalThis !== 'undefined' && (globalThis as any).process?.versions?.node;
console.log(` - Browser: ${isBrowser}`);
console.log(` - Node.js: ${isNode}`);
const response = await client.chat.completions.create({
model: 'Dhanishtha-2.0-preview',
messages: [
{ role: 'user', content: 'Explain how streaming works differently in browsers vs Node.js' },
],
stream: true,
max_tokens: 300,
});
console.log('π‘ Starting cross-platform streaming...\n');
if (Symbol.asyncIterator in response) {
for await (const chunk of response) {
if (chunk.choices[0].delta.content) {
const content = chunk.choices[0].delta.content;
// Platform-specific output handling
if (isBrowser) {
// In browser, we might update DOM elements
console.log(`[Browser] ${content}`);
} else {
// In Node.js, we can write to stdout
console.log(`[Node.js] ${content}`);
}
}
if (chunk.choices[0].finish_reason) {
console.log('\nβ
Cross-platform streaming completed');
break;
}
}
}
} catch (error: any) {
console.error('β Cross-platform streaming error:', error.message || error);
} finally {
await client.cleanup();
}
}
// Main execution function
async function main(): Promise<void> {
console.log('π HelpingAI JavaScript SDK - Streaming Examples\n');
console.log('β οΈ Remember to replace "your-api-key" with your actual API key');
console.log(' Get your API key from: https://helpingai.co/dashboard\n');
try {
await basicStreamingExample();
await streamingWithProgressExample();
await streamingWithToolsExample();
await streamingErrorHandlingExample();
await streamBufferingExample();
await customStreamProcessingExample();
await streamingPerformanceExample();
await crossPlatformStreamingExample();
console.log('\nβ
All streaming examples completed!');
console.log('π Key takeaways:');
console.log(' - Streaming provides faster perceived response time');
console.log(' - Always handle errors in streaming scenarios');
console.log(' - Buffer content for better user experience');
console.log(' - Consider platform differences (Browser vs Node.js)');
} catch (error) {
console.error('Error in main:', error);
}
}
// Run the example if this file is executed directly
// `require` and `module` are declared as ambient constants so this CommonJS
// direct-run check compiles without @types/node; in browser/ESM environments
// both are undefined and the guard is skipped.
declare const require: any;
declare const module: any;
if (typeof require !== 'undefined' && typeof module !== 'undefined' && require.main === module) {
// Fire-and-forget: any rejection from main() is logged rather than unhandled.
main().catch(console.error);
}
// Export functions for use in other modules
export {
basicStreamingExample,
streamingWithProgressExample,
streamingWithToolsExample,
streamingErrorHandlingExample,
streamBufferingExample,
customStreamProcessingExample,
streamingPerformanceExample,
crossPlatformStreamingExample,
};